Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
torvalds
GitHub Repository: torvalds/linux
Path: blob/master/tools/objtool/klp-diff.c
122850 views
1
// SPDX-License-Identifier: GPL-2.0-or-later
2
#define _GNU_SOURCE /* memmem() */
3
#include <subcmd/parse-options.h>
4
#include <stdlib.h>
5
#include <string.h>
6
#include <libgen.h>
7
#include <stdio.h>
8
#include <ctype.h>
9
10
#include <objtool/objtool.h>
11
#include <objtool/warn.h>
12
#include <objtool/arch.h>
13
#include <objtool/klp.h>
14
#include <objtool/util.h>
15
#include <arch/special.h>
16
17
#include <linux/align.h>
18
#include <linux/objtool_types.h>
19
#include <linux/livepatch_external.h>
20
#include <linux/stringify.h>
21
#include <linux/string.h>
22
#include <linux/jhash.h>
23
24
/* Size of a struct member without needing an instance (local copy of the kernel macro) */
#define sizeof_field(TYPE, MEMBER) sizeof((((TYPE *)0)->MEMBER))
25
26
/* The three ELF objects involved in a diff: original, patched, and the output */
struct elfs {
	struct elf *orig, *patched, *out;
	const char *modname;	/* cached result of find_modname() */
};
30
31
/* One Module.symvers entry: symbol 'sym' exported by module 'mod' */
struct export {
	struct hlist_node hash;	/* linkage in the 'exports' table, keyed by symbol name */
	char *mod, *sym;
};
35
36
/* Command-line usage string for "objtool klp diff" */
static const char * const klp_diff_usage[] = {
	"objtool klp diff [<options>] <in1.o> <in2.o> <out.o>",
	NULL,
};
40
41
/* Option table for "objtool klp diff" */
static const struct option klp_diff_options[] = {
	OPT_GROUP("Options:"),
	OPT_BOOLEAN('d', "debug", &debug, "enable debug output"),
	OPT_END(),
};
46
47
/* All exported symbols read from Module.symvers, keyed by str_hash() of the name */
static DEFINE_HASHTABLE(exports, 15);
48
49
/* Hash a NUL-terminated string for use with the 'exports' hashtable */
static inline u32 str_hash(const char *str)
{
	return jhash(str, strlen(str), 0);
}
53
54
/*
 * Return a newly malloc'd copy of @orig with control characters made
 * printable: SOH (\001) becomes "<SOH>", newline becomes "\n" and tab
 * becomes "\t".  Returns NULL on allocation failure.  Caller owns the
 * returned buffer.
 */
static char *escape_str(const char *orig)
{
	const char *src;
	char *dst, *out;
	size_t len = 0;

	/* First pass: compute the escaped length */
	for (src = orig; *src; src++) {
		if (*src == '\001')
			len += 5;
		else if (*src == '\n' || *src == '\t')
			len += 2;
		else
			len++;
	}

	out = malloc(len + 1);
	if (!out)
		return NULL;

	/* Second pass: emit the escaped bytes */
	dst = out;
	for (src = orig; *src; src++) {
		if (*src == '\001') {
			memcpy(dst, "<SOH>", 5);
			dst += 5;
		} else if (*src == '\n') {
			*dst++ = '\\';
			*dst++ = 'n';
		} else if (*src == '\t') {
			*dst++ = '\\';
			*dst++ = 't';
		} else {
			*dst++ = *src;
		}
	}

	*dst = '\0';
	return out;
}
85
86
static int read_exports(void)
87
{
88
const char *symvers = "Module.symvers";
89
char line[1024], *path = NULL;
90
unsigned int line_num = 1;
91
FILE *file;
92
93
file = fopen(symvers, "r");
94
if (!file) {
95
path = top_level_dir(symvers);
96
if (!path) {
97
ERROR("can't open '%s', \"objtool diff\" should be run from the kernel tree", symvers);
98
return -1;
99
}
100
101
file = fopen(path, "r");
102
if (!file) {
103
ERROR_GLIBC("fopen");
104
return -1;
105
}
106
}
107
108
while (fgets(line, 1024, file)) {
109
char *sym, *mod, *type;
110
struct export *export;
111
112
sym = strchr(line, '\t');
113
if (!sym) {
114
ERROR("malformed Module.symvers (sym) at line %d", line_num);
115
return -1;
116
}
117
118
*sym++ = '\0';
119
120
mod = strchr(sym, '\t');
121
if (!mod) {
122
ERROR("malformed Module.symvers (mod) at line %d", line_num);
123
return -1;
124
}
125
126
*mod++ = '\0';
127
128
type = strchr(mod, '\t');
129
if (!type) {
130
ERROR("malformed Module.symvers (type) at line %d", line_num);
131
return -1;
132
}
133
134
*type++ = '\0';
135
136
if (*sym == '\0' || *mod == '\0') {
137
ERROR("malformed Module.symvers at line %d", line_num);
138
return -1;
139
}
140
141
export = calloc(1, sizeof(*export));
142
if (!export) {
143
ERROR_GLIBC("calloc");
144
return -1;
145
}
146
147
export->mod = strdup(mod);
148
if (!export->mod) {
149
ERROR_GLIBC("strdup");
150
return -1;
151
}
152
153
export->sym = strdup(sym);
154
if (!export->sym) {
155
ERROR_GLIBC("strdup");
156
return -1;
157
}
158
159
hash_add(exports, &export->hash, str_hash(sym));
160
}
161
162
free(path);
163
fclose(file);
164
165
return 0;
166
}
167
168
/*
 * Read the .discard.sym_checksum section (emitted by 'objtool --checksum')
 * and attach each recorded checksum to its corresponding function symbol.
 * Returns 0 on success, -1 on error.
 */
static int read_sym_checksums(struct elf *elf)
{
	struct section *sec;

	sec = find_section_by_name(elf, ".discard.sym_checksum");
	if (!sec) {
		ERROR("'%s' missing .discard.sym_checksum section, file not processed by 'objtool --checksum'?",
		      elf->name);
		return -1;
	}

	if (!sec->rsec) {
		ERROR("missing reloc section for .discard.sym_checksum");
		return -1;
	}

	/* The section must be an exact array of struct sym_checksum */
	if (sec_size(sec) % sizeof(struct sym_checksum)) {
		ERROR("struct sym_checksum size mismatch");
		return -1;
	}

	for (int i = 0; i < sec_size(sec) / sizeof(struct sym_checksum); i++) {
		struct sym_checksum *sym_checksum;
		struct reloc *reloc;
		struct symbol *sym;

		sym_checksum = (struct sym_checksum *)sec->data->d_buf + i;

		/* Each entry's reloc identifies the symbol the checksum belongs to */
		reloc = find_reloc_by_dest(elf, sec, i * sizeof(*sym_checksum));
		if (!reloc) {
			ERROR("can't find reloc for sym_checksum[%d]", i);
			return -1;
		}

		sym = reloc->sym;

		if (is_sec_sym(sym)) {
			ERROR("not sure how to handle section %s", sym->name);
			return -1;
		}

		/* Only function symbols carry checksums; others are ignored */
		if (is_func_sym(sym))
			sym->csum.checksum = sym_checksum->checksum;
	}

	return 0;
}
215
216
/* Return the first STT_FILE symbol in the symbol table, or NULL if none */
static struct symbol *first_file_symbol(struct elf *elf)
{
	struct symbol *sym;

	for_each_sym(elf, sym) {
		if (is_file_sym(sym))
			return sym;
	}

	return NULL;
}
227
228
/* Return the next STT_FILE symbol after 'sym' in table order, or NULL */
static struct symbol *next_file_symbol(struct elf *elf, struct symbol *sym)
{
	for_each_sym_continue(elf, sym) {
		if (is_file_sym(sym))
			return sym;
	}

	return NULL;
}
237
238
/*
239
* Certain static local variables should never be correlated. They will be
240
* used in place rather than referencing the originals.
241
*/
242
static bool is_uncorrelated_static_local(struct symbol *sym)
243
{
244
static const char * const vars[] = {
245
"__already_done.",
246
"__func__.",
247
"__key.",
248
"__warned.",
249
"_entry.",
250
"_entry_ptr.",
251
"_rs.",
252
"descriptor.",
253
"CSWTCH.",
254
};
255
256
if (!is_object_sym(sym) || !is_local_sym(sym))
257
return false;
258
259
if (!strcmp(sym->sec->name, ".data.once"))
260
return true;
261
262
for (int i = 0; i < ARRAY_SIZE(vars); i++) {
263
if (strstarts(sym->name, vars[i]))
264
return true;
265
}
266
267
return false;
268
}
269
270
/*
271
* Clang emits several useless .Ltmp_* code labels.
272
*/
273
static bool is_clang_tmp_label(struct symbol *sym)
274
{
275
return sym->type == STT_NOTYPE &&
276
is_text_sec(sym->sec) &&
277
strstarts(sym->name, ".Ltmp") &&
278
isdigit(sym->name[5]);
279
}
280
281
/*
 * Special sections are arrays of fixed-format entries (alternatives, bug
 * table, ...) whose entries get extracted individually rather than being
 * correlated like normal symbols.
 */
static bool is_special_section(struct section *sec)
{
	static const char * const specials[] = {
		".altinstructions",
		".smp_locks",
		"__bug_table",
		"__ex_table",
		"__jump_table",
		"__mcount_loc",

		/*
		 * Extract .static_call_sites here to inherit non-module
		 * preferential treatment. The later static call processing
		 * during klp module build will be skipped when it sees this
		 * section already exists.
		 */
		".static_call_sites",
	};

	/* .discard sections which are NOT special, despite the rule below */
	static const char * const non_special_discards[] = {
		".discard.addressable",
		".discard.sym_checksum",
	};

	if (is_text_sec(sec))
		return false;

	for (int i = 0; i < ARRAY_SIZE(specials); i++) {
		if (!strcmp(sec->name, specials[i]))
			return true;
	}

	/* Most .discard data sections are special */
	for (int i = 0; i < ARRAY_SIZE(non_special_discards); i++) {
		if (!strcmp(sec->name, non_special_discards[i]))
			return false;
	}

	return strstarts(sec->name, ".discard.");
}
321
322
/*
323
* These sections are referenced by special sections but aren't considered
324
* special sections themselves.
325
*/
326
static bool is_special_section_aux(struct section *sec)
327
{
328
static const char * const specials_aux[] = {
329
".altinstr_replacement",
330
".altinstr_aux",
331
};
332
333
for (int i = 0; i < ARRAY_SIZE(specials_aux); i++) {
334
if (!strcmp(sec->name, specials_aux[i]))
335
return true;
336
}
337
338
return false;
339
}
340
341
/*
 * These symbols should never be correlated, so their local patched versions
 * are used instead of linking to the originals.  Returns true for symbols
 * that are anonymous, compiler-generated, or live in sections handled by
 * other means (strings, special sections).
 */
static bool dont_correlate(struct symbol *sym)
{
	return is_file_sym(sym) ||
	       is_null_sym(sym) ||
	       is_sec_sym(sym) ||
	       is_prefix_func(sym) ||
	       is_uncorrelated_static_local(sym) ||
	       is_clang_tmp_label(sym) ||
	       is_string_sec(sym->sec) ||
	       is_special_section(sym->sec) ||
	       is_special_section_aux(sym->sec) ||
	       strstarts(sym->name, "__initcall__");
}
358
359
/*
 * For each symbol in the original kernel, find its corresponding "twin" in the
 * patched kernel.  Locals are paired per-FILE-symbol region by demangled
 * name; globals are paired by name lookup.  Returns 0 on success, -1 on a
 * FILE symbol mismatch between the two objects.
 */
static int correlate_symbols(struct elfs *e)
{
	struct symbol *file1_sym, *file2_sym;
	struct symbol *sym1, *sym2;

	file1_sym = first_file_symbol(e->orig);
	file2_sym = first_file_symbol(e->patched);

	/*
	 * Correlate any locals before the first FILE symbol. This has been
	 * seen when LTO inexplicably strips the initramfs_data.o FILE symbol
	 * due to the file only containing data and no code.
	 */
	for_each_sym(e->orig, sym1) {
		if (sym1 == file1_sym || !is_local_sym(sym1))
			break;

		if (dont_correlate(sym1))
			continue;

		/* Scan the same pre-FILE region of the patched object for a match */
		for_each_sym(e->patched, sym2) {
			if (sym2 == file2_sym || !is_local_sym(sym2))
				break;

			if (sym2->twin || dont_correlate(sym2))
				continue;

			if (strcmp(sym1->demangled_name, sym2->demangled_name))
				continue;

			sym1->twin = sym2;
			sym2->twin = sym1;
			break;
		}
	}

	/* Correlate locals after the first FILE symbol */
	for (; ; file1_sym = next_file_symbol(e->orig, file1_sym),
	       file2_sym = next_file_symbol(e->patched, file2_sym)) {

		/* Both objects must have the same sequence of FILE symbols */
		if (!file1_sym && file2_sym) {
			ERROR("FILE symbol mismatch: NULL != %s", file2_sym->name);
			return -1;
		}

		if (file1_sym && !file2_sym) {
			ERROR("FILE symbol mismatch: %s != NULL", file1_sym->name);
			return -1;
		}

		if (!file1_sym)
			break;

		if (strcmp(file1_sym->name, file2_sym->name)) {
			ERROR("FILE symbol mismatch: %s != %s", file1_sym->name, file2_sym->name);
			return -1;
		}

		file1_sym->twin = file2_sym;
		file2_sym->twin = file1_sym;

		sym1 = file1_sym;

		/* Pair this file's locals by demangled name, first unpaired match wins */
		for_each_sym_continue(e->orig, sym1) {
			if (is_file_sym(sym1) || !is_local_sym(sym1))
				break;

			if (dont_correlate(sym1))
				continue;

			sym2 = file2_sym;
			for_each_sym_continue(e->patched, sym2) {
				if (is_file_sym(sym2) || !is_local_sym(sym2))
					break;

				if (sym2->twin || dont_correlate(sym2))
					continue;

				if (strcmp(sym1->demangled_name, sym2->demangled_name))
					continue;

				sym1->twin = sym2;
				sym2->twin = sym1;
				break;
			}
		}
	}

	/* Correlate globals */
	for_each_sym(e->orig, sym1) {
		if (sym1->bind == STB_LOCAL)
			continue;

		sym2 = find_global_symbol_by_name(e->patched, sym1->name);

		if (sym2 && !sym2->twin && !strcmp(sym1->name, sym2->name)) {
			sym1->twin = sym2;
			sym2->twin = sym1;
		}
	}

	/* Warn about correlatable original symbols that found no twin */
	for_each_sym(e->orig, sym1) {
		if (sym1->twin || dont_correlate(sym1))
			continue;
		WARN("no correlation: %s", sym1->name);
	}

	return 0;
}
472
473
/*
 * "sympos" is used by livepatch to disambiguate duplicate symbol names.
 * Returns the 1-based position of 'sym' among same-named symbols, 0 when
 * the name is unique (or the symbol is global), or ULONG_MAX on error.
 */
static unsigned long find_sympos(struct elf *elf, struct symbol *sym)
{
	bool vmlinux = str_ends_with(objname, "vmlinux.o");
	unsigned long sympos = 0, nr_matches = 0;
	bool has_dup = false;
	struct symbol *s;

	/* Globals are unambiguous by name alone */
	if (sym->bind != STB_LOCAL)
		return 0;

	if (vmlinux && sym->type == STT_FUNC) {
		/*
		 * HACK: Unfortunately, symbol ordering can differ between
		 * vmlinux.o and vmlinux due to the linker script emitting
		 * .text.unlikely* before .text*. Count .text.unlikely* first.
		 *
		 * TODO: Disambiguate symbols more reliably (checksums?)
		 */
		for_each_sym(elf, s) {
			if (strstarts(s->sec->name, ".text.unlikely") &&
			    !strcmp(s->name, sym->name)) {
				nr_matches++;
				if (s == sym)
					sympos = nr_matches;
				else
					has_dup = true;
			}
		}
		for_each_sym(elf, s) {
			if (!strstarts(s->sec->name, ".text.unlikely") &&
			    !strcmp(s->name, sym->name)) {
				nr_matches++;
				if (s == sym)
					sympos = nr_matches;
				else
					has_dup = true;
			}
		}
	} else {
		/* Plain symbol-table order */
		for_each_sym(elf, s) {
			if (!strcmp(s->name, sym->name)) {
				nr_matches++;
				if (s == sym)
					sympos = nr_matches;
				else
					has_dup = true;
			}
		}
	}

	/* The symbol itself should always have been counted */
	if (!sympos) {
		ERROR("can't find sympos for %s", sym->name);
		return ULONG_MAX;
	}

	/* Livepatch expects sympos 0 for unique names */
	return has_dup ? sympos : 0;
}
531
532
/* Forward declaration: clone_symbol() and clone_sym_relocs() are mutually recursive */
static int clone_sym_relocs(struct elfs *e, struct symbol *patched_sym);
533
534
/*
 * Low-level clone of 'patched_sym' into 'elf'.  When 'data_too' is set and
 * the symbol is defined, the containing section is found or created in the
 * output and the symbol's bytes are appended to it.  Returns the new symbol,
 * or NULL on error.
 */
static struct symbol *__clone_symbol(struct elf *elf, struct symbol *patched_sym,
				     bool data_too)
{
	struct section *out_sec = NULL;
	unsigned long offset = 0;
	struct symbol *out_sym;

	if (data_too && !is_undef_sym(patched_sym)) {
		struct section *patched_sec = patched_sym->sec;

		/* Find or create the output section with matching attributes */
		out_sec = find_section_by_name(elf, patched_sec->name);
		if (!out_sec) {
			out_sec = elf_create_section(elf, patched_sec->name, 0,
						     patched_sec->sh.sh_entsize,
						     patched_sec->sh.sh_type,
						     patched_sec->sh.sh_addralign,
						     patched_sec->sh.sh_flags);
			if (!out_sec)
				return NULL;
		}

		/* String sections are referenced only via section symbols */
		if (is_string_sec(patched_sym->sec)) {
			out_sym = elf_create_section_symbol(elf, out_sec);
			if (!out_sym)
				return NULL;

			goto sym_created;
		}

		/* Append after any existing data, honoring section alignment */
		if (!is_sec_sym(patched_sym))
			offset = ALIGN(sec_size(out_sec), out_sec->sh.sh_addralign);

		if (patched_sym->len || is_sec_sym(patched_sym)) {
			void *data = NULL;
			size_t size;

			/* bss doesn't have data */
			if (patched_sym->sec->data->d_buf)
				data = patched_sym->sec->data->d_buf + patched_sym->offset;

			/* A section symbol copies the whole section's contents */
			if (is_sec_sym(patched_sym))
				size = sec_size(patched_sym->sec);
			else
				size = patched_sym->len;

			if (!elf_add_data(elf, out_sec, data, size))
				return NULL;
		}
	}

	out_sym = elf_create_symbol(elf, patched_sym->name, out_sec,
				    patched_sym->bind, patched_sym->type,
				    offset, patched_sym->len);
	if (!out_sym)
		return NULL;

sym_created:
	/* Cross-link the pair so later lookups find the clone */
	patched_sym->clone = out_sym;
	out_sym->clone = patched_sym;

	return out_sym;
}
596
597
static const char *sym_type(struct symbol *sym)
598
{
599
switch (sym->type) {
600
case STT_NOTYPE: return "NOTYPE";
601
case STT_OBJECT: return "OBJECT";
602
case STT_FUNC: return "FUNC";
603
case STT_SECTION: return "SECTION";
604
case STT_FILE: return "FILE";
605
default: return "UNKNOWN";
606
}
607
}
608
609
static const char *sym_bind(struct symbol *sym)
610
{
611
switch (sym->bind) {
612
case STB_LOCAL: return "LOCAL";
613
case STB_GLOBAL: return "GLOBAL";
614
case STB_WEAK: return "WEAK";
615
default: return "UNKNOWN";
616
}
617
}
618
619
/*
 * Copy a symbol to the output object, optionally including its data and
 * relocations.  Idempotent: an already-cloned symbol returns its existing
 * clone.  Returns NULL on error.
 */
static struct symbol *clone_symbol(struct elfs *e, struct symbol *patched_sym,
				   bool data_too)
{
	struct symbol *pfx;

	/* Already cloned (possibly via recursion through relocs) */
	if (patched_sym->clone)
		return patched_sym->clone;

	dbg_indent("%s%s", patched_sym->name, data_too ? " [+DATA]" : "");

	/* Make sure the prefix gets cloned first */
	if (is_func_sym(patched_sym) && data_too) {
		pfx = get_func_prefix(patched_sym);
		if (pfx)
			clone_symbol(e, pfx, true);
	}

	if (!__clone_symbol(e->out, patched_sym, data_too))
		return NULL;

	/* Cloning relocs may recurse back into clone_symbol() for referenced syms */
	if (data_too && clone_sym_relocs(e, patched_sym))
		return NULL;

	return patched_sym->clone;
}
648
649
/*
 * Mark a function for inclusion in the output object, along with companions
 * that must travel with it (prefix function, .cold parent/child).
 */
static void mark_included_function(struct symbol *func)
{
	struct symbol *pfx;

	func->included = 1;

	/* Include prefix function */
	pfx = get_func_prefix(func);
	if (pfx)
		pfx->included = 1;

	/* Make sure .cold parent+child always stay together */
	if (func->cfunc && func->cfunc != func)
		func->cfunc->included = 1;
	if (func->pfunc && func->pfunc != func)
		func->pfunc->included = 1;
}
666
667
/*
 * Copy all changed functions (and their dependencies) from the patched object
 * to the output object.  Changed = checksum differs from the correlated
 * original; new = no twin in the original.  Returns -1 if nothing changed
 * (an empty diff is an error), 0 otherwise.
 */
static int mark_changed_functions(struct elfs *e)
{
	struct symbol *sym_orig, *patched_sym;
	bool changed = false;

	/* Find changed functions */
	for_each_sym(e->orig, sym_orig) {
		if (!is_func_sym(sym_orig) || is_prefix_func(sym_orig))
			continue;

		patched_sym = sym_orig->twin;
		if (!patched_sym)
			continue;

		/* Checksums were attached earlier by read_sym_checksums() */
		if (sym_orig->csum.checksum != patched_sym->csum.checksum) {
			patched_sym->changed = 1;
			mark_included_function(patched_sym);
			changed = true;
		}
	}

	/* Find added functions and print them */
	for_each_sym(e->patched, patched_sym) {
		if (!is_func_sym(patched_sym) || is_prefix_func(patched_sym))
			continue;

		if (!patched_sym->twin) {
			printf("%s: new function: %s\n", objname, patched_sym->name);
			mark_included_function(patched_sym);
			changed = true;
		}
	}

	/* Print changed functions */
	for_each_sym(e->patched, patched_sym) {
		if (patched_sym->changed)
			printf("%s: changed function: %s\n", objname, patched_sym->name);
	}

	return !changed ? -1 : 0;
}
712
713
static int clone_included_functions(struct elfs *e)
714
{
715
struct symbol *patched_sym;
716
717
for_each_sym(e->patched, patched_sym) {
718
if (patched_sym->included) {
719
if (!clone_symbol(e, patched_sym, true))
720
return -1;
721
}
722
}
723
724
return 0;
725
}
726
727
/*
 * Determine whether a relocation should reference the section rather than the
 * underlying symbol.
 */
static bool section_reference_needed(struct section *sec)
{
	/*
	 * String symbols are zero-length and uncorrelated. It's easier to
	 * deal with them as section symbols.
	 */
	if (is_string_sec(sec))
		return true;

	/*
	 * .rodata has mostly anonymous data so there's no way to determine the
	 * length of a needed reference. just copy the whole section if needed.
	 */
	if (strstarts(sec->name, ".rodata"))
		return true;

	/* UBSAN anonymous data */
	if (strstarts(sec->name, ".data..Lubsan") || /* GCC */
	    strstarts(sec->name, ".data..L__unnamed_")) /* Clang */
		return true;

	/* Everything else: reference the named symbol directly */
	return false;
}
754
755
/*
 * A reloc is in canonical form when it references a section symbol if and
 * only if the target section requires section-based references.
 */
static bool is_reloc_allowed(struct reloc *reloc)
{
	return section_reference_needed(reloc->sym->sec) == is_sec_sym(reloc->sym);
}
759
760
static struct export *find_export(struct symbol *sym)
761
{
762
struct export *export;
763
764
hash_for_each_possible(exports, export, hash, str_hash(sym->name)) {
765
if (!strcmp(export->sym, sym->name))
766
return export;
767
}
768
769
return NULL;
770
}
771
772
/*
 * Derive the module name: preferably from the "name=" tag in .modinfo,
 * otherwise by mangling the object file name like the kernel does
 * (basename, '-' to '_', extension stripped).  Returns NULL on error.
 */
static const char *__find_modname(struct elfs *e)
{
	struct section *sec;
	char *name;

	sec = find_section_by_name(e->orig, ".modinfo");
	if (!sec) {
		ERROR("missing .modinfo section");
		return NULL;
	}

	/* Match "\0name=" so a substring of another tag can't match */
	name = memmem(sec->data->d_buf, sec_size(sec), "\0name=", 6);
	if (name)
		return name + 6;

	/* No name= tag (e.g. vmlinux.o); fall back to the file name */
	name = strdup(e->orig->name);
	if (!name) {
		ERROR_GLIBC("strdup");
		return NULL;
	}

	for (char *c = name; *c; c++) {
		if (*c == '/')
			name = c + 1;	/* keep only the basename */
		else if (*c == '-')
			*c = '_';
		else if (*c == '.') {
			*c = '\0';	/* strip the extension */
			break;
		}
	}

	return name;
}
806
807
/* Get the object's module name as defined by the kernel (and klp_object) */
808
static const char *find_modname(struct elfs *e)
809
{
810
const char *modname;
811
812
if (e->modname)
813
return e->modname;
814
815
modname = __find_modname(e);
816
e->modname = modname;
817
return modname;
818
}
819
820
/*
 * Copying a function from its native compiled environment to a kernel module
 * removes its natural access to local functions/variables and unexported
 * globals. References to such symbols need to be converted to KLP relocs so
 * the kernel arch relocation code knows to apply them and where to find the
 * symbols. Particularly, duplicate static symbols need to be disambiguated.
 *
 * Returns true when the reference must become a KLP reloc.
 */
static bool klp_reloc_needed(struct reloc *patched_reloc)
{
	struct symbol *patched_sym = patched_reloc->sym;
	struct export *export;

	/* no external symbol to reference */
	if (dont_correlate(patched_sym))
		return false;

	/* For included functions, a regular reloc will do. */
	if (patched_sym->included)
		return false;

	/*
	 * If exported by a module, it has to be a klp reloc. Thanks to the
	 * clusterfunk that is late module patching, the patch module is
	 * allowed to be loaded before any modules it depends on.
	 *
	 * If exported by vmlinux, a normal reloc will do.
	 */
	export = find_export(patched_sym);
	if (export)
		return strcmp(export->mod, "vmlinux");

	if (!patched_sym->twin) {
		/*
		 * Presumably the symbol and its reference were added by the
		 * patch. The symbol could be defined in this .o or in another
		 * .o in the patch module.
		 *
		 * This check needs to be *after* the export check due to the
		 * possibility of the patch adding a new UNDEF reference to an
		 * exported symbol.
		 */
		return false;
	}

	/* Unexported symbol which lives in the original vmlinux or module. */
	return true;
}
867
868
static int convert_reloc_sym_to_secsym(struct elf *elf, struct reloc *reloc)
869
{
870
struct symbol *sym = reloc->sym;
871
struct section *sec = sym->sec;
872
873
if (!sec->sym && !elf_create_section_symbol(elf, sec))
874
return -1;
875
876
reloc->sym = sec->sym;
877
set_reloc_sym(elf, reloc, sym->idx);
878
set_reloc_addend(elf, reloc, sym->offset + reloc_addend(reloc));
879
return 0;
880
}
881
882
/*
 * Rewrite a section-symbol relocation to reference the underlying symbol at
 * the targeted offset, adjusting the addend accordingly.  Returns -1 if no
 * symbol covers the target.
 */
static int convert_reloc_secsym_to_sym(struct elf *elf, struct reloc *reloc)
{
	struct symbol *sym = reloc->sym;
	struct section *sec = sym->sec;

	/* If the symbol has a dedicated section, it's easy to find */
	sym = find_symbol_by_offset(sec, 0);
	if (sym && sym->len == sec_size(sec))
		goto found_sym;

	/* No dedicated section; find the symbol manually */
	sym = find_symbol_containing(sec, arch_adjusted_addend(reloc));
	if (!sym) {
		/*
		 * This can happen for special section references to weak code
		 * whose symbol has been stripped by the linker.
		 */
		return -1;
	}

found_sym:
	/* Repoint the reloc and rebase the addend onto the symbol */
	reloc->sym = sym;
	set_reloc_sym(elf, reloc, sym->idx);
	set_reloc_addend(elf, reloc, reloc_addend(reloc) - sym->offset);
	return 0;
}
908
909
/*
910
* Convert a relocation symbol reference to the needed format: either a section
911
* symbol or the underlying symbol itself.
912
*/
913
static int convert_reloc_sym(struct elf *elf, struct reloc *reloc)
914
{
915
if (is_reloc_allowed(reloc))
916
return 0;
917
918
if (section_reference_needed(reloc->sym->sec))
919
return convert_reloc_sym_to_secsym(elf, reloc);
920
else
921
return convert_reloc_secsym_to_sym(elf, reloc);
922
}
923
924
/*
 * Convert a regular relocation to a klp relocation (sort of): emit a normal
 * reloc against a WEAK UNDEF placeholder, plus a .klp.sym.* symbol and a
 * __klp_relocs entry describing the real target for "objtool klp post-link".
 */
static int clone_reloc_klp(struct elfs *e, struct reloc *patched_reloc,
			   struct section *sec, unsigned long offset,
			   struct export *export)
{
	struct symbol *patched_sym = patched_reloc->sym;
	s64 addend = reloc_addend(patched_reloc);
	const char *sym_modname, *sym_orig_name;
	static struct section *klp_relocs;
	struct symbol *sym, *klp_sym;
	unsigned long klp_reloc_off;
	char sym_name[SYM_NAME_LEN];
	struct klp_reloc klp_reloc;
	unsigned long sympos;

	/* A KLP reloc always targets a symbol that exists in the original */
	if (!patched_sym->twin) {
		ERROR("unexpected klp reloc for new symbol %s", patched_sym->name);
		return -1;
	}

	/*
	 * Keep the original reloc intact for now to avoid breaking objtool run
	 * which relies on proper relocations for many of its features. This
	 * will be disabled later by "objtool klp post-link".
	 *
	 * Convert it to UNDEF (and WEAK to avoid modpost warnings).
	 */

	sym = patched_sym->clone;
	if (!sym) {
		/* STB_WEAK: avoid modpost undefined symbol warnings */
		sym = elf_create_symbol(e->out, patched_sym->name, NULL,
					STB_WEAK, patched_sym->type, 0, 0);
		if (!sym)
			return -1;

		patched_sym->clone = sym;
		sym->clone = patched_sym;
	}

	if (!elf_create_reloc(e->out, sec, offset, sym, addend, reloc_type(patched_reloc)))
		return -1;

	/*
	 * Create the KLP symbol.
	 */

	if (export) {
		/* Exported symbols are unique: no sympos disambiguation needed */
		sym_modname = export->mod;
		sym_orig_name = export->sym;
		sympos = 0;
	} else {
		sym_modname = find_modname(e);
		if (!sym_modname)
			return -1;

		sym_orig_name = patched_sym->twin->name;
		sympos = find_sympos(e->orig, patched_sym->twin);
		if (sympos == ULONG_MAX)
			return -1;
	}

	/* symbol format: .klp.sym.modname.sym_name,sympos */
	if (snprintf_check(sym_name, SYM_NAME_LEN, KLP_SYM_PREFIX "%s.%s,%ld",
			   sym_modname, sym_orig_name, sympos))
		return -1;

	/* Reuse an existing KLP symbol if this target was seen before */
	klp_sym = find_symbol_by_name(e->out, sym_name);
	if (!klp_sym) {
		__dbg_indent("%s", sym_name);

		/* STB_WEAK: avoid modpost undefined symbol warnings */
		klp_sym = elf_create_symbol(e->out, sym_name, NULL,
					    STB_WEAK, patched_sym->type, 0, 0);
		if (!klp_sym)
			return -1;
	}

	/*
	 * Create the __klp_relocs entry. This will be converted to an actual
	 * KLP rela by "objtool klp post-link".
	 *
	 * This intermediate step is necessary to prevent corruption by the
	 * linker, which doesn't know how to properly handle two rela sections
	 * applying to the same base section.
	 */

	if (!klp_relocs) {
		klp_relocs = elf_create_section(e->out, KLP_RELOCS_SEC, 0,
						0, SHT_PROGBITS, 8, SHF_ALLOC);
		if (!klp_relocs)
			return -1;
	}

	klp_reloc_off = sec_size(klp_relocs);
	memset(&klp_reloc, 0, sizeof(klp_reloc));

	klp_reloc.type = reloc_type(patched_reloc);
	if (!elf_add_data(e->out, klp_relocs, &klp_reloc, sizeof(klp_reloc)))
		return -1;

	/* klp_reloc.offset */
	if (!sec->sym && !elf_create_section_symbol(e->out, sec))
		return -1;

	if (!elf_create_reloc(e->out, klp_relocs,
			      klp_reloc_off + offsetof(struct klp_reloc, offset),
			      sec->sym, offset, R_ABS64))
		return -1;

	/* klp_reloc.sym */
	if (!elf_create_reloc(e->out, klp_relocs,
			      klp_reloc_off + offsetof(struct klp_reloc, sym),
			      klp_sym, addend, R_ABS64))
		return -1;

	return 0;
}
1044
1045
/* Debug-print one cloned reloc: location, target symbol, addend and flags */
#define dbg_clone_reloc(sec, offset, patched_sym, addend, export, klp) \
	dbg_indent("%s+0x%lx: %s%s0x%lx [%s%s%s%s%s%s]", \
		   sec->name, offset, patched_sym->name, \
		   addend >= 0 ? "+" : "-", labs(addend), \
		   sym_type(patched_sym), \
		   patched_sym->type == STT_SECTION ? "" : " ", \
		   patched_sym->type == STT_SECTION ? "" : sym_bind(patched_sym), \
		   is_undef_sym(patched_sym) ? " UNDEF" : "", \
		   export ? " EXPORTED" : "", \
		   klp ? " KLP" : "")
1055
1056
/* Copy a reloc and its symbol to the output object */
static int clone_reloc(struct elfs *e, struct reloc *patched_reloc,
		       struct section *sec, unsigned long offset)
{
	struct symbol *patched_sym = patched_reloc->sym;
	struct export *export = find_export(patched_sym);
	long addend = reloc_addend(patched_reloc);
	struct symbol *out_sym;
	bool klp;

	/* convert_reloc_sym() should have canonicalized the reference already */
	if (!is_reloc_allowed(patched_reloc)) {
		ERROR_FUNC(patched_reloc->sec->base, reloc_offset(patched_reloc),
			   "missing symbol for reference to %s+%ld",
			   patched_sym->name, addend);
		return -1;
	}

	klp = klp_reloc_needed(patched_reloc);

	dbg_clone_reloc(sec, offset, patched_sym, addend, export, klp);

	if (klp) {
		if (clone_reloc_klp(e, patched_reloc, sec, offset, export))
			return -1;

		return 0;
	}

	/*
	 * Why !export sets 'data_too':
	 *
	 * Unexported non-klp symbols need to live in the patch module,
	 * otherwise there will be unresolved symbols. Notably, this includes:
	 *
	 * - New functions/data
	 * - String sections
	 * - Special section entries
	 * - Uncorrelated static local variables
	 * - UBSAN sections
	 */
	out_sym = clone_symbol(e, patched_sym, patched_sym->included || !export);
	if (!out_sym)
		return -1;

	/*
	 * For strings, all references use section symbols, thanks to
	 * section_reference_needed(). clone_symbol() has cloned an empty
	 * version of the string section. Now copy the string itself.
	 */
	if (is_string_sec(patched_sym->sec)) {
		const char *str = patched_sym->sec->data->d_buf + addend;

		/* NOTE(review): escape_str() result is leaked here — debug path only */
		__dbg_indent("\"%s\"", escape_str(str));

		/* The new addend is the string's offset in the output section */
		addend = elf_add_string(e->out, out_sym->sec, str);
		if (addend == -1)
			return -1;
	}

	if (!elf_create_reloc(e->out, sec, offset, out_sym, addend,
			      reloc_type(patched_reloc)))
		return -1;

	return 0;
}
1121
1122
/* Copy all relocs needed for a symbol's contents */
static int clone_sym_relocs(struct elfs *e, struct symbol *patched_sym)
{
	struct section *patched_rsec = patched_sym->sec->rsec;
	struct reloc *patched_reloc;
	unsigned long start, end;
	struct symbol *out_sym;

	/* The symbol itself must have been cloned first */
	out_sym = patched_sym->clone;
	if (!out_sym) {
		ERROR("no clone for %s", patched_sym->name);
		return -1;
	}

	/* No relocs apply to this section at all */
	if (!patched_rsec)
		return 0;

	/* A zero-length (non-section) symbol spans no bytes, hence no relocs */
	if (!is_sec_sym(patched_sym) && !patched_sym->len)
		return 0;

	/* String data is copied on demand by clone_reloc() instead */
	if (is_string_sec(patched_sym->sec))
		return 0;

	/* Byte range in the patched section covered by this symbol */
	if (is_sec_sym(patched_sym)) {
		start = 0;
		end = sec_size(patched_sym->sec);
	} else {
		start = patched_sym->offset;
		end = start + patched_sym->len;
	}

	for_each_reloc(patched_rsec, patched_reloc) {
		unsigned long offset;

		if (reloc_offset(patched_reloc) < start ||
		    reloc_offset(patched_reloc) >= end)
			continue;

		/*
		 * Skip any reloc referencing .altinstr_aux. Its code is
		 * always patched by alternatives. See ALTERNATIVE_TERNARY().
		 */
		if (patched_reloc->sym->sec &&
		    !strcmp(patched_reloc->sym->sec->name, ".altinstr_aux"))
			continue;

		if (convert_reloc_sym(e->patched, patched_reloc)) {
			ERROR_FUNC(patched_rsec->base, reloc_offset(patched_reloc),
				   "failed to convert reloc sym '%s' to its proper format",
				   patched_reloc->sym->name);
			return -1;
		}

		/* Translate the reloc's location into the cloned symbol's section */
		offset = out_sym->offset + (reloc_offset(patched_reloc) - patched_sym->offset);

		if (clone_reloc(e, patched_reloc, out_sym->sec, offset))
			return -1;
	}

	return 0;
}
1183
1184
/*
 * Create a uniquely named local symbol covering [offset, offset+size) in
 * 'sec', so an individual special-section entry can be referenced by name.
 */
static int create_fake_symbol(struct elf *elf, struct section *sec,
			      unsigned long offset, size_t size)
{
	static int ctr;
	char name[SYM_NAME_LEN];
	unsigned int type;

	/* Unique name: section name plus a running counter */
	if (snprintf_check(name, SYM_NAME_LEN, "%s_%d", sec->name, ctr++))
		return -1;

	/* Replace the dots from the section name with underscores */
	for (char *c = name; *c; c++) {
		if (*c == '.')
			*c = '_';
	}

	/*
	 * STT_NOTYPE: Prevent objtool from validating .altinstr_replacement
	 * while still allowing objdump to disassemble it.
	 */
	type = is_text_sec(sec) ? STT_NOTYPE : STT_OBJECT;

	if (!elf_create_symbol(elf, name, sec, STB_LOCAL, type, offset, size))
		return -1;

	return 0;
}
1206
1207
/*
 * Special sections (alternatives, etc) are basically arrays of structs.
 * For all the special sections, create a symbol for each struct entry.  This
 * is a bit cumbersome, but it makes the extracting of the individual entries
 * much more straightforward.
 *
 * There are three ways to identify the entry sizes for a special section:
 *
 * 1) ELF section header sh_entsize: Ideally this would be used almost
 *    everywhere.  But unfortunately the toolchains make it difficult.  The
 *    assembler .[push]section directive syntax only takes entsize when
 *    combined with SHF_MERGE.  But Clang disallows combining SHF_MERGE with
 *    SHF_WRITE.  And some special sections do need to be writable.
 *
 *    Another place this wouldn't work is .altinstr_replacement, whose entries
 *    don't have a fixed size.
 *
 * 2) ANNOTATE_DATA_SPECIAL: This is a lightweight objtool annotation which
 *    points to the beginning of each entry.  The size of the entry is then
 *    inferred by the location of the subsequent annotation (or end of
 *    section).
 *
 * 3) Simple array of pointers: If the special section is just a basic array of
 *    pointers, the entry size can be inferred by the number of relocations.
 *    No annotations needed.
 *
 * Note I also tried to create per-entry symbols at the time of creation, in
 * the original [inline] asm.  Unfortunately, creating uniquely named symbols
 * is trickier than one might think, especially with Clang inline asm.  I
 * eventually just gave up trying to make that work, in favor of using
 * ANNOTATE_DATA_SPECIAL and creating the symbols here after the fact.
 */
static int create_fake_symbols(struct elf *elf)
{
	struct section *sec;
	struct reloc *reloc;

	/*
	 * 1) Make symbols for all the ANNOTATE_DATA_SPECIAL entries:
	 */

	/*
	 * NOTE(review): bailing out here also skips step 2 below.  Presumably
	 * an object with special sections always carries data annotations --
	 * confirm this early return isn't meant to fall through to step 2.
	 */
	sec = find_section_by_name(elf, ".discard.annotate_data");
	if (!sec || !sec->rsec)
		return 0;

	for_each_reloc(sec->rsec, reloc) {
		unsigned long offset, size;
		struct reloc *next_reloc;

		if (annotype(elf, sec, reloc) != ANNOTYPE_DATA_SPECIAL)
			continue;

		/* The annotation's addend is the entry's offset in its section. */
		offset = reloc_addend(reloc);

		/*
		 * Infer the entry size from the next annotation pointing into
		 * the same section.
		 */
		size = 0;
		next_reloc = reloc;
		for_each_reloc_continue(sec->rsec, next_reloc) {
			if (annotype(elf, sec, next_reloc) != ANNOTYPE_DATA_SPECIAL ||
			    next_reloc->sym->sec != reloc->sym->sec)
				continue;

			size = reloc_addend(next_reloc) - offset;
			break;
		}

		/* Last entry in its section: extends to the end of section. */
		if (!size)
			size = sec_size(reloc->sym->sec) - offset;

		if (create_fake_symbol(elf, reloc->sym->sec, offset, size))
			return -1;
	}

	/*
	 * 2) Make symbols for sh_entsize, and simple arrays of pointers:
	 */

	for_each_sec(elf, sec) {
		unsigned int entry_size;
		unsigned long offset;

		/* Skip sections which already have a symbol at offset 0 (e.g. from step 1). */
		if (!is_special_section(sec) || find_symbol_by_offset(sec, 0))
			continue;

		if (!sec->rsec) {
			ERROR("%s: missing special section relocations", sec->name);
			return -1;
		}

		entry_size = sec->sh.sh_entsize;
		if (!entry_size) {
			/*
			 * No entsize: assume a simple array of pointers with
			 * exactly one reloc per entry, and verify that the
			 * section size matches that assumption.
			 */
			entry_size = arch_reloc_size(sec->rsec->relocs);
			if (sec_size(sec) != entry_size * sec_num_entries(sec->rsec)) {
				ERROR("%s: missing special section entsize or annotations", sec->name);
				return -1;
			}
		}

		for (offset = 0; offset < sec_size(sec); offset += entry_size) {
			if (create_fake_symbol(elf, sec, offset, entry_size))
				return -1;
		}
	}

	return 0;
}
/* Keep a special section entry if it references an included function */
1314
static bool should_keep_special_sym(struct elf *elf, struct symbol *sym)
1315
{
1316
struct reloc *reloc;
1317
1318
if (is_sec_sym(sym) || !sym->sec->rsec)
1319
return false;
1320
1321
sym_for_each_reloc(elf, sym, reloc) {
1322
if (convert_reloc_sym(elf, reloc))
1323
continue;
1324
1325
if (is_func_sym(reloc->sym) && reloc->sym->included)
1326
return true;
1327
}
1328
1329
return false;
1330
}
1331
1332
/*
 * Klp relocations aren't allowed for __jump_table and .static_call_sites if
 * the referenced symbol lives in a kernel module, because such klp relocs may
 * be applied after static branch/call init, resulting in code corruption.
 *
 * Validate a special section entry to avoid that.  Note that an inert
 * tracepoint or pr_debug() is harmless enough, in that case just skip the
 * entry and print a warning.  Otherwise, return an error.
 *
 * TODO: This is only a temporary limitation which will be fixed when livepatch
 * adds support for submodules: fully self-contained modules which are embedded
 * in the top-level livepatch module's data and which can be loaded on demand
 * when their corresponding to-be-patched module gets loaded.  Then klp relocs
 * can be retired.
 *
 * Return:
 *   -1: error: validation failed
 *    1: warning: disabled tracepoint or pr_debug()
 *    0: success
 */
static int validate_special_section_klp_reloc(struct elfs *e, struct symbol *sym)
{
	bool static_branch = !strcmp(sym->sec->name, "__jump_table");
	bool static_call = !strcmp(sym->sec->name, ".static_call_sites");
	const char *code_sym = NULL;
	unsigned long code_offset = 0;
	struct reloc *reloc;
	int ret = 0;

	/* Only static branch/call entries need this validation. */
	if (!static_branch && !static_call)
		return 0;

	sym_for_each_reloc(e->patched, sym, reloc) {
		const char *sym_modname;
		struct export *export;

		if (convert_reloc_sym(e->patched, reloc))
			continue;

		/* Static branch/call keys are always STT_OBJECT */
		if (reloc->sym->type != STT_OBJECT) {

			/* Save code location which can be printed below */
			if (reloc->sym->type == STT_FUNC && !code_sym) {
				code_sym = reloc->sym->name;
				code_offset = reloc_addend(reloc);
			}

			continue;
		}

		/* Keys reachable via a normal reloc are always fine. */
		if (!klp_reloc_needed(reloc))
			continue;

		/*
		 * Determine which module the key lives in: exported symbols
		 * carry their owner; otherwise assume the patched module
		 * itself (via find_modname()).
		 */
		export = find_export(reloc->sym);
		if (export) {
			sym_modname = export->mod;
		} else {
			sym_modname = find_modname(e);
			if (!sym_modname)
				return -1;
		}

		/* vmlinux keys are ok */
		if (!strcmp(sym_modname, "vmlinux"))
			continue;

		if (!code_sym)
			code_sym = "<unknown>";

		if (static_branch) {
			if (strstarts(reloc->sym->name, "__tracepoint_")) {
				/* +13 skips the "__tracepoint_" prefix */
				WARN("%s: disabling unsupported tracepoint %s",
				     code_sym, reloc->sym->name + 13);
				ret = 1;
				continue;
			}

			if (strstr(reloc->sym->name, "__UNIQUE_ID_ddebug_")) {
				WARN("%s: disabling unsupported pr_debug()",
				     code_sym);
				ret = 1;
				continue;
			}

			ERROR("%s+0x%lx: unsupported static branch key %s. Use static_key_enabled() instead",
			      code_sym, code_offset, reloc->sym->name);
			return -1;
		}

		/* static call */
		if (strstarts(reloc->sym->name, "__SCK__tp_func_")) {
			ret = 1;
			continue;
		}

		ERROR("%s()+0x%lx: unsupported static call key %s. Use KLP_STATIC_CALL() instead",
		      code_sym, code_offset, reloc->sym->name);
		return -1;
	}

	return ret;
}
static int clone_special_section(struct elfs *e, struct section *patched_sec)
1437
{
1438
struct symbol *patched_sym;
1439
1440
/*
1441
* Extract all special section symbols (and their dependencies) which
1442
* reference included functions.
1443
*/
1444
sec_for_each_sym(patched_sec, patched_sym) {
1445
int ret;
1446
1447
if (!is_object_sym(patched_sym))
1448
continue;
1449
1450
if (!should_keep_special_sym(e->patched, patched_sym))
1451
continue;
1452
1453
ret = validate_special_section_klp_reloc(e, patched_sym);
1454
if (ret < 0)
1455
return -1;
1456
if (ret > 0)
1457
continue;
1458
1459
if (!clone_symbol(e, patched_sym, true))
1460
return -1;
1461
}
1462
1463
return 0;
1464
}
1465
1466
/* Extract only the needed bits from special sections */
1467
static int clone_special_sections(struct elfs *e)
1468
{
1469
struct section *patched_sec;
1470
1471
for_each_sec(e->patched, patched_sec) {
1472
if (is_special_section(patched_sec)) {
1473
if (clone_special_section(e, patched_sec))
1474
return -1;
1475
}
1476
}
1477
1478
return 0;
1479
}
1480
1481
/*
1482
* Create .init.klp_objects and .init.klp_funcs sections which are intermediate
1483
* sections provided as input to the patch module's init code for building the
1484
* klp_patch, klp_object and klp_func structs for the livepatch API.
1485
*/
1486
static int create_klp_sections(struct elfs *e)
1487
{
1488
size_t obj_size = sizeof(struct klp_object_ext);
1489
size_t func_size = sizeof(struct klp_func_ext);
1490
struct section *obj_sec, *funcs_sec, *str_sec;
1491
struct symbol *funcs_sym, *str_sym, *sym;
1492
char sym_name[SYM_NAME_LEN];
1493
unsigned int nr_funcs = 0;
1494
const char *modname;
1495
void *obj_data;
1496
s64 addend;
1497
1498
obj_sec = elf_create_section_pair(e->out, KLP_OBJECTS_SEC, obj_size, 0, 0);
1499
if (!obj_sec)
1500
return -1;
1501
1502
funcs_sec = elf_create_section_pair(e->out, KLP_FUNCS_SEC, func_size, 0, 0);
1503
if (!funcs_sec)
1504
return -1;
1505
1506
funcs_sym = elf_create_section_symbol(e->out, funcs_sec);
1507
if (!funcs_sym)
1508
return -1;
1509
1510
str_sec = elf_create_section(e->out, KLP_STRINGS_SEC, 0, 0,
1511
SHT_PROGBITS, 1,
1512
SHF_ALLOC | SHF_STRINGS | SHF_MERGE);
1513
if (!str_sec)
1514
return -1;
1515
1516
if (elf_add_string(e->out, str_sec, "") == -1)
1517
return -1;
1518
1519
str_sym = elf_create_section_symbol(e->out, str_sec);
1520
if (!str_sym)
1521
return -1;
1522
1523
/* allocate klp_object_ext */
1524
obj_data = elf_add_data(e->out, obj_sec, NULL, obj_size);
1525
if (!obj_data)
1526
return -1;
1527
1528
modname = find_modname(e);
1529
if (!modname)
1530
return -1;
1531
1532
/* klp_object_ext.name */
1533
if (strcmp(modname, "vmlinux")) {
1534
addend = elf_add_string(e->out, str_sec, modname);
1535
if (addend == -1)
1536
return -1;
1537
1538
if (!elf_create_reloc(e->out, obj_sec,
1539
offsetof(struct klp_object_ext, name),
1540
str_sym, addend, R_ABS64))
1541
return -1;
1542
}
1543
1544
/* klp_object_ext.funcs */
1545
if (!elf_create_reloc(e->out, obj_sec, offsetof(struct klp_object_ext, funcs),
1546
funcs_sym, 0, R_ABS64))
1547
return -1;
1548
1549
for_each_sym(e->out, sym) {
1550
unsigned long offset = nr_funcs * func_size;
1551
unsigned long sympos;
1552
void *func_data;
1553
1554
if (!is_func_sym(sym) || sym->cold || !sym->clone || !sym->clone->changed)
1555
continue;
1556
1557
/* allocate klp_func_ext */
1558
func_data = elf_add_data(e->out, funcs_sec, NULL, func_size);
1559
if (!func_data)
1560
return -1;
1561
1562
/* klp_func_ext.old_name */
1563
addend = elf_add_string(e->out, str_sec, sym->clone->twin->name);
1564
if (addend == -1)
1565
return -1;
1566
1567
if (!elf_create_reloc(e->out, funcs_sec,
1568
offset + offsetof(struct klp_func_ext, old_name),
1569
str_sym, addend, R_ABS64))
1570
return -1;
1571
1572
/* klp_func_ext.new_func */
1573
if (!elf_create_reloc(e->out, funcs_sec,
1574
offset + offsetof(struct klp_func_ext, new_func),
1575
sym, 0, R_ABS64))
1576
return -1;
1577
1578
/* klp_func_ext.sympos */
1579
BUILD_BUG_ON(sizeof(sympos) != sizeof_field(struct klp_func_ext, sympos));
1580
sympos = find_sympos(e->orig, sym->clone->twin);
1581
if (sympos == ULONG_MAX)
1582
return -1;
1583
memcpy(func_data + offsetof(struct klp_func_ext, sympos), &sympos,
1584
sizeof_field(struct klp_func_ext, sympos));
1585
1586
nr_funcs++;
1587
}
1588
1589
/* klp_object_ext.nr_funcs */
1590
BUILD_BUG_ON(sizeof(nr_funcs) != sizeof_field(struct klp_object_ext, nr_funcs));
1591
memcpy(obj_data + offsetof(struct klp_object_ext, nr_funcs), &nr_funcs,
1592
sizeof_field(struct klp_object_ext, nr_funcs));
1593
1594
/*
1595
* Find callback pointers created by KLP_PRE_PATCH_CALLBACK() and
1596
* friends, and add them to the klp object.
1597
*/
1598
1599
if (snprintf_check(sym_name, SYM_NAME_LEN, KLP_PRE_PATCH_PREFIX "%s", modname))
1600
return -1;
1601
1602
sym = find_symbol_by_name(e->out, sym_name);
1603
if (sym) {
1604
struct reloc *reloc;
1605
1606
reloc = find_reloc_by_dest(e->out, sym->sec, sym->offset);
1607
1608
if (!elf_create_reloc(e->out, obj_sec,
1609
offsetof(struct klp_object_ext, callbacks) +
1610
offsetof(struct klp_callbacks, pre_patch),
1611
reloc->sym, reloc_addend(reloc), R_ABS64))
1612
return -1;
1613
}
1614
1615
if (snprintf_check(sym_name, SYM_NAME_LEN, KLP_POST_PATCH_PREFIX "%s", modname))
1616
return -1;
1617
1618
sym = find_symbol_by_name(e->out, sym_name);
1619
if (sym) {
1620
struct reloc *reloc;
1621
1622
reloc = find_reloc_by_dest(e->out, sym->sec, sym->offset);
1623
1624
if (!elf_create_reloc(e->out, obj_sec,
1625
offsetof(struct klp_object_ext, callbacks) +
1626
offsetof(struct klp_callbacks, post_patch),
1627
reloc->sym, reloc_addend(reloc), R_ABS64))
1628
return -1;
1629
}
1630
1631
if (snprintf_check(sym_name, SYM_NAME_LEN, KLP_PRE_UNPATCH_PREFIX "%s", modname))
1632
return -1;
1633
1634
sym = find_symbol_by_name(e->out, sym_name);
1635
if (sym) {
1636
struct reloc *reloc;
1637
1638
reloc = find_reloc_by_dest(e->out, sym->sec, sym->offset);
1639
1640
if (!elf_create_reloc(e->out, obj_sec,
1641
offsetof(struct klp_object_ext, callbacks) +
1642
offsetof(struct klp_callbacks, pre_unpatch),
1643
reloc->sym, reloc_addend(reloc), R_ABS64))
1644
return -1;
1645
}
1646
1647
if (snprintf_check(sym_name, SYM_NAME_LEN, KLP_POST_UNPATCH_PREFIX "%s", modname))
1648
return -1;
1649
1650
sym = find_symbol_by_name(e->out, sym_name);
1651
if (sym) {
1652
struct reloc *reloc;
1653
1654
reloc = find_reloc_by_dest(e->out, sym->sec, sym->offset);
1655
1656
if (!elf_create_reloc(e->out, obj_sec,
1657
offsetof(struct klp_object_ext, callbacks) +
1658
offsetof(struct klp_callbacks, post_unpatch),
1659
reloc->sym, reloc_addend(reloc), R_ABS64))
1660
return -1;
1661
}
1662
1663
return 0;
1664
}
1665
1666
/*
 * Copy all .modinfo import_ns= tags to ensure all namespaced exported symbols
 * can be accessed via normal relocs.
 */
static int copy_import_ns(struct elfs *e)
{
	struct section *patched_sec, *out_sec = NULL;
	char *import_ns, *data_end;

	patched_sec = find_section_by_name(e->patched, ".modinfo");
	if (!patched_sec)
		return 0;

	import_ns = patched_sec->data->d_buf;
	if (!import_ns)
		return 0;

	/*
	 * Walk the section contents (a sequence of NUL-terminated strings).
	 * Note the loop increment applies to the memmem() result assigned in
	 * the body, stepping past the previously found tag each iteration.
	 */
	for (data_end = import_ns + sec_size(patched_sec);
	     import_ns < data_end;
	     import_ns += strlen(import_ns) + 1) {

		/* Find the next "import_ns=" tag; done when there is none. */
		import_ns = memmem(import_ns, data_end - import_ns, "import_ns=", 10);
		if (!import_ns)
			return 0;

		/*
		 * Lazily create (or reuse) the output .modinfo, mirroring the
		 * patched section's header, only once a tag is actually found.
		 */
		if (!out_sec) {
			out_sec = find_section_by_name(e->out, ".modinfo");
			if (!out_sec) {
				out_sec = elf_create_section(e->out, ".modinfo", 0,
							     patched_sec->sh.sh_entsize,
							     patched_sec->sh.sh_type,
							     patched_sec->sh.sh_addralign,
							     patched_sec->sh.sh_flags);
				if (!out_sec)
					return -1;
			}
		}

		/* Copy the whole NUL-terminated "import_ns=..." string. */
		if (!elf_add_data(e->out, out_sec, import_ns, strlen(import_ns) + 1))
			return -1;
	}

	return 0;
}
int cmd_klp_diff(int argc, const char **argv)
{
	struct elfs e = {0};

	argc = parse_options(argc, argv, klp_diff_options, klp_diff_usage, 0);
	if (argc != 3)
		usage_with_options(klp_diff_usage, klp_diff_options);

	/* Presumably consumed by the WARN()/ERROR() printers (objtool/warn.h). */
	objname = argv[0];

	e.orig = elf_open_read(argv[0], O_RDONLY);
	e.patched = elf_open_read(argv[1], O_RDONLY);
	e.out = NULL;

	if (!e.orig || !e.patched)
		return -1;

	if (read_exports())
		return -1;

	if (read_sym_checksums(e.orig))
		return -1;

	if (read_sym_checksums(e.patched))
		return -1;

	if (correlate_symbols(&e))
		return -1;

	/*
	 * NOTE(review): unlike every other failure path here, a nonzero return
	 * from mark_changed_functions() yields success (0) without creating
	 * the output file.  Presumably intentional ("nothing changed"), but
	 * confirm this isn't a typo for -1.
	 */
	if (mark_changed_functions(&e))
		return 0;

	e.out = elf_create_file(&e.orig->ehdr, argv[2]);
	if (!e.out)
		return -1;

	/*
	 * Special section fake symbols are needed so that individual special
	 * section entries can be extracted by clone_special_sections().
	 *
	 * Note the fake symbols are also needed by clone_included_functions()
	 * because __WARN_printf() call sites add references to bug table
	 * entries in the calling functions.
	 */
	if (create_fake_symbols(e.patched))
		return -1;

	if (clone_included_functions(&e))
		return -1;

	if (clone_special_sections(&e))
		return -1;

	if (create_klp_sections(&e))
		return -1;

	if (copy_import_ns(&e))
		return -1;

	/* e.orig/e.patched are never closed; left for process exit to reclaim. */
	if (elf_write(e.out))
		return -1;

	return elf_close(e.out);
}