Skip to content

Instantly share code, notes, and snippets.

@ant4g0nist
Last active July 1, 2021 06:23
Show Gist options
  • Star 0 You must be signed in to star a gist
  • Fork 0 You must be signed in to fork a gist
  • Save ant4g0nist/19a3aff4c3ba0feeeae04a9a56c47ba5 to your computer and use it in GitHub Desktop.
Save ant4g0nist/19a3aff4c3ba0feeeae04a9a56c47ba5 to your computer and use it in GitHub Desktop.
xnu-7195.81.3 πŸ™
diff -ru ../xnu-7195.60.75/Makefile ../xnu-7195.81.3/Makefile
--- ../xnu-7195.60.75/Makefile 2020-12-18 10:21:20.000000000 +0100
+++ ../xnu-7195.81.3/Makefile 2021-01-26 21:33:33.000000000 +0100
@@ -31,6 +31,7 @@
export MakeInc_rule=${VERSDIR}/makedefs/MakeInc.rule
export MakeInc_dir=${VERSDIR}/makedefs/MakeInc.dir
+
#
# Dispatch non-xnu build aliases to their own build
# systems. All xnu variants start with MakeInc_top.
@@ -186,7 +187,7 @@
install install_desktop install_embedded \
install_release_embedded install_development_embedded \
install_kernels \
- cscope tags TAGS checkstyle restyle check_uncrustify uncrustify \
+ cscope tags TAGS \
help
DEFAULT_TARGET = all
diff -ru ../xnu-7195.60.75/README.md ../xnu-7195.81.3/README.md
--- ../xnu-7195.60.75/README.md 2020-12-18 10:21:24.000000000 +0100
+++ ../xnu-7195.81.3/README.md 2021-01-26 21:33:36.000000000 +0100
@@ -169,18 +169,6 @@
$ make cscope # this will build cscope database
-Code Style
-==========
-
-Source files can be reformatted to comply with the xnu code style using the "restyle" make target invoked from the
-top-level project directory.
-
- $ make restyle # re-format all source files to be xnu code style conformant.
-
-Compliance can be checked using the "checkstyle" make target.
-
- $ make checkstyle # Check all relevant source files for xnu code style conformance.
-
How to install a new header file from XNU
=========================================
diff -ru ../xnu-7195.60.75/SETUP/config/config.h ../xnu-7195.81.3/SETUP/config/config.h
--- ../xnu-7195.60.75/SETUP/config/config.h 2020-12-18 10:19:45.000000000 +0100
+++ ../xnu-7195.81.3/SETUP/config/config.h 2021-01-26 21:31:58.000000000 +0100
@@ -83,6 +83,7 @@
*/
#define CONFIGDEP 0x01 /* obsolete? */
#define OPTIONSDEF 0x02 /* options definition entry */
+#define LIBRARYDEP 0x04 /* include file in library build */
struct device {
int d_type; /* CONTROLLER, DEVICE, bus adaptor */
diff -ru ../xnu-7195.60.75/SETUP/config/mkmakefile.c ../xnu-7195.81.3/SETUP/config/mkmakefile.c
--- ../xnu-7195.60.75/SETUP/config/mkmakefile.c 2020-12-18 10:19:45.000000000 +0100
+++ ../xnu-7195.81.3/SETUP/config/mkmakefile.c 2021-01-26 21:31:58.000000000 +0100
@@ -65,7 +65,7 @@
#include "config.h"
void read_files(void);
-void do_objs(FILE *fp, const char *msg, int ext);
+void do_objs(FILE *fp, const char *msg, int ext, int flags);
void do_files(FILE *fp, const char *msg, char ext);
void do_machdep(FILE *ofp);
void do_rules(FILE *f);
@@ -243,16 +243,18 @@
continue;
percent:
if (eq(line, "%OBJS\n")) {
- do_objs(ofp, "OBJS=", -1);
+ do_objs(ofp, "OBJS=", -1, 0);
+ } else if (eq(line, "%LIBOBJS\n")) {
+ do_objs(ofp, "LIBOBJS=", -1, LIBRARYDEP);
} else if (eq(line, "%CFILES\n")) {
do_files(ofp, "CFILES=", 'c');
- do_objs(ofp, "COBJS=", 'c');
+ do_objs(ofp, "COBJS=", 'c', 0);
} else if (eq(line, "%CXXFILES\n")) {
do_files(ofp, "CXXFILES=", 'p');
- do_objs(ofp, "CXXOBJS=", 'p');
+ do_objs(ofp, "CXXOBJS=", 'p', 0);
} else if (eq(line, "%SFILES\n")) {
do_files(ofp, "SFILES=", 's');
- do_objs(ofp, "SOBJS=", 's');
+ do_objs(ofp, "SOBJS=", 's', 0);
} else if (eq(line, "%MACHDEP\n")) {
do_machdep(ofp);
} else if (eq(line, "%RULES\n")) {
@@ -287,6 +289,7 @@
const char *devorprof;
int options;
int not_option;
+ int for_xnu_lib;
char pname[BUFSIZ];
char fname[1024];
char *rest = (char *) 0;
@@ -346,6 +349,7 @@
nreqs = 0;
devorprof = "";
needs = 0;
+ for_xnu_lib = 0;
if (eq(wd, "standard")) {
goto checkdev;
}
@@ -371,6 +375,10 @@
next_word(fp, wd);
goto save;
}
+ if (eq(wd, "xnu-library")) {
+ for_xnu_lib = 1;
+ goto nextopt;
+ }
nreqs++;
if (needs == 0 && nreqs == 1) {
needs = ns(wd);
@@ -469,6 +477,10 @@
goto getrest;
}
next_word(fp, wd);
+ if (wd && eq(wd, "xnu-library")) {
+ for_xnu_lib = 1;
+ next_word(fp, wd);
+ }
if (wd) {
devorprof = wd;
next_word(fp, wd);
@@ -508,6 +520,9 @@
if (pf && pf->f_type == INVISIBLE) {
pf->f_flags = 1; /* mark as duplicate */
}
+ if (for_xnu_lib) {
+ tp->f_flags |= LIBRARYDEP;
+ }
goto next;
}
@@ -541,7 +556,7 @@
}
void
-do_objs(FILE *fp, const char *msg, int ext)
+do_objs(FILE *fp, const char *msg, int ext, int flags)
{
struct file_list *tp;
int lpos, len;
@@ -557,6 +572,13 @@
}
/*
+ * Check flags (if any)
+ */
+ if (flags && ((tp->f_flags & flags) != flags)) {
+ continue;
+ }
+
+ /*
* Check for '.o' file in list
*/
cp = tp->f_fn + (len = strlen(tp->f_fn)) - 1;
Only in ../xnu-7195.60.75/SETUP: libT8020.os.DEVELOPMENT.a
Only in ../xnu-7195.60.75/SETUP: libT8020.os.RELEASE.a
Only in ../xnu-7195.60.75/SETUP: libT8101.os.DEVELOPMENT.a
Only in ../xnu-7195.60.75/SETUP: libT8101.os.RELEASE.a
diff -ru ../xnu-7195.60.75/bsd/conf/Makefile.template ../xnu-7195.81.3/bsd/conf/Makefile.template
--- ../xnu-7195.60.75/bsd/conf/Makefile.template 2020-12-18 10:21:22.000000000 +0100
+++ ../xnu-7195.81.3/bsd/conf/Makefile.template 2021-01-26 21:33:35.000000000 +0100
@@ -65,6 +65,8 @@
%OBJS
+%LIBOBJS
+
%CFILES
%CXXFILES
@@ -606,6 +608,12 @@
$(ECHO) $(TARGET)/$(CURRENT_KERNEL_CONFIG)/$${obj}; \
done > $(COMPONENT).filelist
+$(COMPONENT).libfilelist: $(LIBOBJS)
+ @$(LOG_LDFILELIST) "lib$(COMPONENT)"
+ $(_v)for obj in ${LIBOBJS}; do \
+ $(ECHO) $(TARGET)/$(CURRENT_KERNEL_CONFIG)/$${obj}; \
+ done > $(COMPONENT).libfilelist
+
MAKESYSCALLS = $(SRCROOT)/bsd/kern/makesyscalls.sh
init_sysent.c: $(TARGET)/bsd.syscalls.master
@@ -624,7 +632,11 @@
@$(LOG_GENERATE) "$@$(Color0) from $(ColorF)$(<F)$(Color0)"
$(_v)$(MAKESYSCALLS) $< systrace > /dev/null
+ifeq ($(RC_ProjectName),xnu_libraries)
+do_all: $(COMPONENT).libfilelist
+else
do_all: $(COMPONENT).filelist
+endif
do_build_all:: do_all
diff -ru ../xnu-7195.60.75/bsd/conf/files ../xnu-7195.81.3/bsd/conf/files
--- ../xnu-7195.60.75/bsd/conf/files 2020-12-18 10:21:23.000000000 +0100
+++ ../xnu-7195.81.3/bsd/conf/files 2021-01-26 21:33:35.000000000 +0100
@@ -494,7 +494,7 @@
bsd/kern/posix_sem.c standard
bsd/kern/posix_shm.c standard
# XXXdbg - I need this in the journaling and block cache code
-bsd/kern/qsort.c standard
+bsd/kern/qsort.c standard xnu-library
bsd/kern/kpi_socket.c optional sockets
bsd/kern/kpi_socketfilter.c optional sockets
bsd/kern/proc_info.c standard
diff -ru ../xnu-7195.60.75/bsd/kern/kern_cs.c ../xnu-7195.81.3/bsd/kern/kern_cs.c
--- ../xnu-7195.60.75/bsd/kern/kern_cs.c 2020-12-18 10:19:58.000000000 +0100
+++ ../xnu-7195.81.3/bsd/kern/kern_cs.c 2021-01-26 21:32:10.000000000 +0100
@@ -63,6 +63,7 @@
#include <kern/task.h>
#include <vm/vm_map.h>
+#include <vm/pmap.h>
#include <vm/vm_kern.h>
@@ -231,6 +232,18 @@
if (p->p_csflags & CS_VALID) {
p->p_csflags |= CS_DEBUGGED;
}
+#if PMAP_CS
+ task_t procTask = proc_task(p);
+ if (procTask) {
+ vm_map_t proc_map = get_task_map_reference(procTask);
+ if (proc_map) {
+ if (vm_map_cs_wx_enable(proc_map) != KERN_SUCCESS) {
+ printf("CODE SIGNING: cs_allow_invalid() not allowed by pmap: pid %d\n", p->p_pid);
+ }
+ vm_map_deallocate(proc_map);
+ }
+ }
+#endif // PMAP_CS
proc_unlock(p);
/* allow a debugged process to hide some (debug-only!) memory */
diff -ru ../xnu-7195.60.75/bsd/kern/kern_descrip.c ../xnu-7195.81.3/bsd/kern/kern_descrip.c
--- ../xnu-7195.60.75/bsd/kern/kern_descrip.c 2020-12-18 10:20:02.000000000 +0100
+++ ../xnu-7195.81.3/bsd/kern/kern_descrip.c 2021-01-26 21:32:14.000000000 +0100
@@ -760,7 +760,7 @@
proc_fdunlock(p);
return error;
}
- if (FP_ISGUARDED(fp, GUARD_DUP)) {
+ if (fp_isguarded(fp, GUARD_DUP)) {
error = fp_guard_exception(p, old, fp, kGUARD_EXC_DUP);
(void) fp_drop(p, old, fp, 1);
proc_fdunlock(p);
@@ -820,7 +820,7 @@
proc_fdunlock(p);
return error;
}
- if (FP_ISGUARDED(fp, GUARD_DUP)) {
+ if (fp_isguarded(fp, GUARD_DUP)) {
error = fp_guard_exception(p, old, fp, kGUARD_EXC_DUP);
(void) fp_drop(p, old, fp, 1);
proc_fdunlock(p);
@@ -861,7 +861,7 @@
}
if ((nfp = fdp->fd_ofiles[new]) != NULL) {
- if (FP_ISGUARDED(nfp, GUARD_CLOSE)) {
+ if (fp_isguarded(nfp, GUARD_CLOSE)) {
fp_drop(p, old, fp, 1);
error = fp_guard_exception(p,
new, nfp, kGUARD_EXC_CLOSE);
@@ -1047,7 +1047,7 @@
switch (uap->cmd) {
case F_DUPFD:
case F_DUPFD_CLOEXEC:
- if (FP_ISGUARDED(fp, GUARD_DUP)) {
+ if (fp_isguarded(fp, GUARD_DUP)) {
error = fp_guard_exception(p, fd, fp, kGUARD_EXC_DUP);
goto out;
}
@@ -1075,7 +1075,7 @@
if (uap->arg & FD_CLOEXEC) {
*pop |= UF_EXCLOSE;
} else {
- if (FILEPROC_TYPE(fp) == FTYPE_GUARDED) {
+ if (fp_isguarded(fp, 0)) {
error = fp_guard_exception(p,
fd, fp, kGUARD_EXC_NOCLOEXEC);
goto out;
@@ -3332,7 +3332,7 @@
return EBADF;
}
- if (FP_ISGUARDED(fp, GUARD_CLOSE)) {
+ if (fp_isguarded(fp, GUARD_CLOSE)) {
int error = fp_guard_exception(p, fd, fp, kGUARD_EXC_CLOSE);
proc_fdunlock(p);
return error;
@@ -5290,7 +5290,7 @@
goto out_unlock;
}
- if (FP_ISGUARDED(fp, GUARD_FILEPORT)) {
+ if (fp_isguarded(fp, GUARD_FILEPORT)) {
err = fp_guard_exception(p, fd, fp, kGUARD_EXC_FILEPORT);
goto out_unlock;
}
@@ -5517,7 +5517,7 @@
*/
switch (error) {
case ENODEV:
- if (FP_ISGUARDED(wfp, GUARD_DUP)) {
+ if (fp_isguarded(wfp, GUARD_DUP)) {
proc_fdunlock(p);
return EPERM;
}
diff -ru ../xnu-7195.60.75/bsd/kern/kern_exec.c ../xnu-7195.81.3/bsd/kern/kern_exec.c
--- ../xnu-7195.60.75/bsd/kern/kern_exec.c 2020-12-18 10:19:57.000000000 +0100
+++ ../xnu-7195.81.3/bsd/kern/kern_exec.c 2021-01-26 21:32:10.000000000 +0100
@@ -2463,7 +2463,7 @@
proc_fdlock(p);
if ((fp = fp_get_noref_locked(p, psfa->psfaa_filedes)) == NULL) {
error = EBADF;
- } else if (FILEPROC_TYPE(fp) == FTYPE_GUARDED) {
+ } else if (fp_isguarded(fp, 0)) {
error = fp_guard_exception(p, psfa->psfaa_filedes,
fp, kGUARD_EXC_NOCLOEXEC);
} else {
diff -ru ../xnu-7195.60.75/bsd/kern/kern_guarded.c ../xnu-7195.81.3/bsd/kern/kern_guarded.c
--- ../xnu-7195.60.75/bsd/kern/kern_guarded.c 2020-12-18 10:20:01.000000000 +0100
+++ ../xnu-7195.81.3/bsd/kern/kern_guarded.c 2021-01-26 21:32:14.000000000 +0100
@@ -56,7 +56,6 @@
#include <sys/reason.h>
#endif
-
#define f_flag fp_glob->fg_flag
extern int dofilewrite(vfs_context_t ctx, struct fileproc *fp,
user_addr_t bufp, user_size_t nbyte, off_t offset,
@@ -86,17 +85,25 @@
struct guarded_fileproc {
struct fileproc gf_fileproc;
- u_int gf_magic;
u_int gf_attrs;
guardid_t gf_guard;
};
-const size_t sizeof_guarded_fileproc = sizeof(struct guarded_fileproc);
+ZONE_DECLARE(gfp_zone, "guarded_fileproc",
+ sizeof(struct guarded_fileproc),
+ ZC_NOENCRYPT | ZC_ZFREE_CLEARMEM);
-#define FP_TO_GFP(fp) ((struct guarded_fileproc *)(fp))
-#define GFP_TO_FP(gfp) (&(gfp)->gf_fileproc)
+static inline struct guarded_fileproc *
+FP_TO_GFP(struct fileproc *fp)
+{
+ struct guarded_fileproc *gfp =
+ __container_of(fp, struct guarded_fileproc, gf_fileproc);
+
+ zone_require(gfp_zone, gfp);
+ return gfp;
+}
-#define GUARDED_FILEPROC_MAGIC 0x29083
+#define GFP_TO_FP(gfp) (&(gfp)->gf_fileproc)
struct gfp_crarg {
guardid_t gca_guard;
@@ -109,17 +116,12 @@
struct gfp_crarg *aarg = crarg;
struct guarded_fileproc *gfp;
- if ((gfp = kalloc(sizeof(*gfp))) == NULL) {
- return NULL;
- }
-
- bzero(gfp, sizeof(*gfp));
+ gfp = zalloc_flags(gfp_zone, Z_WAITOK | Z_ZERO);
struct fileproc *fp = &gfp->gf_fileproc;
os_ref_init(&fp->fp_iocount, &f_refgrp);
fp->fp_flags = FTYPE_GUARDED;
- gfp->gf_magic = GUARDED_FILEPROC_MAGIC;
gfp->gf_guard = aarg->gca_guard;
gfp->gf_attrs = aarg->gca_attrs;
@@ -130,13 +132,7 @@
guarded_fileproc_free(struct fileproc *fp)
{
struct guarded_fileproc *gfp = FP_TO_GFP(fp);
-
- if (FILEPROC_TYPE(fp) != FTYPE_GUARDED ||
- GUARDED_FILEPROC_MAGIC != gfp->gf_magic) {
- panic("%s: corrupt fp %p flags %x", __func__, fp, fp->fp_flags);
- }
-
- kfree(gfp, sizeof(*gfp));
+ zfree(gfp_zone, gfp);
}
static int
@@ -155,10 +151,6 @@
}
struct guarded_fileproc *gfp = FP_TO_GFP(fp);
- if (GUARDED_FILEPROC_MAGIC != gfp->gf_magic) {
- panic("%s: corrupt fp %p", __func__, fp);
- }
-
if (guard != gfp->gf_guard) {
(void) fp_drop(p, fd, fp, locked);
return EPERM; /* *not* a mismatch exception */
@@ -172,24 +164,20 @@
/*
* Expected use pattern:
*
- * if (FP_ISGUARDED(fp, GUARD_CLOSE)) {
+ * if (fp_isguarded(fp, GUARD_CLOSE)) {
* error = fp_guard_exception(p, fd, fp, kGUARD_EXC_CLOSE);
* proc_fdunlock(p);
* return error;
* }
+ *
+ * Passing `0` to `attrs` returns whether the fp is guarded at all.
*/
int
fp_isguarded(struct fileproc *fp, u_int attrs)
{
if (FILEPROC_TYPE(fp) == FTYPE_GUARDED) {
- struct guarded_fileproc *gfp = FP_TO_GFP(fp);
-
- if (GUARDED_FILEPROC_MAGIC != gfp->gf_magic) {
- panic("%s: corrupt gfp %p flags %x",
- __func__, gfp, fp->fp_flags);
- }
- return (attrs & gfp->gf_attrs) == attrs;
+ return (attrs & FP_TO_GFP(fp)->gf_attrs) == attrs;
}
return 0;
}
@@ -581,11 +569,6 @@
*/
struct guarded_fileproc *gfp = FP_TO_GFP(fp);
- if (GUARDED_FILEPROC_MAGIC != gfp->gf_magic) {
- panic("%s: corrupt gfp %p flags %x",
- __func__, gfp, fp->fp_flags);
- }
-
if (oldg == gfp->gf_guard &&
uap->guardflags == gfp->gf_attrs) {
/*
@@ -674,11 +657,6 @@
goto dropout;
}
- if (GUARDED_FILEPROC_MAGIC != gfp->gf_magic) {
- panic("%s: corrupt gfp %p flags %x",
- __func__, gfp, fp->fp_flags);
- }
-
if (oldg != gfp->gf_guard ||
uap->guardflags != gfp->gf_attrs) {
error = EPERM;
diff -ru ../xnu-7195.60.75/bsd/kern/kern_lockf.c ../xnu-7195.81.3/bsd/kern/kern_lockf.c
--- ../xnu-7195.60.75/bsd/kern/kern_lockf.c 2020-12-18 10:19:57.000000000 +0100
+++ ../xnu-7195.81.3/bsd/kern/kern_lockf.c 2021-01-26 21:32:10.000000000 +0100
@@ -506,7 +506,7 @@
{
struct lockf *block;
struct lockf **head = lock->lf_head;
- struct lockf **prev, *overlap, *ltmp;
+ struct lockf **prev, *overlap;
static const char lockstr[] = "lockf";
int priority, needtolink, error;
struct vnode *vp = lock->lf_vnode;
@@ -851,6 +851,7 @@
lf_wakelock(overlap, TRUE);
}
overlap->lf_type = lock->lf_type;
+ lf_move_blocked(overlap, lock);
FREE(lock, M_LOCKF);
lock = overlap; /* for lf_coalesce_adjacent() */
break;
@@ -860,6 +861,7 @@
* Check for common starting point and different types.
*/
if (overlap->lf_type == lock->lf_type) {
+ lf_move_blocked(overlap, lock);
FREE(lock, M_LOCKF);
lock = overlap; /* for lf_coalesce_adjacent() */
break;
@@ -891,14 +893,7 @@
overlap->lf_type == F_WRLCK) {
lf_wakelock(overlap, TRUE);
} else {
- while (!TAILQ_EMPTY(&overlap->lf_blkhd)) {
- ltmp = TAILQ_FIRST(&overlap->lf_blkhd);
- TAILQ_REMOVE(&overlap->lf_blkhd, ltmp,
- lf_block);
- TAILQ_INSERT_TAIL(&lock->lf_blkhd,
- ltmp, lf_block);
- ltmp->lf_next = lock;
- }
+ lf_move_blocked(lock, overlap);
}
/*
* Add the new lock if necessary and delete the overlap.
diff -ru ../xnu-7195.60.75/bsd/kern/proc_info.c ../xnu-7195.81.3/bsd/kern/proc_info.c
--- ../xnu-7195.60.75/bsd/kern/proc_info.c 2020-12-18 10:19:59.000000000 +0100
+++ ../xnu-7195.81.3/bsd/kern/proc_info.c 2021-01-26 21:32:12.000000000 +0100
@@ -2477,7 +2477,7 @@
fproc->fi_status |= PROC_FP_CLFORK;
}
}
- if (FILEPROC_TYPE(fp) == FTYPE_GUARDED) {
+ if (fp_isguarded(fp, 0)) {
fproc->fi_status |= PROC_FP_GUARDED;
fproc->fi_guardflags = 0;
if (fp_isguarded(fp, GUARD_CLOSE)) {
diff -ru ../xnu-7195.60.75/bsd/kern/sys_generic.c ../xnu-7195.81.3/bsd/kern/sys_generic.c
--- ../xnu-7195.60.75/bsd/kern/sys_generic.c 2020-12-18 10:20:00.000000000 +0100
+++ ../xnu-7195.81.3/bsd/kern/sys_generic.c 2021-01-26 21:32:12.000000000 +0100
@@ -502,7 +502,7 @@
}
if ((fp->f_flag & FWRITE) == 0) {
error = EBADF;
- } else if (FP_ISGUARDED(fp, GUARD_WRITE)) {
+ } else if (fp_isguarded(fp, GUARD_WRITE)) {
proc_fdlock(p);
error = fp_guard_exception(p, fd, fp, kGUARD_EXC_WRITE);
proc_fdunlock(p);
@@ -552,7 +552,7 @@
if ((fp->f_flag & FWRITE) == 0) {
error = EBADF;
- } else if (FP_ISGUARDED(fp, GUARD_WRITE)) {
+ } else if (fp_isguarded(fp, GUARD_WRITE)) {
proc_fdlock(p);
error = fp_guard_exception(p, fd, fp, kGUARD_EXC_WRITE);
proc_fdunlock(p);
@@ -670,7 +670,7 @@
error = EBADF;
goto ExitThisRoutine;
}
- if (FP_ISGUARDED(fp, GUARD_WRITE)) {
+ if (fp_isguarded(fp, GUARD_WRITE)) {
error = fp_guard_exception(p, fd, fp, kGUARD_EXC_WRITE);
goto ExitThisRoutine;
}
diff -ru ../xnu-7195.60.75/bsd/kern/uipc_usrreq.c ../xnu-7195.81.3/bsd/kern/uipc_usrreq.c
--- ../xnu-7195.60.75/bsd/kern/uipc_usrreq.c 2020-12-18 10:19:57.000000000 +0100
+++ ../xnu-7195.81.3/bsd/kern/uipc_usrreq.c 2021-01-26 21:32:10.000000000 +0100
@@ -2295,7 +2295,7 @@
} else if (!fg_sendable(tmpfp->fp_glob)) {
proc_fdunlock(p);
return EINVAL;
- } else if (FP_ISGUARDED(tmpfp, GUARD_SOCKET_IPC)) {
+ } else if (fp_isguarded(tmpfp, GUARD_SOCKET_IPC)) {
error = fp_guard_exception(p,
fds[i], tmpfp, kGUARD_EXC_SOCKET_IPC);
proc_fdunlock(p);
diff -ru ../xnu-7195.60.75/bsd/net/content_filter.c ../xnu-7195.81.3/bsd/net/content_filter.c
--- ../xnu-7195.60.75/bsd/net/content_filter.c 2020-12-18 10:20:05.000000000 +0100
+++ ../xnu-7195.81.3/bsd/net/content_filter.c 2021-01-26 21:32:18.000000000 +0100
@@ -7384,7 +7384,7 @@
cfc->cf_flags |= CFF_FLOW_CONTROLLED;
- cfil_rw_unlock_exclusive(&cfil_lck_rw);
+ cfil_rw_lock_exclusive_to_shared(&cfil_lck_rw);
} else if (error != 0) {
OSIncrementAtomic(&cfil_stats.cfs_stats_event_fail);
}
diff -ru ../xnu-7195.60.75/bsd/net/rtsock.c ../xnu-7195.81.3/bsd/net/rtsock.c
--- ../xnu-7195.60.75/bsd/net/rtsock.c 2020-12-18 10:20:07.000000000 +0100
+++ ../xnu-7195.81.3/bsd/net/rtsock.c 2021-01-26 21:32:20.000000000 +0100
@@ -1096,6 +1096,9 @@
rtinfo->rti_info[i] = &sa_zero;
return 0; /* should be EINVAL but for compat */
}
+ if (sa->sa_len < offsetof(struct sockaddr, sa_data)) {
+ return EINVAL;
+ }
/* accept it */
rtinfo->rti_info[i] = sa;
ADVANCE32(cp, sa);
diff -ru ../xnu-7195.60.75/bsd/netinet/flow_divert.c ../xnu-7195.81.3/bsd/netinet/flow_divert.c
--- ../xnu-7195.60.75/bsd/netinet/flow_divert.c 2020-12-18 10:20:16.000000000 +0100
+++ ../xnu-7195.81.3/bsd/netinet/flow_divert.c 2021-01-26 21:32:29.000000000 +0100
@@ -3383,6 +3383,17 @@
errno_t
flow_divert_connect_out(struct socket *so, struct sockaddr *to, proc_t p)
{
+#if CONTENT_FILTER
+ if (SOCK_TYPE(so) == SOCK_STREAM && !(so->so_flags & SOF_CONTENT_FILTER)) {
+ int error = cfil_sock_attach(so, NULL, to, CFS_CONNECTION_DIR_OUT);
+ if (error != 0) {
+ struct flow_divert_pcb *fd_cb = so->so_fd_pcb;
+ FDLOG(LOG_ERR, fd_cb, "Failed to attach cfil: %d", error);
+ return error;
+ }
+ }
+#endif /* CONTENT_FILTER */
+
return flow_divert_connect_out_internal(so, to, p, false);
}
diff -ru ../xnu-7195.60.75/bsd/netinet/tcp_input.c ../xnu-7195.81.3/bsd/netinet/tcp_input.c
--- ../xnu-7195.60.75/bsd/netinet/tcp_input.c 2020-12-18 10:20:15.000000000 +0100
+++ ../xnu-7195.81.3/bsd/netinet/tcp_input.c 2021-01-26 21:32:28.000000000 +0100
@@ -3273,7 +3273,7 @@
inp->in6p_flags & IN6P_AUTOFLOWLABEL) {
inp->inp_flow &= ~IPV6_FLOWLABEL_MASK;
inp->inp_flow |=
- (htonl(inp->inp_flowhash) & IPV6_FLOWLABEL_MASK);
+ (htonl(ip6_randomflowlabel()) & IPV6_FLOWLABEL_MASK);
}
/* reset the incomp processing flag */
diff -ru ../xnu-7195.60.75/bsd/netinet/tcp_usrreq.c ../xnu-7195.81.3/bsd/netinet/tcp_usrreq.c
--- ../xnu-7195.60.75/bsd/netinet/tcp_usrreq.c 2020-12-18 10:20:14.000000000 +0100
+++ ../xnu-7195.81.3/bsd/netinet/tcp_usrreq.c 2021-01-26 21:32:27.000000000 +0100
@@ -1527,7 +1527,7 @@
if (inp->inp_flow == 0 && inp->in6p_flags & IN6P_AUTOFLOWLABEL) {
inp->inp_flow &= ~IPV6_FLOWLABEL_MASK;
inp->inp_flow |=
- (htonl(inp->inp_flowhash) & IPV6_FLOWLABEL_MASK);
+ (htonl(ip6_randomflowlabel()) & IPV6_FLOWLABEL_MASK);
}
tcp_set_max_rwinscale(tp, so);
diff -ru ../xnu-7195.60.75/bsd/netinet6/icmp6.c ../xnu-7195.81.3/bsd/netinet6/icmp6.c
--- ../xnu-7195.60.75/bsd/netinet6/icmp6.c 2020-12-18 10:20:30.000000000 +0100
+++ ../xnu-7195.81.3/bsd/netinet6/icmp6.c 2021-01-26 21:32:44.000000000 +0100
@@ -986,6 +986,7 @@
}
#endif
eip6 = (struct ip6_hdr *)(icmp6 + 1);
+ bzero(&icmp6dst, sizeof(icmp6dst));
/* Detect the upper level protocol */
{
@@ -994,7 +995,6 @@
int eoff = off + sizeof(struct icmp6_hdr) +
sizeof(struct ip6_hdr);
struct ip6ctlparam ip6cp;
- struct in6_addr *finaldst = NULL;
int icmp6type = icmp6->icmp6_type;
struct ip6_frag *fh;
struct ip6_rthdr *rth;
@@ -1080,7 +1080,7 @@
/* just ignore a bogus header */
if ((rth0->ip6r0_len % 2) == 0 &&
(hops = rth0->ip6r0_len / 2)) {
- finaldst = (struct in6_addr *)(void *)(rth0 + 1) + (hops - 1);
+ icmp6dst.sin6_addr = *((struct in6_addr *)(void *)(rth0 + 1) + (hops - 1));
}
}
eoff += rthlen;
@@ -1148,13 +1148,10 @@
*/
eip6 = (struct ip6_hdr *)(icmp6 + 1);
- bzero(&icmp6dst, sizeof(icmp6dst));
icmp6dst.sin6_len = sizeof(struct sockaddr_in6);
icmp6dst.sin6_family = AF_INET6;
- if (finaldst == NULL) {
+ if (IN6_IS_ADDR_UNSPECIFIED(&icmp6dst.sin6_addr)) {
icmp6dst.sin6_addr = eip6->ip6_dst;
- } else {
- icmp6dst.sin6_addr = *finaldst;
}
if (in6_setscope(&icmp6dst.sin6_addr, m->m_pkthdr.rcvif, NULL)) {
goto freeit;
@@ -1169,14 +1166,11 @@
icmp6src.sin6_flowinfo =
(eip6->ip6_flow & IPV6_FLOWLABEL_MASK);
- if (finaldst == NULL) {
- finaldst = &eip6->ip6_dst;
- }
ip6cp.ip6c_m = m;
ip6cp.ip6c_icmp6 = icmp6;
ip6cp.ip6c_ip6 = (struct ip6_hdr *)(icmp6 + 1);
ip6cp.ip6c_off = eoff;
- ip6cp.ip6c_finaldst = finaldst;
+ ip6cp.ip6c_finaldst = &icmp6dst.sin6_addr;
ip6cp.ip6c_src = &icmp6src;
ip6cp.ip6c_nxt = nxt;
diff -ru ../xnu-7195.60.75/bsd/netinet6/ip6_id.c ../xnu-7195.81.3/bsd/netinet6/ip6_id.c
--- ../xnu-7195.60.75/bsd/netinet6/ip6_id.c 2020-12-18 10:20:32.000000000 +0100
+++ ../xnu-7195.81.3/bsd/netinet6/ip6_id.c 2021-01-26 21:32:46.000000000 +0100
@@ -168,26 +168,6 @@
.ru_reseed = 0
};
-static struct randomtab randomtab_20 = {
- .ru_bits = 20, /* resulting bits */
- .ru_out = 180, /* Time after wich will be reseeded */
- .ru_max = 200000, /* Uniq cycle, avoid blackjack prediction */
- .ru_gen = 2, /* Starting generator */
- .ru_n = 524269, /* RU_N-1 = 2^2*3^2*14563 */
- .ru_agen = 7, /* determine ru_a as RU_AGEN^(2*rand) */
- .ru_m = 279936, /* RU_M = 2^7*3^7 - don't change */
- .pfacts = { 2, 3, 14563, 0 }, /* factors of ru_n */
- .ru_counter = 0,
- .ru_msb = 0,
- .ru_x = 0,
- .ru_seed = 0,
- .ru_seed2 = 0,
- .ru_a = 0,
- .ru_b = 0,
- .ru_g = 0,
- .ru_reseed = 0
-};
-
static u_int32_t pmod(u_int32_t, u_int32_t, u_int32_t);
static void initid(struct randomtab *);
static u_int32_t randomid(struct randomtab *);
@@ -311,5 +291,5 @@
u_int32_t
ip6_randomflowlabel(void)
{
- return randomid(&randomtab_20) & 0xfffff;
+ return RandomULong() & IPV6_FLOWLABEL_MASK;
}
diff -ru ../xnu-7195.60.75/bsd/netinet6/ipsec.c ../xnu-7195.81.3/bsd/netinet6/ipsec.c
--- ../xnu-7195.60.75/bsd/netinet6/ipsec.c 2020-12-18 10:20:31.000000000 +0100
+++ ../xnu-7195.81.3/bsd/netinet6/ipsec.c 2021-01-26 21:32:45.000000000 +0100
@@ -2519,7 +2519,7 @@
/* construct new IPv4 header. see RFC 2401 5.1.2.1 */
/* ECN consideration. */
- ip64_ecn_ingress(ip4_ipsec_ecn, &ip->ip_tos, &ip6i->ip6_flow);
+ ip64_ecn_ingress(ip4_ipsec_ecn, &ip->ip_tos, &ip6->ip6_flow);
if (plen + sizeof(struct ip) < IP_MAXPACKET) {
ip->ip_len = htons((u_int16_t)(plen + sizeof(struct ip)));
@@ -2784,7 +2784,7 @@
/* construct new IPv6 header. see RFC 2401 5.1.2.2 */
/* ECN consideration. */
- ip46_ecn_ingress(ip6_ipsec_ecn, &ip6->ip6_flow, &oip->ip_tos);
+ ip46_ecn_ingress(ip6_ipsec_ecn, &ip6->ip6_flow, &ip->ip_tos);
if (plen < IPV6_MAXPACKET - sizeof(struct ip6_hdr)) {
ip6->ip6_plen = htons((u_int16_t)plen);
} else {
diff -ru ../xnu-7195.60.75/bsd/netinet6/raw_ip6.c ../xnu-7195.81.3/bsd/netinet6/raw_ip6.c
--- ../xnu-7195.60.75/bsd/netinet6/raw_ip6.c 2020-12-18 10:20:32.000000000 +0100
+++ ../xnu-7195.81.3/bsd/netinet6/raw_ip6.c 2021-01-26 21:32:46.000000000 +0100
@@ -531,7 +531,7 @@
if (in6p->inp_flow == 0 && in6p->in6p_flags & IN6P_AUTOFLOWLABEL) {
in6p->inp_flow &= ~IPV6_FLOWLABEL_MASK;
in6p->inp_flow |=
- (htonl(in6p->inp_flowhash) & IPV6_FLOWLABEL_MASK);
+ (htonl(ip6_randomflowlabel()) & IPV6_FLOWLABEL_MASK);
}
M_PREPEND(m, sizeof(*ip6), M_WAIT, 1);
diff -ru ../xnu-7195.60.75/bsd/netinet6/udp6_output.c ../xnu-7195.81.3/bsd/netinet6/udp6_output.c
--- ../xnu-7195.60.75/bsd/netinet6/udp6_output.c 2020-12-18 10:20:30.000000000 +0100
+++ ../xnu-7195.81.3/bsd/netinet6/udp6_output.c 2021-01-26 21:32:44.000000000 +0100
@@ -372,7 +372,7 @@
if (in6p->inp_flow == 0 && in6p->in6p_flags & IN6P_AUTOFLOWLABEL) {
in6p->inp_flow &= ~IPV6_FLOWLABEL_MASK;
in6p->inp_flow |=
- (htonl(in6p->inp_flowhash) & IPV6_FLOWLABEL_MASK);
+ (htonl(ip6_randomflowlabel()) & IPV6_FLOWLABEL_MASK);
}
if (af == AF_INET) {
diff -ru ../xnu-7195.60.75/bsd/netinet6/udp6_usrreq.c ../xnu-7195.81.3/bsd/netinet6/udp6_usrreq.c
--- ../xnu-7195.60.75/bsd/netinet6/udp6_usrreq.c 2020-12-18 10:20:30.000000000 +0100
+++ ../xnu-7195.81.3/bsd/netinet6/udp6_usrreq.c 2021-01-26 21:32:44.000000000 +0100
@@ -927,7 +927,7 @@
inp->in6p_flags & IN6P_AUTOFLOWLABEL) {
inp->inp_flow &= ~IPV6_FLOWLABEL_MASK;
inp->inp_flow |=
- (htonl(inp->inp_flowhash) & IPV6_FLOWLABEL_MASK);
+ (htonl(ip6_randomflowlabel()) & IPV6_FLOWLABEL_MASK);
}
}
return error;
diff -ru ../xnu-7195.60.75/bsd/sys/file_internal.h ../xnu-7195.81.3/bsd/sys/file_internal.h
--- ../xnu-7195.60.75/bsd/sys/file_internal.h 2020-12-18 10:20:18.000000000 +0100
+++ ../xnu-7195.81.3/bsd/sys/file_internal.h 2021-01-26 21:32:32.000000000 +0100
@@ -123,9 +123,6 @@
#define FILEPROC_TYPE(fp) ((fp)->fp_flags & FP_TYPEMASK)
-#define FP_ISGUARDED(fp, attribs) \
- ((FILEPROC_TYPE(fp) == FTYPE_GUARDED) ? fp_isguarded(fp, attribs) : 0)
-
typedef enum {
FTYPE_SIMPLE = 0,
FTYPE_GUARDED = (1 << _FP_TYPESHIFT)
diff -ru ../xnu-7195.60.75/bsd/vfs/kpi_vfs.c ../xnu-7195.81.3/bsd/vfs/kpi_vfs.c
--- ../xnu-7195.60.75/bsd/vfs/kpi_vfs.c 2020-12-18 10:19:55.000000000 +0100
+++ ../xnu-7195.81.3/bsd/vfs/kpi_vfs.c 2021-01-26 21:32:08.000000000 +0100
@@ -4990,7 +4990,8 @@
struct nameidata nd;
char smallname[64];
char *filename = NULL;
- size_t len;
+ size_t alloc_len;
+ size_t copy_len;
if ((dvp == NULLVP) ||
(basename == NULL) || (basename[0] == '\0') ||
@@ -4998,11 +4999,11 @@
return;
}
filename = &smallname[0];
- len = snprintf(filename, sizeof(smallname), "._%s", basename);
- if (len >= sizeof(smallname)) {
- len++; /* snprintf result doesn't include '\0' */
- filename = kheap_alloc(KHEAP_TEMP, len, Z_WAITOK);
- len = snprintf(filename, len, "._%s", basename);
+ alloc_len = snprintf(filename, sizeof(smallname), "._%s", basename);
+ if (alloc_len >= sizeof(smallname)) {
+ alloc_len++; /* snprintf result doesn't include '\0' */
+ filename = kheap_alloc(KHEAP_TEMP, alloc_len, Z_WAITOK);
+ copy_len = snprintf(filename, alloc_len, "._%s", basename);
}
NDINIT(&nd, LOOKUP, OP_SETATTR, NOFOLLOW | USEDVP, UIO_SYSSPACE,
CAST_USER_ADDR_T(filename), ctx);
@@ -5028,7 +5029,7 @@
vnode_put(xvp);
out2:
if (filename && filename != &smallname[0]) {
- kheap_free(KHEAP_TEMP, filename, len);
+ kheap_free(KHEAP_TEMP, filename, alloc_len);
}
}
#endif /* CONFIG_APPLEDOUBLE */
@@ -5436,7 +5437,8 @@
_err = (*vp->v_op[vnop_advlock_desc.vdesc_offset])(&a);
}
DTRACE_FSINFO(advlock, vnode_t, vp);
- if (op == F_UNLCK && flags == F_FLOCK) {
+ if (op == F_UNLCK &&
+ (flags & (F_FLOCK | F_OFD_LOCK)) != 0) {
post_event_if_success(vp, _err, NOTE_FUNLOCK);
}
}
diff -ru ../xnu-7195.60.75/bsd/vfs/vfs_xattr.c ../xnu-7195.81.3/bsd/vfs/vfs_xattr.c
--- ../xnu-7195.60.75/bsd/vfs/vfs_xattr.c 2020-12-18 10:19:55.000000000 +0100
+++ ../xnu-7195.81.3/bsd/vfs/vfs_xattr.c 2021-01-26 21:32:07.000000000 +0100
@@ -2463,7 +2463,8 @@
char smallname[64];
char *filename = NULL;
const char *basename = NULL;
- size_t len;
+ size_t alloc_len;
+ size_t copy_len;
errno_t error;
int opened = 0;
int referenced = 0;
@@ -2493,11 +2494,11 @@
goto out;
}
filename = &smallname[0];
- len = snprintf(filename, sizeof(smallname), "%s%s", ATTR_FILE_PREFIX, basename);
- if (len >= sizeof(smallname)) {
- len++; /* snprintf result doesn't include '\0' */
- filename = kheap_alloc(KHEAP_TEMP, len, Z_WAITOK);
- len = snprintf(filename, len, "%s%s", ATTR_FILE_PREFIX, basename);
+ alloc_len = snprintf(filename, sizeof(smallname), "%s%s", ATTR_FILE_PREFIX, basename);
+ if (alloc_len >= sizeof(smallname)) {
+ alloc_len++; /* snprintf result doesn't include '\0' */
+ filename = kheap_alloc(KHEAP_TEMP, alloc_len, Z_WAITOK);
+ copy_len = snprintf(filename, alloc_len, "%s%s", ATTR_FILE_PREFIX, basename);
}
/*
* Note that the lookup here does not authorize. Since we are looking
@@ -2687,7 +2688,7 @@
vnode_putname(basename);
}
if (filename && filename != &smallname[0]) {
- kheap_free(KHEAP_TEMP, filename, len);
+ kheap_free(KHEAP_TEMP, filename, alloc_len);
}
*xvpp = xvp; /* return a referenced vnode */
diff -ru ../xnu-7195.60.75/config/Makefile ../xnu-7195.81.3/config/Makefile
--- ../xnu-7195.60.75/config/Makefile 2020-12-18 10:21:19.000000000 +0100
+++ ../xnu-7195.81.3/config/Makefile 2021-01-26 21:33:32.000000000 +0100
@@ -85,6 +85,7 @@
$(_v)$(SOURCE)/generate_combined_symbolsets_plist.sh $@ $^ $(_vstdout)
$(_v)$(PLUTIL) -convert binary1 -s $@
+ifneq ($(RC_ProjectName),xnu_libraries)
$(OBJPATH)/allsymbols: $(OBJPATH)/$(KERNEL_FILE_NAME)
$(_v)$(NM) -gj $< | sort -u > $@
@@ -119,6 +120,9 @@
$(OBJPATH)/$(MI_SUPPORTED_KPI_FILENAME)
do_config_all:: build_symbol_sets
+else
+# We are building XNU as a static library - avoid creating symbol sets
+endif
# There's no simple static pattern rule for these paths, so hardcode dependencies in the command list
$(SYMROOT_INSTALL_KEXT_MACHO_FILES): ALWAYS
@@ -155,6 +159,7 @@
$(_v)$(INSTALL) $(INSTALL_FLAGS) $< $@
ifneq ($(INSTALL_KASAN_ONLY),1)
+ifneq ($(BUILD_XNU_LIBRARY),1)
do_config_install:: $(SYMROOT_INSTALL_KEXT_MACHO_FILES) \
$(SYMROOT_INSTALL_KEXT_PLISTS) \
$(DSTROOT_INSTALL_KEXT_MACHO_FILES) \
@@ -162,6 +167,7 @@
$(DSTROOT)/$(KRESDIR)/$(MD_SUPPORTED_KPI_FILENAME) \
$(DSTROOT)/$(KRESDIR)/$(MI_SUPPORTED_KPI_FILENAME)
endif
+endif
$(OBJPATH)/all-kpi.exp: $(EXPORTS_FILES)
$(_v)$(SOURCE)/generate_linker_exports.sh $@ $+ $(Kasan_EXPORTS)
diff -ru ../xnu-7195.60.75/config/MasterVersion ../xnu-7195.81.3/config/MasterVersion
--- ../xnu-7195.60.75/config/MasterVersion 2020-09-30 04:19:31.000000000 +0200
+++ ../xnu-7195.81.3/config/MasterVersion 2020-11-19 20:56:02.000000000 +0100
@@ -1,4 +1,4 @@
-20.2.0
+20.3.0
# The first line of this file contains the master version number for the kernel.
# All other instances of the kernel version in xnu are derived from this file.
Only in ../xnu-7195.60.75/doc: pac.md
diff -ru ../xnu-7195.60.75/iokit/Kernel/IOHibernateIO.cpp ../xnu-7195.81.3/iokit/Kernel/IOHibernateIO.cpp
--- ../xnu-7195.60.75/iokit/Kernel/IOHibernateIO.cpp 2020-12-18 10:19:44.000000000 +0100
+++ ../xnu-7195.81.3/iokit/Kernel/IOHibernateIO.cpp 2021-01-26 21:31:57.000000000 +0100
@@ -201,7 +201,17 @@
static uint32_t gIOHibernateFreeRatio = 0; // free page target (percent)
uint32_t gIOHibernateFreeTime = 0 * 1000; // max time to spend freeing pages (ms)
-static uint64_t gIOHibernateCompression = 0x80; // default compression 50%
+
+enum {
+ HIB_COMPR_RATIO_ARM64 = (0xa5), // compression ~65%. Since we don't support retries we start higher.
+ HIB_COMPR_RATIO_INTEL = (0x80) // compression 50%
+};
+
+#if defined(__arm64__)
+static uint64_t gIOHibernateCompression = HIB_COMPR_RATIO_ARM64;
+#else
+static uint64_t gIOHibernateCompression = HIB_COMPR_RATIO_INTEL;
+#endif /* __arm64__ */
boolean_t gIOHibernateStandbyDisabled;
static IODTNVRAM * gIOOptionsEntry;
@@ -2179,6 +2189,18 @@
header->sleepTime = gIOLastSleepTime.tv_sec;
header->compression = ((uint32_t)((compressedSize << 8) / uncompressedSize));
+#if defined(__arm64__)
+ /*
+ * We don't support retry on hibernation failure and so
+ * we don't want to set this value to anything smaller
+ * just because we may have been lucky this time around.
+ * Though we'll let it go higher.
+ */
+ if (header->compression < HIB_COMPR_RATIO_ARM64) {
+ header->compression = HIB_COMPR_RATIO_ARM64;
+ }
+#endif /* __arm64__ */
+
gIOHibernateCompression = header->compression;
count = vars->fileVars->fileExtents->getLength();
diff -ru ../xnu-7195.60.75/iokit/Kernel/IOPMrootDomain.cpp ../xnu-7195.81.3/iokit/Kernel/IOPMrootDomain.cpp
--- ../xnu-7195.60.75/iokit/Kernel/IOPMrootDomain.cpp 2020-12-18 10:19:44.000000000 +0100
+++ ../xnu-7195.81.3/iokit/Kernel/IOPMrootDomain.cpp 2021-01-26 21:31:57.000000000 +0100
@@ -3060,6 +3060,15 @@
isRTCAlarmWake = true;
fullWakeReason = kFullWakeReasonLocalUser;
requestUserActive(this, "RTC debug alarm");
+ } else {
+#if HIBERNATION
+ OSSharedPtr<OSObject> hibOptionsProp = copyProperty(kIOHibernateOptionsKey);
+ OSNumber * hibOptions = OSDynamicCast(OSNumber, hibOptionsProp.get());
+ if (hibOptions && !(hibOptions->unsigned32BitValue() & kIOHibernateOptionDarkWake)) {
+ fullWakeReason = kFullWakeReasonLocalUser;
+ requestUserActive(this, "hibernate user wake");
+ }
+#endif
}
// stay awake for at least 30 seconds
diff -ru ../xnu-7195.60.75/iokit/bsddev/IOKitBSDInit.cpp ../xnu-7195.81.3/iokit/bsddev/IOKitBSDInit.cpp
--- ../xnu-7195.60.75/iokit/bsddev/IOKitBSDInit.cpp 2020-12-18 10:19:38.000000000 +0100
+++ ../xnu-7195.81.3/iokit/bsddev/IOKitBSDInit.cpp 2021-01-26 21:31:51.000000000 +0100
@@ -528,10 +528,6 @@
// Clean up and reboot!
do_reboot:
- if (nvram != NULL) {
- nvram->release();
- }
-
if (boot_command_recover != NULL) {
boot_command_recover->release();
}
diff -ru ../xnu-7195.60.75/iokit/conf/Makefile.template ../xnu-7195.81.3/iokit/conf/Makefile.template
--- ../xnu-7195.60.75/iokit/conf/Makefile.template 2020-12-18 10:21:21.000000000 +0100
+++ ../xnu-7195.81.3/iokit/conf/Makefile.template 2021-01-26 21:33:33.000000000 +0100
@@ -47,6 +47,8 @@
%OBJS
+%LIBOBJS
+
%CFILES
%CXXFILES
@@ -222,7 +224,17 @@
$(ECHO) $(TARGET)/$(CURRENT_KERNEL_CONFIG)/$${obj}; \
done > $(COMPONENT).filelist
+$(COMPONENT).libfilelist: $(LIBOBJS)
+ @$(LOG_LDFILELIST) "lib$(COMPONENT)"
+ $(_v)for obj in ${LIBOBJS}; do \
+ $(ECHO) $(TARGET)/$(CURRENT_KERNEL_CONFIG)/$${obj}; \
+ done > $(COMPONENT).libfilelist
+
+ifeq ($(RC_ProjectName),xnu_libraries)
+do_all: $(COMPONENT).libfilelist
+else
do_all: $(COMPONENT).filelist
+endif
do_build_all:: do_all
diff -ru ../xnu-7195.60.75/libkern/conf/Makefile.template ../xnu-7195.81.3/libkern/conf/Makefile.template
--- ../xnu-7195.60.75/libkern/conf/Makefile.template 2020-12-18 10:21:29.000000000 +0100
+++ ../xnu-7195.81.3/libkern/conf/Makefile.template 2021-01-26 21:33:41.000000000 +0100
@@ -43,6 +43,8 @@
%OBJS
+%LIBOBJS
+
%CFILES
%CXXFILES
@@ -157,7 +159,17 @@
$(ECHO) $(TARGET)/$(CURRENT_KERNEL_CONFIG)/$${obj}; \
done > $(COMPONENT).filelist
+$(COMPONENT).libfilelist: $(LIBOBJS)
+ @$(LOG_LDFILELIST) "lib$(COMPONENT)"
+ $(_v)for obj in ${LIBOBJS}; do \
+ $(ECHO) $(TARGET)/$(CURRENT_KERNEL_CONFIG)/$${obj}; \
+ done > $(COMPONENT).libfilelist
+
+ifeq ($(RC_ProjectName),xnu_libraries)
+do_all: $(COMPONENT).libfilelist
+else
do_all: $(COMPONENT).filelist
+endif
do_build_all:: do_all
diff -ru ../xnu-7195.60.75/libsa/conf/Makefile.template ../xnu-7195.81.3/libsa/conf/Makefile.template
--- ../xnu-7195.60.75/libsa/conf/Makefile.template 2020-12-18 10:21:19.000000000 +0100
+++ ../xnu-7195.81.3/libsa/conf/Makefile.template 2021-01-26 21:33:31.000000000 +0100
@@ -40,6 +40,8 @@
%OBJS
+%LIBOBJS
+
%CFILES
%CXXFILES
@@ -74,8 +76,18 @@
$(ECHO) $(TARGET)/$(CURRENT_KERNEL_CONFIG)/$${obj}; \
done > $(COMPONENT).filelist
+$(COMPONENT).libfilelist: $(LIBOBJS)
+ @$(LOG_LDFILELIST) "lib$(COMPONENT)"
+ $(_v)for obj in ${LIBOBJS}; do \
+ $(ECHO) $(TARGET)/$(CURRENT_KERNEL_CONFIG)/$${obj}; \
+ done > $(COMPONENT).libfilelist
+
+ifeq ($(RC_ProjectName),xnu_libraries)
+do_all: $(COMPONENT).libfilelist
+else
do_all: $(COMPONENT).filelist
+endif
do_build_all:: do_all
diff -ru ../xnu-7195.60.75/makedefs/MakeInc.cmd ../xnu-7195.81.3/makedefs/MakeInc.cmd
--- ../xnu-7195.60.75/makedefs/MakeInc.cmd 2020-12-18 10:21:24.000000000 +0100
+++ ../xnu-7195.81.3/makedefs/MakeInc.cmd 2021-01-26 21:33:36.000000000 +0100
@@ -185,6 +185,9 @@
ifeq ($(LIBTOOL),)
export LIBTOOL := $(shell $(XCRUN) -sdk $(SDKROOT) -find libtool)
endif
+ifeq ($(OTOOL),)
+ export OTOOL := $(shell $(XCRUN) -sdk $(SDKROOT) -find otool)
+endif
ifeq ($(NM),)
export NM := $(shell $(XCRUN) -sdk $(SDKROOT) -find nm)
endif
diff -ru ../xnu-7195.60.75/makedefs/MakeInc.def ../xnu-7195.81.3/makedefs/MakeInc.def
--- ../xnu-7195.60.75/makedefs/MakeInc.def 2020-12-18 10:21:24.000000000 +0100
+++ ../xnu-7195.81.3/makedefs/MakeInc.def 2021-01-26 21:33:36.000000000 +0100
@@ -69,8 +69,8 @@
MACHINE_FLAGS_ARM64_T8011 = -DARM64_BOARD_CONFIG_T8011 -mcpu=hurricane
MACHINE_FLAGS_ARM64_BCM2837 = -DARM64_BOARD_CONFIG_BCM2837
MACHINE_FLAGS_ARM64_T8020 = -DARM64_BOARD_CONFIG_T8020 -mcpu=vortex
-MACHINE_FLAGS_ARM64_T8101 = -DARM64_BOARD_CONFIG_T8101 -mcpu=firestorm
-MACHINE_FLAGS_ARM64_T8103 = -DARM64_BOARD_CONFIG_T8103 -mcpu=firestorm
+MACHINE_FLAGS_ARM64_T8101 = -DARM64_BOARD_CONFIG_T8101 -D__ARM_ARCH_8_5__=1
+MACHINE_FLAGS_ARM64_T8103 = -DARM64_BOARD_CONFIG_T8103 -D__ARM_ARCH_8_5__=1
#
@@ -228,6 +228,13 @@
ARCH_FLAGS_X86_64 = -arch x86_64
ARCH_FLAGS_X86_64H = -arch x86_64h
+ifeq ($(RC_ProjectName),xnu_libraries)
+WILL_BUILD_STATIC_KC := 1
+BUILD_STATIC_LINK := 1
+BUILD_XNU_LIBRARY := 1
+RC_NONARCH_CFLAGS += -D__BUILDING_XNU_LIBRARY__=1
+endif
+
ifneq ($(filter ARM ARM64,$(CURRENT_ARCH_CONFIG)),)
ifneq ($(findstring _Sim,$(RC_ProjectName)),)
@@ -744,10 +751,9 @@
#
LD_KERNEL_LIBS = -lcc_kext
LD_KERNEL_ARCHIVES = $(LDFLAGS_KERNEL_SDK) -lfirehose_kernel
-
# Link opensource binary library
-ifneq ($(filter T8020 T8020 T8101 T8101,$(CURRENT_MACHINE_CONFIG)),)
-LDFLAGS_KERNEL_ONLY += -rdynamic -Wl,-force_load,$(SRCROOT)/SETUP/lib$(CURRENT_MACHINE_CONFIG).os.$(CURRENT_KERNEL_CONFIG).a
+ifneq ($(filter T8020 T8101 T8020 T8101,$(CURRENT_MACHINE_CONFIG)),)
+ LDFLAGS_KERNEL_ONLY += -rdynamic -Wl,-force_load,$(KDKROOT)/System/Library/KernelSupport/lib$(CURRENT_MACHINE_CONFIG).os.$(CURRENT_KERNEL_CONFIG).a
endif
#
diff -ru ../xnu-7195.60.75/makedefs/MakeInc.kernel ../xnu-7195.81.3/makedefs/MakeInc.kernel
--- ../xnu-7195.60.75/makedefs/MakeInc.kernel 2020-12-18 10:21:24.000000000 +0100
+++ ../xnu-7195.81.3/makedefs/MakeInc.kernel 2021-01-26 21:33:36.000000000 +0100
@@ -46,13 +46,22 @@
# Rules for the highly parallel "build" phase, where each build configuration
# writes into their own $(TARGET) independent of other build configs
#
-# There are 4 primary build outputs:
+# There are 5 primary build outputs:
# 1) $(KERNEL_FILE_NAME).unstripped (raw linked kernel, unstripped)
# 2) $(KERNEL_FILE_NAME) (stripped kernel, with optional CTF data)
# 3) $(KERNEL_FILE_NAME).dSYM (dSYM)
# 4) $(KERNEL_FILE_NAME).link (bits for static linking)
+# 5) lib$(KERNEL_FILE_NAME).a (static archive for testing)
ifeq ($(BUILD_STATIC_LINK),1)
+ifeq ($(BUILD_XNU_LIBRARY),1)
+
+KERNEL_STATIC_LINK_TARGETS = \
+ $(TARGET)/lib$(KERNEL_FILE_NAME).a
+KERNEL_STATIC_LINK_DST = \
+ $(DSTROOT)/$(INSTALL_KERNEL_DIR)/lib$(KERNEL_FILE_NAME).a
+
+else
KERNEL_STATIC_LINK_TARGETS = \
$(TARGET)/$(KERNEL_FILE_NAME).link/$(KERNEL_FILE_NAME).a
@@ -67,11 +76,17 @@
$(DSTROOT)/$(INSTALL_KERNEL_DIR)/$(KERNEL_FILE_NAME).link/$(KERNEL_FILE_NAME).dSYM/$(DSYMLLDBMACROSDIR)/$(KERNEL_LLDBBOOTSTRAP_NAME)
endif
+endif
do_build_all:: do_build_kernel
.PHONY: do_build_kernel
+ifeq ($(BUILD_XNU_LIBRARY),1)
+do_build_kernel: $(KERNEL_STATIC_LINK_TARGETS)
+
+else
+
do_build_kernel: $(TARGET)/$(KERNEL_FILE_NAME) $(TARGET)/$(KERNEL_FILE_NAME).unstripped $(KERNEL_STATIC_LINK_TARGETS)
@:
@@ -84,6 +99,8 @@
do_build_kernel_dSYM: $(TARGET)/$(KERNEL_FILE_NAME).dSYM
@:
+endif
+
.LDFLAGS: ALWAYS
$(_v)$(REPLACECONTENTS) $@ $(LD) $(LDFLAGS_KERNEL) $(LDFLAGS_KERNEL_ONLY) $(LD_KERNEL_LIBS)
.CFLAGS: ALWAYS
@@ -113,6 +130,15 @@
$(_v)$(MV) $@/$(DSYMDWARFDIR)/$(KERNEL_FILE_NAME).unstripped $@/$(DSYMDWARFDIR)/$(KERNEL_FILE_NAME)
$(_v)$(TOUCH) $@
+ifeq ($(BUILD_XNU_LIBRARY),1)
+$(TARGET)/lib$(KERNEL_FILE_NAME).a: $(addprefix $(TARGET)/,$(foreach component,$(COMPONENT_LIST),$(component)/$(CURRENT_KERNEL_CONFIG)/$(component).libfilelist)) nonlto.o $(SRCROOT)/config/version.c $(SRCROOT)/config/MasterVersion .LDFLAGS $(filter %/MakeInc.kernel,$(MAKEFILE_LIST))
+ $(_v)${MAKE} -f $(firstword $(MAKEFILE_LIST)) version.o
+ @$(LOG_LIBTOOL) "$(@F)"
+ $(_v)$(CAT) $(filter %.libfilelist,$+) < /dev/null > link.filelist
+ $(_v)$(LIBTOOL) -static -csD -filelist link.filelist -o $@
+ $(_v)$(LN) $(call function_convert_build_config_to_objdir,$(CURRENT_BUILD_CONFIG))/lib$(KERNEL_FILE_NAME).a $(OBJROOT)/lib$(KERNEL_FILE_NAME).a
+endif
+
$(TARGET)/$(KERNEL_FILE_NAME).unstripped: $(addprefix $(TARGET)/,$(foreach component,$(COMPONENT_LIST),$(component)/$(CURRENT_KERNEL_CONFIG)/$(component).filelist)) lastkerneldataconst.o lastkernelconstructor.o nonlto.o $(SRCROOT)/config/version.c $(SRCROOT)/config/MasterVersion .LDFLAGS $(filter %/MakeInc.kernel,$(MAKEFILE_LIST))
$(_v)${MAKE} -f $(firstword $(MAKEFILE_LIST)) version.o
ifeq ($(PRE_LTO),1)
@@ -242,6 +268,7 @@
endif
endif
+ifneq ($(BUILD_XNU_LIBRARY),1)
ifeq ($(INSTALL_XNU_DEBUG_FILES),1)
do_build_install_primary:: do_install_xnu_debug_files
endif
@@ -250,6 +277,7 @@
do_install_xnu_debug_files: $(DSTROOT)/$(DEVELOPER_EXTRAS_DIR)/README.DEBUG-kernel.txt
@:
+endif
#
# If the timestamp indicates the DSTROOT kernel is out of
@@ -273,7 +301,14 @@
exit $$cmdstatus
ifeq ($(BUILD_STATIC_LINK),1)
+ifeq ($(BUILD_XNU_LIBRARY),1)
+$(DSTROOT)/$(INSTALL_KERNEL_DIR)/lib$(KERNEL_FILE_NAME).a: $(TARGET)/lib$(KERNEL_FILE_NAME).a ALWAYS
+ $(_v)$(MKDIR) $(dir $@)
+ @$(LOG_INSTALL) "$(@F)"
+ $(_v)$(INSTALL) $(INSTALL_FLAGS) $< $@
+
+else
$(DSTROOT)/$(INSTALL_KERNEL_DIR)/$(KERNEL_FILE_NAME).link/$(KERNEL_FILE_NAME).a: $(TARGET)/$(KERNEL_FILE_NAME).link/$(KERNEL_FILE_NAME).a ALWAYS
$(_v)$(MKDIR) $(dir $@)
@$(LOG_INSTALL) "$(@F)"
@@ -298,6 +333,7 @@
$(_v)$(MKDIR) $(dir $@)
@$(LOG_INSTALL) "$(@F)"
$(_v)$(INSTALL) $(INSTALL_FLAGS) $< $@
+endif
# BUILD_STATIC_LINK
endif
@@ -355,6 +391,16 @@
exit $$cmdstatus
.PHONY: do_install_machine_specific_kernel do_install_machine_specific_kernel_dSYM
+.PHONY: do_install_machine_specific_KDK_dSYM
+
+ifeq ($(BUILD_XNU_LIBRARY),1)
+
+do_install_machine_specific_kernel: $(KERNEL_STATIC_LINK_DST)
+ @:
+do_install_machine_specific_kernel_dSYM:
+ @:
+
+else
do_install_machine_specific_kernel: $(DSTROOT)/$(INSTALL_KERNEL_DIR)/$(KERNEL_FILE_NAME) \
$(SYMROOT)/$(KERNEL_FILE_NAME) \
@@ -368,8 +414,6 @@
$(SYMROOT)/$(KERNEL_FILE_NAME).dSYM/$(DSYMDWARFDIR)/$(KERNEL_FILE_NAME)
@:
-.PHONY: do_install_machine_specific_KDK_dSYM
-
do_install_machine_specific_KDK_dSYM: \
$(DSTROOT)/$(INSTALL_KERNEL_SYM_DIR)/$(KERNEL_FILE_NAME).dSYM/$(DSYMINFODIR)/Info.plist \
$(DSTROOT)/$(INSTALL_KERNEL_SYM_DIR)/$(KERNEL_FILE_NAME).dSYM/$(DSYMLLDBMACROSDIR)/lldbmacros \
@@ -377,6 +421,8 @@
$(DSTROOT)/$(INSTALL_KERNEL_SYM_DIR)/$(KERNEL_FILE_NAME).dSYM/$(DSYMDWARFDIR)/$(KERNEL_FILE_NAME)
@:
+endif
+
# The $(RM) is needed so that the $(LN) doesn't dereference an existing
# symlink during incremental builds and create a new symlink inside
# the target of the existing symlink
diff -ru ../xnu-7195.60.75/makedefs/MakeInc.top ../xnu-7195.81.3/makedefs/MakeInc.top
--- ../xnu-7195.60.75/makedefs/MakeInc.top 2020-12-18 10:21:24.000000000 +0100
+++ ../xnu-7195.81.3/makedefs/MakeInc.top 2021-01-26 21:33:36.000000000 +0100
@@ -606,7 +606,7 @@
install: installhdrs_desktop
else
-install: installhdrs install_textfiles install_config install_kernels install_aliases checkstyle
+install: installhdrs install_textfiles install_config install_kernels install_aliases
endif
.PHONY: install_embedded install_release_embedded install_development_embedded install_desktop
@@ -724,20 +724,6 @@
@-cat cscope.files | etags -l auto -S - 2> /dev/null
@rm -f cscope.files 2> /dev/null
-#
-# Check or reformat source code for official xnu code style
-#
-.PHONY: checkstyle restyle check_uncrustify uncrustify
-
-# User-friendly aliases for those who prefer to remember the name of the tool.
-check_uncrustify: checkstyle
-uncrustify: restyle
-
-checkstyle:
- ${_V}$(SRCROOT)/tools/uncrustify.sh
-
-restyle:
- ${_V}$(SRCROOT)/tools/uncrustify.sh -f
.PHONY: help
diff -ru ../xnu-7195.60.75/osfmk/arm/arm_init.c ../xnu-7195.81.3/osfmk/arm/arm_init.c
--- ../xnu-7195.60.75/osfmk/arm/arm_init.c 2020-12-18 10:21:06.000000000 +0100
+++ ../xnu-7195.81.3/osfmk/arm/arm_init.c 2021-01-26 21:33:19.000000000 +0100
@@ -308,7 +308,7 @@
cpu_data_init(&BootCpuData);
#if defined(HAS_APPLE_PAC)
/* bootstrap cpu process dependent key for kernel has been loaded by start.s */
- BootCpuData.rop_key = KERNEL_ROP_ID;
+ BootCpuData.rop_key = ml_default_rop_pid();
BootCpuData.jop_key = ml_default_jop_pid();
#endif /* defined(HAS_APPLE_PAC) */
diff -ru ../xnu-7195.60.75/osfmk/arm/machine_routines.h ../xnu-7195.81.3/osfmk/arm/machine_routines.h
--- ../xnu-7195.60.75/osfmk/arm/machine_routines.h 2020-12-18 10:21:07.000000000 +0100
+++ ../xnu-7195.81.3/osfmk/arm/machine_routines.h 2021-01-26 21:33:20.000000000 +0100
@@ -1269,6 +1269,7 @@
#define UNSIGN_PTR(p) \
SIGN(p) ? ((p) | PAC_MASK) : ((p) & ~PAC_MASK)
+uint64_t ml_default_rop_pid(void);
uint64_t ml_default_jop_pid(void);
void ml_task_set_rop_pid(task_t task, task_t parent_task, boolean_t inherit);
void ml_task_set_jop_pid(task_t task, task_t parent_task, boolean_t inherit);
diff -ru ../xnu-7195.60.75/osfmk/arm/machine_routines_apple.c ../xnu-7195.81.3/osfmk/arm/machine_routines_apple.c
--- ../xnu-7195.60.75/osfmk/arm/machine_routines_apple.c 2020-12-18 10:21:07.000000000 +0100
+++ ../xnu-7195.81.3/osfmk/arm/machine_routines_apple.c 2021-01-26 21:33:20.000000000 +0100
@@ -27,6 +27,9 @@
*/
#include <pexpert/pexpert.h>
+#if __arm64__
+#include <pexpert/arm64/board_config.h>
+#endif /* __arm64__ */
#include <arm/cpuid_internal.h>
#include <arm/pmap.h>
@@ -51,3 +54,17 @@
}
#endif /* __arm64__ */
+
+#if HAS_APPLE_PAC
+uint64_t
+ml_default_rop_pid(void)
+{
+ return 0;
+}
+
+uint64_t
+ml_default_jop_pid(void)
+{
+ return 0;
+}
+#endif /* HAS_APPLE_PAC */
diff -ru ../xnu-7195.60.75/osfmk/arm/pmap.c ../xnu-7195.81.3/osfmk/arm/pmap.c
--- ../xnu-7195.60.75/osfmk/arm/pmap.c 2020-12-18 10:21:07.000000000 +0100
+++ ../xnu-7195.81.3/osfmk/arm/pmap.c 2021-01-26 21:33:19.000000000 +0100
@@ -691,12 +691,6 @@
extern unsigned long segSizePPLDATA;
extern vm_offset_t segPPLTEXTB;
extern unsigned long segSizePPLTEXT;
-#if __APRR_SUPPORTED__
-extern vm_offset_t segPPLTRAMPB;
-extern unsigned long segSizePPLTRAMP;
-extern void ppl_trampoline_start;
-extern void ppl_trampoline_end;
-#endif
extern vm_offset_t segPPLDATACONSTB;
extern unsigned long segSizePPLDATACONST;
@@ -1943,10 +1937,6 @@
static void pmap_trim_self(pmap_t pmap);
static void pmap_trim_subord(pmap_t subord);
-#if __APRR_SUPPORTED__
-static uint64_t pte_to_xprr_perm(pt_entry_t pte);
-static pt_entry_t xprr_perm_to_pte(uint64_t perm);
-#endif /* __APRR_SUPPORTED__*/
/*
* Temporary prototypes, while we wait for pmap_enter to move to taking an
@@ -4094,111 +4084,9 @@
#endif
-#if __APRR_SUPPORTED__
-/*
- * Indicates whether the given PTE has special restrictions due to the current
- * APRR settings.
- */
-static boolean_t
-is_pte_aprr_protected(pt_entry_t pte)
-{
- uint64_t aprr_el0_value;
- uint64_t aprr_el1_value;
- uint64_t aprr_index;
-
- MRS(aprr_el0_value, APRR_EL0);
- MRS(aprr_el1_value, APRR_EL1);
- aprr_index = PTE_TO_APRR_INDEX(pte);
-
- /* Check to see if this mapping had APRR restrictions. */
- if ((APRR_EXTRACT_IDX_ATTR(aprr_el0_value, aprr_index) != APRR_EXTRACT_IDX_ATTR(APRR_EL0_RESET, aprr_index)) ||
- (APRR_EXTRACT_IDX_ATTR(aprr_el1_value, aprr_index) != APRR_EXTRACT_IDX_ATTR(APRR_EL1_RESET, aprr_index))
- ) {
- return TRUE;
- }
-
- return FALSE;
-}
-#endif /* __APRR_SUPPORTED__ */
-#if __APRR_SUPPORTED__
-static boolean_t
-is_pte_xprr_protected(pmap_t pmap __unused, pt_entry_t pte)
-{
-#if __APRR_SUPPORTED__
- return is_pte_aprr_protected(pte);
-#else /* __APRR_SUPPORTED__ */
-#error "XPRR configuration error"
-#endif /* __APRR_SUPPORTED__ */
-}
-#endif /* __APRR_SUPPORTED__*/
-
-#if __APRR_SUPPORTED__
-static uint64_t
-__unused pte_to_xprr_perm(pt_entry_t pte)
-{
-#if __APRR_SUPPORTED__
- switch (PTE_TO_APRR_INDEX(pte)) {
- case APRR_FIRM_RX_INDEX: return XPRR_FIRM_RX_PERM;
- case APRR_FIRM_RO_INDEX: return XPRR_FIRM_RO_PERM;
- case APRR_PPL_RW_INDEX: return XPRR_PPL_RW_PERM;
- case APRR_KERN_RW_INDEX: return XPRR_KERN_RW_PERM;
- case APRR_FIRM_RW_INDEX: return XPRR_FIRM_RW_PERM;
- case APRR_KERN0_RW_INDEX: return XPRR_KERN0_RW_PERM;
- case APRR_USER_JIT_INDEX: return XPRR_USER_JIT_PERM;
- case APRR_USER_RW_INDEX: return XPRR_USER_RW_PERM;
- case APRR_PPL_RX_INDEX: return XPRR_PPL_RX_PERM;
- case APRR_KERN_RX_INDEX: return XPRR_KERN_RX_PERM;
- case APRR_USER_XO_INDEX: return XPRR_USER_XO_PERM;
- case APRR_KERN_RO_INDEX: return XPRR_KERN_RO_PERM;
- case APRR_KERN0_RX_INDEX: return XPRR_KERN0_RO_PERM;
- case APRR_KERN0_RO_INDEX: return XPRR_KERN0_RO_PERM;
- case APRR_USER_RX_INDEX: return XPRR_USER_RX_PERM;
- case APRR_USER_RO_INDEX: return XPRR_USER_RO_PERM;
- default: return XPRR_MAX_PERM;
- }
-#else
-#error "XPRR configuration error"
-#endif /**/
-}
-
-#if __APRR_SUPPORTED__
-static uint64_t
-xprr_perm_to_aprr_index(uint64_t perm)
-{
- switch (perm) {
- case XPRR_FIRM_RX_PERM: return APRR_FIRM_RX_INDEX;
- case XPRR_FIRM_RO_PERM: return APRR_FIRM_RO_INDEX;
- case XPRR_PPL_RW_PERM: return APRR_PPL_RW_INDEX;
- case XPRR_KERN_RW_PERM: return APRR_KERN_RW_INDEX;
- case XPRR_FIRM_RW_PERM: return APRR_FIRM_RW_INDEX;
- case XPRR_KERN0_RW_PERM: return APRR_KERN0_RW_INDEX;
- case XPRR_USER_JIT_PERM: return APRR_USER_JIT_INDEX;
- case XPRR_USER_RW_PERM: return APRR_USER_RW_INDEX;
- case XPRR_PPL_RX_PERM: return APRR_PPL_RX_INDEX;
- case XPRR_KERN_RX_PERM: return APRR_KERN_RX_INDEX;
- case XPRR_USER_XO_PERM: return APRR_USER_XO_INDEX;
- case XPRR_KERN_RO_PERM: return APRR_KERN_RO_INDEX;
- case XPRR_KERN0_RX_PERM: return APRR_KERN0_RO_INDEX;
- case XPRR_KERN0_RO_PERM: return APRR_KERN0_RO_INDEX;
- case XPRR_USER_RX_PERM: return APRR_USER_RX_INDEX;
- case XPRR_USER_RO_PERM: return APRR_USER_RO_INDEX;
- default: return APRR_MAX_INDEX;
- }
-}
-#endif /* __APRR_SUPPORTED__ */
-static pt_entry_t
-__unused xprr_perm_to_pte(uint64_t perm)
-{
-#if __APRR_SUPPORTED__
- return APRR_INDEX_TO_PTE(xprr_perm_to_aprr_index(perm));
-#else
-#error "XPRR configuration error"
-#endif /**/
-}
-#endif /* __APRR_SUPPORTED__*/
/*
@@ -4712,21 +4600,6 @@
}
#endif /* CONFIG_CSR_FROM_DT */
-#if __APRR_SUPPORTED__
- if (((uintptr_t)(&ppl_trampoline_start)) % PAGE_SIZE) {
- panic("%s: ppl_trampoline_start is not page aligned, "
- "vstart=%#lx",
- __FUNCTION__,
- vstart);
- }
-
- if (((uintptr_t)(&ppl_trampoline_end)) % PAGE_SIZE) {
- panic("%s: ppl_trampoline_end is not page aligned, "
- "vstart=%#lx",
- __FUNCTION__,
- vstart);
- }
-#endif /* __APRR_SUPPORTED__ */
#endif /* XNU_MONITOR */
#if DEVELOPMENT || DEBUG
@@ -5035,16 +4908,6 @@
/* PPL text is RX for the PPL, RO for the kernel. */
pa_set_range_xprr_perm(monitor_start_pa, monitor_end_pa, XPRR_KERN_RX_PERM, XPRR_PPL_RX_PERM);
-#if __APRR_SUPPORTED__
- monitor_start_pa = kvtophys(segPPLTRAMPB);
- monitor_end_pa = monitor_start_pa + segSizePPLTRAMP;
-
- /*
- * The PPLTRAMP pages will be a mix of PPL RX/kernel RO and
- * PPL RX/kernel RX. However, all of these pages belong to the PPL.
- */
- pa_set_range_monitor(monitor_start_pa, monitor_end_pa);
-#endif
/*
* In order to support DTrace, the save areas for the PPL must be
@@ -5058,10 +4921,6 @@
pmap_set_range_xprr_perm(monitor_start_va, monitor_end_va, XPRR_PPL_RW_PERM, XPRR_KERN_RW_PERM);
}
-#if __APRR_SUPPORTED__
- /* The trampoline must also be specially protected. */
- pmap_set_range_xprr_perm((vm_offset_t)&ppl_trampoline_start, (vm_offset_t)&ppl_trampoline_end, XPRR_KERN_RX_PERM, XPRR_PPL_RX_PERM);
-#endif
if (segSizePPLDATACONST > 0) {
monitor_start_pa = kvtophys(segPPLDATACONSTB);
@@ -5086,13 +4945,7 @@
{
/* Mark the PPL as being locked down. */
-#if __APRR_SUPPORTED__
- pmap_ppl_locked_down = TRUE;
- /* Force a trap into to the PPL to update APRR_EL1. */
- pmap_return(FALSE, FALSE);
-#else
#error "XPRR configuration error"
-#endif /* __APRR_SUPPORTED__ */
}
#endif /* XNU_MONITOR */
@@ -6654,7 +6507,7 @@
pt_entry_t spte;
boolean_t managed = FALSE;
- spte = *cpte;
+ spte = *((volatile pt_entry_t*)cpte);
#if CONFIG_PGTRACE
if (pgtrace_enabled) {
@@ -6689,7 +6542,7 @@
if (OSAddAtomic16(-1, (SInt16 *) &(ptep_get_info(cpte)->refcnt)) <= 0) {
panic("pmap_remove_range_options: over-release of ptdp %p for pte %p", ptep_get_ptd(cpte), cpte);
}
- spte = *cpte;
+ spte = *((volatile pt_entry_t*)cpte);
}
/*
* It may be possible for the pte to transition from managed
@@ -6713,7 +6566,7 @@
}
pai = (int)pa_index(pa);
LOCK_PVH(pai);
- spte = *cpte;
+ spte = *((volatile pt_entry_t*)cpte);
pa = pte_to_pa(spte);
if (pai == (int)pa_index(pa)) {
managed = TRUE;
@@ -7469,30 +7322,6 @@
tmplate |= pt_attr_leaf_xn(pt_attr);
}
-#if __APRR_SUPPORTED__
- /**
- * Enforce the policy that PPL xPRR mappings can't have their permissions changed after the fact.
- *
- * Certain userspace applications (e.g., CrashReporter and debuggers) have a need to remap JIT mappings to
- * RO/RX, so we explicitly allow that. This doesn't compromise the security of the PPL since this only
- * affects userspace mappings, so allow reducing permissions on JIT mappings to RO/RX. This is similar for
- * user execute-only mappings.
- */
- if (__improbable(is_pte_xprr_protected(pmap, spte) && (pte_to_xprr_perm(spte) != XPRR_USER_JIT_PERM)
- && (pte_to_xprr_perm(spte) != XPRR_USER_XO_PERM))) {
- panic("%s: modifying an xPRR mapping pte_p=%p pmap=%p prot=%d options=%u, pv_h=%p, pveh_p=%p, pve_p=%p, pte=0x%llx, tmplate=0x%llx, va=0x%llx ppnum: 0x%x",
- __func__, pte_p, pmap, prot, options, pv_h, pveh_p, pve_p, (uint64_t)spte, (uint64_t)tmplate, (uint64_t)va, ppnum);
- }
-
- /**
- * Enforce the policy that we can't create a new PPL protected mapping here except for user execute-only
- * mappings (which doesn't compromise the security of the PPL since it's userspace-specific).
- */
- if (__improbable(is_pte_xprr_protected(pmap, tmplate) && (pte_to_xprr_perm(tmplate) != XPRR_USER_XO_PERM))) {
- panic("%s: creating an xPRR mapping pte_p=%p pmap=%p prot=%d options=%u, pv_h=%p, pveh_p=%p, pve_p=%p, pte=0x%llx, tmplate=0x%llx, va=0x%llx ppnum: 0x%x",
- __func__, pte_p, pmap, prot, options, pv_h, pveh_p, pve_p, (uint64_t)spte, (uint64_t)tmplate, (uint64_t)va, ppnum);
- }
-#endif /* __APRR_SUPPORTED__*/
if (*pte_p != ARM_PTE_TYPE_FAULT &&
!ARM_PTE_IS_COMPRESSED(*pte_p, pte_p) &&
@@ -7612,6 +7441,7 @@
if (pmap == kernel_pmap) {
panic("%s: called with kernel_pmap\n", __func__);
}
+ VALIDATE_PMAP(pmap);
pmap->disable_jop = true;
}
@@ -7739,7 +7569,7 @@
boolean_t force_write = FALSE;
#endif
- spte = *pte_p;
+ spte = *((volatile pt_entry_t*)pte_p);
if ((spte == ARM_PTE_TYPE_FAULT) ||
ARM_PTE_IS_COMPRESSED(spte, pte_p)) {
@@ -7763,7 +7593,7 @@
}
pai = (int)pa_index(pa);
LOCK_PVH(pai);
- spte = *pte_p;
+ spte = *((volatile pt_entry_t*)pte_p);
pa = pte_to_pa(spte);
if (pai == (int)pa_index(pa)) {
managed = TRUE;
@@ -7871,30 +7701,6 @@
/* We do not expect to write fast fault the entry. */
pte_set_was_writeable(tmplate, false);
-#if __APRR_SUPPORTED__
- /**
- * Enforce the policy that PPL xPRR mappings can't have their permissions changed after the fact.
- *
- * Certain userspace applications (e.g., CrashReporter and debuggers) have a need to remap JIT mappings to
- * RO/RX, so we explicitly allow that. This doesn't compromise the security of the PPL since this only
- * affects userspace mappings, so allow reducing permissions on JIT mappings to RO/RX/XO. This is similar
- * for user execute-only mappings.
- */
- if (__improbable(is_pte_xprr_protected(pmap, spte) && (pte_to_xprr_perm(spte) != XPRR_USER_JIT_PERM)
- && (pte_to_xprr_perm(spte) != XPRR_USER_XO_PERM))) {
- panic("%s: modifying a PPL mapping pte_p=%p pmap=%p prot=%d options=%u, pte=0x%llx, tmplate=0x%llx",
- __func__, pte_p, pmap, prot, options, (uint64_t)spte, (uint64_t)tmplate);
- }
-
- /**
- * Enforce the policy that we can't create a new PPL protected mapping here except for user execute-only
- * mappings (which doesn't compromise the security of the PPL since it's userspace-specific).
- */
- if (__improbable(is_pte_xprr_protected(pmap, tmplate) && (pte_to_xprr_perm(tmplate) != XPRR_USER_XO_PERM))) {
- panic("%s: creating an xPRR mapping pte_p=%p pmap=%p prot=%d options=%u, pte=0x%llx, tmplate=0x%llx",
- __func__, pte_p, pmap, prot, options, (uint64_t)spte, (uint64_t)tmplate);
- }
-#endif /* __APRR_SUPPORTED__*/
WRITE_PTE_FAST(pte_p, tmplate);
if (managed) {
@@ -8834,14 +8640,28 @@
const pt_attr_t * pt_attr = pmap_get_pt_attr(pmap);
pte_p = pmap_pte(pmap, v);
- assert(pte_p != PT_ENTRY_NULL);
- pa = pte_to_pa(*pte_p);
+ if (pte_p == PT_ENTRY_NULL) {
+ if (!wired) {
+ /*
+ * The PTE may have already been cleared by a disconnect/remove operation, and the L3 table
+ * may have been freed by a remove operation.
+ */
+ goto pmap_change_wiring_return;
+ } else {
+ panic("%s: Attempt to wire nonexistent PTE for pmap %p", __func__, pmap);
+ }
+ }
+ /*
+ * Use volatile loads to prevent the compiler from collapsing references to 'pa' back to loads of pte_p
+ * until we've grabbed the final PVH lock; PTE contents may change during this time.
+ */
+ pa = pte_to_pa(*((volatile pt_entry_t*)pte_p));
while (pa_valid(pa)) {
pmap_paddr_t new_pa;
LOCK_PVH((int)pa_index(pa));
- new_pa = pte_to_pa(*pte_p);
+ new_pa = pte_to_pa(*((volatile pt_entry_t*)pte_p));
if (pa == new_pa) {
break;
@@ -8851,6 +8671,18 @@
pa = new_pa;
}
+ /* PTE checks must be performed after acquiring the PVH lock (if applicable for the PA) */
+ if ((*pte_p == ARM_PTE_EMPTY) || (ARM_PTE_IS_COMPRESSED(*pte_p, pte_p))) {
+ if (!wired) {
+ /* PTE cleared by prior remove/disconnect operation */
+ goto pmap_change_wiring_cleanup;
+ } else {
+ panic("%s: Attempt to wire empty/compressed PTE %p (=0x%llx) for pmap %p",
+ __func__, pte_p, (uint64_t)*pte_p, pmap);
+ }
+ }
+
+ assertf((*pte_p & ARM_PTE_TYPE_VALID) == ARM_PTE_TYPE, "invalid pte %p (=0x%llx)", pte_p, (uint64_t)*pte_p);
if (wired != pte_is_wired(*pte_p)) {
pte_set_wired(pmap, pte_p, wired);
if (pmap != kernel_pmap) {
@@ -8865,10 +8697,12 @@
}
}
+pmap_change_wiring_cleanup:
if (pa_valid(pa)) {
UNLOCK_PVH((int)pa_index(pa));
}
+pmap_change_wiring_return:
pmap_unlock(pmap);
}
@@ -8980,12 +8814,7 @@
if (pa) {
return pa;
}
- pa = ((pmap_paddr_t)pmap_vtophys(kernel_pmap, va)) << PAGE_SHIFT;
- if (pa) {
- pa |= (va & PAGE_MASK);
- }
-
- return (pmap_paddr_t)pa;
+ return pmap_vtophys(kernel_pmap, va);
}
pmap_paddr_t
@@ -9601,7 +9430,7 @@
end_pte_p = start_pte_p + ((end - start) >> pt_attr_leaf_shift(pt_attr));
assert(end_pte_p >= start_pte_p);
for (curr_pte_p = start_pte_p; curr_pte_p < end_pte_p; curr_pte_p++) {
- pmap_paddr_t pa = pte_to_pa(*curr_pte_p);
+ pmap_paddr_t pa = pte_to_pa(*((volatile pt_entry_t*)curr_pte_p));
if (pa_valid(pa)) {
ppnum_t pn = (ppnum_t) atop(pa);
phys_attribute_clear_with_flush_range(pn, bits, options, NULL, flush_range);
@@ -10128,23 +9957,6 @@
pmap_clear_user_ttb_internal();
}
-#if defined(HAS_APPLE_PAC) && (__APCFG_SUPPORTED__ || __APSTS_SUPPORTED__)
- if (!arm_user_jop_disabled()) {
- uint64_t sctlr = __builtin_arm_rsr64("SCTLR_EL1");
- bool jop_enabled = sctlr & SCTLR_JOP_KEYS_ENABLED;
- if (!jop_enabled && !pmap->disable_jop) {
- // turn on JOP
- sctlr |= SCTLR_JOP_KEYS_ENABLED;
- __builtin_arm_wsr64("SCTLR_EL1", sctlr);
- arm_context_switch_requires_sync();
- } else if (jop_enabled && pmap->disable_jop) {
- // turn off JOP
- sctlr &= ~SCTLR_JOP_KEYS_ENABLED;
- __builtin_arm_wsr64("SCTLR_EL1", sctlr);
- arm_context_switch_requires_sync();
- }
- }
-#endif /* HAS_APPLE_PAC && (__APCFG_SUPPORTED__ || __APSTS_SUPPORTED__) */
#endif /* (__ARM_VMSA__ == 7) */
}
@@ -10630,7 +10442,7 @@
ptep = pmap_pte(pmap, va);
if (ptep != PT_ENTRY_NULL) {
while (true) {
- spte = *ptep;
+ spte = *((volatile pt_entry_t*)ptep);
pa = pte_to_pa(spte);
@@ -10651,42 +10463,13 @@
}
pai = (int)pa_index(pa);
LOCK_PVH(pai);
-#if __APRR_SUPPORTED__
- if (*ptep == spte) {
- /*
- * Double-check the spte value, as we care
- * about the AF bit.
- */
- break;
- }
- UNLOCK_PVH(pai);
-#else /* !(__APRR_SUPPORTED__*/
break;
-#endif /* !(__APRR_SUPPORTED__*/
}
} else {
pmap_unlock(pmap);
return result;
}
-#if __APRR_SUPPORTED__
- /* Check to see if this mapping had APRR restrictions. */
- if (is_pte_xprr_protected(pmap, spte)) {
- /*
- * We have faulted on an XPRR managed mapping; decide if the access should be
- * reattempted or if it should cause an exception. Now that all JIT entitled
- * task threads always have MPRR enabled we're only here because of
- * an AF fault or an actual permission fault. AF faults will have result
- * changed to KERN_SUCCESS below upon arm_clear_fast_fault return.
- */
- if (was_af_fault && (spte & ARM_PTE_AF)) {
- result = KERN_SUCCESS;
- goto out;
- } else {
- result = KERN_PROTECTION_FAILURE;
- }
- }
-#endif /* __APRR_SUPPORTED__*/
if ((IS_REFFAULT_PAGE(pai)) ||
((fault_type & VM_PROT_WRITE) && IS_MODFAULT_PAGE(pai))) {
@@ -10717,9 +10500,6 @@
}
}
-#if __APRR_SUPPORTED__
-out:
-#endif /* __APRR_SUPPORTED__*/
UNLOCK_PVH(pai);
pmap_unlock(pmap);
return result;
@@ -11971,7 +11751,7 @@
if ((*cpte != ARM_PTE_TYPE_FAULT)
&& (!ARM_PTE_IS_COMPRESSED(*cpte, cpte))) {
- spte = *cpte;
+ spte = *((volatile pt_entry_t*)cpte);
while (!managed) {
pa = pte_to_pa(spte);
if (!pa_valid(pa)) {
@@ -11979,7 +11759,7 @@
}
pai = (int)pa_index(pa);
LOCK_PVH(pai);
- spte = *cpte;
+ spte = *((volatile pt_entry_t*)cpte);
pa = pte_to_pa(spte);
if (pai == (int)pa_index(pa)) {
managed = TRUE;
@@ -14694,7 +14474,7 @@
goto done;
}
- pa = pte_to_pa(*pte);
+ pa = pte_to_pa(*((volatile pt_entry_t*)pte));
if (pa == 0) {
if (ARM_PTE_IS_COMPRESSED(*pte, pte)) {
disp |= PMAP_QUERY_PAGE_COMPRESSED;
@@ -15299,13 +15079,8 @@
T_LOG("Make the first mapping XO.");
pmap_enter_addr(pmap, va_base, pa, VM_PROT_EXECUTE, VM_PROT_EXECUTE, 0, false);
-#if __APRR_SUPPORTED__
- T_LOG("Validate that reads to our mapping fault.");
- pmap_test_read(pmap, va_base, true);
-#else
T_LOG("Validate that reads to our mapping do not fault.");
pmap_test_read(pmap, va_base, false);
-#endif
T_LOG("Validate that writes to our mapping fault.");
pmap_test_write(pmap, va_base, true);
diff -ru ../xnu-7195.60.75/osfmk/arm/pmap.h ../xnu-7195.81.3/osfmk/arm/pmap.h
--- ../xnu-7195.60.75/osfmk/arm/pmap.h 2020-12-18 10:21:05.000000000 +0100
+++ ../xnu-7195.81.3/osfmk/arm/pmap.h 2021-01-26 21:33:18.000000000 +0100
@@ -636,7 +636,8 @@
#define PMAP_SET_VM_MAP_CS_ENFORCED_INDEX 72
-#define PMAP_COUNT 73
+
+#define PMAP_COUNT 74
#define PMAP_INVALID_CPU_NUM (~0U)
diff -ru ../xnu-7195.60.75/osfmk/arm/trustcache.c ../xnu-7195.81.3/osfmk/arm/trustcache.c
--- ../xnu-7195.60.75/osfmk/arm/trustcache.c 2020-12-18 10:21:05.000000000 +0100
+++ ../xnu-7195.81.3/osfmk/arm/trustcache.c 2021-01-26 21:33:18.000000000 +0100
@@ -59,19 +59,15 @@
void
trust_cache_init(void)
{
- size_t const len = segSizeEXTRADATA;
+ size_t const locked_down_dt_size = SecureDTIsLockedDown() ? PE_state.deviceTreeSize : 0;
+ size_t const len = segSizeEXTRADATA - locked_down_dt_size;
if (len == 0) {
-#if XNU_TARGET_OS_OSX
+ // We allow no trust cache at all.
printf("No external trust cache found (region len is 0).");
-#else
- panic("No external trust cache found (region len is 0).");
-#endif
return;
}
- size_t const locked_down_dt_size = SecureDTIsLockedDown() ? PE_state.deviceTreeSize : 0;
-
pmap_serialized_trust_caches = (struct serialized_trust_caches*)(segEXTRADATA +
locked_down_dt_size);
@@ -203,7 +199,6 @@
// Engineering Trust Caches.
if (pmap_serialized_trust_caches->num_caches > engineering_trust_cache_index) {
-#if DEVELOPMENT || DEBUG
for (uint32_t i = engineering_trust_cache_index; i < pmap_serialized_trust_caches->num_caches; i++) {
struct trust_cache_module1 const *module =
(struct trust_cache_module1 const *)(
@@ -215,10 +210,6 @@
(TC_LOOKUP_FOUND << TC_LOOKUP_RESULT_SHIFT);
}
}
-#else
- panic("Number of trust caches: %d. How could we let this happen?",
- pmap_serialized_trust_caches->num_caches);
-#endif
}
}
diff -ru ../xnu-7195.60.75/osfmk/arm64/arm_vm_init.c ../xnu-7195.81.3/osfmk/arm64/arm_vm_init.c
--- ../xnu-7195.60.75/osfmk/arm64/arm_vm_init.c 2020-12-18 10:21:08.000000000 +0100
+++ ../xnu-7195.81.3/osfmk/arm64/arm_vm_init.c 2021-01-26 21:33:21.000000000 +0100
@@ -70,11 +70,7 @@
static_assert((((~ARM_KERNEL_PROTECT_EXCEPTION_START) + 1) * 2ULL) <= (ARM_TT_ROOT_SIZE + ARM_TT_ROOT_INDEX_MASK));
#endif /* __ARM_KERNEL_PROTECT__ */
-#if __APRR_SUPPORTED__ && XNU_MONITOR
-#define ARM_DYNAMIC_TABLE_XN ARM_TTE_TABLE_PXN
-#else
#define ARM_DYNAMIC_TABLE_XN (ARM_TTE_TABLE_PXN | ARM_TTE_TABLE_XN)
-#endif
#if KASAN
extern vm_offset_t shadow_pbase;
diff -ru ../xnu-7195.60.75/osfmk/arm64/cswitch.s ../xnu-7195.81.3/osfmk/arm64/cswitch.s
--- ../xnu-7195.60.75/osfmk/arm64/cswitch.s 2020-12-18 10:21:28.000000000 +0100
+++ ../xnu-7195.81.3/osfmk/arm64/cswitch.s 2021-01-26 21:33:40.000000000 +0100
@@ -28,7 +28,6 @@
#include <machine/asm.h>
#include <arm64/machine_machdep.h>
#include <arm64/machine_routines_asm.h>
-#include <arm64/pac_asm.h>
#include <arm64/proc_reg.h>
#include "assym.s"
@@ -202,31 +201,6 @@
#endif
-#if defined(HAS_APPLE_PAC)
- ldr \new_key, [\thread, TH_ROP_PID]
- ldr \tmp_key, [\cpudatap, CPU_ROP_KEY]
- cmp \new_key, \tmp_key
- b.eq 1f
- str \new_key, [\cpudatap, CPU_ROP_KEY]
- msr APIBKeyLo_EL1, \new_key
- add \new_key, \new_key, #1
- msr APIBKeyHi_EL1, \new_key
- add \new_key, \new_key, #1
- msr APDBKeyLo_EL1, \new_key
- add \new_key, \new_key, #1
- msr APDBKeyHi_EL1, \new_key
- mov \wsync, #1
-1:
-
-#if HAS_PAC_FAST_A_KEY_SWITCHING
- IF_PAC_SLOW_A_KEY_SWITCHING Lskip_jop_keys_\@, \new_key
- ldr \new_key, [\thread, TH_JOP_PID]
- REPROGRAM_JOP_KEYS Lskip_jop_keys_\@, \new_key, \cpudatap, \tmp_key
- mov \wsync, #1
-Lskip_jop_keys_\@:
-#endif /* HAS_PAC_FAST_A_KEY_SWITCHING */
-
-#endif /* defined(HAS_APPLE_PAC) */
cbz \wsync, 1f
isb sy
diff -ru ../xnu-7195.60.75/osfmk/arm64/locore.s ../xnu-7195.81.3/osfmk/arm64/locore.s
--- ../xnu-7195.60.75/osfmk/arm64/locore.s 2020-12-18 10:21:28.000000000 +0100
+++ ../xnu-7195.81.3/osfmk/arm64/locore.s 2021-01-26 21:33:40.000000000 +0100
@@ -35,7 +35,6 @@
#include <config_dtrace.h>
#include "assym.s"
#include <arm64/exception_asm.h>
-#include <arm64/pac_asm.h>
#include "dwarf_unwind.h"
#if __ARM_KERNEL_PROTECT__
@@ -58,161 +57,10 @@
/* Return to the PPL. */
mov x15, #0
mov w10, #PPL_STATE_EXCEPTION
-#if __APRR_SUPPORTED__
- b Ldisable_aif_and_enter_ppl
-#else
#error "XPRR configuration error"
-#endif /* __APRR_SUPPORTED__ */
1:
.endmacro
-#if __APRR_SUPPORTED__
-/*
- * EL1_SP0_VECTOR_PPL_CHECK
- *
- * Check to see if the exception was taken by the kernel or the PPL. Falls
- * through if kernel, hands off to the given label if PPL. Expects to run on
- * SP1.
- * arg0 - Label to go to if this was a PPL exception.
- */
-.macro EL1_SP0_VECTOR_PPL_CHECK
- sub sp, sp, ARM_CONTEXT_SIZE
- stp x0, x1, [sp, SS64_X0]
- mrs x0, APRR_EL1
- MOV64 x1, APRR_EL1_DEFAULT
- cmp x0, x1
- b.ne $0
- ldp x0, x1, [sp, SS64_X0]
- add sp, sp, ARM_CONTEXT_SIZE
-.endmacro
-
-#define STAY_ON_SP1 0
-#define SWITCH_TO_SP0 1
-
-#define INVOKE_PREFLIGHT 0
-#define NO_INVOKE_PREFLIGHT 1
-
-/*
- * EL1_SP0_VECTOR_NOT_IN_KERNEL_MODE
- *
- * Verify whether an exception came from the PPL or from the kernel. If it came
- * from the PPL, save off the PPL state and transition out of the PPL.
- * arg0 - Label to go to if this was a kernel exception
- * arg1 - Label to go to (after leaving the PPL) if this was a PPL exception
- * arg2 - Indicates if this should switch back to SP0
- * x0 - xPRR_EL1_BR1 read by EL1_SP0_VECTOR_PPL_CHECK
- */
-.macro EL1_SP0_VECTOR_NOT_IN_KERNEL_MODE
- /* Spill some more registers. */
- stp x2, x3, [sp, SS64_X2]
-
- /*
- * Check if the PPL is locked down; if not, we can treat this as a
- * kernel execption.
- */
- adrp x1, EXT(pmap_ppl_locked_down)@page
- ldr w1, [x1, #EXT(pmap_ppl_locked_down)@pageoff]
- cbz x1, 2f
-
- /* Ensure that APRR_EL1 is actually in PPL mode. */
- MOV64 x1, APRR_EL1_PPL
- cmp x0, x1
- b.ne .
-
- /*
- * Check if the CPU is in the PPL; if not we can treat this as a
- * kernel exception.
- */
- GET_PMAP_CPU_DATA x3, x1, x2
- ldr w1, [x3, PMAP_CPU_DATA_PPL_STATE]
- cmp x1, #PPL_STATE_KERNEL
- b.eq 2f
-
- /* Ensure that the CPU is in the expected PPL state. */
- cmp x1, #PPL_STATE_DISPATCH
- b.ne .
-
- /* Mark the CPU as dealing with an exception. */
- mov x1, #PPL_STATE_EXCEPTION
- str w1, [x3, PMAP_CPU_DATA_PPL_STATE]
-
- /* Load the bounds of the PPL trampoline. */
- adrp x0, EXT(ppl_no_exception_start)@page
- add x0, x0, EXT(ppl_no_exception_start)@pageoff
- adrp x1, EXT(ppl_no_exception_end)@page
- add x1, x1, EXT(ppl_no_exception_end)@pageoff
-
- /*
- * Ensure that the exception did not occur in the trampoline. If it
- * did, we are either being attacked or our state machine is
- * horrifically broken.
- */
- mrs x2, ELR_EL1
- cmp x2, x0
- b.lo 1f
- cmp x2, x1
- b.hi 1f
-
- /* We might be under attack; spin. */
- b .
-
-1:
- /* Get the PPL save area. */
- mov x1, x3
- ldr x0, [x3, PMAP_CPU_DATA_SAVE_AREA]
-
- /* Save our x0, x1 state. */
- ldp x2, x3, [sp, SS64_X0]
- stp x2, x3, [x0, SS64_X0]
-
- /* Restore SP1 to its original state. */
- mov x3, sp
- add sp, sp, ARM_CONTEXT_SIZE
-
- .if $2 == SWITCH_TO_SP0
- /* Switch back to SP0. */
- msr SPSel, #0
- mov x2, sp
- .else
- /* Load the SP0 value. */
- mrs x2, SP_EL0
- .endif
-
- /* Save off the stack pointer. */
- str x2, [x0, SS64_SP]
-
- INIT_SAVED_STATE_FLAVORS x0, w1, w2
-
- /* Save the context that was interrupted. */
- ldp x2, x3, [x3, SS64_X2]
- SPILL_REGISTERS PPL_MODE
-
- /*
- * Stash the function we wish to be invoked to deal with the exception;
- * usually this is some preflight function for the fleh_* handler.
- */
- adrp x25, $1@page
- add x25, x25, $1@pageoff
-
- /*
- * Indicate that this is a PPL exception, and that we should return to
- * the PPL.
- */
- mov x26, #1
-
- /* Transition back to kernel mode. */
- mov x15, #PPL_EXIT_EXCEPTION
- b ppl_return_to_kernel_mode
-2:
- /* Restore SP1 state. */
- ldp x2, x3, [sp, SS64_X2]
- ldp x0, x1, [sp, SS64_X0]
- add sp, sp, ARM_CONTEXT_SIZE
-
- /* Go to the specified label (usually the original exception vector). */
- b $0
-.endmacro
-#endif /* __APRR_SUPPORTED__ */
#endif /* XNU_MONITOR */
@@ -474,14 +322,6 @@
.endmacro
el1_sp0_synchronous_vector_long:
-#if XNU_MONITOR && __APRR_SUPPORTED__
- /*
- * We do not have enough space for new instructions in this vector, so
- * jump to outside code to check if this exception was taken in the PPL.
- */
- b el1_sp0_synchronous_vector_ppl_check
-Lel1_sp0_synchronous_vector_kernel:
-#endif
stp x0, x1, [sp, #-16]! // Save x0 and x1 to the exception stack
mrs x1, ESR_EL1 // Get the exception syndrome
/* If the stack pointer is corrupt, it will manifest either as a data abort
@@ -498,10 +338,6 @@
b fleh_dispatch64
el1_sp0_irq_vector_long:
-#if XNU_MONITOR && __APRR_SUPPORTED__
- EL1_SP0_VECTOR_PPL_CHECK el1_sp0_irq_vector_not_in_kernel_mode
-Lel1_sp0_irq_vector_kernel:
-#endif
EL1_SP0_VECTOR
SWITCH_TO_INT_STACK
adrp x1, EXT(fleh_irq)@page // Load address for fleh
@@ -510,10 +346,6 @@
el1_sp0_fiq_vector_long:
// ARM64_TODO write optimized decrementer
-#if XNU_MONITOR && __APRR_SUPPORTED__
- EL1_SP0_VECTOR_PPL_CHECK el1_sp0_fiq_vector_not_in_kernel_mode
-Lel1_sp0_fiq_vector_kernel:
-#endif
EL1_SP0_VECTOR
SWITCH_TO_INT_STACK
adrp x1, EXT(fleh_fiq)@page // Load address for fleh
@@ -521,10 +353,6 @@
b fleh_dispatch64
el1_sp0_serror_vector_long:
-#if XNU_MONITOR && __APRR_SUPPORTED__
- EL1_SP0_VECTOR_PPL_CHECK el1_sp0_serror_vector_not_in_kernel_mode
-Lel1_sp0_serror_vector_kernel:
-#endif
EL1_SP0_VECTOR
adrp x1, EXT(fleh_serror)@page // Load address for fleh
add x1, x1, EXT(fleh_serror)@pageoff
@@ -569,35 +397,12 @@
add x1, x1, fleh_serror_sp1@pageoff
b fleh_dispatch64
-#if defined(HAS_APPLE_PAC) && !(__APCFG_SUPPORTED__ || __APSTS_SUPPORTED__)
-/**
- * On these CPUs, SCTLR_CP15BEN_ENABLED is res0, and SCTLR_{ITD,SED}_DISABLED are res1.
- * The rest of the bits in SCTLR_EL1_DEFAULT | SCTLR_PACIB_ENABLED are set in common_start.
- */
-#define SCTLR_EL1_INITIAL (SCTLR_EL1_DEFAULT | SCTLR_PACIB_ENABLED)
-#define SCTLR_EL1_EXPECTED ((SCTLR_EL1_INITIAL | SCTLR_SED_DISABLED | SCTLR_ITD_DISABLED) & ~SCTLR_CP15BEN_ENABLED)
-#endif
.macro EL0_64_VECTOR
stp x0, x1, [sp, #-16]! // Save x0 and x1 to the exception stack
#if __ARM_KERNEL_PROTECT__
mov x18, #0 // Zero x18 to avoid leaking data to user SS
#endif
-#if defined(HAS_APPLE_PAC) && !(__APCFG_SUPPORTED__ || __APSTS_SUPPORTED__)
- // enable JOP for kernel
- mrs x0, SCTLR_EL1
- tbnz x0, SCTLR_PACIA_ENABLED_SHIFT, 1f
- // if (!jop_running) {
- MOV64 x1, SCTLR_JOP_KEYS_ENABLED
- orr x0, x0, x1
- msr SCTLR_EL1, x0
- isb sy
- MOV64 x1, SCTLR_EL1_EXPECTED | SCTLR_JOP_KEYS_ENABLED
- cmp x0, x1
- bne .
- // }
-1:
-#endif /* defined(HAS_APPLE_PAC) && !(__APCFG_SUPPORTED__ || __APSTS_SUPPORTED__) */
mrs x0, TPIDR_EL1 // Load the thread register
mrs x1, SP_EL0 // Load the user stack pointer
add x0, x0, ACT_CONTEXT // Calculate where we store the user context pointer
@@ -643,13 +448,6 @@
add x1, x1, EXT(fleh_serror)@pageoff
b fleh_dispatch64
-#if XNU_MONITOR && __APRR_SUPPORTED__
-el1_sp0_synchronous_vector_ppl_check:
- EL1_SP0_VECTOR_PPL_CHECK el1_sp0_synchronous_vector_not_in_kernel_mode
-
- /* Jump back to the primary exception vector if we fell through. */
- b Lel1_sp0_synchronous_vector_kernel
-#endif
/*
* check_exception_stack
@@ -1212,59 +1010,6 @@
CMSR FPCR, x5, x4, 1
1:
-#if defined(HAS_APPLE_PAC)
- // if (eret to userspace) {
- and x2, x2, #(PSR64_MODE_EL_MASK)
- cmp x2, #(PSR64_MODE_EL0)
- bne Ldone_reconfigure_jop
- // thread_t thread = current_thread();
- // bool disable_jop;
- // if (arm_user_jop_disabled()) {
- // /* if global user JOP disabled, always turn off JOP regardless of thread flag (kernel running with JOP on) */
- // disable_jop = true;
- // } else {
- // disable_jop = thread->machine.disable_user_jop;
- // }
- mrs x2, TPIDR_EL1
- ldrb w1, [x2, TH_DISABLE_USER_JOP]
- cbz w1, Lenable_jop
- // if (disable_jop) {
- // if (cpu does not have discrete JOP-at-EL1 bit) {
- // disable_sctlr_jop_keys();
- // }
- // } else {
- // if (cpu does not have fast A-key switching) {
- // reprogram_jop_keys(thread->machine.jop_pid);
- // }
- // }
- // }
-Ldisable_jop:
-#if !(__APCFG_SUPPORTED__ || __APSTS_SUPPORTED__)
- MOV64 x1, SCTLR_JOP_KEYS_ENABLED
- mrs x4, SCTLR_EL1
- bic x4, x4, x1
- msr SCTLR_EL1, x4
- MOV64 x1, SCTLR_EL1_EXPECTED
- cmp x4, x1
- bne .
-#endif /* !(__APCFG_SUPPORTED__ || __APSTS_SUPPORTED__) */
- b Ldone_reconfigure_jop
-Lenable_jop:
-#if HAS_PAC_SLOW_A_KEY_SWITCHING
- IF_PAC_FAST_A_KEY_SWITCHING Ldone_reconfigure_jop, x1
- ldr x1, [x2, TH_JOP_PID]
- ldr x2, [x2, ACT_CPUDATAP]
- REPROGRAM_JOP_KEYS Ldone_reconfigure_jop, x1, x2, x3
-#if defined(__ARM_ARCH_8_5__)
- /**
- * The new keys will be used after eret to userspace, so explicit sync is
- * required iff eret is non-synchronizing.
- */
- isb sy
-#endif /* defined(__ARM_ARCH_8_5__) */
-#endif /* HAS_PAC_SLOW_A_KEY_SWITCHING */
-Ldone_reconfigure_jop:
-#endif /* defined(HAS_APPLE_PAC) */
/* Restore arm_neon_saved_state64 */
ldp q0, q1, [x0, NS64_Q0]
@@ -1407,27 +1152,6 @@
#endif /* __ARM_KERNEL_PROTECT__ */
#if XNU_MONITOR
-#if __APRR_SUPPORTED__
- .text
- .align 2
-el1_sp0_synchronous_vector_not_in_kernel_mode:
- EL1_SP0_VECTOR_NOT_IN_KERNEL_MODE Lel1_sp0_synchronous_vector_kernel, fleh_synchronous_from_ppl, STAY_ON_SP1
-
- .text
- .align 2
-el1_sp0_fiq_vector_not_in_kernel_mode:
- EL1_SP0_VECTOR_NOT_IN_KERNEL_MODE Lel1_sp0_fiq_vector_kernel, fleh_fiq_from_ppl, SWITCH_TO_SP0
-
- .text
- .align 2
-el1_sp0_irq_vector_not_in_kernel_mode:
- EL1_SP0_VECTOR_NOT_IN_KERNEL_MODE Lel1_sp0_irq_vector_kernel, fleh_irq_from_ppl, SWITCH_TO_SP0
-
- .text
- .align 2
-el1_sp0_serror_vector_not_in_kernel_mode:
- EL1_SP0_VECTOR_NOT_IN_KERNEL_MODE Lel1_sp0_serror_vector_kernel, fleh_serror_from_ppl, SWITCH_TO_SP0
-#endif /* __APRR_SUPPORTED__ */
/*
* Functions to preflight the fleh handlers when the PPL has taken an exception;
@@ -1502,65 +1226,6 @@
b EXT(fleh_serror)
-#if XNU_MONITOR && __APRR_SUPPORTED__
-/*
- * aprr_ppl_enter
- *
- * Invokes the PPL
- * x15 - The index of the requested PPL function.
- */
- .text
- .align 2
- .globl EXT(aprr_ppl_enter)
-LEXT(aprr_ppl_enter)
- /* Push a frame. */
- ARM64_STACK_PROLOG
- stp x20, x21, [sp, #-0x20]!
- stp x29, x30, [sp, #0x10]
- add x29, sp, #0x10
-
- /* Increase the preemption count. */
- mrs x10, TPIDR_EL1
- ldr w12, [x10, ACT_PREEMPT_CNT]
- add w12, w12, #1
- str w12, [x10, ACT_PREEMPT_CNT]
-
- /* Is the PPL currently locked down? */
- adrp x13, EXT(pmap_ppl_locked_down)@page
- add x13, x13, EXT(pmap_ppl_locked_down)@pageoff
- ldr w14, [x13]
- cmp w14, wzr
-
- /* If not, just perform the call in the current context. */
- b.eq EXT(ppl_bootstrap_dispatch)
-
- mov w10, #PPL_STATE_KERNEL
- b Ldisable_aif_and_enter_ppl
-
- /* We align this to land the next few instructions on their own page. */
- .section __PPLTRAMP,__text,regular,pure_instructions
- .align 14
- .space (16*1024)-(4*8) // 8 insns
-
- /*
- * This label is used by exception handlers that are trying to return
- * to the PPL.
- */
-Ldisable_aif_and_enter_ppl:
- /* We must trampoline to the PPL context; disable AIF. */
- mrs x20, DAIF
- msr DAIFSet, #(DAIFSC_ASYNCF | DAIFSC_IRQF | DAIFSC_FIQF)
-
- .globl EXT(ppl_no_exception_start)
-LEXT(ppl_no_exception_start)
- /* Switch APRR_EL1 to PPL mode. */
- MOV64 x14, APRR_EL1_PPL
- msr APRR_EL1, x14
-
- /* This ISB should be the last instruction on a page. */
- // TODO: can we static assert this?
- isb
-#endif /* XNU_MONITOR && __APRR_SUPPORTED__ */
// x15: ppl call number
@@ -1569,18 +1234,8 @@
.globl EXT(ppl_trampoline_start)
LEXT(ppl_trampoline_start)
-#if __APRR_SUPPORTED__
- /* Squash AIF AGAIN, because someone may have attacked us. */
- msr DAIFSet, #(DAIFSC_ASYNCF | DAIFSC_IRQF | DAIFSC_FIQF)
-#endif /* __APRR_SUPPORTED__ */
-#if __APRR_SUPPORTED__
- /* Verify the state of APRR_EL1. */
- MOV64 x14, APRR_EL1_PPL
- mrs x21, APRR_EL1
-#else /* __APRR_SUPPORTED__ */
#error "XPRR configuration error"
-#endif /* __APRR_SUPPORTED__ */
cmp x14, x21
b.ne Lppl_fail_dispatch
@@ -1617,11 +1272,7 @@
/* Find the save area, and return to the saved PPL context. */
ldr x0, [x12, PMAP_CPU_DATA_SAVE_AREA]
mov sp, x0
-#if __APRR_SUPPORTED__
- b Lexception_return_restore_registers
-#else
b EXT(return_to_ppl)
-#endif /* __APRR_SUPPORTED__ */
Lppl_mark_cpu_as_dispatching:
cmp w10, #PPL_STATE_KERNEL
@@ -1693,27 +1344,6 @@
/* Return to the kernel. */
b ppl_return_to_kernel_mode
-#if __APRR_SUPPORTED__
- /* We align this to land the next few instructions on their own page. */
- .align 14
- .space (16*1024)-(4*5) // 5 insns
-
-ppl_return_to_kernel_mode:
- /* Switch APRR_EL1 back to the kernel mode. */
- // must be 5 instructions
- MOV64 x14, APRR_EL1_DEFAULT
- msr APRR_EL1, x14
-
- .globl EXT(ppl_trampoline_end)
-LEXT(ppl_trampoline_end)
-
- /* This should be the first instruction on a page. */
- isb
-
- .globl EXT(ppl_no_exception_end)
-LEXT(ppl_no_exception_end)
- b ppl_exit
-#endif /* __APRR_SUPPORTED__ */
.text
diff -ru ../xnu-7195.60.75/osfmk/arm64/machine_routines.c ../xnu-7195.81.3/osfmk/arm64/machine_routines.c
--- ../xnu-7195.60.75/osfmk/arm64/machine_routines.c 2020-12-18 10:21:08.000000000 +0100
+++ ../xnu-7195.81.3/osfmk/arm64/machine_routines.c 2021-01-26 21:33:20.000000000 +0100
@@ -2289,32 +2289,6 @@
}
#if defined(HAS_APPLE_PAC)
-static inline bool
-cpu_supports_userkeyen()
-{
-#if defined(APPLEFIRESTORM)
- return __builtin_arm_rsr64(ARM64_REG_APCTL_EL1) & APCTL_EL1_UserKeyEn;
-#elif HAS_APCTL_EL1_USERKEYEN
- return true;
-#else
- return false;
-#endif
-}
-
-/**
- * Returns the default JOP key. Depending on how the CPU diversifies userspace
- * JOP keys, this value may reflect either KERNKeyLo or APIAKeyLo.
- */
-uint64_t
-ml_default_jop_pid(void)
-{
- if (cpu_supports_userkeyen()) {
- return KERNEL_KERNKEY_ID;
- } else {
- return KERNEL_JOP_ID;
- }
-}
-
void
ml_task_set_disable_user_jop(task_t task, uint8_t disable_user_jop)
{
diff -ru ../xnu-7195.60.75/osfmk/arm64/machine_routines_asm.s ../xnu-7195.81.3/osfmk/arm64/machine_routines_asm.s
--- ../xnu-7195.60.75/osfmk/arm64/machine_routines_asm.s 2020-12-18 10:21:28.000000000 +0100
+++ ../xnu-7195.81.3/osfmk/arm64/machine_routines_asm.s 2021-01-26 21:33:41.000000000 +0100
@@ -29,7 +29,6 @@
#include <machine/asm.h>
#include <arm64/exception_asm.h>
#include <arm64/machine_machdep.h>
-#include <arm64/pac_asm.h>
#include <arm64/proc_reg.h>
#include <arm/pmap.h>
#include <pexpert/arm64/board_config.h>
@@ -37,94 +36,6 @@
#include "assym.s"
-#if defined(HAS_APPLE_PAC)
-
-.macro SET_KERN_KEY dst, apctl_el1
- orr \dst, \apctl_el1, #APCTL_EL1_KernKeyEn
-.endmacro
-
-.macro CLEAR_KERN_KEY dst, apctl_el1
- and \dst, \apctl_el1, #~APCTL_EL1_KernKeyEn
-.endmacro
-
-/*
- * uint64_t ml_enable_user_jop_key(uint64_t user_jop_key)
- */
- .align 2
- .globl EXT(ml_enable_user_jop_key)
-LEXT(ml_enable_user_jop_key)
- mov x1, x0
- mrs x2, TPIDR_EL1
- ldr x2, [x2, ACT_CPUDATAP]
- ldr x0, [x2, CPU_JOP_KEY]
-
- cmp x0, x1
- b.eq Lskip_program_el0_jop_key
- /*
- * We can safely write to the JOP key registers without updating
- * current_cpu_datap()->jop_key. The complementary
- * ml_disable_user_jop_key() call will put back the old value. Interrupts
- * are also disabled, so nothing else will read this field in the meantime.
- */
- SET_JOP_KEY_REGISTERS x1, x2
-Lskip_program_el0_jop_key:
-
- /*
- * if (cpu has APCTL_EL1.UserKeyEn) {
- * set APCTL_EL1.KernKeyEn // KERNKey is mixed into EL0 keys
- * } else {
- * clear APCTL_EL1.KernKeyEn // KERNKey is not mixed into EL0 keys
- * }
- */
- mrs x1, ARM64_REG_APCTL_EL1
-#if defined(APPLEFIRESTORM)
- SET_KERN_KEY x2, x1
- CLEAR_KERN_KEY x3, x1
- tst x1, #(APCTL_EL1_UserKeyEn)
- csel x1, x2, x3, ne
-#elif defined(HAS_APCTL_EL1_USERKEYEN)
- SET_KERN_KEY x1, x1
-#else
- CLEAR_KERN_KEY x1, x1
-#endif
- msr ARM64_REG_APCTL_EL1, x1
- isb
- ret
-
-/*
- * void ml_disable_user_jop_key(uint64_t user_jop_key, uint64_t saved_jop_state)
- */
- .align 2
- .globl EXT(ml_disable_user_jop_key)
-LEXT(ml_disable_user_jop_key)
- cmp x0, x1
- b.eq Lskip_program_prev_jop_key
- SET_JOP_KEY_REGISTERS x1, x2
-Lskip_program_prev_jop_key:
-
- /*
- * if (cpu has APCTL_EL1.UserKeyEn) {
- * clear APCTL_EL1.KernKeyEn // KERNKey is not mixed into EL1 keys
- * } else {
- * set APCTL_EL1.KernKeyEn // KERNKey is mixed into EL1 keys
- * }
- */
- mrs x1, ARM64_REG_APCTL_EL1
-#if defined(APPLEFIRESTORM)
- CLEAR_KERN_KEY x2, x1
- SET_KERN_KEY x3, x1
- tst x1, #(APCTL_EL1_UserKeyEn)
- csel x1, x2, x3, ne
-#elif defined(HAS_APCTL_EL1_USERKEYEN)
- CLEAR_KERN_KEY x1, x1
-#else
- SET_KERN_KEY x1, x1
-#endif
- msr ARM64_REG_APCTL_EL1, x1
- isb
- ret
-
-#endif /* defined(HAS_APPLE_PAC) */
#if HAS_BP_RET
Only in ../xnu-7195.60.75/osfmk/arm64: pac_asm.h
diff -ru ../xnu-7195.60.75/osfmk/arm64/pinst.s ../xnu-7195.81.3/osfmk/arm64/pinst.s
--- ../xnu-7195.60.75/osfmk/arm64/pinst.s 2020-12-18 10:21:28.000000000 +0100
+++ ../xnu-7195.81.3/osfmk/arm64/pinst.s 2021-01-26 21:33:40.000000000 +0100
@@ -123,48 +123,5 @@
check_instruction x2, x3, __pinst_spsel_1, 0xd65f03c0d50041bf
b __pinst_spsel_1
-#if __APRR_SUPPORTED__
-
-/*
- * APRR registers aren't covered by VMSA lockdown, so we'll keep these
- * gadgets in pinst for protection against undesired execution.
- */
-
- .text
- .section __LAST,__pinst
- .align 2
-
-__pinst_set_aprr_el0:
- msr APRR_EL0, x0
- ret
-
-__pinst_set_aprr_el1:
- msr APRR_EL1, x0
- ret
-
-__pinst_set_aprr_shadow_mask_en_el1:
- msr APRR_SHADOW_MASK_EN_EL1, x0
-
- ret
-
- .text
- .section __TEXT_EXEC,__text
- .align 2
-
- .globl _pinst_set_aprr_el0
-_pinst_set_aprr_el0:
- check_instruction x2, x3, __pinst_set_aprr_el0, 0xd65f03c0d51cf200
- b __pinst_set_aprr_el0
-
- .globl _pinst_set_aprr_el1
-_pinst_set_aprr_el1:
- check_instruction x2, x3, __pinst_set_aprr_el1, 0xd65f03c0d51cf220
- b __pinst_set_aprr_el1
-
- .globl _pinst_set_aprr_shadow_mask_en_el1
-_pinst_set_aprr_shadow_mask_en_el1:
- check_instruction x2, x3, __pinst_set_aprr_shadow_mask_en_el1, 0xd65f03c0d51cf2c0
- b __pinst_set_aprr_shadow_mask_en_el1
-#endif /* __APRR_SUPPORTED__ */
#endif /* defined(KERNEL_INTEGRITY_KTRR) || defined(KERNEL_INTEGRITY_CTRR) */
diff -ru ../xnu-7195.60.75/osfmk/arm64/platform_tests.c ../xnu-7195.81.3/osfmk/arm64/platform_tests.c
--- ../xnu-7195.60.75/osfmk/arm64/platform_tests.c 2020-12-18 10:21:08.000000000 +0100
+++ ../xnu-7195.81.3/osfmk/arm64/platform_tests.c 2021-01-26 21:33:21.000000000 +0100
@@ -1170,20 +1170,6 @@
#if defined(HAS_APPLE_PAC)
-/*
- *
- * arm64_ropjop_test - basic xnu ROP/JOP test plan
- *
- * - assert ROP/JOP configured and running status match
- * - assert all AppleMode ROP/JOP features enabled
- * - ensure ROP/JOP keys are set and diversified
- * - sign a KVA (the address of this function),assert it was signed (changed)
- * - authenticate the newly signed KVA
- * - assert the authed KVA is the original KVA
- * - corrupt a signed ptr, auth it, ensure auth failed
- * - assert the failed authIB of corrupted pointer is tagged
- *
- */
kern_return_t
arm64_ropjop_test()
@@ -1195,51 +1181,11 @@
boolean_t config_jop_enabled = TRUE;
- /* assert all AppleMode ROP/JOP features enabled */
- uint64_t apctl = __builtin_arm_rsr64(ARM64_REG_APCTL_EL1);
-#if __APSTS_SUPPORTED__
- uint64_t apsts = __builtin_arm_rsr64(ARM64_REG_APSTS_EL1);
- T_EXPECT(apsts & APSTS_EL1_MKEYVld, NULL);
-#else
- T_EXPECT(apctl & APCTL_EL1_MKEYVld, NULL);
-#endif /* __APSTS_SUPPORTED__ */
- T_EXPECT(apctl & APCTL_EL1_AppleMode, NULL);
-
- bool kernkeyen = apctl & APCTL_EL1_KernKeyEn;
-#if HAS_APCTL_EL1_USERKEYEN
- bool userkeyen = apctl & APCTL_EL1_UserKeyEn;
-#else
- bool userkeyen = false;
-#endif
- /* for KernKey to work as a diversifier, it must be enabled at exactly one of {EL0, EL1/2} */
- T_EXPECT(kernkeyen || userkeyen, "KernKey is enabled");
- T_EXPECT(!(kernkeyen && userkeyen), "KernKey is not simultaneously enabled at userspace and kernel space");
-
- /* ROP/JOP keys enabled current status */
- bool status_jop_enabled, status_rop_enabled;
-#if __APSTS_SUPPORTED__ /* H13+ */
- status_jop_enabled = status_rop_enabled = apctl & APCTL_EL1_EnAPKey1;
-#elif __APCFG_SUPPORTED__ /* H12 */
- uint64_t apcfg_el1 = __builtin_arm_rsr64(APCFG_EL1);
- status_jop_enabled = status_rop_enabled = apcfg_el1 & APCFG_EL1_ELXENKEY;
-#else /* !__APCFG_SUPPORTED__ H11 */
- uint64_t sctlr_el1 = __builtin_arm_rsr64("SCTLR_EL1");
- status_jop_enabled = sctlr_el1 & SCTLR_PACIA_ENABLED;
- status_rop_enabled = sctlr_el1 & SCTLR_PACIB_ENABLED;
-#endif /* __APSTS_SUPPORTED__ */
-
- /* assert configured and running status match */
- T_EXPECT(config_rop_enabled == status_rop_enabled, NULL);
- T_EXPECT(config_jop_enabled == status_jop_enabled, NULL);
-
-
if (config_jop_enabled) {
/* jop key */
uint64_t apiakey_hi = __builtin_arm_rsr64(ARM64_REG_APIAKEYHI_EL1);
uint64_t apiakey_lo = __builtin_arm_rsr64(ARM64_REG_APIAKEYLO_EL1);
- /* ensure JOP key is set and diversified */
- T_EXPECT(apiakey_hi != KERNEL_ROP_ID && apiakey_lo != KERNEL_ROP_ID, NULL);
T_EXPECT(apiakey_hi != 0 && apiakey_lo != 0, NULL);
}
@@ -1248,8 +1194,6 @@
uint64_t apibkey_hi = __builtin_arm_rsr64(ARM64_REG_APIBKEYHI_EL1);
uint64_t apibkey_lo = __builtin_arm_rsr64(ARM64_REG_APIBKEYLO_EL1);
- /* ensure ROP key is set and diversified */
- T_EXPECT(apibkey_hi != KERNEL_ROP_ID && apibkey_lo != KERNEL_ROP_ID, NULL);
T_EXPECT(apibkey_hi != 0 && apibkey_lo != 0, NULL);
/* sign a KVA (the address of this function) */
diff -ru ../xnu-7195.60.75/osfmk/arm64/proc_reg.h ../xnu-7195.81.3/osfmk/arm64/proc_reg.h
--- ../xnu-7195.60.75/osfmk/arm64/proc_reg.h 2020-12-18 10:21:07.000000000 +0100
+++ ../xnu-7195.81.3/osfmk/arm64/proc_reg.h 2021-01-26 21:33:20.000000000 +0100
@@ -1736,228 +1736,13 @@
#define CORESIGHT_REGIONS 4
#define CORESIGHT_SIZE 0x1000
-#if __APRR_SUPPORTED__
-/*
- * APRR_EL0/APRR_EL1
- *
- * 63 0
- * +--------------------+
- * | Attr[15:0]RWX[3:0] |
- * +--------------------+
- *
- * These registers consist of 16 4-bit fields.
- *
- * The attribute index consists of the access protection
- * and execution protections on a mapping. The index
- * for a given mapping type is constructed as follows.
- *
- * Attribute Index
- *
- * 3 2 1 0
- * +-------+-------+-----+----+
- * | AP[1] | AP[0] | PXN | XN |
- * +-------+-------+-----+----+
- *
- * The attribute for a given index determines what
- * protections are disabled for that mappings type
- * (protections beyond the scope of the standard ARM
- * protections for a mapping cannot be granted via
- * APRR).
- *
- * Attribute
- *
- * 3 2 1 0
- * +----------+---+---+---+
- * | Reserved | R | W | X |
- * +----------+---+---+---+
- *
- * Where:
- * R: Read is allowed.
- * W: Write is allowed.
- * X: Execute is allowed.
- */
-
-#define APRR_IDX_XN (1ULL)
-#define APRR_IDX_PXN (2ULL)
-
-
-#define APRR_IDX_XN_SHIFT (0ULL)
-#define APRR_IDX_PXN_SHIFT (1ULL)
-#define APRR_IDX_APSHIFT (2ULL)
-
-#endif /* __APRR_SUPPORTED__ */
-
-
-#if __APRR_SUPPORTED__
-
-#define APRR_ATTR_X (1ULL)
-#define APRR_ATTR_W (2ULL)
-#define APRR_ATTR_R (4ULL)
-
-#define APRR_ATTR_WX (APRR_ATTR_W | APRR_ATTR_X)
-#define APRR_ATTR_RX (APRR_ATTR_R | APRR_ATTR_X)
-#define APRR_ATTR_RWX (APRR_ATTR_R | APRR_ATTR_W | APRR_ATTR_X)
-
-#define APRR_ATTR_NONE (0ULL)
-#define APRR_ATTR_MASK (APRR_ATTR_RWX)
-
-#define APRR_RESERVED_MASK (0x8888888888888888ULL)
-#endif /* __APRR_SUPPORTED__ */
-
-#if __APRR_SUPPORTED__
-#define XPRR_FIRM_RX_PERM (0ULL)
-#define XPRR_PPL_RW_PERM (1ULL)
-#define XPRR_FIRM_RO_PERM (2ULL)
-#define XPRR_KERN_RW_PERM (3ULL)
-#define XPRR_FIRM_RW_PERM (4ULL)
-#define XPRR_USER_JIT_PERM (5ULL)
-#define XPRR_KERN0_RW_PERM (6ULL)
-#define XPRR_USER_RW_PERM (7ULL)
-#define XPRR_PPL_RX_PERM (8ULL)
-#define XPRR_USER_XO_PERM (9ULL)
-#define XPRR_KERN_RX_PERM (10ULL)
-#define XPRR_KERN_RO_PERM (11ULL)
-#define XPRR_KERN0_RX_PERM (12ULL)
-#define XPRR_USER_RX_PERM (13ULL)
-#define XPRR_KERN0_RO_PERM (14ULL)
-#define XPRR_USER_RO_PERM (15ULL)
-#define XPRR_MAX_PERM (15ULL)
-
-#define XPRR_VERSION_NONE (0ULL)
-#define XPRR_VERSION_APRR (1ULL)
-
-
-#endif /* __APRR_SUPPORTED__*/
-
-#if __APRR_SUPPORTED__
-/* Indices for attributes, named based on how we intend to use them. */
-#define APRR_FIRM_RX_INDEX (0ULL) /* AP_RWNA, PX, X */
-#define APRR_FIRM_RO_INDEX (1ULL) /* AP_RWNA, PX, XN */
-#define APRR_PPL_RW_INDEX (2ULL) /* AP_RWNA, PXN, X */
-#define APRR_KERN_RW_INDEX (3ULL) /* AP_RWNA, PXN, XN */
-#define APRR_FIRM_RW_INDEX (4ULL) /* AP_RWRW, PX, X */
-#define APRR_KERN0_RW_INDEX (5ULL) /* AP_RWRW, PX, XN */
-#define APRR_USER_JIT_INDEX (6ULL) /* AP_RWRW, PXN, X */
-#define APRR_USER_RW_INDEX (7ULL) /* AP_RWRW, PXN, XN */
-#define APRR_PPL_RX_INDEX (8ULL) /* AP_RONA, PX, X */
-#define APRR_KERN_RX_INDEX (9ULL) /* AP_RONA, PX, XN */
-#define APRR_USER_XO_INDEX (10ULL) /* AP_RONA, PXN, X */
-#define APRR_KERN_RO_INDEX (11ULL) /* AP_RONA, PXN, XN */
-#define APRR_KERN0_RX_INDEX (12ULL) /* AP_RORO, PX, X */
-#define APRR_KERN0_RO_INDEX (13ULL) /* AP_RORO, PX, XN */
-#define APRR_USER_RX_INDEX (14ULL) /* AP_RORO, PXN, X */
-#define APRR_USER_RO_INDEX (15ULL) /* AP_RORO, PXN, XN */
-#define APRR_MAX_INDEX (15ULL) /* For sanity checking index values */
-#endif /* __APRR_SUPPORTED */
-
-
-#if __APRR_SUPPORTED__
-#define APRR_SHIFT_FOR_IDX(x) \
- ((x) << 2ULL)
-
-/* Shifts for attributes, named based on how we intend to use them. */
-#define APRR_FIRM_RX_SHIFT (0ULL) /* AP_RWNA, PX, X */
-#define APRR_FIRM_RO_SHIFT (4ULL) /* AP_RWNA, PX, XN */
-#define APRR_PPL_RW_SHIFT (8ULL) /* AP_RWNA, PXN, X */
-#define APRR_KERN_RW_SHIFT (12ULL) /* AP_RWNA, PXN, XN */
-#define APRR_FIRM_RW_SHIFT (16ULL) /* AP_RWRW, PX, X */
-#define APRR_KERN0_RW_SHIFT (20ULL) /* AP_RWRW, PX, XN */
-#define APRR_USER_JIT_SHIFT (24ULL) /* AP_RWRW, PXN, X */
-#define APRR_USER_RW_SHIFT (28ULL) /* AP_RWRW, PXN, XN */
-#define APRR_PPL_RX_SHIFT (32ULL) /* AP_RONA, PX, X */
-#define APRR_KERN_RX_SHIFT (36ULL) /* AP_RONA, PX, XN */
-#define APRR_USER_XO_SHIFT (40ULL) /* AP_RONA, PXN, X */
-#define APRR_KERN_RO_SHIFT (44ULL) /* AP_RONA, PXN, XN */
-#define APRR_KERN0_RX_SHIFT (48ULL) /* AP_RORO, PX, X */
-#define APRR_KERN0_RO_SHIFT (52ULL) /* AP_RORO, PX, XN */
-#define APRR_USER_RX_SHIFT (56ULL) /* AP_RORO, PXN, X */
-#define APRR_USER_RO_SHIFT (60ULL) /* AP_RORO, PXN, XN */
-
-#define ARM_PTE_APRR_MASK \
- (ARM_PTE_APMASK | ARM_PTE_PNXMASK | ARM_PTE_NXMASK)
-
-#define ARM_PTE_XPRR_MASK ARM_PTE_APRR_MASK
-
-#define APRR_INDEX_TO_PTE(x) \
- ((pt_entry_t) \
- (((x) & 0x8) ? ARM_PTE_AP(0x2) : 0) | \
- (((x) & 0x4) ? ARM_PTE_AP(0x1) : 0) | \
- (((x) & 0x2) ? ARM_PTE_PNX : 0) | \
- (((x) & 0x1) ? ARM_PTE_NX : 0))
-
-#define PTE_TO_APRR_INDEX(x) \
- ((ARM_PTE_EXTRACT_AP(x) << APRR_IDX_APSHIFT) | \
- (((x) & ARM_PTE_PNXMASK) ? APRR_IDX_PXN : 0) | \
- (((x) & ARM_PTE_NXMASK) ? APRR_IDX_XN : 0))
-
-#endif /* __APRR_SUPPORTED__ */
-
-#if __APRR_SUPPORTED__
-
-#define APRR_EXTRACT_IDX_ATTR(_aprr_value, _idx) \
- (((_aprr_value) >> APRR_SHIFT_FOR_IDX(_idx)) & APRR_ATTR_MASK)
-
-#define APRR_REMOVE(x) (~(x))
-
-#define APRR_EL1_UNRESTRICTED (0x4455445566666677ULL)
-
-#define APRR_EL1_RESET \
- APRR_EL1_UNRESTRICTED
-
-/*
- * XO mappings bypass PAN protection (rdar://58360875)
- * Revoke ALL kernel access permissions for XO mappings.
- */
-#define APRR_EL1_BASE \
- (APRR_EL1_UNRESTRICTED & \
- APRR_REMOVE(APRR_ATTR_R << APRR_USER_XO_SHIFT))
-
-#if XNU_MONITOR
-#define APRR_EL1_DEFAULT \
- (APRR_EL1_BASE & \
- (APRR_REMOVE((APRR_ATTR_WX << APRR_PPL_RW_SHIFT) | \
- (APRR_ATTR_WX << APRR_USER_XO_SHIFT) | \
- (APRR_ATTR_WX << APRR_PPL_RX_SHIFT))))
-
-#define APRR_EL1_PPL \
- (APRR_EL1_BASE & \
- (APRR_REMOVE((APRR_ATTR_X << APRR_PPL_RW_SHIFT) | \
- (APRR_ATTR_WX << APRR_USER_XO_SHIFT) | \
- (APRR_ATTR_W << APRR_PPL_RX_SHIFT))))
-#else
-#define APRR_EL1_DEFAULT \
- APRR_EL1_BASE
-#endif
-
-#define APRR_EL0_UNRESTRICTED (0x4545010167670101ULL)
-#define APRR_EL0_RESET \
- APRR_EL0_UNRESTRICTED
-#if XNU_MONITOR
-#define APRR_EL0_BASE \
- (APRR_EL0_UNRESTRICTED & \
- (APRR_REMOVE((APRR_ATTR_RWX << APRR_PPL_RW_SHIFT) | \
- (APRR_ATTR_RWX << APRR_PPL_RX_SHIFT) | \
- (APRR_ATTR_RWX << APRR_USER_XO_SHIFT))))
-#else
-#define APRR_EL0_BASE \
- APRR_EL0_UNRESTRICTED
-#endif
-#define APRR_EL0_JIT_RW \
- (APRR_EL0_BASE & APRR_REMOVE(APRR_ATTR_X << APRR_USER_JIT_SHIFT))
-#define APRR_EL0_JIT_RX \
- (APRR_EL0_BASE & APRR_REMOVE(APRR_ATTR_W << APRR_USER_JIT_SHIFT))
-#define APRR_EL0_JIT_RWX \
- APRR_EL0_BASE
-#define APRR_EL0_DEFAULT \
- APRR_EL0_BASE
-#endif /* __APRR_SUPPORTED__ */
/*
@@ -1999,46 +1784,12 @@
#define ID_AA64ISAR0_EL1_AES_PMULL_EN (2ull << ID_AA64ISAR0_EL1_AES_OFFSET)
-#if __APCFG_SUPPORTED__
-/*
- * APCFG_EL1
- *
- * 63 2 1 0
- * +----------+-+-+
- * | reserved |K|R|
- * +----------+-+-+
- *
- * where:
- * R: Reserved
- * K: ElXEnKey - Enable ARMV8.3 defined {IA,IB,DA,DB} keys when CPU is
- * operating in EL1 (or higher) and when under Apple-Mode
- */
-
-#define APCFG_EL1_ELXENKEY_OFFSET 1
-#define APCFG_EL1_ELXENKEY_MASK (0x1ULL << APCFG_EL1_ELXENKEY_OFFSET)
-#define APCFG_EL1_ELXENKEY APCFG_EL1_ELXENKEY_MASK
-#endif /* __APCFG_SUPPORTED__ */
#define APSTATE_G_SHIFT (0)
#define APSTATE_P_SHIFT (1)
#define APSTATE_A_SHIFT (2)
#define APSTATE_AP_MASK ((1ULL << APSTATE_A_SHIFT) | (1ULL << APSTATE_P_SHIFT))
-#ifdef __APSTS_SUPPORTED__
-#define APCTL_EL1_AppleMode (1ULL << 0)
-#define APCTL_EL1_KernKeyEn (1ULL << 1)
-#define APCTL_EL1_EnAPKey0 (1ULL << 2)
-#define APCTL_EL1_EnAPKey1 (1ULL << 3)
-#ifdef HAS_APCTL_EL1_USERKEYEN
-#define APCTL_EL1_UserKeyEn_OFFSET 4
-#define APCTL_EL1_UserKeyEn (1ULL << APCTL_EL1_UserKeyEn_OFFSET)
-#endif /* HAS_APCTL_EL1_USERKEYEN */
-#define APSTS_EL1_MKEYVld (1ULL << 0)
-#else
-#define APCTL_EL1_AppleMode (1ULL << 0)
-#define APCTL_EL1_MKEYVld (1ULL << 1)
-#define APCTL_EL1_KernKeyEn (1ULL << 2)
-#endif
#define ACTLR_EL1_EnTSO (1ULL << 1)
#define ACTLR_EL1_EnAPFLG (1ULL << 4)
diff -ru ../xnu-7195.60.75/osfmk/arm64/start.s ../xnu-7195.81.3/osfmk/arm64/start.s
--- ../xnu-7195.60.75/osfmk/arm64/start.s 2020-12-18 10:21:28.000000000 +0100
+++ ../xnu-7195.81.3/osfmk/arm64/start.s 2021-01-26 21:33:40.000000000 +0100
@@ -40,33 +40,6 @@
#endif /* __ARM_KERNEL_PROTECT__ */
-#if __APRR_SUPPORTED__
-
-.macro MSR_APRR_EL1_X0
-#if defined(KERNEL_INTEGRITY_KTRR) || defined(KERNEL_INTEGRITY_CTRR)
- bl EXT(pinst_set_aprr_el1)
-#else
- msr APRR_EL1, x0
-#endif
-.endmacro
-
-.macro MSR_APRR_EL0_X0
-#if defined(KERNEL_INTEGRITY_KTRR) || defined(KERNEL_INTEGRITY_CTRR)
- bl EXT(pinst_set_aprr_el0)
-#else
- msr APRR_EL0, x0
-#endif
-.endmacro
-
-.macro MSR_APRR_SHADOW_MASK_EN_EL1_X0
-#if defined(KERNEL_INTEGRITY_KTRR) || defined(KERNEL_INTEGRITY_CTRR)
- bl EXT(pinst_set_aprr_shadow_mask_en_el1)
-#else
- msr APRR_SHADOW_MASK_EN_EL1, x0
-#endif
-.endmacro
-
-#endif /* __APRR_SUPPORTED__ */
.macro MSR_VBAR_EL1_X0
#if defined(KERNEL_INTEGRITY_KTRR)
@@ -163,25 +136,6 @@
msr VBAR_EL1, x0
#endif
-#if __APRR_SUPPORTED__
- MOV64 x0, APRR_EL1_DEFAULT
-#if XNU_MONITOR
- adrp x4, EXT(pmap_ppl_locked_down)@page
- ldrb w5, [x4, #EXT(pmap_ppl_locked_down)@pageoff]
- cmp w5, #0
- b.ne 1f
-
- // If the PPL is not locked down, we start in PPL mode.
- MOV64 x0, APRR_EL1_PPL
-1:
-#endif /* XNU_MONITOR */
-
- MSR_APRR_EL1_X0
-
- // Load up the default APRR_EL0 value.
- MOV64 x0, APRR_EL0_DEFAULT
- MSR_APRR_EL0_X0
-#endif /* __APRR_SUPPORTED__ */
#if defined(KERNEL_INTEGRITY_KTRR)
/*
@@ -607,29 +561,6 @@
add x0, x0, EXT(LowExceptionVectorBase)@pageoff
MSR_VBAR_EL1_X0
-#if __APRR_SUPPORTED__
- // Save the LR
- mov x1, lr
-
-#if XNU_MONITOR
- // If the PPL is supported, we start out in PPL mode.
- MOV64 x0, APRR_EL1_PPL
-#else
- // Otherwise, we start out in default mode.
- MOV64 x0, APRR_EL1_DEFAULT
-#endif
-
- // Set the APRR state for EL1.
- MSR_APRR_EL1_X0
-
- // Set the APRR state for EL0.
- MOV64 x0, APRR_EL0_DEFAULT
- MSR_APRR_EL0_X0
-
-
- // Restore the LR.
- mov lr, x1
-#endif /* __APRR_SUPPORTED__ */
// Get the kernel memory parameters from the boot args
ldr x22, [x20, BA_VIRT_BASE] // Get the kernel virt base
@@ -904,79 +835,13 @@
1:
#ifdef HAS_APPLE_PAC
-#ifdef __APSTS_SUPPORTED__
- mrs x0, ARM64_REG_APSTS_EL1
- and x1, x0, #(APSTS_EL1_MKEYVld)
- cbz x1, 1b // Poll APSTS_EL1.MKEYVld
- mrs x0, ARM64_REG_APCTL_EL1
- orr x0, x0, #(APCTL_EL1_AppleMode)
-#ifdef HAS_APCTL_EL1_USERKEYEN
- orr x0, x0, #(APCTL_EL1_UserKeyEn)
- and x0, x0, #~(APCTL_EL1_KernKeyEn)
-#else /* !HAS_APCTL_EL1_USERKEYEN */
- orr x0, x0, #(APCTL_EL1_KernKeyEn)
-#endif /* HAS_APCTL_EL1_USERKEYEN */
- and x0, x0, #~(APCTL_EL1_EnAPKey0)
- msr ARM64_REG_APCTL_EL1, x0
-
-#if defined(APPLEFIRESTORM)
- IF_PAC_FAST_A_KEY_SWITCHING 1f, x0
- orr x0, x0, #(APCTL_EL1_KernKeyEn)
- msr ARM64_REG_APCTL_EL1, x0
-1:
-#endif /* APPLEFIRESTORM */
-
-#else
- mrs x0, ARM64_REG_APCTL_EL1
- and x1, x0, #(APCTL_EL1_MKEYVld)
- cbz x1, 1b // Poll APCTL_EL1.MKEYVld
- orr x0, x0, #(APCTL_EL1_AppleMode)
- orr x0, x0, #(APCTL_EL1_KernKeyEn)
- msr ARM64_REG_APCTL_EL1, x0
-#endif /* APSTS_SUPPORTED */
-
- /* ISB necessary to ensure APCTL_EL1_AppleMode logic enabled before proceeding */
- isb sy
- /* Load static kernel key diversification values */
- ldr x0, =KERNEL_ROP_ID
- /* set ROP key. must write at least once to pickup mkey per boot diversification */
- msr APIBKeyLo_EL1, x0
- add x0, x0, #1
- msr APIBKeyHi_EL1, x0
- add x0, x0, #1
- msr APDBKeyLo_EL1, x0
- add x0, x0, #1
- msr APDBKeyHi_EL1, x0
- add x0, x0, #1
- msr ARM64_REG_KERNELKEYLO_EL1, x0
- add x0, x0, #1
- msr ARM64_REG_KERNELKEYHI_EL1, x0
- /* set JOP key. must write at least once to pickup mkey per boot diversification */
- add x0, x0, #1
- msr APIAKeyLo_EL1, x0
- add x0, x0, #1
- msr APIAKeyHi_EL1, x0
- add x0, x0, #1
- msr APDAKeyLo_EL1, x0
- add x0, x0, #1
- msr APDAKeyHi_EL1, x0
- /* set G key */
- add x0, x0, #1
- msr APGAKeyLo_EL1, x0
- add x0, x0, #1
- msr APGAKeyHi_EL1, x0
// Enable caches, MMU, ROP and JOP
MOV64 x0, SCTLR_EL1_DEFAULT
orr x0, x0, #(SCTLR_PACIB_ENABLED) /* IB is ROP */
-#if __APCFG_SUPPORTED__
- // for APCFG systems, JOP keys are always on for EL1.
- // JOP keys for EL0 will be toggled on the first time we pmap_switch to a pmap that has JOP enabled
-#else /* __APCFG_SUPPORTED__ */
MOV64 x1, SCTLR_JOP_KEYS_ENABLED
orr x0, x0, x1
-#endif /* !__APCFG_SUPPORTED__ */
#else /* HAS_APPLE_PAC */
// Enable caches and MMU
@@ -988,10 +853,8 @@
MOV64 x1, SCTLR_EL1_DEFAULT
#if HAS_APPLE_PAC
orr x1, x1, #(SCTLR_PACIB_ENABLED)
-#if !__APCFG_SUPPORTED__
MOV64 x2, SCTLR_JOP_KEYS_ENABLED
orr x1, x1, x2
-#endif /* !__APCFG_SUPPORTED__ */
#endif /* HAS_APPLE_PAC */
cmp x0, x1
bne .
diff -ru ../xnu-7195.60.75/osfmk/conf/Makefile.template ../xnu-7195.81.3/osfmk/conf/Makefile.template
--- ../xnu-7195.60.75/osfmk/conf/Makefile.template 2020-12-18 10:21:27.000000000 +0100
+++ ../xnu-7195.81.3/osfmk/conf/Makefile.template 2021-01-26 21:33:40.000000000 +0100
@@ -61,6 +61,8 @@
%OBJS
+%LIBOBJS
+
%CFILES
%CXXFILES
@@ -394,7 +396,17 @@
$(ECHO) $(TARGET)/$(CURRENT_KERNEL_CONFIG)/$${obj}; \
done > $(COMPONENT).filelist
+$(COMPONENT).libfilelist: $(LIBOBJS)
+ @$(LOG_LDFILELIST) "lib$(COMPONENT)"
+ $(_v)for obj in ${LIBOBJS}; do \
+ $(ECHO) $(TARGET)/$(CURRENT_KERNEL_CONFIG)/$${obj}; \
+ done > $(COMPONENT).libfilelist
+
+ifeq ($(RC_ProjectName),xnu_libraries)
+do_all: $(COMPONENT).libfilelist
+else
do_all: $(COMPONENT).filelist
+endif
do_build_all:: do_all
diff -ru ../xnu-7195.60.75/osfmk/conf/files.arm64 ../xnu-7195.81.3/osfmk/conf/files.arm64
--- ../xnu-7195.60.75/osfmk/conf/files.arm64 2020-12-18 10:21:27.000000000 +0100
+++ ../xnu-7195.81.3/osfmk/conf/files.arm64 2021-01-26 21:33:40.000000000 +0100
@@ -42,7 +42,7 @@
osfmk/arm64/sleh.c standard
osfmk/arm64/start.s optional nos_arm_asm
osfmk/arm64/pinst.s optional nos_arm_asm
-osfmk/arm64/cswitch.s standard
+osfmk/arm64/cswitch.s optional nos_arm_asm
osfmk/arm/machine_cpuid.c standard
osfmk/arm/machine_routines_common.c standard
osfmk/arm64/machine_routines.c standard
diff -ru ../xnu-7195.60.75/osfmk/ipc/ipc_port.c ../xnu-7195.81.3/osfmk/ipc/ipc_port.c
--- ../xnu-7195.60.75/osfmk/ipc/ipc_port.c 2020-12-18 10:21:00.000000000 +0100
+++ ../xnu-7195.81.3/osfmk/ipc/ipc_port.c 2021-01-26 21:33:13.000000000 +0100
@@ -967,6 +967,15 @@
/* check for a backup port */
pdrequest = port->ip_pdrequest;
+ /*
+ * Panic if a special reply has ip_pdrequest or ip_tempowner
+ * set, as this causes a type confusion while accessing the
+ * kdata union.
+ */
+ if (special_reply && (pdrequest || port->ip_tempowner)) {
+ panic("ipc_port_destroy: invalid state");
+ }
+
#if IMPORTANCE_INHERITANCE
/* determine how many assertions to drop and from whom */
if (port->ip_tempowner != 0) {
diff -ru ../xnu-7195.60.75/osfmk/ipc/ipc_right.c ../xnu-7195.81.3/osfmk/ipc/ipc_right.c
--- ../xnu-7195.60.75/osfmk/ipc/ipc_right.c 2020-12-18 10:21:01.000000000 +0100
+++ ../xnu-7195.81.3/osfmk/ipc/ipc_right.c 2021-01-26 21:33:14.000000000 +0100
@@ -2024,13 +2024,14 @@
}
/*
- * Disallow moving receive-right kobjects, e.g. mk_timer ports
+ * Disallow moving receive-right kobjects/kolabel, e.g. mk_timer ports
* The ipc_port structure uses the kdata union of kobject and
* imp_task exclusively. Thus, general use of a kobject port as
* a receive right can cause type confusion in the importance
* code.
*/
- if (io_kotype(entry->ie_object) != IKOT_NONE) {
+ if (io_is_kobject(entry->ie_object) ||
+ io_is_kolabeled(entry->ie_object)) {
/*
* Distinguish an invalid right, e.g., trying to move
* a send right as a receive right, from this
@@ -2049,7 +2050,7 @@
assert(port->ip_receiver_name == name);
assert(port->ip_receiver == space);
- if (port->ip_immovable_receive) {
+ if (port->ip_immovable_receive || port->ip_specialreply) {
assert(port->ip_receiver != ipc_space_kernel);
ip_unlock(port);
assert(current_task() != kernel_task);
@@ -2718,6 +2719,14 @@
assert(port->ip_mscount == 0);
assert(port->ip_receiver_name == MACH_PORT_NULL);
+ /*
+ * Don't copyout kobjects or kolabels as receive right
+ */
+ if (io_is_kobject(entry->ie_object) ||
+ io_is_kolabeled(entry->ie_object)) {
+ panic("ipc_right_copyout: Copyout kobject/kolabel as receive right");
+ }
+
imq_lock(&port->ip_messages);
dest = port->ip_destination;
diff -ru ../xnu-7195.60.75/osfmk/ipc/ipc_voucher.c ../xnu-7195.81.3/osfmk/ipc/ipc_voucher.c
--- ../xnu-7195.60.75/osfmk/ipc/ipc_voucher.c 2020-12-18 10:21:00.000000000 +0100
+++ ../xnu-7195.81.3/osfmk/ipc/ipc_voucher.c 2021-01-26 21:33:13.000000000 +0100
@@ -3129,9 +3129,13 @@
/* redeem of previous values is the value */
if (0 < prev_value_count) {
elem = (user_data_element_t)prev_values[0];
+
+ user_data_lock();
assert(0 < elem->e_made);
elem->e_made++;
- *out_value = prev_values[0];
+ user_data_unlock();
+
+ *out_value = (mach_voucher_attr_value_handle_t)elem;
return KERN_SUCCESS;
}
diff -ru ../xnu-7195.60.75/osfmk/ipc/mach_port.c ../xnu-7195.81.3/osfmk/ipc/mach_port.c
--- ../xnu-7195.60.75/osfmk/ipc/mach_port.c 2020-12-18 10:21:01.000000000 +0100
+++ ../xnu-7195.81.3/osfmk/ipc/mach_port.c 2021-01-26 21:33:14.000000000 +0100
@@ -1676,8 +1676,12 @@
}
/* port is locked and active */
- /* you cannot register for port death notifications on a kobject */
- if (ip_kotype(port) != IKOT_NONE) {
+ /*
+ * you cannot register for port death notifications on a kobject,
+ * kolabel or special reply port
+ */
+ if (ip_is_kobject(port) || ip_is_kolabeled(port) ||
+ port->ip_specialreply) {
ip_unlock(port);
return KERN_INVALID_RIGHT;
}
diff -ru ../xnu-7195.60.75/osfmk/kern/startup.c ../xnu-7195.81.3/osfmk/kern/startup.c
--- ../xnu-7195.60.75/osfmk/kern/startup.c 2020-12-18 10:20:49.000000000 +0100
+++ ../xnu-7195.81.3/osfmk/kern/startup.c 2021-01-26 21:33:02.000000000 +0100
@@ -933,6 +933,11 @@
timer_start(&processor->system_state, processor->last_dispatch);
processor->current_state = &processor->system_state;
+#if __AMP__
+ if (processor->processor_set->pset_cluster_type == PSET_AMP_P) {
+ timer_start(&thread->ptime, processor->last_dispatch);
+ }
+#endif
cpu_quiescent_counter_join(processor->last_dispatch);
diff -ru ../xnu-7195.60.75/osfmk/kern/task.c ../xnu-7195.81.3/osfmk/kern/task.c
--- ../xnu-7195.60.75/osfmk/kern/task.c 2020-12-18 10:20:46.000000000 +0100
+++ ../xnu-7195.81.3/osfmk/kern/task.c 2021-01-26 21:33:00.000000000 +0100
@@ -904,7 +904,7 @@
{ panic("task_init\n");}
#if defined(HAS_APPLE_PAC)
- kernel_task->rop_pid = KERNEL_ROP_ID;
+ kernel_task->rop_pid = ml_default_rop_pid();
kernel_task->jop_pid = ml_default_jop_pid();
// kernel_task never runs at EL0, but machine_thread_state_convert_from/to_user() relies on
// disable_user_jop to be false for kernel threads (e.g. in exception delivery on thread_exception_daemon)
diff -ru ../xnu-7195.60.75/osfmk/kern/thread.c ../xnu-7195.81.3/osfmk/kern/thread.c
--- ../xnu-7195.60.75/osfmk/kern/thread.c 2020-12-18 10:20:43.000000000 +0100
+++ ../xnu-7195.81.3/osfmk/kern/thread.c 2021-01-26 21:32:57.000000000 +0100
@@ -2916,14 +2916,23 @@
return KERN_INVALID_ARGUMENT;
}
+ bank_get_bank_ledger_thread_group_and_persona(voucher, &bankledger, &banktg, &persona_id);
+
+ thread_mtx_lock(thread);
+ /*
+ * Once the thread is started, we will look at `ith_voucher` without
+ * holding any lock.
+ *
+ * Setting the voucher hence can only be done by current_thread() or
+ * before it started. "started" flips under the thread mutex and must be
+ * tested under it too.
+ */
if (thread != current_thread() && thread->started) {
+ thread_mtx_unlock(thread);
return KERN_INVALID_ARGUMENT;
}
ipc_voucher_reference(voucher);
- bank_get_bank_ledger_thread_group_and_persona(voucher, &bankledger, &banktg, &persona_id);
-
- thread_mtx_lock(thread);
old_voucher = thread->ith_voucher;
thread->ith_voucher = voucher;
thread->ith_voucher_name = MACH_PORT_NULL;
diff -ru ../xnu-7195.60.75/osfmk/libsa/string.h ../xnu-7195.81.3/osfmk/libsa/string.h
--- ../xnu-7195.60.75/osfmk/libsa/string.h 2020-12-18 10:20:50.000000000 +0100
+++ ../xnu-7195.81.3/osfmk/libsa/string.h 2021-01-26 21:33:03.000000000 +0100
@@ -95,13 +95,11 @@
__kpi_deprecated_arm64_macos_unavailable
extern char *strncat(char *, const char *, size_t);
-/* strcmp() is deprecated. Please use strncmp() instead. */
-__kpi_deprecated_arm64_macos_unavailable
extern int strcmp(const char *, const char *);
+extern int strncmp(const char *, const char *, size_t);
extern size_t strlcpy(char *, const char *, size_t);
extern size_t strlcat(char *, const char *, size_t);
-extern int strncmp(const char *, const char *, size_t);
extern int strcasecmp(const char *s1, const char *s2);
extern int strncasecmp(const char *s1, const char *s2, size_t n);
diff -ru ../xnu-7195.60.75/osfmk/mach/memory_object_types.h ../xnu-7195.81.3/osfmk/mach/memory_object_types.h
--- ../xnu-7195.60.75/osfmk/mach/memory_object_types.h 2020-12-18 10:21:02.000000000 +0100
+++ ../xnu-7195.81.3/osfmk/mach/memory_object_types.h 2021-01-26 21:33:15.000000000 +0100
@@ -166,6 +166,11 @@
kern_return_t (*memory_object_data_reclaim)(
memory_object_t mem_obj,
boolean_t reclaim_backing_store);
+ boolean_t (*memory_object_backing_object)(
+ memory_object_t mem_obj,
+ memory_object_offset_t mem_obj_offset,
+ vm_object_t *backing_object,
+ vm_object_offset_t *backing_offset);
const char *memory_object_pager_name;
} * memory_object_pager_ops_t;
@@ -301,6 +306,11 @@
__BEGIN_DECLS
extern void memory_object_reference(memory_object_t object);
extern void memory_object_deallocate(memory_object_t object);
+extern boolean_t memory_object_backing_object(
+ memory_object_t mem_obj,
+ memory_object_offset_t offset,
+ vm_object_t *backing_object,
+ vm_object_offset_t *backing_offset);
extern void memory_object_default_reference(memory_object_default_t);
extern void memory_object_default_deallocate(memory_object_default_t);
diff -ru ../xnu-7195.60.75/osfmk/vm/bsd_vm.c ../xnu-7195.81.3/osfmk/vm/bsd_vm.c
--- ../xnu-7195.60.75/osfmk/vm/bsd_vm.c 2020-12-18 10:20:56.000000000 +0100
+++ ../xnu-7195.81.3/osfmk/vm/bsd_vm.c 2021-01-26 21:33:09.000000000 +0100
@@ -109,6 +109,7 @@
.memory_object_map = vnode_pager_map,
.memory_object_last_unmap = vnode_pager_last_unmap,
.memory_object_data_reclaim = NULL,
+ .memory_object_backing_object = NULL,
.memory_object_pager_name = "vnode pager"
};
diff -ru ../xnu-7195.60.75/osfmk/vm/device_vm.c ../xnu-7195.81.3/osfmk/vm/device_vm.c
--- ../xnu-7195.60.75/osfmk/vm/device_vm.c 2020-12-18 10:20:59.000000000 +0100
+++ ../xnu-7195.81.3/osfmk/vm/device_vm.c 2021-01-26 21:33:12.000000000 +0100
@@ -78,6 +78,7 @@
.memory_object_map = device_pager_map,
.memory_object_last_unmap = device_pager_last_unmap,
.memory_object_data_reclaim = NULL,
+ .memory_object_backing_object = NULL,
.memory_object_pager_name = "device pager"
};
diff -ru ../xnu-7195.60.75/osfmk/vm/memory_object.c ../xnu-7195.81.3/osfmk/vm/memory_object.c
--- ../xnu-7195.60.75/osfmk/vm/memory_object.c 2020-12-18 10:20:56.000000000 +0100
+++ ../xnu-7195.81.3/osfmk/vm/memory_object.c 2021-01-26 21:33:09.000000000 +0100
@@ -2332,6 +2332,24 @@
reclaim_backing_store);
}
+boolean_t
+memory_object_backing_object
+(
+ memory_object_t memory_object,
+ memory_object_offset_t offset,
+ vm_object_t *backing_object,
+ vm_object_offset_t *backing_offset)
+{
+ if (memory_object->mo_pager_ops->memory_object_backing_object == NULL) {
+ return FALSE;
+ }
+ return (memory_object->mo_pager_ops->memory_object_backing_object)(
+ memory_object,
+ offset,
+ backing_object,
+ backing_offset);
+}
+
upl_t
convert_port_to_upl(
ipc_port_t port)
diff -ru ../xnu-7195.60.75/osfmk/vm/pmap.h ../xnu-7195.81.3/osfmk/vm/pmap.h
--- ../xnu-7195.60.75/osfmk/vm/pmap.h 2020-12-18 10:20:56.000000000 +0100
+++ ../xnu-7195.81.3/osfmk/vm/pmap.h 2021-01-26 21:33:09.000000000 +0100
@@ -911,6 +911,8 @@
extern ledger_t pmap_ledger_alloc(void);
extern void pmap_ledger_free(ledger_t);
+extern kern_return_t pmap_cs_allow_invalid(pmap_t pmap);
+
#if __arm64__
extern bool pmap_is_exotic(pmap_t pmap);
#else /* __arm64__ */
diff -ru ../xnu-7195.60.75/osfmk/vm/vm_apple_protect.c ../xnu-7195.81.3/osfmk/vm/vm_apple_protect.c
--- ../xnu-7195.60.75/osfmk/vm/vm_apple_protect.c 2020-12-18 10:20:56.000000000 +0100
+++ ../xnu-7195.81.3/osfmk/vm/vm_apple_protect.c 2021-01-26 21:33:09.000000000 +0100
@@ -113,6 +113,11 @@
kern_return_t apple_protect_pager_map(memory_object_t mem_obj,
vm_prot_t prot);
kern_return_t apple_protect_pager_last_unmap(memory_object_t mem_obj);
+boolean_t apple_protect_pager_backing_object(
+ memory_object_t mem_obj,
+ memory_object_offset_t mem_obj_offset,
+ vm_object_t *backing_object,
+ vm_object_offset_t *backing_offset);
#define CRYPT_INFO_DEBUG 0
void crypt_info_reference(struct pager_crypt_info *crypt_info);
@@ -135,6 +140,7 @@
.memory_object_map = apple_protect_pager_map,
.memory_object_last_unmap = apple_protect_pager_last_unmap,
.memory_object_data_reclaim = NULL,
+ .memory_object_backing_object = apple_protect_pager_backing_object,
.memory_object_pager_name = "apple_protect"
};
@@ -992,6 +998,25 @@
return KERN_SUCCESS;
}
+boolean_t
+apple_protect_pager_backing_object(
+ memory_object_t mem_obj,
+ memory_object_offset_t offset,
+ vm_object_t *backing_object,
+ vm_object_offset_t *backing_offset)
+{
+ apple_protect_pager_t pager;
+
+ PAGER_DEBUG(PAGER_ALL,
+ ("apple_protect_pager_backing_object: %p\n", mem_obj));
+
+ pager = apple_protect_pager_lookup(mem_obj);
+
+ *backing_object = pager->backing_object;
+ *backing_offset = pager->backing_offset + offset;
+
+ return TRUE;
+}
/*
*
diff -ru ../xnu-7195.60.75/osfmk/vm/vm_compressor_pager.c ../xnu-7195.81.3/osfmk/vm/vm_compressor_pager.c
--- ../xnu-7195.60.75/osfmk/vm/vm_compressor_pager.c 2020-12-18 10:20:58.000000000 +0100
+++ ../xnu-7195.81.3/osfmk/vm/vm_compressor_pager.c 2021-01-26 21:33:12.000000000 +0100
@@ -133,6 +133,7 @@
.memory_object_map = compressor_memory_object_map,
.memory_object_last_unmap = compressor_memory_object_last_unmap,
.memory_object_data_reclaim = compressor_memory_object_data_reclaim,
+ .memory_object_backing_object = NULL,
.memory_object_pager_name = "compressor pager"
};
diff -ru ../xnu-7195.60.75/osfmk/vm/vm_fourk_pager.c ../xnu-7195.81.3/osfmk/vm/vm_fourk_pager.c
--- ../xnu-7195.60.75/osfmk/vm/vm_fourk_pager.c 2020-12-18 10:20:56.000000000 +0100
+++ ../xnu-7195.81.3/osfmk/vm/vm_fourk_pager.c 2021-01-26 21:33:09.000000000 +0100
@@ -130,6 +130,7 @@
.memory_object_map = fourk_pager_map,
.memory_object_last_unmap = fourk_pager_last_unmap,
.memory_object_data_reclaim = NULL,
+ .memory_object_backing_object = NULL,
.memory_object_pager_name = "fourk_pager"
};
diff -ru ../xnu-7195.60.75/osfmk/vm/vm_map.c ../xnu-7195.81.3/osfmk/vm/vm_map.c
--- ../xnu-7195.60.75/osfmk/vm/vm_map.c 2020-12-18 10:20:58.000000000 +0100
+++ ../xnu-7195.81.3/osfmk/vm/vm_map.c 2021-01-26 21:33:12.000000000 +0100
@@ -3203,8 +3203,8 @@
do {
new_entry = vm_map_entry_insert(map,
entry, tmp_start, tmp_end,
- object, offset, needs_copy,
- FALSE, FALSE,
+ object, offset, vmk_flags,
+ needs_copy, FALSE, FALSE,
cur_protection, max_protection,
VM_BEHAVIOR_DEFAULT,
(entry_for_jit && !VM_MAP_POLICY_ALLOW_JIT_INHERIT(map) ?
@@ -3868,6 +3868,7 @@
VM_MAP_PAGE_MASK(map)),
copy_object,
0, /* offset */
+ vmk_flags,
FALSE, /* needs_copy */
FALSE,
FALSE,
@@ -6158,6 +6159,12 @@
return KERN_PROTECTION_FAILURE;
}
+ if (current->used_for_jit &&
+ pmap_has_prot_policy(map->pmap, current->translated_allow_execute, current->protection)) {
+ vm_map_unlock(map);
+ return KERN_PROTECTION_FAILURE;
+ }
+
if ((new_prot & VM_PROT_WRITE) &&
(new_prot & VM_PROT_EXECUTE) &&
#if XNU_TARGET_OS_OSX
@@ -16199,6 +16206,7 @@
vm_map_offset_t end,
vm_object_t object,
vm_object_offset_t offset,
+ vm_map_kernel_flags_t vmk_flags,
boolean_t needs_copy,
boolean_t is_shared,
boolean_t in_transition,
@@ -16313,8 +16321,7 @@
* Insert the new entry into the list.
*/
- vm_map_store_entry_link(map, insp_entry, new_entry,
- VM_MAP_KERNEL_FLAGS_NONE);
+ vm_map_store_entry_link(map, insp_entry, new_entry, vmk_flags);
map->size += end - start;
/*
@@ -16804,13 +16811,6 @@
if (!copy) {
if (src_entry->used_for_jit == TRUE) {
if (same_map) {
-#if __APRR_SUPPORTED__
- /*
- * Disallow re-mapping of any JIT regions on APRR devices.
- */
- result = KERN_PROTECTION_FAILURE;
- break;
-#endif /* __APRR_SUPPORTED__*/
} else if (!VM_MAP_POLICY_ALLOW_JIT_SHARING(map)) {
/*
* Cannot allow an entry describing a JIT
@@ -20123,6 +20123,13 @@
return map->cs_enforcement;
}
+kern_return_t
+vm_map_cs_wx_enable(
+ vm_map_t map)
+{
+ return pmap_cs_allow_invalid(vm_map_pmap(map));
+}
+
void
vm_map_cs_enforcement_set(
vm_map_t map,
diff -ru ../xnu-7195.60.75/osfmk/vm/vm_map.h ../xnu-7195.81.3/osfmk/vm/vm_map.h
--- ../xnu-7195.60.75/osfmk/vm/vm_map.h 2020-12-18 10:20:57.000000000 +0100
+++ ../xnu-7195.81.3/osfmk/vm/vm_map.h 2021-01-26 21:33:10.000000000 +0100
@@ -794,6 +794,7 @@
vm_map_offset_t end,
vm_object_t object,
vm_object_offset_t offset,
+ vm_map_kernel_flags_t vmk_flags,
boolean_t needs_copy,
boolean_t is_shared,
boolean_t in_transition,
@@ -1267,6 +1268,8 @@
vm_map_t map,
boolean_t val);
+extern kern_return_t vm_map_cs_wx_enable(vm_map_t map);
+
/* wire down a region */
#ifdef XNU_KERNEL_PRIVATE
diff -ru ../xnu-7195.60.75/osfmk/vm/vm_pageout.c ../xnu-7195.81.3/osfmk/vm/vm_pageout.c
--- ../xnu-7195.60.75/osfmk/vm/vm_pageout.c 2020-12-18 10:20:57.000000000 +0100
+++ ../xnu-7195.81.3/osfmk/vm/vm_pageout.c 2021-01-26 21:33:10.000000000 +0100
@@ -7055,7 +7055,7 @@
if (upl->flags & UPL_SHADOWED) {
offset = 0;
} else {
- offset = upl_adjusted_offset(upl, VM_MAP_PAGE_MASK(map)) + upl->map_object->paging_offset;
+ offset = upl_adjusted_offset(upl, VM_MAP_PAGE_MASK(map)) - upl->map_object->paging_offset;
}
size = upl_adjusted_size(upl, VM_MAP_PAGE_MASK(map));
diff -ru ../xnu-7195.60.75/osfmk/vm/vm_shared_region_pager.c ../xnu-7195.81.3/osfmk/vm/vm_shared_region_pager.c
--- ../xnu-7195.60.75/osfmk/vm/vm_shared_region_pager.c 2020-12-18 10:20:56.000000000 +0100
+++ ../xnu-7195.81.3/osfmk/vm/vm_shared_region_pager.c 2021-01-26 21:33:09.000000000 +0100
@@ -115,6 +115,11 @@
kern_return_t shared_region_pager_map(memory_object_t mem_obj,
vm_prot_t prot);
kern_return_t shared_region_pager_last_unmap(memory_object_t mem_obj);
+boolean_t shared_region_pager_backing_object(
+ memory_object_t mem_obj,
+ memory_object_offset_t mem_obj_offset,
+ vm_object_t *backing_object,
+ vm_object_offset_t *backing_offset);
/*
* Vector of VM operations for this EMM.
@@ -133,6 +138,7 @@
.memory_object_map = shared_region_pager_map,
.memory_object_last_unmap = shared_region_pager_last_unmap,
.memory_object_data_reclaim = NULL,
+ .memory_object_backing_object = shared_region_pager_backing_object,
.memory_object_pager_name = "shared_region"
};
@@ -1095,6 +1101,26 @@
return KERN_SUCCESS;
}
+boolean_t
+shared_region_pager_backing_object(
+ memory_object_t mem_obj,
+ memory_object_offset_t offset,
+ vm_object_t *backing_object,
+ vm_object_offset_t *backing_offset)
+{
+ shared_region_pager_t pager;
+
+ PAGER_DEBUG(PAGER_ALL,
+ ("shared_region_pager_backing_object: %p\n", mem_obj));
+
+ pager = shared_region_pager_lookup(mem_obj);
+
+ *backing_object = pager->srp_backing_object;
+ *backing_offset = pager->srp_backing_offset + offset;
+
+ return TRUE;
+}
+
/*
*
diff -ru ../xnu-7195.60.75/osfmk/vm/vm_swapfile_pager.c ../xnu-7195.81.3/osfmk/vm/vm_swapfile_pager.c
--- ../xnu-7195.60.75/osfmk/vm/vm_swapfile_pager.c 2020-12-18 10:20:57.000000000 +0100
+++ ../xnu-7195.81.3/osfmk/vm/vm_swapfile_pager.c 2021-01-26 21:33:10.000000000 +0100
@@ -127,6 +127,7 @@
.memory_object_map = swapfile_pager_map,
.memory_object_last_unmap = swapfile_pager_last_unmap,
.memory_object_data_reclaim = NULL,
+ .memory_object_backing_object = NULL,
.memory_object_pager_name = "swapfile pager"
};
diff -ru ../xnu-7195.60.75/osfmk/vm/vm_user.c ../xnu-7195.81.3/osfmk/vm/vm_user.c
--- ../xnu-7195.60.75/osfmk/vm/vm_user.c 2020-12-18 10:20:58.000000000 +0100
+++ ../xnu-7195.81.3/osfmk/vm/vm_user.c 2021-01-26 21:33:11.000000000 +0100
@@ -2727,7 +2727,17 @@
required_protection = protections;
}
cur_prot = VM_PROT_ALL;
- vmk_flags.vmkf_copy_pageable = TRUE;
+ if (target_map->pmap == kernel_pmap) {
+ /*
+ * Get "reserved" map entries to avoid deadlocking
+ * on the kernel map or a kernel submap if we
+ * run out of VM map entries and need to refill that
+ * zone.
+ */
+ vmk_flags.vmkf_copy_pageable = FALSE;
+ } else {
+ vmk_flags.vmkf_copy_pageable = TRUE;
+ }
vmk_flags.vmkf_copy_same_map = FALSE;
assert(map_size != 0);
kr = vm_map_copy_extract(target_map,
diff -ru ../xnu-7195.60.75/osfmk/x86_64/pmap.c ../xnu-7195.81.3/osfmk/x86_64/pmap.c
--- ../xnu-7195.60.75/osfmk/x86_64/pmap.c 2020-12-18 10:21:09.000000000 +0100
+++ ../xnu-7195.81.3/osfmk/x86_64/pmap.c 2021-01-26 21:33:22.000000000 +0100
@@ -3288,6 +3288,13 @@
// Unsupported on this architecture.
}
+kern_return_t
+pmap_cs_allow_invalid(__unused pmap_t pmap)
+{
+ // Unsupported on this architecture.
+ return KERN_SUCCESS;
+}
+
void *
pmap_claim_reserved_ppl_page(void)
{
diff -ru ../xnu-7195.60.75/pexpert/conf/Makefile.template ../xnu-7195.81.3/pexpert/conf/Makefile.template
--- ../xnu-7195.60.75/pexpert/conf/Makefile.template 2020-12-18 10:21:24.000000000 +0100
+++ ../xnu-7195.81.3/pexpert/conf/Makefile.template 2021-01-26 21:33:37.000000000 +0100
@@ -40,6 +40,8 @@
%OBJS
+%LIBOBJS
+
%CFILES
%CXXFILES
@@ -87,7 +89,17 @@
$(ECHO) $(TARGET)/$(CURRENT_KERNEL_CONFIG)/$${obj}; \
done > $(COMPONENT).filelist
+$(COMPONENT).libfilelist: $(LIBOBJS)
+ @$(LOG_LDFILELIST) "lib$(COMPONENT)"
+ $(_v)for obj in ${LIBOBJS}; do \
+ $(ECHO) $(TARGET)/$(CURRENT_KERNEL_CONFIG)/$${obj}; \
+ done > $(COMPONENT).libfilelist
+
+ifeq ($(RC_ProjectName),xnu_libraries)
+do_all: $(COMPONENT).libfilelist
+else
do_all: $(COMPONENT).filelist
+endif
do_build_all:: do_all
diff -ru ../xnu-7195.60.75/pexpert/pexpert/arm64/H13.h ../xnu-7195.81.3/pexpert/pexpert/arm64/H13.h
--- ../xnu-7195.60.75/pexpert/pexpert/arm64/H13.h 2020-12-18 10:20:42.000000000 +0100
+++ ../xnu-7195.81.3/pexpert/pexpert/arm64/H13.h 2021-01-26 21:32:55.000000000 +0100
@@ -68,12 +68,6 @@
/* Optional CPU features -- an SoC may #undef these */
#define ARM_PARAMETERIZED_PMAP 1
#define __ARM_MIXED_PAGE_SIZE__ 1
-#define HAS_APCTL_EL1_USERKEYEN 1 /* Supports use of KernKey in EL0 */
-
-/*
- * APSTS_SUPPORTED: Pointer authentication status registers, MKEYVld flag moved here from APCTL on APPLELIGHTNING (H12)
- */
-#define __APSTS_SUPPORTED__ 1
#define __ARM_RANGE_TLBI__ 1
#define __ARM_E2H__ 1
diff -ru ../xnu-7195.60.75/pexpert/pexpert/arm64/apple_arm64_common.h ../xnu-7195.81.3/pexpert/pexpert/arm64/apple_arm64_common.h
--- ../xnu-7195.60.75/pexpert/pexpert/arm64/apple_arm64_common.h 2020-12-18 10:20:42.000000000 +0100
+++ ../xnu-7195.81.3/pexpert/pexpert/arm64/apple_arm64_common.h 2021-01-26 21:32:55.000000000 +0100
@@ -57,9 +57,6 @@
#if defined(CPU_HAS_APPLE_PAC) && defined(__arm64e__)
#define HAS_APPLE_PAC 1 /* Has Apple ARMv8.3a pointer authentication */
-#define KERNEL_ROP_ID 0xfeedfacefeedfacf /* placeholder static kernel ROP diversifier */
-#define KERNEL_KERNKEY_ID (KERNEL_ROP_ID + 4)
-#define KERNEL_JOP_ID (KERNEL_KERNKEY_ID + 2)
#endif
#include <pexpert/arm64/apple_arm64_regs.h>
diff -ru ../xnu-7195.60.75/pexpert/pexpert/arm64/apple_arm64_regs.h ../xnu-7195.81.3/pexpert/pexpert/arm64/apple_arm64_regs.h
--- ../xnu-7195.60.75/pexpert/pexpert/arm64/apple_arm64_regs.h 2020-12-18 10:20:42.000000000 +0100
+++ ../xnu-7195.81.3/pexpert/pexpert/arm64/apple_arm64_regs.h 2021-01-26 21:32:55.000000000 +0100
@@ -350,18 +350,8 @@
#if defined(HAS_APPLE_PAC)
-#ifdef ASSEMBLER
-#define ARM64_REG_APCTL_EL1 S3_4_c15_c0_4
-#define ARM64_REG_APSTS_EL1 S3_6_c15_c12_4
-#else /* ASSEMBLER */
-#define ARM64_REG_APCTL_EL1 "S3_4_c15_c0_4"
-#define ARM64_REG_APSTS_EL1 "S3_6_c15_c12_4"
-#endif /* ASSEMBLER */
#if ASSEMBLER
-#define ARM64_REG_KERNELKEYLO_EL1 S3_4_c15_c1_0
-#define ARM64_REG_KERNELKEYHI_EL1 S3_4_c15_c1_1
-
#define ARM64_REG_APIAKEYLO_EL1 S3_0_c2_c1_0
#define ARM64_REG_APIAKEYHI_EL1 S3_0_c2_c1_1
#define ARM64_REG_APIBKEYLO_EL1 S3_0_c2_c1_2
@@ -375,11 +365,6 @@
#define ARM64_REG_APGAKEYLO_EL1 S3_0_c2_c3_0
#define ARM64_REG_APGAKEYHI_EL1 S3_0_c2_c3_1
#else /* ASSEMBLER */
-#define ARM64_REG_APCTL_EL1 "S3_4_c15_c0_4"
-
-#define ARM64_REG_KERNELKEYLO_EL1 "S3_4_c15_c1_0"
-#define ARM64_REG_KERNELKEYHI_EL1 "S3_4_c15_c1_1"
-
#define ARM64_REG_APIAKEYLO_EL1 "S3_0_c2_c1_0"
#define ARM64_REG_APIAKEYHI_EL1 "S3_0_c2_c1_1"
#define ARM64_REG_APIBKEYLO_EL1 "S3_0_c2_c1_2"
diff -ru ../xnu-7195.60.75/san/Kasan_kasan.exports ../xnu-7195.81.3/san/Kasan_kasan.exports
--- ../xnu-7195.60.75/san/Kasan_kasan.exports 2020-12-18 10:21:24.000000000 +0100
+++ ../xnu-7195.81.3/san/Kasan_kasan.exports 2021-01-26 21:33:37.000000000 +0100
@@ -102,6 +102,7 @@
___asan_version_mismatch_check_apple_1000
___asan_version_mismatch_check_apple_1001
___asan_version_mismatch_check_apple_clang_1100
+___asan_version_mismatch_check_apple_clang_1200
___asan_init
___asan_memcpy
___asan_memmove
diff -ru ../xnu-7195.60.75/san/Makefile ../xnu-7195.81.3/san/Makefile
--- ../xnu-7195.60.75/san/Makefile 2020-12-18 10:21:24.000000000 +0100
+++ ../xnu-7195.81.3/san/Makefile 2021-01-26 21:33:37.000000000 +0100
@@ -58,6 +58,7 @@
SYMBOL_SET_BUILD += $(OBJPATH)/Kasan_kasan.symbolset
endif
+ifneq ($(RC_ProjectName),xnu_libraries)
# Our external dependency on allsymbols is fine because this runs in a later phase (config_install vs. config_all)
$(OBJPATH)/%.symbolset: $(SOURCE)/%.exports
@$(LOG_SYMBOLSET) "$*$(Color0) ($(ColorLF)$(CURRENT_ARCH_CONFIG_LC)$(Color0))"
@@ -85,7 +86,9 @@
exit $$cmdstatus
do_config_install:: $(SYMROOT_KEXT) $(DSTROOT_KEXT)
-
+else
+# We are building XNU as a static library - no need for the symbol kexts
+endif
# Install helper scripts
diff -ru ../xnu-7195.60.75/san/conf/Makefile.template ../xnu-7195.81.3/san/conf/Makefile.template
--- ../xnu-7195.60.75/san/conf/Makefile.template 2020-12-18 10:21:24.000000000 +0100
+++ ../xnu-7195.81.3/san/conf/Makefile.template 2021-01-26 21:33:37.000000000 +0100
@@ -36,6 +36,8 @@
%OBJS
+%LIBOBJS
+
%CFILES
%CXXFILES
@@ -68,13 +70,23 @@
$(ECHO) $(TARGET)/$(CURRENT_KERNEL_CONFIG)/$${obj}; \
done > $(COMPONENT).filelist
+$(COMPONENT).libfilelist: $(LIBOBJS)
+ @$(LOG_LDFILELIST) "lib$(COMPONENT)"
+ $(_v)for obj in ${LIBOBJS}; do \
+ $(ECHO) $(TARGET)/$(CURRENT_KERNEL_CONFIG)/$${obj}; \
+ done > $(COMPONENT).libfilelist
+
$(TARGET)/$(CURRENT_KERNEL_CONFIG)/kasan_blacklist_dynamic.h: $(SRCROOT)/$(COMPONENT)/kasan-blacklist-dynamic
@$(LOG_GENERATE) "$(notdir $@)"
@$(SRCROOT)/$(COMPONENT)/tools/generate_dynamic_blacklist.py "$<" > "$@"
$(SRCROOT)/$(COMPONENT)/kasan_dynamic_blacklist.c: $(TARGET)/$(CURRENT_KERNEL_CONFIG)/kasan_blacklist_dynamic.h
+ifeq ($(RC_ProjectName),xnu_libraries)
+do_all: $(COMPONENT).libfilelist
+else
do_all: $(COMPONENT).filelist
+endif
do_build_all:: do_all
diff -ru ../xnu-7195.60.75/san/kasan.c ../xnu-7195.81.3/san/kasan.c
--- ../xnu-7195.60.75/san/kasan.c 2020-12-18 10:20:42.000000000 +0100
+++ ../xnu-7195.81.3/san/kasan.c 2021-01-26 21:32:56.000000000 +0100
@@ -1466,6 +1466,7 @@
UNUSED_ABI(__asan_version_mismatch_check_apple_1000, void);
UNUSED_ABI(__asan_version_mismatch_check_apple_1001, void);
UNUSED_ABI(__asan_version_mismatch_check_apple_clang_1100, void);
+UNUSED_ABI(__asan_version_mismatch_check_apple_clang_1200, void);
void OS_NORETURN UNSUPPORTED_API(__asan_init_v5, void);
void OS_NORETURN UNSUPPORTED_API(__asan_register_globals, uptr a, uptr b);
diff -ru ../xnu-7195.60.75/security/conf/Makefile.template ../xnu-7195.81.3/security/conf/Makefile.template
--- ../xnu-7195.60.75/security/conf/Makefile.template 2020-12-18 10:21:20.000000000 +0100
+++ ../xnu-7195.81.3/security/conf/Makefile.template 2021-01-26 21:33:33.000000000 +0100
@@ -45,6 +45,8 @@
%OBJS
+%LIBOBJS
+
%CFILES
%CXXFILES
@@ -90,7 +92,17 @@
$(ECHO) $(TARGET)/$(CURRENT_KERNEL_CONFIG)/$${obj}; \
done > $(COMPONENT).filelist
+$(COMPONENT).libfilelist: $(LIBOBJS)
+ @$(LOG_LDFILELIST) "lib$(COMPONENT)"
+ $(_v)for obj in ${LIBOBJS}; do \
+ $(ECHO) $(TARGET)/$(CURRENT_KERNEL_CONFIG)/$${obj}; \
+ done > $(COMPONENT).libfilelist
+
+ifeq ($(RC_ProjectName),xnu_libraries)
+do_all: $(COMPONENT).libfilelist
+else
do_all: $(COMPONENT).filelist
+endif
do_build_all:: do_all
diff -ru ../xnu-7195.60.75/tools/lldbmacros/Makefile ../xnu-7195.81.3/tools/lldbmacros/Makefile
--- ../xnu-7195.60.75/tools/lldbmacros/Makefile 2020-12-18 10:21:18.000000000 +0100
+++ ../xnu-7195.81.3/tools/lldbmacros/Makefile 2021-01-26 21:33:30.000000000 +0100
@@ -15,8 +15,10 @@
LLDBMACROS_DEST:=$(LLDBMACROS_BOOTSTRAP_DEST)/lldbmacros/
LLDBMACROS_USERDEBUG_FILES=
ifeq ($(BUILD_STATIC_LINK),1)
+ifneq ($(BUILD_XNU_LIBRARY),1)
KERNEL_STATIC_DSYM_LLDBMACROS := $(OBJPATH)/$(KERNEL_FILE_NAME).link/$(KERNEL_FILE_NAME).dSYM/$(DSYMLLDBMACROSDIR)/lldbmacros/
endif
+endif
LLDBMACROS_USERDEBUG_FILES:= \
usertaskdebugging/__init__.py \
@@ -93,19 +95,25 @@
$(eval $(call INSTALLPYTHON_RULE_template,$(LLDBMACROS_BOOTSTRAP_DEST)/$(KERNEL_LLDBBOOTSTRAP_NAME),$(LLDBMACROS_SOURCE)/core/xnu_lldb_init.py,kbpydir,$(DATA_UNIFDEF),$(LLDBMACROS_BOOTSTRAP_DEST)/))
ifeq ($(BUILD_STATIC_LINK),1)
+ifneq ($(BUILD_XNU_LIBRARY),1)
INSTALL_STATIC_DSYM_LLDBMACROS_PYTHON_FILES=$(addprefix $(KERNEL_STATIC_DSYM_LLDBMACROS), $(LLDBMACROS_PYTHON_FILES))
$(eval $(call INSTALLPYTHON_RULE_template,$(INSTALL_STATIC_DSYM_LLDBMACROS_PYTHON_FILES),$(LLDBMACROS_SOURCE)%,sdpydir,$(DATA_UNIFDEF),$(KERNEL_STATIC_DSYM_LLDBMACROS)))
$(eval $(call INSTALLPYTHON_RULE_template,$(KERNEL_STATIC_DSYM_LLDBMACROS)/../$(KERNEL_LLDBBOOTSTRAP_NAME),$(LLDBMACROS_SOURCE)/core/xnu_lldb_init.py,kbsdpydir,$(DATA_UNIFDEF),$(KERNEL_STATIC_DSYM_LLDBMACROS)/../))
endif
+endif
ifeq ($(BUILD_STATIC_LINK),1)
+ifneq ($(BUILD_XNU_LIBRARY),1)
STATIC_DSYM_LLDBMACROS_INSTALL_TARGETS := \
$(INSTALL_STATIC_DSYM_LLDBMACROS_PYTHON_FILES) \
$(KERNEL_STATIC_DSYM_LLDBMACROS)/../$(KERNEL_LLDBBOOTSTRAP_NAME)
endif
+endif
lldbmacros_install: $(INSTALL_LLDBMACROS_PYTHON_FILES) $(LLDBMACROS_BOOTSTRAP_DEST)/$(KERNEL_LLDBBOOTSTRAP_NAME) $(STATIC_DSYM_LLDBMACROS_INSTALL_TARGETS)
$(_v)$(MKDIR) $(LLDBMACROS_DEST)/builtinkexts
ifeq ($(BUILD_STATIC_LINK),1)
+ifneq ($(BUILD_XNU_LIBRARY),1)
$(_v)$(MKDIR) $(KERNEL_STATIC_DSYM_LLDBMACROS)/builtinkexts
endif
+endif
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment