Skip to content

Instantly share code, notes, and snippets.

@MasterDuke17
Last active January 21, 2021 14:56
Show Gist options
  • Star 0 You must be signed in to star a gist
  • Fork 0 You must be signed in to fork a gist
  • Save MasterDuke17/fd2360d357bfc85e7dfba607aad7722f to your computer and use it in GitHub Desktop.
Save MasterDuke17/fd2360d357bfc85e7dfba607aad7722f to your computer and use it in GitHub Desktop.
diff --git src/6model/reprs/MVMSpeshCandidate.c src/6model/reprs/MVMSpeshCandidate.c
index 370571401..437f46bee 100644
--- src/6model/reprs/MVMSpeshCandidate.c
+++ src/6model/reprs/MVMSpeshCandidate.c
@@ -170,6 +170,8 @@ void MVM_spesh_candidate_add(MVMThreadContext *tc, MVMSpeshPlanned *p) {
MVMSpeshCandidate *candidate;
MVMSpeshCandidate **new_candidate_list;
MVMStaticFrameSpesh *spesh;
+ MVMSpeshCandidatesAndArgGuards *cands_and_arg_guards;
+ MVMSpeshCandidatesAndArgGuards *new_cands_and_arg_guards;
MVMuint64 start_time = 0, spesh_time = 0, jit_time = 0, end_time;
/* If we've reached our specialization limit, don't continue. */
@@ -209,8 +211,8 @@ void MVM_spesh_candidate_add(MVMThreadContext *tc, MVMSpeshPlanned *p) {
spesh_gc_point(tc);
/* Perform the optimization and, if we're logging, dump out the result. */
- if (p->cs_stats->cs)
- MVM_spesh_args(tc, sg, p->cs_stats->cs, p->type_tuple);
+ if (p->type_info.cs_stats->cs)
+ MVM_spesh_args(tc, sg, p->type_info.cs_stats->cs, p->type_info.type_tuple);
spesh_gc_point(tc);
MVM_spesh_facts_discover(tc, sg, p, 0);
spesh_gc_point(tc);
@@ -236,9 +238,9 @@ void MVM_spesh_candidate_add(MVMThreadContext *tc, MVMSpeshPlanned *p) {
tc->in_spesh = 1;
#endif
- candidate->body.cs = p->cs_stats->cs;
- candidate->body.type_tuple = p->type_tuple
- ? MVM_spesh_plan_copy_type_tuple(tc, candidate->body.cs, p->type_tuple)
+ candidate->body.cs = p->type_info.cs_stats->cs;
+ candidate->body.type_tuple = p->type_info.type_tuple
+ ? MVM_spesh_plan_copy_type_tuple(tc, candidate->body.cs, p->type_info.type_tuple)
: NULL;
candidate->body.bytecode = sc->bytecode;
candidate->body.bytecode_size = sc->bytecode_size;
@@ -309,16 +311,23 @@ void MVM_spesh_candidate_add(MVMThreadContext *tc, MVMSpeshPlanned *p) {
/* Create a new candidate list and copy any existing ones. Free memory
* using the FSA safepoint mechanism. */
spesh = p->sf->body.spesh;
+ cands_and_arg_guards = spesh->body.spesh_cands_and_arg_guards;
+ new_cands_and_arg_guards = MVM_fixed_size_alloc_zeroed(tc, tc->instance->fsa,
+ sizeof(MVMSpeshCandidatesAndArgGuards));
new_candidate_list = MVM_fixed_size_alloc(tc, tc->instance->fsa,
(spesh->body.num_spesh_candidates + 1) * sizeof(MVMSpeshCandidate *));
if (spesh->body.num_spesh_candidates) {
size_t orig_size = spesh->body.num_spesh_candidates * sizeof(MVMSpeshCandidate *);
- memcpy(new_candidate_list, spesh->body.spesh_candidates, orig_size);
+ memcpy(new_candidate_list, cands_and_arg_guards->spesh_candidates, orig_size);
MVM_fixed_size_free_at_safepoint(tc, tc->instance->fsa, orig_size,
- spesh->body.spesh_candidates);
+ cands_and_arg_guards->spesh_candidates);
+ MVM_spesh_arg_guard_destroy(tc, cands_and_arg_guards->spesh_arg_guard, 1);
+ MVM_fixed_size_free_at_safepoint(tc, tc->instance->fsa, sizeof(MVMSpeshCandidatesAndArgGuards),
+ cands_and_arg_guards);
}
MVM_ASSIGN_REF(tc, &(spesh->common.header), new_candidate_list[spesh->body.num_spesh_candidates], candidate);
- spesh->body.spesh_candidates = new_candidate_list;
+ new_cands_and_arg_guards->spesh_candidates = new_candidate_list;
+ spesh->body.spesh_cands_and_arg_guards = new_cands_and_arg_guards;
/* May now be referencing nursery objects, so barrier just in case. */
//if (spesh->common.header.flags2 & MVM_CF_SECOND_GEN)
@@ -333,15 +342,15 @@ void MVM_spesh_candidate_add(MVMThreadContext *tc, MVMSpeshPlanned *p) {
* of candidates to see if there's one for it to try and jump in to,
* and if the guards aren't in place first will see there is not, and
* not bother checking again. */
- MVM_spesh_arg_guard_regenerate(tc, &(spesh->body.spesh_arg_guard),
- spesh->body.spesh_candidates, spesh->body.num_spesh_candidates + 1);
+ MVM_spesh_arg_guard_regenerate(tc, &(new_cands_and_arg_guards->spesh_arg_guard),
+ new_cands_and_arg_guards->spesh_candidates, spesh->body.num_spesh_candidates + 1);
MVM_barrier();
spesh->body.num_spesh_candidates++;
/* If we're logging, dump the updated arg guards also. */
if (MVM_spesh_debug_enabled(tc)) {
char *guard_dump = MVM_spesh_dump_arg_guard(tc, p->sf,
- p->sf->body.spesh->body.spesh_arg_guard);
+ p->sf->body.spesh->body.spesh_cands_and_arg_guards->spesh_arg_guard);
MVM_spesh_debug_printf(tc, "%s========\n\n", guard_dump);
fflush(tc->instance->spesh_log_fh);
MVM_free(guard_dump);
@@ -357,10 +366,79 @@ void MVM_spesh_candidate_add(MVMThreadContext *tc, MVMSpeshPlanned *p) {
void MVM_spesh_candidate_discard_existing(MVMThreadContext *tc, MVMStaticFrame *sf) {
MVMStaticFrameSpesh *spesh = sf->body.spesh;
if (spesh) {
+ MVMSpeshCandidatesAndArgGuards *cands_and_arg_guards = spesh->body.spesh_cands_and_arg_guards;
MVMuint32 num_candidates = spesh->body.num_spesh_candidates;
MVMuint32 i;
for (i = 0; i < num_candidates; i++)
- spesh->body.spesh_candidates[i]->body.discarded = 1;
+ cands_and_arg_guards->spesh_candidates[i]->body.discarded = 1;
MVM_spesh_arg_guard_discard(tc, sf);
}
}
+
+/* Discards one candidate. */
+void MVM_spesh_candidate_discard_one(MVMThreadContext *tc, MVMStaticFrame *sf, MVMSpeshCandidate *cand) {
+ MVMStaticFrameSpesh *spesh = sf->body.spesh;
+ if (spesh) {
+ MVMSpeshCandidatesAndArgGuards *cands_and_arg_guards = spesh->body.spesh_cands_and_arg_guards;
+ if (!cands_and_arg_guards) {
+ //fprintf(stderr, "no cands_and_arg_guards!\n");
+ return;
+ }
+ MVMSpeshCandidatesAndArgGuards *new_cands_and_arg_guards;
+ MVMSpeshCandidate **new_cands;
+ MVMuint32 i, found = 0, new_num = spesh->body.num_spesh_candidates - 1;
+
+ /* Find the index of the given candidate. */
+ for (i = 0; i < spesh->body.num_spesh_candidates; i++) {
+ MVMSpeshCandidate *sc = cands_and_arg_guards->spesh_candidates[i];
+ if (sc == cand) {
+ //cand->body.discarded = 1;
+ found = 1;
+ spesh->body.num_spesh_candidates--;
+ MVM_barrier();
+ break;
+ }
+ }
+
+ if (!found) {
+ fprintf(stderr, "no matching cand found?! Didn't think this was going to happen.\n");
+ return;
+ }
+
+ /* Copy the existing candidates, minus the one to remove, and regenerate the arg guards. */
+ if (new_num > 0) {
+ fprintf(stderr, "i = %u, new_num = %u\n", i, new_num);
+ new_cands_and_arg_guards = MVM_fixed_size_alloc_zeroed(tc, tc->instance->fsa, sizeof(MVMSpeshCandidatesAndArgGuards));
+ new_cands = MVM_fixed_size_alloc( tc, tc->instance->fsa, new_num * sizeof(MVMSpeshCandidate *));
+
+ if (i == 0) { // Removing the head, so just copy the rest
+ memcpy(new_cands, &cands_and_arg_guards->spesh_candidates[1], new_num * sizeof(MVMSpeshCandidate *));
+ }
+ else if (i == new_num) { // Removing the tail, so just copy up to it
+ memcpy(new_cands, cands_and_arg_guards->spesh_candidates, new_num * sizeof(MVMSpeshCandidate *));
+ }
+ else { // Removing something in the middle, so need to copy up to and then after it
+ memcpy(new_cands, cands_and_arg_guards->spesh_candidates, i * sizeof(MVMSpeshCandidate *));
+ memcpy(&new_cands[i], &cands_and_arg_guards->spesh_candidates[i + 1], (new_num - i) * sizeof(MVMSpeshCandidate *));
+ }
+
+ new_cands_and_arg_guards->spesh_candidates = new_cands;
+ spesh->body.spesh_cands_and_arg_guards = new_cands_and_arg_guards;
+
+ MVM_spesh_arg_guard_regenerate(tc, &(new_cands_and_arg_guards->spesh_arg_guard),
+ new_cands_and_arg_guards->spesh_candidates, new_num);
+ }
+ /* There was only one, so remove it and the arg guards. */
+ else {
+ fprintf(stderr, "removing the only existing candidate\n");
+ MVM_spesh_arg_guard_discard(tc, sf);
+ spesh->body.spesh_cands_and_arg_guards = NULL;
+ }
+
+ MVM_spesh_arg_guard_destroy(tc, cands_and_arg_guards->spesh_arg_guard, 1);
+ MVM_fixed_size_free_at_safepoint(tc, tc->instance->fsa, (new_num + 1) * sizeof(MVMSpeshCandidate *),
+ cands_and_arg_guards->spesh_candidates);
+ MVM_fixed_size_free_at_safepoint(tc, tc->instance->fsa, sizeof(MVMSpeshCandidatesAndArgGuards),
+ cands_and_arg_guards);
+ }
+}
diff --git src/6model/reprs/MVMSpeshCandidate.h src/6model/reprs/MVMSpeshCandidate.h
index e0b189990..f3b70bbed 100644
--- src/6model/reprs/MVMSpeshCandidate.h
+++ src/6model/reprs/MVMSpeshCandidate.h
@@ -31,6 +31,9 @@ struct MVMSpeshCandidateBody {
/* Deoptimization mappings. */
MVMint32 *deopts;
+ /* Count of times an optimization was deopted. */
+ MVMuint32 deopt_count;
+
/* Bit field of named args used to put in place during deopt, since we
* typically don't update the array in specialized code. */
MVMuint64 deopt_named_used_bit_field;
@@ -88,3 +91,4 @@ const MVMREPROps * MVMSpeshCandidate_initialize(MVMThreadContext *tc);
/* Functions for creating and clearing up specializations. */
void MVM_spesh_candidate_add(MVMThreadContext *tc, MVMSpeshPlanned *p);
void MVM_spesh_candidate_discard_existing(MVMThreadContext *tc, MVMStaticFrame *sf);
+void MVM_spesh_candidate_discard_one(MVMThreadContext *tc, MVMStaticFrame *sf, MVMSpeshCandidate *cand);
diff --git src/6model/reprs/MVMSpeshLog.c src/6model/reprs/MVMSpeshLog.c
index 169c6e8db..64bba3a50 100644
--- src/6model/reprs/MVMSpeshLog.c
+++ src/6model/reprs/MVMSpeshLog.c
@@ -55,6 +55,10 @@ static void gc_mark(MVMThreadContext *tc, MVMSTable *st, void *data, MVMGCWorkli
case MVM_SPESH_LOG_INVOKE:
MVM_gc_worklist_add(tc, worklist, &(log->entries[i].invoke.sf));
break;
+ case MVM_SPESH_LOG_DEOPT:
+ MVM_gc_worklist_add(tc, worklist, &(log->entries[i].deopt.sf));
+ MVM_gc_worklist_add(tc, worklist, &(log->entries[i].deopt.spesh_cand));
+ break;
}
}
}
@@ -70,6 +74,8 @@ static void describe_refs (MVMThreadContext *tc, MVMHeapSnapshotState *ss, MVMST
MVMuint64 cache_5 = 0;
MVMuint64 cache_6 = 0;
MVMuint64 cache_7 = 0;
+ MVMuint64 cache_8 = 0;
+ MVMuint64 cache_9 = 0;
if (!body->entries)
return;
@@ -103,6 +109,12 @@ static void describe_refs (MVMThreadContext *tc, MVMHeapSnapshotState *ss, MVMST
MVM_profile_heap_add_collectable_rel_const_cstr_cached(tc, ss,
(MVMCollectable *)body->entries[i].invoke.sf, "Invoked staticframe entry", &cache_7);
break;
+ case MVM_SPESH_LOG_DEOPT:
+ MVM_profile_heap_add_collectable_rel_const_cstr_cached(tc, ss,
+ (MVMCollectable *)body->entries[i].deopt.sf, "Deopt staticframe entry", &cache_8);
+ MVM_profile_heap_add_collectable_rel_const_cstr_cached(tc, ss,
+ (MVMCollectable *)body->entries[i].deopt.spesh_cand, "Deopt spesh candidate entry", &cache_9);
+ break;
}
}
}
diff --git src/6model/reprs/MVMSpeshLog.h src/6model/reprs/MVMSpeshLog.h
index 40e7388a3..d95787e6d 100644
--- src/6model/reprs/MVMSpeshLog.h
+++ src/6model/reprs/MVMSpeshLog.h
@@ -25,6 +25,9 @@ typedef enum {
/* Return from a logged callframe to an unlogged one, needed to keep
* the spesh simulation stack in sync. */
MVM_SPESH_LOG_RETURN_TO_UNLOGGED,
+ /* Deopt information (static frame, spesh candidate, number of times
+ * it's deopted). */
+ MVM_SPESH_LOG_DEOPT,
} MVMSpeshLogEntryKind;
/* Flags on types. */
@@ -84,6 +87,12 @@ struct MVMSpeshLogEntry {
MVMuint32 bytecode_offset;
MVMuint16 guard_index;
} plugin;
+
+ /* Deopt (DEOPT). */
+ struct {
+ MVMStaticFrame *sf;
+ MVMSpeshCandidate *spesh_cand;
+ } deopt;
};
};
diff --git src/6model/reprs/MVMStaticFrameSpesh.c src/6model/reprs/MVMStaticFrameSpesh.c
index 86d5432b1..8b699edf2 100644
--- src/6model/reprs/MVMStaticFrameSpesh.c
+++ src/6model/reprs/MVMStaticFrameSpesh.c
@@ -25,12 +25,15 @@ static void copy_to(MVMThreadContext *tc, MVMSTable *st, void *src, MVMObject *d
/* Called by the VM to mark any GCable items. */
static void gc_mark(MVMThreadContext *tc, MVMSTable *st, void *data, MVMGCWorklist *worklist) {
MVMStaticFrameSpeshBody *body = (MVMStaticFrameSpeshBody *)data;
+ MVMSpeshCandidatesAndArgGuards *cands_and_arg_guards = body->spesh_cands_and_arg_guards;
MVM_spesh_stats_gc_mark(tc, body->spesh_stats, worklist);
- MVM_spesh_arg_guard_gc_mark(tc, body->spesh_arg_guard, worklist);
- if (body->num_spesh_candidates) {
- MVMuint32 i;
- for (i = 0; i < body->num_spesh_candidates; i++) {
- MVM_gc_worklist_add(tc, worklist, &body->spesh_candidates[i]);
+ if (cands_and_arg_guards) {
+ MVM_spesh_arg_guard_gc_mark(tc, cands_and_arg_guards->spesh_arg_guard, worklist);
+ if (body->num_spesh_candidates) {
+ MVMuint32 i;
+ for (i = 0; i < body->num_spesh_candidates; i++) {
+ MVM_gc_worklist_add(tc, worklist, &cands_and_arg_guards->spesh_candidates[i]);
+ }
}
}
MVM_gc_worklist_add(tc, worklist, &body->plugin_state);
@@ -39,9 +42,13 @@ static void gc_mark(MVMThreadContext *tc, MVMSTable *st, void *data, MVMGCWorkli
/* Called by the VM in order to free memory associated with this object. */
static void gc_free(MVMThreadContext *tc, MVMObject *obj) {
MVMStaticFrameSpesh *sfs = (MVMStaticFrameSpesh *)obj;
+ MVMSpeshCandidatesAndArgGuards *cands_and_arg_guards = sfs->body.spesh_cands_and_arg_guards;
MVM_spesh_stats_destroy(tc, sfs->body.spesh_stats);
MVM_free(sfs->body.spesh_stats);
- MVM_spesh_arg_guard_destroy(tc, sfs->body.spesh_arg_guard, 0);
+ if (cands_and_arg_guards && cands_and_arg_guards->spesh_arg_guard)
+ MVM_spesh_arg_guard_destroy(tc, cands_and_arg_guards->spesh_arg_guard, 1);
+ if (cands_and_arg_guards)
+ MVM_fixed_size_free_at_safepoint(tc, tc->instance->fsa, sizeof(MVMSpeshCandidatesAndArgGuards), cands_and_arg_guards);
}
static const MVMStorageSpec storage_spec = {
@@ -72,10 +79,11 @@ static void deserialize_stable_size(MVMThreadContext *tc, MVMSTable *st, MVMSeri
/* Calculates the non-GC-managed memory we hold on to. */
static MVMuint64 unmanaged_size(MVMThreadContext *tc, MVMSTable *st, void *data) {
MVMStaticFrameSpeshBody *body = (MVMStaticFrameSpeshBody *)data;
+ MVMSpeshCandidatesAndArgGuards *cands_and_arg_guards = body->spesh_cands_and_arg_guards;
MVMuint64 size = 0;
MVMuint32 spesh_idx;
for (spesh_idx = 0; spesh_idx < body->num_spesh_candidates; spesh_idx++) {
- MVMSpeshCandidate *cand = body->spesh_candidates[spesh_idx];
+ MVMSpeshCandidate *cand = cands_and_arg_guards->spesh_candidates[spesh_idx];
size += cand->body.bytecode_size;
@@ -113,20 +121,21 @@ static MVMuint64 unmanaged_size(MVMThreadContext *tc, MVMSTable *st, void *data)
static void describe_refs(MVMThreadContext *tc, MVMHeapSnapshotState *ss, MVMSTable *st, void *data) {
MVMStaticFrameSpeshBody *body = (MVMStaticFrameSpeshBody *)data;
+ MVMSpeshCandidatesAndArgGuards *cands_and_arg_guards = body->spesh_cands_and_arg_guards;
MVM_spesh_stats_gc_describe(tc, ss, body->spesh_stats);
- MVM_spesh_arg_guard_gc_describe(tc, ss, body->spesh_arg_guard);
+ MVM_spesh_arg_guard_gc_describe(tc, ss, cands_and_arg_guards->spesh_arg_guard);
if (body->num_spesh_candidates) {
MVMuint32 i, j;
for (i = 0; i < body->num_spesh_candidates; i++) {
- for (j = 0; j < body->spesh_candidates[i]->body.num_spesh_slots; j++)
+ for (j = 0; j < cands_and_arg_guards->spesh_candidates[i]->body.num_spesh_slots; j++)
MVM_profile_heap_add_collectable_rel_const_cstr(tc, ss,
- (MVMCollectable *)body->spesh_candidates[i]->body.spesh_slots[j],
+ (MVMCollectable *)cands_and_arg_guards->spesh_candidates[i]->body.spesh_slots[j],
"Spesh slot entry");
- for (j = 0; j < body->spesh_candidates[i]->body.num_inlines; j++)
+ for (j = 0; j < cands_and_arg_guards->spesh_candidates[i]->body.num_inlines; j++)
MVM_profile_heap_add_collectable_rel_const_cstr(tc, ss,
- (MVMCollectable *)body->spesh_candidates[i]->body.inlines[j].sf,
+ (MVMCollectable *)cands_and_arg_guards->spesh_candidates[i]->body.inlines[j].sf,
"Spesh inlined static frame");
}
}
diff --git src/6model/reprs/MVMStaticFrameSpesh.h src/6model/reprs/MVMStaticFrameSpesh.h
index f2e6f16b1..dbd3ffe88 100644
--- src/6model/reprs/MVMStaticFrameSpesh.h
+++ src/6model/reprs/MVMStaticFrameSpesh.h
@@ -2,7 +2,7 @@
* about a static frame (logged statistics, generated specializations, and
* so forth). */
-struct MVMStaticFrameSpeshBody {
+struct MVMSpeshCandidatesAndArgGuards {
/* Specialization argument guard tree, for selecting a specialization. */
MVMSpeshArgGuard *spesh_arg_guard;
@@ -10,6 +10,11 @@ struct MVMStaticFrameSpeshBody {
* move in memory; the array of pointers to them is managed using the
* fixed size allocator and freed at the next safepoint. */
MVMSpeshCandidate **spesh_candidates;
+};
+
+struct MVMStaticFrameSpeshBody {
+ MVMSpeshCandidatesAndArgGuards *spesh_cands_and_arg_guards;
+
MVMuint32 num_spesh_candidates;
/* Recorded count for data recording for the specializer. Incremented
diff --git src/core/bytecodedump.c src/core/bytecodedump.c
index 9197927af..7a5f4b0f0 100644
--- src/core/bytecodedump.c
+++ src/core/bytecodedump.c
@@ -562,8 +562,9 @@ void MVM_dump_bytecode_stackframe(MVMThreadContext *tc, MVMint32 depth) {
} else {
MVMuint32 spesh_cand_idx;
MVMStaticFrameSpesh *spesh = sf->body.spesh;
+ MVMSpeshCandidatesAndArgGuards *cands_and_arg_guards = spesh->body.spesh_cands_and_arg_guards;
for (spesh_cand_idx = 0; spesh_cand_idx < spesh->body.num_spesh_candidates; spesh_cand_idx++) {
- MVMSpeshCandidate *cand = spesh->body.spesh_candidates[spesh_cand_idx];
+ MVMSpeshCandidate *cand = cands_and_arg_guards->spesh_candidates[spesh_cand_idx];
if (cand->body.bytecode == effective_bytecode) {
MVM_dump_bytecode_of(tc, frame, cand);
}
diff --git src/core/fixedsizealloc.c src/core/fixedsizealloc.c
index 504e5ed0e..ce1aeb194 100644
--- src/core/fixedsizealloc.c
+++ src/core/fixedsizealloc.c
@@ -10,7 +10,7 @@
* behavior. */
/* Turn this on to switch to a mode where we debug by size. */
-#define FSA_SIZE_DEBUG 0
+#define FSA_SIZE_DEBUG 1
#if FSA_SIZE_DEBUG
typedef struct {
MVMuint64 alloc_size;
diff --git src/core/frame.c src/core/frame.c
index 7ec9018d8..c2887373d 100644
--- src/core/frame.c
+++ src/core/frame.c
@@ -400,6 +400,7 @@ void MVM_frame_invoke(MVMThreadContext *tc, MVMStaticFrame *static_frame,
MVMFrame *frame;
MVMuint8 *chosen_bytecode;
MVMStaticFrameSpesh *spesh;
+ MVMSpeshCandidatesAndArgGuards *cands_and_arg_guards;
/* If the frame was never invoked before, or never before at the current
* instrumentation level, we need to trigger the instrumentation level
@@ -489,13 +490,14 @@ void MVM_frame_invoke(MVMThreadContext *tc, MVMStaticFrame *static_frame,
/* See if any specializations apply. */
spesh = static_frame->body.spesh;
- if (spesh_cand < 0)
- spesh_cand = MVM_spesh_arg_guard_run(tc, spesh->body.spesh_arg_guard,
+ cands_and_arg_guards = spesh->body.spesh_cands_and_arg_guards;
+ if (spesh_cand < 0 && cands_and_arg_guards)
+ spesh_cand = MVM_spesh_arg_guard_run(tc, cands_and_arg_guards->spesh_arg_guard,
callsite, args, NULL);
#if MVM_SPESH_CHECK_PRESELECTION
else {
MVMint32 certain = -1;
- MVMint32 correct = MVM_spesh_arg_guard_run(tc, spesh->body.spesh_arg_guard,
+ MVMint32 correct = MVM_spesh_arg_guard_run(tc, cands_and_arg_guards->spesh_arg_guard,
callsite, args, &certain);
if (spesh_cand != correct && spesh_cand != certain) {
fprintf(stderr, "Inconsistent spesh preselection of '%s' (%s): got %d, not %d\n",
@@ -506,8 +508,8 @@ void MVM_frame_invoke(MVMThreadContext *tc, MVMStaticFrame *static_frame,
}
}
#endif
- if (spesh_cand >= 0) {
- MVMSpeshCandidate *chosen_cand = spesh->body.spesh_candidates[spesh_cand];
+ if (spesh_cand >= 0 && spesh_cand < (MVMint32)spesh->body.num_spesh_candidates && cands_and_arg_guards) {
+ MVMSpeshCandidate *chosen_cand = cands_and_arg_guards->spesh_candidates[spesh_cand];
if (static_frame->body.allocate_on_heap) {
MVMROOT4(tc, static_frame, code_ref, outer, chosen_cand, {
frame = allocate_frame(tc, static_frame, chosen_cand, 1);
diff --git src/gc/debug.h src/gc/debug.h
index 91d833abd..e2ecd01b5 100644
--- src/gc/debug.h
+++ src/gc/debug.h
@@ -4,7 +4,7 @@
* 2 = Checks on every object register access (slow)
* 3 = Collects garbage on every allocation
*/
-#define MVM_GC_DEBUG 0
+#define MVM_GC_DEBUG 3
#if MVM_GC_DEBUG
#define MVM_ASSERT_NOT_FROMSPACE(tc, c) do { \
diff --git src/spesh/arg_guard.c src/spesh/arg_guard.c
index 8c114d24d..684caaed7 100644
--- src/spesh/arg_guard.c
+++ src/spesh/arg_guard.c
@@ -318,7 +318,7 @@ void MVM_spesh_arg_guard_regenerate(MVMThreadContext *tc, MVMSpeshArgGuard **gua
MVMSpeshCandidate **candidates, MVMuint32 num_spesh_candidates) {
MVMSpeshArgGuard *tree;
- /* Make a first pass thorugh the candidates, grouping them by callsite.
+ /* Make a first pass through the candidates, grouping them by callsite.
* Along the way, work out how much space, at most, we'll need to store
* the tree (when there are multiple candidates, head sharing may mean
* we need less than this). */
@@ -651,8 +651,11 @@ void MVM_spesh_arg_guard_destroy(MVMThreadContext *tc, MVMSpeshArgGuard *ag, MVM
* candidates will no longer be reachable. */
void MVM_spesh_arg_guard_discard(MVMThreadContext *tc, MVMStaticFrame *sf) {
MVMStaticFrameSpesh *spesh = sf->body.spesh;
- if (spesh && spesh->body.spesh_arg_guard) {
- MVM_spesh_arg_guard_destroy(tc, spesh->body.spesh_arg_guard, 1);
- spesh->body.spesh_arg_guard = NULL;
+ if (spesh) {
+ MVMSpeshCandidatesAndArgGuards *cands_and_arg_guards = spesh->body.spesh_cands_and_arg_guards;
+ if (cands_and_arg_guards && cands_and_arg_guards->spesh_arg_guard) {
+ MVM_spesh_arg_guard_destroy(tc, cands_and_arg_guards->spesh_arg_guard, 1);
+ cands_and_arg_guards->spesh_arg_guard = NULL;
+ }
}
}
diff --git src/spesh/deopt.c src/spesh/deopt.c
index 87a408de3..96a02a85b 100644
--- src/spesh/deopt.c
+++ src/spesh/deopt.c
@@ -243,6 +243,10 @@ static void deopt_frame(MVMThreadContext *tc, MVMFrame *f, MVMuint32 deopt_idx,
materialize_replaced_objects(tc, f, deopt_idx);
});
+ /* Log that this opt was deopted, we want to undo the
+ * optimization if this happens too many times. */
+ MVM_spesh_log_deopt(tc, f->static_info, f->spesh_cand);
+
/* Check if we have inlines. */
if (f->spesh_cand->body.inlines) {
/* Yes, going to have to re-create the frames; uninline
diff --git src/spesh/dump.c src/spesh/dump.c
index 474c785ac..4735cb0dc 100644
--- src/spesh/dump.c
+++ src/spesh/dump.c
@@ -850,6 +850,9 @@ char * MVM_spesh_dump_planned(MVMThreadContext *tc, MVMSpeshPlanned *p) {
case MVM_SPESH_PLANNED_DERIVED_TYPES:
append(&ds, "Derived type");
break;
+ case MVM_SPESH_PLANNED_DEOPT:
+ append(&ds, "Deopt");
+ break;
}
append(&ds, " specialization of '");
append_str(tc, &ds, p->sf->body.name);
@@ -860,9 +863,9 @@ char * MVM_spesh_dump_planned(MVMThreadContext *tc, MVMSpeshPlanned *p) {
append(&ds, ")\n\n");
/* Dump the callsite of the specialization. */
- if (p->cs_stats->cs) {
+ if (p->kind != MVM_SPESH_PLANNED_DEOPT && p->type_info.cs_stats->cs) {
append(&ds, "The specialization is for the callsite:\n");
- dump_callsite(tc, &ds, p->cs_stats->cs);
+ dump_callsite(tc, &ds, p->type_info.cs_stats->cs);
}
else {
append(&ds, "The specialization is for when there is no interned callsite.\n");
@@ -871,43 +874,48 @@ char * MVM_spesh_dump_planned(MVMThreadContext *tc, MVMSpeshPlanned *p) {
/* Dump reasoning. */
switch (p->kind) {
case MVM_SPESH_PLANNED_CERTAIN:
- if (p->cs_stats->hits >= MVM_spesh_threshold(tc, p->sf))
+ if (p->type_info.cs_stats->hits >= MVM_spesh_threshold(tc, p->sf))
appendf(&ds,
"It was planned due to the callsite receiving %u hits.\n",
- p->cs_stats->hits);
- else if (p->cs_stats->osr_hits >= MVM_SPESH_PLAN_CS_MIN_OSR)
+ p->type_info.cs_stats->hits);
+ else if (p->type_info.cs_stats->osr_hits >= MVM_SPESH_PLAN_CS_MIN_OSR)
appendf(&ds,
"It was planned due to the callsite receiving %u OSR hits.\n",
- p->cs_stats->osr_hits);
+ p->type_info.cs_stats->osr_hits);
else
append(&ds, "It was planned for unknown reasons.\n");
if (!p->sf->body.specializable)
append(&ds, "The body contains no specializable instructions.\n");
break;
case MVM_SPESH_PLANNED_OBSERVED_TYPES: {
- MVMCallsite *cs = p->cs_stats->cs;
- MVMuint32 hit_percent = p->cs_stats->hits
- ? (100 * p->type_stats[0]->hits) / p->cs_stats->hits
+ MVMCallsite *cs = p->type_info.cs_stats->cs;
+ MVMuint32 hit_percent = p->type_info.cs_stats->hits
+ ? (100 * p->type_info.type_stats[0]->hits) / p->type_info.cs_stats->hits
: 0;
- MVMuint32 osr_hit_percent = p->cs_stats->osr_hits
- ? (100 * p->type_stats[0]->osr_hits) / p->cs_stats->osr_hits
+ MVMuint32 osr_hit_percent = p->type_info.cs_stats->osr_hits
+ ? (100 * p->type_info.type_stats[0]->osr_hits) / p->type_info.cs_stats->osr_hits
: 0;
append(&ds, "It was planned for the type tuple:\n");
- dump_stats_type_tuple(tc, &ds, cs, p->type_tuple, " ");
+ dump_stats_type_tuple(tc, &ds, cs, p->type_info.type_tuple, " ");
if (osr_hit_percent >= MVM_SPESH_PLAN_TT_OBS_PERCENT_OSR)
appendf(&ds, "Which received %u OSR hits (%u%% of the %u callsite OSR hits).\n",
- p->type_stats[0]->osr_hits, osr_hit_percent, p->cs_stats->osr_hits);
+ p->type_info.type_stats[0]->osr_hits, osr_hit_percent, p->type_info.cs_stats->osr_hits);
else if (hit_percent >= MVM_SPESH_PLAN_TT_OBS_PERCENT)
appendf(&ds, "Which received %u hits (%u%% of the %u callsite hits).\n",
- p->type_stats[0]->hits, hit_percent, p->cs_stats->hits);
+ p->type_info.type_stats[0]->hits, hit_percent, p->type_info.cs_stats->hits);
else
append(&ds, "For unknown reasons.\n");
break;
}
case MVM_SPESH_PLANNED_DERIVED_TYPES: {
- MVMCallsite *cs = p->cs_stats->cs;
+ MVMCallsite *cs = p->type_info.cs_stats->cs;
append(&ds, "It was planned for the type tuple:\n");
- dump_stats_type_tuple(tc, &ds, cs, p->type_tuple, " ");
+ dump_stats_type_tuple(tc, &ds, cs, p->type_info.type_tuple, " ");
+ break;
+ }
+ case MVM_SPESH_PLANNED_DEOPT: {
+ appendf(&ds, "Removing a spesh candidate because it was deopted too many times (%u)\n",
+ p->deopt_info.spesh_cand->body.deopt_count);
break;
}
}
diff --git src/spesh/facts.c src/spesh/facts.c
index b6a78c9cf..b7acad6b7 100644
--- src/spesh/facts.c
+++ src/spesh/facts.c
@@ -286,8 +286,8 @@ static void log_facts(MVMThreadContext *tc, MVMSpeshGraph *g, MVMSpeshBB *bb,
MVMuint32 agg_type_object = 0;
MVMuint32 agg_concrete = 0;
MVMuint32 i;
- for (i = 0; i < p->num_type_stats; i++) {
- MVMSpeshStatsByType *ts = p->type_stats[i];
+ for (i = 0; i < p->type_info.num_type_stats; i++) {
+ MVMSpeshStatsByType *ts = p->type_info.type_stats[i];
MVMuint32 j;
for (j = 0; j < ts->num_by_offset; j++) {
if (ts->by_offset[j].bytecode_offset == logged_ann->data.bytecode_offset) {
diff --git src/spesh/log.c src/spesh/log.c
index cb4f0005b..56d1d9fb5 100644
--- src/spesh/log.c
+++ src/spesh/log.c
@@ -147,6 +147,20 @@ void MVM_spesh_log_osr(MVMThreadContext *tc) {
commit_entry(tc, sl);
}
+/* Log a deopt occurred. */
+void MVM_spesh_log_deopt(MVMThreadContext *tc, MVMStaticFrame *sf, MVMSpeshCandidate *spesh_cand) {
+ MVMSpeshLog *sl = tc->spesh_log;
+ if (sl) {
+ MVMint32 cid = tc->cur_frame->spesh_correlation_id;
+ MVMSpeshLogEntry *entry = &(sl->body.entries[sl->body.used]);
+ entry->kind = MVM_SPESH_LOG_DEOPT;
+ entry->id = cid;
+ entry->deopt.sf = sf;
+ entry->deopt.spesh_cand = spesh_cand;
+ commit_entry(tc, sl);
+ }
+}
+
/* Log a type. */
void MVM_spesh_log_type(MVMThreadContext *tc, MVMObject *value) {
MVMSpeshLog *sl = tc->spesh_log;
diff --git src/spesh/log.h src/spesh/log.h
index 9b4579a0f..4515c29a7 100644
--- src/spesh/log.h
+++ src/spesh/log.h
@@ -50,3 +50,4 @@ void MVM_spesh_log_return_type(MVMThreadContext *tc, MVMObject *value);
void MVM_spesh_log_return_to_unlogged(MVMThreadContext *tc);
void MVM_spesh_log_plugin_resolution(MVMThreadContext *tc, MVMuint32 bytecode_offset,
MVMuint16 guard_index);
+void MVM_spesh_log_deopt(MVMThreadContext *tc, MVMStaticFrame *sf, MVMSpeshCandidate *spesh_cand);
diff --git src/spesh/optimize.c src/spesh/optimize.c
index 599ed0956..248e9999a 100644
--- src/spesh/optimize.c
+++ src/spesh/optimize.c
@@ -1532,8 +1532,8 @@ static void optimize_getlex_per_invocant(MVMThreadContext *tc, MVMSpeshGraph *g,
}
if (ann) {
MVMuint32 i;
- for (i = 0; i < p->num_type_stats; i++) {
- MVMSpeshStatsByType *ts = p->type_stats[i];
+ for (i = 0; i < p->type_info.num_type_stats; i++) {
+ MVMSpeshStatsByType *ts = p->type_info.type_stats[i];
MVMuint32 j;
for (j = 0; j < ts->num_by_offset; j++) {
if (ts->by_offset[j].bytecode_offset == ann->data.bytecode_offset) {
@@ -1555,7 +1555,8 @@ static void optimize_getlex_per_invocant(MVMThreadContext *tc, MVMSpeshGraph *g,
static MVMint32 try_find_spesh_candidate(MVMThreadContext *tc, MVMStaticFrame *sf,
MVMSpeshCallInfo *arg_info,
MVMSpeshStatsType *type_tuple) {
- MVMSpeshArgGuard *ag = sf->body.spesh->body.spesh_arg_guard;
+ MVMSpeshCandidatesAndArgGuards *cands_and_arg_guards = sf->body.spesh->body.spesh_cands_and_arg_guards;
+ MVMSpeshArgGuard *ag = cands_and_arg_guards ? cands_and_arg_guards->spesh_arg_guard : NULL;
return type_tuple
? MVM_spesh_arg_guard_run_types(tc, ag, arg_info->cs, type_tuple)
: MVM_spesh_arg_guard_run_callinfo(tc, ag, arg_info);
@@ -1606,8 +1607,8 @@ static MVMSpeshStatsType * find_invokee_type_tuple(MVMThreadContext *tc, MVMSpes
return NULL;
/* Now look for the best type tuple. */
- for (i = 0; i < p->num_type_stats; i++) {
- MVMSpeshStatsByType *ts = p->type_stats[i];
+ for (i = 0; i < p->type_info.num_type_stats; i++) {
+ MVMSpeshStatsByType *ts = p->type_info.type_stats[i];
MVMuint32 j;
for (j = 0; j < ts->num_by_offset; j++) {
if (ts->by_offset[j].bytecode_offset == invoke_offset) {
@@ -1805,8 +1806,8 @@ MVMStaticFrame * find_invokee_static_frame(MVMThreadContext *tc, MVMSpeshPlanned
return NULL;
/* Now look for a stable invokee. */
- for (i = 0; i < p->num_type_stats; i++) {
- MVMSpeshStatsByType *ts = p->type_stats[i];
+ for (i = 0; i < p->type_info.num_type_stats; i++) {
+ MVMSpeshStatsByType *ts = p->type_info.type_stats[i];
MVMuint32 j;
for (j = 0; j < ts->num_by_offset; j++) {
if (ts->by_offset[j].bytecode_offset == invoke_offset) {
@@ -2071,8 +2072,9 @@ static void optimize_call(MVMThreadContext *tc, MVMSpeshGraph *g, MVMSpeshBB *bb
char *no_inline_reason = NULL;
const MVMOpInfo *no_inline_info = NULL;
MVMuint32 effective_size;
+ MVMSpeshCandidatesAndArgGuards *cands_and_arg_guards = target_sf->body.spesh->body.spesh_cands_and_arg_guards;
MVMSpeshGraph *inline_graph = MVM_spesh_inline_try_get_graph(tc, g,
- target_sf, target_sf->body.spesh->body.spesh_candidates[spesh_cand],
+ target_sf, cands_and_arg_guards->spesh_candidates[spesh_cand],
ins, &no_inline_reason, &effective_size, &no_inline_info);
log_inline(tc, g, target_sf, inline_graph, effective_size, no_inline_reason, 0, no_inline_info);
if (inline_graph) {
@@ -2086,7 +2088,7 @@ static void optimize_call(MVMThreadContext *tc, MVMSpeshGraph *g, MVMSpeshBB *bb
MVM_spesh_usages_add_unconditional_deopt_usage_by_reg(tc, g, code_ref_reg);
MVM_spesh_inline(tc, g, arg_info, bb, ins, inline_graph, target_sf,
code_ref_reg, prepargs_deopt_idx,
- (MVMuint16)target_sf->body.spesh->body.spesh_candidates[spesh_cand]->body.bytecode_size);
+ (MVMuint16)cands_and_arg_guards->spesh_candidates[spesh_cand]->body.bytecode_size);
optimize_bb(tc, g, optimize_from_bb, NULL);
if (MVM_spesh_debug_enabled(tc)) {
@@ -2212,8 +2214,8 @@ static void optimize_plugin(MVMThreadContext *tc, MVMSpeshGraph *g, MVMSpeshBB *
if (logged_ann) {
MVMuint32 agg_guard_index_count = 0;
MVMuint32 i;
- for (i = 0; i < p->num_type_stats; i++) {
- MVMSpeshStatsByType *ts = p->type_stats[i];
+ for (i = 0; i < p->type_info.num_type_stats; i++) {
+ MVMSpeshStatsByType *ts = p->type_info.type_stats[i];
MVMuint32 j;
for (j = 0; j < ts->num_by_offset; j++) {
if (ts->by_offset[j].bytecode_offset == logged_ann->data.bytecode_offset) {
diff --git src/spesh/osr.c src/spesh/osr.c
index d414baea4..a546c2e63 100644
--- src/spesh/osr.c
+++ src/spesh/osr.c
@@ -155,6 +155,7 @@ MVMCallsite * find_callsite_and_args(MVMThreadContext *tc, MVMRegister **args) {
/* Polls for an optimization and, when one is produced, jumps into it. */
void MVM_spesh_osr_poll_for_result(MVMThreadContext *tc) {
MVMStaticFrameSpesh *spesh = tc->cur_frame->static_info->body.spesh;
+ MVMSpeshCandidatesAndArgGuards *cands_and_arg_guards = spesh->body.spesh_cands_and_arg_guards;
MVMint32 num_cands = spesh->body.num_spesh_candidates;
MVMint32 seq_nr = tc->cur_frame->sequence_nr;
if (seq_nr != tc->osr_hunt_frame_nr || num_cands != tc->osr_hunt_num_spesh_candidates) {
@@ -164,11 +165,11 @@ void MVM_spesh_osr_poll_for_result(MVMThreadContext *tc) {
MVMRegister *args;
MVMCallsite *cs = find_callsite_and_args(tc, &args);
MVMint32 ag_result = MVM_spesh_arg_guard_run(tc,
- spesh->body.spesh_arg_guard,
+ (cands_and_arg_guards ? cands_and_arg_guards->spesh_arg_guard : NULL),
(cs && cs->is_interned ? cs : NULL),
args, NULL);
if (ag_result >= 0)
- perform_osr(tc, spesh->body.spesh_candidates[ag_result]);
+ perform_osr(tc, cands_and_arg_guards->spesh_candidates[ag_result]);
}
/* Update state for avoiding checks in the common case. */
diff --git src/spesh/plan.c src/spesh/plan.c
index bf700ab8b..41666b120 100644
--- src/spesh/plan.c
+++ src/spesh/plan.c
@@ -4,11 +4,12 @@
MVMint32 have_existing_specialization(MVMThreadContext *tc, MVMStaticFrame *sf,
MVMCallsite *cs, MVMSpeshStatsType *type_tuple) {
MVMStaticFrameSpesh *sfs = sf->body.spesh;
+ MVMSpeshCandidatesAndArgGuards *cands_and_arg_guards = sfs->body.spesh_cands_and_arg_guards;
MVMuint32 i;
for (i = 0; i < sfs->body.num_spesh_candidates; i++) {
- if (sfs->body.spesh_candidates[i]->body.cs == cs) {
+ if (cands_and_arg_guards->spesh_candidates[i]->body.cs == cs) {
/* Callsite matches. Is it a matching certain specialization? */
- MVMSpeshStatsType *cand_type_tuple = sfs->body.spesh_candidates[i]->body.type_tuple;
+ MVMSpeshStatsType *cand_type_tuple = cands_and_arg_guards->spesh_candidates[i]->body.type_tuple;
if (type_tuple == NULL && cand_type_tuple == NULL) {
/* Yes, so we're done. */
return 1;
@@ -30,10 +31,10 @@ MVMint32 have_existing_specialization(MVMThreadContext *tc, MVMStaticFrame *sf,
void add_planned(MVMThreadContext *tc, MVMSpeshPlan *plan, MVMSpeshPlannedKind kind,
MVMStaticFrame *sf, MVMSpeshStatsByCallsite *cs_stats,
MVMSpeshStatsType *type_tuple, MVMSpeshStatsByType **type_stats,
- MVMuint32 num_type_stats) {
+ MVMuint32 num_type_stats, MVMSpeshCandidate *cand) {
MVMSpeshPlanned *p;
- if (sf->body.bytecode_size > MVM_SPESH_MAX_BYTECODE_SIZE ||
- have_existing_specialization(tc, sf, cs_stats->cs, type_tuple)) {
+ if (kind != MVM_SPESH_PLANNED_DEOPT && (sf->body.bytecode_size > MVM_SPESH_MAX_BYTECODE_SIZE ||
+ have_existing_specialization(tc, sf, cs_stats->cs, type_tuple))) {
/* Clean up allocated memory.
* NB - the only caller is plan_for_cs, which means that we could do the
* allocations in here, except that we need the type tuple for the
@@ -50,19 +51,25 @@ void add_planned(MVMThreadContext *tc, MVMSpeshPlan *plan, MVMSpeshPlannedKind k
p = &(plan->planned[plan->num_planned++]);
p->kind = kind;
p->sf = sf;
- p->cs_stats = cs_stats;
- p->type_tuple = type_tuple;
- p->type_stats = type_stats;
- p->num_type_stats = num_type_stats;
- if (num_type_stats) {
- MVMuint32 i;
- p->max_depth = type_stats[0]->max_depth;
- for (i = 1; i < num_type_stats; i++)
- if (type_stats[i]->max_depth > p->max_depth)
- p->max_depth = type_stats[i]->max_depth;
+ if (kind == MVM_SPESH_PLANNED_DEOPT) {
+ p->max_depth = UINT32_MAX;
+ p->deopt_info.spesh_cand = cand;
}
else {
- p->max_depth = cs_stats->max_depth;
+ p->type_info.cs_stats = cs_stats;
+ p->type_info.type_tuple = type_tuple;
+ p->type_info.type_stats = type_stats;
+ p->type_info.num_type_stats = num_type_stats;
+ if (num_type_stats) {
+ MVMuint32 i;
+ p->max_depth = type_stats[0]->max_depth;
+ for (i = 1; i < num_type_stats; i++)
+ if (type_stats[i]->max_depth > p->max_depth)
+ p->max_depth = type_stats[i]->max_depth;
+ }
+ else {
+ p->max_depth = cs_stats->max_depth;
+ }
}
}
@@ -207,7 +214,7 @@ static void plan_for_cs(MVMThreadContext *tc, MVMSpeshPlan *plan, MVMStaticFrame
MVM_VECTOR_ELEMS(evidence) == 1
? MVM_SPESH_PLANNED_OBSERVED_TYPES
: MVM_SPESH_PLANNED_DERIVED_TYPES,
- sf, by_cs, chosen_tuple, evidence, MVM_VECTOR_ELEMS(evidence));
+ sf, by_cs, chosen_tuple, evidence, MVM_VECTOR_ELEMS(evidence), NULL);
specializations++;
/* Clean up and we're done. */
@@ -228,7 +235,18 @@ static void plan_for_cs(MVMThreadContext *tc, MVMSpeshPlan *plan, MVMStaticFrame
/* If we get here, and found no specializations to produce, we can add
* a certain specializaiton instead. */
if (!specializations)
- add_planned(tc, plan, MVM_SPESH_PLANNED_CERTAIN, sf, by_cs, NULL, NULL, 0);
+ add_planned(tc, plan, MVM_SPESH_PLANNED_CERTAIN, sf, by_cs, NULL, NULL, 0, NULL);
+}
+
+/* Scan the static frame's spesh candidates for any that have been deopted too many times, and plan their removal. */
+static void plan_for_deopt(MVMThreadContext *tc, MVMSpeshPlan *plan, MVMStaticFrame *sf) {
+ MVMSpeshCandidatesAndArgGuards *scag = sf->body.spesh->body.spesh_cands_and_arg_guards;
+ for(unsigned int i = 0; i < sf->body.spesh->body.num_spesh_candidates; i++) {
+ MVMSpeshCandidate *cand = scag->spesh_candidates[i];
+ if (cand->body.deopt_count > 100) {
+ add_planned(tc, plan, MVM_SPESH_PLANNED_DEOPT, sf, NULL, NULL, NULL, 0, cand);
+ }
+ }
}
/* Considers the statistics of a given static frame and plans specializtions
@@ -237,7 +255,7 @@ void plan_for_sf(MVMThreadContext *tc, MVMSpeshPlan *plan, MVMStaticFrame *sf,
MVMuint64 *in_certain_specialization, MVMuint64 *in_observed_specialization, MVMuint64 *in_osr_specialization) {
MVMSpeshStats *ss = sf->body.spesh->body.spesh_stats;
MVMuint32 threshold = MVM_spesh_threshold(tc, sf);
- if (ss->hits >= threshold || ss->osr_hits >= MVM_SPESH_PLAN_SF_MIN_OSR) {
+ if (ss && (ss->hits >= threshold || ss->osr_hits >= MVM_SPESH_PLAN_SF_MIN_OSR)) {
/* The frame is hot enough; look through its callsites to see if any
* of those are. */
MVMuint32 i;
@@ -260,9 +278,11 @@ void twiddle_stack_depths(MVMThreadContext *tc, MVMSpeshPlanned *planned, MVMuin
for (i = 0; i < num_planned; i++) {
/* For each planned specialization, look for its calls. */
MVMSpeshPlanned *p = &(planned[i]);
+ if (p->kind == MVM_SPESH_PLANNED_DEOPT)
+ continue;
MVMuint32 j;
- for (j = 0; j < p->num_type_stats; j++) {
- MVMSpeshStatsByType *sbt = p->type_stats[j];
+ for (j = 0; j < p->type_info.num_type_stats; j++) {
+ MVMSpeshStatsByType *sbt = p->type_info.type_stats[j];
MVMuint32 k;
for (k = 0; k < sbt->num_by_offset; k++) {
MVMSpeshStatsByOffset *sbo = &(sbt->by_offset[k]);
@@ -315,6 +335,7 @@ MVMSpeshPlan * MVM_spesh_plan(MVMThreadContext *tc, MVMObject *updated_static_fr
for (i = 0; i < updated; i++) {
MVMObject *sf = MVM_repr_at_pos_o(tc, updated_static_frames, i);
plan_for_sf(tc, plan, (MVMStaticFrame *)sf, in_certain_specialization, in_observed_specialization, in_osr_specialization);
+ plan_for_deopt(tc, plan, (MVMStaticFrame *)sf);
}
twiddle_stack_depths(tc, plan->planned, plan->num_planned);
sort_plan(tc, plan->planned, plan->num_planned);
@@ -332,13 +353,16 @@ void MVM_spesh_plan_gc_mark(MVMThreadContext *tc, MVMSpeshPlan *plan, MVMGCWorkl
for (i = 0; i < plan->num_planned; i++) {
MVMSpeshPlanned *p = &(plan->planned[i]);
MVM_gc_worklist_add(tc, worklist, &(p->sf));
- if (p->type_tuple) {
- MVMCallsite *cs = p->cs_stats->cs;
+ if (p->kind == MVM_SPESH_PLANNED_DEOPT) {
+ MVM_gc_worklist_add(tc, worklist, &(p->deopt_info.spesh_cand));
+ }
+ else if (p->type_info.type_tuple) {
+ MVMCallsite *cs = p->type_info.cs_stats->cs;
MVMuint32 j;
for (j = 0; j < cs->flag_count; j++) {
if (cs->arg_flags[j] & MVM_CALLSITE_ARG_OBJ) {
- MVM_gc_worklist_add(tc, worklist, &(p->type_tuple[j].type));
- MVM_gc_worklist_add(tc, worklist, &(p->type_tuple[j].decont_type));
+ MVM_gc_worklist_add(tc, worklist, &(p->type_info.type_tuple[j].type));
+ MVM_gc_worklist_add(tc, worklist, &(p->type_info.type_tuple[j].decont_type));
}
}
}
@@ -350,21 +374,26 @@ void MVM_spesh_plan_gc_describe(MVMThreadContext *tc, MVMHeapSnapshotState *ss,
MVMuint64 cache_1 = 0;
MVMuint64 cache_2 = 0;
MVMuint64 cache_3 = 0;
+ MVMuint64 cache_4 = 0;
if (!plan)
return;
for (i = 0; i < plan->num_planned; i++) {
MVMSpeshPlanned *p = &(plan->planned[i]);
MVM_profile_heap_add_collectable_rel_const_cstr_cached(tc, ss,
(MVMCollectable*)(p->sf), "staticframe", &cache_1);
- if (p->type_tuple) {
- MVMCallsite *cs = p->cs_stats->cs;
+ if (p->kind == MVM_SPESH_PLANNED_DEOPT) {
+ MVM_profile_heap_add_collectable_rel_const_cstr_cached(tc, ss,
+ (MVMCollectable*)(p->deopt_info.spesh_cand), "spesh candidate", &cache_4);
+ }
+ else if (p->type_info.type_tuple) {
+ MVMCallsite *cs = p->type_info.cs_stats->cs;
MVMuint32 j;
for (j = 0; j < cs->flag_count; j++) {
if (cs->arg_flags[j] & MVM_CALLSITE_ARG_OBJ) {
MVM_profile_heap_add_collectable_rel_const_cstr_cached(tc, ss,
- (MVMCollectable*)(p->type_tuple[j].type), "argument type", &cache_2);
+ (MVMCollectable*)(p->type_info.type_tuple[j].type), "argument type", &cache_2);
MVM_profile_heap_add_collectable_rel_const_cstr_cached(tc, ss,
- (MVMCollectable*)(p->type_tuple[j].decont_type), "argument decont type", &cache_3);
+ (MVMCollectable*)(p->type_info.type_tuple[j].decont_type), "argument decont type", &cache_3);
}
}
}
@@ -375,8 +404,10 @@ void MVM_spesh_plan_gc_describe(MVMThreadContext *tc, MVMHeapSnapshotState *ss,
void MVM_spesh_plan_destroy(MVMThreadContext *tc, MVMSpeshPlan *plan) {
MVMuint32 i;
for (i = 0; i < plan->num_planned; i++) {
- MVM_free(plan->planned[i].type_stats);
- MVM_free(plan->planned[i].type_tuple);
+ if (plan->planned[i].kind != MVM_SPESH_PLANNED_DEOPT) {
+ MVM_free(plan->planned[i].type_info.type_stats);
+ MVM_free(plan->planned[i].type_info.type_tuple);
+ }
}
MVM_free(plan->planned);
MVM_free(plan);
diff --git src/spesh/plan.h src/spesh/plan.h
index 7a323823d..cb9da977c 100644
--- src/spesh/plan.h
+++ src/spesh/plan.h
@@ -34,12 +34,15 @@ typedef enum {
MVM_SPESH_PLANNED_OBSERVED_TYPES,
/* A specialization based on analysis of various argument types that
- * showed up. This may happen when one argument type is predcitable, but
+ * showed up. This may happen when one argument type is predictable, but
* others are not. */
- MVM_SPESH_PLANNED_DERIVED_TYPES
+ MVM_SPESH_PLANNED_DERIVED_TYPES,
+
+ /* A specialization that has been deopted too many times and therefore needs to be removed. */
+ MVM_SPESH_PLANNED_DEOPT
} MVMSpeshPlannedKind;
-/* An planned specialization that should be produced. */
+/* A planned specialization that should be produced. */
struct MVMSpeshPlanned {
/* What kind of specialization we're planning. */
MVMSpeshPlannedKind kind;
@@ -52,23 +55,33 @@ struct MVMSpeshPlanned {
/* The static frame with the code to specialize. */
MVMStaticFrame *sf;
- /* The callsite statistics entry that this specialization was planned as
- * a result of (by extension, we find the callsite, if any). */
- MVMSpeshStatsByCallsite *cs_stats;
-
- /* The type tuple to produce the specialization for, if this is a type
- * based specialization. NULL for certain specializations. The memory
- * associated with this tuple will always have been allocated by the
- * planner, not shared with the statistics structure, even if this is a
- * specialization for an exactly observed type. */
- MVMSpeshStatsType *type_tuple;
-
- /* Type statistics, if any, that the plan was formed based upon. */
- MVMSpeshStatsByType **type_stats;
-
- /* Number of entries in the type_stats array. (For an observed type
- * specialization, this would be 1.) */
- MVMuint32 num_type_stats;
+ union {
+ struct {
+ /* The callsite statistics entry that this specialization was planned as
+ * a result of (by extension, we find the callsite, if any). */
+ MVMSpeshStatsByCallsite *cs_stats;
+
+ /* The type tuple to produce the specialization for, if this is a type
+ * based specialization. NULL for certain specializations. The memory
+ * associated with this tuple will always have been allocated by the
+ * planner, not shared with the statistics structure, even if this is a
+ * specialization for an exactly observed type. */
+ MVMSpeshStatsType *type_tuple;
+
+ /* Type statistics, if any, that the plan was formed based upon. */
+ MVMSpeshStatsByType **type_stats;
+
+ /* Number of entries in the type_stats array. (For an observed type
+ * specialization, this would be 1.) */
+ MVMuint32 num_type_stats;
+ } type_info;
+
+ struct {
+ /* The spesh candidate that's been deopted too many times, so it's going
+ * to be removed. */
+ MVMSpeshCandidate *spesh_cand;
+ } deopt_info;
+ };
};
MVMSpeshPlan * MVM_spesh_plan(MVMThreadContext *tc, MVMObject *updated_static_frames, MVMuint64 *certain_specialization, MVMuint64 *observed_specialization, MVMuint64 *osr_specialization);
diff --git src/spesh/stats.c src/spesh/stats.c
index 47650ef34..6d000e2ea 100644
--- src/spesh/stats.c
+++ src/spesh/stats.c
@@ -640,6 +640,10 @@ void MVM_spesh_stats_update(MVMThreadContext *tc, MVMSpeshLog *sl, MVMObject *sf
sim_stack_pop(tc, sims, sf_updated);
break;
}
+ case MVM_SPESH_LOG_DEOPT: {
+ e->deopt.spesh_cand->body.deopt_count++;
+ break;
+ }
}
}
save_or_free_sim_stack(tc, sims, log_from_tc, sf_updated);
diff --git src/spesh/worker.c src/spesh/worker.c
index 1808b3d77..721893de7 100644
--- src/spesh/worker.c
+++ src/spesh/worker.c
@@ -150,7 +150,13 @@ static void worker(MVMThreadContext *tc, MVMCallsite *callsite, MVMRegister *arg
/* Implement the plan and then discard it. */
n = tc->instance->spesh_plan->num_planned;
for (i = 0; i < n; i++) {
- MVM_spesh_candidate_add(tc, &(tc->instance->spesh_plan->planned[i]));
+ MVMSpeshPlanned *sp = &(tc->instance->spesh_plan->planned[i]);
+ if (sp->kind == MVM_SPESH_PLANNED_DEOPT) {
+ MVM_spesh_candidate_discard_one(tc, sp->sf, sp->deopt_info.spesh_cand);
+ }
+ else {
+ MVM_spesh_candidate_add(tc, sp);
+ }
GC_SYNC_POINT(tc);
}
MVM_spesh_plan_destroy(tc, tc->instance->spesh_plan);
diff --git src/types.h src/types.h
index 903ba0c39..09a30aa0c 100644
--- src/types.h
+++ src/types.h
@@ -213,6 +213,7 @@ typedef struct MVMStaticFrameInstrumentation MVMStaticFrameInstrumentation;
typedef struct MVMStaticFrameDebugLocal MVMStaticFrameDebugLocal;
typedef struct MVMStaticFrameSpesh MVMStaticFrameSpesh;
typedef struct MVMStaticFrameSpeshBody MVMStaticFrameSpeshBody;
+typedef struct MVMSpeshCandidatesAndArgGuards MVMSpeshCandidatesAndArgGuards;
typedef struct MVMStorageSpec MVMStorageSpec;
typedef struct MVMString MVMString;
typedef struct MVMStringBody MVMStringBody;
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment