Save MasterDuke17/12527f89669f925c6f242d54a640f952 to your computer and use it in GitHub Desktop.
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters.
diff --git build/Makefile.in build/Makefile.in | |
index f04f09e51..7408e1de4 100644 | |
--- build/Makefile.in | |
+++ build/Makefile.in | |
@@ -114,7 +114,6 @@ OBJECTS1 = src/core/callsite@obj@ \ | |
src/core/nativecall_@nativecall_backend@@obj@ \ | |
src/core/continuation@obj@ \ | |
src/core/intcache@obj@ \ | |
- src/core/fixedsizealloc@obj@ \ | |
src/core/regionalloc@obj@ \ | |
src/debug/debugserver@obj@ \ | |
src/gen/config@obj@ \ | |
diff --git src/6model/reprs/ConcBlockingQueue.c src/6model/reprs/ConcBlockingQueue.c | |
index 33e0c90f7..7a53e34ad 100644 | |
--- src/6model/reprs/ConcBlockingQueue.c | |
+++ src/6model/reprs/ConcBlockingQueue.c | |
@@ -20,7 +20,7 @@ static MVMObject * type_object_for(MVMThreadContext *tc, MVMObject *HOW) { | |
/* Initializes a new instance. */ | |
static void initialize(MVMThreadContext *tc, MVMSTable *st, MVMObject *root, void *data) { | |
MVMConcBlockingQueue *cbq = (MVMConcBlockingQueue*)root; | |
- MVMConcBlockingQueueBody *body = MVM_fixed_size_alloc_zeroed(tc, tc->instance->fsa, sizeof(MVMConcBlockingQueueBody)); | |
+ MVMConcBlockingQueueBody *body = MVM_calloc(1, sizeof(MVMConcBlockingQueueBody)); | |
/* Initialize locks. */ | |
int init_stat; | |
@@ -35,7 +35,7 @@ static void initialize(MVMThreadContext *tc, MVMSTable *st, MVMObject *root, voi | |
uv_strerror(init_stat)); | |
/* Head and tail point to a null node. */ | |
- body->tail = body->head = MVM_fixed_size_alloc_zeroed(tc, tc->instance->fsa, sizeof(MVMConcBlockingQueueNode)); | |
+ body->tail = body->head = MVM_calloc(1, sizeof(MVMConcBlockingQueueNode)); | |
cbq->body = body; | |
} | |
@@ -71,7 +71,7 @@ static void gc_free(MVMThreadContext *tc, MVMObject *obj) { | |
MVMConcBlockingQueueNode *cur = body->head; | |
while (cur) { | |
MVMConcBlockingQueueNode *next = cur->next; | |
- MVM_fixed_size_free(tc, tc->instance->fsa, sizeof(MVMConcBlockingQueueNode), cur); | |
+ MVM_free(cur); | |
cur = next; | |
} | |
body->head = body->tail = NULL; | |
@@ -82,7 +82,7 @@ static void gc_free(MVMThreadContext *tc, MVMObject *obj) { | |
uv_cond_destroy(&body->head_cond); | |
/* Clean up body */ | |
- MVM_fixed_size_free(tc, tc->instance->fsa, sizeof(MVMConcBlockingQueueBody), body); | |
+ MVM_free(body); | |
} | |
static const MVMStorageSpec storage_spec = { | |
@@ -152,7 +152,7 @@ static void push(MVMThreadContext *tc, MVMSTable *st, MVMObject *root, void *dat | |
MVM_exception_throw_adhoc(tc, | |
"Cannot store a null value in a concurrent blocking queue"); | |
- add = MVM_fixed_size_alloc_zeroed(tc, tc->instance->fsa, sizeof(MVMConcBlockingQueueNode)); | |
+ add = MVM_calloc(1, sizeof(MVMConcBlockingQueueNode)); | |
interval_id = MVM_telemetry_interval_start(tc, "ConcBlockingQueue.push"); | |
MVMROOT2(tc, root, to_add, { | |
@@ -196,7 +196,7 @@ static void unshift(MVMThreadContext *tc, MVMSTable *st, MVMObject *root, void * | |
interval_id = MVM_telemetry_interval_start(tc, "ConcBlockingQueue.unshift"); | |
- add = MVM_fixed_size_alloc_zeroed(tc, tc->instance->fsa, sizeof(MVMConcBlockingQueueNode)); | |
+ add = MVM_calloc(1, sizeof(MVMConcBlockingQueueNode)); | |
/* We'll need to hold both the head and the tail lock, in case head == tail | |
* and push would update tail->next - without the tail lock, this could | |
@@ -248,7 +248,7 @@ static void shift(MVMThreadContext *tc, MVMSTable *st, MVMObject *root, void *da | |
}); | |
taken = body->head->next; | |
- MVM_fixed_size_free(tc, tc->instance->fsa, sizeof(MVMConcBlockingQueueNode), body->head); | |
+ MVM_free(body->head); | |
body->head = taken; | |
MVM_barrier(); | |
value->o = taken->value; | |
@@ -346,7 +346,7 @@ MVMObject * MVM_concblockingqueue_poll(MVMThreadContext *tc, MVMConcBlockingQueu | |
if (MVM_load(&body->elems) > 0) { | |
taken = body->head->next; | |
- MVM_fixed_size_free(tc, tc->instance->fsa, sizeof(MVMConcBlockingQueueNode), body->head); | |
+ MVM_free(body->head); | |
body->head = taken; | |
MVM_barrier(); | |
result = taken->value; | |
diff --git src/6model/reprs/MVMCapture.c src/6model/reprs/MVMCapture.c | |
index 0f7dedc9d..8183f81e6 100644 | |
--- src/6model/reprs/MVMCapture.c | |
+++ src/6model/reprs/MVMCapture.c | |
@@ -26,7 +26,7 @@ static void copy_to(MVMThreadContext *tc, MVMSTable *st, void *src, MVMObject *d | |
: MVM_callsite_copy(tc, src_body->callsite); | |
size_t arg_size = dest_body->callsite->flag_count * sizeof(MVMRegister); | |
if (arg_size) { | |
- dest_body->args = MVM_fixed_size_alloc(tc, tc->instance->fsa, arg_size); | |
+ dest_body->args = MVM_malloc(arg_size); | |
memcpy(dest_body->args, src_body->args, arg_size); | |
} | |
else { | |
@@ -51,9 +51,7 @@ static void gc_mark(MVMThreadContext *tc, MVMSTable *st, void *data, MVMGCWorkli | |
static void gc_free(MVMThreadContext *tc, MVMObject *obj) { | |
MVMCapture *capture = (MVMCapture *)obj; | |
if (capture->body.args) | |
- MVM_fixed_size_free(tc, tc->instance->fsa, | |
- capture->body.callsite->flag_count * sizeof(MVMRegister), | |
- capture->body.args); | |
+ MVM_free(capture->body.args); | |
if (capture->body.callsite && !capture->body.callsite->is_interned) | |
MVM_callsite_destroy(capture->body.callsite); | |
} | |
@@ -124,8 +122,7 @@ MVMObject * MVM_capture_from_args(MVMThreadContext *tc, MVMArgs arg_info) { | |
MVMCallsite *callsite = arg_info.callsite; | |
MVMRegister *args; | |
if (callsite->flag_count) { | |
- args = MVM_fixed_size_alloc(tc, tc->instance->fsa, | |
- callsite->flag_count * sizeof(MVMRegister)); | |
+ args = MVM_malloc(callsite->flag_count * sizeof(MVMRegister)); | |
MVMuint16 i; | |
for (i = 0; i < callsite->flag_count; i++) | |
args[i] = arg_info.source[arg_info.map[i]]; | |
@@ -368,8 +365,7 @@ MVMObject * MVM_capture_drop_args(MVMThreadContext *tc, MVMObject *capture_obj, | |
/* Form a new arguments buffer, dropping the specified argument. */ | |
MVMRegister *new_args; | |
if (new_callsite->flag_count) { | |
- new_args = MVM_fixed_size_alloc(tc, tc->instance->fsa, | |
- new_callsite->flag_count * sizeof(MVMRegister)); | |
+ new_args = MVM_malloc(new_callsite->flag_count * sizeof(MVMRegister)); | |
MVMuint32 from, to = 0; | |
for (from = 0; from < capture->body.callsite->flag_count; from++) { | |
if (from < idx || from >= idx + count) { | |
@@ -415,8 +411,7 @@ MVMObject * MVM_capture_insert_arg(MVMThreadContext *tc, MVMObject *capture_obj, | |
idx, kind); | |
/* Form a new arguments buffer, dropping the specified argument. */ | |
- MVMRegister *new_args = MVM_fixed_size_alloc(tc, tc->instance->fsa, | |
- new_callsite->flag_count * sizeof(MVMRegister)); | |
+ MVMRegister *new_args = MVM_malloc(new_callsite->flag_count * sizeof(MVMRegister)); | |
MVMuint32 from, to = 0; | |
for (from = 0; from < capture->body.callsite->flag_count; from++) { | |
if (from == idx) { | |
@@ -467,8 +462,7 @@ MVMObject * MVM_capture_replace_arg(MVMThreadContext *tc, MVMObject *capture_obj | |
new_callsite->arg_flags[idx] = kind; | |
/* Form a new arguments buffer, replacing the specified argument. */ | |
- MVMRegister *new_args = MVM_fixed_size_alloc(tc, tc->instance->fsa, | |
- callsite->flag_count * sizeof(MVMRegister)); | |
+ MVMRegister *new_args = MVM_malloc(callsite->flag_count * sizeof(MVMRegister)); | |
MVMuint32 from = 0; | |
for (from = 0; from < capture->body.callsite->flag_count; from++) { | |
new_args[from] = capture->body.args[from]; | |
diff --git src/6model/reprs/MVMCompUnit.c src/6model/reprs/MVMCompUnit.c | |
index 34983974d..2a5c226bc 100644 | |
--- src/6model/reprs/MVMCompUnit.c | |
+++ src/6model/reprs/MVMCompUnit.c | |
@@ -80,17 +80,11 @@ static void gc_free(MVMThreadContext *tc, MVMObject *obj) { | |
MVM_free(body->inline_tweak_mutex); | |
MVM_free(body->coderefs); | |
if (body->callsites) | |
- MVM_fixed_size_free(tc, tc->instance->fsa, | |
- body->num_callsites * sizeof(MVMCallsite *), | |
- body->callsites); | |
+ MVM_free(body->callsites); | |
if (body->extops) | |
- MVM_fixed_size_free(tc, tc->instance->fsa, | |
- body->num_extops * sizeof(MVMExtOpRecord), | |
- body->extops); | |
+ MVM_free(body->extops); | |
if (body->strings) | |
- MVM_fixed_size_free(tc, tc->instance->fsa, | |
- body->num_strings * sizeof(MVMString *), | |
- body->strings); | |
+ MVM_free(body->strings); | |
MVM_free(body->scs); | |
MVM_free(body->scs_to_resolve); | |
MVM_free(body->sc_handle_idxs); | |
diff --git src/6model/reprs/MVMContinuation.c src/6model/reprs/MVMContinuation.c | |
index b14157970..9798cb6ed 100644 | |
--- src/6model/reprs/MVMContinuation.c | |
+++ src/6model/reprs/MVMContinuation.c | |
@@ -50,7 +50,7 @@ static void gc_free(MVMThreadContext *tc, MVMObject *obj) { | |
MVMActiveHandler *cur_ah = ctx->body.active_handlers; | |
while (cur_ah != NULL) { | |
MVMActiveHandler *next_ah = cur_ah->next_handler; | |
- MVM_fixed_size_free(tc, tc->instance->fsa, sizeof(MVMActiveHandler), cur_ah); | |
+ MVM_free(cur_ah); | |
cur_ah = next_ah; | |
} | |
} | |
diff --git src/6model/reprs/MVMSpeshCandidate.c src/6model/reprs/MVMSpeshCandidate.c | |
index 8092ab63c..632cfe49f 100644 | |
--- src/6model/reprs/MVMSpeshCandidate.c | |
+++ src/6model/reprs/MVMSpeshCandidate.c | |
@@ -347,15 +347,13 @@ void MVM_spesh_candidate_add(MVMThreadContext *tc, MVMSpeshPlanned *p) { | |
MVM_spesh_graph_destroy(tc, sg); | |
/* Create a new candidate list and copy any existing ones. Free memory | |
- * using the FSA safepoint mechanism. */ | |
+ * using the safepoint mechanism. */ | |
spesh = p->sf->body.spesh; | |
- new_candidate_list = MVM_fixed_size_alloc(tc, tc->instance->fsa, | |
- (spesh->body.num_spesh_candidates + 1) * sizeof(MVMSpeshCandidate *)); | |
+ new_candidate_list = MVM_malloc((spesh->body.num_spesh_candidates + 1) * sizeof(MVMSpeshCandidate *)); | |
if (spesh->body.num_spesh_candidates) { | |
size_t orig_size = spesh->body.num_spesh_candidates * sizeof(MVMSpeshCandidate *); | |
memcpy(new_candidate_list, spesh->body.spesh_candidates, orig_size); | |
- MVM_fixed_size_free_at_safepoint(tc, tc->instance->fsa, orig_size, | |
- spesh->body.spesh_candidates); | |
+ MVM_free_at_safepoint(tc, spesh->body.spesh_candidates); | |
} | |
MVM_ASSIGN_REF(tc, &(spesh->common.header), new_candidate_list[spesh->body.num_spesh_candidates], candidate); | |
spesh->body.spesh_candidates = new_candidate_list; | |
diff --git src/6model/reprs/MultiDimArray.c src/6model/reprs/MultiDimArray.c | |
index ad24b04c9..11511bd4b 100644 | |
--- src/6model/reprs/MultiDimArray.c | |
+++ src/6model/reprs/MultiDimArray.c | |
@@ -59,8 +59,7 @@ static MVMObject * allocate(MVMThreadContext *tc, MVMSTable *st) { | |
MVMMultiDimArrayREPRData *repr_data = (MVMMultiDimArrayREPRData *)st->REPR_data; | |
if (repr_data) { | |
MVMObject *obj = MVM_gc_allocate_object(tc, st); | |
- ((MVMMultiDimArray *)obj)->body.dimensions = MVM_fixed_size_alloc_zeroed(tc, | |
- tc->instance->fsa, repr_data->num_dimensions * sizeof(MVMint64)); | |
+ ((MVMMultiDimArray *)obj)->body.dimensions = MVM_calloc(repr_data->num_dimensions, sizeof(MVMint64)); | |
return obj; | |
} | |
else { | |
@@ -214,8 +213,8 @@ static void copy_to(MVMThreadContext *tc, MVMSTable *st, void *src, MVMObject *d | |
if (src_body->slots.any) { | |
size_t dim_size = repr_data->num_dimensions * sizeof(MVMint64); | |
size_t data_size = flat_size(repr_data, src_body->dimensions); | |
- dest_body->dimensions = MVM_fixed_size_alloc(tc, tc->instance->fsa, dim_size); | |
- dest_body->slots.any = MVM_fixed_size_alloc(tc, tc->instance->fsa, data_size); | |
+ dest_body->dimensions = MVM_malloc(dim_size); | |
+ dest_body->slots.any = MVM_malloc(data_size); | |
memcpy(dest_body->dimensions, src_body->dimensions, dim_size); | |
memcpy(dest_body->slots.any, src_body->slots.any, data_size); | |
} | |
@@ -248,14 +247,9 @@ static void gc_mark(MVMThreadContext *tc, MVMSTable *st, void *data, MVMGCWorkli | |
/* Called by the VM in order to free memory associated with this object. */ | |
static void gc_free(MVMThreadContext *tc, MVMObject *obj) { | |
MVMMultiDimArray *arr = (MVMMultiDimArray *)obj; | |
- MVMMultiDimArrayREPRData *repr_data = (MVMMultiDimArrayREPRData *)STABLE(obj)->REPR_data; | |
if (arr->body.slots.any) | |
- MVM_fixed_size_free(tc, tc->instance->fsa, | |
- flat_size(repr_data, arr->body.dimensions), | |
- arr->body.slots.any); | |
- MVM_fixed_size_free(tc, tc->instance->fsa, | |
- repr_data->num_dimensions * sizeof(MVMint64), | |
- arr->body.dimensions); | |
+ MVM_free(arr->body.slots.any); | |
+ MVM_free(arr->body.dimensions); | |
} | |
/* Marks the representation data in an STable.*/ | |
@@ -351,8 +345,7 @@ static void deserialize(MVMThreadContext *tc, MVMSTable *st, MVMObject *root, vo | |
body->dimensions[i] = MVM_serialization_read_int(tc, reader); | |
/* Allocate storage. */ | |
- body->slots.any = MVM_fixed_size_alloc_zeroed(tc, tc->instance->fsa, | |
- flat_size(repr_data, body->dimensions)); | |
+ body->slots.any = MVM_calloc(flat_elements(repr_data->num_dimensions, body->dimensions), repr_data->elem_size); | |
/* Read in elements. */ | |
flat_elems = flat_elements(repr_data->num_dimensions, body->dimensions); | |
@@ -673,8 +666,7 @@ static void set_dimensions(MVMThreadContext *tc, MVMSTable *st, MVMObject *root, | |
* real world use case, but we should ensure the VM is memory safe). | |
*/ | |
MVMMultiDimArrayBody *body = (MVMMultiDimArrayBody *)data; | |
- size_t size = flat_size(repr_data, dimensions); | |
- void *storage = MVM_fixed_size_alloc_zeroed(tc, tc->instance->fsa, size); | |
+ void *storage = MVM_calloc(flat_elements(repr_data->num_dimensions, dimensions), repr_data->elem_size); | |
if (MVM_trycas(&(body->slots.any), NULL, storage)) { | |
/* Now memory is in place, safe to un-zero dimensions. */ | |
memcpy(body->dimensions, dimensions, num_dimensions * sizeof(MVMint64)); | |
diff --git src/6model/reprs/NFA.c src/6model/reprs/NFA.c | |
index 91ad88bcf..23e4c83b6 100644 | |
--- src/6model/reprs/NFA.c | |
+++ src/6model/reprs/NFA.c | |
@@ -47,9 +47,9 @@ static void gc_free(MVMThreadContext *tc, MVMObject *obj) { | |
MVMint64 i; | |
for (i = 0; i < nfa->body.num_states; i++) | |
if (nfa->body.num_state_edges[i]) | |
- MVM_fixed_size_free(tc, tc->instance->fsa, nfa->body.num_state_edges[i] * sizeof(MVMNFAStateInfo), nfa->body.states[i]); | |
- MVM_fixed_size_free(tc, tc->instance->fsa, nfa->body.num_states * sizeof(MVMNFAStateInfo *), nfa->body.states); | |
- MVM_fixed_size_free(tc, tc->instance->fsa, nfa->body.num_states * sizeof(MVMint64), nfa->body.num_state_edges); | |
+ MVM_free(nfa->body.states[i]); | |
+ MVM_free(nfa->body.states); | |
+ MVM_free(nfa->body.num_state_edges); | |
} | |
@@ -198,14 +198,12 @@ static void sort_states_and_add_synth_cp_node(MVMThreadContext *tc, MVMNFABody * | |
/* If enough edges, insert synthetic and so the sort. */ | |
if (applicable_edges >= 4) { | |
MVMint64 num_new_edges = num_orig_edges + 1; | |
- MVMNFAStateInfo *new_edges = MVM_fixed_size_alloc(tc, tc->instance->fsa, | |
- num_new_edges * sizeof(MVMNFAStateInfo)); | |
+ MVMNFAStateInfo *new_edges = MVM_malloc(num_new_edges * sizeof(MVMNFAStateInfo)); | |
new_edges[0].act = MVM_NFA_EDGE_SYNTH_CP_COUNT; | |
new_edges[0].arg.i = applicable_edges; | |
memcpy(new_edges + 1, body->states[s], num_orig_edges * sizeof(MVMNFAStateInfo)); | |
qsort(new_edges, num_new_edges, sizeof(MVMNFAStateInfo), opt_edge_comp); | |
- MVM_fixed_size_free(tc, tc->instance->fsa, num_orig_edges * sizeof(MVMNFAStateInfo), | |
- body->states[s]); | |
+ MVM_free(body->states[s]); | |
body->states[s] = new_edges; | |
body->num_state_edges[s] = num_new_edges; | |
} | |
@@ -225,16 +223,16 @@ static void deserialize(MVMThreadContext *tc, MVMSTable *st, MVMObject *root, vo | |
if (body->num_states > 0) { | |
/* Read state edge list counts. */ | |
- body->num_state_edges = MVM_fixed_size_alloc(tc, tc->instance->fsa, body->num_states * sizeof(MVMint64)); | |
+ body->num_state_edges = MVM_malloc(body->num_states * sizeof(MVMint64)); | |
for (i = 0; i < body->num_states; i++) | |
body->num_state_edges[i] = MVM_serialization_read_int(tc, reader); | |
/* Read state graph. */ | |
- body->states = MVM_fixed_size_alloc(tc, tc->instance->fsa, body->num_states * sizeof(MVMNFAStateInfo *)); | |
+ body->states = MVM_malloc(body->num_states * sizeof(MVMNFAStateInfo *)); | |
for (i = 0; i < body->num_states; i++) { | |
MVMint64 edges = body->num_state_edges[i]; | |
if (edges > 0) { | |
- body->states[i] = MVM_fixed_size_alloc(tc, tc->instance->fsa, edges * sizeof(MVMNFAStateInfo)); | |
+ body->states[i] = MVM_malloc(edges * sizeof(MVMNFAStateInfo)); | |
} | |
for (j = 0; j < edges; j++) { | |
body->states[i][j].act = MVM_serialization_read_int(tc, reader); | |
@@ -254,12 +252,12 @@ static void deserialize(MVMThreadContext *tc, MVMSTable *st, MVMObject *root, vo | |
} | |
else { | |
MVMint32 num_codes = -cp_or_synth_count; | |
- MVMCodepoint *codes = MVM_fixed_size_alloc(tc, tc->instance->fsa, num_codes * sizeof(MVMCodepoint)); | |
+ MVMCodepoint *codes = MVM_malloc(num_codes * sizeof(MVMCodepoint)); | |
MVMint32 k; | |
for (k = 0; k < num_codes; k++) | |
codes[k] = (MVMCodepoint)MVM_serialization_read_int(tc, reader); | |
body->states[i][j].arg.g = MVM_nfg_codes_to_grapheme(tc, codes, num_codes); | |
- MVM_fixed_size_free(tc, tc->instance->fsa, num_codes * sizeof(MVMCodepoint), codes); | |
+ MVM_free(codes); | |
} | |
break; | |
} | |
@@ -392,8 +390,8 @@ MVMObject * MVM_nfa_from_statelist(MVMThreadContext *tc, MVMObject *states, MVMO | |
num_states = MVM_repr_elems(tc, states) - 1; | |
nfa->num_states = num_states; | |
if (num_states > 0) { | |
- nfa->num_state_edges = MVM_fixed_size_alloc_zeroed(tc, tc->instance->fsa, num_states * sizeof(MVMint64)); | |
- nfa->states = MVM_fixed_size_alloc_zeroed(tc, tc->instance->fsa, num_states * sizeof(MVMNFAStateInfo *)); | |
+ nfa->num_state_edges = MVM_calloc(num_states, sizeof(MVMint64)); | |
+ nfa->states = MVM_calloc(num_states, sizeof(MVMNFAStateInfo *)); | |
} | |
for (i = 0; i < num_states; i++) { | |
MVMObject *edge_info = MVM_repr_at_pos_o(tc, states, i + 1); | |
@@ -403,7 +401,7 @@ MVMObject * MVM_nfa_from_statelist(MVMThreadContext *tc, MVMObject *states, MVMO | |
nfa->num_state_edges[i] = edges; | |
if (edges > 0) { | |
- nfa->states[i] = MVM_fixed_size_alloc(tc, tc->instance->fsa, edges * sizeof(MVMNFAStateInfo)); | |
+ nfa->states[i] = MVM_malloc(edges * sizeof(MVMNFAStateInfo)); | |
} | |
for (j = 0; j < elems; j += 3) { | |
diff --git src/6model/reprs/P6opaque.c src/6model/reprs/P6opaque.c | |
index d893625d9..4e3c48841 100644 | |
--- src/6model/reprs/P6opaque.c | |
+++ src/6model/reprs/P6opaque.c | |
@@ -143,7 +143,7 @@ static void gc_free(MVMThreadContext *tc, MVMObject *obj) { | |
/* If we replaced the object body, free the replacement. */ | |
void *replaced = ((MVMP6opaque *)obj)->body.replaced; | |
if (replaced) | |
- MVM_fixed_size_free(tc, tc->instance->fsa, STABLE(obj)->size - sizeof(MVMObject), replaced); | |
+ MVM_free(replaced); | |
} | |
/* Marks the representation data in an STable.*/ | |
@@ -1244,7 +1244,7 @@ static void allocate_replaced_body(MVMThreadContext *tc, MVMObject *obj, MVMSTab | |
/* Allocate new memory. */ | |
size_t new_size = new_type->size - sizeof(MVMObject); | |
size_t old_size = STABLE(obj)->size - sizeof(MVMObject); | |
- void *new = MVM_fixed_size_alloc_zeroed(tc, tc->instance->fsa, new_size); | |
+ void *new = MVM_calloc(1, new_size); | |
/* Copy existing to new. | |
* XXX Need more care here, as may have to re-barrier pointers. */ | |
@@ -1253,7 +1253,7 @@ static void allocate_replaced_body(MVMThreadContext *tc, MVMObject *obj, MVMSTab | |
/* Pointer switch, taking care of existing body issues. */ | |
if (body->replaced) { | |
body->replaced = new; | |
- MVM_fixed_size_free_at_safepoint(tc, tc->instance->fsa, old_size, old); | |
+ MVM_free_at_safepoint(tc, old); | |
} | |
else { | |
body->replaced = new; | |
diff --git src/6model/sc.c src/6model/sc.c | |
index bfe7d9d3e..dd1b4a059 100644 | |
--- src/6model/sc.c | |
+++ src/6model/sc.c | |
@@ -72,16 +72,15 @@ void MVM_sc_add_all_scs_entry(MVMThreadContext *tc, MVMSerializationContextBody | |
/* First time; allocate, and NULL first slot as it is | |
* the "no SC" sentinel value. */ | |
tc->instance->all_scs_alloc = 32; | |
- tc->instance->all_scs = MVM_fixed_size_alloc(tc, tc->instance->fsa, | |
- tc->instance->all_scs_alloc * sizeof(MVMSerializationContextBody *)); | |
+ tc->instance->all_scs = MVM_malloc(tc->instance->all_scs_alloc * sizeof(MVMSerializationContextBody *)); | |
tc->instance->all_scs[0] = NULL; | |
tc->instance->all_scs_next_idx++; | |
} | |
else { | |
MVMuint32 orig_alloc = tc->instance->all_scs_alloc; | |
tc->instance->all_scs_alloc += 32; | |
- tc->instance->all_scs = MVM_fixed_size_realloc_at_safepoint(tc, | |
- tc->instance->fsa, tc->instance->all_scs, | |
+ tc->instance->all_scs = MVM_realloc_at_safepoint(tc, | |
+ tc->instance->all_scs, | |
orig_alloc * sizeof(MVMSerializationContextBody *), | |
tc->instance->all_scs_alloc * sizeof(MVMSerializationContextBody *)); | |
} | |
@@ -92,12 +91,7 @@ void MVM_sc_add_all_scs_entry(MVMThreadContext *tc, MVMSerializationContextBody | |
} | |
void MVM_sc_all_scs_destroy(MVMThreadContext *tc) { | |
- MVM_fixed_size_free( | |
- tc, | |
- tc->instance->fsa, | |
- tc->instance->all_scs_alloc * sizeof(MVMSerializationContextBody *), | |
- tc->instance->all_scs | |
- ); | |
+ MVM_free(tc->instance->all_scs); | |
} | |
/* Given an SC, returns its unique handle. */ | |
diff --git src/core/alloc.h src/core/alloc.h | |
index efd562783..2878b9ca9 100644 | |
--- src/core/alloc.h | |
+++ src/core/alloc.h | |
@@ -72,6 +72,15 @@ MVM_STATIC_INLINE void MVM_free_at_safepoint(MVMThreadContext *tc, void *ptr) { | |
MVM_VECTOR_PUSH(tc->instance->free_at_safepoint, ptr); | |
} | |
+MVM_STATIC_INLINE void * MVM_realloc_at_safepoint(MVMThreadContext *tc, void *p, size_t old_bytes, size_t new_bytes) { | |
+ if (old_bytes == new_bytes) | |
+ return p; | |
+ void *allocd = MVM_malloc(new_bytes); | |
+ memcpy(allocd, p, new_bytes > old_bytes ? old_bytes : new_bytes); | |
+ MVM_free_at_safepoint(tc, p); | |
+ return allocd; | |
+} | |
+ | |
MVM_STATIC_INLINE void MVM_alloc_safepoint(MVMThreadContext *tc) { | |
/* No need to acquire mutex since we're in the GC when calling this. */ | |
while (MVM_VECTOR_ELEMS(tc->instance->free_at_safepoint)) | |
diff --git src/core/args.c src/core/args.c | |
index f5832bcc0..72a17a726 100644 | |
--- src/core/args.c | |
+++ src/core/args.c | |
@@ -15,8 +15,7 @@ MVM_STATIC_INLINE void mark_named_used(MVMArgProcContext *ctx, MVMuint32 idx) { | |
/* An identity map is just an array { 0, 1, 2, ... }. */ | |
static MVMuint16 * create_identity_map(MVMThreadContext *tc, MVMuint32 size) { | |
- MVMuint16 *map = MVM_fixed_size_alloc(tc, tc->instance->fsa, | |
- size * sizeof(MVMuint16)); | |
+ MVMuint16 *map = MVM_malloc(size * sizeof(MVMuint16)); | |
MVMuint32 i; | |
for (i = 0; i < size; i++) | |
map[i] = i; | |
@@ -34,13 +33,9 @@ void MVM_args_setup_identity_map(MVMThreadContext *tc) { | |
/* Free memory associated with the identity map(s). */ | |
void MVM_args_destroy_identity_map(MVMThreadContext *tc) { | |
- MVM_fixed_size_free(tc, tc->instance->fsa, | |
- tc->instance->identity_arg_map_alloc * sizeof(MVMuint16), | |
- tc->instance->identity_arg_map); | |
+ MVM_free(tc->instance->identity_arg_map); | |
if (tc->instance->identity_arg_map != tc->instance->small_identity_arg_map) | |
- MVM_fixed_size_free(tc, tc->instance->fsa, | |
- MVM_ARGS_SMALL_IDENTITY_MAP_SIZE * sizeof(MVMuint16), | |
- tc->instance->small_identity_arg_map); | |
+ MVM_free(tc->instance->small_identity_arg_map); | |
} | |
/* Perform flattening of arguments as provided, and return the resulting | |
diff --git src/core/args.h src/core/args.h | |
index 46f93f30c..e69ed5719 100644 | |
--- src/core/args.h | |
+++ src/core/args.h | |
@@ -60,8 +60,7 @@ MVM_STATIC_INLINE void MVM_args_proc_setup(MVMThreadContext *tc, MVMArgProcConte | |
MVMuint16 num_nameds = arg_info.callsite->flag_count - arg_info.callsite->num_pos; | |
ctx->named_used_size = num_nameds; | |
if (MVM_UNLIKELY(num_nameds > 64)) | |
- ctx->named_used.byte_array = MVM_fixed_size_alloc_zeroed(tc, tc->instance->fsa, | |
- num_nameds); | |
+ ctx->named_used.byte_array = MVM_calloc(1, num_nameds); | |
else | |
ctx->named_used.bit_field = 0; | |
} | |
@@ -69,8 +68,7 @@ MVM_STATIC_INLINE void MVM_args_proc_setup(MVMThreadContext *tc, MVMArgProcConte | |
/* Clean up an arguments processing context. */ | |
MVM_STATIC_INLINE void MVM_args_proc_cleanup(MVMThreadContext *tc, MVMArgProcContext *ctx) { | |
if (ctx->named_used_size > 64) { | |
- MVM_fixed_size_free(tc, tc->instance->fsa, ctx->named_used_size, | |
- ctx->named_used.byte_array); | |
+ MVM_free(ctx->named_used.byte_array); | |
ctx->named_used_size = 0; | |
} | |
} | |
diff --git src/core/bytecode.c src/core/bytecode.c | |
index 9c55b6e33..64995c692 100644 | |
--- src/core/bytecode.c | |
+++ src/core/bytecode.c | |
@@ -333,8 +333,7 @@ static MVMExtOpRecord * deserialize_extop_records(MVMThreadContext *tc, MVMCompU | |
if (num == 0) | |
return NULL; | |
- extops = MVM_fixed_size_alloc_zeroed(tc, tc->instance->fsa, | |
- num * sizeof(MVMExtOpRecord)); | |
+ extops = MVM_calloc(num, sizeof(MVMExtOpRecord)); | |
pos = rs->extop_seg; | |
for (i = 0; i < num; i++) { | |
@@ -350,7 +349,7 @@ static MVMExtOpRecord * deserialize_extop_records(MVMThreadContext *tc, MVMCompU | |
/* Lookup name string. */ | |
if (name_idx >= cu->body.num_strings) { | |
cleanup_all(rs); | |
- MVM_fixed_size_free(tc, tc->instance->fsa, num * sizeof(MVMExtOpRecord), extops); | |
+ MVM_free(extops); | |
MVM_exception_throw_adhoc(tc, | |
"String heap index beyond end of string heap"); | |
} | |
@@ -453,7 +452,7 @@ static MVMExtOpRecord * deserialize_extop_records(MVMThreadContext *tc, MVMCompU | |
fail: | |
cleanup_all(rs); | |
- MVM_fixed_size_free(tc, tc->instance->fsa, num * sizeof(MVMExtOpRecord), extops); | |
+ MVM_free(extops); | |
MVM_exception_throw_adhoc(tc, "Invalid operand descriptor"); | |
} | |
} | |
@@ -827,8 +826,7 @@ static MVMCallsite ** deserialize_callsites(MVMThreadContext *tc, MVMCompUnit *c | |
/* Allocate space for callsites. */ | |
if (rs->expected_callsites == 0) | |
return NULL; | |
- callsites = MVM_fixed_size_alloc(tc, tc->instance->fsa, | |
- sizeof(MVMCallsite *) * rs->expected_callsites); | |
+ callsites = MVM_malloc(sizeof(MVMCallsite *) * rs->expected_callsites); | |
/* Load callsites. */ | |
pos = rs->callsite_seg; | |
@@ -873,9 +871,7 @@ static MVMCallsite ** deserialize_callsites(MVMThreadContext *tc, MVMCompUnit *c | |
MVM_free_null(callsites[i]); | |
} | |
} | |
- MVM_fixed_size_free(tc, tc->instance->fsa, | |
- sizeof(MVMCallsite *) * rs->expected_callsites, | |
- callsites); | |
+ MVM_free(callsites); | |
MVM_exception_throw_adhoc(tc, "Flattened named args must be objects"); | |
} | |
has_flattening = 1; | |
@@ -890,9 +886,7 @@ static MVMCallsite ** deserialize_callsites(MVMThreadContext *tc, MVMCompUnit *c | |
MVM_free_null(callsites[i]); | |
} | |
} | |
- MVM_fixed_size_free(tc, tc->instance->fsa, | |
- sizeof(MVMCallsite *) * rs->expected_callsites, | |
- callsites); | |
+ MVM_free(callsites); | |
MVM_exception_throw_adhoc(tc, "Flattened positional args must be objects"); | |
} | |
if (nameds_slots) { | |
@@ -903,9 +897,7 @@ static MVMCallsite ** deserialize_callsites(MVMThreadContext *tc, MVMCompUnit *c | |
MVM_free_null(callsites[i]); | |
} | |
} | |
- MVM_fixed_size_free(tc, tc->instance->fsa, | |
- sizeof(MVMCallsite *) * rs->expected_callsites, | |
- callsites); | |
+ MVM_free(callsites); | |
MVM_exception_throw_adhoc(tc, "Flattened positional args must appear before named args"); | |
} | |
has_flattening = 1; | |
@@ -924,9 +916,7 @@ static MVMCallsite ** deserialize_callsites(MVMThreadContext *tc, MVMCompUnit *c | |
MVM_free_null(callsites[i]); | |
} | |
} | |
- MVM_fixed_size_free(tc, tc->instance->fsa, | |
- sizeof(MVMCallsite *) * rs->expected_callsites, | |
- callsites); | |
+ MVM_free(callsites); | |
MVM_exception_throw_adhoc(tc, "All positional args must appear before named args, violated by arg %d in callsite %d", j, i); | |
} | |
else { | |
@@ -990,8 +980,7 @@ void MVM_bytecode_unpack(MVMThreadContext *tc, MVMCompUnit *cu) { | |
rs = dissect_bytecode(tc, cu); | |
/* Allocate space for the strings heap; we deserialize it lazily. */ | |
- cu_body->strings = MVM_fixed_size_alloc_zeroed(tc, tc->instance->fsa, | |
- rs->expected_strings * sizeof(MVMString *)); | |
+ cu_body->strings = MVM_calloc(rs->expected_strings, sizeof(MVMString *)); | |
cu_body->num_strings = rs->expected_strings; | |
cu_body->orig_strings = rs->expected_strings; | |
cu_body->string_heap_fast_table = MVM_calloc( | |
@@ -1020,7 +1009,7 @@ void MVM_bytecode_unpack(MVMThreadContext *tc, MVMCompUnit *cu) { | |
if (rs->hll_str_idx > rs->expected_strings) { | |
MVM_free(cu_body->string_heap_fast_table); | |
- MVM_fixed_size_free(tc, tc->instance->fsa, rs->expected_strings * sizeof(MVMString *), cu_body->strings); | |
+ MVM_free(cu_body->strings); | |
MVM_exception_throw_adhoc(tc, "Unpacking bytecode: HLL name string index out of range: %d > %d", rs->hll_str_idx, rs->expected_strings); | |
} | |
diff --git src/core/callsite.c src/core/callsite.c | |
index 1493f724d..e0871d812 100644 | |
--- src/core/callsite.c | |
+++ src/core/callsite.c | |
@@ -44,10 +44,8 @@ void MVM_callsite_initialize_common(MVMThreadContext *tc) { | |
/* Initialize the intern storage. */ | |
MVMCallsiteInterns *interns = tc->instance->callsite_interns; | |
interns->max_arity = MVM_INTERN_ARITY_SOFT_LIMIT - 1; | |
- interns->by_arity = MVM_fixed_size_alloc_zeroed(tc, tc->instance->fsa, | |
- MVM_INTERN_ARITY_SOFT_LIMIT * sizeof(MVMCallsite **)); | |
- interns->num_by_arity = MVM_fixed_size_alloc_zeroed(tc, tc->instance->fsa, | |
- MVM_INTERN_ARITY_SOFT_LIMIT * sizeof(MVMuint32)); | |
+ interns->by_arity = MVM_calloc(MVM_INTERN_ARITY_SOFT_LIMIT, sizeof(MVMCallsite **)); | |
+ interns->num_by_arity = MVM_calloc(MVM_INTERN_ARITY_SOFT_LIMIT, sizeof(MVMuint32)); | |
/* Intern callsites. | |
* If you add a callsite to this list, remember to add it to the check in | |
@@ -258,12 +256,12 @@ MVM_PUBLIC void MVM_callsite_intern(MVMThreadContext *tc, MVMCallsite **cs_ptr, | |
if (num_flags > interns->max_arity) { | |
MVMuint32 prev_elems = interns->max_arity + 1; | |
MVMuint32 new_elems = num_flags + 1; | |
- interns->by_arity = MVM_fixed_size_realloc_at_safepoint(tc, tc->instance->fsa, | |
+ interns->by_arity = MVM_realloc_at_safepoint(tc, | |
interns->by_arity, | |
prev_elems * sizeof(MVMCallsite **), | |
new_elems * sizeof(MVMCallsite **)); | |
memset(interns->by_arity + prev_elems, 0, (new_elems - prev_elems) * sizeof(MVMCallsite *)); | |
- interns->num_by_arity = MVM_fixed_size_realloc_at_safepoint(tc, tc->instance->fsa, | |
+ interns->num_by_arity = MVM_realloc_at_safepoint(tc, | |
interns->num_by_arity, | |
prev_elems * sizeof(MVMuint32), | |
new_elems * sizeof(MVMuint32)); | |
@@ -276,12 +274,11 @@ MVM_PUBLIC void MVM_callsite_intern(MVMThreadContext *tc, MVMCallsite **cs_ptr, | |
MVMuint32 cur_size = interns->num_by_arity[num_flags]; | |
if (cur_size % MVM_INTERN_ARITY_GROW == 0) { | |
interns->by_arity[num_flags] = cur_size != 0 | |
- ? MVM_fixed_size_realloc_at_safepoint(tc, tc->instance->fsa, | |
+ ? MVM_realloc_at_safepoint(tc, | |
interns->by_arity[num_flags], | |
cur_size * sizeof(MVMCallsite *), | |
(cur_size + MVM_INTERN_ARITY_GROW) * sizeof(MVMCallsite *)) | |
- : MVM_fixed_size_alloc(tc, tc->instance->fsa, | |
- MVM_INTERN_ARITY_GROW * sizeof(MVMCallsite *)); | |
+ : MVM_malloc(MVM_INTERN_ARITY_GROW * sizeof(MVMCallsite *)); | |
} | |
/* Install the new callsite. */ | |
@@ -346,17 +343,11 @@ void MVM_callsite_cleanup_interns(MVMInstance *instance) { | |
if (!is_common(callsite)) | |
MVM_callsite_destroy(callsite); | |
} | |
- MVM_fixed_size_free(instance->main_thread, instance->fsa, | |
- callsite_count * sizeof(MVMCallsite *), | |
- callsites); | |
+ MVM_free(callsites); | |
} | |
} | |
- MVM_fixed_size_free(instance->main_thread, instance->fsa, | |
- interns->max_arity * sizeof(MVMCallsite **), | |
- interns->by_arity); | |
- MVM_fixed_size_free(instance->main_thread, instance->fsa, | |
- interns->max_arity * sizeof(MVMuint32), | |
- interns->num_by_arity); | |
+ MVM_free(interns->by_arity); | |
+ MVM_free(interns->num_by_arity); | |
MVM_free(instance->callsite_interns); | |
} | |
diff --git src/core/callstack.c src/core/callstack.c | |
index 51efe1af5..e303127d1 100644 | |
--- src/core/callstack.c | |
+++ src/core/callstack.c | |
@@ -265,15 +265,12 @@ MVMint32 MVM_callstack_ensure_work_and_env_space(MVMThreadContext *tc, MVMuint32 | |
/* Allocate the extra space on the callstack. */ | |
region->alloc += diff; | |
- /* If the environment size changed, then need to realloc using the | |
- * FSA. */ | |
+ /* If the environment size changed, then we need to realloc. */ | 
if (new_env_size > cur_frame->allocd_env) { | |
- MVMRegister *new_env = MVM_fixed_size_alloc_zeroed(tc, tc->instance->fsa, | |
- new_env_size); | |
+ MVMRegister *new_env = MVM_calloc(1, new_env_size); | |
if (cur_frame->allocd_env) { | |
memcpy(new_env, cur_frame->env, cur_frame->allocd_env); | |
- MVM_fixed_size_free(tc, tc->instance->fsa, cur_frame->allocd_env, | |
- cur_frame->env); | |
+ MVM_free(cur_frame->env); | |
} | |
cur_frame->env = new_env; | |
} | |
@@ -611,7 +608,7 @@ static void exit_heap_frame(MVMThreadContext *tc, MVMFrame *returner) { | |
/* Preserve the extras if the frame has been used in a ctx operation | |
* and marked with caller info. */ | |
if (!(e->caller_deopt_idx || e->caller_jit_position)) { | |
- MVM_fixed_size_free_at_safepoint(tc, tc->instance->fsa, sizeof(MVMFrameExtra), e); | |
+ MVM_free_at_safepoint(tc, e); | |
returner->extra = NULL; | |
} | |
} | |
@@ -795,7 +792,7 @@ MVMuint64 MVM_callstack_unwind_frame(MVMThreadContext *tc, MVMuint8 exceptional) | |
case MVM_CALLSTACK_RECORD_FRAME: { | |
MVMFrame *frame = &(((MVMCallStackFrame *)tc->stack_top)->frame); | |
if (frame->extra) | |
- MVM_fixed_size_free(tc, tc->instance->fsa, sizeof(MVMFrameExtra), frame->extra); | |
+ MVM_free(frame->extra); | |
exit_frame(tc, frame); | |
move_to_prev_record(tc); | |
break; | |
diff --git src/core/compunit.c src/core/compunit.c | |
index ac2d47148..8b734ee23 100644 | |
--- src/core/compunit.c | |
+++ src/core/compunit.c | |
@@ -115,13 +115,12 @@ MVMuint16 MVM_cu_callsite_add(MVMThreadContext *tc, MVMCompUnit *cu, MVMCallsite | |
/* Not known; let's add it. */ | |
size_t orig_size = cu->body.num_callsites * sizeof(MVMCallsite *); | |
size_t new_size = (cu->body.num_callsites + 1) * sizeof(MVMCallsite *); | |
- MVMCallsite **new_callsites = MVM_fixed_size_alloc(tc, tc->instance->fsa, new_size); | |
+ MVMCallsite **new_callsites = MVM_malloc(new_size); | |
memcpy(new_callsites, cu->body.callsites, orig_size); | |
idx = cu->body.num_callsites; | |
new_callsites[idx] = cs->is_interned ? cs : MVM_callsite_copy(tc, cs); | |
if (cu->body.callsites) | |
- MVM_fixed_size_free_at_safepoint(tc, tc->instance->fsa, orig_size, | |
- cu->body.callsites); | |
+ MVM_free_at_safepoint(tc, cu->body.callsites); | |
cu->body.callsites = new_callsites; | |
cu->body.num_callsites++; | |
} | |
@@ -149,13 +148,12 @@ MVMuint32 MVM_cu_string_add(MVMThreadContext *tc, MVMCompUnit *cu, MVMString *st | |
/* Not known; let's add it. */ | |
size_t orig_size = cu->body.num_strings * sizeof(MVMString *); | |
size_t new_size = (cu->body.num_strings + 1) * sizeof(MVMString *); | |
- MVMString **new_strings = MVM_fixed_size_alloc(tc, tc->instance->fsa, new_size); | |
+ MVMString **new_strings = MVM_malloc(new_size); | |
memcpy(new_strings, cu->body.strings, orig_size); | |
idx = cu->body.num_strings; | |
new_strings[idx] = str; | |
if (cu->body.strings) | |
- MVM_fixed_size_free_at_safepoint(tc, tc->instance->fsa, orig_size, | |
- cu->body.strings); | |
+ MVM_free_at_safepoint(tc, cu->body.strings); | |
cu->body.strings = new_strings; | |
cu->body.num_strings++; | |
} | |
diff --git src/core/exceptions.c src/core/exceptions.c | |
index 16eeeab5e..41816ac2d 100644 | |
--- src/core/exceptions.c | |
+++ src/core/exceptions.c | |
@@ -359,7 +359,7 @@ static void run_handler(MVMThreadContext *tc, LocatedHandler lh, MVMObject *ex_o | |
case MVM_EX_ACTION_INVOKE: { | |
/* Create active handler record. */ | |
- MVMActiveHandler *ah = MVM_fixed_size_alloc(tc, tc->instance->fsa, sizeof(MVMActiveHandler)); | |
+ MVMActiveHandler *ah = MVM_malloc(sizeof(MVMActiveHandler)); | |
/* Ensure we have an exception object. */ | |
MVMFrame *cur_frame = tc->cur_frame; | |
@@ -441,7 +441,7 @@ static void unwind_after_handler(MVMThreadContext *tc, void *sr_data) { | |
} | |
/* Clean up. */ | |
tc->active_handlers = ah->next_handler; | |
- MVM_fixed_size_free(tc, tc->instance->fsa, sizeof(MVMActiveHandler), ah); | |
+ MVM_free(ah); | |
/* Do the unwinding as needed. */ | |
if (exception && exception->body.return_after_unwind) { | |
@@ -463,7 +463,7 @@ static void cleanup_active_handler(MVMThreadContext *tc, void *sr_data) { | |
/* Clean up. */ | |
tc->active_handlers = ah->next_handler; | |
- MVM_fixed_size_free(tc, tc->instance->fsa, sizeof(MVMActiveHandler), ah); | |
+ MVM_free(ah); | |
} | |
char * MVM_exception_backtrace_line(MVMThreadContext *tc, MVMFrame *cur_frame, | |
diff --git src/core/fixkey_hash_table.c src/core/fixkey_hash_table.c | |
index f34bb3aa8..c0a315d79 100644 | |
--- src/core/fixkey_hash_table.c | |
+++ src/core/fixkey_hash_table.c | |
@@ -6,11 +6,8 @@ void hash_demolish_internal(MVMThreadContext *tc, | |
struct MVMFixKeyHashTableControl *control) { | |
size_t allocated_items = MVM_fixkey_hash_allocated_items(control); | |
size_t entries_size = sizeof(MVMString ***) * allocated_items; | |
- size_t metadata_size = MVM_hash_round_size_up(allocated_items + 1); | |
- size_t total_size | |
- = entries_size + sizeof(struct MVMFixKeyHashTableControl) + metadata_size; | |
char *start = (char *)control - entries_size; | |
- MVM_fixed_size_free(tc, tc->instance->fsa, total_size, start); | |
+ MVM_free(start); | |
} | |
/* Frees the entire contents of the hash, leaving you just the hashtable itself, | |
@@ -28,7 +25,7 @@ void MVM_fixkey_hash_demolish(MVMThreadContext *tc, MVMFixKeyHashTable *hashtabl | |
while (bucket < entries_in_use) { | |
if (*metadata) { | |
MVMString ***indirection = (MVMString ***) entry_raw; | |
- MVM_fixed_size_free(tc, tc->instance->fsa, control->entry_size, *indirection); | |
+ MVM_free(*indirection); | |
} | |
++bucket; | |
++metadata; | |
@@ -61,7 +58,7 @@ MVM_STATIC_INLINE struct MVMFixKeyHashTableControl *hash_allocate_common(MVMThre | |
assert(total_size == MVM_hash_round_size_up(total_size)); | |
struct MVMFixKeyHashTableControl *control = | |
- (struct MVMFixKeyHashTableControl *) ((char *)MVM_fixed_size_alloc(tc, tc->instance->fsa, total_size) + entries_size); | |
+ (struct MVMFixKeyHashTableControl *) ((char *)MVM_malloc(total_size) + entries_size); | |
control->official_size_log2 = official_size_log2; | |
control->max_items = max_items; | |
@@ -336,7 +333,7 @@ void *MVM_fixkey_hash_lvalue_fetch_nocheck(MVMThreadContext *tc, | |
return indirection; | |
} | |
- MVMString **entry = MVM_fixed_size_alloc(tc, tc->instance->fsa, control->entry_size); | |
+ MVMString **entry = MVM_malloc(control->entry_size); | |
/* and we then set *this* to NULL to signal to our caller that this is a | |
* new allocation. */ | |
*entry = NULL; | |
diff --git src/core/fixkey_hash_table.h src/core/fixkey_hash_table.h | |
index 4a44a718d..2d92aeb15 100644 | |
--- src/core/fixkey_hash_table.h | |
+++ src/core/fixkey_hash_table.h | |
@@ -58,11 +58,10 @@ Not all the optimisations described above are in place yet. Starting with | |
* delete yet, delete isn't implemented... | |
* | |
* The normal case is that the caller specify the `entry_size`, and the hash | |
- * will allocate memory for new entries using `MVM_fixed_size_alloc` (when | |
- * needed), and all the APIs return pointers to this memory, with the layer of | |
- * indirection completely hidden internally. `MVM_fixkey_hash_demolish` will | |
- * release all of the allocated blocks back to the FSA before freeing the hash | |
- * itself. | |
+ * will allocate memory for new entries (when needed), and all the APIs | |
+ * return pointers to this memory, with the layer of indirection completely | |
+ * hidden internally. `MVM_fixkey_hash_demolish` will release all of the | |
+ * allocated blocks before freeing the hash itself. | |
* | |
* It can be useful to indirect to static storage. Hence `entry_size == 0` is | |
* treated as a special case (the allocated storage must be at least 1 pointer | |
@@ -84,7 +83,7 @@ struct MVMFixKeyHashTableControl { | |
MVMHashNumItems cur_items; | |
MVMHashNumItems max_items; /* hit this and we grow */ | |
/* size of the (real) entry. | |
- * If non-zero, allocated with MVM_fixed_size_alloc | |
+ * If non-zero, allocated with MVM_malloc | 
* If zero, see the comments above. */ | |
MVMuint16 entry_size; | |
MVMuint8 official_size_log2; | |
diff --git src/core/frame.c src/core/frame.c | |
index bec44093a..83f2d5e4f 100644 | |
--- src/core/frame.c | |
+++ src/core/frame.c | |
@@ -129,10 +129,10 @@ static void instrumentation_level_barrier(MVMThreadContext *tc, MVMStaticFrame * | |
void MVM_frame_destroy(MVMThreadContext *tc, MVMFrame *frame) { | |
MVM_args_proc_cleanup(tc, &frame->params); | |
if (frame->env && !MVM_FRAME_IS_ON_CALLSTACK(tc, frame)) | |
- MVM_fixed_size_free(tc, tc->instance->fsa, frame->allocd_env, frame->env); | |
+ MVM_free(frame->env); | |
if (frame->extra) { | |
MVMFrameExtra *e = frame->extra; | |
- MVM_fixed_size_free(tc, tc->instance->fsa, sizeof(MVMFrameExtra), e); | |
+ MVM_free(e); | |
} | |
} | |
@@ -167,7 +167,7 @@ static MVMFrame * create_context_only(MVMThreadContext *tc, MVMStaticFrame *stat | |
* is vivified to prevent the clone (which is what creates the correct | |
* BEGIN/INIT semantics). */ | |
if (static_frame->body.env_size) { | |
- frame->env = MVM_fixed_size_alloc_zeroed(tc, tc->instance->fsa, static_frame->body.env_size); | |
+ frame->env = MVM_calloc(1, static_frame->body.env_size); | |
frame->allocd_env = static_frame->body.env_size; | |
if (autoclose) { | |
MVMROOT2(tc, frame, static_frame, { | |
@@ -267,7 +267,7 @@ static MVMFrame * allocate_unspecialized_frame(MVMThreadContext *tc, | |
/* If we have an environment, that needs allocating separately for | |
* heap-based frames. */ | |
if (env_size) { | |
- frame->env = MVM_fixed_size_alloc_zeroed(tc, tc->instance->fsa, env_size); | |
+ frame->env = MVM_calloc(1, env_size); | |
frame->allocd_env = env_size; | |
} | |
} | |
@@ -313,7 +313,7 @@ static MVMFrame * allocate_specialized_frame(MVMThreadContext *tc, | |
/* If we have an environment, that needs allocating separately for | |
* heap-based frames. */ | |
if (env_size) { | |
- frame->env = MVM_fixed_size_alloc_zeroed(tc, tc->instance->fsa, env_size); | |
+ frame->env = MVM_calloc(1, env_size); | |
frame->allocd_env = env_size; | |
} | |
} | |
@@ -652,8 +652,7 @@ MVMFrame * MVM_frame_move_to_heap(MVMThreadContext *tc, MVMFrame *frame) { | |
* out-live the callstack entry. */ | |
MVMuint32 env_size = cur_to_promote->allocd_env; | |
if (env_size) { | |
- MVMRegister *heap_env = MVM_fixed_size_alloc(tc, | |
- tc->instance->fsa, env_size); | |
+ MVMRegister *heap_env = MVM_malloc(env_size); | |
memcpy(heap_env, cur_to_promote->env, env_size); | |
cur_to_promote->env = heap_env; | |
} | |
@@ -1681,7 +1680,7 @@ MVMuint16 MVM_frame_lexical_primspec(MVMThreadContext *tc, MVMFrame *f, MVMStrin | |
* frame. This is used to hold data that only a handful of frames need. */ | |
MVMFrameExtra * MVM_frame_extra(MVMThreadContext *tc, MVMFrame *f) { | |
if (!f->extra) | |
- f->extra = MVM_fixed_size_alloc_zeroed(tc, tc->instance->fsa, sizeof(MVMFrameExtra)); | |
+ f->extra = MVM_calloc(1, sizeof(MVMFrameExtra)); | |
return f->extra; | |
} | |
diff --git src/core/index_hash_table.c src/core/index_hash_table.c | |
index ceeca8be5..bf6028293 100644 | |
--- src/core/index_hash_table.c | |
+++ src/core/index_hash_table.c | |
@@ -6,11 +6,8 @@ MVM_STATIC_INLINE void hash_demolish_internal(MVMThreadContext *tc, | |
struct MVMIndexHashTableControl *control) { | |
size_t allocated_items = MVM_index_hash_allocated_items(control); | |
size_t entries_size = MVM_hash_round_size_up(sizeof(struct MVMIndexHashEntry) * allocated_items); | |
- size_t metadata_size = MVM_hash_round_size_up(allocated_items + 1); | |
- size_t total_size | |
- = entries_size + sizeof(struct MVMIndexHashTableControl) + metadata_size; | |
char *start = (char *)control - entries_size; | |
- MVM_fixed_size_free(tc, tc->instance->fsa, total_size, start); | |
+ MVM_free(start); | |
} | |
/* Frees the entire contents of the hash, leaving you just the hashtable itself, | |
@@ -53,7 +50,7 @@ MVM_STATIC_INLINE struct MVMIndexHashTableControl *hash_allocate_common(MVMThrea | |
assert(total_size == MVM_hash_round_size_up(total_size)); | |
struct MVMIndexHashTableControl *control = | |
- (struct MVMIndexHashTableControl *) ((char *)MVM_fixed_size_alloc(tc, tc->instance->fsa, total_size) + entries_size); | |
+ (struct MVMIndexHashTableControl *) ((char *)MVM_malloc(total_size) + entries_size); | |
control->official_size_log2 = official_size_log2; | |
control->max_items = max_items; | |
diff --git src/core/index_hash_table_funcs.h src/core/index_hash_table_funcs.h | |
index d0f77ca95..f10850b5b 100644 | |
--- src/core/index_hash_table_funcs.h | |
+++ src/core/index_hash_table_funcs.h | |
@@ -78,7 +78,7 @@ MVM_STATIC_INLINE void MVM_index_hash_shallow_copy(MVMThreadContext *tc, | |
const char *start = (const char *)control - entries_size; | |
size_t total_size | |
= entries_size + sizeof(struct MVMIndexHashTableControl) + metadata_size; | |
- char *target = MVM_fixed_size_alloc(tc, tc->instance->fsa, total_size); | |
+ char *target = MVM_malloc(total_size); | |
memcpy(target, start, total_size); | |
dest->table = (struct MVMIndexHashTableControl *)(target + entries_size); | |
} | |
diff --git src/core/ptr_hash_table.c src/core/ptr_hash_table.c | |
index 20776e128..1e10aadfe 100644 | |
--- src/core/ptr_hash_table.c | |
+++ src/core/ptr_hash_table.c | |
@@ -6,11 +6,8 @@ MVM_STATIC_INLINE void hash_demolish_internal(MVMThreadContext *tc, | |
struct MVMPtrHashTableControl *control) { | |
size_t allocated_items = MVM_ptr_hash_allocated_items(control); | |
size_t entries_size = sizeof(struct MVMPtrHashEntry) * allocated_items; | |
- size_t metadata_size = MVM_hash_round_size_up(allocated_items + 1); | |
- size_t total_size | |
- = entries_size + sizeof(struct MVMPtrHashTableControl) + metadata_size; | |
char *start = (char *)control - entries_size; | |
- MVM_fixed_size_free(tc, tc->instance->fsa, total_size, start); | |
+ MVM_free(start); | |
} | |
/* Frees the entire contents of the hash, leaving you just the hashtable itself, | |
@@ -43,7 +40,7 @@ MVM_STATIC_INLINE struct MVMPtrHashTableControl *hash_allocate_common(MVMThreadC | |
assert(total_size == MVM_hash_round_size_up(total_size)); | |
struct MVMPtrHashTableControl *control = | |
- (struct MVMPtrHashTableControl *) ((char *)MVM_fixed_size_alloc(tc, tc->instance->fsa, total_size) + entries_size); | |
+ (struct MVMPtrHashTableControl *) ((char *)MVM_malloc(total_size) + entries_size); | |
control->official_size_log2 = official_size_log2; | |
control->max_items = max_items; | |
diff --git src/core/str_hash_table.c src/core/str_hash_table.c | |
index 69facf55f..35bdbd9e1 100644 | |
--- src/core/str_hash_table.c | |
+++ src/core/str_hash_table.c | |
@@ -29,18 +29,14 @@ MVMuint32 MVM_round_up_log_base2(MVMuint32 v) { | |
MVM_STATIC_INLINE void hash_demolish_internal(MVMThreadContext *tc, | |
struct MVMStrHashTableControl *control) { | |
if (control->cur_items == 0 && control->max_items == 0) { | |
- MVM_fixed_size_free(tc, tc->instance->fsa, sizeof(*control), control); | |
+ MVM_free(control); | |
return; | |
} | |
size_t allocated_items = MVM_str_hash_allocated_items(control); | |
size_t entries_size = control->entry_size * allocated_items; | |
- size_t metadata_size = MVM_hash_round_size_up(allocated_items + 1); | |
char *start = (char *)control - entries_size; | |
- | |
- size_t total_size | |
- = entries_size + sizeof(struct MVMStrHashTableControl) + metadata_size; | |
- MVM_fixed_size_free_at_safepoint(tc, tc->instance->fsa, total_size, start); | |
+ MVM_free_at_safepoint(tc, start); | |
} | |
/* Frees the entire contents of the hash, leaving you just the hashtable itself, | |
@@ -132,7 +128,7 @@ MVM_STATIC_INLINE struct MVMStrHashTableControl *hash_allocate_common(MVMThreadC | |
assert(total_size == MVM_hash_round_size_up(total_size)); | |
struct MVMStrHashTableControl *control = | |
- (struct MVMStrHashTableControl *) ((char *) MVM_fixed_size_alloc(tc, tc->instance->fsa, total_size) + entries_size); | |
+ (struct MVMStrHashTableControl *) ((char *) MVM_malloc(total_size) + entries_size); | |
control->official_size_log2 = official_size_log2; | |
control->max_items = max_items; | |
@@ -170,7 +166,7 @@ void MVM_str_hash_build(MVMThreadContext *tc, | |
struct MVMStrHashTableControl *control; | |
if (!entries) { | |
- control = MVM_fixed_size_alloc(tc, tc->instance->fsa, sizeof(*control)); | |
+ control = MVM_malloc(sizeof(*control)); | |
/* cur_items and max_items both 0 signals that we only allocated a | |
* control structure. */ | |
memset(control, 0, sizeof(*control)); | |
@@ -342,7 +338,7 @@ static struct MVMStrHashTableControl *maybe_grow_hash(MVMThreadContext *tc, | |
control->serial = 0; | |
control->last_delete_at = 0; | |
#endif | |
- MVM_fixed_size_free_at_safepoint(tc, tc->instance->fsa, sizeof(*control_orig), control_orig); | |
+ MVM_free_at_safepoint(tc, control_orig); | |
return control; | |
} | |
diff --git src/core/str_hash_table_funcs.h src/core/str_hash_table_funcs.h | |
index 39ceb3d04..cf1e427d9 100644 | |
--- src/core/str_hash_table_funcs.h | |
+++ src/core/str_hash_table_funcs.h | |
@@ -100,7 +100,7 @@ MVM_STATIC_INLINE void MVM_str_hash_shallow_copy(MVMThreadContext *tc, | |
MVM_oops(tc, "MVM_str_hash_shallow_copy called with a stale hashtable pointer"); | |
} | |
if (control->cur_items == 0 && control->max_items == 0) { | |
- struct MVMStrHashTableControl *empty = MVM_fixed_size_alloc(tc, tc->instance->fsa, sizeof(*empty)); | |
+ struct MVMStrHashTableControl *empty = MVM_malloc(sizeof(*empty)); | |
memcpy(empty, control, sizeof(*empty)); | |
dest->table = empty; | |
} else { | |
@@ -110,7 +110,7 @@ MVM_STATIC_INLINE void MVM_str_hash_shallow_copy(MVMThreadContext *tc, | |
const char *start = (const char *)control - entries_size; | |
size_t total_size | |
= entries_size + sizeof(struct MVMStrHashTableControl) + metadata_size; | |
- char *target = (char *) MVM_fixed_size_alloc(tc, tc->instance->fsa, total_size); | |
+ char *target = (char *) MVM_malloc(total_size); | |
memcpy(target, start, total_size); | |
dest->table = (struct MVMStrHashTableControl *)(target + entries_size); | |
} | |
diff --git src/core/threadcontext.c src/core/threadcontext.c | |
index e09e1025f..c107b168f 100644 | |
--- src/core/threadcontext.c | |
+++ src/core/threadcontext.c | |
@@ -31,9 +31,6 @@ MVMThreadContext * MVM_tc_create(MVMThreadContext *parent, MVMInstance *instance | |
/* Set up the second generation allocator. */ | |
tc->gen2 = MVM_gc_gen2_create(instance); | |
- /* The fixed size allocator also keeps pre-thread state. */ | |
- MVM_fixed_size_create_thread(tc); | |
- | |
/* Allocate a call stack for the thread. */ | |
MVM_callstack_init(tc); | |
@@ -80,7 +77,7 @@ void MVM_tc_destroy(MVMThreadContext *tc) { | |
while (tc->active_handlers) { | |
MVMActiveHandler *ah = tc->active_handlers; | |
tc->active_handlers = ah->next_handler; | |
- MVM_fixed_size_free(tc, tc->instance->fsa, sizeof(MVMActiveHandler), ah); | |
+ MVM_free(ah); | |
} | |
/* Free the native callback cache. Needs the fixed size allocator. */ | |
@@ -104,9 +101,6 @@ void MVM_tc_destroy(MVMThreadContext *tc) { | |
/* Destroy the second generation allocator. */ | |
MVM_gc_gen2_destroy(tc->instance, tc->gen2); | |
- /* Destory the per-thread fixed size allocator state. */ | |
- MVM_fixed_size_destroy_thread(tc); | |
- | |
/* Destroy the callstack, releasing all allocated memory. */ | |
MVM_callstack_destroy(tc); | |
diff --git src/core/uni_hash_table.c src/core/uni_hash_table.c | |
index a612a08b5..4c80e0012 100644 | |
--- src/core/uni_hash_table.c | |
+++ src/core/uni_hash_table.c | |
@@ -6,11 +6,8 @@ MVM_STATIC_INLINE void hash_demolish_internal(MVMThreadContext *tc, | |
struct MVMUniHashTableControl *control) { | |
size_t allocated_items = MVM_uni_hash_allocated_items(control); | |
size_t entries_size = sizeof(struct MVMUniHashEntry) * allocated_items; | |
- size_t metadata_size = MVM_hash_round_size_up(allocated_items + 1); | |
- size_t total_size | |
- = entries_size + sizeof(struct MVMUniHashTableControl) + metadata_size; | |
char *start = (char *)control - entries_size; | |
- MVM_fixed_size_free(tc, tc->instance->fsa, total_size, start); | |
+ MVM_free(start); | |
} | |
/* Frees the entire contents of the hash, leaving you just the hashtable itself, | |
@@ -43,7 +40,7 @@ MVM_STATIC_INLINE struct MVMUniHashTableControl *hash_allocate_common(MVMThreadC | |
assert(total_size == MVM_hash_round_size_up(total_size)); | |
struct MVMUniHashTableControl *control = | |
- (struct MVMUniHashTableControl *) ((char *)MVM_fixed_size_alloc(tc, tc->instance->fsa, total_size) + entries_size); | |
+ (struct MVMUniHashTableControl *) ((char *)MVM_malloc(total_size) + entries_size); | |
control->official_size_log2 = official_size_log2; | |
control->max_items = max_items; | |
diff --git src/debug/debugserver.c src/debug/debugserver.c | |
index 14c5fced1..a94cb2531 100644 | |
--- src/debug/debugserver.c | |
+++ src/debug/debugserver.c | |
@@ -187,7 +187,7 @@ MVM_PUBLIC void MVM_debugserver_register_line(MVMThreadContext *tc, char *filena | |
if (table->files_used++ >= table->files_alloc) { | |
MVMuint32 old_alloc = table->files_alloc; | |
table->files_alloc *= 2; | |
- table->files = MVM_fixed_size_realloc_at_safepoint(tc, tc->instance->fsa, table->files, | |
+ table->files = MVM_realloc_at_safepoint(tc, table->files, | |
old_alloc * sizeof(MVMDebugServerBreakpointFileTable), | |
table->files_alloc * sizeof(MVMDebugServerBreakpointFileTable)); | |
memset((char *)(table->files + old_alloc), 0, (table->files_alloc - old_alloc) * sizeof(MVMDebugServerBreakpointFileTable) - 1); | |
@@ -206,7 +206,7 @@ MVM_PUBLIC void MVM_debugserver_register_line(MVMThreadContext *tc, char *filena | |
found->filename_length = filename_len; | |
found->lines_active_alloc = line_no + 32; | |
- found->lines_active = MVM_fixed_size_alloc_zeroed(tc, tc->instance->fsa, found->lines_active_alloc * sizeof(MVMuint8)); | |
+ found->lines_active = MVM_calloc(found->lines_active_alloc, sizeof(MVMuint8)); | |
*file_idx = table->files_used - 1; | |
@@ -220,7 +220,7 @@ MVM_PUBLIC void MVM_debugserver_register_line(MVMThreadContext *tc, char *filena | |
found->lines_active_alloc *= 2; | |
if (tc->instance->debugserver->debugspam_protocol) | |
fprintf(stderr, "increasing line number table for %s from %u to %u slots\n", found->filename, old_size, found->lines_active_alloc); | |
- found->lines_active = MVM_fixed_size_realloc_at_safepoint(tc, tc->instance->fsa, | |
+ found->lines_active = MVM_realloc_at_safepoint(tc, | |
found->lines_active, old_size, found->lines_active_alloc); | |
memset((char *)found->lines_active + old_size, 0, found->lines_active_alloc - old_size - 1); | |
} | |
@@ -1052,13 +1052,12 @@ void MVM_debugserver_add_breakpoint(MVMThreadContext *tc, cmp_ctx_t *ctx, reques | |
* the breakpoint information already exists */ | |
if (found->breakpoints_alloc == 0) { | |
found->breakpoints_alloc = 4; | |
- found->breakpoints = MVM_fixed_size_alloc_zeroed(tc, tc->instance->fsa, | |
- found->breakpoints_alloc * sizeof(MVMDebugServerBreakpointInfo)); | |
+ found->breakpoints = MVM_calloc(found->breakpoints_alloc, sizeof(MVMDebugServerBreakpointInfo)); | |
} | |
if (found->breakpoints_used++ >= found->breakpoints_alloc) { | |
MVMuint32 old_alloc = found->breakpoints_alloc; | |
found->breakpoints_alloc *= 2; | |
- found->breakpoints = MVM_fixed_size_realloc_at_safepoint(tc, tc->instance->fsa, found->breakpoints, | |
+ found->breakpoints = MVM_realloc_at_safepoint(tc, found->breakpoints, | |
old_alloc * sizeof(MVMDebugServerBreakpointInfo), | |
found->breakpoints_alloc * sizeof(MVMDebugServerBreakpointInfo)); | |
if (tc->instance->debugserver->debugspam_protocol) | |
@@ -3465,7 +3464,7 @@ MVM_PUBLIC void MVM_debugserver_init(MVMThreadContext *tc, MVMuint32 port) { | |
debugserver->breakpoints->files_alloc = 32; | |
debugserver->breakpoints->files_used = 0; | |
debugserver->breakpoints->files = | |
- MVM_fixed_size_alloc_zeroed(tc, vm->fsa, debugserver->breakpoints->files_alloc * sizeof(MVMDebugServerBreakpointFileTable)); | |
+ MVM_calloc(debugserver->breakpoints->files_alloc, sizeof(MVMDebugServerBreakpointFileTable)); | |
debugserver->event_id = 2; | |
debugserver->port = port; | |
diff --git src/disp/inline_cache.c src/disp/inline_cache.c | |
index 64683d88b..66dc7c645 100644 | |
--- src/disp/inline_cache.c | |
+++ src/disp/inline_cache.c | |
@@ -26,8 +26,7 @@ static MVMObject * getlexstatic_initial(MVMThreadContext *tc, | |
/* Set up result node and try to install it. */ | |
MVMStaticFrame *sf = tc->cur_frame->static_info; | |
- MVMDispInlineCacheEntryResolvedGetLexStatic *new_entry = MVM_fixed_size_alloc(tc, | |
- tc->instance->fsa, sizeof(MVMDispInlineCacheEntryResolvedGetLexStatic)); | |
+ MVMDispInlineCacheEntryResolvedGetLexStatic *new_entry = MVM_malloc(sizeof(MVMDispInlineCacheEntryResolvedGetLexStatic)); | |
new_entry->base.run_getlexstatic = getlexstatic_resolved; | |
MVM_ASSIGN_REF(tc, &(sf->common.header), new_entry->result, result); | |
try_update_cache_entry(tc, entry_ptr, &unlinked_getlexstatic, &(new_entry->base)); | |
@@ -299,8 +298,7 @@ MVMuint32 MVM_disp_inline_cache_transition(MVMThreadContext *tc, | |
/* Now go by the initial state. */ | |
if (kind == MVM_INLINE_CACHE_KIND_INITIAL) { | |
/* Unlinked -> monomorphic transition. */ | |
- MVMDispInlineCacheEntryMonomorphicDispatch *new_entry = MVM_fixed_size_alloc(tc, | |
- tc->instance->fsa, sizeof(MVMDispInlineCacheEntryMonomorphicDispatch)); | |
+ MVMDispInlineCacheEntryMonomorphicDispatch *new_entry = MVM_malloc(sizeof(MVMDispInlineCacheEntryMonomorphicDispatch)); | |
new_entry->base.run_dispatch = dispatch_monomorphic; | |
new_entry->dp = dp; | |
gc_barrier_program(tc, root, dp); | |
@@ -309,8 +307,7 @@ MVMuint32 MVM_disp_inline_cache_transition(MVMThreadContext *tc, | |
else if (kind == MVM_INLINE_CACHE_KIND_INITIAL_FLATTENING) { | |
/* Unlinked flattening -> monomorphic flattening transition. Since we shall | |
* retain the callsite to assert against, we force interning of it. */ | |
- MVMDispInlineCacheEntryMonomorphicDispatchFlattening *new_entry = MVM_fixed_size_alloc(tc, | |
- tc->instance->fsa, sizeof(MVMDispInlineCacheEntryMonomorphicDispatchFlattening)); | |
+ MVMDispInlineCacheEntryMonomorphicDispatchFlattening *new_entry = MVM_malloc(sizeof(MVMDispInlineCacheEntryMonomorphicDispatchFlattening)); | |
new_entry->base.run_dispatch = dispatch_monomorphic_flattening; | |
if (!initial_cs->is_interned) | |
MVM_callsite_intern(tc, &initial_cs, 1, 0); | |
@@ -322,12 +319,10 @@ MVMuint32 MVM_disp_inline_cache_transition(MVMThreadContext *tc, | |
} | |
else if (kind == MVM_INLINE_CACHE_KIND_MONOMORPHIC_DISPATCH) { | |
/* Monomorphic -> polymorphic transition. */ | |
- MVMDispInlineCacheEntryPolymorphicDispatch *new_entry = MVM_fixed_size_alloc(tc, | |
- tc->instance->fsa, sizeof(MVMDispInlineCacheEntryPolymorphicDispatch)); | |
+ MVMDispInlineCacheEntryPolymorphicDispatch *new_entry = MVM_malloc(sizeof(MVMDispInlineCacheEntryPolymorphicDispatch)); | |
new_entry->base.run_dispatch = dispatch_polymorphic; | |
new_entry->num_dps = 2; | |
- new_entry->dps = MVM_fixed_size_alloc(tc, tc->instance->fsa, | |
- new_entry->num_dps * sizeof(MVMDispProgram *)); | |
+ new_entry->dps = MVM_malloc(new_entry->num_dps * sizeof(MVMDispProgram *)); | |
new_entry->dps[0] = ((MVMDispInlineCacheEntryMonomorphicDispatch *)entry)->dp; | |
new_entry->dps[1] = dp; | |
set_max_temps(new_entry); | |
@@ -336,21 +331,18 @@ MVMuint32 MVM_disp_inline_cache_transition(MVMThreadContext *tc, | |
} | |
else if (kind == MVM_INLINE_CACHE_KIND_MONOMORPHIC_DISPATCH_FLATTENING) { | |
/* Monomorphic flattening -> polymorphic flattening transition. */ | |
- MVMDispInlineCacheEntryPolymorphicDispatchFlattening *new_entry = MVM_fixed_size_alloc(tc, | |
- tc->instance->fsa, sizeof(MVMDispInlineCacheEntryPolymorphicDispatchFlattening)); | |
+ MVMDispInlineCacheEntryPolymorphicDispatchFlattening *new_entry = MVM_malloc(sizeof(MVMDispInlineCacheEntryPolymorphicDispatchFlattening)); | |
new_entry->base.run_dispatch = dispatch_polymorphic_flattening; | |
new_entry->num_dps = 2; | |
- new_entry->flattened_css = MVM_fixed_size_alloc(tc, tc->instance->fsa, | |
- new_entry->num_dps * sizeof(MVMCallsite *)); | |
+ new_entry->flattened_css = MVM_malloc(new_entry->num_dps * sizeof(MVMCallsite *)); | |
if (!initial_cs->is_interned) | |
MVM_callsite_intern(tc, &initial_cs, 1, 0); | |
new_entry->flattened_css[0] = ((MVMDispInlineCacheEntryMonomorphicDispatchFlattening *)entry) | |
->flattened_cs; | |
new_entry->flattened_css[1] = initial_cs; | |
- new_entry->dps = MVM_fixed_size_alloc(tc, tc->instance->fsa, | |
- new_entry->num_dps * sizeof(MVMDispProgram *)); | |
+ new_entry->dps = MVM_malloc(new_entry->num_dps * sizeof(MVMDispProgram *)); | |
new_entry->dps[0] = ((MVMDispInlineCacheEntryMonomorphicDispatchFlattening *)entry)->dp; | |
new_entry->dps[1] = dp; | |
@@ -372,12 +364,10 @@ MVMuint32 MVM_disp_inline_cache_transition(MVMThreadContext *tc, | |
#endif | |
return 0; | |
} | |
- MVMDispInlineCacheEntryPolymorphicDispatch *new_entry = MVM_fixed_size_alloc(tc, | |
- tc->instance->fsa, sizeof(MVMDispInlineCacheEntryPolymorphicDispatch)); | |
+ MVMDispInlineCacheEntryPolymorphicDispatch *new_entry = MVM_malloc(sizeof(MVMDispInlineCacheEntryPolymorphicDispatch)); | |
new_entry->base.run_dispatch = dispatch_polymorphic; | |
new_entry->num_dps = prev_entry->num_dps + 1; | |
- new_entry->dps = MVM_fixed_size_alloc(tc, tc->instance->fsa, | |
- new_entry->num_dps * sizeof(MVMDispProgram *)); | |
+ new_entry->dps = MVM_malloc(new_entry->num_dps * sizeof(MVMDispProgram *)); | |
memcpy(new_entry->dps, prev_entry->dps, prev_entry->num_dps * sizeof(MVMDispProgram *)); | |
new_entry->dps[prev_entry->num_dps] = dp; | |
set_max_temps(new_entry); | |
@@ -398,21 +388,18 @@ MVMuint32 MVM_disp_inline_cache_transition(MVMThreadContext *tc, | |
#endif | |
return 0; | |
} | |
- MVMDispInlineCacheEntryPolymorphicDispatchFlattening *new_entry = MVM_fixed_size_alloc(tc, | |
- tc->instance->fsa, sizeof(MVMDispInlineCacheEntryPolymorphicDispatchFlattening)); | |
+ MVMDispInlineCacheEntryPolymorphicDispatchFlattening *new_entry = MVM_malloc(sizeof(MVMDispInlineCacheEntryPolymorphicDispatchFlattening)); | |
new_entry->base.run_dispatch = dispatch_polymorphic_flattening; | |
new_entry->num_dps = prev_entry->num_dps + 1; | |
- new_entry->flattened_css = MVM_fixed_size_alloc(tc, tc->instance->fsa, | |
- new_entry->num_dps * sizeof(MVMCallsite *)); | |
+ new_entry->flattened_css = MVM_malloc(new_entry->num_dps * sizeof(MVMCallsite *)); | |
memcpy(new_entry->flattened_css, prev_entry->flattened_css, | |
prev_entry->num_dps * sizeof(MVMCallsite * *)); | |
if (!initial_cs->is_interned) | |
MVM_callsite_intern(tc, &initial_cs, 1, 0); | |
new_entry->flattened_css[prev_entry->num_dps] = initial_cs; | |
- new_entry->dps = MVM_fixed_size_alloc(tc, tc->instance->fsa, | |
- new_entry->num_dps * sizeof(MVMDispProgram *)); | |
+ new_entry->dps = MVM_malloc(new_entry->num_dps * sizeof(MVMDispProgram *)); | |
memcpy(new_entry->dps, prev_entry->dps, prev_entry->num_dps * sizeof(MVMDispProgram *)); | |
new_entry->dps[prev_entry->num_dps] = dp; | |
@@ -610,8 +597,7 @@ void cleanup_entry(MVMThreadContext *tc, MVMDispInlineCacheEntry *entry, MVMuint | |
/* Never free initial getlexstatic state. */ | |
} | |
else if (entry->run_getlexstatic == getlexstatic_resolved) { | |
- MVM_fixed_size_free_at_safepoint(tc, tc->instance->fsa, | |
- sizeof(MVMDispInlineCacheEntryResolvedGetLexStatic), entry); | |
+ MVM_free_at_safepoint(tc, entry); | |
} | |
else if (entry->run_dispatch == dispatch_initial || | |
entry->run_dispatch == dispatch_initial_flattening) { | |
@@ -620,14 +606,12 @@ void cleanup_entry(MVMThreadContext *tc, MVMDispInlineCacheEntry *entry, MVMuint | |
else if (entry->run_dispatch == dispatch_monomorphic) { | |
if (destroy_dps) | |
MVM_disp_program_destroy(tc, ((MVMDispInlineCacheEntryMonomorphicDispatch *)entry)->dp); | |
- MVM_fixed_size_free_at_safepoint(tc, tc->instance->fsa, | |
- sizeof(MVMDispInlineCacheEntryMonomorphicDispatch), entry); | |
+ MVM_free_at_safepoint(tc, entry); | |
} | |
else if (entry->run_dispatch == dispatch_monomorphic_flattening) { | |
if (destroy_dps) | |
MVM_disp_program_destroy(tc, ((MVMDispInlineCacheEntryMonomorphicDispatchFlattening *)entry)->dp); | |
- MVM_fixed_size_free_at_safepoint(tc, tc->instance->fsa, | |
- sizeof(MVMDispInlineCacheEntryMonomorphicDispatchFlattening), entry); | |
+ MVM_free_at_safepoint(tc, entry); | |
} | |
else if (entry->run_dispatch == dispatch_polymorphic) { | |
MVMuint32 num_dps = ((MVMDispInlineCacheEntryPolymorphicDispatch *)entry)->num_dps; | |
@@ -635,11 +619,8 @@ void cleanup_entry(MVMThreadContext *tc, MVMDispInlineCacheEntry *entry, MVMuint | |
if (destroy_dps) | |
for (dpi = 0; dpi < num_dps; dpi++) | |
MVM_disp_program_destroy(tc, ((MVMDispInlineCacheEntryPolymorphicDispatch *)entry)->dps[dpi]); | |
- MVM_fixed_size_free_at_safepoint(tc, tc->instance->fsa, | |
- num_dps * sizeof(MVMDispProgram *), | |
- ((MVMDispInlineCacheEntryPolymorphicDispatch *)entry)->dps); | |
- MVM_fixed_size_free_at_safepoint(tc, tc->instance->fsa, | |
- sizeof(MVMDispInlineCacheEntryPolymorphicDispatch), entry); | |
+ MVM_free_at_safepoint(tc, ((MVMDispInlineCacheEntryPolymorphicDispatch *)entry)->dps); | |
+ MVM_free_at_safepoint(tc, entry); | |
} | |
else if (entry->run_dispatch == dispatch_polymorphic_flattening) { | |
MVMuint32 num_dps = ((MVMDispInlineCacheEntryPolymorphicDispatchFlattening *)entry)->num_dps; | |
@@ -647,14 +628,9 @@ void cleanup_entry(MVMThreadContext *tc, MVMDispInlineCacheEntry *entry, MVMuint | |
if (destroy_dps) | |
for (dpi = 0; dpi < num_dps; dpi++) | |
MVM_disp_program_destroy(tc, ((MVMDispInlineCacheEntryPolymorphicDispatchFlattening *)entry)->dps[dpi]); | |
- MVM_fixed_size_free_at_safepoint(tc, tc->instance->fsa, | |
- num_dps * sizeof(MVMCallsite *), | |
- ((MVMDispInlineCacheEntryPolymorphicDispatchFlattening *)entry)->flattened_css); | |
- MVM_fixed_size_free_at_safepoint(tc, tc->instance->fsa, | |
- num_dps * sizeof(MVMDispProgram *), | |
- ((MVMDispInlineCacheEntryPolymorphicDispatchFlattening *)entry)->dps); | |
- MVM_fixed_size_free_at_safepoint(tc, tc->instance->fsa, | |
- sizeof(MVMDispInlineCacheEntryPolymorphicDispatchFlattening), entry); | |
+ MVM_free_at_safepoint(tc, ((MVMDispInlineCacheEntryPolymorphicDispatchFlattening *)entry)->flattened_css); | |
+ MVM_free_at_safepoint(tc, ((MVMDispInlineCacheEntryPolymorphicDispatchFlattening *)entry)->dps); | |
+ MVM_free_at_safepoint(tc, entry); | |
} | |
else { | |
MVM_oops(tc, "Unimplemented cleanup_entry case"); | |
diff --git src/disp/program.c src/disp/program.c | |
index 09234e28d..50427804e 100644 | |
--- src/disp/program.c | |
+++ src/disp/program.c | |
@@ -1702,8 +1702,7 @@ MVMObject * resume_init_capture(MVMThreadContext *tc, MVMDispResumptionData *res | |
MVMDispProgramResumption *resumption = resume_data->resumption; | |
MVMCallsite *callsite = resumption->init_callsite; | |
rec_resumption->initial_resume_args = callsite->flag_count | |
- ? MVM_fixed_size_alloc(tc, tc->instance->fsa, | |
- callsite->flag_count * sizeof(MVMRegister)) | |
+ ? MVM_malloc(callsite->flag_count * sizeof(MVMRegister)) | |
: NULL; | |
for (MVMuint16 i = 0; i < callsite->flag_count; i++) | |
rec_resumption->initial_resume_args[i] = MVM_disp_resume_get_init_arg(tc, | |
@@ -2701,8 +2700,7 @@ static void produce_resumption_init_values(MVMThreadContext *tc, compile_state * | |
/* Allocate storage for the resumption init value sources according to | |
* the callsite size. */ | |
MVMuint16 arg_count = init_capture->body.callsite->flag_count; | |
- res->init_values = MVM_fixed_size_alloc(tc, tc->instance->fsa, | |
- arg_count * sizeof(MVMDispProgramResumptionInitValue)); | |
+ res->init_values = MVM_malloc(arg_count * sizeof(MVMDispProgramResumptionInitValue)); | |
/* Go through the capture and source each value. */ | |
for (MVMuint16 i = 0; i < arg_count; i++) { | |
@@ -3785,10 +3783,7 @@ void MVM_disp_program_destroy(MVMThreadContext *tc, MVMDispProgram *dp) { | |
for (MVMuint32 i = 0; i < dp->num_resumptions; i++) { | |
MVMDispProgramResumption *resumption = &(dp->resumptions[i]); | |
if (resumption->init_values) { | |
- MVMuint16 arg_count = resumption->init_callsite->flag_count; | |
- MVM_fixed_size_free(tc, tc->instance->fsa, | |
- arg_count * sizeof(MVMDispProgramResumptionInitValue), | |
- resumption->init_values); | |
+ MVM_free(resumption->init_values); | |
} | |
} | |
MVM_free(dp->resumptions); | |
@@ -3813,10 +3808,7 @@ void MVM_disp_program_recording_destroy(MVMThreadContext *tc, MVMDispProgramReco | |
for (i = 0; i < MVM_VECTOR_ELEMS(rec->resumptions); i++) { | |
MVMDispProgramRecordingResumption *resumption = &(rec->resumptions[i]); | |
if (resumption->initial_resume_args) { | |
- MVMCallsite *init_callsite = ((MVMCapture *)resumption->initial_resume_capture.capture)->body.callsite; | |
- MVM_fixed_size_free(tc, tc->instance->fsa, | |
- init_callsite->flag_count * sizeof(MVMRegister), | |
- resumption->initial_resume_args); | |
+ MVM_free(resumption->initial_resume_args); | |
} | |
destroy_recording_capture(tc, &(resumption->initial_resume_capture)); | |
} | |
diff --git src/disp/registry.c src/disp/registry.c | |
index 9ca3e2af8..ddefd6a77 100644 | |
--- src/disp/registry.c | |
+++ src/disp/registry.c | |
@@ -2,12 +2,10 @@ | |
/* Allocates a dispatcher table. */ | |
MVMDispRegistryTable * allocate_table(MVMThreadContext *tc, MVMuint32 num_entries) { | |
- MVMDispRegistryTable *table = MVM_fixed_size_alloc_zeroed(tc, tc->instance->fsa, | |
- sizeof(MVMDispRegistryTable)); | |
+ MVMDispRegistryTable *table = MVM_calloc(1, sizeof(MVMDispRegistryTable)); | |
table->num_dispatchers = 0; | |
table->alloc_dispatchers = num_entries; | |
- table->dispatchers = MVM_fixed_size_alloc_zeroed(tc, tc->instance->fsa, | |
- table->alloc_dispatchers * sizeof(MVMDispDefinition *)); | |
+ table->dispatchers = MVM_calloc(table->alloc_dispatchers, sizeof(MVMDispDefinition *)); | |
return table; | |
} | |
@@ -37,11 +35,8 @@ static void grow_registry_if_needed(MVMThreadContext *tc) { | |
reg->table = new_table; | |
/* Free the previous table at the next safepoint. */ | |
- MVM_fixed_size_free_at_safepoint(tc, tc->instance->fsa, | |
- current_table->alloc_dispatchers * sizeof(MVMDispDefinition *), | |
- current_table->dispatchers); | |
- MVM_fixed_size_free_at_safepoint(tc, tc->instance->fsa, | |
- sizeof(MVMDispRegistryTable), current_table); | |
+ MVM_free_at_safepoint(tc, current_table->dispatchers); | |
+ MVM_free_at_safepoint(tc, current_table); | |
} | |
} | |
@@ -54,7 +49,7 @@ static void register_internal(MVMThreadContext *tc, MVMString *id, MVMObject *di | |
MVMDispRegistry *reg = &(tc->instance->disp_registry); | |
/* Allocate and populate the dispatch definition. */ | |
- MVMDispDefinition *def = MVM_fixed_size_alloc(tc, tc->instance->fsa, sizeof(MVMDispDefinition)); | |
+ MVMDispDefinition *def = MVM_malloc(sizeof(MVMDispDefinition)); | |
def->id = id; | |
def->dispatch = dispatch; | |
def->resume = resume != NULL && IS_CONCRETE(resume) ? resume : NULL; | |
@@ -180,11 +175,8 @@ void MVM_disp_registry_destroy(MVMThreadContext *tc) { | |
MVMuint32 i; | |
for (i = 0; i < table->alloc_dispatchers; i++) | |
if (table->dispatchers[i]) | |
- MVM_fixed_size_free(tc, tc->instance->fsa, sizeof(MVMDispDefinition), | |
- table->dispatchers[i]); | |
- MVM_fixed_size_free(tc, tc->instance->fsa, | |
- table->alloc_dispatchers * sizeof(MVMDispDefinition *), | |
- table->dispatchers); | |
- MVM_fixed_size_free(tc, tc->instance->fsa, sizeof(MVMDispRegistryTable), table); | |
+ MVM_free(table->dispatchers[i]); | |
+ MVM_free(table->dispatchers); | |
+ MVM_free(table); | |
uv_mutex_destroy(®->mutex_update); | |
} | |
diff --git src/disp/resume.c src/disp/resume.c | |
index 1f60c8e4f..84ebbfa79 100644 | |
--- src/disp/resume.c | |
+++ src/disp/resume.c | |
@@ -33,7 +33,7 @@ static MVMuint32 setup_resumption(MVMThreadContext *tc, MVMDispResumptionData *d | |
/* For the innermost (or only) one, we write into the record. | |
* For more, we need to allocate. */ | |
MVMDispResumptionState *target = prev | |
- ? MVM_fixed_size_alloc(tc, tc->instance->fsa, sizeof(MVMDispResumptionState)) | |
+ ? MVM_malloc(sizeof(MVMDispResumptionState)) | |
: state; | |
target->disp = dp->resumptions[i].disp; | |
target->state = tc->instance->VMNull; | |
@@ -373,7 +373,7 @@ void MVM_disp_resume_destroy_resumption_state(MVMThreadContext *tc, | |
MVMDispResumptionState *current = res_state->next; | |
while (current) { | |
MVMDispResumptionState *next = current->next; | |
- MVM_fixed_size_free(tc, tc->instance->fsa, sizeof(MVMDispResumptionState), current); | |
+ MVM_free(current); | |
current = next; | |
} | |
} | |
diff --git src/gc/orchestrate.c src/gc/orchestrate.c | |
index 4d58423e6..447cc15ca 100644 | |
--- src/gc/orchestrate.c | |
+++ src/gc/orchestrate.c | |
@@ -187,8 +187,7 @@ static void finish_gc(MVMThreadContext *tc, MVMuint8 gen, MVMuint8 is_coordinato | |
MVM_profile_heap_take_snapshot(tc); | |
GCDEBUG_LOG(tc, MVM_GC_DEBUG_ORCHESTRATE, | |
- "Thread %d run %d : Co-ordinator handling fixed-size allocator safepoint frees\n"); | |
- MVM_fixed_size_safepoint(tc, tc->instance->fsa); | |
+ "Thread %d run %d : Co-ordinator handling allocator safepoint frees\n"); | |
MVM_alloc_safepoint(tc); | |
GCDEBUG_LOG(tc, MVM_GC_DEBUG_ORCHESTRATE, | |
"Thread %d run %d : Co-ordinator signalling in-trays clear\n"); | |
diff --git src/moar.c src/moar.c | |
index 13df05e57..bc76f5a5c 100644 | |
--- src/moar.c | |
+++ src/moar.c | |
@@ -157,9 +157,6 @@ MVMInstance * MVM_vm_create_instance(void) { | |
/* Safe point free list. */ | |
init_mutex(instance->mutex_free_at_safepoint, "safepoint free list"); | |
- /* Create fixed size allocator. */ | |
- instance->fsa = MVM_fixed_size_create(instance->main_thread); | |
- | |
/* Set up REPR registry mutex. */ | |
init_mutex(instance->mutex_repr_registry, "REPR registry"); | |
MVM_index_hash_build(instance->main_thread, &instance->repr_hash, MVM_REPR_CORE_COUNT); | |
@@ -749,9 +746,6 @@ void MVM_vm_destroy_instance(MVMInstance *instance) { | |
MVM_tc_destroy(instance->main_thread); | |
uv_mutex_destroy(&instance->mutex_threads); | |
- /* Clean up fixed size allocator */ | |
- MVM_fixed_size_destroy(instance->fsa); | |
- | |
uv_mutex_destroy(&instance->subscriptions.mutex_event_subscription); | |
/* Clear up VM instance memory. */ | |
diff --git src/moar.h src/moar.h | |
index f31b03033..81d694fa4 100644 | |
--- src/moar.h | |
+++ src/moar.h | |
@@ -138,13 +138,12 @@ MVM_PUBLIC MVMint32 MVM_jit_support(void); | |
#include "disp/inline_cache.h" | |
#include "core/instance.h" | |
#include "core/interp.h" | |
-#include "core/fixedsizealloc.h" | |
#include "core/callsite.h" | |
+#include "core/alloc.h" | |
#include "core/args.h" | |
#include "disp/program.h" | |
#include "disp/syscall.h" | |
#include "disp/resume.h" | |
-#include "core/alloc.h" | |
#include "core/frame.h" | |
#include "core/callstack.h" | |
#include "core/validation.h" | |
diff --git src/profiler/instrument.c src/profiler/instrument.c | |
index 8a4563c25..3c28221fe 100644 | |
--- src/profiler/instrument.c | |
+++ src/profiler/instrument.c | |
@@ -1023,12 +1023,7 @@ void MVM_profile_instrumented_free_data(MVMThreadContext *tc) { | |
MVM_VECTOR_DESTROY(ptd->staticframe_array); | |
MVM_VECTOR_DESTROY(ptd->type_array); | |
for (MVMuint32 i = 0; i < ptd->num_gcs; i++) | |
- MVM_fixed_size_free( | |
- tc, | |
- tc->instance->fsa, | |
- ptd->gcs[i].alloc_dealloc * sizeof(MVMProfileDeallocationCount), | |
- ptd->gcs[i].deallocs | |
- ); | |
+ MVM_free(ptd->gcs[i].deallocs); | |
MVM_free(ptd->gcs); | |
MVM_free(ptd); | |
tc->prof_data = NULL; | |
diff --git src/profiler/log.c src/profiler/log.c | |
index e24414d45..5797c0520 100644 | |
--- src/profiler/log.c | |
+++ src/profiler/log.c | |
@@ -368,17 +368,14 @@ void log_one_allocation(MVMThreadContext *tc, MVMObject *obj, MVMProfileCallNode | |
/* No entry; create one. */ | |
if (pcn->num_alloc == pcn->alloc_alloc) { | |
- size_t old_alloc = pcn->alloc_alloc; | |
if (pcn->alloc_alloc == 0) { | |
pcn->alloc_alloc++; | |
- pcn->alloc = MVM_fixed_size_alloc(tc, tc->instance->fsa, | |
- pcn->alloc_alloc * sizeof(MVMProfileAllocationCount)); | |
+ pcn->alloc = MVM_malloc(pcn->alloc_alloc * sizeof(MVMProfileAllocationCount)); | |
} | |
else { | |
pcn->alloc_alloc *= 2; | |
- pcn->alloc = MVM_fixed_size_realloc(tc, tc->instance->fsa, | |
+ pcn->alloc = MVM_realloc( | |
pcn->alloc, | |
- old_alloc * sizeof(MVMProfileAllocationCount), | |
pcn->alloc_alloc * sizeof(MVMProfileAllocationCount)); | |
} | |
} | |
@@ -459,17 +456,14 @@ void MVM_profiler_log_gc_deallocate(MVMThreadContext *tc, MVMObject *object) { | |
/* No entry; create one. */ | |
if (pgc->num_dealloc == pgc->alloc_dealloc) { | |
- size_t old_alloc = pgc->alloc_dealloc; | |
if (pgc->alloc_dealloc == 0) { | |
pgc->alloc_dealloc++; | |
- pgc->deallocs = MVM_fixed_size_alloc(tc, tc->instance->fsa, | |
- pgc->alloc_dealloc * sizeof(MVMProfileDeallocationCount)); | |
+ pgc->deallocs = MVM_malloc(pgc->alloc_dealloc * sizeof(MVMProfileDeallocationCount)); | |
} | |
else { | |
pgc->alloc_dealloc *= 2; | |
- pgc->deallocs = MVM_fixed_size_realloc(tc, tc->instance->fsa, | |
+ pgc->deallocs = MVM_realloc( | |
pgc->deallocs, | |
- old_alloc * sizeof(MVMProfileDeallocationCount), | |
pgc->alloc_dealloc * sizeof(MVMProfileDeallocationCount)); | |
} | |
} | |
diff --git src/spesh/arg_guard.c src/spesh/arg_guard.c | |
index 299d7a697..80a82072a 100644 | |
--- src/spesh/arg_guard.c | |
+++ src/spesh/arg_guard.c | |
@@ -50,7 +50,7 @@ static MVMSpeshArgGuard * allocate_tree(MVMThreadContext *tc, MVMuint32 total_no | |
/* Allocate as a single blob of memory from the FSA. */ | |
size_t node_size = total_nodes * sizeof(MVMSpeshArgGuardNode); | |
size_t size = sizeof(MVMSpeshArgGuard) + node_size; | |
- MVMSpeshArgGuard *tree = MVM_fixed_size_alloc(tc, tc->instance->fsa, size); | |
+ MVMSpeshArgGuard *tree = MVM_malloc(size); | |
tree->nodes = (MVMSpeshArgGuardNode *)((char *)tree + sizeof(MVMSpeshArgGuard)); | |
tree->used_nodes = 0; | |
tree->num_nodes = total_nodes; | |
@@ -630,12 +630,10 @@ void MVM_spesh_arg_guard_gc_describe(MVMThreadContext *tc, MVMHeapSnapshotState | |
* to zero, the memory is freed immediately. */ | |
void MVM_spesh_arg_guard_destroy(MVMThreadContext *tc, MVMSpeshArgGuard *ag, MVMuint32 safe) { | |
if (ag) { | |
- size_t total_size = sizeof(MVMSpeshArgGuard) + | |
- ag->num_nodes * sizeof(MVMSpeshArgGuardNode); | |
if (safe) | |
- MVM_fixed_size_free_at_safepoint(tc, tc->instance->fsa, total_size, ag); | |
+ MVM_free_at_safepoint(tc, ag); | |
else | |
- MVM_fixed_size_free(tc, tc->instance->fsa, total_size, ag); | |
+ MVM_free(ag); | |
} | |
} | |
diff --git src/spesh/inline.c src/spesh/inline.c | |
index 4978b4955..4d1d6dd36 100644 | |
--- src/spesh/inline.c | |
+++ src/spesh/inline.c | |
@@ -24,13 +24,11 @@ static void demand_extop(MVMThreadContext *tc, MVMCompUnit *target_cu, | |
if (extops[i].info == info) { | |
MVMuint32 orig_size = target_cu->body.num_extops * sizeof(MVMExtOpRecord); | |
MVMuint32 new_size = (target_cu->body.num_extops + 1) * sizeof(MVMExtOpRecord); | |
- MVMExtOpRecord *new_extops = MVM_fixed_size_alloc(tc, | |
- tc->instance->fsa, new_size); | |
+ MVMExtOpRecord *new_extops = MVM_malloc(new_size); | |
memcpy(new_extops, target_cu->body.extops, orig_size); | |
memcpy(&new_extops[target_cu->body.num_extops], &extops[i], sizeof(MVMExtOpRecord)); | |
if (target_cu->body.extops) | |
- MVM_fixed_size_free_at_safepoint(tc, tc->instance->fsa, orig_size, | |
- target_cu->body.extops); | |
+ MVM_free_at_safepoint(tc, target_cu->body.extops); | |
target_cu->body.extops = new_extops; | |
target_cu->body.num_extops++; | |
uv_mutex_unlock(target_cu->body.inline_tweak_mutex); | |
diff --git src/strings/nfg.c src/strings/nfg.c | |
index 66318ef7d..854727c51 100644 | |
--- src/strings/nfg.c | |
+++ src/strings/nfg.c | |
@@ -43,7 +43,7 @@ static MVMGrapheme32 lookup_synthetic(MVMThreadContext *tc, MVMCodepoint *codes, | |
static MVMNFGTrieNode * twiddle_trie_node(MVMThreadContext *tc, MVMNFGTrieNode *current, MVMCodepoint *cur_code, MVMint32 codes_remaining, MVMGrapheme32 synthetic) { | |
/* Make a new empty node, which we'll maybe copy some things from the | |
* current node into. */ | |
- MVMNFGTrieNode *new_node = MVM_fixed_size_alloc(tc, tc->instance->fsa, sizeof(MVMNFGTrieNode)); | |
+ MVMNFGTrieNode *new_node = MVM_malloc(sizeof(MVMNFGTrieNode)); | |
/* If we've more codes remaining... */ | |
if (codes_remaining > 0) { | |
@@ -57,8 +57,7 @@ static MVMNFGTrieNode * twiddle_trie_node(MVMThreadContext *tc, MVMNFGTrieNode * | |
if (idx >= 0) { | |
/* Make a copy of the next_codes list. */ | |
size_t the_size = current->num_entries * sizeof(MVMNFGTrieNodeEntry); | |
- MVMNFGTrieNodeEntry *new_next_codes = MVM_fixed_size_alloc(tc, | |
- tc->instance->fsa, the_size); | |
+ MVMNFGTrieNodeEntry *new_next_codes = MVM_malloc(the_size); | |
memcpy(new_next_codes, current->next_codes, the_size); | |
/* Update the copy to point to the new child. */ | |
@@ -68,8 +67,7 @@ static MVMNFGTrieNode * twiddle_trie_node(MVMThreadContext *tc, MVMNFGTrieNode * | |
* existing child list at the next safe point. */ | |
new_node->num_entries = current->num_entries; | |
new_node->next_codes = new_next_codes; | |
- MVM_fixed_size_free_at_safepoint(tc, tc->instance->fsa, the_size, | |
- current->next_codes); | |
+ MVM_free_at_safepoint(tc, current->next_codes); | |
} | |
/* Otherwise, we're going to need to insert the new child into a | |
@@ -79,8 +77,7 @@ static MVMNFGTrieNode * twiddle_trie_node(MVMThreadContext *tc, MVMNFGTrieNode * | |
MVMint32 orig_entries = current ? current->num_entries : 0; | |
MVMint32 new_entries = orig_entries + 1; | |
size_t new_size = new_entries * sizeof(MVMNFGTrieNodeEntry); | |
- MVMNFGTrieNodeEntry *new_next_codes = MVM_fixed_size_alloc(tc, | |
- tc->instance->fsa, new_size); | |
+ MVMNFGTrieNodeEntry *new_next_codes = MVM_malloc(new_size); | |
/* Go through original entries, copying those that are for a lower | |
* code point than the one we're inserting a child for. */ | |
@@ -103,9 +100,7 @@ static MVMNFGTrieNode * twiddle_trie_node(MVMThreadContext *tc, MVMNFGTrieNode * | |
new_node->num_entries = new_entries; | |
new_node->next_codes = new_next_codes; | |
if (orig_entries) | |
- MVM_fixed_size_free_at_safepoint(tc, tc->instance->fsa, | |
- orig_entries * sizeof(MVMNFGTrieNodeEntry), | |
- current->next_codes); | |
+ MVM_free_at_safepoint(tc, current->next_codes); | |
} | |
/* Always need to copy synthetic set on the existing node also; | |
@@ -129,8 +124,7 @@ static MVMNFGTrieNode * twiddle_trie_node(MVMThreadContext *tc, MVMNFGTrieNode * | |
/* Free any existing node at next safe point, return the new one. */ | |
if (current) | |
- MVM_fixed_size_free_at_safepoint(tc, tc->instance->fsa, | |
- sizeof(MVMNFGTrieNode), current); | |
+ MVM_free_at_safepoint(tc, current); | |
return new_node; | |
} | |
static void add_synthetic_to_trie(MVMThreadContext *tc, MVMCodepoint *codes, MVMint32 num_codes, MVMGrapheme32 synthetic) { | |
@@ -153,10 +147,10 @@ static MVMGrapheme32 add_synthetic(MVMThreadContext *tc, MVMCodepoint *codes, MV | |
if (nfg->num_synthetics % MVM_SYNTHETIC_GROW_ELEMS == 0) { | |
size_t orig_size = nfg->num_synthetics * sizeof(MVMNFGSynthetic); | |
size_t new_size = (nfg->num_synthetics + MVM_SYNTHETIC_GROW_ELEMS) * sizeof(MVMNFGSynthetic); | |
- MVMNFGSynthetic *new_synthetics = MVM_fixed_size_alloc(tc, tc->instance->fsa, new_size); | |
+ MVMNFGSynthetic *new_synthetics = MVM_malloc(new_size); | |
if (orig_size) { | |
memcpy(new_synthetics, nfg->synthetics, orig_size); | |
- MVM_fixed_size_free_at_safepoint(tc, tc->instance->fsa, orig_size, nfg->synthetics); | |
+ MVM_free_at_safepoint(tc, nfg->synthetics); | |
} | |
nfg->synthetics = new_synthetics; | |
} | |
@@ -197,8 +191,7 @@ static MVMGrapheme32 add_synthetic(MVMThreadContext *tc, MVMCodepoint *codes, MV | |
} | |
- synth->codes = MVM_fixed_size_alloc(tc, tc->instance->fsa, | |
- num_codes * sizeof(MVMCodepoint)); | |
+ synth->codes = MVM_malloc(num_codes * sizeof(MVMCodepoint)); | |
memcpy(synth->codes, codes, (synth->num_codes * sizeof(MVMCodepoint))); | |
synth->case_uc = 0; | |
synth->case_lc = 0; | |
@@ -477,8 +470,8 @@ static void nfg_trie_node_destroy(MVMThreadContext *tc, MVMNFGTrieNode *node) { | |
nfg_trie_node_destroy(tc, node->next_codes[i].node); | |
} | |
if (node->next_codes) | |
- MVM_fixed_size_free(tc, tc->instance->fsa, node->num_entries * sizeof(MVMNFGTrieNodeEntry), node->next_codes); | |
- MVM_fixed_size_free(tc, tc->instance->fsa, sizeof(MVMNFGTrieNode), node); | |
+ MVM_free(node->next_codes); | |
+ MVM_free(node); | |
} | |
/* Free all memory allocated to hold synthetic graphemes. These are global | |
@@ -492,15 +485,8 @@ void MVM_nfg_destroy(MVMThreadContext *tc) { | |
/* Free all synthetics. */ | |
if (nfg->synthetics) { | |
- size_t used_synths_in_block = nfg->num_synthetics % MVM_SYNTHETIC_GROW_ELEMS; | |
- size_t synths_to_free = used_synths_in_block | |
- ? nfg->num_synthetics + (MVM_SYNTHETIC_GROW_ELEMS - used_synths_in_block) | |
- : nfg->num_synthetics; | |
- | |
for (i = 0; i < nfg->num_synthetics; i++) { | |
- MVM_fixed_size_free(tc, tc->instance->fsa, | |
- nfg->synthetics[i].num_codes * sizeof(MVMCodepoint), | |
- nfg->synthetics[i].codes); | |
+ MVM_free(nfg->synthetics[i].codes); | |
if (nfg->synthetics[i].case_uc != CASE_UNCHANGED) | |
MVM_free(nfg->synthetics[i].case_uc); | |
if (nfg->synthetics[i].case_lc != CASE_UNCHANGED) | |
@@ -511,9 +497,7 @@ void MVM_nfg_destroy(MVMThreadContext *tc) { | |
MVM_free(nfg->synthetics[i].case_fc); | |
} | |
- MVM_fixed_size_free(tc, tc->instance->fsa, | |
- synths_to_free * sizeof(MVMNFGSynthetic), | |
- nfg->synthetics); | |
+ MVM_free(nfg->synthetics); | |
} | |
MVM_free(nfg); | |
diff --git src/strings/ops.c src/strings/ops.c | |
index c5a7d067f..ff0f25d9c 100644 | |
--- src/strings/ops.c | |
+++ src/strings/ops.c | |
@@ -2054,7 +2054,7 @@ MVMString * MVM_string_join(MVMThreadContext *tc, MVMString *separator, MVMObjec | |
: 1; | |
else | |
sstrands = 1; | |
- pieces = MVM_fixed_size_alloc(tc, tc->instance->fsa, bytes); | |
+ pieces = MVM_malloc(bytes); | |
num_pieces = 0; | |
total_graphs = 0; | |
total_strands = 0; | |
@@ -2095,7 +2095,7 @@ MVMString * MVM_string_join(MVMThreadContext *tc, MVMString *separator, MVMObjec | |
return pieces[0]; | |
/* We now know the total eventual number of graphemes. */ | |
if (total_graphs == 0) { | |
- MVM_fixed_size_free(tc, tc->instance->fsa, bytes, pieces); | |
+ MVM_free(pieces); | |
return tc->instance->str_consts.empty; | |
} | |
result->body.num_graphs = total_graphs; | |
@@ -2173,7 +2173,7 @@ MVMString * MVM_string_join(MVMThreadContext *tc, MVMString *separator, MVMObjec | |
} | |
} | |
- MVM_fixed_size_free(tc, tc->instance->fsa, bytes, pieces); | |
+ MVM_free(pieces); | |
STRAND_CHECK(tc, result); | |
/* if concat is stable and NFG_CHECK on, run a NFG_CHECK on it since it | |
* should be properly constructed now */ | |
diff --git tools/check-throw-without-free.py tools/check-throw-without-free.py | |
index ac20b0a37..8056a0d4c 100644 | |
--- tools/check-throw-without-free.py | |
+++ tools/check-throw-without-free.py | |
@@ -24,8 +24,8 @@ def collect_control_flows(bb, path, seen): | |
return paths | |
#alloc_funcs = {'MVM_string_utf8_c8_encode_C_string'} # currently adding this causes a segfault in gcc when compiling src/io/filewatchers.c | |
-alloc_funcs = {'MVM_malloc', 'MVM_calloc', 'MVM_fixed_size_allocate', 'MVM_fixed_size_allocate_zeroed', 'ANSIToUTF8', 'MVM_bytecode_dump', 'MVM_exception_backtrace_line', 'MVM_nativecall_unmarshal_string', 'MVM_serialization_read_cstr', 'MVM_spesh_dump', 'MVM_spesh_dump_arg_guard', 'MVM_spesh_dump_planned', 'MVM_spesh_dump_stats', 'MVM_staticframe_file_location', 'MVM_string_ascii_encode', 'MVM_string_ascii_encode_any', 'MVM_string_ascii_encode_substr', 'MVM_string_encode', 'MVM_string_gb18030_encode_substr', 'MVM_string_gb2312_encode_substr', 'MVM_string_latin1_encode', 'MVM_string_latin1_encode_substr', 'MVM_string_shiftjis_encode_substr', 'MVM_string_utf16_encode', 'MVM_string_utf16_encode_substr', 'MVM_string_utf16_encode_substr_main', 'MVM_string_utf16be_encode_substr', 'MVM_string_utf16le_encode_substr', 'MVM_string_utf8_c8_encode', 'MVM_string_utf8_c8_encode_substr', 'MVM_string_utf8_encode', 'MVM_string_utf8_encode_C_string', 'MVM_string_utf8_encode_substr', 'MVM_string_utf8_maybe_encode_C_string', 'MVM_string_windows1251_encode_substr', 'MVM_string_windows1252_encode_substr', 'MVM_string_windows125X_encode_substr', 'NFG_check_make_debug_string', 'NFG_checker', 'UnicodeToUTF8', 'UnicodeToUTF8', 'base64_encode', 'callback_handler', 'get_signature_char', 'twoway_memmem_uint32', 'u64toa_naive_worker'} | |
-free_funcs = {'MVM_free', 'MVM_free_null', 'MVM_fixed_size_free', 'MVM_fixed_size_free_at_safepoint', 'free_repr_data', 'cleanup_all', 'MVM_tc_destroy'} | |
+alloc_funcs = {'MVM_malloc', 'MVM_calloc', 'ANSIToUTF8', 'MVM_bytecode_dump', 'MVM_exception_backtrace_line', 'MVM_nativecall_unmarshal_string', 'MVM_serialization_read_cstr', 'MVM_spesh_dump', 'MVM_spesh_dump_arg_guard', 'MVM_spesh_dump_planned', 'MVM_spesh_dump_stats', 'MVM_staticframe_file_location', 'MVM_string_ascii_encode', 'MVM_string_ascii_encode_any', 'MVM_string_ascii_encode_substr', 'MVM_string_encode', 'MVM_string_gb18030_encode_substr', 'MVM_string_gb2312_encode_substr', 'MVM_string_latin1_encode', 'MVM_string_latin1_encode_substr', 'MVM_string_shiftjis_encode_substr', 'MVM_string_utf16_encode', 'MVM_string_utf16_encode_substr', 'MVM_string_utf16_encode_substr_main', 'MVM_string_utf16be_encode_substr', 'MVM_string_utf16le_encode_substr', 'MVM_string_utf8_c8_encode', 'MVM_string_utf8_c8_encode_substr', 'MVM_string_utf8_encode', 'MVM_string_utf8_encode_C_string', 'MVM_string_utf8_encode_substr', 'MVM_string_utf8_maybe_encode_C_string', 'MVM_string_windows1251_encode_substr', 'MVM_string_windows1252_encode_substr', 'MVM_string_windows125X_encode_substr', 'NFG_check_make_debug_string', 'NFG_checker', 'UnicodeToUTF8', 'UnicodeToUTF8', 'base64_encode', 'callback_handler', 'get_signature_char', 'twoway_memmem_uint32', 'u64toa_naive_worker'} | |
+free_funcs = {'MVM_free', 'MVM_free_null', 'free_repr_data', 'cleanup_all', 'MVM_tc_destroy'} | |
def check_code_for_throw_without_free(fun): | |
for bb in fun.cfg.basic_blocks: |
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment.