MonoInst *var = cfg->varinfo [i];
LLVMTypeRef vtype;
- if (var->opcode == OP_GSHAREDVT_LOCAL || var->opcode == OP_GSHAREDVT_ARG_REGOFFSET) {
- } else if (var->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT) || (mini_type_is_vtype (var->inst_vtype) && !MONO_CLASS_IS_SIMD (ctx->cfg, var->klass))) {
+ if ((var->opcode == OP_GSHAREDVT_LOCAL || var->opcode == OP_GSHAREDVT_ARG_REGOFFSET))
+ continue;
+
+#ifdef TARGET_WASM
+ // For GC stack scanning to work, have to spill all reference variables to the stack
+ // Some ref variables have type intptr
+ if (MONO_TYPE_IS_REFERENCE (var->inst_vtype) || var->inst_vtype->type == MONO_TYPE_I)
+ var->flags |= MONO_INST_INDIRECT;
+#endif
+
+ if (var->flags & (MONO_INST_VOLATILE|MONO_INST_INDIRECT) || (mini_type_is_vtype (var->inst_vtype) && !MONO_CLASS_IS_SIMD (ctx->cfg, var->klass))) {
vtype = type_to_llvm_type (ctx, var->inst_vtype);
if (!ctx_ok (ctx))
return;
builder = ctx->builder;
}
+ /* Handle PHI nodes first */
+ /* They should be grouped at the start of the bb */
+ for (ins = bb->code; ins; ins = ins->next) {
+ emit_dbg_loc (ctx, builder, ins->cil_code);
+
+ if (ins->opcode == OP_NOP)
+ continue;
+ if (!MONO_IS_PHI (ins))
+ break;
+
+ int i;
+ gboolean empty = TRUE;
+
+ /* Check that all input bblocks really branch to us */
+ for (i = 0; i < bb->in_count; ++i) {
+ if (bb->in_bb [i]->last_ins && bb->in_bb [i]->last_ins->opcode == OP_NOT_REACHED)
+ ins->inst_phi_args [i + 1] = -1;
+ else
+ empty = FALSE;
+ }
+
+ if (empty) {
+ /* LLVM doesn't like phi instructions with zero operands */
+ ctx->is_dead [ins->dreg] = TRUE;
+ continue;
+ }
+
+ /* Created earlier, insert it now */
+ LLVMInsertIntoBuilder (builder, values [ins->dreg]);
+
+ for (i = 0; i < ins->inst_phi_args [0]; i++) {
+ int sreg1 = ins->inst_phi_args [i + 1];
+ int count, j;
+
+ /*
+ * Count the number of times the incoming bblock branches to us,
+ * since llvm requires a separate entry for each.
+ */
+ if (bb->in_bb [i]->last_ins && bb->in_bb [i]->last_ins->opcode == OP_SWITCH) {
+ MonoInst *switch_ins = bb->in_bb [i]->last_ins;
+
+ count = 0;
+ for (j = 0; j < GPOINTER_TO_UINT (switch_ins->klass); ++j) {
+ if (switch_ins->inst_many_bb [j] == bb)
+ count ++;
+ }
+ } else {
+ count = 1;
+ }
+
+ /* Remember for later */
+ for (j = 0; j < count; ++j) {
+ PhiNode *node = (PhiNode*)mono_mempool_alloc0 (ctx->mempool, sizeof (PhiNode));
+ node->bb = bb;
+ node->phi = ins;
+ node->in_bb = bb->in_bb [i];
+ node->sreg = sreg1;
+ bblocks [bb->in_bb [i]->block_num].phi_nodes = g_slist_prepend_mempool (ctx->mempool, bblocks [bb->in_bb [i]->block_num].phi_nodes, node);
+ }
+ }
+ }
+ // Add volatile stores for PHI nodes
+ // These need to be emitted after the PHI nodes
+ for (ins = bb->code; ins; ins = ins->next) {
+ const char *spec = LLVM_INS_INFO (ins->opcode);
+
+ if (ins->opcode == OP_NOP)
+ continue;
+ if (!MONO_IS_PHI (ins))
+ break;
+
+ if (spec [MONO_INST_DEST] != 'v')
+ emit_volatile_store (ctx, ins->dreg);
+ }
+
has_terminator = FALSE;
starting_builder = builder;
for (ins = bb->code; ins; ins = ins->next) {
}
//mono_print_ins (ins);
+ gboolean skip_volatile_store = FALSE;
switch (ins->opcode) {
case OP_NOP:
case OP_NOT_NULL:
case OP_FPHI:
case OP_VPHI:
case OP_XPHI: {
- int i;
- gboolean empty = TRUE;
-
- /* Check that all input bblocks really branch to us */
- for (i = 0; i < bb->in_count; ++i) {
- if (bb->in_bb [i]->last_ins && bb->in_bb [i]->last_ins->opcode == OP_NOT_REACHED)
- ins->inst_phi_args [i + 1] = -1;
- else
- empty = FALSE;
- }
-
- if (empty) {
- /* LLVM doesn't like phi instructions with zero operands */
- ctx->is_dead [ins->dreg] = TRUE;
- break;
- }
-
- /* Created earlier, insert it now */
- LLVMInsertIntoBuilder (builder, values [ins->dreg]);
-
- for (i = 0; i < ins->inst_phi_args [0]; i++) {
- int sreg1 = ins->inst_phi_args [i + 1];
- int count, j;
-
- /*
- * Count the number of times the incoming bblock branches to us,
- * since llvm requires a separate entry for each.
- */
- if (bb->in_bb [i]->last_ins && bb->in_bb [i]->last_ins->opcode == OP_SWITCH) {
- MonoInst *switch_ins = bb->in_bb [i]->last_ins;
-
- count = 0;
- for (j = 0; j < GPOINTER_TO_UINT (switch_ins->klass); ++j) {
- if (switch_ins->inst_many_bb [j] == bb)
- count ++;
- }
- } else {
- count = 1;
- }
-
- /* Remember for later */
- for (j = 0; j < count; ++j) {
- PhiNode *node = (PhiNode*)mono_mempool_alloc0 (ctx->mempool, sizeof (PhiNode));
- node->bb = bb;
- node->phi = ins;
- node->in_bb = bb->in_bb [i];
- node->sreg = sreg1;
- bblocks [bb->in_bb [i]->block_num].phi_nodes = g_slist_prepend_mempool (ctx->mempool, bblocks [bb->in_bb [i]->block_num].phi_nodes, node);
- }
- }
+ // Handled above
+ skip_volatile_store = TRUE;
break;
}
case OP_MOVE:
}
/* Add stores for volatile variables */
- if (spec [MONO_INST_DEST] != ' ' && spec [MONO_INST_DEST] != 'v' && !MONO_IS_STORE_MEMBASE (ins))
+ if (!skip_volatile_store && spec [MONO_INST_DEST] != ' ' && spec [MONO_INST_DEST] != 'v' && !MONO_IS_STORE_MEMBASE (ins))
emit_volatile_store (ctx, ins->dreg);
}
}
#endif
+#ifdef HOST_WASM
+extern gboolean mono_wasm_enable_gc;
+#endif
+
void
sgen_perform_collection (size_t requested_size, int generation_to_collect, const char *reason, gboolean forced_serial, gboolean stw)
{
#ifdef HOST_WASM
- g_assert (stw); //can't handle non-stw mode (IE, domain unload)
- //we ignore forced_serial
+ if (!mono_wasm_enable_gc) {
+ g_assert (stw); //can't handle non-stw mode (i.e., domain unload)
+ //forced_serial is intentionally ignored in this mode
+
+ //There's a window for racing where we're executing other bg jobs before the GC, they trigger a GC request and it overrides this one.
+ //I believe this case to be benign as it will, in the worst case, upgrade a minor to a major collection.
+ if (gc_request.generation_to_collect <= generation_to_collect) {
+ gc_request.requested_size = requested_size;
+ gc_request.generation_to_collect = generation_to_collect;
+ gc_request.reason = reason;
+ sgen_client_schedule_background_job (gc_pump_callback);
+ }
- //There's a window for racing where we're executing other bg jobs before the GC, they trigger a GC request and it overrides this one.
- //I belive this case to be benign as it will, in the worst case, upgrade a minor to a major collection.
- if (gc_request.generation_to_collect <= generation_to_collect) {
- gc_request.requested_size = requested_size;
- gc_request.generation_to_collect = generation_to_collect;
- gc_request.reason = reason;
- sgen_client_schedule_background_job (gc_pump_callback);
+ sgen_degraded_mode = 1; //enable degraded mode so allocation can continue
+ return;
}
+#endif
- sgen_degraded_mode = 1; //enable degraded mode so allocation can continue
-#else
sgen_perform_collection_inner (requested_size, generation_to_collect, reason, forced_serial, stw);
-#endif
}
/*
* ######################################################################