
Commit

Merge pull request ocaml#367 from sadiqj/unused_rpc_code
remove some unused rpc consumers
kayceesrk committed Jul 10, 2020
2 parents 43af4ac + 0277791 commit 1216aab
Showing 2 changed files with 0 additions and 160 deletions.
115 changes: 0 additions & 115 deletions runtime/major_gc.c
@@ -383,95 +383,6 @@ static uintnat default_slice_budget() {
return computed_work;
}

enum steal_result { Shared, Not_shared, No_work };

struct steal_payload {
caml_domain_state* thief;
/* The major cycle at which the stealing was initiated. */
uintnat major_cycle;
enum steal_result result;
};

static void handle_steal_req (struct domain* targetd, void* plv,
interrupt* done) {
struct steal_payload* pl = plv;
uintnat steal_size, new_size;
caml_domain_state* target = targetd->state;

if (pl->major_cycle != caml_major_cycles_completed) {
pl->result = Not_shared;
} else if (target->stealing || target->mark_stack == NULL || target->mark_stack->count == 0) {
pl->result = No_work;
} else {
CAMLassert(pl->thief->mark_stack->count == 0);

if (target->mark_stack->count < 16) {
steal_size = target->mark_stack->count;
} else {
steal_size = target->mark_stack->count / 2
+ target->mark_stack->count % 2;
}
new_size = target->mark_stack->count - steal_size;
memcpy(pl->thief->mark_stack->stack, &target->mark_stack->stack[new_size],
steal_size * sizeof(mark_entry));
target->mark_stack->count = new_size;

/* Mark stack size and domain local marking_done flag are updated in
* steal_mark_work(). */
CAMLassert (pl->thief->marking_done);
atomic_fetch_add(&num_domains_to_mark, 1);

if (new_size == 0) {
CAMLassert(!target->marking_done);
atomic_fetch_add_verify_ge0(&num_domains_to_mark, -1);
target->marking_done = 1;
}

pl->thief->mark_stack->count = steal_size;
pl->thief->marking_done = 0;
pl->result = Shared;
}
caml_acknowledge_interrupt(done);
}

/* Return domain_id of victim if success. Otherwise, return -1. */
static int steal_mark_work () {
struct steal_payload pl;
struct domain* domain_self = caml_domain_self ();
int my_id = domain_self->state->id;
struct domain* victim;
int i;

pl.thief = Caml_state;
pl.major_cycle = caml_major_cycles_completed;

if (atomic_load_acq(&num_domains_to_mark) == 0)
return -1;

Caml_state->stealing = 1;
for (i = (my_id + 1) % Max_domains; i != my_id; i = (i + 1) % Max_domains) {
victim = caml_domain_of_id(i);

if(victim->state && caml_domain_rpc(victim, &handle_steal_req, &pl)) {
if (pl.result == Shared) {
caml_gc_log("Stolen mark work (size=%"ARCH_INTNAT_PRINTF_FORMAT"u) from domain %d",
domain_self->state->mark_stack->count, i);
Caml_state->stealing = 0;
return i;
}
}

/* Abort stealing if marking was done or a major cycle was completed. */
if (atomic_load_acq(&num_domains_to_mark) == 0 ||
pl.major_cycle != caml_major_cycles_completed)
break;
}

caml_gc_log("Mark work stealing failure");
Caml_state->stealing = 0;
return -1;
}

static void mark_stack_prune(struct mark_stack* stk);
static struct pool* find_pool_to_rescan();

@@ -1092,7 +1003,6 @@ static intnat major_collection_slice(intnat howmuch,
intnat sweep_work = 0, mark_work = 0;
intnat available, left;
uintnat blocks_marked_before = domain_state->stat_blocks_marked;
int steal_result;
int was_marking = 0;
uintnat saved_ephe_cycle;
uintnat saved_major_cycle = caml_major_cycles_completed;
@@ -1157,31 +1067,6 @@ static intnat major_collection_slice(intnat howmuch,
available = budget > Chunk_size ? Chunk_size : budget;
left = mark(available);
budget -= available - left;
/*
Disabled to avoid high pausetimes
FIXME: we would like to handle steal interrupts regularly,
but bad things happen if the GC cycles ends via an STW
interrupt here.
caml_handle_incoming_interrupts();
if( saved_major_cycle != caml_major_cycles_completed ) {
if (log_events) {
caml_ev_end("major_gc/mark");
caml_ev_end("major_gc/slice");
}
if (budget_left) *budget_left = budget;
return computed_work;
}
*/
} else if (0) {
if (was_marking) {
if (log_events) caml_ev_end("major_gc/mark");
was_marking = 0;
}
if (log_events) caml_ev_begin("major_gc/steal");
steal_result = steal_mark_work();
if (log_events) caml_ev_end("major_gc/steal");
if (steal_result == -1) break;
} else {
break;
}
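For reference, the deleted handle_steal_req split the victim's mark stack between the two domains: stacks with fewer than 16 entries were handed over whole, while larger ones were split with the thief taking the rounded-up half (count/2 + count%2). The standalone C sketch below only illustrates that split arithmetic; split_stack is an illustrative name, not part of the runtime.

    #include <assert.h>
    #include <stddef.h>
    #include <stdio.h>

    /* Split a victim mark stack of `count` entries the way the removed
       handle_steal_req did: take everything when the stack is small
       (fewer than 16 entries), otherwise take the larger half,
       i.e. count/2 + count%2 == ceil(count/2).  The leftover new_size
       stays with the victim. */
    static size_t split_stack(size_t count, size_t* new_size) {
      size_t steal_size;
      if (count < 16)
        steal_size = count;                    /* small stacks are taken whole */
      else
        steal_size = count / 2 + count % 2;    /* rounded-up half */
      *new_size = count - steal_size;
      return steal_size;
    }

    int main(void) {
      size_t remaining;
      /* 7 entries: below the threshold, so the thief takes all 7. */
      assert(split_stack(7, &remaining) == 7 && remaining == 0);
      /* 17 entries: the thief takes 9, the victim keeps 8. */
      assert(split_stack(17, &remaining) == 9 && remaining == 8);
      printf("split arithmetic matches the removed handle_steal_req\n");
      return 0;
    }

Rounding the stolen share up guarantees the thief's stack is non-empty whenever the victim's was, which is why handle_steal_req could unconditionally increment num_domains_to_mark in that branch.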
45 changes: 0 additions & 45 deletions runtime/memory.c
@@ -402,51 +402,6 @@ CAMLexport value caml_alloc_shr_noexc(mlsize_t wosize, tag_t tag) {
return Val_hp(v);
}

struct read_fault_req {
value obj;
int field;
caml_root* ret;
};

static void send_read_fault(struct read_fault_req*);

static void handle_read_fault(struct domain* target, void* reqp, interrupt* done) {
CAMLparam0();
CAMLlocal1(ret);
struct read_fault_req* req = reqp;
value v = Op_val(req->obj)[req->field];

if (Is_minor(v) && caml_owner_of_young_block(v) == target) {
caml_modify_root(*req->ret, ret);
/* Update the field so that future requests don't fault. We must
use a CAS here, since another thread may modify the field and
we must avoid overwriting its update */
caml_atomic_cas_field(req->obj, req->field, v, ret);
} else {
/* Race condition: by the time we handled the fault, the field was
already modified and no longer points to our heap. We recurse
into the read barrier. This always terminates: in the worst
case, all domains get tied up servicing one fault and then
there are no more left running to win the race */
send_read_fault(req);
}
caml_acknowledge_interrupt(done);
CAMLreturn0;
}

static void send_read_fault(struct read_fault_req* req)
{
value v = Op_val(req->obj)[req->field];
if (Is_minor(v)) {
if (!caml_domain_rpc(caml_owner_of_young_block(v), &handle_read_fault, req)) {
send_read_fault(req);
}
Assert(!Is_minor(*req->ret));
} else {
caml_modify_root (*req->ret, v);
}
}

CAMLexport value caml_read_barrier(value obj, intnat field)
{
/* ctk21: no-op the read barrier as part of experiment */
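For reference, the deleted read-fault path kept retrying until the faulting field no longer pointed into another domain's minor heap: send_read_fault re-sent the request whenever caml_domain_rpc failed, and handle_read_fault recursed back into send_read_fault if the field had already been mutated by the time the interrupt was serviced. The sketch below shows only that retry shape; is_remote_minor and rpc_to_owner are hypothetical stand-ins for the runtime's Is_minor/ownership check and caml_domain_rpc.

    #include <stdbool.h>
    #include <stdio.h>

    /* Hypothetical stand-ins for the runtime's checks and RPC: they model a
       field that stops being remote after the owner services one fault. */
    static int pending_faults = 2;

    static bool is_remote_minor(long v) {   /* stands in for Is_minor + owner check */
      (void)v;
      return pending_faults > 0;
    }

    static bool rpc_to_owner(long v) {      /* stands in for caml_domain_rpc */
      (void)v;
      pending_faults--;
      return pending_faults == 0;           /* first attempt fails, second succeeds */
    }

    /* Shape of the removed send_read_fault, written as a loop instead of the
       original tail recursion: keep re-issuing the request until the owner
       acknowledges it or the value is no longer remote. */
    static long read_with_fault(long field_value) {
      while (is_remote_minor(field_value)) {
        if (rpc_to_owner(field_value))
          break;                            /* owner handled the fault */
        /* otherwise the RPC was refused or the value moved: retry */
      }
      return field_value;
    }

    int main(void) {
      printf("read %ld after fault handling\n", read_with_fault(42));
      return 0;
    }

As the original comment notes, the retry always terminates: in the worst case every domain ends up servicing a fault, and no domain is left running to keep mutating the field.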
