// ---- Sentinel.java ----
// Singly linked list that uses a dummy sentinel node, so operations at the
// front of the list never have to special-case an empty list / null header.
// NOTE(review): fragment — the `class` keyword is missing (presumably
// `public class SLinkedList`), and the closing brace of the class lies
// outside this excerpt.
public SLinkedList {
// header always points to the sentinel node; header is NEVER null
ListNode header;
// Constructs an empty list: header refers to a sentinel whose data is unused.
public SLinkedList() {
// point to a dummy ListNode that serves as the sentinel node
// its data doesn't matter
header = new ListNode(-1);
}
// ---- sort.java ----
// Priority-queue sort (heap sort): insert every element keyed by its own
// value, then — in the part truncated below this excerpt — repeatedly remove
// the minimum to refill the array in ascending order.
// @param number array sorted in place
void sort(int[] number)
{
PriorityQueue<Integer, Integer> p = new HeapPriorityQueue();
// insert array data into p
for (int i=0; i< number.length; i++) {
int x = number[i];
p.insert(x, x);
}
// take out in sorted order putting into the array
// ---- sort.java (second copy) ----
// Priority-queue sort (heap sort): insert every element keyed by its own
// value, then — in the part truncated below this excerpt — repeatedly remove
// the minimum to refill the array in ascending order.
// @param number array sorted in place
void sort(int[] number)
{
PriorityQueue<Integer, Integer> p = new HeapPriorityQueue();
// insert array data into p
for (int i=0; i< number.length; i++) {
int x = number[i]; // FIX: x was used below without ever being declared
p.insert(x, x);
}
// take out in sorted order putting into the array
int i = 0;
// ---- gist:0fba250de87cb33b5b2c16868107014a ----
// Priority-queue sort (heap sort): insert every element keyed by its own
// value, then — in the part truncated below this excerpt — repeatedly remove
// the minimum to refill the array in ascending order.
// @param number array sorted in place
void sort(int[] number)
{
PriorityQueue<Integer, Integer> p = new HeapPriorityQueue();
// insert array data into p
for (int i=0; i< number.length; i++) {
int x = number[i]; // FIX: x was used below without ever being declared
p.insert(x, x);
}
// take out in sorted order putting into the array
int i = 0;
// ---- inf.java ----
// Intentionally infinite loop: the middle (condition) clause is empty, so
// the loop never exits; i just keeps incrementing.
for (int i=0; ; i++) {
}
# ---- decomposegroupby_78b76ef ----
class DecomposeGroupBy(rules.Rule):
"""Convert a logical group-by into a two-phase group-by.

The local half of the aggregate runs before the shuffle step, whereas the
remote half runs after the shuffle step.

TODO: omit this optimization if the data is already shuffled, or
if the cardinality of the grouping keys is high.
"""
// ---- gist:709bf367785ade27eaba ----
// Pseudocode: set-up for a partitioned running-sum (prefix-sum) computation.
// N elements are block-distributed across P partitions; all arrays share
// the same distribution so corresponding elements are co-located.
blockdist = BlockDistribution[P partitions, N elements]
global int localsum[P] (blockdist); // for per-partition sums (alternatively represented as partition-private storage)
global int A[N] (blockdist); // input
global int B[N] (blockdist); // intermediate
global int C[N] (blockdist); // result
// compute running sum, within each partition
// we can only break dependences between groups of iterations with knowledge of how blockdist works
// NOTE(review): the forall body is truncated in this excerpt.
forall i in 0..N {
// ---- symmetric vector iterator ----
// Parallel loop over a symmetric (per-core replicated) vector: every core
// iterates over the elements of its local copy and applies loop_body to each.
// Appears to target the Grappa runtime API (mycore/cores/on_all_cores) —
// TODO(review): confirm.
// NOTE(review): excerpt is truncated — the closing braces of both lambdas
// and of the function lie outside this view, as does any GCE completion.
template <GlobalCompletionEvent* GCE=&impl::local_gce, typename T, typename F>
void forall(GlobalAddress<aligned_vector<T>> symmetric_array, F loop_body) {
auto origin = mycore();
// Every core participates; enroll them all with the completion event up front.
GCE->enroll(cores());
on_all_cores([=] {
auto num_elements = symmetric_array->vector.size();
// Iterate [0, num_elements) asynchronously in chunks on this core.
forall_here<SyncMode::Async,GCE>( 0, num_elements, [=](int64_t start, int64_t iters) {
for (int64_t j=start; j<start+iters; j++) {
auto el = symmetric_array->vector[j];
loop_body(el);
# ---- vulcan.py ----
# Schema map: relation name -> ordered list of (column name, type tag) pairs.
{
    'public:vulcan:edgesConnected': [
        ('nowGroup', 'LONG_TYPE'),
        ('currentTime', 'LONG_TYPE'),
        ('currentGroup', 'LONG_TYPE'),
        ('nextGroup', 'LONG_TYPE'),
        ('sharedParticleCount', 'LONG_TYPE'),
    ],
}