Streams work together through two mechanisms:
- Their programmatic API, such as `read()`, `write()`, `on('data')`, etc.
- The format of the data being operated on.
Traditional streams operate on a simple stream of bytes. Ordering matters,
<!DOCTYPE html> | |
<meta charset="utf-8"> | |
<link rel="stylesheet" href="http://cmx.io/v/0.1/cmx.css"/> | |
<script src="http://cmx.io/v/0.1/cmx.js"></script> | |
<style>.cmx-user-scene4 .cmx-text-border .cmx-path {stroke: orange}</style> | |
<body> | |
<div style="max-width:900px; -webkit-transform:rotate(0deg);"> | |
<scene id="scene1"> | |
<actor t="translate(75,88)"> |
// ids being exported by a timer loop (drained elsewhere).
var exporting = [];

// Receives batches of ids as the asynchronous selection produces them.
// A falsy `ids` argument is the end-of-selection sentinel.
// FIX: the sentinel branch previously fell through to push.apply with an
// undefined `ids`, relying on apply() tolerating a nullish argsArray --
// return early instead.
var gotSomeIds = function(ids) {
  if (!ids) {
    // selection done... we can finalize export
    return;
  }
  // Append the whole batch in place without building an intermediate array.
  Array.prototype.push.apply(exporting, ids);
};
// Click delegate: resolve the uuid for the clicked element, falling back
// to its parent node when the click landed on an un-annotated child.
// NOTE(review): truncated snippet -- the if-block and the function are not
// closed within this excerpt; the remainder lives outside this view.
function onClickHandler(evt) { |
var target = evt.target; |
// dataset may be missing on some node types; default to {} so the
// property reads below are safe.
var dataset = target.dataset || {}; |
var parentDataset = target.parentNode ? |
(target.parentNode.dataset || {}) : {}; |
var uuid = dataset.uuid; |
var node = target; |
if (!uuid) { |
// No uuid on the element itself: use the parent's uuid and treat the
// parent as the node of interest from here on.
uuid = parentDataset.uuid; |
node = target.parentNode; |
./b2g/chrome/content/shell.js: for (let crashid of pending) { | |
./b2g/chrome/content/dbg-browser-actors.js: for (let [browser, actor] of this._actorByBrowser) { | |
./js/examples/jorendb.js: for (var cmd of commandArray) { | |
./js/src/devtools/rootAnalysis/CFG.js: for (var xbody of functionBodies) { | |
./js/src/devtools/rootAnalysis/CFG.js: for (var edge of body.PEdge) { | |
./js/src/devtools/rootAnalysis/CFG.js: for (var edge of body.PEdge) { | |
./js/src/devtools/rootAnalysis/CFG.js: for (var e of predecessors[edge.Index[0]]) | |
./js/src/devtools/rootAnalysis/CFG.js: for (var nedge of successors[point]) { | |
./js/src/devtools/rootAnalysis/computeCallgraph.js: for (var entry of index[name]) { | |
./js/src/devtools/rootAnalysis/computeCallgraph.js: for (var field of csu.FunctionField) { |
# NOTE(review): snippet extracted from a slide/table -- the trailing "|"
# markers and lost indentation mean this fragment is illustrative, not
# runnable as-is.
import numpy | |
import scipy | |
from scipy import stats | |
# Mood's median test (sketch): pool both samples, take the grand median,
# then count how many values of each sample fall below/above it.
# `count_partition` is defined elsewhere -- presumably returns a
# (below, above) pair; TODO confirm against the full source.
# NOTE(review): function is truncated here (no return statement visible).
def median_test(samples1, samples2): | |
median = numpy.median(samples1 + samples2) | |
below1, above1 = count_partition(samples1, median) | |
below2, above2 = count_partition(samples2, median) |
// | |
// Example uses of a ServiceWorker Cache to discuss how data flows | |
// within gecko. See comments below | |
// | |
// NOTE(review): truncated snippet -- the promise chain below is never
// closed within this excerpt; the remainder lives outside this view.
addEventListener('fetch', function(event) { | |
// Respond with a promise so the network stack waits for the async work.
event.respondWith( | |
// NOTE(review): event.default() appears to be an early ServiceWorker
// draft API for "do the normal network fetch" -- confirm; the shipped
// equivalent is fetch(event.request).
event.default().then(function(response) { | |
caches.get('content').then(function(cache) { | |
// Clone the response so one copy can be cached while the other is
// returned to the page.
var cacheResponse = response.clone(); | |
// Transfers data from network->parent process->disk |
// Gecko (XPCOM) fragment: build an in-memory pipe -- bytes pushed into
// `writer` become available from `reader` -- and hand the read end to a
// freshly created InternalResponse as its body stream.
// NOTE(review): statement run with no enclosing function visible here;
// `rv`-returning NS_ENSURE_SUCCESS implies this sits inside a function
// returning nsresult -- confirm against the full source.
nsCOMPtr<nsIInputStream> reader; | |
nsCOMPtr<nsIOutputStream> writer; | |
nsresult rv = NS_NewPipe(getter_AddRefs(reader), getter_AddRefs(writer), | |
0, UINT32_MAX, // unlimited size to avoid writer WOULD_BLOCK case | |
true, true); // non-blocking reader/writer | |
// Bail out (propagating rv) if the pipe could not be created.
NS_ENSURE_SUCCESS(rv, rv); | |
nsRefPtr<InternalResponse> ir = new InternalResponse(); | |
// The response will pull its body from the pipe's read end.
ir->SetBody(reader); |
// Sketch of a writer-backed Request body (proposal-style API; `bodyAsWriter`
// and `bodyWriter` are not shipped web platform features).
// FIX: added the missing comma between the URL and the init object --
// `new Request('...' { ... })` was a syntax error.
var req = new Request('https://example.com', { bodyAsWriter: true });
// throws because writer is not set yet
req.bodyWriter.write(/*...*/);
// fetch calls setWriter
fetch(req).then(/*...*/);
req.bodyWriter.write(/*...*/)
// Pull-based consumption: each successfully processed chunk schedules the
// next read; a falsy chunk is the end-of-stream signal.
myStream.read(function handleChunk(chunk) {
  if (chunk) {
    processChunk(chunk);
    myStream.read(handleChunk);
    return;
  }
  // End of stream reached -- finish up.
  processDone();
});