Skip to content

Instantly share code, notes, and snippets.

@larscwallin
Created March 30, 2022 10:21
Show Gist options
  • Star 0 You must be signed in to star a gist
  • Fork 0 You must be signed in to fork a gist
  • Save larscwallin/050aa7ea082751c1b80ea00a0fa968d6 to your computer and use it in GitHub Desktop.
This file has been truncated, but you can view the full file.
/*!
*
* WebGazer.js: Scalable Webcam EyeTracking Using User Interactions
* Copyright (c) 2016-2021, Brown HCI Group
* Licensed under GPLv3. Companies with a valuation of less than $1M can use WebGazer.js under LGPLv3.
*
*/
var webgazer =
/******/ (function(modules) { // webpackBootstrap
/******/ // The module cache
/******/ var installedModules = {};
/******/
/******/ // The require function
/******/ function __webpack_require__(moduleId) {
/******/
/******/ // Check if module is in cache
/******/ if(installedModules[moduleId]) {
/******/ return installedModules[moduleId].exports;
/******/ }
/******/ // Create a new module (and put it into the cache)
/******/ var module = installedModules[moduleId] = {
/******/ i: moduleId,
/******/ l: false,
/******/ exports: {}
/******/ };
/******/
/******/ // Execute the module function
/******/ modules[moduleId].call(module.exports, module, module.exports, __webpack_require__);
/******/
/******/ // Flag the module as loaded
/******/ module.l = true;
/******/
/******/ // Return the exports of the module
/******/ return module.exports;
/******/ }
/******/
/******/
/******/ // expose the modules object (__webpack_modules__)
/******/ __webpack_require__.m = modules;
/******/
/******/ // expose the module cache
/******/ __webpack_require__.c = installedModules;
/******/
/******/ // define getter function for harmony exports
/******/ __webpack_require__.d = function(exports, name, getter) {
/******/ if(!__webpack_require__.o(exports, name)) {
/******/ Object.defineProperty(exports, name, { enumerable: true, get: getter });
/******/ }
/******/ };
/******/
/******/ // define __esModule on exports
/******/ __webpack_require__.r = function(exports) {
/******/ if(typeof Symbol !== 'undefined' && Symbol.toStringTag) {
/******/ Object.defineProperty(exports, Symbol.toStringTag, { value: 'Module' });
/******/ }
/******/ Object.defineProperty(exports, '__esModule', { value: true });
/******/ };
/******/
/******/ // create a fake namespace object
/******/ // mode & 1: value is a module id, require it
/******/ // mode & 2: merge all properties of value into the ns
/******/ // mode & 4: return value when already ns object
/******/ // mode & 8|1: behave like require
/******/ __webpack_require__.t = function(value, mode) {
/******/ if(mode & 1) value = __webpack_require__(value);
/******/ if(mode & 8) return value;
/******/ if((mode & 4) && typeof value === 'object' && value && value.__esModule) return value;
/******/ var ns = Object.create(null);
/******/ __webpack_require__.r(ns);
/******/ Object.defineProperty(ns, 'default', { enumerable: true, value: value });
/******/ if(mode & 2 && typeof value != 'string') for(var key in value) __webpack_require__.d(ns, key, function(key) { return value[key]; }.bind(null, key));
/******/ return ns;
/******/ };
/******/
/******/ // getDefaultExport function for compatibility with non-harmony modules
/******/ __webpack_require__.n = function(module) {
/******/ var getter = module && module.__esModule ?
/******/ function getDefault() { return module['default']; } :
/******/ function getModuleExports() { return module; };
/******/ __webpack_require__.d(getter, 'a', getter);
/******/ return getter;
/******/ };
/******/
/******/ // Object.prototype.hasOwnProperty.call
/******/ __webpack_require__.o = function(object, property) { return Object.prototype.hasOwnProperty.call(object, property); };
/******/
/******/ // __webpack_public_path__
/******/ __webpack_require__.p = "";
/******/
/******/
/******/ // Load entry module and return exports
/******/ return __webpack_require__(__webpack_require__.s = 304);
/******/ })
/************************************************************************/
/******/ ([
/* 0 */
/***/ (function(module, __webpack_exports__, __webpack_require__) {
"use strict";
// ESM COMPAT FLAG
__webpack_require__.r(__webpack_exports__);
// EXPORTS
__webpack_require__.d(__webpack_exports__, "AdadeltaOptimizer", function() { return /* reexport */ adadelta_optimizer_AdadeltaOptimizer; });
__webpack_require__.d(__webpack_exports__, "AdagradOptimizer", function() { return /* reexport */ adagrad_optimizer_AdagradOptimizer; });
__webpack_require__.d(__webpack_exports__, "AdamOptimizer", function() { return /* reexport */ adam_optimizer_AdamOptimizer; });
__webpack_require__.d(__webpack_exports__, "AdamaxOptimizer", function() { return /* reexport */ adamax_optimizer_AdamaxOptimizer; });
__webpack_require__.d(__webpack_exports__, "MomentumOptimizer", function() { return /* reexport */ momentum_optimizer_MomentumOptimizer; });
__webpack_require__.d(__webpack_exports__, "Optimizer", function() { return /* reexport */ optimizer_Optimizer; });
__webpack_require__.d(__webpack_exports__, "RMSPropOptimizer", function() { return /* reexport */ rmsprop_optimizer_RMSPropOptimizer; });
__webpack_require__.d(__webpack_exports__, "SGDOptimizer", function() { return /* reexport */ sgd_optimizer_SGDOptimizer; });
__webpack_require__.d(__webpack_exports__, "Tensor", function() { return /* reexport */ dist_tensor["a" /* Tensor */]; });
__webpack_require__.d(__webpack_exports__, "TensorBuffer", function() { return /* reexport */ dist_tensor["b" /* TensorBuffer */]; });
__webpack_require__.d(__webpack_exports__, "Variable", function() { return /* reexport */ dist_tensor["c" /* Variable */]; });
__webpack_require__.d(__webpack_exports__, "Rank", function() { return /* reexport */ dist_types["a" /* Rank */]; });
__webpack_require__.d(__webpack_exports__, "sumOutType", function() { return /* reexport */ dist_types["b" /* sumOutType */]; });
__webpack_require__.d(__webpack_exports__, "upcastType", function() { return /* reexport */ dist_types["c" /* upcastType */]; });
__webpack_require__.d(__webpack_exports__, "abs", function() { return /* reexport */ ops["b" /* abs */]; });
__webpack_require__.d(__webpack_exports__, "acos", function() { return /* reexport */ ops["c" /* acos */]; });
__webpack_require__.d(__webpack_exports__, "acosh", function() { return /* reexport */ ops["d" /* acosh */]; });
__webpack_require__.d(__webpack_exports__, "add", function() { return /* reexport */ ops["e" /* add */]; });
__webpack_require__.d(__webpack_exports__, "addN", function() { return /* reexport */ ops["f" /* addN */]; });
__webpack_require__.d(__webpack_exports__, "all", function() { return /* reexport */ ops["g" /* all */]; });
__webpack_require__.d(__webpack_exports__, "any", function() { return /* reexport */ ops["h" /* any */]; });
__webpack_require__.d(__webpack_exports__, "argMax", function() { return /* reexport */ ops["i" /* argMax */]; });
__webpack_require__.d(__webpack_exports__, "argMin", function() { return /* reexport */ ops["j" /* argMin */]; });
__webpack_require__.d(__webpack_exports__, "asin", function() { return /* reexport */ ops["k" /* asin */]; });
__webpack_require__.d(__webpack_exports__, "asinh", function() { return /* reexport */ ops["l" /* asinh */]; });
__webpack_require__.d(__webpack_exports__, "atan", function() { return /* reexport */ ops["m" /* atan */]; });
__webpack_require__.d(__webpack_exports__, "atan2", function() { return /* reexport */ ops["n" /* atan2 */]; });
__webpack_require__.d(__webpack_exports__, "atanh", function() { return /* reexport */ ops["o" /* atanh */]; });
__webpack_require__.d(__webpack_exports__, "avgPool", function() { return /* reexport */ ops["p" /* avgPool */]; });
__webpack_require__.d(__webpack_exports__, "avgPool3d", function() { return /* reexport */ ops["q" /* avgPool3d */]; });
__webpack_require__.d(__webpack_exports__, "basicLSTMCell", function() { return /* reexport */ ops["r" /* basicLSTMCell */]; });
__webpack_require__.d(__webpack_exports__, "batchToSpaceND", function() { return /* reexport */ ops["w" /* batchToSpaceND */]; });
__webpack_require__.d(__webpack_exports__, "batchNorm", function() { return /* reexport */ ops["s" /* batchNorm */]; });
__webpack_require__.d(__webpack_exports__, "batchNorm2d", function() { return /* reexport */ ops["t" /* batchNorm2d */]; });
__webpack_require__.d(__webpack_exports__, "batchNorm3d", function() { return /* reexport */ ops["u" /* batchNorm3d */]; });
__webpack_require__.d(__webpack_exports__, "batchNorm4d", function() { return /* reexport */ ops["v" /* batchNorm4d */]; });
__webpack_require__.d(__webpack_exports__, "bincount", function() { return /* reexport */ ops["x" /* bincount */]; });
__webpack_require__.d(__webpack_exports__, "broadcastTo", function() { return /* reexport */ ops["z" /* broadcastTo */]; });
__webpack_require__.d(__webpack_exports__, "buffer", function() { return /* reexport */ ops["A" /* buffer */]; });
__webpack_require__.d(__webpack_exports__, "cast", function() { return /* reexport */ ops["B" /* cast */]; });
__webpack_require__.d(__webpack_exports__, "ceil", function() { return /* reexport */ ops["C" /* ceil */]; });
__webpack_require__.d(__webpack_exports__, "clipByValue", function() { return /* reexport */ ops["D" /* clipByValue */]; });
__webpack_require__.d(__webpack_exports__, "clone", function() { return /* reexport */ ops["E" /* clone */]; });
__webpack_require__.d(__webpack_exports__, "complex", function() { return /* reexport */ ops["F" /* complex */]; });
__webpack_require__.d(__webpack_exports__, "concat", function() { return /* reexport */ ops["G" /* concat */]; });
__webpack_require__.d(__webpack_exports__, "concat1d", function() { return /* reexport */ ops["H" /* concat1d */]; });
__webpack_require__.d(__webpack_exports__, "concat2d", function() { return /* reexport */ ops["I" /* concat2d */]; });
__webpack_require__.d(__webpack_exports__, "concat3d", function() { return /* reexport */ ops["J" /* concat3d */]; });
__webpack_require__.d(__webpack_exports__, "concat4d", function() { return /* reexport */ ops["K" /* concat4d */]; });
__webpack_require__.d(__webpack_exports__, "conv1d", function() { return /* reexport */ ops["L" /* conv1d */]; });
__webpack_require__.d(__webpack_exports__, "conv2d", function() { return /* reexport */ ops["M" /* conv2d */]; });
__webpack_require__.d(__webpack_exports__, "conv2dTranspose", function() { return /* reexport */ ops["N" /* conv2dTranspose */]; });
__webpack_require__.d(__webpack_exports__, "conv3d", function() { return /* reexport */ ops["O" /* conv3d */]; });
__webpack_require__.d(__webpack_exports__, "conv3dTranspose", function() { return /* reexport */ ops["P" /* conv3dTranspose */]; });
__webpack_require__.d(__webpack_exports__, "cos", function() { return /* reexport */ ops["Q" /* cos */]; });
__webpack_require__.d(__webpack_exports__, "cosh", function() { return /* reexport */ ops["R" /* cosh */]; });
__webpack_require__.d(__webpack_exports__, "cumsum", function() { return /* reexport */ ops["T" /* cumsum */]; });
__webpack_require__.d(__webpack_exports__, "denseBincount", function() { return /* reexport */ ops["U" /* denseBincount */]; });
__webpack_require__.d(__webpack_exports__, "depthToSpace", function() { return /* reexport */ ops["V" /* depthToSpace */]; });
__webpack_require__.d(__webpack_exports__, "depthwiseConv2d", function() { return /* reexport */ ops["W" /* depthwiseConv2d */]; });
__webpack_require__.d(__webpack_exports__, "diag", function() { return /* reexport */ ops["X" /* diag */]; });
__webpack_require__.d(__webpack_exports__, "dilation2d", function() { return /* reexport */ ops["Y" /* dilation2d */]; });
__webpack_require__.d(__webpack_exports__, "div", function() { return /* reexport */ ops["Z" /* div */]; });
__webpack_require__.d(__webpack_exports__, "divNoNan", function() { return /* reexport */ ops["ab" /* divNoNan */]; });
__webpack_require__.d(__webpack_exports__, "dot", function() { return /* reexport */ ops["bb" /* dot */]; });
__webpack_require__.d(__webpack_exports__, "einsum", function() { return /* reexport */ ops["db" /* einsum */]; });
__webpack_require__.d(__webpack_exports__, "elu", function() { return /* reexport */ ops["eb" /* elu */]; });
__webpack_require__.d(__webpack_exports__, "equal", function() { return /* reexport */ ops["gb" /* equal */]; });
__webpack_require__.d(__webpack_exports__, "erf", function() { return /* reexport */ ops["hb" /* erf */]; });
__webpack_require__.d(__webpack_exports__, "exp", function() { return /* reexport */ ops["ib" /* exp */]; });
__webpack_require__.d(__webpack_exports__, "expandDims", function() { return /* reexport */ ops["jb" /* expandDims */]; });
__webpack_require__.d(__webpack_exports__, "expm1", function() { return /* reexport */ ops["kb" /* expm1 */]; });
__webpack_require__.d(__webpack_exports__, "eye", function() { return /* reexport */ ops["lb" /* eye */]; });
__webpack_require__.d(__webpack_exports__, "fill", function() { return /* reexport */ ops["nb" /* fill */]; });
__webpack_require__.d(__webpack_exports__, "floor", function() { return /* reexport */ ops["ob" /* floor */]; });
__webpack_require__.d(__webpack_exports__, "floorDiv", function() { return /* reexport */ ops["pb" /* floorDiv */]; });
__webpack_require__.d(__webpack_exports__, "gather", function() { return /* reexport */ ops["rb" /* gather */]; });
__webpack_require__.d(__webpack_exports__, "greater", function() { return /* reexport */ ops["tb" /* greater */]; });
__webpack_require__.d(__webpack_exports__, "greaterEqual", function() { return /* reexport */ ops["ub" /* greaterEqual */]; });
__webpack_require__.d(__webpack_exports__, "imag", function() { return /* reexport */ ops["wb" /* imag */]; });
__webpack_require__.d(__webpack_exports__, "isFinite", function() { return /* reexport */ ops["Ab" /* isFinite */]; });
__webpack_require__.d(__webpack_exports__, "isInf", function() { return /* reexport */ ops["Bb" /* isInf */]; });
__webpack_require__.d(__webpack_exports__, "isNaN", function() { return /* reexport */ ops["Cb" /* isNaN */]; });
__webpack_require__.d(__webpack_exports__, "leakyRelu", function() { return /* reexport */ ops["Db" /* leakyRelu */]; });
__webpack_require__.d(__webpack_exports__, "less", function() { return /* reexport */ ops["Eb" /* less */]; });
__webpack_require__.d(__webpack_exports__, "lessEqual", function() { return /* reexport */ ops["Fb" /* lessEqual */]; });
__webpack_require__.d(__webpack_exports__, "linspace", function() { return /* reexport */ ops["Hb" /* linspace */]; });
__webpack_require__.d(__webpack_exports__, "localResponseNormalization", function() { return /* reexport */ ops["Ib" /* localResponseNormalization */]; });
__webpack_require__.d(__webpack_exports__, "log", function() { return /* reexport */ ops["Jb" /* log */]; });
__webpack_require__.d(__webpack_exports__, "log1p", function() { return /* reexport */ ops["Kb" /* log1p */]; });
__webpack_require__.d(__webpack_exports__, "logSigmoid", function() { return /* reexport */ ops["Lb" /* logSigmoid */]; });
__webpack_require__.d(__webpack_exports__, "logSoftmax", function() { return /* reexport */ ops["Mb" /* logSoftmax */]; });
__webpack_require__.d(__webpack_exports__, "logSumExp", function() { return /* reexport */ ops["Nb" /* logSumExp */]; });
__webpack_require__.d(__webpack_exports__, "logicalAnd", function() { return /* reexport */ ops["Ob" /* logicalAnd */]; });
__webpack_require__.d(__webpack_exports__, "logicalNot", function() { return /* reexport */ ops["Pb" /* logicalNot */]; });
__webpack_require__.d(__webpack_exports__, "logicalOr", function() { return /* reexport */ ops["Qb" /* logicalOr */]; });
__webpack_require__.d(__webpack_exports__, "logicalXor", function() { return /* reexport */ ops["Rb" /* logicalXor */]; });
__webpack_require__.d(__webpack_exports__, "matMul", function() { return /* reexport */ ops["Tb" /* matMul */]; });
__webpack_require__.d(__webpack_exports__, "max", function() { return /* reexport */ ops["Ub" /* max */]; });
__webpack_require__.d(__webpack_exports__, "maxPool", function() { return /* reexport */ ops["Vb" /* maxPool */]; });
__webpack_require__.d(__webpack_exports__, "maxPool3d", function() { return /* reexport */ ops["Wb" /* maxPool3d */]; });
__webpack_require__.d(__webpack_exports__, "maxPoolWithArgmax", function() { return /* reexport */ ops["Xb" /* maxPoolWithArgmax */]; });
__webpack_require__.d(__webpack_exports__, "maximum", function() { return /* reexport */ ops["Yb" /* maximum */]; });
__webpack_require__.d(__webpack_exports__, "mean", function() { return /* reexport */ ops["Zb" /* mean */]; });
__webpack_require__.d(__webpack_exports__, "meshgrid", function() { return /* reexport */ ops["ac" /* meshgrid */]; });
__webpack_require__.d(__webpack_exports__, "min", function() { return /* reexport */ ops["bc" /* min */]; });
__webpack_require__.d(__webpack_exports__, "minimum", function() { return /* reexport */ ops["cc" /* minimum */]; });
__webpack_require__.d(__webpack_exports__, "mirrorPad", function() { return /* reexport */ ops["dc" /* mirrorPad */]; });
__webpack_require__.d(__webpack_exports__, "mod", function() { return /* reexport */ ops["ec" /* mod */]; });
__webpack_require__.d(__webpack_exports__, "moments", function() { return /* reexport */ ops["fc" /* moments */]; });
__webpack_require__.d(__webpack_exports__, "mul", function() { return /* reexport */ ops["hc" /* mul */]; });
__webpack_require__.d(__webpack_exports__, "multiRNNCell", function() { return /* reexport */ ops["ic" /* multiRNNCell */]; });
__webpack_require__.d(__webpack_exports__, "multinomial", function() { return /* reexport */ ops["jc" /* multinomial */]; });
__webpack_require__.d(__webpack_exports__, "neg", function() { return /* reexport */ ops["kc" /* neg */]; });
__webpack_require__.d(__webpack_exports__, "notEqual", function() { return /* reexport */ ops["mc" /* notEqual */]; });
__webpack_require__.d(__webpack_exports__, "oneHot", function() { return /* reexport */ ops["nc" /* oneHot */]; });
__webpack_require__.d(__webpack_exports__, "ones", function() { return /* reexport */ ops["oc" /* ones */]; });
__webpack_require__.d(__webpack_exports__, "onesLike", function() { return /* reexport */ ops["pc" /* onesLike */]; });
__webpack_require__.d(__webpack_exports__, "outerProduct", function() { return /* reexport */ ops["rc" /* outerProduct */]; });
__webpack_require__.d(__webpack_exports__, "pad", function() { return /* reexport */ ops["sc" /* pad */]; });
__webpack_require__.d(__webpack_exports__, "pad1d", function() { return /* reexport */ ops["tc" /* pad1d */]; });
__webpack_require__.d(__webpack_exports__, "pad2d", function() { return /* reexport */ ops["uc" /* pad2d */]; });
__webpack_require__.d(__webpack_exports__, "pad3d", function() { return /* reexport */ ops["vc" /* pad3d */]; });
__webpack_require__.d(__webpack_exports__, "pad4d", function() { return /* reexport */ ops["wc" /* pad4d */]; });
__webpack_require__.d(__webpack_exports__, "pool", function() { return /* reexport */ ops["xc" /* pool */]; });
__webpack_require__.d(__webpack_exports__, "pow", function() { return /* reexport */ ops["yc" /* pow */]; });
__webpack_require__.d(__webpack_exports__, "prelu", function() { return /* reexport */ ops["zc" /* prelu */]; });
__webpack_require__.d(__webpack_exports__, "print", function() { return /* reexport */ ops["Ac" /* print */]; });
__webpack_require__.d(__webpack_exports__, "prod", function() { return /* reexport */ ops["Bc" /* prod */]; });
__webpack_require__.d(__webpack_exports__, "rand", function() { return /* reexport */ ops["Cc" /* rand */]; });
__webpack_require__.d(__webpack_exports__, "randomGamma", function() { return /* reexport */ ops["Dc" /* randomGamma */]; });
__webpack_require__.d(__webpack_exports__, "randomNormal", function() { return /* reexport */ ops["Ec" /* randomNormal */]; });
__webpack_require__.d(__webpack_exports__, "randomUniform", function() { return /* reexport */ ops["Fc" /* randomUniform */]; });
__webpack_require__.d(__webpack_exports__, "range", function() { return /* reexport */ ops["Gc" /* range */]; });
__webpack_require__.d(__webpack_exports__, "real", function() { return /* reexport */ ops["Hc" /* real */]; });
__webpack_require__.d(__webpack_exports__, "reciprocal", function() { return /* reexport */ ops["Ic" /* reciprocal */]; });
__webpack_require__.d(__webpack_exports__, "relu", function() { return /* reexport */ ops["Jc" /* relu */]; });
__webpack_require__.d(__webpack_exports__, "relu6", function() { return /* reexport */ ops["Kc" /* relu6 */]; });
__webpack_require__.d(__webpack_exports__, "reshape", function() { return /* reexport */ ops["Lc" /* reshape */]; });
__webpack_require__.d(__webpack_exports__, "reverse", function() { return /* reexport */ ops["Mc" /* reverse */]; });
__webpack_require__.d(__webpack_exports__, "reverse1d", function() { return /* reexport */ ops["Nc" /* reverse1d */]; });
__webpack_require__.d(__webpack_exports__, "reverse2d", function() { return /* reexport */ ops["Oc" /* reverse2d */]; });
__webpack_require__.d(__webpack_exports__, "reverse3d", function() { return /* reexport */ ops["Pc" /* reverse3d */]; });
__webpack_require__.d(__webpack_exports__, "reverse4d", function() { return /* reexport */ ops["Qc" /* reverse4d */]; });
__webpack_require__.d(__webpack_exports__, "round", function() { return /* reexport */ ops["Sc" /* round */]; });
__webpack_require__.d(__webpack_exports__, "rsqrt", function() { return /* reexport */ ops["Tc" /* rsqrt */]; });
__webpack_require__.d(__webpack_exports__, "scalar", function() { return /* reexport */ ops["Uc" /* scalar */]; });
__webpack_require__.d(__webpack_exports__, "selu", function() { return /* reexport */ ops["Wc" /* selu */]; });
__webpack_require__.d(__webpack_exports__, "separableConv2d", function() { return /* reexport */ ops["Xc" /* separableConv2d */]; });
__webpack_require__.d(__webpack_exports__, "setdiff1dAsync", function() { return /* reexport */ ops["Yc" /* setdiff1dAsync */]; });
__webpack_require__.d(__webpack_exports__, "sigmoid", function() { return /* reexport */ ops["Zc" /* sigmoid */]; });
__webpack_require__.d(__webpack_exports__, "sign", function() { return /* reexport */ ops["ad" /* sign */]; });
__webpack_require__.d(__webpack_exports__, "sin", function() { return /* reexport */ ops["cd" /* sin */]; });
__webpack_require__.d(__webpack_exports__, "sinh", function() { return /* reexport */ ops["dd" /* sinh */]; });
__webpack_require__.d(__webpack_exports__, "slice", function() { return /* reexport */ ops["ed" /* slice */]; });
__webpack_require__.d(__webpack_exports__, "slice1d", function() { return /* reexport */ ops["fd" /* slice1d */]; });
__webpack_require__.d(__webpack_exports__, "slice2d", function() { return /* reexport */ ops["gd" /* slice2d */]; });
__webpack_require__.d(__webpack_exports__, "slice3d", function() { return /* reexport */ ops["hd" /* slice3d */]; });
__webpack_require__.d(__webpack_exports__, "slice4d", function() { return /* reexport */ ops["id" /* slice4d */]; });
__webpack_require__.d(__webpack_exports__, "softmax", function() { return /* reexport */ ops["jd" /* softmax */]; });
__webpack_require__.d(__webpack_exports__, "softplus", function() { return /* reexport */ ops["kd" /* softplus */]; });
__webpack_require__.d(__webpack_exports__, "spaceToBatchND", function() { return /* reexport */ ops["ld" /* spaceToBatchND */]; });
__webpack_require__.d(__webpack_exports__, "fft", function() { return /* reexport */ ops["mb" /* fft */]; });
__webpack_require__.d(__webpack_exports__, "ifft", function() { return /* reexport */ ops["vb" /* ifft */]; });
__webpack_require__.d(__webpack_exports__, "irfft", function() { return /* reexport */ ops["zb" /* irfft */]; });
__webpack_require__.d(__webpack_exports__, "rfft", function() { return /* reexport */ ops["Rc" /* rfft */]; });
__webpack_require__.d(__webpack_exports__, "split", function() { return /* reexport */ ops["pd" /* split */]; });
__webpack_require__.d(__webpack_exports__, "sqrt", function() { return /* reexport */ ops["qd" /* sqrt */]; });
__webpack_require__.d(__webpack_exports__, "square", function() { return /* reexport */ ops["rd" /* square */]; });
__webpack_require__.d(__webpack_exports__, "squaredDifference", function() { return /* reexport */ ops["sd" /* squaredDifference */]; });
__webpack_require__.d(__webpack_exports__, "squeeze", function() { return /* reexport */ ops["td" /* squeeze */]; });
__webpack_require__.d(__webpack_exports__, "stack", function() { return /* reexport */ ops["ud" /* stack */]; });
__webpack_require__.d(__webpack_exports__, "step", function() { return /* reexport */ ops["vd" /* step */]; });
__webpack_require__.d(__webpack_exports__, "stridedSlice", function() { return /* reexport */ ops["wd" /* stridedSlice */]; });
__webpack_require__.d(__webpack_exports__, "sub", function() { return /* reexport */ ops["xd" /* sub */]; });
__webpack_require__.d(__webpack_exports__, "sum", function() { return /* reexport */ ops["yd" /* sum */]; });
__webpack_require__.d(__webpack_exports__, "tan", function() { return /* reexport */ ops["zd" /* tan */]; });
__webpack_require__.d(__webpack_exports__, "tanh", function() { return /* reexport */ ops["Ad" /* tanh */]; });
__webpack_require__.d(__webpack_exports__, "tensor", function() { return /* reexport */ ops["Bd" /* tensor */]; });
__webpack_require__.d(__webpack_exports__, "tensor1d", function() { return /* reexport */ ops["Cd" /* tensor1d */]; });
__webpack_require__.d(__webpack_exports__, "tensor2d", function() { return /* reexport */ ops["Dd" /* tensor2d */]; });
__webpack_require__.d(__webpack_exports__, "tensor3d", function() { return /* reexport */ ops["Ed" /* tensor3d */]; });
__webpack_require__.d(__webpack_exports__, "tensor4d", function() { return /* reexport */ ops["Fd" /* tensor4d */]; });
__webpack_require__.d(__webpack_exports__, "tensor5d", function() { return /* reexport */ ops["Gd" /* tensor5d */]; });
__webpack_require__.d(__webpack_exports__, "tensor6d", function() { return /* reexport */ ops["Hd" /* tensor6d */]; });
__webpack_require__.d(__webpack_exports__, "tile", function() { return /* reexport */ ops["Id" /* tile */]; });
__webpack_require__.d(__webpack_exports__, "topk", function() { return /* reexport */ ops["Jd" /* topk */]; });
__webpack_require__.d(__webpack_exports__, "truncatedNormal", function() { return /* reexport */ ops["Ld" /* truncatedNormal */]; });
__webpack_require__.d(__webpack_exports__, "unique", function() { return /* reexport */ ops["Md" /* unique */]; });
__webpack_require__.d(__webpack_exports__, "unsortedSegmentSum", function() { return /* reexport */ ops["Nd" /* unsortedSegmentSum */]; });
__webpack_require__.d(__webpack_exports__, "unstack", function() { return /* reexport */ ops["Od" /* unstack */]; });
__webpack_require__.d(__webpack_exports__, "variable", function() { return /* reexport */ ops["Pd" /* variable */]; });
__webpack_require__.d(__webpack_exports__, "where", function() { return /* reexport */ ops["Qd" /* where */]; });
__webpack_require__.d(__webpack_exports__, "whereAsync", function() { return /* reexport */ ops["Rd" /* whereAsync */]; });
__webpack_require__.d(__webpack_exports__, "zeros", function() { return /* reexport */ ops["Sd" /* zeros */]; });
__webpack_require__.d(__webpack_exports__, "zerosLike", function() { return /* reexport */ ops["Td" /* zerosLike */]; });
__webpack_require__.d(__webpack_exports__, "booleanMaskAsync", function() { return /* reexport */ ops["y" /* booleanMaskAsync */]; });
__webpack_require__.d(__webpack_exports__, "transpose", function() { return /* reexport */ ops["Kd" /* transpose */]; });
__webpack_require__.d(__webpack_exports__, "norm", function() { return /* reexport */ ops["lc" /* norm */]; });
__webpack_require__.d(__webpack_exports__, "movingAverage", function() { return /* reexport */ ops["gc" /* movingAverage */]; });
__webpack_require__.d(__webpack_exports__, "scatterND", function() { return /* reexport */ ops["Vc" /* scatterND */]; });
__webpack_require__.d(__webpack_exports__, "sparseToDense", function() { return /* reexport */ ops["nd" /* sparseToDense */]; });
__webpack_require__.d(__webpack_exports__, "gatherND", function() { return /* reexport */ ops["sb" /* gatherND */]; });
__webpack_require__.d(__webpack_exports__, "dropout", function() { return /* reexport */ ops["cb" /* dropout */]; });
__webpack_require__.d(__webpack_exports__, "enclosingPowerOfTwo", function() { return /* reexport */ ops["fb" /* enclosingPowerOfTwo */]; });
__webpack_require__.d(__webpack_exports__, "cosineWindow", function() { return /* reexport */ ops["S" /* cosineWindow */]; });
__webpack_require__.d(__webpack_exports__, "inTopKAsync", function() { return /* reexport */ ops["yb" /* inTopKAsync */]; });
__webpack_require__.d(__webpack_exports__, "op", function() { return /* reexport */ ops["qc" /* op */]; });
__webpack_require__.d(__webpack_exports__, "OP_SCOPE_SUFFIX", function() { return /* reexport */ ops["a" /* OP_SCOPE_SUFFIX */]; });
__webpack_require__.d(__webpack_exports__, "image", function() { return /* reexport */ ops["xb" /* image */]; });
__webpack_require__.d(__webpack_exports__, "linalg", function() { return /* reexport */ ops["Gb" /* linalg */]; });
__webpack_require__.d(__webpack_exports__, "losses", function() { return /* reexport */ ops["Sb" /* losses */]; });
__webpack_require__.d(__webpack_exports__, "spectral", function() { return /* reexport */ ops["od" /* spectral */]; });
__webpack_require__.d(__webpack_exports__, "fused", function() { return /* reexport */ ops["qb" /* fused */]; });
__webpack_require__.d(__webpack_exports__, "signal", function() { return /* reexport */ ops["bd" /* signal */]; });
__webpack_require__.d(__webpack_exports__, "sparse", function() { return /* reexport */ ops["md" /* sparse */]; });
// ---------------------------------------------------------------------------
// GENERATED CODE (webpack bundle) — re-export table for @tensorflow/tfjs-core.
// Each __webpack_require__.d(exports, name, getter) call installs a lazy,
// enumerable getter on the bundle's exports object (see the webpack runtime
// helper near the top of this file, which uses Object.defineProperty).
// The quoted short keys ("a", "bb", "lc", ...) are webpack's minified export
// ids inside the referenced module objects (globals, kernel_registry,
// gradients, environment, kernel_names, ...); the inline /* ... */ comment on
// each line is webpack's record of the original, unmangled export name.
// Do not edit these mappings by hand — regenerate the bundle instead.
// ---------------------------------------------------------------------------
__webpack_require__.d(__webpack_exports__, "Reduction", function() { return /* reexport */ loss_ops_utils["a" /* Reduction */]; });
__webpack_require__.d(__webpack_exports__, "train", function() { return /* reexport */ train; });
__webpack_require__.d(__webpack_exports__, "enableProdMode", function() { return /* reexport */ globals["g" /* enableProdMode */]; });
__webpack_require__.d(__webpack_exports__, "enableDebugMode", function() { return /* reexport */ globals["f" /* enableDebugMode */]; });
__webpack_require__.d(__webpack_exports__, "disableDeprecationWarnings", function() { return /* reexport */ globals["c" /* disableDeprecationWarnings */]; });
__webpack_require__.d(__webpack_exports__, "deprecationWarn", function() { return /* reexport */ globals["b" /* deprecationWarn */]; });
__webpack_require__.d(__webpack_exports__, "disposeVariables", function() { return /* reexport */ globals["e" /* disposeVariables */]; });
__webpack_require__.d(__webpack_exports__, "engine", function() { return /* reexport */ globals["h" /* engine */]; });
__webpack_require__.d(__webpack_exports__, "memory", function() { return /* reexport */ globals["m" /* memory */]; });
__webpack_require__.d(__webpack_exports__, "profile", function() { return /* reexport */ globals["n" /* profile */]; });
__webpack_require__.d(__webpack_exports__, "tidy", function() { return /* reexport */ globals["t" /* tidy */]; });
__webpack_require__.d(__webpack_exports__, "dispose", function() { return /* reexport */ globals["d" /* dispose */]; });
__webpack_require__.d(__webpack_exports__, "keep", function() { return /* reexport */ globals["l" /* keep */]; });
__webpack_require__.d(__webpack_exports__, "time", function() { return /* reexport */ globals["u" /* time */]; });
__webpack_require__.d(__webpack_exports__, "setBackend", function() { return /* reexport */ globals["r" /* setBackend */]; });
__webpack_require__.d(__webpack_exports__, "ready", function() { return /* reexport */ globals["o" /* ready */]; });
__webpack_require__.d(__webpack_exports__, "getBackend", function() { return /* reexport */ globals["k" /* getBackend */]; });
__webpack_require__.d(__webpack_exports__, "removeBackend", function() { return /* reexport */ globals["q" /* removeBackend */]; });
__webpack_require__.d(__webpack_exports__, "findBackend", function() { return /* reexport */ globals["i" /* findBackend */]; });
__webpack_require__.d(__webpack_exports__, "findBackendFactory", function() { return /* reexport */ globals["j" /* findBackendFactory */]; });
__webpack_require__.d(__webpack_exports__, "registerBackend", function() { return /* reexport */ globals["p" /* registerBackend */]; });
__webpack_require__.d(__webpack_exports__, "backend", function() { return /* reexport */ globals["a" /* backend */]; });
__webpack_require__.d(__webpack_exports__, "setPlatform", function() { return /* reexport */ globals["s" /* setPlatform */]; });
__webpack_require__.d(__webpack_exports__, "getKernel", function() { return /* reexport */ kernel_registry["c" /* getKernel */]; });
__webpack_require__.d(__webpack_exports__, "getGradient", function() { return /* reexport */ kernel_registry["b" /* getGradient */]; });
__webpack_require__.d(__webpack_exports__, "getKernelsForBackend", function() { return /* reexport */ kernel_registry["d" /* getKernelsForBackend */]; });
__webpack_require__.d(__webpack_exports__, "registerKernel", function() { return /* reexport */ kernel_registry["f" /* registerKernel */]; });
__webpack_require__.d(__webpack_exports__, "registerGradient", function() { return /* reexport */ kernel_registry["e" /* registerGradient */]; });
__webpack_require__.d(__webpack_exports__, "unregisterKernel", function() { return /* reexport */ kernel_registry["h" /* unregisterKernel */]; });
__webpack_require__.d(__webpack_exports__, "unregisterGradient", function() { return /* reexport */ kernel_registry["g" /* unregisterGradient */]; });
__webpack_require__.d(__webpack_exports__, "copyRegisteredKernels", function() { return /* reexport */ kernel_registry["a" /* copyRegisteredKernels */]; });
__webpack_require__.d(__webpack_exports__, "customGrad", function() { return /* reexport */ gradients["a" /* customGrad */]; });
__webpack_require__.d(__webpack_exports__, "grad", function() { return /* reexport */ gradients["b" /* grad */]; });
__webpack_require__.d(__webpack_exports__, "grads", function() { return /* reexport */ gradients["c" /* grads */]; });
__webpack_require__.d(__webpack_exports__, "valueAndGrad", function() { return /* reexport */ gradients["d" /* valueAndGrad */]; });
__webpack_require__.d(__webpack_exports__, "valueAndGrads", function() { return /* reexport */ gradients["e" /* valueAndGrads */]; });
__webpack_require__.d(__webpack_exports__, "variableGrads", function() { return /* reexport */ gradients["f" /* variableGrads */]; });
__webpack_require__.d(__webpack_exports__, "Environment", function() { return /* reexport */ environment["b" /* Environment */]; });
__webpack_require__.d(__webpack_exports__, "env", function() { return /* reexport */ environment["c" /* env */]; });
__webpack_require__.d(__webpack_exports__, "ENV", function() { return /* reexport */ environment["a" /* ENV */]; });
__webpack_require__.d(__webpack_exports__, "version_core", function() { return /* reexport */ version; });
__webpack_require__.d(__webpack_exports__, "nextFrame", function() { return /* reexport */ browser_util["a" /* nextFrame */]; });
// The following getters re-export whole namespace objects (assembled further
// down in this module) rather than single bindings.
__webpack_require__.d(__webpack_exports__, "browser", function() { return /* reexport */ browser_namespaceObject; });
__webpack_require__.d(__webpack_exports__, "io", function() { return /* reexport */ io_namespaceObject; });
__webpack_require__.d(__webpack_exports__, "math", function() { return /* reexport */ math_namespaceObject; });
__webpack_require__.d(__webpack_exports__, "serialization", function() { return /* reexport */ serialization_namespaceObject; });
__webpack_require__.d(__webpack_exports__, "test_util", function() { return /* reexport */ test_util; });
__webpack_require__.d(__webpack_exports__, "util", function() { return /* reexport */ util; });
__webpack_require__.d(__webpack_exports__, "backend_util", function() { return /* reexport */ backend_util_namespaceObject; });
__webpack_require__.d(__webpack_exports__, "tensor_util", function() { return /* reexport */ tensor_util; });
__webpack_require__.d(__webpack_exports__, "slice_util", function() { return /* reexport */ slice_util; });
__webpack_require__.d(__webpack_exports__, "gather_util", function() { return /* reexport */ gather_nd_util_namespaceObject; });
__webpack_require__.d(__webpack_exports__, "scatter_util", function() { return /* reexport */ scatter_nd_util; });
__webpack_require__.d(__webpack_exports__, "device_util", function() { return /* reexport */ device_util; });
__webpack_require__.d(__webpack_exports__, "kernel_impls", function() { return /* reexport */ kernel_impls_namespaceObject; });
__webpack_require__.d(__webpack_exports__, "KernelBackend", function() { return /* reexport */ backend["b" /* KernelBackend */]; });
__webpack_require__.d(__webpack_exports__, "DataStorage", function() { return /* reexport */ backend["a" /* DataStorage */]; });
// Kernel-name constants: every tfjs kernel identifier is re-exported from the
// minified kernel_names module. Note the key order does not always follow the
// alphabetical order of the public names (e.g. Atanh -> "n", Atan2 -> "m").
__webpack_require__.d(__webpack_exports__, "Abs", function() { return /* reexport */ kernel_names["a" /* Abs */]; });
__webpack_require__.d(__webpack_exports__, "Acos", function() { return /* reexport */ kernel_names["b" /* Acos */]; });
__webpack_require__.d(__webpack_exports__, "Acosh", function() { return /* reexport */ kernel_names["c" /* Acosh */]; });
__webpack_require__.d(__webpack_exports__, "Add", function() { return /* reexport */ kernel_names["d" /* Add */]; });
__webpack_require__.d(__webpack_exports__, "AddN", function() { return /* reexport */ kernel_names["e" /* AddN */]; });
__webpack_require__.d(__webpack_exports__, "All", function() { return /* reexport */ kernel_names["f" /* All */]; });
__webpack_require__.d(__webpack_exports__, "Any", function() { return /* reexport */ kernel_names["g" /* Any */]; });
__webpack_require__.d(__webpack_exports__, "ArgMax", function() { return /* reexport */ kernel_names["h" /* ArgMax */]; });
__webpack_require__.d(__webpack_exports__, "ArgMin", function() { return /* reexport */ kernel_names["i" /* ArgMin */]; });
__webpack_require__.d(__webpack_exports__, "Asin", function() { return /* reexport */ kernel_names["j" /* Asin */]; });
__webpack_require__.d(__webpack_exports__, "Asinh", function() { return /* reexport */ kernel_names["k" /* Asinh */]; });
__webpack_require__.d(__webpack_exports__, "Atan", function() { return /* reexport */ kernel_names["l" /* Atan */]; });
__webpack_require__.d(__webpack_exports__, "Atanh", function() { return /* reexport */ kernel_names["n" /* Atanh */]; });
__webpack_require__.d(__webpack_exports__, "Atan2", function() { return /* reexport */ kernel_names["m" /* Atan2 */]; });
__webpack_require__.d(__webpack_exports__, "AvgPool", function() { return /* reexport */ kernel_names["o" /* AvgPool */]; });
__webpack_require__.d(__webpack_exports__, "AvgPoolGrad", function() { return /* reexport */ kernel_names["r" /* AvgPoolGrad */]; });
__webpack_require__.d(__webpack_exports__, "AvgPool3D", function() { return /* reexport */ kernel_names["p" /* AvgPool3D */]; });
__webpack_require__.d(__webpack_exports__, "AvgPool3DGrad", function() { return /* reexport */ kernel_names["q" /* AvgPool3DGrad */]; });
__webpack_require__.d(__webpack_exports__, "BatchMatMul", function() { return /* reexport */ kernel_names["s" /* BatchMatMul */]; });
__webpack_require__.d(__webpack_exports__, "BatchToSpaceND", function() { return /* reexport */ kernel_names["t" /* BatchToSpaceND */]; });
__webpack_require__.d(__webpack_exports__, "Bincount", function() { return /* reexport */ kernel_names["u" /* Bincount */]; });
__webpack_require__.d(__webpack_exports__, "BroadcastTo", function() { return /* reexport */ kernel_names["v" /* BroadcastTo */]; });
__webpack_require__.d(__webpack_exports__, "Cast", function() { return /* reexport */ kernel_names["w" /* Cast */]; });
__webpack_require__.d(__webpack_exports__, "Ceil", function() { return /* reexport */ kernel_names["x" /* Ceil */]; });
__webpack_require__.d(__webpack_exports__, "ClipByValue", function() { return /* reexport */ kernel_names["y" /* ClipByValue */]; });
__webpack_require__.d(__webpack_exports__, "Complex", function() { return /* reexport */ kernel_names["z" /* Complex */]; });
__webpack_require__.d(__webpack_exports__, "ComplexAbs", function() { return /* reexport */ kernel_names["A" /* ComplexAbs */]; });
__webpack_require__.d(__webpack_exports__, "Concat", function() { return /* reexport */ kernel_names["B" /* Concat */]; });
__webpack_require__.d(__webpack_exports__, "Conv2D", function() { return /* reexport */ kernel_names["C" /* Conv2D */]; });
__webpack_require__.d(__webpack_exports__, "Conv2DBackpropFilter", function() { return /* reexport */ kernel_names["D" /* Conv2DBackpropFilter */]; });
__webpack_require__.d(__webpack_exports__, "Conv2DBackpropInput", function() { return /* reexport */ kernel_names["E" /* Conv2DBackpropInput */]; });
__webpack_require__.d(__webpack_exports__, "Conv3D", function() { return /* reexport */ kernel_names["F" /* Conv3D */]; });
__webpack_require__.d(__webpack_exports__, "Conv3DBackpropFilterV2", function() { return /* reexport */ kernel_names["G" /* Conv3DBackpropFilterV2 */]; });
__webpack_require__.d(__webpack_exports__, "Conv3DBackpropInputV2", function() { return /* reexport */ kernel_names["H" /* Conv3DBackpropInputV2 */]; });
__webpack_require__.d(__webpack_exports__, "Cos", function() { return /* reexport */ kernel_names["I" /* Cos */]; });
__webpack_require__.d(__webpack_exports__, "Cosh", function() { return /* reexport */ kernel_names["J" /* Cosh */]; });
__webpack_require__.d(__webpack_exports__, "Cumsum", function() { return /* reexport */ kernel_names["L" /* Cumsum */]; });
__webpack_require__.d(__webpack_exports__, "CropAndResize", function() { return /* reexport */ kernel_names["K" /* CropAndResize */]; });
__webpack_require__.d(__webpack_exports__, "DenseBincount", function() { return /* reexport */ kernel_names["M" /* DenseBincount */]; });
__webpack_require__.d(__webpack_exports__, "DepthToSpace", function() { return /* reexport */ kernel_names["N" /* DepthToSpace */]; });
__webpack_require__.d(__webpack_exports__, "DepthwiseConv2dNative", function() { return /* reexport */ kernel_names["O" /* DepthwiseConv2dNative */]; });
__webpack_require__.d(__webpack_exports__, "DepthwiseConv2dNativeBackpropFilter", function() { return /* reexport */ kernel_names["P" /* DepthwiseConv2dNativeBackpropFilter */]; });
__webpack_require__.d(__webpack_exports__, "DepthwiseConv2dNativeBackpropInput", function() { return /* reexport */ kernel_names["Q" /* DepthwiseConv2dNativeBackpropInput */]; });
__webpack_require__.d(__webpack_exports__, "Diag", function() { return /* reexport */ kernel_names["R" /* Diag */]; });
__webpack_require__.d(__webpack_exports__, "Dilation2D", function() { return /* reexport */ kernel_names["S" /* Dilation2D */]; });
__webpack_require__.d(__webpack_exports__, "Dilation2DBackpropInput", function() { return /* reexport */ kernel_names["U" /* Dilation2DBackpropInput */]; });
__webpack_require__.d(__webpack_exports__, "Dilation2DBackpropFilter", function() { return /* reexport */ kernel_names["T" /* Dilation2DBackpropFilter */]; });
__webpack_require__.d(__webpack_exports__, "RealDiv", function() { return /* reexport */ kernel_names["lc" /* RealDiv */]; });
__webpack_require__.d(__webpack_exports__, "Einsum", function() { return /* reexport */ kernel_names["V" /* Einsum */]; });
__webpack_require__.d(__webpack_exports__, "Elu", function() { return /* reexport */ kernel_names["W" /* Elu */]; });
__webpack_require__.d(__webpack_exports__, "EluGrad", function() { return /* reexport */ kernel_names["X" /* EluGrad */]; });
__webpack_require__.d(__webpack_exports__, "Erf", function() { return /* reexport */ kernel_names["Z" /* Erf */]; });
__webpack_require__.d(__webpack_exports__, "Equal", function() { return /* reexport */ kernel_names["Y" /* Equal */]; });
__webpack_require__.d(__webpack_exports__, "Exp", function() { return /* reexport */ kernel_names["ab" /* Exp */]; });
__webpack_require__.d(__webpack_exports__, "ExpandDims", function() { return /* reexport */ kernel_names["bb" /* ExpandDims */]; });
__webpack_require__.d(__webpack_exports__, "Expm1", function() { return /* reexport */ kernel_names["cb" /* Expm1 */]; });
__webpack_require__.d(__webpack_exports__, "FFT", function() { return /* reexport */ kernel_names["db" /* FFT */]; });
__webpack_require__.d(__webpack_exports__, "Fill", function() { return /* reexport */ kernel_names["eb" /* Fill */]; });
__webpack_require__.d(__webpack_exports__, "FlipLeftRight", function() { return /* reexport */ kernel_names["fb" /* FlipLeftRight */]; });
__webpack_require__.d(__webpack_exports__, "Floor", function() { return /* reexport */ kernel_names["gb" /* Floor */]; });
__webpack_require__.d(__webpack_exports__, "FloorDiv", function() { return /* reexport */ kernel_names["hb" /* FloorDiv */]; });
__webpack_require__.d(__webpack_exports__, "FusedBatchNorm", function() { return /* reexport */ kernel_names["jb" /* FusedBatchNorm */]; });
__webpack_require__.d(__webpack_exports__, "GatherV2", function() { return /* reexport */ kernel_names["nb" /* GatherV2 */]; });
__webpack_require__.d(__webpack_exports__, "GatherNd", function() { return /* reexport */ kernel_names["mb" /* GatherNd */]; });
__webpack_require__.d(__webpack_exports__, "Greater", function() { return /* reexport */ kernel_names["ob" /* Greater */]; });
__webpack_require__.d(__webpack_exports__, "GreaterEqual", function() { return /* reexport */ kernel_names["pb" /* GreaterEqual */]; });
__webpack_require__.d(__webpack_exports__, "Identity", function() { return /* reexport */ kernel_names["rb" /* Identity */]; });
__webpack_require__.d(__webpack_exports__, "IFFT", function() { return /* reexport */ kernel_names["qb" /* IFFT */]; });
__webpack_require__.d(__webpack_exports__, "Imag", function() { return /* reexport */ kernel_names["sb" /* Imag */]; });
__webpack_require__.d(__webpack_exports__, "IsFinite", function() { return /* reexport */ kernel_names["tb" /* IsFinite */]; });
__webpack_require__.d(__webpack_exports__, "IsInf", function() { return /* reexport */ kernel_names["ub" /* IsInf */]; });
__webpack_require__.d(__webpack_exports__, "IsNan", function() { return /* reexport */ kernel_names["vb" /* IsNan */]; });
__webpack_require__.d(__webpack_exports__, "LeakyRelu", function() { return /* reexport */ kernel_names["yb" /* LeakyRelu */]; });
__webpack_require__.d(__webpack_exports__, "Less", function() { return /* reexport */ kernel_names["zb" /* Less */]; });
__webpack_require__.d(__webpack_exports__, "LessEqual", function() { return /* reexport */ kernel_names["Ab" /* LessEqual */]; });
__webpack_require__.d(__webpack_exports__, "LinSpace", function() { return /* reexport */ kernel_names["Bb" /* LinSpace */]; });
__webpack_require__.d(__webpack_exports__, "Log", function() { return /* reexport */ kernel_names["Cb" /* Log */]; });
__webpack_require__.d(__webpack_exports__, "Log1p", function() { return /* reexport */ kernel_names["Db" /* Log1p */]; });
__webpack_require__.d(__webpack_exports__, "LogicalAnd", function() { return /* reexport */ kernel_names["Fb" /* LogicalAnd */]; });
__webpack_require__.d(__webpack_exports__, "LogicalNot", function() { return /* reexport */ kernel_names["Gb" /* LogicalNot */]; });
__webpack_require__.d(__webpack_exports__, "LogicalOr", function() { return /* reexport */ kernel_names["Hb" /* LogicalOr */]; });
__webpack_require__.d(__webpack_exports__, "LogSoftmax", function() { return /* reexport */ kernel_names["Eb" /* LogSoftmax */]; });
__webpack_require__.d(__webpack_exports__, "LRN", function() { return /* reexport */ kernel_names["wb" /* LRN */]; });
__webpack_require__.d(__webpack_exports__, "LRNGrad", function() { return /* reexport */ kernel_names["xb" /* LRNGrad */]; });
__webpack_require__.d(__webpack_exports__, "Max", function() { return /* reexport */ kernel_names["Ib" /* Max */]; });
__webpack_require__.d(__webpack_exports__, "Maximum", function() { return /* reexport */ kernel_names["Ob" /* Maximum */]; });
__webpack_require__.d(__webpack_exports__, "MaxPool", function() { return /* reexport */ kernel_names["Jb" /* MaxPool */]; });
__webpack_require__.d(__webpack_exports__, "MaxPoolGrad", function() { return /* reexport */ kernel_names["Mb" /* MaxPoolGrad */]; });
__webpack_require__.d(__webpack_exports__, "MaxPool3D", function() { return /* reexport */ kernel_names["Kb" /* MaxPool3D */]; });
__webpack_require__.d(__webpack_exports__, "MaxPool3DGrad", function() { return /* reexport */ kernel_names["Lb" /* MaxPool3DGrad */]; });
__webpack_require__.d(__webpack_exports__, "MaxPoolWithArgmax", function() { return /* reexport */ kernel_names["Nb" /* MaxPoolWithArgmax */]; });
__webpack_require__.d(__webpack_exports__, "Mean", function() { return /* reexport */ kernel_names["Pb" /* Mean */]; });
__webpack_require__.d(__webpack_exports__, "Min", function() { return /* reexport */ kernel_names["Qb" /* Min */]; });
__webpack_require__.d(__webpack_exports__, "Minimum", function() { return /* reexport */ kernel_names["Rb" /* Minimum */]; });
__webpack_require__.d(__webpack_exports__, "MirrorPad", function() { return /* reexport */ kernel_names["Sb" /* MirrorPad */]; });
__webpack_require__.d(__webpack_exports__, "Mod", function() { return /* reexport */ kernel_names["Tb" /* Mod */]; });
__webpack_require__.d(__webpack_exports__, "Multinomial", function() { return /* reexport */ kernel_names["Ub" /* Multinomial */]; });
__webpack_require__.d(__webpack_exports__, "Multiply", function() { return /* reexport */ kernel_names["Vb" /* Multiply */]; });
__webpack_require__.d(__webpack_exports__, "Neg", function() { return /* reexport */ kernel_names["Wb" /* Neg */]; });
__webpack_require__.d(__webpack_exports__, "NotEqual", function() { return /* reexport */ kernel_names["ac" /* NotEqual */]; });
__webpack_require__.d(__webpack_exports__, "NonMaxSuppressionV3", function() { return /* reexport */ kernel_names["Xb" /* NonMaxSuppressionV3 */]; });
__webpack_require__.d(__webpack_exports__, "NonMaxSuppressionV4", function() { return /* reexport */ kernel_names["Yb" /* NonMaxSuppressionV4 */]; });
__webpack_require__.d(__webpack_exports__, "NonMaxSuppressionV5", function() { return /* reexport */ kernel_names["Zb" /* NonMaxSuppressionV5 */]; });
__webpack_require__.d(__webpack_exports__, "OnesLike", function() { return /* reexport */ kernel_names["cc" /* OnesLike */]; });
__webpack_require__.d(__webpack_exports__, "OneHot", function() { return /* reexport */ kernel_names["bc" /* OneHot */]; });
__webpack_require__.d(__webpack_exports__, "Pack", function() { return /* reexport */ kernel_names["dc" /* Pack */]; });
__webpack_require__.d(__webpack_exports__, "PadV2", function() { return /* reexport */ kernel_names["ec" /* PadV2 */]; });
__webpack_require__.d(__webpack_exports__, "Pool", function() { return /* reexport */ kernel_names["fc" /* Pool */]; });
__webpack_require__.d(__webpack_exports__, "Pow", function() { return /* reexport */ kernel_names["gc" /* Pow */]; });
__webpack_require__.d(__webpack_exports__, "Prelu", function() { return /* reexport */ kernel_names["hc" /* Prelu */]; });
__webpack_require__.d(__webpack_exports__, "Prod", function() { return /* reexport */ kernel_names["ic" /* Prod */]; });
__webpack_require__.d(__webpack_exports__, "Range", function() { return /* reexport */ kernel_names["jc" /* Range */]; });
__webpack_require__.d(__webpack_exports__, "Real", function() { return /* reexport */ kernel_names["kc" /* Real */]; });
__webpack_require__.d(__webpack_exports__, "Reciprocal", function() { return /* reexport */ kernel_names["mc" /* Reciprocal */]; });
__webpack_require__.d(__webpack_exports__, "Relu", function() { return /* reexport */ kernel_names["nc" /* Relu */]; });
__webpack_require__.d(__webpack_exports__, "Reshape", function() { return /* reexport */ kernel_names["pc" /* Reshape */]; });
__webpack_require__.d(__webpack_exports__, "ResizeNearestNeighbor", function() { return /* reexport */ kernel_names["sc" /* ResizeNearestNeighbor */]; });
__webpack_require__.d(__webpack_exports__, "ResizeNearestNeighborGrad", function() { return /* reexport */ kernel_names["tc" /* ResizeNearestNeighborGrad */]; });
__webpack_require__.d(__webpack_exports__, "ResizeBilinear", function() { return /* reexport */ kernel_names["qc" /* ResizeBilinear */]; });
__webpack_require__.d(__webpack_exports__, "ResizeBilinearGrad", function() { return /* reexport */ kernel_names["rc" /* ResizeBilinearGrad */]; });
__webpack_require__.d(__webpack_exports__, "Relu6", function() { return /* reexport */ kernel_names["oc" /* Relu6 */]; });
__webpack_require__.d(__webpack_exports__, "Reverse", function() { return /* reexport */ kernel_names["uc" /* Reverse */]; });
__webpack_require__.d(__webpack_exports__, "Round", function() { return /* reexport */ kernel_names["wc" /* Round */]; });
__webpack_require__.d(__webpack_exports__, "Rsqrt", function() { return /* reexport */ kernel_names["xc" /* Rsqrt */]; });
__webpack_require__.d(__webpack_exports__, "ScatterNd", function() { return /* reexport */ kernel_names["yc" /* ScatterNd */]; });
__webpack_require__.d(__webpack_exports__, "Select", function() { return /* reexport */ kernel_names["zc" /* Select */]; });
__webpack_require__.d(__webpack_exports__, "Selu", function() { return /* reexport */ kernel_names["Ac" /* Selu */]; });
__webpack_require__.d(__webpack_exports__, "Slice", function() { return /* reexport */ kernel_names["Fc" /* Slice */]; });
__webpack_require__.d(__webpack_exports__, "Sin", function() { return /* reexport */ kernel_names["Dc" /* Sin */]; });
__webpack_require__.d(__webpack_exports__, "Sinh", function() { return /* reexport */ kernel_names["Ec" /* Sinh */]; });
__webpack_require__.d(__webpack_exports__, "Sign", function() { return /* reexport */ kernel_names["Cc" /* Sign */]; });
__webpack_require__.d(__webpack_exports__, "Sigmoid", function() { return /* reexport */ kernel_names["Bc" /* Sigmoid */]; });
__webpack_require__.d(__webpack_exports__, "Softplus", function() { return /* reexport */ kernel_names["Hc" /* Softplus */]; });
__webpack_require__.d(__webpack_exports__, "Sqrt", function() { return /* reexport */ kernel_names["Mc" /* Sqrt */]; });
__webpack_require__.d(__webpack_exports__, "Sum", function() { return /* reexport */ kernel_names["Sc" /* Sum */]; });
__webpack_require__.d(__webpack_exports__, "SpaceToBatchND", function() { return /* reexport */ kernel_names["Ic" /* SpaceToBatchND */]; });
__webpack_require__.d(__webpack_exports__, "SplitV", function() { return /* reexport */ kernel_names["Lc" /* SplitV */]; });
__webpack_require__.d(__webpack_exports__, "Softmax", function() { return /* reexport */ kernel_names["Gc" /* Softmax */]; });
__webpack_require__.d(__webpack_exports__, "SparseReshape", function() { return /* reexport */ kernel_names["Jc" /* SparseReshape */]; });
__webpack_require__.d(__webpack_exports__, "SparseToDense", function() { return /* reexport */ kernel_names["Kc" /* SparseToDense */]; });
__webpack_require__.d(__webpack_exports__, "SquaredDifference", function() { return /* reexport */ kernel_names["Oc" /* SquaredDifference */]; });
__webpack_require__.d(__webpack_exports__, "Square", function() { return /* reexport */ kernel_names["Nc" /* Square */]; });
__webpack_require__.d(__webpack_exports__, "StridedSlice", function() { return /* reexport */ kernel_names["Qc" /* StridedSlice */]; });
__webpack_require__.d(__webpack_exports__, "Sub", function() { return /* reexport */ kernel_names["Rc" /* Sub */]; });
__webpack_require__.d(__webpack_exports__, "Tan", function() { return /* reexport */ kernel_names["Tc" /* Tan */]; });
__webpack_require__.d(__webpack_exports__, "Tanh", function() { return /* reexport */ kernel_names["Uc" /* Tanh */]; });
__webpack_require__.d(__webpack_exports__, "Tile", function() { return /* reexport */ kernel_names["Vc" /* Tile */]; });
__webpack_require__.d(__webpack_exports__, "TopK", function() { return /* reexport */ kernel_names["Wc" /* TopK */]; });
__webpack_require__.d(__webpack_exports__, "Transform", function() { return /* reexport */ kernel_names["Xc" /* Transform */]; });
__webpack_require__.d(__webpack_exports__, "Transpose", function() { return /* reexport */ kernel_names["Yc" /* Transpose */]; });
__webpack_require__.d(__webpack_exports__, "Unique", function() { return /* reexport */ kernel_names["Zc" /* Unique */]; });
__webpack_require__.d(__webpack_exports__, "Unpack", function() { return /* reexport */ kernel_names["ad" /* Unpack */]; });
__webpack_require__.d(__webpack_exports__, "UnsortedSegmentSum", function() { return /* reexport */ kernel_names["bd" /* UnsortedSegmentSum */]; });
__webpack_require__.d(__webpack_exports__, "ZerosLike", function() { return /* reexport */ kernel_names["cd" /* ZerosLike */]; });
__webpack_require__.d(__webpack_exports__, "Step", function() { return /* reexport */ kernel_names["Pc" /* Step */]; });
__webpack_require__.d(__webpack_exports__, "FromPixels", function() { return /* reexport */ kernel_names["ib" /* FromPixels */]; });
__webpack_require__.d(__webpack_exports__, "RotateWithOffset", function() { return /* reexport */ kernel_names["vc" /* RotateWithOffset */]; });
__webpack_require__.d(__webpack_exports__, "_FusedMatMul", function() { return /* reexport */ kernel_names["dd" /* _FusedMatMul */]; });
__webpack_require__.d(__webpack_exports__, "FusedConv2D", function() { return /* reexport */ kernel_names["kb" /* FusedConv2D */]; });
__webpack_require__.d(__webpack_exports__, "FusedDepthwiseConv2D", function() { return /* reexport */ kernel_names["lb" /* FusedDepthwiseConv2D */]; });
// GENERATED: webpack synthesizes this plain object to stand in for the ES
// module namespace of tfjs-core's io module (`tf.io`). `.r` presumably tags
// the object as an ES-module namespace (its definition is outside this chunk
// — verify against the runtime header), and each `.d` call installs a lazy
// getter for one export. Getters referencing bare identifiers (copyModel,
// http, ...) resolve to bindings defined later in this bundle module; those
// going through io_utils["..."] forward to minified exports of io_utils.
// NAMESPACE OBJECT: ./node_modules/@tensorflow/tfjs-core/dist/io/io.js
var io_namespaceObject = {};
__webpack_require__.r(io_namespaceObject);
__webpack_require__.d(io_namespaceObject, "copyModel", function() { return copyModel; });
__webpack_require__.d(io_namespaceObject, "listModels", function() { return listModels; });
__webpack_require__.d(io_namespaceObject, "moveModel", function() { return moveModel; });
__webpack_require__.d(io_namespaceObject, "removeModel", function() { return removeModel; });
__webpack_require__.d(io_namespaceObject, "browserFiles", function() { return browserFiles; });
__webpack_require__.d(io_namespaceObject, "browserHTTPRequest", function() { return browserHTTPRequest; });
__webpack_require__.d(io_namespaceObject, "concatenateArrayBuffers", function() { return io_utils["d" /* concatenateArrayBuffers */]; });
__webpack_require__.d(io_namespaceObject, "decodeWeights", function() { return io_utils["e" /* decodeWeights */]; });
__webpack_require__.d(io_namespaceObject, "encodeWeights", function() { return io_utils["f" /* encodeWeights */]; });
__webpack_require__.d(io_namespaceObject, "fromMemory", function() { return fromMemory; });
__webpack_require__.d(io_namespaceObject, "getLoadHandlers", function() { return getLoadHandlers; });
__webpack_require__.d(io_namespaceObject, "getModelArtifactsInfoForJSON", function() { return io_utils["g" /* getModelArtifactsInfoForJSON */]; });
__webpack_require__.d(io_namespaceObject, "getSaveHandlers", function() { return getSaveHandlers; });
__webpack_require__.d(io_namespaceObject, "http", function() { return http; });
__webpack_require__.d(io_namespaceObject, "isHTTPScheme", function() { return isHTTPScheme; });
__webpack_require__.d(io_namespaceObject, "loadWeights", function() { return loadWeights; });
__webpack_require__.d(io_namespaceObject, "registerLoadRouter", function() { return registerLoadRouter; });
__webpack_require__.d(io_namespaceObject, "registerSaveRouter", function() { return registerSaveRouter; });
__webpack_require__.d(io_namespaceObject, "weightsLoaderFactory", function() { return weightsLoaderFactory; });
__webpack_require__.d(io_namespaceObject, "withSaveHandler", function() { return withSaveHandler; });
// GENERATED: synthetic namespace object for tfjs-core's math module
// (`tf.math`); its sole export here is a lazy getter for confusionMatrix,
// which is defined later in this bundle module.
// NAMESPACE OBJECT: ./node_modules/@tensorflow/tfjs-core/dist/math.js
var math_namespaceObject = {};
__webpack_require__.r(math_namespaceObject);
__webpack_require__.d(math_namespaceObject, "confusionMatrix", function() { return confusionMatrix; });
// GENERATED: synthetic namespace object for tfjs-core's browser ops module
// (`tf.browser`): lazy getters for the pixel <-> tensor conversion helpers,
// all of which are defined later in this bundle module.
// NAMESPACE OBJECT: ./node_modules/@tensorflow/tfjs-core/dist/ops/browser.js
var browser_namespaceObject = {};
__webpack_require__.r(browser_namespaceObject);
__webpack_require__.d(browser_namespaceObject, "fromPixelsAsync", function() { return fromPixelsAsync; });
__webpack_require__.d(browser_namespaceObject, "toPixels", function() { return toPixels; });
__webpack_require__.d(browser_namespaceObject, "fromPixels", function() { return fromPixels; });
// GENERATED: synthetic namespace object for tfjs-core's gather_nd_util
// module (re-exported above as `gather_util`); single lazy getter for
// prepareAndValidate, defined later in this bundle module.
// NAMESPACE OBJECT: ./node_modules/@tensorflow/tfjs-core/dist/ops/gather_nd_util.js
var gather_nd_util_namespaceObject = {};
__webpack_require__.r(gather_nd_util_namespaceObject);
__webpack_require__.d(gather_nd_util_namespaceObject, "prepareAndValidate", function() { return prepareAndValidate; });
// GENERATED: synthetic namespace object for tfjs-core's serialization module
// (`tf.serialization`): lazy getters for the Serializable base class, the
// SerializationMap registry, and the registerClass helper, all defined later
// in this bundle module.
// NAMESPACE OBJECT: ./node_modules/@tensorflow/tfjs-core/dist/serialization.js
var serialization_namespaceObject = {};
__webpack_require__.r(serialization_namespaceObject);
__webpack_require__.d(serialization_namespaceObject, "Serializable", function() { return Serializable; });
__webpack_require__.d(serialization_namespaceObject, "SerializationMap", function() { return SerializationMap; });
__webpack_require__.d(serialization_namespaceObject, "registerClass", function() { return registerClass; });
// GENERATED: synthetic namespace object for tfjs-core's segment_util module.
// Note the public name "computeOutShape" maps to the locally renamed binding
// segment_util_computeOutShape — webpack renamed it to avoid colliding with
// the distinct computeOutShape used by the backend_util namespace below.
// NAMESPACE OBJECT: ./node_modules/@tensorflow/tfjs-core/dist/ops/segment_util.js
var segment_util_namespaceObject = {};
__webpack_require__.r(segment_util_namespaceObject);
__webpack_require__.d(segment_util_namespaceObject, "segOpComputeOptimalWindowSize", function() { return segOpComputeOptimalWindowSize; });
__webpack_require__.d(segment_util_namespaceObject, "computeOutShape", function() { return segment_util_computeOutShape; });
__webpack_require__.d(segment_util_namespaceObject, "collectGatherOpShapeInfo", function() { return collectGatherOpShapeInfo; });
// GENERATED: synthetic namespace object for tfjs-core's backend_util module
// (`tf.backend_util`): lazy getters forwarding to minified exports of
// axis_util, broadcast_util and conv_util, plus bindings defined later in
// this bundle module. NOTE(review): this registration run continues beyond
// the excerpt shown here — only the first entries are visible at this point.
// NAMESPACE OBJECT: ./node_modules/@tensorflow/tfjs-core/dist/backends/backend_util.js
var backend_util_namespaceObject = {};
__webpack_require__.r(backend_util_namespaceObject);
__webpack_require__.d(backend_util_namespaceObject, "axesAreInnerMostDims", function() { return axis_util["b" /* axesAreInnerMostDims */]; });
__webpack_require__.d(backend_util_namespaceObject, "combineLocations", function() { return axis_util["c" /* combineLocations */]; });
__webpack_require__.d(backend_util_namespaceObject, "computeOutAndReduceShapes", function() { return axis_util["d" /* computeOutAndReduceShapes */]; });
__webpack_require__.d(backend_util_namespaceObject, "expandShapeToKeepDim", function() { return axis_util["e" /* expandShapeToKeepDim */]; });
__webpack_require__.d(backend_util_namespaceObject, "assertAxesAreInnerMostDims", function() { return axis_util["a" /* assertAxesAreInnerMostDims */]; });
__webpack_require__.d(backend_util_namespaceObject, "getAxesPermutation", function() { return axis_util["f" /* getAxesPermutation */]; });
__webpack_require__.d(backend_util_namespaceObject, "getUndoAxesPermutation", function() { return axis_util["h" /* getUndoAxesPermutation */]; });
__webpack_require__.d(backend_util_namespaceObject, "getInnerMostAxes", function() { return axis_util["g" /* getInnerMostAxes */]; });
__webpack_require__.d(backend_util_namespaceObject, "getBroadcastDims", function() { return broadcast_util["b" /* getBroadcastDims */]; });
__webpack_require__.d(backend_util_namespaceObject, "getReductionAxes", function() { return broadcast_util["c" /* getReductionAxes */]; });
__webpack_require__.d(backend_util_namespaceObject, "assertAndGetBroadcastShape", function() { return broadcast_util["a" /* assertAndGetBroadcastShape */]; });
__webpack_require__.d(backend_util_namespaceObject, "assertParamsConsistent", function() { return assertParamsConsistent; });
__webpack_require__.d(backend_util_namespaceObject, "computeOutShape", function() { return computeOutShape; });
__webpack_require__.d(backend_util_namespaceObject, "computeDilation2DInfo", function() { return conv_util["d" /* computeDilation2DInfo */]; });
__webpack_require__.d(backend_util_namespaceObject, "computePool2DInfo", function() { return conv_util["e" /* computePool2DInfo */]; });
__webpack_require__.d(backend_util_namespaceObject, "computePool3DInfo", function() { return conv_util["f" /* computePool3DInfo */]; });
__webpack_require__.d(backend_util_namespaceObject, "computeConv2DInfo", function() { return conv_util["a" /* computeConv2DInfo */]; });
__webpack_require__.d(backend_util_namespaceObject, "computeConv3DInfo", function() { return conv_util["b" /* computeConv3DInfo */]; });
__webpack_require__.d(backend_util_namespaceObject, "computeDefaultPad", function() { return conv_util["c" /* computeDefaultPad */]; });
__webpack_require__.d(backend_util_namespaceObject, "tupleValuesAreOne", function() { return conv_util["i" /* tupleValuesAreOne */]; });
__webpack_require__.d(backend_util_namespaceObject, "eitherStridesOrDilationsAreOne", function() { return conv_util["h" /* eitherStridesOrDilationsAreOne */]; });
__webpack_require__.d(backend_util_namespaceObject, "convertConv2DDataFormat", function() { return conv_util["g" /* convertConv2DDataFormat */]; });
__webpack_require__.d(backend_util_namespaceObject, "getFusedDyActivation", function() { return fused_util["c" /* getFusedDyActivation */]; });
__webpack_require__.d(backend_util_namespaceObject, "getFusedBiasGradient", function() { return fused_util["b" /* getFusedBiasGradient */]; });
__webpack_require__.d(backend_util_namespaceObject, "applyActivation", function() { return fused_util["a" /* applyActivation */]; });
__webpack_require__.d(backend_util_namespaceObject, "shouldFuse", function() { return fused_util["d" /* shouldFuse */]; });
__webpack_require__.d(backend_util_namespaceObject, "PARALLELIZE_THRESHOLD", function() { return PARALLELIZE_THRESHOLD; });
__webpack_require__.d(backend_util_namespaceObject, "computeOptimalWindowSize", function() { return computeOptimalWindowSize; });
__webpack_require__.d(backend_util_namespaceObject, "slice_util", function() { return slice_util; });
__webpack_require__.d(backend_util_namespaceObject, "upcastType", function() { return dist_types["c" /* upcastType */]; });
__webpack_require__.d(backend_util_namespaceObject, "getImageCenter", function() { return getImageCenter; });
__webpack_require__.d(backend_util_namespaceObject, "getReshaped", function() { return getReshaped; });
__webpack_require__.d(backend_util_namespaceObject, "getPermuted", function() { return getPermuted; });
__webpack_require__.d(backend_util_namespaceObject, "getReshapedPermuted", function() { return getReshapedPermuted; });
__webpack_require__.d(backend_util_namespaceObject, "getSliceBeginCoords", function() { return getSliceBeginCoords; });
__webpack_require__.d(backend_util_namespaceObject, "getSliceSize", function() { return getSliceSize; });
__webpack_require__.d(backend_util_namespaceObject, "prepareAndValidate", function() { return prepareAndValidate; });
__webpack_require__.d(backend_util_namespaceObject, "validateUpdateShape", function() { return scatter_nd_util["validateUpdateShape"]; });
__webpack_require__.d(backend_util_namespaceObject, "validateInput", function() { return scatter_nd_util["validateInput"]; });
__webpack_require__.d(backend_util_namespaceObject, "calculateShapes", function() { return scatter_nd_util["calculateShapes"]; });
__webpack_require__.d(backend_util_namespaceObject, "SELU_SCALEALPHA", function() { return selu_util["b" /* SELU_SCALEALPHA */]; });
__webpack_require__.d(backend_util_namespaceObject, "SELU_SCALE", function() { return selu_util["a" /* SELU_SCALE */]; });
__webpack_require__.d(backend_util_namespaceObject, "ERF_P", function() { return ERF_P; });
__webpack_require__.d(backend_util_namespaceObject, "ERF_A1", function() { return ERF_A1; });
__webpack_require__.d(backend_util_namespaceObject, "ERF_A2", function() { return ERF_A2; });
__webpack_require__.d(backend_util_namespaceObject, "ERF_A3", function() { return ERF_A3; });
__webpack_require__.d(backend_util_namespaceObject, "ERF_A4", function() { return ERF_A4; });
__webpack_require__.d(backend_util_namespaceObject, "ERF_A5", function() { return ERF_A5; });
__webpack_require__.d(backend_util_namespaceObject, "warn", function() { return warn; });
__webpack_require__.d(backend_util_namespaceObject, "log", function() { return log; });
__webpack_require__.d(backend_util_namespaceObject, "mergeRealAndImagArrays", function() { return mergeRealAndImagArrays; });
__webpack_require__.d(backend_util_namespaceObject, "splitRealAndImagArrays", function() { return splitRealAndImagArrays; });
__webpack_require__.d(backend_util_namespaceObject, "complexWithEvenIndex", function() { return complexWithEvenIndex; });
__webpack_require__.d(backend_util_namespaceObject, "complexWithOddIndex", function() { return complexWithOddIndex; });
__webpack_require__.d(backend_util_namespaceObject, "getComplexWithIndex", function() { return getComplexWithIndex; });
__webpack_require__.d(backend_util_namespaceObject, "assignToTypedArray", function() { return assignToTypedArray; });
__webpack_require__.d(backend_util_namespaceObject, "exponents", function() { return exponents; });
__webpack_require__.d(backend_util_namespaceObject, "exponent", function() { return exponent; });
__webpack_require__.d(backend_util_namespaceObject, "decodeEinsumEquation", function() { return decodeEinsumEquation; });
__webpack_require__.d(backend_util_namespaceObject, "getEinsumPermutation", function() { return getEinsumPermutation; });
__webpack_require__.d(backend_util_namespaceObject, "checkEinsumDimSizes", function() { return checkEinsumDimSizes; });
__webpack_require__.d(backend_util_namespaceObject, "getEinsumComputePath", function() { return getEinsumComputePath; });
__webpack_require__.d(backend_util_namespaceObject, "isIdentityPermutation", function() { return isIdentityPermutation; });
__webpack_require__.d(backend_util_namespaceObject, "prepareSplitSize", function() { return prepareSplitSize; });
__webpack_require__.d(backend_util_namespaceObject, "segment_util", function() { return segment_util_namespaceObject; });
__webpack_require__.d(backend_util_namespaceObject, "fromUint8ToStringArray", function() { return fromUint8ToStringArray; });
__webpack_require__.d(backend_util_namespaceObject, "fromStringArrayToUint8", function() { return fromStringArrayToUint8; });
// NAMESPACE OBJECT: ./node_modules/@tensorflow/tfjs-core/dist/backends/kernel_impls.js
var kernel_impls_namespaceObject = {};
__webpack_require__.r(kernel_impls_namespaceObject);
__webpack_require__.d(kernel_impls_namespaceObject, "nonMaxSuppressionV3Impl", function() { return non_max_suppression_impl["a" /* nonMaxSuppressionV3Impl */]; });
__webpack_require__.d(kernel_impls_namespaceObject, "nonMaxSuppressionV4Impl", function() { return non_max_suppression_impl["b" /* nonMaxSuppressionV4Impl */]; });
__webpack_require__.d(kernel_impls_namespaceObject, "nonMaxSuppressionV5Impl", function() { return non_max_suppression_impl["c" /* nonMaxSuppressionV5Impl */]; });
__webpack_require__.d(kernel_impls_namespaceObject, "whereImpl", function() { return where_impl["a" /* whereImpl */]; });
// EXTERNAL MODULE: ./node_modules/@tensorflow/tfjs-core/dist/engine.js + 2 modules
var engine = __webpack_require__(5);
// EXTERNAL MODULE: ./node_modules/@tensorflow/tfjs-core/dist/flags.js
var flags = __webpack_require__(143);
// EXTERNAL MODULE: ./node_modules/@tensorflow/tfjs-core/dist/environment.js
var environment = __webpack_require__(22);
// EXTERNAL MODULE: ./node_modules/@tensorflow/tfjs-core/dist/io/io_utils.js
var io_utils = __webpack_require__(40);
// CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/io/router_registry.js
/**
* @license
* Copyright 2018 Google LLC. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================================
*/
/**
 * Registry mapping URL-like strings to IOHandler routers.
 *
 * Routers are kept in two independent lists (one for saving, one for
 * loading). `getHandlers` runs every router of the requested type against a
 * URL and collects the non-null handlers they produce. The registry itself is
 * a lazily created, process-wide singleton.
 */
class IORouterRegistry {
    constructor() {
        this.saveRouters = [];
        this.loadRouters = [];
    }
    static getInstance() {
        if (IORouterRegistry.instance == null) {
            IORouterRegistry.instance = new IORouterRegistry();
        }
        return IORouterRegistry.instance;
    }
    /**
     * Register a save-handler router.
     *
     * @param saveRouter A function that maps a URL-like string onto an instance
     *   of `IOHandler` with the `save` method defined, or `null`.
     */
    static registerSaveRouter(saveRouter) {
        IORouterRegistry.getInstance().saveRouters.push(saveRouter);
    }
    /**
     * Register a load-handler router.
     *
     * @param loadRouter A function that maps a URL-like string onto an instance
     *   of `IOHandler` with the `load` method defined, or `null`.
     */
    static registerLoadRouter(loadRouter) {
        IORouterRegistry.getInstance().loadRouters.push(loadRouter);
    }
    /**
     * Look up IOHandlers for saving, given a URL-like string.
     *
     * @param url
     * @returns All registered save handlers whose router matched `url`
     *   (possibly an empty array).
     */
    static getSaveHandlers(url) {
        return IORouterRegistry.getHandlers(url, 'save');
    }
    /**
     * Look up IOHandlers for loading, given a URL-like string.
     *
     * @param url
     * @param loadOptions Optional, custom load options.
     * @returns All valid handlers for `url`, given the currently registered
     *   handler routers.
     */
    static getLoadHandlers(url, loadOptions) {
        return IORouterRegistry.getHandlers(url, 'load', loadOptions);
    }
    // Shared implementation of the two lookups above: apply every router of
    // the requested type to `url` and keep the handlers they return.
    static getHandlers(url, handlerType, loadOptions) {
        const routers = handlerType === 'load' ?
            IORouterRegistry.getInstance().loadRouters :
            IORouterRegistry.getInstance().saveRouters;
        return routers.map((router) => router(url, loadOptions))
            .filter((handler) => handler !== null);
    }
}
// Thin functional wrappers around the registry's static methods, so callers
// do not need to reference IORouterRegistry directly.
const registerSaveRouter = (router) => IORouterRegistry.registerSaveRouter(router);
const registerLoadRouter = (router) => IORouterRegistry.registerLoadRouter(router);
const getSaveHandlers = (url) => IORouterRegistry.getSaveHandlers(url);
const getLoadHandlers = (url, loadOptions) => IORouterRegistry.getLoadHandlers(url, loadOptions);
//# sourceMappingURL=router_registry.js.map
// CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/io/indexed_db.js
/**
* @license
* Copyright 2018 Google LLC. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================================
*/
// Name of the IndexedDB database that holds all tensorflow.js model artifacts.
const DATABASE_NAME = 'tensorflowjs';
// Schema version passed to indexedDB.open(); bump if the object stores change.
const DATABASE_VERSION = 1;
// Model data and ModelArtifactsInfo (metadata) are stored in two separate
// stores for efficient access of the list of stored models and their metadata.
// 1. The object store for model data: topology, weights and weight manifests.
const MODEL_STORE_NAME = 'models_store';
// 2. The object store for ModelArtifactsInfo, including meta-information such
// as the type of topology (JSON vs binary), byte size of the topology, byte
// size of the weights, etc.
const INFO_STORE_NAME = 'model_info_store';
/**
 * Delete the entire tensorflow.js IndexedDB database, including the models
 * store and the model-info store.
 *
 * @returns A promise that resolves when deletion succeeds and rejects with
 *   the underlying event on failure.
 */
async function deleteDatabase() {
    const factory = getIndexedDBFactory();
    return new Promise((resolve, reject) => {
        const request = factory.deleteDatabase(DATABASE_NAME);
        request.onsuccess = () => resolve();
        request.onerror = (error) => reject(error);
    });
}
/**
 * Locate an IndexedDB factory for the current environment.
 *
 * @returns The `IDBFactory` exposed by the browser (including legacy
 *   vendor-prefixed variants).
 * @throws Error if not running in a browser, or if the browser exposes no
 *   IndexedDB implementation.
 */
function getIndexedDBFactory() {
    if (!Object(environment["c" /* env */])().getBool('IS_BROWSER')) {
        // TODO(cais): Add more info about what IOHandler subtypes are available.
        // Maybe point to a doc page on the web and/or automatically determine
        // the available IOHandlers and print them in the error message.
        // Fix: the original concatenation lacked a separating space, producing
        // "...environmentis not a web browser."
        throw new Error('Failed to obtain IndexedDB factory because the current environment ' +
            'is not a web browser.');
    }
    // tslint:disable-next-line:no-any
    const theWindow = typeof window === 'undefined' ? self : window;
    const factory = theWindow.indexedDB || theWindow.mozIndexedDB ||
        theWindow.webkitIndexedDB || theWindow.msIndexedDB ||
        theWindow.shimIndexedDB;
    if (factory == null) {
        throw new Error('The current browser does not appear to support IndexedDB.');
    }
    return factory;
}
// onupgradeneeded hook: create the two object stores (model data and model
// metadata) the first time the database is created; both are keyed by the
// model path.
function setUpDatabase(openRequest) {
    const database = openRequest.result;
    database.createObjectStore(MODEL_STORE_NAME, { keyPath: 'modelPath' });
    database.createObjectStore(INFO_STORE_NAME, { keyPath: 'modelPath' });
}
/**
 * IOHandler subclass: Browser IndexedDB.
 *
 * See the doc string of `browserIndexedDB` for more details.
 */
class indexed_db_BrowserIndexedDB {
    /**
     * @param modelPath Non-empty unique string path for the model.
     * @throws Error if `modelPath` is null, undefined or empty, or if no
     *   IndexedDB factory can be obtained.
     */
    constructor(modelPath) {
        this.indexedDB = getIndexedDBFactory();
        if (modelPath == null || !modelPath) {
            throw new Error('For IndexedDB, modelPath must not be null, undefined or empty.');
        }
        this.modelPath = modelPath;
    }
    /** Save model artifacts (JSON topology only) under this handler's path. */
    async save(modelArtifacts) {
        // TODO(cais): Support saving GraphDef models.
        if (modelArtifacts.modelTopology instanceof ArrayBuffer) {
            // Fix: the message previously said 'BrowserLocalStorage.save()',
            // copied from the local-storage handler; this is the IndexedDB one.
            throw new Error('BrowserIndexedDB.save() does not support saving model topology ' +
                'in binary formats yet.');
        }
        return this.databaseAction(this.modelPath, modelArtifacts);
    }
    /** Load the model artifacts stored under this handler's path. */
    async load() {
        return this.databaseAction(this.modelPath);
    }
    /**
     * Perform database action to put model artifacts into or read model artifacts
     * from IndexedDB object store.
     *
     * Whether the action is put or get depends on whether `modelArtifacts` is
     * specified. If it is specified, the action will be put; otherwise the action
     * will be get.
     *
     * @param modelPath A unique string path for the model.
     * @param modelArtifacts If specified, it will be the model artifacts to be
     *   stored in IndexedDB.
     * @returns A `Promise` of `SaveResult`, if the action is put, or a `Promise`
     *   of `ModelArtifacts`, if the action is get.
     */
    databaseAction(modelPath, modelArtifacts) {
        return new Promise((resolve, reject) => {
            const openRequest = this.indexedDB.open(DATABASE_NAME, DATABASE_VERSION);
            openRequest.onupgradeneeded = () => setUpDatabase(openRequest);
            openRequest.onsuccess = () => {
                const db = openRequest.result;
                if (modelArtifacts == null) {
                    // Read model out from object store.
                    const modelTx = db.transaction(MODEL_STORE_NAME, 'readonly');
                    const modelStore = modelTx.objectStore(MODEL_STORE_NAME);
                    const getRequest = modelStore.get(this.modelPath);
                    getRequest.onsuccess = () => {
                        if (getRequest.result == null) {
                            db.close();
                            return reject(new Error(`Cannot find model with path '${this.modelPath}' ` +
                                `in IndexedDB.`));
                        }
                        else {
                            resolve(getRequest.result.modelArtifacts);
                        }
                    };
                    getRequest.onerror = error => {
                        db.close();
                        return reject(getRequest.error);
                    };
                    modelTx.oncomplete = () => db.close();
                }
                else {
                    // Put model into object store.
                    const modelArtifactsInfo = Object(io_utils["g" /* getModelArtifactsInfoForJSON */])(modelArtifacts);
                    // First, put ModelArtifactsInfo into info store.
                    const infoTx = db.transaction(INFO_STORE_NAME, 'readwrite');
                    let infoStore = infoTx.objectStore(INFO_STORE_NAME);
                    const putInfoRequest = infoStore.put({ modelPath: this.modelPath, modelArtifactsInfo });
                    let modelTx;
                    putInfoRequest.onsuccess = () => {
                        // Second, put model data into model store.
                        modelTx = db.transaction(MODEL_STORE_NAME, 'readwrite');
                        const modelStore = modelTx.objectStore(MODEL_STORE_NAME);
                        const putModelRequest = modelStore.put({
                            modelPath: this.modelPath,
                            modelArtifacts,
                            modelArtifactsInfo
                        });
                        putModelRequest.onsuccess = () => resolve({ modelArtifactsInfo });
                        putModelRequest.onerror = error => {
                            // If the put-model request fails, roll back the info entry as
                            // well.
                            infoStore = infoTx.objectStore(INFO_STORE_NAME);
                            const deleteInfoRequest = infoStore.delete(this.modelPath);
                            deleteInfoRequest.onsuccess = () => {
                                db.close();
                                return reject(putModelRequest.error);
                            };
                            deleteInfoRequest.onerror = error => {
                                db.close();
                                return reject(putModelRequest.error);
                            };
                        };
                    };
                    putInfoRequest.onerror = error => {
                        db.close();
                        return reject(putInfoRequest.error);
                    };
                    // Close the db only after the model transaction (if any) has
                    // also completed.
                    infoTx.oncomplete = () => {
                        if (modelTx == null) {
                            db.close();
                        }
                        else {
                            modelTx.oncomplete = () => db.close();
                        }
                    };
                }
            };
            openRequest.onerror = error => reject(openRequest.error);
        });
    }
}
indexed_db_BrowserIndexedDB.URL_SCHEME = 'indexeddb://';
// Router registered with IORouterRegistry: maps 'indexeddb://<path>' URLs to
// a BrowserIndexedDB handler; returns null for non-browser environments,
// arrays of URLs, and URLs with other schemes.
const indexedDBRouter = (url) => {
    if (!Object(environment["c" /* env */])().getBool('IS_BROWSER')) {
        return null;
    }
    if (Array.isArray(url) || !url.startsWith(indexed_db_BrowserIndexedDB.URL_SCHEME)) {
        return null;
    }
    return browserIndexedDB(url.slice(indexed_db_BrowserIndexedDB.URL_SCHEME.length));
};
IORouterRegistry.registerSaveRouter(indexedDBRouter);
IORouterRegistry.registerLoadRouter(indexedDBRouter);
/**
 * Creates a browser IndexedDB IOHandler for saving and loading models.
 *
 * ```js
 * const model = tf.sequential();
 * model.add(
 *     tf.layers.dense({units: 1, inputShape: [100], activation: 'sigmoid'}));
 *
 * const saveResult = await model.save('indexeddb://MyModel');
 * console.log(saveResult);
 * ```
 *
 * @param modelPath A unique identifier for the model to be saved. Must be a
 *   non-empty string.
 * @returns An instance of `BrowserIndexedDB` (subclass of `IOHandler`),
 *   which can be used with, e.g., `tf.Model.save`.
 */
function browserIndexedDB(modelPath) {
    return new indexed_db_BrowserIndexedDB(modelPath);
}
// Strip the 'indexeddb://' scheme from a key, if present.
function maybeStripScheme(key) {
    const scheme = indexed_db_BrowserIndexedDB.URL_SCHEME;
    if (key.startsWith(scheme)) {
        return key.slice(scheme.length);
    }
    return key;
}
/**
 * Manager for listing and removing models stored in IndexedDB.
 *
 * Complements `BrowserIndexedDB`, which saves/loads individual models.
 */
class BrowserIndexedDBManager {
    constructor() {
        this.indexedDB = getIndexedDBFactory();
    }
    /**
     * List all stored models.
     *
     * @returns A promise of a map from model path to ModelArtifactsInfo,
     *   read from the info store.
     */
    async listModels() {
        return new Promise((resolve, reject) => {
            const openRequest = this.indexedDB.open(DATABASE_NAME, DATABASE_VERSION);
            openRequest.onupgradeneeded = () => setUpDatabase(openRequest);
            openRequest.onsuccess = () => {
                const db = openRequest.result;
                const tx = db.transaction(INFO_STORE_NAME, 'readonly');
                const store = tx.objectStore(INFO_STORE_NAME);
                // tslint:disable:max-line-length
                // Need to cast `store` as `any` here because TypeScript's DOM
                // library does not have the `getAll()` method even though the
                // method is supported in the latest version of most mainstream
                // browsers:
                // https://developer.mozilla.org/en-US/docs/Web/API/IDBObjectStore/getAll
                // tslint:enable:max-line-length
                // tslint:disable-next-line:no-any
                const getAllInfoRequest = store.getAll();
                getAllInfoRequest.onsuccess = () => {
                    const out = {};
                    for (const item of getAllInfoRequest.result) {
                        out[item.modelPath] = item.modelArtifactsInfo;
                    }
                    resolve(out);
                };
                getAllInfoRequest.onerror = error => {
                    db.close();
                    return reject(getAllInfoRequest.error);
                };
                tx.oncomplete = () => db.close();
            };
            openRequest.onerror = error => reject(openRequest.error);
        });
    }
    /**
     * Remove the model at `path` from both object stores.
     *
     * @param path Model path, with or without the 'indexeddb://' scheme.
     * @returns A promise of the removed model's ModelArtifactsInfo.
     */
    async removeModel(path) {
        path = maybeStripScheme(path);
        return new Promise((resolve, reject) => {
            const openRequest = this.indexedDB.open(DATABASE_NAME, DATABASE_VERSION);
            openRequest.onupgradeneeded = () => setUpDatabase(openRequest);
            openRequest.onsuccess = () => {
                const db = openRequest.result;
                const infoTx = db.transaction(INFO_STORE_NAME, 'readwrite');
                const infoStore = infoTx.objectStore(INFO_STORE_NAME);
                const getInfoRequest = infoStore.get(path);
                let modelTx;
                getInfoRequest.onsuccess = () => {
                    if (getInfoRequest.result == null) {
                        db.close();
                        return reject(new Error(`Cannot find model with path '${path}' ` +
                            `in IndexedDB.`));
                    }
                    else {
                        // First, delete the entry in the info store.
                        const deleteInfoRequest = infoStore.delete(path);
                        const deleteModelData = () => {
                            // Second, delete the entry in the model store.
                            modelTx = db.transaction(MODEL_STORE_NAME, 'readwrite');
                            const modelStore = modelTx.objectStore(MODEL_STORE_NAME);
                            const deleteModelRequest = modelStore.delete(path);
                            deleteModelRequest.onsuccess = () => resolve(getInfoRequest.result.modelArtifactsInfo);
                            // NOTE(review): this rejects with getInfoRequest.error rather
                            // than deleteModelRequest.error - looks like a copy-paste
                            // slip; confirm against upstream before changing.
                            deleteModelRequest.onerror = error => reject(getInfoRequest.error);
                        };
                        // Proceed with deleting model data regardless of whether deletion
                        // of info data succeeds or not.
                        deleteInfoRequest.onsuccess = deleteModelData;
                        deleteInfoRequest.onerror = error => {
                            deleteModelData();
                            db.close();
                            return reject(getInfoRequest.error);
                        };
                    }
                };
                getInfoRequest.onerror = error => {
                    db.close();
                    return reject(getInfoRequest.error);
                };
                // Close the db only after the model transaction (if any) completes.
                infoTx.oncomplete = () => {
                    if (modelTx == null) {
                        db.close();
                    }
                    else {
                        modelTx.oncomplete = () => db.close();
                    }
                };
            };
            openRequest.onerror = error => reject(openRequest.error);
        });
    }
}
//# sourceMappingURL=indexed_db.js.map
// EXTERNAL MODULE: ./node_modules/@tensorflow/tfjs-core/dist/util_base.js
var util_base = __webpack_require__(8);
// CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/io/local_storage.js
/**
* @license
* Copyright 2018 Google LLC. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================================
*/
// Local-storage keys have the form:
//   tensorflowjs_models/<modelPath>/<suffix>
// (see getModelKeys below).
const PATH_SEPARATOR = '/';
const PATH_PREFIX = 'tensorflowjs_models';
// Per-model artifact suffixes.
const INFO_SUFFIX = 'info';
const MODEL_TOPOLOGY_SUFFIX = 'model_topology';
const WEIGHT_SPECS_SUFFIX = 'weight_specs';
const WEIGHT_DATA_SUFFIX = 'weight_data';
const MODEL_METADATA_SUFFIX = 'model_metadata';
/**
 * Purge all tensorflow.js-saved model artifacts from local storage.
 *
 * @returns Paths of the models purged.
 * @throws Error if local storage is unavailable in the current environment.
 */
function purgeLocalStorageArtifacts() {
    if (!Object(environment["c" /* env */])().getBool('IS_BROWSER') || typeof window === 'undefined' ||
        typeof window.localStorage === 'undefined') {
        // Fix: the message previously named a nonexistent
        // purgeLocalStorageModels() function.
        throw new Error('purgeLocalStorageArtifacts() cannot proceed because local storage is ' +
            'unavailable in the current environment.');
    }
    const LS = window.localStorage;
    const prefix = PATH_PREFIX + PATH_SEPARATOR;
    // Fix: collect the matching keys BEFORE removing anything. The previous
    // code called LS.removeItem() inside a `for (i < LS.length)` loop indexed
    // by LS.key(i); removal re-numbers the remaining keys, so every key that
    // followed a removed one was silently skipped.
    const keysToRemove = [];
    for (let i = 0; i < LS.length; ++i) {
        const key = LS.key(i);
        if (key.startsWith(prefix) && key.length > prefix.length) {
            keysToRemove.push(key);
        }
    }
    const purgedModelPaths = [];
    for (const key of keysToRemove) {
        LS.removeItem(key);
        const modelName = getModelPathFromKey(key);
        if (purgedModelPaths.indexOf(modelName) === -1) {
            purgedModelPaths.push(modelName);
        }
    }
    return purgedModelPaths;
}
// Build the full set of local-storage keys used for a model path:
// one key per artifact, of the form PATH_PREFIX/<path>/<suffix>.
function getModelKeys(path) {
    const keyFor = (suffix) => [PATH_PREFIX, path, suffix].join(PATH_SEPARATOR);
    return {
        info: keyFor(INFO_SUFFIX),
        topology: keyFor(MODEL_TOPOLOGY_SUFFIX),
        weightSpecs: keyFor(WEIGHT_SPECS_SUFFIX),
        weightData: keyFor(WEIGHT_DATA_SUFFIX),
        modelMetadata: keyFor(MODEL_METADATA_SUFFIX)
    };
}
/**
 * Get model path from a local-storage key.
 *
 * E.g., 'tensorflowjs_models/my/model/1/info' --> 'my/model/1'
 *
 * @param key A full local-storage key.
 * @returns The model path embedded in the key.
 * @throws Error if the key does not contain at least prefix, path and suffix.
 */
function getModelPathFromKey(key) {
    const segments = key.split(PATH_SEPARATOR);
    if (segments.length < 3) {
        throw new Error(`Invalid key format: ${key}`);
    }
    // Drop the leading PATH_PREFIX segment and the trailing suffix segment.
    return segments.slice(1, -1).join(PATH_SEPARATOR);
}
// Strip the 'localstorage://' scheme from a key, if present.
function local_storage_maybeStripScheme(key) {
    const scheme = local_storage_BrowserLocalStorage.URL_SCHEME;
    if (key.startsWith(scheme)) {
        return key.slice(scheme.length);
    }
    return key;
}
/**
 * IOHandler subclass: Browser Local Storage.
 *
 * See the doc string to `browserLocalStorage` for more details.
 */
class local_storage_BrowserLocalStorage {
    /**
     * @param modelPath Non-empty unique string path for the model; used to
     *   derive the local-storage keys (see getModelKeys).
     * @throws Error if local storage is unavailable or `modelPath` is empty.
     */
    constructor(modelPath) {
        if (!Object(environment["c" /* env */])().getBool('IS_BROWSER') || typeof window === 'undefined' ||
            typeof window.localStorage === 'undefined') {
            // TODO(cais): Add more info about what IOHandler subtypes are
            // available.
            // Maybe point to a doc page on the web and/or automatically determine
            // the available IOHandlers and print them in the error message.
            throw new Error('The current environment does not support local storage.');
        }
        this.LS = window.localStorage;
        if (modelPath == null || !modelPath) {
            throw new Error('For local storage, modelPath must not be null, undefined or empty.');
        }
        this.modelPath = modelPath;
        this.keys = getModelKeys(this.modelPath);
    }
    /**
     * Save model artifacts to browser local storage.
     *
     * See the documentation to `browserLocalStorage` for details on the saved
     * artifacts.
     *
     * @param modelArtifacts The model artifacts to be stored.
     * @returns An instance of SaveResult.
     */
    async save(modelArtifacts) {
        if (modelArtifacts.modelTopology instanceof ArrayBuffer) {
            throw new Error('BrowserLocalStorage.save() does not support saving model topology ' +
                'in binary formats yet.');
        }
        else {
            const topology = JSON.stringify(modelArtifacts.modelTopology);
            const weightSpecs = JSON.stringify(modelArtifacts.weightSpecs);
            const modelArtifactsInfo = Object(io_utils["g" /* getModelArtifactsInfoForJSON */])(modelArtifacts);
            try {
                // Write all five items; weight data is base64-encoded because
                // local storage only holds strings.
                this.LS.setItem(this.keys.info, JSON.stringify(modelArtifactsInfo));
                this.LS.setItem(this.keys.topology, topology);
                this.LS.setItem(this.keys.weightSpecs, weightSpecs);
                this.LS.setItem(this.keys.weightData, Object(io_utils["a" /* arrayBufferToBase64String */])(modelArtifacts.weightData));
                const result = {
                    format: modelArtifacts.format,
                    generatedBy: modelArtifacts.generatedBy,
                    convertedBy: modelArtifacts.convertedBy
                };
                // Optional fields are written only when present so that load()
                // can distinguish absent from null.
                if (modelArtifacts.signature != null) {
                    result.signature = modelArtifacts.signature;
                }
                if (modelArtifacts.userDefinedMetadata != null) {
                    result.userDefinedMetadata = modelArtifacts.userDefinedMetadata;
                }
                if (modelArtifacts.modelInitializer != null) {
                    result.modelInitializer = modelArtifacts.modelInitializer;
                }
                this.LS.setItem(this.keys.modelMetadata, JSON.stringify(result));
                return { modelArtifactsInfo };
            }
            catch (err) {
                // If saving failed, clean up all items saved so far.
                this.LS.removeItem(this.keys.info);
                this.LS.removeItem(this.keys.topology);
                this.LS.removeItem(this.keys.weightSpecs);
                this.LS.removeItem(this.keys.weightData);
                this.LS.removeItem(this.keys.modelMetadata);
                throw new Error(`Failed to save model '${this.modelPath}' to local storage: ` +
                    `size quota being exceeded is a possible cause of this failure: ` +
                    `modelTopologyBytes=${modelArtifactsInfo.modelTopologyBytes}, ` +
                    `weightSpecsBytes=${modelArtifactsInfo.weightSpecsBytes}, ` +
                    `weightDataBytes=${modelArtifactsInfo.weightDataBytes}.`);
            }
        }
    }
    /**
     * Load a model from local storage.
     *
     * See the documentation to `browserLocalStorage` for details on the saved
     * artifacts.
     *
     * @returns The loaded model (if loading succeeds).
     */
    async load() {
        const info = JSON.parse(this.LS.getItem(this.keys.info));
        if (info == null) {
            throw new Error(`In local storage, there is no model with name '${this.modelPath}'`);
        }
        if (info.modelTopologyType !== 'JSON') {
            throw new Error('BrowserLocalStorage does not support loading non-JSON model ' +
                'topology yet.');
        }
        const out = {};
        // Load topology.
        const topology = JSON.parse(this.LS.getItem(this.keys.topology));
        if (topology == null) {
            throw new Error(`In local storage, the topology of model '${this.modelPath}' ` +
                `is missing.`);
        }
        out.modelTopology = topology;
        // Load weight specs.
        const weightSpecs = JSON.parse(this.LS.getItem(this.keys.weightSpecs));
        if (weightSpecs == null) {
            throw new Error(`In local storage, the weight specs of model '${this.modelPath}' ` +
                `are missing.`);
        }
        out.weightSpecs = weightSpecs;
        // Load meta-data fields. Metadata is optional: older saves may lack it.
        const metadataString = this.LS.getItem(this.keys.modelMetadata);
        if (metadataString != null) {
            const metadata = JSON.parse(metadataString);
            out.format = metadata['format'];
            out.generatedBy = metadata['generatedBy'];
            out.convertedBy = metadata['convertedBy'];
            if (metadata['signature'] != null) {
                out.signature = metadata['signature'];
            }
            if (metadata['userDefinedMetadata'] != null) {
                out.userDefinedMetadata = metadata['userDefinedMetadata'];
            }
            if (metadata['modelInitializer'] != null) {
                out.modelInitializer = metadata['modelInitializer'];
            }
        }
        // Load weight data (stored base64-encoded; decode back to ArrayBuffer).
        const weightDataBase64 = this.LS.getItem(this.keys.weightData);
        if (weightDataBase64 == null) {
            throw new Error(`In local storage, the binary weight values of model ` +
                `'${this.modelPath}' are missing.`);
        }
        out.weightData = Object(io_utils["b" /* base64StringToArrayBuffer */])(weightDataBase64);
        return out;
    }
}
local_storage_BrowserLocalStorage.URL_SCHEME = 'localstorage://';
// Router registered with IORouterRegistry: maps 'localstorage://<path>' URLs
// to a BrowserLocalStorage handler; returns null for non-browser
// environments, arrays of URLs, and URLs with other schemes.
const localStorageRouter = (url) => {
    if (!Object(environment["c" /* env */])().getBool('IS_BROWSER')) {
        return null;
    }
    if (Array.isArray(url) || !url.startsWith(local_storage_BrowserLocalStorage.URL_SCHEME)) {
        return null;
    }
    return browserLocalStorage(url.slice(local_storage_BrowserLocalStorage.URL_SCHEME.length));
};
IORouterRegistry.registerSaveRouter(localStorageRouter);
IORouterRegistry.registerLoadRouter(localStorageRouter);
/**
* Factory function for local storage IOHandler.
*
* This `IOHandler` supports both `save` and `load`.
*
* For each model's saved artifacts, four items are saved to local storage.
* - `${PATH_SEPARATOR}/${modelPath}/info`: Contains meta-info about the
* model, such as date saved, type of the topology, size in bytes, etc.
* - `${PATH_SEPARATOR}/${modelPath}/topology`: Model topology. For Keras-
* style models, this is a stringized JSON.
* - `${PATH_SEPARATOR}/${modelPath}/weight_specs`: Weight specs of the
* model, can be used to decode the saved binary weight values (see
* item below).
* - `${PATH_SEPARATOR}/${modelPath}/weight_data`: Concatenated binary
* weight values, stored as a base64-encoded string.
*
* Saving may throw an `Error` if the total size of the artifacts exceed the
* browser-specific quota.
*
* @param modelPath A unique identifier for the model to be saved. Must be a
* non-empty string.
* @returns An instance of `IOHandler`, which can be used with, e.g.,
* `tf.Model.save`.
*/
function browserLocalStorage(modelPath) {
    // Thin factory: construct and hand back the local-storage IOHandler.
    const handler = new local_storage_BrowserLocalStorage(modelPath);
    return handler;
}
/**
 * ModelStoreManager backed by window.localStorage: enumerates and deletes
 * models saved under the 'localstorage://' scheme.
 */
class local_storage_BrowserLocalStorageManager {
    constructor() {
        Object(util_base["b" /* assert */])(Object(environment["c" /* env */])().getBool('IS_BROWSER'), () => 'Current environment is not a web browser');
        Object(util_base["b" /* assert */])(typeof window === 'undefined' ||
            typeof window.localStorage !== 'undefined', () => 'Current browser does not appear to support localStorage');
        this.LS = window.localStorage;
    }
    /** Scan localStorage for model info entries, keyed by model path. */
    async listModels() {
        const out = {};
        const prefix = PATH_PREFIX + PATH_SEPARATOR;
        const suffix = PATH_SEPARATOR + INFO_SUFFIX;
        for (let idx = 0; idx < this.LS.length; ++idx) {
            const storageKey = this.LS.key(idx);
            // Only '<prefix>/<modelPath>/info' entries describe a stored model.
            if (!storageKey.startsWith(prefix) || !storageKey.endsWith(suffix)) {
                continue;
            }
            const modelPath = getModelPathFromKey(storageKey);
            out[modelPath] = JSON.parse(this.LS.getItem(storageKey));
        }
        return out;
    }
    /** Delete all four storage items of a model; returns its parsed info. */
    async removeModel(path) {
        path = local_storage_maybeStripScheme(path);
        const keys = getModelKeys(path);
        const infoJSON = this.LS.getItem(keys.info);
        if (infoJSON == null) {
            throw new Error(`Cannot find model at path '${path}'`);
        }
        const info = JSON.parse(infoJSON);
        for (const storageKey of [keys.info, keys.topology, keys.weightSpecs, keys.weightData]) {
            this.LS.removeItem(storageKey);
        }
        return info;
    }
}
//# sourceMappingURL=local_storage.js.map
// CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/io/model_management.js
/**
* @license
* Copyright 2018 Google LLC. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================================
*/
/**
* Classes and functions for model management across multiple storage mediums.
*
* Supported client actions:
* - Listing models on all registered storage mediums.
* - Remove model by URL from any registered storage mediums, by using URL
* string.
* - Moving or copying model from one path to another in the same medium or from
* one medium to another, by using URL strings.
*/
// Separator between the scheme and path portions of a model URL,
// e.g. 'localstorage://my-model'.
const URL_SCHEME_SUFFIX = '://';
/**
 * Singleton registry mapping URL schemes (e.g. 'localstorage', 'indexeddb')
 * to their `ModelStoreManager` instances.
 */
class model_management_ModelStoreManagerRegistry {
    constructor() {
        // Maps scheme string (without '://') to its manager instance.
        this.managers = {};
    }
    /** Lazily creates and returns the singleton registry instance. */
    static getInstance() {
        if (model_management_ModelStoreManagerRegistry.instance == null) {
            model_management_ModelStoreManagerRegistry.instance = new model_management_ModelStoreManagerRegistry();
        }
        return model_management_ModelStoreManagerRegistry.instance;
    }
    /**
     * Register a model-store manager for a URL scheme.
     *
     * NOTE(review): the previous doc here described a "save-handler router" —
     * a copy-paste error; this method registers a `ModelStoreManager`.
     *
     * @param scheme The URL scheme, e.g. 'localstorage'. A trailing '://' is
     *   tolerated and stripped. Must be non-empty and not already registered.
     * @param manager The `ModelStoreManager` instance handling the scheme.
     */
    static registerManager(scheme, manager) {
        Object(util_base["b" /* assert */])(scheme != null, () => 'scheme must not be undefined or null.');
        if (scheme.endsWith(URL_SCHEME_SUFFIX)) {
            scheme = scheme.slice(0, scheme.indexOf(URL_SCHEME_SUFFIX));
        }
        Object(util_base["b" /* assert */])(scheme.length > 0, () => 'scheme must not be an empty string.');
        const registry = model_management_ModelStoreManagerRegistry.getInstance();
        Object(util_base["b" /* assert */])(registry.managers[scheme] == null, () => `A model store manager is already registered for scheme '${scheme}'.`);
        registry.managers[scheme] = manager;
    }
    /** Look up the manager for a scheme; throws if none is registered. */
    static getManager(scheme) {
        const manager = this.getInstance().managers[scheme];
        if (manager == null) {
            throw new Error(`Cannot find model manager for scheme '${scheme}'`);
        }
        return manager;
    }
    /** Return all registered scheme strings. */
    static getSchemes() {
        return Object.keys(this.getInstance().managers);
    }
}
/**
* Helper method for parsing a URL string into a scheme and a path.
*
* @param url E.g., 'localstorage://my-model'
* @returns A dictionary with two fields: scheme and path.
* Scheme: e.g., 'localstorage' in the example above.
* Path: e.g., 'my-model' in the example above.
*/
/**
 * Split a model URL of the form '<scheme>://<path>' into its two parts.
 *
 * @param url E.g., 'localstorage://my-model'.
 * @returns `{scheme, path}`, e.g. `{scheme: 'localstorage', path: 'my-model'}`.
 * @throws Error if the string contains no scheme separator.
 */
function parseURL(url) {
    if (url.indexOf(URL_SCHEME_SUFFIX) === -1) {
        throw new Error(`The url string provided does not contain a scheme. ` +
            `Supported schemes are: ` +
            `${model_management_ModelStoreManagerRegistry.getSchemes().join(',')}`);
    }
    const pieces = url.split(URL_SCHEME_SUFFIX);
    return { scheme: pieces[0], path: pieces[1] };
}
/**
 * Copy a model from `sourceURL` to `destURL`, optionally deleting the source
 * (i.e. a move).
 *
 * When moving within the same storage medium the source is deleted *before*
 * saving, so the two copies never coexist (avoids quota failures). When
 * moving across mediums the source is deleted only *after* a successful save.
 *
 * @param sourceURL Scheme-prefixed URL of the existing model.
 * @param destURL Scheme-prefixed destination URL; must differ from source.
 * @param deleteSource If true, the source model is removed (move semantics).
 * @returns The `ModelArtifactsInfo` of the saved copy.
 */
async function cloneModelInternal(sourceURL, destURL, deleteSource = false) {
    Object(util_base["b" /* assert */])(sourceURL !== destURL, () => `Old path and new path are the same: '${sourceURL}'`);
    const loadHandlers = IORouterRegistry.getLoadHandlers(sourceURL);
    Object(util_base["b" /* assert */])(loadHandlers.length > 0, () => `Copying failed because no load handler is found for source URL ${sourceURL}.`);
    Object(util_base["b" /* assert */])(loadHandlers.length < 2, () => `Copying failed because more than one (${loadHandlers.length}) ` +
        `load handlers for source URL ${sourceURL}.`);
    const loadHandler = loadHandlers[0];
    const saveHandlers = IORouterRegistry.getSaveHandlers(destURL);
    Object(util_base["b" /* assert */])(saveHandlers.length > 0, () => `Copying failed because no save handler is found for destination ` +
        `URL ${destURL}.`);
    // Bug fix: the message previously interpolated loadHandlers.length here.
    Object(util_base["b" /* assert */])(saveHandlers.length < 2, () => `Copying failed because more than one (${saveHandlers.length}) ` +
        `save handlers for destination URL ${destURL}.`);
    const saveHandler = saveHandlers[0];
    const sourceScheme = parseURL(sourceURL).scheme;
    const sourcePath = parseURL(sourceURL).path;
    // Bug fix: compare the source scheme against the *destination* scheme.
    // The previous code compared sourceURL's scheme with itself, which is
    // always true and defeated the cross-medium deletion ordering below.
    const sameMedium = sourceScheme === parseURL(destURL).scheme;
    const modelArtifacts = await loadHandler.load();
    // If moving within the same storage medium, remove the old model as soon as
    // the loading is done. Without doing this, it is possible that the combined
    // size of the two models will cause the cloning to fail.
    if (deleteSource && sameMedium) {
        await model_management_ModelStoreManagerRegistry.getManager(sourceScheme)
            .removeModel(sourcePath);
    }
    const saveResult = await saveHandler.save(modelArtifacts);
    // If moving between mediums, the deletion is done after the save succeeds.
    // This guards against the case in which saving to the destination medium
    // fails.
    if (deleteSource && !sameMedium) {
        await model_management_ModelStoreManagerRegistry.getManager(sourceScheme)
            .removeModel(sourcePath);
    }
    return saveResult.modelArtifactsInfo;
}
/**
* List all models stored in registered storage mediums.
*
* For a web browser environment, the registered mediums are Local Storage and
* IndexedDB.
*
* ```js
* // First create and save a model.
* const model = tf.sequential();
* model.add(tf.layers.dense(
* {units: 1, inputShape: [10], activation: 'sigmoid'}));
* await model.save('localstorage://demo/management/model1');
*
* // Then list existing models.
* console.log(JSON.stringify(await tf.io.listModels()));
*
* // Delete the model.
* await tf.io.removeModel('localstorage://demo/management/model1');
*
* // List models again.
* console.log(JSON.stringify(await tf.io.listModels()));
* ```
*
* @returns A `Promise` of a dictionary mapping URLs of existing models to
* their model artifacts info. URLs include medium-specific schemes, e.g.,
* 'indexeddb://my/model/1'. Model artifacts info include type of the
* model's topology, byte sizes of the topology, weights, etc.
*
* @doc {
* heading: 'Models',
* subheading: 'Management',
* namespace: 'io',
* ignoreCI: true
* }
*/
/**
 * List all models stored in every registered storage medium.
 *
 * @returns A `Promise` of a dictionary mapping scheme-prefixed model URLs
 *   (e.g. 'indexeddb://my/model/1') to their `ModelArtifactsInfo`.
 */
async function listModels() {
    const out = {};
    for (const scheme of model_management_ModelStoreManagerRegistry.getSchemes()) {
        const manager = model_management_ModelStoreManagerRegistry.getManager(scheme);
        const modelsForScheme = await manager.listModels();
        for (const [path, info] of Object.entries(modelsForScheme)) {
            out[scheme + URL_SCHEME_SUFFIX + path] = info;
        }
    }
    return out;
}
/**
* Remove a model specified by URL from a reigstered storage medium.
*
* ```js
* // First create and save a model.
* const model = tf.sequential();
* model.add(tf.layers.dense(
* {units: 1, inputShape: [10], activation: 'sigmoid'}));
* await model.save('localstorage://demo/management/model1');
*
* // Then list existing models.
* console.log(JSON.stringify(await tf.io.listModels()));
*
* // Delete the model.
* await tf.io.removeModel('localstorage://demo/management/model1');
*
* // List models again.
* console.log(JSON.stringify(await tf.io.listModels()));
* ```
*
* @param url A URL to a stored model, with a scheme prefix, e.g.,
* 'localstorage://my-model-1', 'indexeddb://my/model/2'.
* @returns ModelArtifactsInfo of the deleted model (if and only if deletion
* is successful).
* @throws Error if deletion fails, e.g., if no model exists at `path`.
*
* @doc {
* heading: 'Models',
* subheading: 'Management',
* namespace: 'io',
* ignoreCI: true
* }
*/
/**
 * Remove the model stored at a scheme-prefixed URL.
 *
 * @param url E.g. 'localstorage://my-model-1'.
 * @returns `ModelArtifactsInfo` of the deleted model.
 * @throws Error if no manager is registered for the scheme, or deletion fails.
 */
async function removeModel(url) {
    const { scheme, path } = parseURL(url);
    return model_management_ModelStoreManagerRegistry.getManager(scheme).removeModel(path);
}
/**
* Copy a model from one URL to another.
*
* This function supports:
*
* 1. Copying within a storage medium, e.g.,
* `tf.io.copyModel('localstorage://model-1', 'localstorage://model-2')`
* 2. Copying between two storage mediums, e.g.,
* `tf.io.copyModel('localstorage://model-1', 'indexeddb://model-1')`
*
* ```js
* // First create and save a model.
* const model = tf.sequential();
* model.add(tf.layers.dense(
* {units: 1, inputShape: [10], activation: 'sigmoid'}));
* await model.save('localstorage://demo/management/model1');
*
* // Then list existing models.
* console.log(JSON.stringify(await tf.io.listModels()));
*
* // Copy the model, from Local Storage to IndexedDB.
* await tf.io.copyModel(
* 'localstorage://demo/management/model1',
* 'indexeddb://demo/management/model1');
*
* // List models again.
* console.log(JSON.stringify(await tf.io.listModels()));
*
* // Remove both models.
* await tf.io.removeModel('localstorage://demo/management/model1');
* await tf.io.removeModel('indexeddb://demo/management/model1');
* ```
*
* @param sourceURL Source URL of copying.
* @param destURL Destination URL of copying.
* @returns ModelArtifactsInfo of the copied model (if and only if copying
* is successful).
* @throws Error if copying fails, e.g., if no model exists at `sourceURL`, or
* if `oldPath` and `newPath` are identical.
*
* @doc {
* heading: 'Models',
* subheading: 'Management',
* namespace: 'io',
* ignoreCI: true
* }
*/
/**
 * Copy a model from one scheme-prefixed URL to another, within or across
 * storage mediums. The source model is left untouched.
 *
 * @returns `ModelArtifactsInfo` of the copied model.
 */
async function copyModel(sourceURL, destURL) {
    // A copy never deletes the source.
    return cloneModelInternal(sourceURL, destURL, /* deleteSource= */ false);
}
/**
* Move a model from one URL to another.
*
* This function supports:
*
* 1. Moving within a storage medium, e.g.,
* `tf.io.moveModel('localstorage://model-1', 'localstorage://model-2')`
* 2. Moving between two storage mediums, e.g.,
* `tf.io.moveModel('localstorage://model-1', 'indexeddb://model-1')`
*
* ```js
* // First create and save a model.
* const model = tf.sequential();
* model.add(tf.layers.dense(
* {units: 1, inputShape: [10], activation: 'sigmoid'}));
* await model.save('localstorage://demo/management/model1');
*
* // Then list existing models.
* console.log(JSON.stringify(await tf.io.listModels()));
*
* // Move the model, from Local Storage to IndexedDB.
* await tf.io.moveModel(
* 'localstorage://demo/management/model1',
* 'indexeddb://demo/management/model1');
*
* // List models again.
* console.log(JSON.stringify(await tf.io.listModels()));
*
* // Remove the moved model.
* await tf.io.removeModel('indexeddb://demo/management/model1');
* ```
*
* @param sourceURL Source URL of moving.
* @param destURL Destination URL of moving.
* @returns ModelArtifactsInfo of the copied model (if and only if copying
* is successful).
* @throws Error if moving fails, e.g., if no model exists at `sourceURL`, or
* if `oldPath` and `newPath` are identical.
*
* @doc {
* heading: 'Models',
* subheading: 'Management',
* namespace: 'io',
* ignoreCI: true
* }
*/
/**
 * Move a model from one scheme-prefixed URL to another, within or across
 * storage mediums. The source model is removed.
 *
 * @returns `ModelArtifactsInfo` of the moved model.
 */
async function moveModel(sourceURL, destURL) {
    // A move is a copy that deletes the source.
    return cloneModelInternal(sourceURL, destURL, /* deleteSource= */ true);
}
//# sourceMappingURL=model_management.js.map
// CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/platforms/platform_browser.js
/**
* @license
* Copyright 2019 Google LLC. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================================
*/
/**
 * Browser implementation of the tfjs Platform interface: HTTP fetch, a
 * high-resolution clock, and UTF-8 text encoding/decoding.
 */
class PlatformBrowser {
    // Delegates to the global Fetch API.
    fetch(path, init) {
        return fetch(path, init);
    }
    // High-resolution timestamp in milliseconds.
    now() {
        return performance.now();
    }
    // Encode `text` to bytes; only UTF-8 is supported by TextEncoder.
    encode(text, encoding) {
        const isUtf8 = encoding === 'utf-8' || encoding === 'utf8';
        if (!isUtf8) {
            throw new Error(`Browser's encoder only supports utf-8, but got ${encoding}`);
        }
        // Lazily create and cache a single TextEncoder instance.
        if (this.textEncoder == null) {
            this.textEncoder = new TextEncoder();
        }
        return this.textEncoder.encode(text);
    }
    // Decode bytes to a string in the given encoding.
    decode(bytes, encoding) {
        return new TextDecoder(encoding).decode(bytes);
    }
}
// In browser environments, install the browser platform and register the
// model-store managers for the 'localstorage://' and 'indexeddb://' schemes.
// registerManager asserts when a scheme is already taken, so the try/catch
// blocks presumably guard against duplicate registration (e.g. the module
// being evaluated twice) — NOTE(review): errors are silently discarded.
if (Object(environment["c" /* env */])().get('IS_BROWSER')) {
    Object(environment["c" /* env */])().setPlatform('browser', new PlatformBrowser());
    // Register LocalStorage IOHandler
    try {
        model_management_ModelStoreManagerRegistry.registerManager(local_storage_BrowserLocalStorage.URL_SCHEME, new local_storage_BrowserLocalStorageManager());
    }
    catch (err) {
        // Ignored: registration failure (likely duplicate scheme) is non-fatal.
    }
    // Register IndexedDB IOHandler
    try {
        model_management_ModelStoreManagerRegistry.registerManager(indexed_db_BrowserIndexedDB.URL_SCHEME, new BrowserIndexedDBManager());
    }
    catch (err) {
        // Ignored: registration failure (likely duplicate scheme) is non-fatal.
    }
}
//# sourceMappingURL=platform_browser.js.map
// EXTERNAL MODULE: ./node_modules/@tensorflow/tfjs-core/dist/platforms/platform_node.js
var platform_node = __webpack_require__(278);
// EXTERNAL MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/buffer.js
var buffer = __webpack_require__(47);
// EXTERNAL MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/cast.js
var cast = __webpack_require__(12);
// EXTERNAL MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/clone.js
var clone = __webpack_require__(70);
// EXTERNAL MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/print.js
var print = __webpack_require__(150);
// EXTERNAL MODULE: ./node_modules/@tensorflow/tfjs-core/dist/tensor.js + 1 modules
var dist_tensor = __webpack_require__(6);
// CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/base_side_effects.js
/**
* @license
* Copyright 2020 Google Inc. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================================
*/
// Required side effectful code for tfjs-core
// Set up Engine and ENV
// Create the global Engine/ENV singleton if it does not exist yet.
Object(engine["b" /* getOrMakeEngine */])();
// Register backend-agnostic flags.
// Register platforms
// Set up OpHandler
// Bundle of basic tensor ops handed to the Tensor class so its convenience
// methods can delegate to them without creating circular module imports.
const opHandler = {
    buffer: buffer["a" /* buffer */],
    cast: cast["a" /* cast */],
    clone: clone["a" /* clone */],
    print: print["a" /* print */]
};
Object(dist_tensor["f" /* setOpHandler */])(opHandler);
//# sourceMappingURL=base_side_effects.js.map
// CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/io/browser_files.js
/**
* @license
* Copyright 2018 Google LLC. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================================
*/
/**
* IOHandlers related to files, such as browser-triggered file downloads,
* user-selected files in browser.
*/
const DEFAULT_FILE_NAME_PREFIX = 'model';
const DEFAULT_JSON_EXTENSION_NAME = '.json';
const DEFAULT_WEIGHT_DATA_EXTENSION_NAME = '.weights.bin';
/**
 * Run `f` on a later macrotask tick (via setTimeout) and resolve with its
 * result. Used to space out successive download clicks.
 */
async function defer(f) {
    await new Promise(resolve => setTimeout(resolve));
    return f();
}
/**
 * IOHandler that saves model artifacts by triggering two browser file
 * downloads: a JSON file (topology + weights manifest) and a binary weights
 * file. Supports `save` only.
 */
class browser_files_BrowserDownloads {
    constructor(fileNamePrefix) {
        if (!Object(environment["c" /* env */])().getBool('IS_BROWSER')) {
            // TODO(cais): Provide info on what IOHandlers are available under the
            // current environment.
            throw new Error('browserDownloads() cannot proceed because the current environment ' +
                'is not a browser.');
        }
        // Accept either 'downloads://myprefix' or a bare 'myprefix'.
        if (fileNamePrefix.startsWith(browser_files_BrowserDownloads.URL_SCHEME)) {
            fileNamePrefix = fileNamePrefix.slice(browser_files_BrowserDownloads.URL_SCHEME.length);
        }
        // An empty (or null) prefix falls back to the default 'model'.
        if (fileNamePrefix == null || fileNamePrefix.length === 0) {
            fileNamePrefix = DEFAULT_FILE_NAME_PREFIX;
        }
        this.modelTopologyFileName = fileNamePrefix + DEFAULT_JSON_EXTENSION_NAME;
        this.weightDataFileName =
            fileNamePrefix + DEFAULT_WEIGHT_DATA_EXTENSION_NAME;
    }
    /**
     * Trigger downloads of the model JSON and (if present) the binary weights.
     *
     * @param modelArtifacts Artifacts to save. Binary (ArrayBuffer) topology
     *   is not supported.
     * @returns An object with `modelArtifactsInfo` describing what was saved.
     * @throws Error outside a document context or for binary topology.
     */
    async save(modelArtifacts) {
        if (typeof (document) === 'undefined') {
            throw new Error('Browser downloads are not supported in ' +
                'this environment since `document` is not present');
        }
        const weightsURL = window.URL.createObjectURL(new Blob([modelArtifacts.weightData], { type: 'application/octet-stream' }));
        if (modelArtifacts.modelTopology instanceof ArrayBuffer) {
            throw new Error('BrowserDownloads.save() does not support saving model topology ' +
                'in binary formats yet.');
        }
        else {
            // Manifest points at the sibling weights file that will be
            // downloaded alongside the JSON.
            const weightsManifest = [{
                    paths: ['./' + this.weightDataFileName],
                    weights: modelArtifacts.weightSpecs
                }];
            const modelTopologyAndWeightManifest = {
                modelTopology: modelArtifacts.modelTopology,
                format: modelArtifacts.format,
                generatedBy: modelArtifacts.generatedBy,
                convertedBy: modelArtifacts.convertedBy,
                weightsManifest
            };
            // Optional fields are copied only when present, keeping the JSON
            // minimal.
            if (modelArtifacts.signature != null) {
                modelTopologyAndWeightManifest.signature = modelArtifacts.signature;
            }
            if (modelArtifacts.userDefinedMetadata != null) {
                modelTopologyAndWeightManifest.userDefinedMetadata =
                    modelArtifacts.userDefinedMetadata;
            }
            if (modelArtifacts.modelInitializer != null) {
                modelTopologyAndWeightManifest.modelInitializer =
                    modelArtifacts.modelInitializer;
            }
            const modelTopologyAndWeightManifestURL = window.URL.createObjectURL(new Blob([JSON.stringify(modelTopologyAndWeightManifest)], { type: 'application/json' }));
            // If anchor elements are not provided, create them without attaching them
            // to parents, so that the downloaded file names can be controlled.
            const jsonAnchor = this.jsonAnchor == null ? document.createElement('a') :
                this.jsonAnchor;
            jsonAnchor.download = this.modelTopologyFileName;
            jsonAnchor.href = modelTopologyAndWeightManifestURL;
            // Trigger downloads by evoking a click event on the download anchors.
            // When multiple downloads are started synchronously, Firefox will only
            // save the last one.
            await defer(() => jsonAnchor.dispatchEvent(new MouseEvent('click')));
            if (modelArtifacts.weightData != null) {
                const weightDataAnchor = this.weightDataAnchor == null ?
                    document.createElement('a') :
                    this.weightDataAnchor;
                weightDataAnchor.download = this.weightDataFileName;
                weightDataAnchor.href = weightsURL;
                await defer(() => weightDataAnchor.dispatchEvent(new MouseEvent('click')));
            }
            return { modelArtifactsInfo: Object(io_utils["g" /* getModelArtifactsInfoForJSON */])(modelArtifacts) };
        }
    }
}
// URL scheme that routes to this handler, e.g. 'downloads://my-model'.
browser_files_BrowserDownloads.URL_SCHEME = 'downloads://';
/**
 * IOHandler that loads model artifacts from user-selected `File`s: a JSON
 * topology/manifest file first, followed by zero or more binary weight
 * files. Supports `load` only.
 */
class browser_files_BrowserFiles {
    constructor(files) {
        if (files == null || files.length < 1) {
            throw new Error(`When calling browserFiles, at least 1 file is required, ` +
                `but received ${files}`);
        }
        this.files = files;
    }
    /**
     * Read the JSON file and any weight files, and assemble model artifacts.
     *
     * @returns A Promise of the assembled artifacts; rejects if the JSON is
     *   malformed or the weight files do not match the manifest.
     */
    async load() {
        const jsonFile = this.files[0];
        const weightFiles = this.files.slice(1);
        return new Promise((resolve, reject) => {
            const jsonReader = new FileReader();
            jsonReader.onload = (event) => {
                // tslint:disable-next-line:no-any
                const modelJSON = JSON.parse(event.target.result);
                const modelTopology = modelJSON.modelTopology;
                if (modelTopology == null) {
                    reject(new Error(`modelTopology field is missing from file ${jsonFile.name}`));
                    return;
                }
                if (weightFiles.length === 0) {
                    resolve({ modelTopology });
                    // Bug fix: stop here. Previously execution fell through into
                    // the manifest checks (and no-op reject calls) even though
                    // the promise was already resolved.
                    return;
                }
                const weightsManifest = modelJSON.weightsManifest;
                if (weightsManifest == null) {
                    // Bug fix: message previously said 'weightManifest'; the
                    // actual field name is 'weightsManifest'.
                    reject(new Error(`weightsManifest field is missing from file ${jsonFile.name}`));
                    return;
                }
                let pathToFile;
                try {
                    pathToFile =
                        this.checkManifestAndWeightFiles(weightsManifest, weightFiles);
                }
                catch (err) {
                    reject(err);
                    return;
                }
                const weightSpecs = [];
                const paths = [];
                const perFileBuffers = [];
                // First pass: record every manifest path and reserve a slot for
                // its buffer, preserving manifest order.
                weightsManifest.forEach(weightsGroup => {
                    weightsGroup.paths.forEach(path => {
                        paths.push(path);
                        perFileBuffers.push(null);
                    });
                    weightSpecs.push(...weightsGroup.weights);
                });
                // Second pass: read each weight file asynchronously; resolve
                // once every slot has been filled.
                weightsManifest.forEach(weightsGroup => {
                    weightsGroup.paths.forEach(path => {
                        const weightFileReader = new FileReader();
                        weightFileReader.onload = (event) => {
                            // tslint:disable-next-line:no-any
                            const weightData = event.target.result;
                            const index = paths.indexOf(path);
                            perFileBuffers[index] = weightData;
                            if (perFileBuffers.indexOf(null) === -1) {
                                const result = {
                                    modelTopology,
                                    weightSpecs,
                                    weightData: Object(io_utils["d" /* concatenateArrayBuffers */])(perFileBuffers),
                                    format: modelJSON.format,
                                    generatedBy: modelJSON.generatedBy,
                                    convertedBy: modelJSON.convertedBy
                                };
                                if (modelJSON.signature != null) {
                                    result.signature = modelJSON.signature;
                                }
                                if (modelJSON.userDefinedMetadata != null) {
                                    result.userDefinedMetadata = modelJSON.userDefinedMetadata;
                                }
                                if (modelJSON.modelInitializer != null) {
                                    result.modelInitializer = modelJSON.modelInitializer;
                                }
                                resolve(result);
                            }
                        };
                        // Bug fix: message was missing the verb ('Failed to
                        // weights data...').
                        weightFileReader.onerror = error => reject(`Failed to read weights data from file of path '${path}'.`);
                        weightFileReader.readAsArrayBuffer(pathToFile[path]);
                    });
                });
            };
            jsonReader.onerror = error => reject(`Failed to read model topology and weights manifest JSON ` +
                `from file '${jsonFile.name}'. BrowserFiles supports loading ` +
                `Keras-style tf.Model artifacts only.`);
            jsonReader.readAsText(jsonFile);
        });
    }
    /**
     * Check the compatibility between weights manifest and weight files.
     *
     * @param manifest Weights manifest groups from the model JSON.
     * @param files Binary weight `File`s supplied by the user.
     * @returns A map from manifest path to the matching `File`.
     * @throws Error on duplicate basenames, a missing file, or a count
     *   mismatch between manifest entries and provided files.
     */
    checkManifestAndWeightFiles(manifest, files) {
        const basenames = [];
        const fileNames = files.map(file => Object(io_utils["c" /* basename */])(file.name));
        const pathToFile = {};
        for (const group of manifest) {
            group.paths.forEach(path => {
                const pathBasename = Object(io_utils["c" /* basename */])(path);
                if (basenames.indexOf(pathBasename) !== -1) {
                    throw new Error(`Duplicate file basename found in weights manifest: ` +
                        `'${pathBasename}'`);
                }
                basenames.push(pathBasename);
                if (fileNames.indexOf(pathBasename) === -1) {
                    throw new Error(`Weight file with basename '${pathBasename}' is not provided.`);
                }
                else {
                    pathToFile[path] = files[fileNames.indexOf(pathBasename)];
                }
            });
        }
        if (basenames.length !== files.length) {
            throw new Error(`Mismatch in the number of files in weights manifest ` +
                `(${basenames.length}) and the number of weight files provided ` +
                `(${files.length}).`);
        }
        return pathToFile;
    }
}
// Routes 'downloads://<prefix>' URLs to a BrowserDownloads save handler.
// Arrays, other schemes, and non-browser environments yield null.
const browserDownloadsRouter = (url) => {
    if (!Object(environment["c" /* env */])().getBool('IS_BROWSER')) {
        return null;
    }
    if (Array.isArray(url) || !url.startsWith(browser_files_BrowserDownloads.URL_SCHEME)) {
        return null;
    }
    return browserDownloads(url.slice(browser_files_BrowserDownloads.URL_SCHEME.length));
};
IORouterRegistry.registerSaveRouter(browserDownloadsRouter);
/**
* Creates an IOHandler that triggers file downloads from the browser.
*
* The returned `IOHandler` instance can be used as model exporting methods such
* as `tf.Model.save` and supports only saving.
*
* ```js
* const model = tf.sequential();
* model.add(tf.layers.dense(
* {units: 1, inputShape: [10], activation: 'sigmoid'}));
* const saveResult = await model.save('downloads://mymodel');
* // This will trigger downloading of two files:
* // 'mymodel.json' and 'mymodel.weights.bin'.
* console.log(saveResult);
* ```
*
* @param fileNamePrefix Prefix name of the files to be downloaded. For use with
* `tf.Model`, `fileNamePrefix` should follow either of the following two
* formats:
* 1. `null` or `undefined`, in which case the default file
* names will be used:
* - 'model.json' for the JSON file containing the model topology and
* weights manifest.
* - 'model.weights.bin' for the binary file containing the binary weight
* values.
* 2. A single string or an Array of a single string, as the file name prefix.
* For example, if `'foo'` is provided, the downloaded JSON
* file and binary weights file will be named 'foo.json' and
* 'foo.weights.bin', respectively.
* @param config Additional configuration for triggering downloads.
* @returns An instance of `BrowserDownloads` `IOHandler`.
*
* @doc {
* heading: 'Models',
* subheading: 'Loading',
* namespace: 'io',
* ignoreCI: true
* }
*/
function browserDownloads(fileNamePrefix = 'model') {
    // Thin factory over the BrowserDownloads IOHandler.
    const handler = new browser_files_BrowserDownloads(fileNamePrefix);
    return handler;
}
/**
* Creates an IOHandler that loads model artifacts from user-selected files.
*
* This method can be used for loading from files such as user-selected files
* in the browser.
* When used in conjunction with `tf.loadLayersModel`, an instance of
* `tf.LayersModel` (Keras-style) can be constructed from the loaded artifacts.
*
* ```js
* // Note: This code snippet won't run properly without the actual file input
* // elements in the HTML DOM.
*
* // Suppose there are two HTML file input (`<input type="file" ...>`)
* // elements.
* const uploadJSONInput = document.getElementById('upload-json');
* const uploadWeightsInput = document.getElementById('upload-weights');
* const model = await tf.loadLayersModel(tf.io.browserFiles(
* [uploadJSONInput.files[0], uploadWeightsInput.files[0]]));
* ```
*
* @param files `File`s to load from. Currently, this function supports only
* loading from files that contain Keras-style models (i.e., `tf.Model`s), for
* which an `Array` of `File`s is expected (in that order):
* - A JSON file containing the model topology and weight manifest.
* - Optionally, One or more binary files containing the binary weights.
* These files must have names that match the paths in the `weightsManifest`
* contained by the aforementioned JSON file, or errors will be thrown
* during loading. These weights files have the same format as the ones
* generated by `tensorflowjs_converter` that comes with the `tensorflowjs`
* Python PIP package. If no weights files are provided, only the model
* topology will be loaded from the JSON file above.
* @returns An instance of `Files` `IOHandler`.
*
* @doc {
* heading: 'Models',
* subheading: 'Loading',
* namespace: 'io',
* ignoreCI: true
* }
*/
function browserFiles(files) {
    // Thin factory over the BrowserFiles IOHandler.
    const handler = new browser_files_BrowserFiles(files);
    return handler;
}
//# sourceMappingURL=browser_files.js.map
// CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/io/progress.js
/**
* @license
* Copyright 2019 Google LLC. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================================
*/
/**
* Monitor Promise.all progress, fire onProgress callback function.
*
* @param promises Promise list going to be monitored
* @param onProgress Callback function. Fired when a promise resolved.
* @param startFraction Optional fraction start. Default to 0.
* @param endFraction Optional fraction end. Default to 1.
*/
/**
 * Monitor a list of promises, firing `onProgress` with a fraction in
 * [startFraction, endFraction] each time one resolves.
 *
 * @param promises Non-empty array of promises to monitor.
 * @param onProgress Callback invoked with the current fraction.
 * @param startFraction Optional start fraction; defaults to 0.
 * @param endFraction Optional end fraction; defaults to 1.
 * @returns A `Promise.all` over the monitored promises.
 */
function monitorPromisesProgress(promises, onProgress, startFraction, endFraction) {
    function checkPromises(promises) {
        Object(util_base["b" /* assert */])(promises != null && Array.isArray(promises) && promises.length > 0, () => 'promises must be a none empty array');
    }
    function checkFraction(startFraction, endFraction) {
        Object(util_base["b" /* assert */])(startFraction >= 0 && startFraction <= 1, () => `Progress fraction must be in range [0, 1], but ` +
            `got startFraction ${startFraction}`);
        Object(util_base["b" /* assert */])(endFraction >= 0 && endFraction <= 1, () => `Progress fraction must be in range [0, 1], but ` +
            `got endFraction ${endFraction}`);
        Object(util_base["b" /* assert */])(endFraction >= startFraction, () => `startFraction must be no more than endFraction, but ` +
            `got startFraction ${startFraction} and endFraction ` +
            `${endFraction}`);
    }
    checkPromises(promises);
    if (startFraction == null) {
        startFraction = 0;
    }
    if (endFraction == null) {
        endFraction = 1;
    }
    checkFraction(startFraction, endFraction);
    let doneCount = 0;
    const attachProgressListener = (promise) => {
        promise.then(value => {
            ++doneCount;
            // Map completion ratio into [startFraction, endFraction].
            const fraction = startFraction +
                (doneCount / promises.length) * (endFraction - startFraction);
            onProgress(fraction);
            return value;
        });
        return promise;
    };
    return Promise.all(promises.map(attachProgressListener));
}
//# sourceMappingURL=progress.js.map
// EXTERNAL MODULE: ./node_modules/@tensorflow/tfjs-core/dist/io/types.js
var types = __webpack_require__(148);
// CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/io/weights_loader.js
/**
* @license
* Copyright 2018 Google LLC. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================================
*/
/**
* Reads binary weights data from a number of URLs.
*
* @param fetchURLs URLs to send the HTTP requests at, using `fetch` calls.
* @param requestOptions RequestInit (options) for the HTTP requests.
* @param fetchFunc Optional overriding value for the `window.fetch` function.
* @param onProgress Optional, progress callback function, fired periodically
* before the load is completed.
* @returns A `Promise` of an Array of `ArrayBuffer`. The Array has the same
* length as `fetchURLs`.
*/
/**
 * Read binary weight data from a number of URLs in parallel.
 *
 * @param fetchURLs URLs to fetch the weight shards from.
 * @param loadOptions Optional options: `fetchFunc` overrides the platform
 *   fetch, `requestInit` is passed through, `onProgress` receives progress
 *   fractions (fetching maps to [0, 0.5], body reading to [0.5, 1]).
 * @returns A `Promise` of an Array of `ArrayBuffer`, same length/order as
 *   `fetchURLs`.
 */
async function loadWeightsAsArrayBuffer(fetchURLs, loadOptions) {
    if (loadOptions == null) {
        loadOptions = {};
    }
    const fetchFunc = loadOptions.fetchFunc == null ?
        Object(environment["c" /* env */])().platform.fetch :
        loadOptions.fetchFunc;
    // Kick off all shard requests in parallel.
    const requests = fetchURLs.map(fetchURL => fetchFunc(fetchURL, loadOptions.requestInit, { isBinary: true }));
    const hasProgressCallback = loadOptions.onProgress != null;
    let responses;
    if (hasProgressCallback) {
        responses = await monitorPromisesProgress(requests, loadOptions.onProgress, 0, 0.5);
    }
    else {
        responses = await Promise.all(requests);
    }
    const bufferPromises = responses.map(response => response.arrayBuffer());
    if (hasProgressCallback) {
        return monitorPromisesProgress(bufferPromises, loadOptions.onProgress, 0.5, 1);
    }
    return Promise.all(bufferPromises);
}
/**
* Reads a weights manifest JSON configuration, fetches the weights and
* returns them as `Tensor`s.
*
* @param manifest The weights manifest JSON.
* @param filePathPrefix The path prefix for filenames given in the manifest.
* Defaults to the empty string.
* @param weightNames The names of the weights to be fetched.
*/
async function loadWeights(manifest, filePathPrefix = '', weightNames, requestInit) {
    // TODO(nsthorat): Groups are currently fetched atomically. If you need a
    // single weight from a group, the whole group will be fetched. At a future
    // date, we should support fetching only the individual shards within a
    // group that are needed to reconstruct the requested weight.
    // TODO(cais): Use `decodeWeights` for implementation.
    // Build a fetch-based weight loader and delegate the manifest handling
    // to the shared factory. (Local renamed from `loadWeights` to avoid
    // shadowing this function's own name.)
    const fetchWeights = (fetchUrls) => loadWeightsAsArrayBuffer(fetchUrls, { requestInit });
    const loader = weightsLoaderFactory(fetchWeights);
    return loader(manifest, filePathPrefix, weightNames);
}
/**
* Creates a function, which reads a weights manifest JSON configuration,
* fetches the weight files using the specified function and returns them as
* `Tensor`s.
*
* ```js
* // example for creating a nodejs weight loader, which reads the weight files
* // from disk using fs.readFileSync
*
* import * as fs from 'fs'
*
* const fetchWeightsFromDisk = (filePaths: string[]) =>
* filePaths.map(filePath => fs.readFileSync(filePath).buffer)
*
* const loadWeights = tf.io.weightsLoaderFactory(fetchWeightsFromDisk)
*
* const manifest = JSON.parse(
* fs.readFileSync('./my_model-weights_manifest').toString()
* )
* const weightMap = await loadWeights(manifest, './')
* ```
* @param fetchWeightsFunction The function used for fetching the weight files.
* @returns Weight loading function.
*/
/**
 * Creates a function which reads a weights manifest JSON configuration,
 * fetches the weight files using the supplied function and returns them as
 * a name -> `Tensor` map.
 *
 * Fix vs. original: the inner shard-concatenation loops reused `i` as the
 * loop variable, shadowing the enclosing `forEach` group-index parameter
 * (also `i`); the variables are renamed so each scope is unambiguous.
 * Behavior is unchanged.
 *
 * @param fetchWeightsFunction Function that receives an array of URLs/paths
 *   and resolves to an array of `ArrayBuffer`s in the same order.
 * @returns Async weight-loading function `(manifest, filePathPrefix,
 *   weightNames) => Promise<NamedTensorMap>`.
 */
function weightsLoaderFactory(fetchWeightsFunction) {
    return async (manifest, filePathPrefix = '', weightNames) => {
        // Collect all the groups, weights, and their relative offsets to be
        // fetched.
        const groupIndicesToFetchMap = manifest.map(() => false);
        const groupWeightsToFetch = {};
        const weightsFound = weightNames != null ? weightNames.map(() => false) : [];
        const allManifestWeightNames = [];
        manifest.forEach((manifestGroupConfig, groupIndex) => {
            // Running byte offset of each weight within the (future)
            // concatenated buffer of this group.
            let groupOffset = 0;
            manifestGroupConfig.weights.forEach(weightsEntry => {
                const rawDtype = ('quantization' in weightsEntry) ?
                    weightsEntry.quantization.dtype :
                    weightsEntry.dtype;
                const weightsBytes = types["a" /* DTYPE_VALUE_SIZE_MAP */][rawDtype] *
                    util_base["O" /* sizeFromShape */](weightsEntry.shape);
                // Mark the group as needed and remember where this weight's
                // bytes live within the concatenated group buffer.
                const enqueueWeightsForFetchingFn = () => {
                    groupIndicesToFetchMap[groupIndex] = true;
                    if (groupWeightsToFetch[groupIndex] == null) {
                        groupWeightsToFetch[groupIndex] = [];
                    }
                    groupWeightsToFetch[groupIndex].push({
                        manifestEntry: weightsEntry,
                        groupOffset,
                        sizeBytes: weightsBytes
                    });
                };
                if (weightNames != null) {
                    weightNames.forEach((weightName, weightIndex) => {
                        if (weightName === weightsEntry.name) {
                            enqueueWeightsForFetchingFn();
                            weightsFound[weightIndex] = true;
                        }
                    });
                }
                else {
                    enqueueWeightsForFetchingFn();
                }
                allManifestWeightNames.push(weightsEntry.name);
                groupOffset += weightsBytes;
            });
        });
        if (!weightsFound.every(found => found)) {
            const weightsNotFound = weightNames.filter((_, i) => !weightsFound[i]);
            throw new Error(`Could not find weights in manifest with names: ` +
                `${weightsNotFound.join(', ')}. \n` +
                `Manifest JSON has weights with names: ` +
                `${allManifestWeightNames.join(', ')}.`);
        }
        // Convert the one-hot boolean groupId => shouldFetch map to a list of
        // group IDs.
        const groupIndicesToFetch = groupIndicesToFetchMap.reduce((accumulator, shouldFetch, i) => {
            if (shouldFetch) {
                accumulator.push(i);
            }
            return accumulator;
        }, []);
        const fetchUrls = [];
        groupIndicesToFetch.forEach(i => {
            manifest[i].paths.forEach(filepath => {
                const fetchUrl = filePathPrefix +
                    (!filePathPrefix.endsWith('/') ? '/' : '') + filepath;
                fetchUrls.push(fetchUrl);
            });
        });
        const buffers = await fetchWeightsFunction(fetchUrls);
        const weightsTensorMap = {};
        // Index of the first shard buffer belonging to the current group.
        let bufferIndexOffset = 0;
        groupIndicesToFetch.forEach(groupIndex => {
            const numBuffers = manifest[groupIndex].paths.length;
            // Total byte length of this group across all of its shard buffers.
            let groupBytes = 0;
            for (let bufferIndex = 0; bufferIndex < numBuffers; bufferIndex++) {
                groupBytes += buffers[bufferIndexOffset + bufferIndex].byteLength;
            }
            // Create a buffer for the whole group and concatenate the shards.
            const groupBuffer = new ArrayBuffer(groupBytes);
            const groupByteBuffer = new Uint8Array(groupBuffer);
            let groupBufferOffset = 0;
            for (let bufferIndex = 0; bufferIndex < numBuffers; bufferIndex++) {
                const buffer = new Uint8Array(buffers[bufferIndexOffset + bufferIndex]);
                groupByteBuffer.set(buffer, groupBufferOffset);
                groupBufferOffset += buffer.byteLength;
            }
            // Slice each requested weight back out of the group buffer and
            // decode it into named tensors.
            const weightsEntries = groupWeightsToFetch[groupIndex];
            weightsEntries.forEach(weightsEntry => {
                const byteBuffer = groupBuffer.slice(weightsEntry.groupOffset, weightsEntry.groupOffset + weightsEntry.sizeBytes);
                const nameToTensorMap = Object(io_utils["e" /* decodeWeights */])(byteBuffer, [weightsEntry.manifestEntry]);
                for (const name in nameToTensorMap) {
                    weightsTensorMap[name] = nameToTensorMap[name];
                }
            });
            bufferIndexOffset += numBuffers;
        });
        return weightsTensorMap;
    };
}
//# sourceMappingURL=weights_loader.js.map
// CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/io/http.js
/**
* @license
* Copyright 2018 Google LLC. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================================
*/
/**
* IOHandler implementations based on HTTP requests in the web browser.
*
* Uses [`fetch`](https://developer.mozilla.org/en-US/docs/Web/API/Fetch_API).
*/
const OCTET_STREAM_MIME_TYPE = 'application/octet-stream';
const JSON_TYPE = 'application/json';
// IOHandler implementation that saves and loads model artifacts over
// HTTP(S) using `fetch`. Saving POSTs a multipart/form-data body with a
// 'model.json' blob and an optional 'model.weights.bin' blob; loading
// fetches the model JSON and then the weight shards listed in its manifest.
class http_HTTPRequest {
    // path: a URL string, or a 2-element array [topologyUrl, weightsUrl].
    // loadOptions: optional {weightPathPrefix, onProgress, weightUrlConverter,
    // fetchFunc, requestInit}. Throws via assert on invalid path/fetchFunc,
    // and throws if requestInit already carries a body (the body is built
    // here during save()).
    constructor(path, loadOptions) {
        this.DEFAULT_METHOD = 'POST';
        if (loadOptions == null) {
            loadOptions = {};
        }
        this.weightPathPrefix = loadOptions.weightPathPrefix;
        this.onProgress = loadOptions.onProgress;
        this.weightUrlConverter = loadOptions.weightUrlConverter;
        if (loadOptions.fetchFunc != null) {
            Object(util_base["b" /* assert */])(typeof loadOptions.fetchFunc === 'function', () => 'Must pass a function that matches the signature of ' +
                '`fetch` (see ' +
                'https://developer.mozilla.org/en-US/docs/Web/API/Fetch_API)');
            this.fetch = loadOptions.fetchFunc;
        }
        else {
            // Fall back to the platform-provided fetch implementation.
            this.fetch = Object(environment["c" /* env */])().platform.fetch;
        }
        Object(util_base["b" /* assert */])(path != null && path.length > 0, () => 'URL path for http must not be null, undefined or ' +
            'empty.');
        if (Array.isArray(path)) {
            Object(util_base["b" /* assert */])(path.length === 2, () => 'URL paths for http must have a length of 2, ' +
                `(actual length is ${path.length}).`);
        }
        this.path = path;
        if (loadOptions.requestInit != null &&
            loadOptions.requestInit.body != null) {
            throw new Error('requestInit is expected to have no pre-existing body, but has one.');
        }
        this.requestInit = loadOptions.requestInit || {};
    }
    // Sends the model artifacts to `this.path` as multipart/form-data.
    // Returns {modelArtifactsInfo, responses} on HTTP success; throws on a
    // non-ok response or if the topology is a binary ArrayBuffer.
    async save(modelArtifacts) {
        if (modelArtifacts.modelTopology instanceof ArrayBuffer) {
            throw new Error('BrowserHTTPRequest.save() does not support saving model topology ' +
                'in binary formats yet.');
        }
        const init = Object.assign({ method: this.DEFAULT_METHOD }, this.requestInit);
        init.body = new FormData();
        // Single-group manifest: all weights live in one ./model.weights.bin.
        const weightsManifest = [{
                paths: ['./model.weights.bin'],
                weights: modelArtifacts.weightSpecs,
            }];
        const modelTopologyAndWeightManifest = {
            modelTopology: modelArtifacts.modelTopology,
            format: modelArtifacts.format,
            generatedBy: modelArtifacts.generatedBy,
            convertedBy: modelArtifacts.convertedBy,
            weightsManifest
        };
        // Optional artifact fields are only attached when present.
        if (modelArtifacts.signature != null) {
            modelTopologyAndWeightManifest.signature = modelArtifacts.signature;
        }
        if (modelArtifacts.userDefinedMetadata != null) {
            modelTopologyAndWeightManifest.userDefinedMetadata =
                modelArtifacts.userDefinedMetadata;
        }
        if (modelArtifacts.modelInitializer != null) {
            modelTopologyAndWeightManifest.modelInitializer =
                modelArtifacts.modelInitializer;
        }
        init.body.append('model.json', new Blob([JSON.stringify(modelTopologyAndWeightManifest)], { type: JSON_TYPE }), 'model.json');
        if (modelArtifacts.weightData != null) {
            init.body.append('model.weights.bin', new Blob([modelArtifacts.weightData], { type: OCTET_STREAM_MIME_TYPE }), 'model.weights.bin');
        }
        const response = await this.fetch(this.path, init);
        if (response.ok) {
            return {
                modelArtifactsInfo: Object(io_utils["g" /* getModelArtifactsInfoForJSON */])(modelArtifacts),
                responses: [response],
            };
        }
        else {
            throw new Error(`BrowserHTTPRequest.save() failed due to HTTP response status ` +
                `${response.status}.`);
        }
    }
    /**
     * Load model artifacts via HTTP request(s).
     *
     * See the documentation to `tf.io.http` for details on the saved
     * artifacts.
     *
     * @returns The loaded model artifacts (if loading succeeds).
     */
    async load() {
        const modelConfigRequest = await this.fetch(this.path, this.requestInit);
        if (!modelConfigRequest.ok) {
            throw new Error(`Request to ${this.path} failed with status code ` +
                `${modelConfigRequest.status}. Please verify this URL points to ` +
                `the model JSON of the model to load.`);
        }
        let modelConfig;
        try {
            modelConfig = await modelConfigRequest.json();
        }
        catch (e) {
            let message = `Failed to parse model JSON of response from ${this.path}.`;
            // TODO(nsthorat): Remove this after some time when we're comfortable that
            // .pb files are mostly gone.
            if (this.path.endsWith('.pb')) {
                message += ' Your path contains a .pb file extension. ' +
                    'Support for .pb models have been removed in TensorFlow.js 1.0 ' +
                    'in favor of .json models. You can re-convert your Python ' +
                    'TensorFlow model using the TensorFlow.js 1.0 conversion scripts ' +
                    'or you can convert your.pb models with the \'pb2json\'' +
                    'NPM script in the tensorflow/tfjs-converter repository.';
            }
            else {
                message += ' Please make sure the server is serving valid ' +
                    'JSON for this request.';
            }
            throw new Error(message);
        }
        const modelTopology = modelConfig.modelTopology;
        const weightsManifest = modelConfig.weightsManifest;
        const generatedBy = modelConfig.generatedBy;
        const convertedBy = modelConfig.convertedBy;
        const format = modelConfig.format;
        const signature = modelConfig.signature;
        const userDefinedMetadata = modelConfig.userDefinedMetadata;
        // We do not allow both modelTopology and weightsManifest to be missing.
        if (modelTopology == null && weightsManifest == null) {
            throw new Error(`The JSON from HTTP path ${this.path} contains neither model ` +
                `topology or manifest for weights.`);
        }
        let weightSpecs;
        let weightData;
        if (weightsManifest != null) {
            const results = await this.loadWeights(weightsManifest);
            [weightSpecs, weightData] = results;
        }
        const artifacts = {
            modelTopology,
            weightSpecs,
            weightData,
            generatedBy,
            convertedBy,
            format
        };
        if (signature != null) {
            artifacts.signature = signature;
        }
        if (userDefinedMetadata != null) {
            artifacts.userDefinedMetadata = userDefinedMetadata;
        }
        const initializer = modelConfig.modelInitializer;
        if (initializer) {
            artifacts.modelInitializer = initializer;
        }
        return artifacts;
    }
    // Fetches all weight shards listed in the manifest and returns
    // [weightSpecs, concatenatedArrayBuffer]. Weight URLs are either built
    // from `weightPathPrefix` (or the model JSON's directory) plus the shard
    // path, or produced by `weightUrlConverter` when one was supplied.
    async loadWeights(weightsManifest) {
        const weightPath = Array.isArray(this.path) ? this.path[1] : this.path;
        const [prefix, suffix] = parseUrl(weightPath);
        const pathPrefix = this.weightPathPrefix || prefix;
        const weightSpecs = [];
        for (const entry of weightsManifest) {
            weightSpecs.push(...entry.weights);
        }
        const fetchURLs = [];
        const urlPromises = [];
        for (const weightsGroup of weightsManifest) {
            for (const path of weightsGroup.paths) {
                if (this.weightUrlConverter != null) {
                    urlPromises.push(this.weightUrlConverter(path));
                }
                else {
                    fetchURLs.push(pathPrefix + path + suffix);
                }
            }
        }
        if (this.weightUrlConverter) {
            fetchURLs.push(...await Promise.all(urlPromises));
        }
        const buffers = await loadWeightsAsArrayBuffer(fetchURLs, {
            requestInit: this.requestInit,
            fetchFunc: this.fetch,
            onProgress: this.onProgress
        });
        return [weightSpecs, Object(io_utils["d" /* concatenateArrayBuffers */])(buffers)];
    }
}
// URL schemes accepted by the HTTP router (http:// and https:// only).
http_HTTPRequest.URL_SCHEME_REGEX = /^https?:\/\//;
/**
* Extract the prefix and suffix of the url, where the prefix is the path before
* the last file, and suffix is the search params after the last file.
* ```
* const url = 'http://tfhub.dev/model/1/tensorflowjs_model.pb?tfjs-format=file'
* [prefix, suffix] = parseUrl(url)
* // prefix = 'http://tfhub.dev/model/1/'
* // suffix = '?tfjs-format=file'
* ```
* @param url the model url to be parsed.
*/
/**
 * Splits a model URL into its directory prefix and trailing search-params
 * suffix. The prefix is everything up to and including the final '/'; the
 * suffix is the query string that follows the file name (or '' if there is
 * none, or if the only '?' occurs before the last '/').
 *
 * @param url the model url to be parsed.
 * @returns A two-element array `[prefix, suffix]`.
 */
function parseUrl(url) {
    const slashIdx = url.lastIndexOf('/');
    const queryIdx = url.lastIndexOf('?');
    const suffix = queryIdx > slashIdx ? url.substring(queryIdx) : '';
    return [url.substring(0, slashIdx) + '/', suffix];
}
// Returns true when `url` begins with the http:// or https:// scheme.
function isHTTPScheme(url) {
    const schemeMatch = url.match(http_HTTPRequest.URL_SCHEME_REGEX);
    return schemeMatch != null;
}
// Router that resolves http(s) model URLs to an http_HTTPRequest handler.
// Returns null when no fetch implementation is available or when the URL
// (or any URL in an array of URLs) is not an http/https URL.
const httpRouter = (url, loadOptions) => {
    const fetchAvailable = typeof fetch !== 'undefined' ||
        (loadOptions != null && loadOptions.fetchFunc != null);
    if (!fetchAvailable) {
        // `http` uses `fetch` or `node-fetch`; if one wants to use it in an
        // environment that is not the browser or node, a global fetch
        // polyfill has to be set up first.
        return null;
    }
    const isHTTP = Array.isArray(url) ?
        url.every(urlItem => isHTTPScheme(urlItem)) :
        isHTTPScheme(url);
    if (isHTTP) {
        return http(url, loadOptions);
    }
    return null;
};
// Register the HTTP router for both saving and loading so http(s) model
// URLs are resolved to an http_HTTPRequest handler automatically.
IORouterRegistry.registerSaveRouter(httpRouter);
IORouterRegistry.registerLoadRouter(httpRouter);
/**
* Creates an IOHandler subtype that sends model artifacts to HTTP server.
*
* An HTTP request of the `multipart/form-data` mime type will be sent to the
* `path` URL. The form data includes artifacts that represent the topology
* and/or weights of the model. In the case of Keras-style `tf.Model`, two
* blobs (files) exist in form-data:
* - A JSON file consisting of `modelTopology` and `weightsManifest`.
* - A binary weights file consisting of the concatenated weight values.
* These files are in the same format as the one generated by
* [tfjs_converter](https://js.tensorflow.org/tutorials/import-keras.html).
*
* The following code snippet exemplifies the client-side code that uses this
* function:
*
* ```js
* const model = tf.sequential();
* model.add(
* tf.layers.dense({units: 1, inputShape: [100], activation: 'sigmoid'}));
*
* const saveResult = await model.save(tf.io.http(
* 'http://model-server:5000/upload', {requestInit: {method: 'PUT'}}));
* console.log(saveResult);
* ```
*
* If the default `POST` method is to be used, without any custom parameters
* such as headers, you can simply pass an HTTP or HTTPS URL to `model.save`:
*
* ```js
* const saveResult = await model.save('http://model-server:5000/upload');
* ```
*
* The following GitHub Gist
* https://gist.github.com/dsmilkov/1b6046fd6132d7408d5257b0976f7864
* implements a server based on [flask](https://github.com/pallets/flask) that
* can receive the request. Upon receiving the model artifacts via the requst,
* this particular server reconsistutes instances of [Keras
* Models](https://keras.io/models/model/) in memory.
*
*
* @param path A URL path to the model.
* Can be an absolute HTTP path (e.g.,
* 'http://localhost:8000/model-upload)') or a relative path (e.g.,
* './model-upload').
* @param requestInit Request configurations to be used when sending
* HTTP request to server using `fetch`. It can contain fields such as
* `method`, `credentials`, `headers`, `mode`, etc. See
* https://developer.mozilla.org/en-US/docs/Web/API/Request/Request
* for more information. `requestInit` must not have a body, because the
* body will be set by TensorFlow.js. File blobs representing the model
* topology (filename: 'model.json') and the weights of the model (filename:
* 'model.weights.bin') will be appended to the body. If `requestInit` has a
* `body`, an Error will be thrown.
* @param loadOptions Optional configuration for the loading. It includes the
* following fields:
* - weightPathPrefix Optional, this specifies the path prefix for weight
* files, by default this is calculated from the path param.
* - fetchFunc Optional, custom `fetch` function. E.g., in Node.js,
* the `fetch` from node-fetch can be used here.
* - onProgress Optional, progress callback function, fired periodically
* before the load is completed.
* @returns An instance of `IOHandler`.
*
* @doc {
* heading: 'Models',
* subheading: 'Loading',
* namespace: 'io',
* ignoreCI: true
* }
*/
function http(path, loadOptions) {
    // Thin factory; all path/option validation happens in the
    // http_HTTPRequest constructor.
    return new http_HTTPRequest(path, loadOptions);
}
/**
* Deprecated. Use `tf.io.http`.
* @param path
* @param loadOptions
*/
function browserHTTPRequest(path, loadOptions) {
    // Deprecated alias kept for backward compatibility; delegates to `http`.
    return http(path, loadOptions);
}
//# sourceMappingURL=http.js.map
// CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/io/passthrough.js
/**
* @license
* Copyright 2018 Google LLC. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================================
*/
// IOHandler that "loads" by returning in-memory model artifacts captured at
// construction time. Used by `tf.io.fromMemory`.
class PassthroughLoader {
    constructor(modelArtifacts) {
        this.modelArtifacts = modelArtifacts;
    }
    // Resolves immediately with the stored artifacts object (same reference).
    async load() {
        const artifacts = this.modelArtifacts;
        return artifacts;
    }
}
// IOHandler that "saves" by forwarding the model artifacts to a
// user-supplied callback. Used by `tf.io.withSaveHandler`.
class PassthroughSaver {
    constructor(saveHandler) {
        this.saveHandler = saveHandler;
    }
    // Delegates to the stored handler and returns whatever it produces.
    async save(modelArtifacts) {
        return this.saveHandler(modelArtifacts);
    }
}
/**
* Creates an IOHandler that loads model artifacts from memory.
*
* When used in conjunction with `tf.loadLayersModel`, an instance of
* `tf.LayersModel` (Keras-style) can be constructed from the loaded artifacts.
*
* ```js
* const model = await tf.loadLayersModel(tf.io.fromMemory(
* modelTopology, weightSpecs, weightData));
* ```
*
* @param modelArtifacts a object containing model topology (i.e., parsed from
* the JSON format).
* @param weightSpecs An array of `WeightsManifestEntry` objects describing the
* names, shapes, types, and quantization of the weight data.
* @param weightData A single `ArrayBuffer` containing the weight data,
* concatenated in the order described by the weightSpecs.
* @param trainingConfig Model training configuration. Optional.
*
* @returns A passthrough `IOHandler` that simply loads the provided data.
*/
/**
 * Creates an IOHandler that loads model artifacts from memory.
 *
 * The supported call is the single-argument form, where `modelArtifacts` is
 * a full ModelArtifacts object. The multi-argument form (topology, specs,
 * data, training config) is legacy and emits a deprecation warning, as does
 * passing a bare topology object as the single argument.
 *
 * @returns A passthrough `IOHandler` that simply loads the provided data.
 */
function fromMemory(modelArtifacts, weightSpecs, weightData, trainingConfig) {
    // Shared deprecation message for both legacy call shapes.
    const legacyWarning = 'Please call tf.io.fromMemory() with only one argument. ' +
        'The argument should be of type ModelArtifacts. ' +
        'The multi-argument signature of tf.io.fromMemory() has been ' +
        'deprecated and will be removed in a future release.';
    if (arguments.length !== 1) {
        // Legacy support.
        // TODO(cais): Remove this deprecated API.
        console.warn(legacyWarning);
        return new PassthroughLoader({
            modelTopology: modelArtifacts,
            weightSpecs,
            weightData,
            trainingConfig
        });
    }
    const isModelArtifacts = modelArtifacts.modelTopology != null ||
        modelArtifacts.weightSpecs != null;
    if (isModelArtifacts) {
        return new PassthroughLoader(modelArtifacts);
    }
    // Legacy support: with only modelTopology.
    // TODO(cais): Remove this deprecated API.
    console.warn(legacyWarning);
    return new PassthroughLoader({ modelTopology: modelArtifacts });
}
/**
* Creates an IOHandler that passes saved model artifacts to a callback.
*
* ```js
* function handleSave(artifacts) {
* // ... do something with the artifacts ...
* return {modelArtifactsInfo: {...}, ...};
* }
*
* const saveResult = model.save(tf.io.withSaveHandler(handleSave));
* ```
*
* @param saveHandler A function that accepts a `ModelArtifacts` and returns a
* `SaveResult`.
*/
function withSaveHandler(saveHandler) {
    // Wrap the callback in a passthrough IOHandler exposing `save()`.
    return new PassthroughSaver(saveHandler);
}
//# sourceMappingURL=passthrough.js.map
// CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/io/io.js
/**
* @license
* Copyright 2018 Google LLC. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================================
*/
// Importing local_storage and indexed_db is necessary for the routers to be
// registered.
//# sourceMappingURL=io.js.map
// EXTERNAL MODULE: ./node_modules/@tensorflow/tfjs-core/dist/tensor_util_env.js
var tensor_util_env = __webpack_require__(2);
// EXTERNAL MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/mat_mul.js
var mat_mul = __webpack_require__(24);
// EXTERNAL MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/one_hot.js
var one_hot = __webpack_require__(106);
// EXTERNAL MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/operation.js
var operation = __webpack_require__(4);
// EXTERNAL MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/transpose.js
var transpose = __webpack_require__(52);
// CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/confusion_matrix.js
/**
* @license
* Copyright 2018 Google LLC. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================================
*/
/**
* Computes the confusion matrix from true labels and predicted labels.
*
* ```js
* const labels = tf.tensor1d([0, 1, 2, 1, 0], 'int32');
* const predictions = tf.tensor1d([0, 2, 2, 1, 0], 'int32');
* const numClasses = 3;
* const out = tf.math.confusionMatrix(labels, predictions, numClasses);
* out.print();
* // Expected output matrix:
* // [[2, 0, 0],
* // [0, 1, 1],
* // [0, 0, 1]]
* ```
*
* @param labels The target labels, assumed to be 0-based integers
* for the classes. The shape is `[numExamples]`, where
* `numExamples` is the number of examples included.
* @param predictions The predicted classes, assumed to be
* 0-based integers for the classes. Must have the same shape as `labels`.
* @param numClasses Number of all classes, as an integer.
* Its value must be larger than the largest element in `labels` and
* `predictions`.
* @returns The confusion matrix as a int32-type 2D tensor. The value at
* row `r` and column `c` is the number of times examples of actual class
* `r` were predicted as class `c`.
*
* @doc {heading: 'Operations', subheading: 'Evaluation'}
*/
function confusionMatrix_(labels, predictions, numClasses) {
    // Convert plain-array inputs to tensors and validate ranks/shapes.
    const $labels = Object(tensor_util_env["a" /* convertToTensor */])(labels, 'labels', 'confusionMatrix');
    const $predictions = Object(tensor_util_env["a" /* convertToTensor */])(predictions, 'predictions', 'confusionMatrix');
    // NOTE(review): this assert treats numClasses as optional ("If
    // provided"), but the assert further down requires it unconditionally —
    // the two messages are inconsistent, though behavior is a hard
    // requirement either way.
    util_base["b" /* assert */](numClasses == null || numClasses > 0 && Number.isInteger(numClasses), () => `If provided, numClasses must be a positive integer, ` +
        `but got ${numClasses}`);
    util_base["b" /* assert */]($labels.rank === 1, () => `Expected the rank of labels to be 1, but got ${$labels.rank}`);
    util_base["b" /* assert */]($predictions.rank === 1, () => `Expected the rank of predictions to be 1, ` +
        `but got ${$predictions.rank}`);
    util_base["b" /* assert */]($labels.shape[0] === $predictions.shape[0], () => `Mismatch in the number of examples: ` +
        `${$labels.shape[0]} vs. ${$predictions.shape[0]}. ` +
        `Labels and predictions should have the same number of elements.`);
    util_base["b" /* assert */](numClasses > 0 && Number.isInteger(numClasses), () => `numClasses is required to be a positive integer, but got ` +
        `${numClasses}`);
    // TODO(cais): In the future, if oneHot supports tensors inputs for
    // `numClasses`, `confusionMatrix` can make `numClasses` optional.
    // One-hot both vectors, then labelsT(one-hot) x predictions(one-hot)
    // accumulates counts per (actual, predicted) class pair.
    const oneHotLabels = Object(one_hot["a" /* oneHot */])(Object(cast["a" /* cast */])($labels, 'int32'), numClasses);
    const oneHotPredictions = Object(one_hot["a" /* oneHot */])(Object(cast["a" /* cast */])($predictions, 'int32'), numClasses);
    const oneHotLabelsT = Object(transpose["a" /* transpose */])(oneHotLabels);
    const product = Object(mat_mul["a" /* matMul */])(oneHotLabelsT, oneHotPredictions);
    return Object(cast["a" /* cast */])(product, 'int32');
}
// Public op wrapper around confusionMatrix_ (adds engine bookkeeping).
const confusionMatrix = Object(operation["b" /* op */])({ confusionMatrix_ });
//# sourceMappingURL=confusion_matrix.js.map
// CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/math.js
/**
* @license
* Copyright 2018 Google LLC. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================================
*/
/**
* Exports under the tf.math.* namespace.
*/
//# sourceMappingURL=math.js.map
// EXTERNAL MODULE: ./node_modules/@tensorflow/tfjs-core/dist/kernel_names.js
var kernel_names = __webpack_require__(3);
// EXTERNAL MODULE: ./node_modules/@tensorflow/tfjs-core/dist/kernel_registry.js
var kernel_registry = __webpack_require__(62);
// EXTERNAL MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/tensor3d.js
var tensor3d = __webpack_require__(178);
// CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/browser.js
/**
* @license
* Copyright 2019 Google LLC. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================================
*/
// Lazily-created shared 2D canvas context used by fromPixels_ to rasterize
// image/video/bitmap inputs before reading their pixel data.
let fromPixels2DContext;
/**
* Creates a `tf.Tensor` from an image.
*
* ```js
* const image = new ImageData(1, 1);
* image.data[0] = 100;
* image.data[1] = 150;
* image.data[2] = 200;
* image.data[3] = 255;
*
* tf.browser.fromPixels(image).print();
* ```
*
* @param pixels The input image to construct the tensor from. The
* supported image types are all 4-channel. You can also pass in an image
* object with following attributes:
* `{data: Uint8Array; width: number; height: number}`
* @param numChannels The number of channels of the output tensor. A
* numChannels value less than 4 allows you to ignore channels. Defaults to
* 3 (ignores alpha channel of input image).
*
* @returns A Tensor3D with the shape `[height, width, numChannels]`.
*
* @doc {heading: 'Browser', namespace: 'browser', ignoreCI: true}
*/
function fromPixels_(pixels, numChannels = 3) {
    // Sanity checks.
    if (numChannels > 4) {
        throw new Error('Cannot construct Tensor with more than 4 channels from pixels.');
    }
    if (pixels == null) {
        throw new Error('pixels passed to tf.browser.fromPixels() can not be null');
    }
    // Classify the input into exactly one of the supported pixel sources.
    // The typeof guards keep this working in environments (e.g. web workers,
    // Node) where some DOM constructors are undefined.
    let isPixelData = false;
    let isImageData = false;
    let isVideo = false;
    let isImage = false;
    let isCanvasLike = false;
    let isImageBitmap = false;
    if (pixels.data instanceof Uint8Array) {
        isPixelData = true;
    }
    else if (typeof (ImageData) !== 'undefined' && pixels instanceof ImageData) {
        isImageData = true;
    }
    else if (typeof (HTMLVideoElement) !== 'undefined' &&
        pixels instanceof HTMLVideoElement) {
        isVideo = true;
    }
    else if (typeof (HTMLImageElement) !== 'undefined' &&
        pixels instanceof HTMLImageElement) {
        isImage = true;
        // tslint:disable-next-line: no-any
    }
    else if (pixels.getContext != null) {
        // Duck-typed canvas (HTMLCanvasElement or OffscreenCanvas).
        isCanvasLike = true;
    }
    else if (typeof (ImageBitmap) !== 'undefined' && pixels instanceof ImageBitmap) {
        isImageBitmap = true;
    }
    else {
        throw new Error('pixels passed to tf.browser.fromPixels() must be either an ' +
            `HTMLVideoElement, HTMLImageElement, HTMLCanvasElement, ImageData ` +
            `in browser, or OffscreenCanvas, ImageData in webworker` +
            ` or {data: Uint32Array, width: number, height: number}, ` +
            `but was ${pixels.constructor.name}`);
    }
    if (isVideo) {
        // readyState >= HAVE_CURRENT_DATA means a frame is available to draw.
        const HAVE_CURRENT_DATA_READY_STATE = 2;
        if (isVideo &&
            pixels.readyState <
                HAVE_CURRENT_DATA_READY_STATE) {
            throw new Error('The video element has not loaded data yet. Please wait for ' +
                '`loadeddata` event on the <video> element.');
        }
    }
    // If the current backend has 'FromPixels' registered, it has a more
    // efficient way of handling pixel uploads, so we call that.
    const kernel = Object(kernel_registry["c" /* getKernel */])(kernel_names["ib" /* FromPixels */], engine["a" /* ENGINE */].backendName);
    if (kernel != null) {
        const inputs = { pixels };
        const attrs = { numChannels };
        return engine["a" /* ENGINE */].runKernel(kernel_names["ib" /* FromPixels */], inputs, attrs);
    }
    // Fallback CPU path: read raw RGBA bytes from the source, then repack.
    const [width, height] = isVideo ?
        [
            pixels.videoWidth,
            pixels.videoHeight
        ] :
        [pixels.width, pixels.height];
    let vals;
    if (isCanvasLike) {
        vals =
            // tslint:disable-next-line:no-any
            pixels.getContext('2d').getImageData(0, 0, width, height).data;
    }
    else if (isImageData || isPixelData) {
        vals = pixels.data;
    }
    else if (isImage || isVideo || isImageBitmap) {
        // Rasterize through a shared offscreen 2D context, then read back.
        if (fromPixels2DContext == null) {
            fromPixels2DContext = document.createElement('canvas').getContext('2d');
        }
        fromPixels2DContext.canvas.width = width;
        fromPixels2DContext.canvas.height = height;
        fromPixels2DContext.drawImage(pixels, 0, 0, width, height);
        vals = fromPixels2DContext.getImageData(0, 0, width, height).data;
    }
    let values;
    if (numChannels === 4) {
        // RGBA requested: the source bytes can be copied wholesale.
        values = new Int32Array(vals);
    }
    else {
        // Drop trailing channels (e.g. alpha) by repacking pixel-by-pixel;
        // source data is always 4 bytes (RGBA) per pixel.
        const numPixels = width * height;
        values = new Int32Array(numPixels * numChannels);
        for (let i = 0; i < numPixels; i++) {
            for (let channel = 0; channel < numChannels; ++channel) {
                values[i * numChannels + channel] = vals[i * 4 + channel];
            }
        }
    }
    const outShape = [height, width, numChannels];
    return Object(tensor3d["a" /* tensor3d */])(values, outShape, 'int32');
}
// Helper for |fromPixelsAsync|: detects the plain pixel-data object shape
// ({data: Uint8Array, width, height}), which cannot be wrapped in an
// ImageBitmap.
function browser_isPixelData(pixels) {
    if (pixels == null) {
        return false;
    }
    return pixels.data instanceof Uint8Array;
}
// True only when the host provides a window with a full ImageBitmap API
// (both the ImageBitmap type and window.createImageBitmap).
function isImageBitmapFullySupported() {
    const hasWindow = typeof window !== 'undefined';
    const hasImageBitmapType = typeof (ImageBitmap) !== 'undefined';
    return hasWindow && hasImageBitmapType &&
        window.hasOwnProperty('createImageBitmap');
}
// A pixel source is usable only if it exists and has non-zero dimensions.
function isNonEmptyPixels(pixels) {
    if (pixels == null) {
        return false;
    }
    return pixels.width !== 0 && pixels.height !== 0;
}
// A pixel source may be wrapped into an ImageBitmap only when the platform
// fully supports ImageBitmap, the source is not already an ImageBitmap, it
// has non-zero dimensions, and it is not a raw pixel-data object.
function canWrapPixelsToImageBitmap(pixels) {
    if (!isImageBitmapFullySupported()) {
        return false;
    }
    if (pixels instanceof ImageBitmap) {
        return false;
    }
    return isNonEmptyPixels(pixels) && !browser_isPixelData(pixels);
}
/**
 * Creates a `tf.Tensor` from an image, asynchronously.
 *
 * ```js
 * const image = new ImageData(1, 1);
 * image.data[0] = 100;
 * image.data[1] = 150;
 * image.data[2] = 200;
 * image.data[3] = 255;
 *
 * (await tf.browser.fromPixelsAsync(image)).print();
 * ```
 * Async variant of `fromPixels`. When the `WRAP_TO_IMAGEBITMAP` flag is
 * enabled and the input is eligible, the input is first wrapped into an
 * `ImageBitmap` before being handed to the synchronous implementation.
 *
 * @param pixels The input image to construct the tensor from. The supported
 *     image types are all 4-channel. An object of the shape
 *     `{data: Uint8Array; width: number; height: number}` is also accepted.
 * @param numChannels The number of channels of the output tensor. A value
 *     less than 4 ignores trailing channels. Defaults to 3 (ignores the
 *     alpha channel of the input image).
 *
 * @doc {heading: 'Browser', namespace: 'browser', ignoreCI: true}
 */
async function fromPixelsAsync(pixels, numChannels = 3) {
    let inputs = null;
    // Only wrap when the active backend requests it (WRAP_TO_IMAGEBITMAP
    // flag) and the input is of a wrappable kind.
    const wantsBitmap = Object(environment["c" /* env */])().getBool('WRAP_TO_IMAGEBITMAP') &&
        canWrapPixelsToImageBitmap(pixels);
    if (!wantsBitmap) {
        inputs = pixels;
    }
    else {
        let imageBitmap = null;
        try {
            // createImageBitmap can misbehave in some browsers, e.g.
            // https://bugzilla.mozilla.org/show_bug.cgi?id=1335594 — any
            // failure simply falls back to the raw input. Premultiplied
            // alpha is explicitly disabled.
            // tslint:disable-next-line: no-any
            imageBitmap = await createImageBitmap(pixels, { premultiplyAlpha: 'none' });
        }
        catch (e) {
            imageBitmap = null;
        }
        // createImageBitmap clips to the *content* size. An input such as
        // new Image(10, 10) holding 1 x 1 content would be shrunk to 1 x 1,
        // which is wrong — only use the bitmap when the dimensions survived.
        const dimensionsMatch = imageBitmap != null &&
            imageBitmap.width === pixels.width &&
            imageBitmap.height === pixels.height;
        inputs = dimensionsMatch ? imageBitmap : pixels;
    }
    return fromPixels_(inputs, numChannels);
}
/**
 * Draws a `tf.Tensor` of pixel values to a byte array or optionally a
 * canvas.
 *
 * When the dtype of the input is 'float32', we assume values in the range
 * [0-1]. Otherwise, when input is 'int32', we assume values in the range
 * [0-255].
 *
 * Returns a promise that resolves when the canvas has been drawn to.
 *
 * @param img A rank-2 tensor with shape `[height, width]`, or a rank-3 tensor
 * of shape `[height, width, numChannels]`. If rank-2, draws grayscale. If
 * rank-3, must have depth of 1, 3 or 4. When depth of 1, draws
 * grayscale. When depth of 3, we draw with the first three components of
 * the depth dimension corresponding to r, g, b and alpha = 1. When depth of
 * 4, all four components of the depth dimension correspond to r, g, b, a.
 * @param canvas The canvas to draw to.
 *
 * @doc {heading: 'Browser', namespace: 'browser'}
 */
async function toPixels(img, canvas) {
    let $img = Object(tensor_util_env["a" /* convertToTensor */])(img, 'img', 'toPixels');
    if (!(img instanceof dist_tensor["a" /* Tensor */])) {
        // Assume int32 if user passed a native array.
        const originalImgTensor = $img;
        $img = Object(cast["a" /* cast */])(originalImgTensor, 'int32');
        originalImgTensor.dispose();
    }
    if ($img.rank !== 2 && $img.rank !== 3) {
        throw new Error(`toPixels only supports rank 2 or 3 tensors, got rank ${$img.rank}.`);
    }
    const [height, width] = $img.shape.slice(0, 2);
    // Rank-2 tensors are treated as single-channel (grayscale).
    const depth = $img.rank === 2 ? 1 : $img.shape[2];
    // Depth 2 has no sensible channel mapping; depth > 4 exceeds RGBA.
    if (depth > 4 || depth === 2) {
        throw new Error(`toPixels only supports depth of size ` +
            `1, 3 or 4 but got ${depth}`);
    }
    if ($img.dtype !== 'float32' && $img.dtype !== 'int32') {
        throw new Error(`Unsupported type for toPixels: ${$img.dtype}.` +
            ` Please use float32 or int32 tensors.`);
    }
    const data = await $img.data();
    // float32 values lie in [0, 1] and must be scaled to byte range;
    // int32 values are already in [0, 255].
    const multiplier = $img.dtype === 'float32' ? 255 : 1;
    const bytes = new Uint8ClampedArray(width * height * 4);
    for (let i = 0; i < height * width; ++i) {
        // Alpha defaults to fully opaque unless the tensor provides it
        // (depth 4).
        const rgba = [0, 0, 0, 255];
        for (let d = 0; d < depth; d++) {
            const value = data[i * depth + d];
            if ($img.dtype === 'float32') {
                if (value < 0 || value > 1) {
                    throw new Error(`Tensor values for a float32 Tensor must be in the ` +
                        `range [0 - 1] but encountered ${value}.`);
                }
            }
            else if ($img.dtype === 'int32') {
                if (value < 0 || value > 255) {
                    throw new Error(`Tensor values for a int32 Tensor must be in the ` +
                        `range [0 - 255] but encountered ${value}.`);
                }
            }
            if (depth === 1) {
                // Grayscale: replicate the single channel into r, g and b.
                rgba[0] = value * multiplier;
                rgba[1] = value * multiplier;
                rgba[2] = value * multiplier;
            }
            else {
                rgba[d] = value * multiplier;
            }
        }
        const j = i * 4;
        bytes[j + 0] = Math.round(rgba[0]);
        bytes[j + 1] = Math.round(rgba[1]);
        bytes[j + 2] = Math.round(rgba[2]);
        bytes[j + 3] = Math.round(rgba[3]);
    }
    if (canvas != null) {
        canvas.width = width;
        canvas.height = height;
        const ctx = canvas.getContext('2d');
        const imageData = new ImageData(bytes, width, height);
        ctx.putImageData(imageData, 0, 0);
    }
    // Dispose the intermediate tensor created by convertToTensor/cast above.
    if ($img !== img) {
        $img.dispose();
    }
    return bytes;
}
// Public fromPixels op: wraps the kernel-level fromPixels_ implementation
// with tf's op() helper.
const fromPixels = Object(operation["b" /* op */])({ fromPixels_ });
//# sourceMappingURL=browser.js.map
// CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/gather_nd_util.js
/**
 * Validates the inputs of tf.gatherND() and precomputes the bookkeeping
 * values needed to execute it.
 *
 * @param tensor The tensor that holds the source values.
 * @param indices The int32 tensor that holds the indices to gather with.
 *
 * @returns [resultShape, numUpdates, sliceSize, strides]
 */
function prepareAndValidate(tensor, indices) {
    const tensorRank = tensor.shape.length;
    const indicesRank = indices.shape.length;
    if (tensorRank < 1) {
        throw new Error('tf.gatherND() expects the input to be rank 1 or higher,' +
            ` but the rank was ${tensorRank}.`);
    }
    if (indicesRank < 1) {
        throw new Error('tf.gatherND() expects the indices to be rank 1 or higher,' +
            ` but the rank was ${indicesRank}.`);
    }
    if (indices.dtype !== 'int32') {
        throw new Error('tf.gatherND() expects the indices to be int32 type,' +
            ` but the dtype was ${indices.dtype}.`);
    }
    if (indices.shape[indicesRank - 1] > tensorRank) {
        throw new Error('index innermost dimension length must be <= tensor rank; saw: ' +
            `${indices.shape[indicesRank - 1]} vs. ${tensorRank}`);
    }
    if (Object(util_base["O" /* sizeFromShape */])(tensor.shape) === 0) {
        throw new Error('Requested more than 0 entries, but input is empty.' +
            ` Input shape: ${tensor.shape}.`);
    }
    const indicesShape = indices.shape;
    // The innermost indices dimension selects this many tensor axes.
    const sliceRank = indicesShape[indicesShape.length - 1];
    // Number of gathered slices = product of all but the last indices dim.
    const nResult = indicesShape
        .slice(0, -1)
        .reduce((prod, dim) => prod * dim, 1);
    const inputShape = tensor.shape;
    // Result shape is indices.shape[:-1] + tensor.shape[sliceRank:].
    const resultShape = indicesShape.slice(0, -1);
    let sliceSize = 1;
    for (let dim = sliceRank; dim < tensorRank; ++dim) {
        sliceSize *= inputShape[dim];
        resultShape.push(inputShape[dim]);
    }
    const allStrides = [
        ...Object(util_base["j" /* computeStrides */])(tensor.shape).map(stride => stride / sliceSize),
        1
    ];
    const strides = allStrides.slice(0, sliceRank);
    return [resultShape, nResult, sliceSize, strides];
}
//# sourceMappingURL=gather_nd_util.js.map
// EXTERNAL MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/scatter_nd_util.js
var scatter_nd_util = __webpack_require__(112);
// EXTERNAL MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/slice_util.js
var slice_util = __webpack_require__(113);
// CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/serialization.js
/**
* @license
* Copyright 2018 Google LLC. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================================
*/
/**
 * Serializable defines the serialization contract.
 *
 * TFJS asks serializable classes for their className instead of relying on
 * constructor.name, which minification can mangle.
 */
class Serializable {
    /**
     * Returns the name used for this class in serialization contexts.
     *
     * Usually identical to what constructor.name would give, but stable
     * under minification. Some classes (e.g.
     * initializers.VarianceScaling) also use a non-leaf ancestor here
     * because class hierarchies differ between language implementations.
     */
    getClassName() {
        const ctor = this.constructor;
        return ctor.className;
    }
    /**
     * Builds an instance of T from a ConfigDict.
     *
     * Sufficient for most Serializable descendants; a few override it for
     * special handling.
     * @param cls A Constructor for the class to instantiate.
     * @param config The Configuration for the object.
     */
    /** @nocollapse */
    static fromConfig(cls, config) {
        return new cls(config);
    }
}
/**
 * Singleton registry mapping class names to class constructors.
 *
 * Used during (de)serialization from the cross-language JSON format, where
 * the serialized class name must match the Python-side name when one
 * exists.
 */
class SerializationMap {
    constructor() {
        // className -> [constructor, fromConfig] pairs.
        this.classNameMap = {};
    }
    /**
     * Returns the singleton instance, creating it on first access.
     */
    static getMap() {
        const existing = SerializationMap.instance;
        if (existing != null) {
            return existing;
        }
        const created = new SerializationMap();
        SerializationMap.instance = created;
        return created;
    }
    /**
     * Records the class (and its fromConfig factory) as serializable.
     */
    static register(cls) {
        const registry = SerializationMap.getMap().classNameMap;
        registry[cls.className] = [cls, cls.fromConfig];
    }
}
/**
 * Adds a class to TensorFlow.js's serialization map.
 *
 * Commonly used to register custom Layers so they survive a
 * serialize/deserialize round trip.
 *
 * Example:
 *
 * ```js
 * class MyCustomLayer extends tf.layers.Layer {
 *   static className = 'MyCustomLayer';
 *
 *   constructor(config) {
 *     super(config);
 *   }
 * }
 * tf.serialization.registerClass(MyCustomLayer);
 * ```
 *
 * @param cls The class to be registered. It must have a public static member
 *   called `className` defined and the value must be a non-empty string.
 *
 * @doc {heading: 'Models', subheading: 'Serialization', ignoreCI: true}
 */
function registerClass(cls) {
    const assertFn = Object(util_base["b" /* assert */]);
    assertFn(cls.className != null, () => `Class being registered does not have the static className property defined.`);
    assertFn(typeof cls.className === 'string', () => `className is required to be a string, but got type ` +
        typeof cls.className);
    assertFn(cls.className.length > 0, () => `Class being registered has an empty-string as its className, which is disallowed.`);
    SerializationMap.register(cls);
}
//# sourceMappingURL=serialization.js.map
// EXTERNAL MODULE: ./node_modules/@tensorflow/tfjs-core/dist/tensor_util.js
var tensor_util = __webpack_require__(23);
// EXTERNAL MODULE: ./node_modules/@tensorflow/tfjs-core/dist/test_util.js
var test_util = __webpack_require__(139);
// EXTERNAL MODULE: ./node_modules/@tensorflow/tfjs-core/dist/util.js
var util = __webpack_require__(10);
// CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/version.js
/** @license See the LICENSE file. */
// This code is auto-generated, do not modify this file!
const version = '3.5.0';
//# sourceMappingURL=version.js.map
// EXTERNAL MODULE: ./node_modules/@tensorflow/tfjs-core/dist/globals.js
var globals = __webpack_require__(26);
// EXTERNAL MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/add.js
var add = __webpack_require__(13);
// EXTERNAL MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/div.js
var div = __webpack_require__(15);
// EXTERNAL MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/mul.js
var mul = __webpack_require__(9);
// EXTERNAL MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/sqrt.js
var sqrt = __webpack_require__(37);
// EXTERNAL MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/square.js
var square = __webpack_require__(25);
// EXTERNAL MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/zeros_like.js
var zeros_like = __webpack_require__(20);
// EXTERNAL MODULE: ./node_modules/@tensorflow/tfjs-core/dist/gradients.js
var gradients = __webpack_require__(34);
// EXTERNAL MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/scalar.js
var scalar = __webpack_require__(16);
// CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/optimizers/optimizer.js
/**
* @license
* Copyright 2018 Google LLC. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================================
*/
/** @doc {heading: 'Training', subheading: 'Classes', namespace: 'train'} */
class optimizer_Optimizer extends Serializable {
    /**
     * Executes `f()` and minimizes its scalar output by computing gradients
     * of y with respect to the trainable variables in `varList` (or all
     * trainable variables when no list is given) and applying them.
     *
     * @param f The function to execute and whose output to minimize.
     * @param returnCost Whether to return the scalar cost value produced by
     * executing `f()`.
     * @param varList An optional list of variables to update. If specified,
     * only the trainable variables in varList will be updated by minimize.
     * Defaults to all trainable variables.
     *
     * @doc {heading: 'Training', subheading: 'Optimizers'}
     */
    minimize(f, returnCost = false, varList) {
        const { value, grads } = this.computeGradients(f, varList);
        if (varList == null) {
            this.applyGradients(grads);
        }
        else {
            // Preserve the caller-specified variable order.
            const namedGrads = varList.map(v => ({ name: v.name, tensor: grads[v.name] }));
            this.applyGradients(namedGrads);
        }
        // The gradient tensors are no longer needed once applied.
        Object(globals["d" /* dispose */])(grads);
        if (!returnCost) {
            value.dispose();
            return null;
        }
        return value;
    }
    /**
     * The number of times this optimizer instance has been invoked.
     */
    get iterations() {
        this.iterations_ = this.iterations_ == null ? 0 : this.iterations_;
        return this.iterations_;
    }
    incrementIterations() {
        this.iterations_ = this.iterations + 1;
    }
    /**
     * Executes f() and computes the gradient of its scalar output with
     * respect to the trainable variables in `varList` (or all trainable
     * variables when no list is given).
     *
     * @param f The function to execute and whose output to use for computing
     * gradients with respect to variables.
     * @param varList An optional list of variables to compute gradients with
     * respect to. Defaults to all trainable variables.
     *
     * @doc {heading: 'Training', subheading: 'Optimizers'}
     */
    computeGradients(f, varList) {
        return Object(gradients["f" /* variableGrads */])(f, varList);
    }
    /**
     * Dispose the variables (if any) owned by this optimizer instance.
     */
    dispose() {
        if (this.iterations_ != null) {
            Object(globals["d" /* dispose */])(this.iterations_);
        }
    }
    async saveIterations() {
        if (this.iterations_ == null) {
            this.iterations_ = 0;
        }
        return {
            name: 'iter',
            // TODO(cais): Use 'int64' type when available.
            tensor: Object(scalar["a" /* scalar */])(this.iterations_, 'int32')
        };
    }
    async getWeights() {
        throw new Error('getWeights() is not implemented for this optimizer yet.');
    }
    async setWeights(weightValues) {
        throw new Error(`setWeights() is not implemented for this optimizer class ` +
            `${this.getClassName()}`);
    }
    /**
     * Consumes the first weight value as the iterations counter and returns
     * the remaining weight values.
     *
     * @param weightValues
     * @returns Weight values with the first element consumed and excluded.
     */
    async extractIterations(weightValues) {
        const [first, ...rest] = weightValues;
        this.iterations_ = (await first.tensor.data())[0];
        return rest;
    }
}
Object.defineProperty(optimizer_Optimizer, Symbol.hasInstance, {
    value: (instance) => {
        // Duck-typed check so optimizers from other bundles still match.
        const looksLikeOptimizer = instance.minimize != null &&
            instance.computeGradients != null &&
            instance.applyGradients != null;
        return looksLikeOptimizer;
    }
});
//# sourceMappingURL=optimizer.js.map
// CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/optimizers/adadelta_optimizer.js
/**
* @license
* Copyright 2018 Google LLC. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================================
*/
/** @doclink Optimizer */
/**
 * Adadelta optimizer. For each trainable variable it maintains two
 * non-trainable companion variables: a decaying average of squared
 * gradients (`accumulatedGrads`) and a decaying average of squared updates
 * (`accumulatedUpdates`), both decayed by `rho`; `epsilon` stabilizes the
 * divisions.
 */
class adadelta_optimizer_AdadeltaOptimizer extends optimizer_Optimizer {
    /**
     * @param learningRate Scales each update step.
     * @param rho Decay rate of the running averages.
     * @param epsilon Small constant for numerical stability; defaults to the
     *     current backend's epsilon when null.
     */
    constructor(learningRate, rho, epsilon = null) {
        super();
        this.learningRate = learningRate;
        this.rho = rho;
        this.epsilon = epsilon;
        this.accumulatedGrads = [];
        this.accumulatedUpdates = [];
        if (epsilon == null) {
            this.epsilon = engine["a" /* ENGINE */].backend.epsilon();
        }
    }
    /**
     * Applies the Adadelta update to every registered variable that has a
     * gradient. Accepts either an array of {name, tensor} pairs or a
     * name->tensor map.
     */
    applyGradients(variableGradients) {
        const variableNames = Array.isArray(variableGradients) ?
            variableGradients.map(item => item.name) :
            Object.keys(variableGradients);
        variableNames.forEach((name, i) => {
            const value = engine["a" /* ENGINE */].registeredVariables[name];
            // Accumulators are bookkeeping state, not trainable weights.
            const trainable = false;
            if (this.accumulatedGrads[i] == null) {
                this.accumulatedGrads[i] = {
                    originalName: `${name}/accum_grad`,
                    variable: Object(globals["t" /* tidy */])(() => Object(zeros_like["a" /* zerosLike */])(value).variable(trainable))
                };
            }
            if (this.accumulatedUpdates[i] == null) {
                this.accumulatedUpdates[i] = {
                    originalName: `${name}/accum_var`,
                    variable: Object(globals["t" /* tidy */])(() => Object(zeros_like["a" /* zerosLike */])(value).variable(trainable))
                };
            }
            const gradient = Array.isArray(variableGradients) ?
                variableGradients[i].tensor :
                variableGradients[name];
            // Variables that received no gradient this step are skipped.
            if (gradient == null) {
                return;
            }
            const accumulatedGrad = this.accumulatedGrads[i].variable;
            const accumulatedUpdate = this.accumulatedUpdates[i].variable;
            Object(globals["t" /* tidy */])(() => {
                // E[g^2] <- rho * E[g^2] + (1 - rho) * g^2
                const newAccumulatedGrad = Object(add["a" /* add */])(Object(mul["a" /* mul */])(accumulatedGrad, this.rho), Object(mul["a" /* mul */])(Object(square["a" /* square */])(gradient), 1 - this.rho));
                // update = g * RMS[prev updates] / RMS[grads]
                const updates = Object(mul["a" /* mul */])(Object(div["a" /* div */])(Object(sqrt["a" /* sqrt */])(Object(add["a" /* add */])(accumulatedUpdate, this.epsilon)), Object(sqrt["a" /* sqrt */])(Object(add["a" /* add */])(accumulatedGrad, this.epsilon))), gradient);
                // E[dx^2] <- rho * E[dx^2] + (1 - rho) * update^2
                const newAccumulatedUpdate = Object(add["a" /* add */])(Object(mul["a" /* mul */])(accumulatedUpdate, this.rho), Object(mul["a" /* mul */])(Object(square["a" /* square */])(updates), 1 - this.rho));
                accumulatedGrad.assign(newAccumulatedGrad);
                accumulatedUpdate.assign(newAccumulatedUpdate);
                // value <- value - learningRate * update
                const newValue = Object(add["a" /* add */])(Object(mul["a" /* mul */])(updates, -this.learningRate), value);
                value.assign(newValue);
            });
        });
        this.incrementIterations();
    }
    /** Releases the accumulator variables owned by this optimizer. */
    dispose() {
        if (this.accumulatedUpdates != null) {
            Object(globals["d" /* dispose */])(this.accumulatedGrads.map(v => v.variable));
            Object(globals["d" /* dispose */])(this.accumulatedUpdates.map(v => v.variable));
        }
    }
    async getWeights() {
        // Order matters for Python compatibility.
        const variables = [...this.accumulatedGrads, ...this.accumulatedUpdates];
        return [await this.saveIterations()].concat(variables.map(v => ({ name: v.originalName, tensor: v.variable })));
    }
    /**
     * Restores optimizer state: after the iterations entry, the first half
     * of the values are gradient accumulators, the second half update
     * accumulators.
     */
    async setWeights(weightValues) {
        weightValues = await this.extractIterations(weightValues);
        const variableCount = weightValues.length / 2;
        const trainable = false;
        this.accumulatedGrads =
            weightValues.slice(0, variableCount).map(v => ({
                originalName: v.name,
                variable: v.tensor.variable(trainable)
            }));
        this.accumulatedUpdates =
            weightValues.slice(variableCount, variableCount * 2)
                .map(v => ({
                originalName: v.name,
                variable: v.tensor.variable(trainable)
            }));
    }
    getConfig() {
        return {
            'learningRate': this.learningRate,
            'rho': this.rho,
            'epsilon': this.epsilon
        };
    }
    /** @nocollapse */
    static fromConfig(cls, config) {
        return new cls(config['learningRate'], config['rho'], config['epsilon']);
    }
}
/** @nocollapse */
adadelta_optimizer_AdadeltaOptimizer.className = 'Adadelta'; // Name matters for Python compatibility.
registerClass(adadelta_optimizer_AdadeltaOptimizer);
//# sourceMappingURL=adadelta_optimizer.js.map
// EXTERNAL MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/fill.js
var fill = __webpack_require__(115);
// CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/optimizers/adagrad_optimizer.js
/**
* @license
* Copyright 2018 Google LLC. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================================
*/
/** @doclink Optimizer */
/**
 * Adagrad optimizer: accumulates the sum of squared gradients per variable
 * and divides each step by its square root, so frequently-updated
 * parameters receive progressively smaller effective learning rates.
 */
class adagrad_optimizer_AdagradOptimizer extends optimizer_Optimizer {
    /**
     * @param learningRate Scales each update step.
     * @param initialAccumulatorValue Starting value of the squared-gradient
     *     accumulator. Defaults to 0.1.
     */
    constructor(learningRate, initialAccumulatorValue = 0.1) {
        super();
        this.learningRate = learningRate;
        this.initialAccumulatorValue = initialAccumulatorValue;
        this.accumulatedGrads = [];
    }
    /**
     * Applies the Adagrad update to every registered variable that has a
     * gradient. Accepts either an array of {name, tensor} pairs or a
     * name->tensor map.
     */
    applyGradients(variableGradients) {
        const variableNames = Array.isArray(variableGradients) ?
            variableGradients.map(item => item.name) :
            Object.keys(variableGradients);
        variableNames.forEach((name, i) => {
            const value = engine["a" /* ENGINE */].registeredVariables[name];
            if (this.accumulatedGrads[i] == null) {
                // The accumulator starts at initialAccumulatorValue and is
                // bookkeeping state, not a trainable weight.
                const trainable = false;
                this.accumulatedGrads[i] = {
                    originalName: `${name}/accumulator`,
                    variable: Object(globals["t" /* tidy */])(() => Object(fill["a" /* fill */])(value.shape, this.initialAccumulatorValue)
                        .variable(trainable))
                };
            }
            const gradient = Array.isArray(variableGradients) ?
                variableGradients[i].tensor :
                variableGradients[name];
            // Variables that received no gradient this step are skipped.
            if (gradient == null) {
                return;
            }
            const accumulatedGrad = this.accumulatedGrads[i].variable;
            Object(globals["t" /* tidy */])(() => {
                // accum <- accum + g^2
                const newAccumulatedGrad = Object(add["a" /* add */])(accumulatedGrad, Object(square["a" /* square */])(gradient));
                accumulatedGrad.assign(newAccumulatedGrad);
                // value <- value - lr * g / sqrt(accum + epsilon)
                const newValue = Object(add["a" /* add */])(Object(mul["a" /* mul */])(Object(div["a" /* div */])(gradient, Object(sqrt["a" /* sqrt */])(Object(add["a" /* add */])(newAccumulatedGrad, engine["a" /* ENGINE */].backend.epsilon()))), -this.learningRate), value);
                value.assign(newValue);
            });
        });
        this.incrementIterations();
    }
    /** Releases the accumulator variables owned by this optimizer. */
    dispose() {
        if (this.accumulatedGrads != null) {
            Object(globals["d" /* dispose */])(this.accumulatedGrads.map(v => v.variable));
        }
    }
    async getWeights() {
        // Order matters for Python compatibility.
        return [await this.saveIterations()].concat(this.accumulatedGrads.map(v => ({ name: v.originalName, tensor: v.variable })));
    }
    /** Restores the accumulators saved by getWeights(). */
    async setWeights(weightValues) {
        weightValues = await this.extractIterations(weightValues);
        const trainable = false;
        this.accumulatedGrads = weightValues.map(v => ({ originalName: v.name, variable: v.tensor.variable(trainable) }));
    }
    getConfig() {
        return {
            'learningRate': this.learningRate,
            'initialAccumulatorValue': this.initialAccumulatorValue,
        };
    }
    /** @nocollapse */
    static fromConfig(cls, config) {
        return new cls(config['learningRate'], config['initialAccumulatorValue']);
    }
}
/** @nocollapse */
adagrad_optimizer_AdagradOptimizer.className = 'Adagrad'; // Note: Name matters for Python compatibility.
registerClass(adagrad_optimizer_AdagradOptimizer);
//# sourceMappingURL=adagrad_optimizer.js.map
// EXTERNAL MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/pow.js
var pow = __webpack_require__(53);
// EXTERNAL MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/sub.js
var sub = __webpack_require__(14);
// CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/optimizers/adam_optimizer.js
/**
* @license
* Copyright 2018 Google LLC. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================================
*/
/**
 * Adam optimizer: maintains, per variable, exponential moving averages of
 * gradients (first moment, decayed by beta1) and squared gradients (second
 * moment, decayed by beta2). accBeta1/accBeta2 track beta^t for bias
 * correction of the moment estimates.
 */
class adam_optimizer_AdamOptimizer extends optimizer_Optimizer {
    /**
     * @param learningRate Scales each update step.
     * @param beta1 Decay rate of the first-moment average.
     * @param beta2 Decay rate of the second-moment average.
     * @param epsilon Small constant for numerical stability; defaults to the
     *     current backend's epsilon when null.
     */
    constructor(learningRate, beta1, beta2, epsilon = null) {
        super();
        this.learningRate = learningRate;
        this.beta1 = beta1;
        this.beta2 = beta2;
        this.epsilon = epsilon;
        this.accumulatedFirstMoment = [];
        this.accumulatedSecondMoment = [];
        Object(globals["t" /* tidy */])(() => {
            // accB* will be updated by batch.
            this.accBeta1 = Object(scalar["a" /* scalar */])(beta1).variable();
            this.accBeta2 = Object(scalar["a" /* scalar */])(beta2).variable();
        });
        if (epsilon == null) {
            this.epsilon = engine["a" /* ENGINE */].backend.epsilon();
        }
    }
    /**
     * Applies the Adam update to every registered variable that has a
     * gradient. Accepts either an array of {name, tensor} pairs or a
     * name->tensor map.
     */
    applyGradients(variableGradients) {
        const varNames = Array.isArray(variableGradients) ?
            variableGradients.map(v => v.name) :
            Object.keys(variableGradients);
        Object(globals["t" /* tidy */])(() => {
            // Bias-correction denominators: 1 - beta^t.
            const oneMinusAccBeta1 = Object(sub["a" /* sub */])(1, this.accBeta1);
            const oneMinusAccBeta2 = Object(sub["a" /* sub */])(1, this.accBeta2);
            varNames.forEach((name, i) => {
                const value = engine["a" /* ENGINE */].registeredVariables[name];
                // Moment accumulators are bookkeeping state, not trainable.
                const trainable = false;
                if (this.accumulatedFirstMoment[i] == null) {
                    this.accumulatedFirstMoment[i] = {
                        originalName: `${name}/m`,
                        variable: Object(globals["t" /* tidy */])(() => Object(zeros_like["a" /* zerosLike */])(value).variable(trainable))
                    };
                }
                if (this.accumulatedSecondMoment[i] == null) {
                    this.accumulatedSecondMoment[i] = {
                        originalName: `${name}/v`,
                        variable: Object(globals["t" /* tidy */])(() => Object(zeros_like["a" /* zerosLike */])(value).variable(trainable))
                    };
                }
                const gradient = Array.isArray(variableGradients) ?
                    variableGradients[i].tensor :
                    variableGradients[name];
                // Variables that received no gradient this step are skipped.
                if (gradient == null) {
                    return;
                }
                const firstMoment = this.accumulatedFirstMoment[i].variable;
                const secondMoment = this.accumulatedSecondMoment[i].variable;
                // m <- beta1 * m + (1 - beta1) * g
                const newFirstMoment = Object(add["a" /* add */])(Object(mul["a" /* mul */])(firstMoment, this.beta1), Object(mul["a" /* mul */])(gradient, 1 - this.beta1));
                // v <- beta2 * v + (1 - beta2) * g^2
                const newSecondMoment = Object(add["a" /* add */])(Object(mul["a" /* mul */])(secondMoment, this.beta2), Object(mul["a" /* mul */])(Object(square["a" /* square */])(gradient), 1 - this.beta2));
                const biasCorrectedFirstMoment = Object(div["a" /* div */])(newFirstMoment, oneMinusAccBeta1);
                const biasCorrectedSecondMoment = Object(div["a" /* div */])(newSecondMoment, oneMinusAccBeta2);
                firstMoment.assign(newFirstMoment);
                secondMoment.assign(newSecondMoment);
                // value <- value - lr * m_hat / (sqrt(v_hat) + epsilon)
                const newValue = Object(add["a" /* add */])(Object(mul["a" /* mul */])(Object(div["a" /* div */])(biasCorrectedFirstMoment, Object(add["a" /* add */])(Object(sqrt["a" /* sqrt */])(biasCorrectedSecondMoment), this.epsilon)), -this.learningRate), value);
                value.assign(newValue);
            });
            // Advance the beta powers for the next step's bias correction.
            this.accBeta1.assign(Object(mul["a" /* mul */])(this.accBeta1, this.beta1));
            this.accBeta2.assign(Object(mul["a" /* mul */])(this.accBeta2, this.beta2));
        });
        this.incrementIterations();
    }
    /** Releases the moment accumulators and beta-power variables. */
    dispose() {
        this.accBeta1.dispose();
        this.accBeta2.dispose();
        if (this.accumulatedFirstMoment != null) {
            Object(globals["d" /* dispose */])(this.accumulatedFirstMoment.map(v => v.variable));
        }
        if (this.accumulatedSecondMoment != null) {
            Object(globals["d" /* dispose */])(this.accumulatedSecondMoment.map(v => v.variable));
        }
    }
    async getWeights() {
        // Order matters for Python compatibility.
        const variables = [...this.accumulatedFirstMoment, ...this.accumulatedSecondMoment];
        return [await this.saveIterations()].concat(variables.map(v => ({ name: v.originalName, tensor: v.variable })));
    }
    /**
     * Restores optimizer state: after the iterations entry, the first half
     * of the values are first moments, the second half second moments.
     */
    async setWeights(weightValues) {
        weightValues = await this.extractIterations(weightValues);
        Object(globals["t" /* tidy */])(() => {
            // Rebuild the beta powers from the restored iteration count.
            this.accBeta1.assign(Object(pow["a" /* pow */])(this.beta1, this.iterations_ + 1));
            this.accBeta2.assign(Object(pow["a" /* pow */])(this.beta2, this.iterations_ + 1));
        });
        const variableCount = weightValues.length / 2;
        const trainable = false;
        this.accumulatedFirstMoment =
            weightValues.slice(0, variableCount).map(v => ({
                originalName: v.name,
                variable: v.tensor.variable(trainable)
            }));
        this.accumulatedSecondMoment =
            weightValues.slice(variableCount, variableCount * 2)
                .map(v => ({
                originalName: v.name,
                variable: v.tensor.variable(trainable)
            }));
    }
    getConfig() {
        return {
            'learningRate': this.learningRate,
            'beta1': this.beta1,
            'beta2': this.beta2,
            'epsilon': this.epsilon,
        };
    }
    /** @nocollapse */
    static fromConfig(cls, config) {
        return new cls(config['learningRate'], config['beta1'], config['beta2'], config['epsilon']);
    }
}
/** @nocollapse */
adam_optimizer_AdamOptimizer.className = 'Adam'; // Note: Name matters for Python compatibility.
registerClass(adam_optimizer_AdamOptimizer);
//# sourceMappingURL=adam_optimizer.js.map
// EXTERNAL MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/abs.js
var abs = __webpack_require__(43);
// EXTERNAL MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/maximum.js
var maximum = __webpack_require__(90);
// CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/optimizers/adamax_optimizer.js
/**
* @license
* Copyright 2018 Google LLC. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================================
*/
class adamax_optimizer_AdamaxOptimizer extends optimizer_Optimizer {
    /**
     * Optimizer implementing Adamax, the infinity-norm variant of Adam
     * (https://arxiv.org/abs/1412.6980, section 7.1).
     *
     * @param learningRate Base step size.
     * @param beta1 Exponential decay rate for the first-moment estimates.
     * @param beta2 Decay rate applied to the exponentially weighted
     *     infinity norm.
     * @param epsilon Small constant for numerical stability; when null the
     *     backend's default epsilon is used.
     * @param decay Per-iteration learning-rate decay factor.
     */
    constructor(learningRate, beta1, beta2, epsilon = null, decay = 0.0) {
        super();
        this.learningRate = learningRate;
        this.beta1 = beta1;
        this.beta2 = beta2;
        this.epsilon = epsilon;
        this.decay = decay;
        // Per-variable optimizer slots, allocated lazily in applyGradients().
        this.accumulatedFirstMoment = [];
        this.accumulatedWeightedInfNorm = [];
        Object(globals["t" /* tidy */])(() => {
            // `iteration` counts applied update steps (drives the decay
            // schedule); `accBeta1` tracks beta1^t for bias correction.
            this.iteration = Object(scalar["a" /* scalar */])(0).variable();
            this.accBeta1 = Object(scalar["a" /* scalar */])(beta1).variable();
        });
        if (epsilon == null) {
            this.epsilon = engine["a" /* ENGINE */].backend.epsilon();
        }
    }
    /**
     * Applies one Adamax update step to each registered variable.
     *
     * @param variableGradients Either an array of {name, tensor} pairs or a
     *     map from variable name to gradient tensor.
     */
    applyGradients(variableGradients) {
        const variableNames = Array.isArray(variableGradients) ?
            variableGradients.map(item => item.name) :
            Object.keys(variableGradients);
        Object(globals["t" /* tidy */])(() => {
            const oneMinusAccBeta1 = Object(sub["a" /* sub */])(1, this.accBeta1);
            // Effective (negated) learning rate with 1/(1 + decay*t) schedule.
            const lr = Object(div["a" /* div */])(-this.learningRate, Object(add["a" /* add */])(Object(mul["a" /* mul */])(this.iteration, this.decay), 1));
            variableNames.forEach((name, i) => {
                const value = engine["a" /* ENGINE */].registeredVariables[name];
                const trainable = false;
                // Lazily allocate zero-initialized slots for this variable.
                if (this.accumulatedFirstMoment[i] == null) {
                    this.accumulatedFirstMoment[i] = {
                        originalName: `${name}/m`,
                        variable: Object(zeros_like["a" /* zerosLike */])(value).variable(trainable)
                    };
                }
                if (this.accumulatedWeightedInfNorm[i] == null) {
                    this.accumulatedWeightedInfNorm[i] = {
                        originalName: `${name}/v`,
                        variable: Object(zeros_like["a" /* zerosLike */])(value).variable(trainable)
                    };
                }
                const gradient = Array.isArray(variableGradients) ?
                    variableGradients[i].tensor :
                    variableGradients[name];
                if (gradient == null) {
                    // No gradient supplied for this variable; leave it as-is.
                    return;
                }
                const firstMoment = this.accumulatedFirstMoment[i].variable;
                const weightedInfNorm = this.accumulatedWeightedInfNorm[i].variable;
                // m_t = beta1 * m_{t-1} + (1 - beta1) * g
                const newFirstMoment = Object(add["a" /* add */])(Object(mul["a" /* mul */])(firstMoment, this.beta1), Object(mul["a" /* mul */])(gradient, 1 - this.beta1));
                // u_t = max(beta2 * u_{t-1}, |g|)
                const ut0 = Object(mul["a" /* mul */])(weightedInfNorm, this.beta2);
                const ut1 = Object(abs["a" /* abs */])(gradient);
                const newWeightedInfNorm = Object(maximum["a" /* maximum */])(ut0, ut1);
                firstMoment.assign(newFirstMoment);
                weightedInfNorm.assign(newWeightedInfNorm);
                // theta_t = theta_{t-1} + (lr / (1 - beta1^t)) * m_t / (u_t + eps)
                // (lr is negative, so this is a descent step).
                const newValue = Object(add["a" /* add */])(Object(mul["a" /* mul */])(Object(div["a" /* div */])(lr, oneMinusAccBeta1), Object(div["a" /* div */])(newFirstMoment, Object(add["a" /* add */])(newWeightedInfNorm, this.epsilon))), value);
                value.assign(newValue);
            });
            this.iteration.assign(Object(add["a" /* add */])(this.iteration, 1));
            this.accBeta1.assign(Object(mul["a" /* mul */])(this.accBeta1, this.beta1));
        });
        this.incrementIterations();
    }
    /** Releases every tensor owned by this optimizer. */
    dispose() {
        this.accBeta1.dispose();
        this.iteration.dispose();
        if (this.accumulatedFirstMoment != null) {
            Object(globals["d" /* dispose */])(this.accumulatedFirstMoment.map(v => v.variable));
        }
        if (this.accumulatedWeightedInfNorm != null) {
            Object(globals["d" /* dispose */])(this.accumulatedWeightedInfNorm.map(v => v.variable));
        }
    }
    // Weight (de)serialization is not supported for Adamax in this version.
    async getWeights() {
        throw new Error('getWeights() is not implemented for Adamax yet.');
    }
    async setWeights(weightValues) {
        throw new Error('setWeights() is not implemented for Adamax yet.');
    }
    /** Serializable hyperparameters, consumed by fromConfig(). */
    getConfig() {
        return {
            'learningRate': this.learningRate,
            'beta1': this.beta1,
            'beta2': this.beta2,
            'epsilon': this.epsilon,
            'decay': this.decay
        };
    }
    /** @nocollapse */
    static fromConfig(cls, config) {
        return new cls(config['learningRate'], config['beta1'], config['beta2'], config['epsilon'], config['decay']);
    }
}
/** @nocollapse */
adamax_optimizer_AdamaxOptimizer.className = 'Adamax'; // Note: Name matters for Python compatibility.
registerClass(adamax_optimizer_AdamaxOptimizer);
//# sourceMappingURL=adamax_optimizer.js.map
// CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/optimizers/sgd_optimizer.js
/**
* @license
* Copyright 2018 Google LLC. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================================
*/
/** @doclink Optimizer */
class sgd_optimizer_SGDOptimizer extends optimizer_Optimizer {
    /**
     * Plain stochastic gradient descent: value -= learningRate * gradient.
     *
     * @param learningRate Step size used for every update.
     */
    constructor(learningRate) {
        super();
        this.learningRate = learningRate;
        this.setLearningRate(learningRate);
    }
    /**
     * Applies one SGD step for each named gradient.
     *
     * @param variableGradients Either an array of {name, tensor} pairs or a
     *     map from variable name to gradient tensor.
     */
    applyGradients(variableGradients) {
        const isList = Array.isArray(variableGradients);
        const varNames = isList ? variableGradients.map(v => v.name) :
            Object.keys(variableGradients);
        varNames.forEach((name, i) => {
            const gradient = isList ? variableGradients[i].tensor :
                variableGradients[name];
            if (gradient == null) {
                // No gradient for this variable; skip it.
                return;
            }
            const value = engine["a" /* ENGINE */].registeredVariables[name];
            Object(globals["t" /* tidy */])(() => {
                // `this.c` holds -learningRate, so this subtracts the scaled
                // gradient from the variable.
                value.assign(Object(add["a" /* add */])(Object(mul["a" /* mul */])(this.c, gradient), value));
            });
        });
        this.incrementIterations();
    }
    /**
     * Sets the learning rate of the optimizer.
     */
    setLearningRate(learningRate) {
        this.learningRate = learningRate;
        if (this.c != null) {
            this.c.dispose();
        }
        // Cache -learningRate as a kept scalar so tidy() never reclaims it.
        this.c = Object(globals["l" /* keep */])(Object(scalar["a" /* scalar */])(-learningRate));
    }
    dispose() {
        this.c.dispose();
    }
    async getWeights() {
        // Only the iteration counter is serialized.
        return [await this.saveIterations()];
    }
    async setWeights(weightValues) {
        const remaining = await this.extractIterations(weightValues);
        if (remaining.length !== 0) {
            throw new Error('SGD optimizer does not have settable weights.');
        }
    }
    getConfig() {
        return { 'learningRate': this.learningRate };
    }
    /** @nocollapse */
    static fromConfig(cls, config) {
        return new cls(config['learningRate']);
    }
}
/** @nocollapse */
// Registered under the same name as the Python implementation so serialized
// models round-trip.
sgd_optimizer_SGDOptimizer.className = 'SGD'; // Note: Name matters for Python compatibility.
registerClass(sgd_optimizer_SGDOptimizer);
//# sourceMappingURL=sgd_optimizer.js.map
// CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/optimizers/momentum_optimizer.js
/**
* @license
* Copyright 2018 Google LLC. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================================
*/
/** @doclink Optimizer */
class momentum_optimizer_MomentumOptimizer extends sgd_optimizer_SGDOptimizer {
    /**
     * SGD with (optionally Nesterov) momentum.
     *
     * @param learningRate Step size.
     * @param momentum Momentum coefficient applied to the accumulated
     *     gradient.
     * @param useNesterov Whether to use the Nesterov lookahead variant.
     */
    constructor(learningRate, momentum, useNesterov = false) {
        super(learningRate);
        this.learningRate = learningRate;
        this.momentum = momentum;
        this.useNesterov = useNesterov;
        // One accumulator slot per variable, allocated lazily below.
        this.accumulations = [];
        // Scalar copy of the momentum coefficient used inside applyGradients.
        this.m = Object(scalar["a" /* scalar */])(this.momentum);
    }
    /**
     * Applies one momentum update step per named gradient.
     *
     * @param variableGradients Either an array of {name, tensor} pairs or a
     *     map from variable name to gradient tensor.
     */
    applyGradients(variableGradients) {
        const variableNames = Array.isArray(variableGradients) ?
            variableGradients.map(item => item.name) :
            Object.keys(variableGradients);
        variableNames.forEach((name, i) => {
            const value = engine["a" /* ENGINE */].registeredVariables[name];
            if (this.accumulations[i] == null) {
                const trainable = false;
                this.accumulations[i] = {
                    originalName: `${name}/momentum`,
                    variable: Object(globals["t" /* tidy */])(() => Object(zeros_like["a" /* zerosLike */])(value).variable(trainable))
                };
            }
            const accumulation = this.accumulations[i].variable;
            const gradient = Array.isArray(variableGradients) ?
                variableGradients[i].tensor :
                variableGradients[name];
            if (gradient == null) {
                // No gradient for this variable; skip it.
                return;
            }
            Object(globals["t" /* tidy */])(() => {
                let newValue;
                // a_t = momentum * a_{t-1} + g
                const newAccumulation = Object(add["a" /* add */])(Object(mul["a" /* mul */])(this.m, accumulation), gradient);
                if (this.useNesterov) {
                    // Nesterov lookahead: step along g + momentum * a_t
                    // (this.c is -learningRate, inherited from SGDOptimizer).
                    newValue = Object(add["a" /* add */])(Object(mul["a" /* mul */])(this.c, Object(add["a" /* add */])(gradient, Object(mul["a" /* mul */])(newAccumulation, this.m))), value);
                }
                else {
                    // Standard momentum: step along a_t.
                    newValue = Object(add["a" /* add */])(Object(mul["a" /* mul */])(this.c, newAccumulation), value);
                }
                accumulation.assign(newAccumulation);
                value.assign(newValue);
            });
        });
        this.incrementIterations();
    }
    dispose() {
        this.m.dispose();
        if (this.accumulations != null) {
            Object(globals["d" /* dispose */])(this.accumulations.map(v => v.variable));
        }
    }
    /**
     * Sets the momentum of the optimizer.
     *
     * @param momentum
     */
    setMomentum(momentum) {
        this.momentum = momentum;
        // Bug fix: previously only `this.momentum` was updated, while the
        // scalar `this.m` actually used by applyGradients kept the old value,
        // so changing the momentum had no effect on subsequent updates.
        this.m.dispose();
        this.m = Object(scalar["a" /* scalar */])(momentum);
    }
    async getWeights() {
        // Order matters for Python compatibility.
        return [await this.saveIterations()].concat(this.accumulations.map(v => ({ name: v.originalName, tensor: v.variable })));
    }
    async setWeights(weightValues) {
        weightValues = await this.extractIterations(weightValues);
        const trainable = false;
        this.accumulations = weightValues.map(v => ({ originalName: v.name, variable: v.tensor.variable(trainable) }));
    }
    getConfig() {
        return {
            'learningRate': this.learningRate,
            'momentum': this.momentum,
            'useNesterov': this.useNesterov
        };
    }
    /** @nocollapse */
    static fromConfig(cls, config) {
        return new cls(config['learningRate'], config['momentum'], config['useNesterov']);
    }
}
/** @nocollapse */
// Registered under the same name as the Python implementation so serialized
// models round-trip.
momentum_optimizer_MomentumOptimizer.className = 'Momentum'; // Name matters for Python compatibility.
registerClass(momentum_optimizer_MomentumOptimizer);
//# sourceMappingURL=momentum_optimizer.js.map
// CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/optimizers/rmsprop_optimizer.js
/**
* @license
* Copyright 2018 Google LLC. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================================
*/
/** @doclink Optimizer */
class rmsprop_optimizer_RMSPropOptimizer extends optimizer_Optimizer {
    /**
     * RMSProp optimizer, optionally centered and with momentum.
     *
     * @param learningRate Step size (required).
     * @param decay Discounting factor for the moving average of squared
     *     gradients.
     * @param momentum Momentum coefficient applied to the accumulated update.
     * @param epsilon Small constant for numerical stability; when null the
     *     backend's default epsilon is used.
     * @param centered When true, gradients are additionally normalized by an
     *     estimate of their variance (mean of squares minus squared mean).
     */
    constructor(learningRate, decay = 0.9, momentum = 0.0, epsilon = null, centered = false) {
        super();
        this.learningRate = learningRate;
        this.decay = decay;
        this.momentum = momentum;
        this.epsilon = epsilon;
        // Per-variable optimizer slots, allocated lazily in applyGradients().
        this.accumulatedMeanSquares = [];
        this.accumulatedMoments = [];
        this.accumulatedMeanGrads = [];
        this.centered = centered;
        if (epsilon == null) {
            this.epsilon = engine["a" /* ENGINE */].backend.epsilon();
        }
        if (learningRate == null) {
            throw new Error(`learningRate for RMSPropOptimizer must be defined.`);
        }
    }
    /**
     * Applies one RMSProp update step per named gradient.
     *
     * @param variableGradients Either an array of {name, tensor} pairs or a
     *     map from variable name to gradient tensor.
     */
    applyGradients(variableGradients) {
        const variableNames = Array.isArray(variableGradients) ?
            variableGradients.map(item => item.name) :
            Object.keys(variableGradients);
        variableNames.forEach((name, i) => {
            const value = engine["a" /* ENGINE */].registeredVariables[name];
            const trainable = false;
            // Lazily allocate zero-initialized slots for this variable.
            if (this.accumulatedMeanSquares[i] == null) {
                this.accumulatedMeanSquares[i] = {
                    originalName: `${name}/rms`,
                    variable: Object(globals["t" /* tidy */])(() => Object(zeros_like["a" /* zerosLike */])(value).variable(trainable))
                };
            }
            if (this.accumulatedMoments[i] == null) {
                this.accumulatedMoments[i] = {
                    originalName: `${name}/momentum`,
                    variable: Object(globals["t" /* tidy */])(() => Object(zeros_like["a" /* zerosLike */])(value).variable(trainable))
                };
            }
            if (this.accumulatedMeanGrads[i] == null && this.centered) {
                this.accumulatedMeanGrads[i] = {
                    originalName: `${name}/mg`,
                    variable: Object(globals["t" /* tidy */])(() => Object(zeros_like["a" /* zerosLike */])(value).variable(trainable))
                };
            }
            const gradient = Array.isArray(variableGradients) ?
                variableGradients[i].tensor :
                variableGradients[name];
            if (gradient == null) {
                // No gradient for this variable; skip it.
                return;
            }
            const accumulatedMeanSquare = this.accumulatedMeanSquares[i].variable;
            const accumulatedMoments = this.accumulatedMoments[i].variable;
            Object(globals["t" /* tidy */])(() => {
                // rms_t = decay * rms_{t-1} + (1 - decay) * g^2
                // Computed once here and shared by both branches below. (The
                // original code redundantly recomputed the identical value in
                // the non-centered branch.)
                const newAccumulatedMeanSquare = Object(add["a" /* add */])(Object(mul["a" /* mul */])(accumulatedMeanSquare, this.decay), Object(mul["a" /* mul */])(Object(square["a" /* square */])(gradient), 1 - this.decay));
                if (this.centered) {
                    const accumulatedMeanGrad = this.accumulatedMeanGrads[i].variable;
                    // Centered gradient: also track the running mean of g and
                    // normalize by the variance estimate rms_t - mean_t^2.
                    const newAccumulatedMeanGrad = Object(add["a" /* add */])(Object(mul["a" /* mul */])(accumulatedMeanGrad, this.decay), Object(mul["a" /* mul */])(gradient, 1 - this.decay));
                    const gradContribution = Object(div["a" /* div */])(Object(mul["a" /* mul */])(gradient, this.learningRate), Object(sqrt["a" /* sqrt */])(Object(sub["a" /* sub */])(newAccumulatedMeanSquare, Object(add["a" /* add */])(Object(square["a" /* square */])(newAccumulatedMeanGrad), this.epsilon))));
                    const newAccumulatedMoments = Object(add["a" /* add */])(Object(mul["a" /* mul */])(accumulatedMoments, this.momentum), gradContribution);
                    accumulatedMeanSquare.assign(newAccumulatedMeanSquare);
                    accumulatedMeanGrad.assign(newAccumulatedMeanGrad);
                    accumulatedMoments.assign(newAccumulatedMoments);
                    const newValue = Object(sub["a" /* sub */])(value, newAccumulatedMoments);
                    value.assign(newValue);
                }
                else {
                    // Plain gradient
                    const newAccumulatedMoments = Object(add["a" /* add */])(Object(mul["a" /* mul */])(accumulatedMoments, this.momentum), Object(div["a" /* div */])(Object(mul["a" /* mul */])(gradient, this.learningRate), Object(sqrt["a" /* sqrt */])(Object(add["a" /* add */])(newAccumulatedMeanSquare, this.epsilon))));
                    accumulatedMeanSquare.assign(newAccumulatedMeanSquare);
                    accumulatedMoments.assign(newAccumulatedMoments);
                    const newValue = Object(sub["a" /* sub */])(value, newAccumulatedMoments);
                    value.assign(newValue);
                }
            });
        });
        this.incrementIterations();
    }
    /** Releases every tensor owned by this optimizer. */
    dispose() {
        if (this.accumulatedMeanSquares != null) {
            Object(globals["d" /* dispose */])(this.accumulatedMeanSquares.map(v => v.variable));
        }
        if (this.accumulatedMeanGrads != null && this.centered) {
            Object(globals["d" /* dispose */])(this.accumulatedMeanGrads.map(v => v.variable));
        }
        if (this.accumulatedMoments != null) {
            Object(globals["d" /* dispose */])(this.accumulatedMoments.map(v => v.variable));
        }
    }
    async getWeights() {
        // Order matters for Python compatibility.
        const variables = [...this.accumulatedMeanSquares, ...this.accumulatedMoments];
        if (this.centered) {
            variables.push(...this.accumulatedMeanGrads);
        }
        return [await this.saveIterations()].concat(variables.map(v => ({ name: v.originalName, tensor: v.variable })));
    }
    async setWeights(weightValues) {
        weightValues = await this.extractIterations(weightValues);
        // Weights arrive as [rms..., momentum...] plus [meanGrad...] when
        // centered, so there are 2 or 3 entries per variable.
        const variableCount = this.centered ? weightValues.length / 3 : weightValues.length / 2;
        const trainable = false;
        this.accumulatedMeanSquares =
            weightValues.slice(0, variableCount).map(v => ({
                originalName: v.name,
                variable: v.tensor.variable(trainable)
            }));
        this.accumulatedMoments =
            weightValues.slice(variableCount, variableCount * 2)
                .map(v => ({
                originalName: v.name,
                variable: v.tensor.variable(trainable)
            }));
        if (this.centered) {
            this.accumulatedMeanGrads =
                weightValues.slice(variableCount * 2, variableCount * 3)
                    .map(v => ({
                    originalName: v.name,
                    variable: v.tensor.variable(trainable)
                }));
        }
    }
    /** Serializable hyperparameters, consumed by fromConfig(). */
    getConfig() {
        return {
            'learningRate': this.learningRate,
            'decay': this.decay,
            'momentum': this.momentum,
            'epsilon': this.epsilon,
            'centered': this.centered
        };
    }
    /** @nocollapse */
    static fromConfig(cls, config) {
        return new cls(config['learningRate'], config['decay'], config['momentum'], config['epsilon'], config['centered']);
    }
}
/** @nocollapse */
// Registered under the same name as the Python implementation so serialized
// models round-trip.
rmsprop_optimizer_RMSPropOptimizer.className = 'RMSProp'; // Note: Name matters for Python compatibility.
registerClass(rmsprop_optimizer_RMSPropOptimizer);
//# sourceMappingURL=rmsprop_optimizer.js.map
// EXTERNAL MODULE: ./node_modules/@tensorflow/tfjs-core/dist/types.js
var dist_types = __webpack_require__(89);
// EXTERNAL MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/ops.js + 73 modules
var ops = __webpack_require__(21);
// EXTERNAL MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/loss_ops_utils.js
var loss_ops_utils = __webpack_require__(35);
// CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/optimizers/optimizer_constructors.js
/**
* @license
* Copyright 2018 Google LLC. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================================
*/
/**
 * Static factory methods for every built-in optimizer; these are surfaced to
 * users as the `tf.train.*` namespace.
 */
class optimizer_constructors_OptimizerConstructors {
    /**
     * Constructs a `tf.SGDOptimizer` that uses stochastic gradient descent.
     *
     * ```js
     * // Fit a quadratic function by learning the coefficients a, b, c.
     * const xs = tf.tensor1d([0, 1, 2, 3]);
     * const ys = tf.tensor1d([1.1, 5.9, 16.8, 33.9]);
     *
     * const a = tf.scalar(Math.random()).variable();
     * const b = tf.scalar(Math.random()).variable();
     * const c = tf.scalar(Math.random()).variable();
     *
     * // y = a * x^2 + b * x + c.
     * const f = x => a.mul(x.square()).add(b.mul(x)).add(c);
     * const loss = (pred, label) => pred.sub(label).square().mean();
     *
     * const learningRate = 0.01;
     * const optimizer = tf.train.sgd(learningRate);
     *
     * // Train the model.
     * for (let i = 0; i < 10; i++) {
     *   optimizer.minimize(() => loss(f(xs), ys));
     * }
     *
     * // Make predictions.
     * console.log(
     *     `a: ${a.dataSync()}, b: ${b.dataSync()}, c: ${c.dataSync()}`);
     * const preds = f(xs).dataSync();
     * preds.forEach((pred, i) => {
     *   console.log(`x: ${i}, pred: ${pred}`);
     * });
     * ```
     *
     * @param learningRate The learning rate to use for the SGD algorithm.
     *
     * @doc {heading: 'Training', subheading: 'Optimizers', namespace: 'train'}
     */
    static sgd(learningRate) {
        return new sgd_optimizer_SGDOptimizer(learningRate);
    }
    /**
     * Constructs a `tf.MomentumOptimizer` that uses momentum gradient
     * descent.
     *
     * See
     * [http://proceedings.mlr.press/v28/sutskever13.pdf](
     * http://proceedings.mlr.press/v28/sutskever13.pdf)
     *
     * @param learningRate The learning rate to use for the Momentum gradient
     * descent algorithm.
     * @param momentum The momentum to use for the momentum gradient descent
     * algorithm.
     * @param useNesterov Whether to use the Nesterov lookahead variant.
     *
     * @doc {heading: 'Training', subheading: 'Optimizers', namespace: 'train'}
     */
    static momentum(learningRate, momentum, useNesterov = false) {
        return new momentum_optimizer_MomentumOptimizer(learningRate, momentum, useNesterov);
    }
    /**
     * Constructs a `tf.RMSPropOptimizer` that uses RMSProp gradient
     * descent. This implementation uses plain momentum and is not centered
     * version of RMSProp.
     *
     * See
     * [http://www.cs.toronto.edu/~tijmen/csc321/slides/lecture_slides_lec6.pdf](
     * http://www.cs.toronto.edu/~tijmen/csc321/slides/lecture_slides_lec6.pdf)
     *
     * @param learningRate The learning rate to use for the RMSProp gradient
     * descent algorithm.
     * @param decay The discounting factor for the history/coming gradient.
     * @param momentum The momentum to use for the RMSProp gradient descent
     * algorithm.
     * @param epsilon Small value to avoid zero denominator.
     * @param centered If true, gradients are normalized by the estimated
     * variance of the gradient.
     *
     * @doc {heading: 'Training', subheading: 'Optimizers', namespace: 'train'}
     */
    static rmsprop(learningRate, decay = .9, momentum = 0.0, epsilon = null, centered = false) {
        return new rmsprop_optimizer_RMSPropOptimizer(learningRate, decay, momentum, epsilon, centered);
    }
    /**
     * Constructs a `tf.AdamOptimizer` that uses the Adam algorithm.
     * See [https://arxiv.org/abs/1412.6980](https://arxiv.org/abs/1412.6980)
     *
     * @param learningRate The learning rate to use for the Adam gradient
     * descent algorithm.
     * @param beta1 The exponential decay rate for the 1st moment estimates.
     * @param beta2 The exponential decay rate for the 2nd moment estimates.
     * @param epsilon A small constant for numerical stability.
     *
     * @doc {heading: 'Training', subheading: 'Optimizers', namespace: 'train'}
     */
    static adam(learningRate = 0.001, beta1 = 0.9, beta2 = 0.999, epsilon = null) {
        return new adam_optimizer_AdamOptimizer(learningRate, beta1, beta2, epsilon);
    }
    /**
     * Constructs a `tf.AdadeltaOptimizer` that uses the Adadelta algorithm.
     * See [https://arxiv.org/abs/1212.5701](https://arxiv.org/abs/1212.5701)
     *
     * @param learningRate The learning rate to use for the Adadelta gradient
     * descent algorithm.
     * @param rho The learning rate decay over each update.
     * @param epsilon A constant epsilon used to better condition the grad
     * update.
     *
     * @doc {heading: 'Training', subheading: 'Optimizers', namespace: 'train'}
     */
    static adadelta(learningRate = .001, rho = .95, epsilon = null) {
        return new adadelta_optimizer_AdadeltaOptimizer(learningRate, rho, epsilon);
    }
    /**
     * Constructs a `tf.AdamaxOptimizer` that uses the Adamax algorithm.
     * See [https://arxiv.org/abs/1412.6980](https://arxiv.org/abs/1412.6980)
     *
     * @param learningRate The learning rate to use for the Adamax gradient
     * descent algorithm.
     * @param beta1 The exponential decay rate for the 1st moment estimates.
     * @param beta2 The exponential decay rate for the 2nd moment estimates.
     * @param epsilon A small constant for numerical stability.
     * @param decay The learning rate decay over each update.
     *
     * @doc {heading: 'Training', subheading: 'Optimizers', namespace: 'train'}
     */
    static adamax(learningRate = 0.002, beta1 = 0.9, beta2 = 0.999, epsilon = null, decay = 0.0) {
        return new adamax_optimizer_AdamaxOptimizer(learningRate, beta1, beta2, epsilon, decay);
    }
    /**
     * Constructs a `tf.AdagradOptimizer` that uses the Adagrad algorithm.
     * See
     * [http://www.jmlr.org/papers/volume12/duchi11a/duchi11a.pdf](
     * http://www.jmlr.org/papers/volume12/duchi11a/duchi11a.pdf)
     * or
     * [http://ruder.io/optimizing-gradient-descent/index.html#adagrad](
     * http://ruder.io/optimizing-gradient-descent/index.html#adagrad)
     *
     * @param learningRate The learning rate to use for the Adagrad gradient
     * descent algorithm.
     * @param initialAccumulatorValue Starting value for the accumulators, must be
     * positive.
     *
     * @doc {heading: 'Training', subheading: 'Optimizers', namespace: 'train'}
     */
    static adagrad(learningRate, initialAccumulatorValue = 0.1) {
        return new adagrad_optimizer_AdagradOptimizer(learningRate, initialAccumulatorValue);
    }
}
//# sourceMappingURL=optimizer_constructors.js.map
// CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/train.js
/**
* @license
* Copyright 2018 Google LLC. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================================
*/
// So typings can propagate.
// tslint:disable-next-line:no-unused-expression
// Referencing the optimizer classes here keeps their typings reachable; the
// expression itself is intentionally unused.
[momentum_optimizer_MomentumOptimizer, sgd_optimizer_SGDOptimizer, adadelta_optimizer_AdadeltaOptimizer, adagrad_optimizer_AdagradOptimizer,
    rmsprop_optimizer_RMSPropOptimizer, adamax_optimizer_AdamaxOptimizer, adam_optimizer_AdamOptimizer];
// Factory functions for every built-in optimizer, exposed as `tf.train`.
const train = {
    sgd: optimizer_constructors_OptimizerConstructors.sgd,
    momentum: optimizer_constructors_OptimizerConstructors.momentum,
    adadelta: optimizer_constructors_OptimizerConstructors.adadelta,
    adagrad: optimizer_constructors_OptimizerConstructors.adagrad,
    rmsprop: optimizer_constructors_OptimizerConstructors.rmsprop,
    adamax: optimizer_constructors_OptimizerConstructors.adamax,
    adam: optimizer_constructors_OptimizerConstructors.adam
};
//# sourceMappingURL=train.js.map
// EXTERNAL MODULE: ./node_modules/@tensorflow/tfjs-core/dist/browser_util.js
var browser_util = __webpack_require__(252);
// EXTERNAL MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/axis_util.js
var axis_util = __webpack_require__(39);
// EXTERNAL MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/broadcast_util.js
var broadcast_util = __webpack_require__(17);
// CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/concat_util.js
/**
* @license
* Copyright 2017 Google LLC. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================================
*/
// Validates that a list of shapes can be concatenated along `axis`: all
// shapes must have the same rank, `axis` must be in range, and every
// dimension other than `axis` must match the first shape.
function assertParamsConsistent(shapes, axis) {
    const rank = shapes[0].length;
    shapes.forEach((shape, i) => {
        util_base["b" /* assert */](shape.length === rank, () => `Error in concat${rank}D: rank of tensors[${i}] must be the same ` +
            `as the rank of the rest (${rank})`);
    });
    util_base["b" /* assert */](axis >= 0 && axis < rank, () => `Error in concat${rank}D: axis must be between 0 and ${rank - 1}.`);
    const firstShape = shapes[0];
    shapes.forEach((shape, i) => {
        for (let r = 0; r < rank; r++) {
            // Bug fix: the message previously interpolated the tensor index
            // `i` where the mismatching axis `r` was intended.
            util_base["b" /* assert */]((r === axis) || (shape[r] === firstShape[r]), () => `Error in concat${rank}D: Shape of tensors[${i}] (${shape}) ` +
                `does not match the shape of the rest (${firstShape}) ` +
                `along the non-concatenated axis ${r}.`);
        }
    });
}
// Computes the concatenated output shape: it matches the first input shape
// everywhere except along `axis`, where the sizes of all inputs are summed.
function computeOutShape(shapes, axis) {
    return shapes[0].map((dim, d) =>
        d === axis ? shapes.reduce((total, shape) => total + shape[d], 0) : dim);
}
//# sourceMappingURL=concat_util.js.map
// EXTERNAL MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/conv_util.js
var conv_util = __webpack_require__(31);
// EXTERNAL MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/fused_util.js
var fused_util = __webpack_require__(44);
// CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/reduce_util.js
/**
* @license
* Copyright 2017 Google LLC. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================================
*/
/**
 * Inputs of size above this threshold will be parallelized by calling multiple
 * shader programs.
 */
const PARALLELIZE_THRESHOLD = 30;
/**
 * Picks the reduction window size: small inputs are reduced in a single pass,
 * larger ones use a divisor of `inSize` close to its square root.
 */
function computeOptimalWindowSize(inSize) {
    return inSize <= PARALLELIZE_THRESHOLD ?
        inSize :
        Object(util_base["G" /* nearestDivisor */])(inSize, Math.floor(Math.sqrt(inSize)));
}
//# sourceMappingURL=reduce_util.js.map
// CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/rotate_util.js
/**
* @license
* Copyright 2020 Google LLC. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================================
*/
// Returns the image center in pixels. `center` is either a single fraction
// applied to both axes or an [xFraction, yFraction] pair; fractions are
// scaled by the image width (x) and height (y).
function getImageCenter(center, imageHeight, imageWidth) {
    const [fracX, fracY] = typeof center === 'number' ? [center, center] : center;
    return [imageWidth * fracX, imageHeight * fracY];
}
//# sourceMappingURL=rotate_util.js.map
// CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/array_ops_util.js
/**
* @license
* Copyright 2018 Google LLC. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================================
*/
/**
* Gets the new shape of the input Tensor after it's been reshaped
* to:
* [blockShape[0], ..., blockShape[M-1], batch / prod(blockShape),
* inputShape[1], ..., inputShape[N-1]]
*
* See step 1: https://www.tensorflow.org/api_docs/python/tf/batch_to_space_nd
*/
/**
 * Gets the new shape of the input Tensor after it's been reshaped to:
 * [blockShape[0], ..., blockShape[M-1], batch / prod(blockShape),
 *  inputShape[1], ..., inputShape[N-1]]
 * (or the spaceToBatch equivalent when `batchToSpace` is false).
 *
 * See step 1: https://www.tensorflow.org/api_docs/python/tf/batch_to_space_nd
 */
function getReshaped(inputShape, blockShape, prod, batchToSpace = true) {
    if (batchToSpace) {
        return [...blockShape, inputShape[0] / prod, ...inputShape.slice(1)];
    }
    // spaceToBatch: batch first, then each spatial dim split into
    // (size / block, block) pairs, then the remaining dims unchanged.
    const reshaped = [inputShape[0]];
    blockShape.forEach((block, i) => {
        reshaped.push(inputShape[i + 1] / block, block);
    });
    return reshaped.concat(inputShape.slice(blockShape.length + 1));
}
/**
* Gets the permutation that will transpose the dimensions of the
* reshaped tensor to shape:
*
* [batch / prod(block_shape),inputShape[1], blockShape[0], ...,
* inputShape[M], blockShape[M-1],inputShape[M+1], ..., inputShape[N-1]]
*
* see step 2: https://www.tensorflow.org/api_docs/python/tf/batch_to_space_nd
*/
/**
 * Builds the transpose permutation for the reshaped tensor used by
 * batchToSpaceND / spaceToBatchND (step 2 of the TF reference algorithm).
 */
function getPermuted(reshapedRank, blockShapeRank, batchToSpace = true) {
    if (batchToSpace) {
        const permuted = [blockShapeRank];
        for (let axis = blockShapeRank + 1; axis < reshapedRank; ++axis) {
            permuted.push(axis);
            if (axis <= 2 * blockShapeRank) {
                // Interleave each spatial dim with its block dim.
                permuted.push(axis - (blockShapeRank + 1));
            }
        }
        return permuted;
    }
    // spaceToBatch: block dims go before the batch axis, the rest after.
    const beforeBatch = [];
    const afterBatch = [];
    for (let axis = 1; axis < reshapedRank; ++axis) {
        const isBlockDim = axis < blockShapeRank * 2 + 1 && axis % 2 === 0;
        (isBlockDim ? beforeBatch : afterBatch).push(axis);
    }
    return [...beforeBatch, 0, ...afterBatch];
}
/**
* Gets the shape of the reshaped and permuted input Tensor before any cropping
* is applied. The new shape will be:
*
* [batch / prod(blockShape),inputShape[1] * blockShape[0], ...,
* inputShape[M] * blockShape[M-1],inputShape[M+1], ..., inputShape[N-1]]
*
* See step 3: https://www.tensorflow.org/api_docs/python/tf/batch_to_space_nd
*/
/**
 * Shape of the reshaped-and-permuted tensor before cropping/padding
 * (step 3 of the TF batch_to_space_nd reference algorithm).
 */
function getReshapedPermuted(inputShape, blockShape, prod, batchToSpace = true) {
    // Batch shrinks for batchToSpace, grows for spaceToBatch.
    const batchDim = batchToSpace ? inputShape[0] / prod : inputShape[0] * prod;
    const result = [batchDim];
    for (let axis = 1; axis < inputShape.length; ++axis) {
        if (axis > blockShape.length) {
            // Non-spatial trailing dimensions pass through unchanged.
            result.push(inputShape[axis]);
        }
        else if (batchToSpace) {
            result.push(blockShape[axis - 1] * inputShape[axis]);
        }
        else {
            result.push(inputShape[axis] / blockShape[axis - 1]);
        }
    }
    return result;
}
/**
* Converts the crops argument into the beginning coordinates of a slice
* operation.
*/
/**
 * Converts the crops argument into the beginning coordinates of a slice.
 * `blockShape` here is the number of spatial dimensions (a count).
 */
function getSliceBeginCoords(crops, blockShape) {
    const spatialBegins =
        Array.from({ length: blockShape }, (_, axis) => crops[axis][0]);
    // Batch axis always starts at 0.
    return [0, ...spatialBegins];
}
/**
* Converts the crops argument into the size of a slice operation. When
* combined with getSliceBeginCoords this function allows the reshaped and
* permuted Tensor to be cropped to its final output shape of:
*
* inputShape[1] * blockShape[0] - crops[0,0] - crops[0,1], ...,
* inputShape[M] * blockShape[M-1] -crops[M-1,0] -
* crops[M-1,1],inputShape[M+1], ..., inputShape[N-1]]
*
* See step 4: https://www.tensorflow.org/api_docs/python/tf/batch_to_space_nd
*/
/**
 * Converts the crops argument into the size of a slice operation.
 * `blockShape` here is the number of spatial dimensions (a count).
 */
function getSliceSize(uncroppedShape, crops, blockShape) {
    // Batch axis is kept whole.
    const size = [uncroppedShape[0]];
    for (let axis = 0; axis < blockShape; ++axis) {
        const [cropStart, cropEnd] = crops[axis];
        size.push(uncroppedShape[axis + 1] - cropStart - cropEnd);
    }
    return size;
}
//# sourceMappingURL=array_ops_util.js.map
// EXTERNAL MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/selu_util.js
var selu_util = __webpack_require__(147);
// CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/erf_util.js
/**
* @license
* Copyright 2018 Google LLC. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================================
*/
// Coefficients for a polynomial approximation of the error function erf(x).
// NOTE(review): these match the classic rational-approximation constants
// (presumably Abramowitz & Stegun 7.1.26) — confirm against the erf kernel
// that consumes them.
const ERF_P = 0.3275911;
const ERF_A1 = 0.254829592;
const ERF_A2 = -0.284496736;
const ERF_A3 = 1.421413741;
const ERF_A4 = -1.453152027;
const ERF_A5 = 1.061405429;
//# sourceMappingURL=erf_util.js.map
// CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/log.js
/**
* @license
* Copyright 2018 Google LLC. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================================
*/
// Logs a warning to the console unless the IS_TEST env flag is set.
function warn(...msg) {
    const isTest = Object(environment["c" /* env */])().getBool('IS_TEST');
    if (isTest) {
        return;
    }
    console.warn(...msg);
}
// Logs a message to the console unless the IS_TEST env flag is set.
function log(...msg) {
    const isTest = Object(environment["c" /* env */])().getBool('IS_TEST');
    if (isTest) {
        return;
    }
    console.log(...msg);
}
//# sourceMappingURL=log.js.map
// CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/backends/complex_util.js
/**
* @license
* Copyright 2018 Google LLC. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================================
*/
/**
* Merges real and imaginary Float32Arrays into a single complex Float32Array.
*
* The memory layout is interleaved as follows:
* real: [r0, r1, r2]
* imag: [i0, i1, i2]
* complex: [r0, i0, r1, i1, r2, i2]
*
* This is the inverse of splitRealAndImagArrays.
*
* @param real The real values of the complex tensor values.
* @param imag The imag values of the complex tensor values.
* @returns A complex tensor as a Float32Array with merged values.
*/
/**
 * Interleaves two equal-length Float32Arrays into a single complex array:
 * [r0, i0, r1, i1, ...]. Inverse of splitRealAndImagArrays.
 * @throws Error when the two arrays differ in length.
 */
function mergeRealAndImagArrays(real, imag) {
    if (real.length !== imag.length) {
        throw new Error(`Cannot merge real and imag arrays of different lengths. real:` +
            `${real.length}, imag: ${imag.length}.`);
    }
    const merged = new Float32Array(real.length * 2);
    for (let src = 0; src < real.length; ++src) {
        merged[2 * src] = real[src];
        merged[2 * src + 1] = imag[src];
    }
    return merged;
}
/**
* Splits a complex Float32Array into real and imag parts.
*
* The memory layout is interleaved as follows:
* complex: [r0, i0, r1, i1, r2, i2]
* real: [r0, r1, r2]
* imag: [i0, i1, i2]
*
* This is the inverse of mergeRealAndImagArrays.
*
* @param complex The complex tensor values.
* @returns An object with real and imag Float32Array components of the complex
* tensor.
*/
/**
 * De-interleaves a complex Float32Array [r0, i0, r1, i1, ...] into separate
 * real and imag arrays. Inverse of mergeRealAndImagArrays.
 */
function splitRealAndImagArrays(complex) {
    const count = complex.length / 2;
    const real = new Float32Array(count);
    const imag = new Float32Array(count);
    for (let k = 0; k < count; ++k) {
        real[k] = complex[2 * k];
        imag[k] = complex[2 * k + 1];
    }
    return { real, imag };
}
/**
* Extracts even indexed complex values in the given array.
* @param complex The complex tensor values
*/
/**
 * Extracts the even-indexed complex values (pairs at offsets 0, 4, 8, ...)
 * from an interleaved complex array.
 */
function complexWithEvenIndex(complex) {
    const count = Math.ceil(complex.length / 4);
    const real = new Float32Array(count);
    const imag = new Float32Array(count);
    for (let k = 0; k < count; ++k) {
        // Each complex value occupies two slots; we take every other pair.
        real[k] = complex[4 * k];
        imag[k] = complex[4 * k + 1];
    }
    return { real, imag };
}
/**
* Extracts odd indexed comple values in the given array.
* @param complex The complex tensor values
*/
/**
 * Extracts the odd-indexed complex values (pairs at offsets 2, 6, 10, ...)
 * from an interleaved complex array.
 */
function complexWithOddIndex(complex) {
    const count = Math.floor(complex.length / 4);
    const real = new Float32Array(count);
    const imag = new Float32Array(count);
    for (let k = 0; k < count; ++k) {
        // Odd complex entries start two slots past the even ones.
        real[k] = complex[4 * k + 2];
        imag[k] = complex[4 * k + 3];
    }
    return { real, imag };
}
/**
* Get the map representing a complex value in the given array.
* @param complex The complex tensor values.
* @param index An index of the target complex value.
*/
/**
 * Reads the complex value at `index` from an interleaved complex array.
 */
function getComplexWithIndex(complex, index) {
    const base = 2 * index;
    return { real: complex[base], imag: complex[base + 1] };
}
/**
* Insert a given complex value into the TypedArray.
* @param data The array in which the complex value is inserted.
* @param c The complex value to be inserted.
* @param index An index of the target complex value.
*/
/**
 * Writes the complex value (real, imag) into `data` at complex slot `index`.
 */
function assignToTypedArray(data, real, imag, index) {
    const base = 2 * index;
    data[base] = real;
    data[base + 1] = imag;
}
/**
* Make the list of exponent terms used by FFT.
*/
/**
 * Precomputes the n/2 twiddle factors e^(±2πik/n) used by FFT.
 * `inverse` flips the sign of the angle.
 */
function exponents(n, inverse) {
    const half = n / 2;
    const real = new Float32Array(half);
    const imag = new Float32Array(half);
    const direction = inverse ? 2 : -2;
    for (let k = 0; k < Math.ceil(half); k++) {
        const angle = direction * Math.PI * (k / n);
        real[k] = Math.cos(angle);
        imag[k] = Math.sin(angle);
    }
    return { real, imag };
}
/**
* Make the exponent term used by FFT.
*/
function exponent(k, n, inverse) {
const x = (inverse ? 2 : -2) * Math.PI * (k / n);
const real = Math.cos(x);
const imag = Math.sin(x);
return { real, imag };
}
//# sourceMappingURL=complex_util.js.map
// CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/backends/einsum_util.js
/**
* @license
* Copyright 2021 Google LLC. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================================
*/
// Token separating input subscripts from the output subscript in an einsum
// equation, e.g. "ij,jk->ik".
const ARROW = '->';
// Global regex used to count arrow occurrences in an equation string.
const ARROW_REGEX = /->/g;
// Separator between the subscripts of different input tensors.
const COMMA = ',';
// Ellipsis notation; explicitly rejected by the equation decoder below.
const ELLIPSIS = '...';
/**
* Parse an equation for einsum.
*
* @param equation The einsum equation (e.g., "ij,jk->ik").
* @param numTensors Number of tensors provided along with `equation`. Used to
* check matching number of input tensors.
* @returns An object consisting of the following fields:
* - allDims: all dimension names as strings.
* - summedDims: a list of all dimensions being summed over, as indices to
* the elements of `allDims`.
* - idDims: indices of the dimensions in each input tensor, as indices to
* the elements of `allDims.
*/
function decodeEinsumEquation(equation, numTensors) {
    equation = equation.replace(/\s/g, ''); // Remove whitespace in equation.
    // Count "->" occurrences by comparing lengths before/after removing them.
    const numArrows = (equation.length - equation.replace(ARROW_REGEX, '').length) /
        ARROW.length;
    if (numArrows < 1) {
        throw new Error('Equations without an arrow are not supported.');
    }
    else if (numArrows > 1) {
        throw new Error(`Equation must contain exactly one arrow ("${ARROW}").`);
    }
    const [inputString, outputString] = equation.split(ARROW);
    Object(util_base["b" /* assert */])(inputString.indexOf(ELLIPSIS) === -1, () => `The ellipsis notation ("${ELLIPSIS}") is not supported yet.`);
    const inputTerms = inputString.split(COMMA);
    const numInputs = inputTerms.length;
    if (numTensors !== numInputs) {
        throw new Error(`Expected ${numInputs} input tensors, received ${numTensors}`);
    }
    if (numInputs > 2) {
        throw new Error('Support for more than 2 input tensors is not implemented yet.');
    }
    // Collect dimension names, output dims first, so indices 0..numOutDims-1
    // of allDims correspond to output axes.
    const allDims = [];
    for (let i = 0; i < outputString.length; ++i) {
        const dimName = outputString[i];
        // Every output label must appear in at least one input term.
        if (!inputTerms.some(inputTerm => inputTerm.indexOf(dimName) !== -1)) {
            throw new Error(`Output subscripts contain the label ${dimName} ` +
                `not present in the input subscripts.`);
        }
        if (allDims.indexOf(dimName) === -1) {
            allDims.push(dimName);
        }
    }
    // Append input-only labels (these are the summed-over dimensions).
    for (let i = 0; i < inputString.length; ++i) {
        const dimName = inputString[i];
        if (allDims.indexOf(dimName) === -1 && dimName !== COMMA) {
            allDims.push(dimName);
        }
    }
    // idDims[i][j] is the index into allDims for axis j of input tensor i.
    const idDims = new Array(inputTerms.length);
    for (let i = 0; i < numInputs; ++i) {
        if (new Set(inputTerms[i].split('')).size !== inputTerms[i].length) {
            throw new Error(`Found duplicate axes in input component ${inputTerms[i]}. ` +
                `Support for duplicate axes in input is not implemented yet.`);
        }
        idDims[i] = [];
        for (let j = 0; j < inputTerms[i].length; ++j) {
            idDims[i].push(allDims.indexOf(inputTerms[i][j]));
        }
    }
    const numDims = allDims.length; // Number of unique dimensions.
    const numOutDims = outputString.length; // Number of output dimensions.
    const summedDims = []; // Dimensions being summed over.
    // Because output dims were added to allDims first, every index at or
    // beyond numOutDims names a dimension absent from the output.
    for (let i = numOutDims; i < numDims; ++i) {
        summedDims.push(i);
    }
    return { allDims, summedDims, idDims };
}
/**
* Get the permutation for a given input tensor.
*
* @param nDims Total number of dimension of all tensors involved in the einsum
* operation.
* @param idDims Dimension indices involve in the tensor in question.
* @returns An object consisting of the following fields:
* - permutationIndices: Indices to permute the axes of the tensor with.
* - expandDims: Indices to the dimension that need to be expanded from the
* tensor after permutation.
*/
/**
 * Computes, for one einsum operand, the axis permutation and the axes that
 * must be expanded after permuting (dimensions absent from the operand).
 */
function getEinsumPermutation(nDims, idDims) {
    // positions[d] = axis of the operand holding global dimension d, or -1.
    const positions = new Array(nDims).fill(-1);
    idDims.forEach((dim, axis) => {
        positions[dim] = axis;
    });
    const expandDims = [];
    positions.forEach((pos, dim) => {
        if (pos === -1) {
            expandDims.push(dim);
        }
    });
    const permutationIndices = positions.filter(pos => pos !== -1);
    return { permutationIndices, expandDims };
}
/**
* Checks that the dimension sizes from different input tensors match the
* equation.
*/
/**
 * Verifies that every shared einsum dimension has a consistent size across
 * all input tensors; raises via util assert on mismatch.
 */
function checkEinsumDimSizes(nDims, idDims, tensors) {
    // First-seen size for each global dimension id.
    const seenSizes = new Array(nDims);
    tensors.forEach((tensor, i) => {
        const shape = tensor.shape;
        idDims[i].forEach((dimId, j) => {
            if (seenSizes[dimId] === undefined) {
                seenSizes[dimId] = shape[j];
            }
            else {
                Object(util_base["b" /* assert */])(seenSizes[dimId] === shape[j], () => `Expected dimension ${seenSizes[dimId]} at axis ${j} ` +
                    `of input shaped ${JSON.stringify(shape)}, ` +
                    `but got dimension ${shape[j]}`);
            }
        });
    });
}
/**
* Gets path of computation for einsum.
*
* @param summedDims indices to the dimensions being summed over.
* @param idDims A look up table for the dimensions present in each input
* tensor. Each consituent array contains indices for the dimensions in the
* corresponding input tensor.
*
* @return A map with two fields:
* - path: The path of computation, with each element indicating the dimension
* being summed over after the element-wise multiplication in that step.
* - steps: With the same length as `path`. Each element contains the indices
* to the input tensors being used for element-wise multiplication in the
* corresponding step.
*/
/**
 * Derives the einsum computation path: one step per summed dimension, each
 * step listing the input tensors first multiplied in at that step.
 * Note: mutates `summedDims` (pushes a -1 sentinel when nothing is summed).
 */
function getEinsumComputePath(summedDims, idDims) {
    const path = summedDims;
    if (path.length === 0) {
        // Einsum that involves no summing: e.g., transpose and outer product.
        path.push(-1);
    }
    // One steps entry per path element, plus one trailing (unused) slot to
    // mirror the original nSteps = summedDims.length + 1 sizing.
    const steps = [];
    for (let s = 0; s <= path.length; ++s) {
        steps.push([]);
    }
    const usedTerms = [];
    path.forEach((summedDim, stepIndex) => {
        for (const termIndex of findTermsWithDim(idDims, summedDim)) {
            if (!usedTerms.includes(termIndex)) {
                steps[stepIndex].push(termIndex);
                usedTerms.push(termIndex);
            }
        }
    });
    return { path, steps };
}
/** Determines if an axes permutation is the identity permutation. */
/** Returns true iff `perm` maps every axis to itself. */
function isIdentityPermutation(perm) {
    for (let axis = 0; axis < perm.length; ++axis) {
        if (perm[axis] !== axis) {
            return false;
        }
    }
    return true;
}
/**
 * Lists the indices of input terms that involve dimension `dim`.
 * Terms with no dims always match, as does the sentinel dim === -1.
 */
function findTermsWithDim(idDims, dim) {
    const matches = [];
    idDims.forEach((dims, termIndex) => {
        if (dim === -1 || dims.length === 0 || dims.includes(dim)) {
            matches.push(termIndex);
        }
    });
    return matches;
}
//# sourceMappingURL=einsum_util.js.map
// CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/split_util.js
/**
* Prepare the split size array. When the input is a number, the axis is evenly
* divided among the split size. When the input contains the negative value, the
* rest of the axis is allocated toward that.
*/
function prepareSplitSize(x, numOrSizeSplits, axis = 0) {
    let splitSizes = [];
    if (typeof (numOrSizeSplits) === 'number') {
        // Even split: the axis must be divisible by the requested count.
        Object(util_base["b" /* assert */])(x.shape[axis] % numOrSizeSplits === 0, () => 'Number of splits must evenly divide the axis.');
        splitSizes =
            new Array(numOrSizeSplits).fill(x.shape[axis] / numOrSizeSplits);
    }
    else {
        // Explicit sizes: at most one entry may be -1 (meaning "the rest").
        const numOfNegs = numOrSizeSplits.reduce((count, value) => {
            if (value === -1) {
                count += 1;
            }
            return count;
        }, 0);
        Object(util_base["b" /* assert */])(numOfNegs <= 1, () => 'There should be only one negative value in split array.');
        const negIndex = numOrSizeSplits.indexOf(-1);
        // Allow the number of split array to be -1, which indicates the rest
        // of dimension is allocated to that split.
        if (negIndex !== -1) {
            // NOTE(review): this writes into the caller-supplied array in place.
            const total = numOrSizeSplits.reduce((a, b) => b > 0 ? a + b : a);
            numOrSizeSplits[negIndex] = x.shape[axis] - total;
        }
        Object(util_base["b" /* assert */])(x.shape[axis] === numOrSizeSplits.reduce((a, b) => a + b), () => 'The sum of sizes must match the size of the axis dimension.');
        splitSizes = numOrSizeSplits;
    }
    return splitSizes;
}
//# sourceMappingURL=split_util.js.map
// CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/segment_util.js
/**
* @license
* Copyright 2018 Google LLC. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================================
*/
/**
 * Picks a window size (a divisor of inSize) for segment ops: small inputs
 * run in one pass; larger ones start near sqrt(inSize) and grow until the
 * divisor exceeds numSegments or covers the whole input.
 */
function segOpComputeOptimalWindowSize(inSize, numSegments) {
    if (inSize <= PARALLELIZE_THRESHOLD) {
        return inSize;
    }
    let windowSize = Object(util_base["G" /* nearestDivisor */])(inSize, Math.floor(Math.sqrt(inSize)));
    while (windowSize <= numSegments && windowSize !== inSize) {
        windowSize = Object(util_base["G" /* nearestDivisor */])(inSize, windowSize + 1);
    }
    return windowSize;
}
/**
 * Output shape of a segment op: same as `aShape` but with the segmented
 * axis replaced by `numSegments`.
 */
function segment_util_computeOutShape(aShape, axis, numSegments) {
    return aShape.map((size, dim) => (dim === axis ? numSegments : size));
}
// Validates gather arguments and derives the sizes a gather kernel needs.
// Output layout: [batch dims][outer dims][index dims][slice dims].
function collectGatherOpShapeInfo(x, indices, axis, batchDims) {
    const indicesRank = indices.shape.length;
    const xRank = x.shape.length;
    if (batchDims !== 0) {
        if (batchDims < -indicesRank || batchDims > indicesRank) {
            throw new Error(`Expect batchDims in the range of [-${indicesRank}, ${indicesRank}], but got ${batchDims}`);
        }
    }
    // Negative batchDims counts from the end of the indices rank.
    if (batchDims < 0) {
        batchDims += indicesRank;
    }
    if (batchDims > xRank) {
        throw new Error(`batchDims (${batchDims}) must be less than rank(x) (
        ${xRank}).`);
    }
    if (axis < batchDims) {
        throw new Error(`batchDims (${batchDims}) must be less than or equal to axis (${axis}).`);
    }
    // The leading batch dimensions of x and indices must agree.
    for (let i = 0; i < batchDims; ++i) {
        if (x.shape[i] !== indices.shape[i]) {
            throw new Error(`x.shape[${i}]: ${x.shape[i]} should be equal to indices.shape[${i}]: ${indices.shape[i]}.`);
        }
    }
    const dimSize = x.shape[axis];
    const outputShape = [];
    let batchSize = 1;
    let outerSize = 1;
    let sliceSize = 1;
    // Batch dims: shared by x and indices.
    for (let i = 0; i < batchDims; ++i) {
        outputShape.push(x.shape[i]);
        batchSize *= x.shape[i];
    }
    // Outer dims of x, between the batch dims and the gather axis.
    for (let i = batchDims; i < axis; i++) {
        outputShape.push(x.shape[i]);
        outerSize *= x.shape[i];
    }
    // Non-batch dims of the indices tensor replace the gather axis.
    for (let i = batchDims; i < indicesRank; i++) {
        outputShape.push(indices.shape[i]);
    }
    // Trailing dims of x, after the gather axis.
    for (let i = axis + 1; i < xRank; i++) {
        outputShape.push(x.shape[i]);
        sliceSize *= x.shape[i];
    }
    return { batchSize, sliceSize, outerSize, dimSize, outputShape };
}
//# sourceMappingURL=segment_util.js.map
// CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/backends/backend_util.js
/**
* @license
* Copyright 2018 Google LLC. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================================
*/
// Utilities needed by backend consumers of tf-core.
/**
 * Decodes an array of encoded byte buffers into utf-8 strings.
 * @throws Error wrapping any decode failure.
 */
function fromUint8ToStringArray(vals) {
    const decoded = [];
    try {
        for (const bytes of vals) {
            decoded.push(Object(util["decodeString"])(bytes));
        }
    }
    catch (err) {
        throw new Error(`Failed to decode encoded string bytes into utf-8, error: ${err}`);
    }
    return decoded;
}
/** Encodes each string into its byte representation. */
function fromStringArrayToUint8(strings) {
    const encoded = [];
    for (const s of strings) {
        encoded.push(Object(util["encodeString"])(s));
    }
    return encoded;
}
//# sourceMappingURL=backend_util.js.map
// EXTERNAL MODULE: ./node_modules/@tensorflow/tfjs-core/dist/device_util.js
var device_util = __webpack_require__(149);
// EXTERNAL MODULE: ./node_modules/@tensorflow/tfjs-core/dist/backends/non_max_suppression_impl.js + 1 modules
var non_max_suppression_impl = __webpack_require__(79);
// EXTERNAL MODULE: ./node_modules/@tensorflow/tfjs-core/dist/backends/where_impl.js
var where_impl = __webpack_require__(182);
// CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/backends/kernel_impls.js
/**
* @license
* Copyright 2020 Google LLC. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================================
*/
//# sourceMappingURL=kernel_impls.js.map
// EXTERNAL MODULE: ./node_modules/@tensorflow/tfjs-core/dist/backends/backend.js
var backend = __webpack_require__(144);
// CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/base.js
/**
* @license
* Copyright 2020 Google Inc. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================================
*/
// base.ts is tfjs-core without auto registration of things like flags,
// gradients, chained ops or the opHandler. See base_side_effects.ts for parts
// tfjs core that are required side effects.
/**
* @fileoverview
* @suppress {partialAlias} Optimization disabled due to passing the module
* object into a function below:
*
* import * as ops from './ops/ops';
* setOpHandler(ops);
*/
// Serialization.
// Optimizers.
// Top-level method exports.
// Second level exports.
// Backend specific.
// Export all kernel names / info.
//# sourceMappingURL=base.js.map
// CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/index.js
/**
* @license
* Copyright 2017 Google LLC. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================================
*/
// Required side effectful code.
// All exports from this package should be in base.
//# sourceMappingURL=index.js.map
/***/ }),
/* 1 */
/***/ (function(module, __webpack_exports__, __webpack_require__) {
"use strict";
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "d", function() { return getParamValue; });
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "e", function() { return getTensor; });
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "f", function() { return getTensorsForCurrentContenxt; });
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "b", function() { return getNodeNameAndIndex; });
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "g", function() { return parseNodeName; });
/* unused harmony export split */
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "c", function() { return getPadding; });
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "a", function() { return cloneTensor; });
/* harmony import */ var _tensorflow_tfjs_core__WEBPACK_IMPORTED_MODULE_0__ = __webpack_require__(0);
/**
* @license
* Copyright 2018 Google LLC. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================================
*/
/**
 * Resolves a node parameter's value, either from an input tensor
 * (node.inputParams) or from a static attribute (node.attrParams).
 * @param paramName Name of the parameter to look up.
 * @param node Graph node whose parameter is being resolved.
 * @param tensorMap Tensors keyed by node name.
 * @param context Execution context for the current node.
 * @param resourceManager Optional. Global resources of the model.
 */
function getParamValue(paramName, node, tensorMap, context, resourceManager) {
    const inputParam = node.inputParams[paramName];
    if (inputParam && inputParam.inputIndexStart !== undefined) {
        const start = inputParam.inputIndexStart;
        // inputIndexEnd === 0 means "through the end of inputNames".
        const end = inputParam.inputIndexEnd === 0 ?
            undefined :
            (inputParam.inputIndexEnd === undefined ? start + 1 :
                inputParam.inputIndexEnd);
        if (inputParam.type === 'tensor') {
            return getTensor(node.inputNames[inputParam.inputIndexStart], tensorMap, context, resourceManager);
        }
        if (inputParam.type === 'tensors') {
            const inputs = node.inputNames.slice(start, end);
            return inputs.map(name => getTensor(name, tensorMap, context, resourceManager));
        }
        // Scalar / array parameter: read the backing tensor synchronously.
        const tensor = getTensor(node.inputNames.slice(start)[0], tensorMap, context, resourceManager);
        const data = tensor.dataSync();
        return inputParam.type === 'number' ?
            data[0] :
            _tensorflow_tfjs_core__WEBPACK_IMPORTED_MODULE_0__["util"].toNestedArray(tensor.shape, data);
    }
    // Fall back to the static attribute value (undefined when absent).
    const attrParam = node.attrParams[paramName];
    return attrParam && attrParam.value;
}
/**
* Retrieve the tensor from tensorsMap based on input name.
* @param name Node input name
* @param tensorsMap Tensors map keyed by the node
* @param context contains tensors and information for running the current node.
* @param resourceManager Optional. Contains global resources of the model.
*/
function getTensor(name, tensorsMap, context, resourceManager) {
    const [nodeName, index] = parseNodeName(name);
    if (resourceManager != null) {
        // Hash-table handles take precedence over tensors in tensorsMap.
        const tensor = resourceManager.getHashTableHandleByName(nodeName);
        if (tensor != null) {
            return tensor;
        }
    }
    // Find the first context frame whose id yields an entry for this node.
    const contextId = context.currentContextIds.find(contextId => {
        return !!tensorsMap[getNodeNameWithContextId(nodeName, contextId)];
    });
    // Returns undefined when the node is not present in any context frame.
    return contextId !== undefined ?
        tensorsMap[getNodeNameWithContextId(nodeName, contextId)][index] :
        undefined;
}
/**
* Retrieve the tensors based on input name for current context.
* @param name Node input name
* @param tensorsMap Tensors map keyed by the node
*/
/**
 * Looks up the tensors for `name` in the current execution context.
 * (Exported name retains the historical "Contenxt" spelling.)
 */
function getTensorsForCurrentContenxt(name, tensorsMap, context) {
    const key = getNodeNameWithContextId(name, context.currentContextId);
    return tensorsMap[key];
}
/**
* Returns the node name and index from the Node input name.
* @param inputName The input name of the node, in format of
* node_name:output_index, i.e. MatMul:0, if the output_index is not set, it is
* default to 0.
*/
/**
 * Splits a node input name into its context-qualified node name and
 * output index (defaults to 0 when no ":index" suffix is present).
 */
function getNodeNameAndIndex(inputName, context) {
    const [nodeName, index] = parseNodeName(inputName);
    const contextId = context && context.currentContextId;
    return [getNodeNameWithContextId(nodeName, contextId), index];
}
/** Suffixes the node name with a truthy context id ("name-ctx"). */
function getNodeNameWithContextId(name, contextId) {
    if (contextId) {
        return `${name}-${contextId}`;
    }
    return name;
}
/**
 * Parses "nodeName:index" into [nodeName, index]; a bare name maps to
 * index 0. Only the last ":"-separated piece is treated as the index.
 */
function parseNodeName(name) {
    const pieces = name.split(':');
    if (pieces.length === 1) {
        return [name, 0];
    }
    return [pieces[0], Number(pieces[pieces.length - 1])];
}
/** Chunks `arr` into consecutive slices of length `size` (last may be shorter). */
function split(arr, size) {
    const chunks = [];
    let start = 0;
    while (start < arr.length) {
        chunks.push(arr.slice(start, start + size));
        start += size;
    }
    return chunks;
}
/**
 * Resolves a node's padding parameter; 'explicit' padding is expanded from
 * a flat 8-element list into a 4x2 array of [before, after] pairs.
 */
function getPadding(node, tensorMap, context) {
    const pad = getParamValue('pad', node, tensorMap, context);
    if (pad !== 'explicit') {
        return pad;
    }
    // This is 1d array, we need to convert it to 2d array
    const flat = getParamValue('explicitPaddings', node, tensorMap, context);
    const explicitPadding = [[0, 0], [0, 0], [0, 0], [0, 0]];
    for (let i = 0; i < 4; i++) {
        explicitPadding[i][0] = flat[i * 2];
        explicitPadding[i][1] = flat[i * 2 + 1];
    }
    return explicitPadding;
}
/**
* Reuse the tensor if it is marked as keep, otherwise clone the tensor to
* avoid disposal. This is important for TensorArray and TensorList ops, since
* internally they use a tensor as the id for TensorArray and TensorList, and
* to simplify lookup, they also use Tensor.id as the key to the internal map.
* These id tensors have been marked as kept in the backend, we need avoid clone
* them in order to create new Tensor.id.
* @param tensor
*/
function cloneTensor(tensor) {
    // Kept tensors serve as stable ids (TensorArray/TensorList) and must not
    // be cloned; everything else is cloned to protect it from disposal.
    if (tensor.kept) {
        return tensor;
    }
    return Object(_tensorflow_tfjs_core__WEBPACK_IMPORTED_MODULE_0__["clone"])(tensor);
}
//# sourceMappingURL=utils.js.map
/***/ }),
/* 2 */
/***/ (function(module, __webpack_exports__, __webpack_require__) {
"use strict";
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "c", function() { return inferShape; });
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "a", function() { return convertToTensor; });
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "b", function() { return convertToTensorArray; });
/* harmony import */ var _engine__WEBPACK_IMPORTED_MODULE_0__ = __webpack_require__(5);
/* harmony import */ var _environment__WEBPACK_IMPORTED_MODULE_1__ = __webpack_require__(22);
/* harmony import */ var _tensor__WEBPACK_IMPORTED_MODULE_2__ = __webpack_require__(6);
/* harmony import */ var _util__WEBPACK_IMPORTED_MODULE_3__ = __webpack_require__(8);
/* harmony import */ var _util__WEBPACK_IMPORTED_MODULE_4__ = __webpack_require__(10);
/**
* @license
* Copyright 2018 Google LLC. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================================
*/
/**
 * Infers the tensor shape of a nested-array / TypedArray / primitive value.
 * Typed arrays are rank-1 (unless the dtype is 'string', where they are
 * treated as scalars); primitives are rank-0.
 */
function inferShape(val, dtype) {
    if (Object(_util__WEBPACK_IMPORTED_MODULE_3__[/* isTypedArray */ "A"])(val)) {
        return dtype === 'string' ? [] : [val.length];
    }
    if (!Array.isArray(val)) {
        // Scalar.
        return [];
    }
    // Walk the first element of each nesting level, collecting one dimension
    // per level.
    const shape = [];
    let cursor = val;
    while (Array.isArray(cursor) ||
        Object(_util__WEBPACK_IMPORTED_MODULE_3__[/* isTypedArray */ "A"])(cursor) && dtype !== 'string') {
        shape.push(cursor.length);
        cursor = cursor[0];
    }
    // Optionally verify that every sibling sub-array agrees with the shape
    // inferred from the first elements.
    if (Object(_environment__WEBPACK_IMPORTED_MODULE_1__[/* env */ "c"])().getBool('TENSORLIKE_CHECK_SHAPE_CONSISTENCY')) {
        deepAssertShapeConsistency(val, shape, []);
    }
    return shape;
}
/**
 * Recursively asserts that every sub-array of `val` matches the expected
 * `shape` at its nesting depth; `indices` tracks the path used in error
 * messages.
 */
function deepAssertShapeConsistency(val, shape, indices) {
    indices = indices || [];
    const isLeaf = !Array.isArray(val) &&
        !Object(_util__WEBPACK_IMPORTED_MODULE_3__[/* isTypedArray */ "A"])(val);
    if (isLeaf) {
        // A primitive is only valid where no further dimensions are expected.
        Object(_util__WEBPACK_IMPORTED_MODULE_3__[/* assert */ "b"])(shape.length === 0, () => `Element arr[${indices.join('][')}] is a primitive, ` +
            `but should be an array/TypedArray of ${shape[0]} elements`);
        return;
    }
    Object(_util__WEBPACK_IMPORTED_MODULE_3__[/* assert */ "b"])(shape.length > 0, () => `Element arr[${indices.join('][')}] should be a primitive, ` +
        `but is an array of ${val.length} elements`);
    Object(_util__WEBPACK_IMPORTED_MODULE_3__[/* assert */ "b"])(val.length === shape[0], () => `Element arr[${indices.join('][')}] should have ${shape[0]} ` +
        `elements, but has ${val.length} elements`);
    // Each child must match the remainder of the expected shape.
    const childShape = shape.slice(1);
    for (let i = 0; i < val.length; ++i) {
        deepAssertShapeConsistency(val[i], childShape, indices.concat(i));
    }
}
/**
 * Throws unless `actualDType` satisfies `expectedDtype`.
 * 'string_or_numeric' accepts anything; 'numeric' rejects only 'string';
 * any concrete dtype must match exactly.
 */
function assertDtype(expectedDtype, actualDType, argName, functionName) {
    if (expectedDtype === 'string_or_numeric') {
        return;
    }
    if (expectedDtype == null) {
        throw new Error(`Expected dtype cannot be null.`);
    }
    const mismatch = expectedDtype === 'numeric' ?
        actualDType === 'string' :
        expectedDtype !== actualDType;
    if (mismatch) {
        throw new Error(`Argument '${argName}' passed to '${functionName}' must ` +
            `be ${expectedDtype} tensor, but got ${actualDType} tensor`);
    }
}
/**
 * Converts a Tensor or TensorLike (nested array / TypedArray / primitive)
 * into a Tensor of the requested dtype, validating dtype and value kind.
 */
function convertToTensor(x, argName, functionName, parseAsDtype = 'numeric') {
    // Already a tensor: only its dtype needs validation.
    if (x instanceof _tensor__WEBPACK_IMPORTED_MODULE_2__[/* Tensor */ "a"]) {
        assertDtype(parseAsDtype, x.dtype, argName, functionName);
        return x;
    }
    let inferredDtype = Object(_util__WEBPACK_IMPORTED_MODULE_3__[/* inferDtype */ "r"])(x);
    // A concrete bool/int32/float32 request overrides the inference, unless
    // the value is a string.
    if (inferredDtype !== 'string' &&
        ['bool', 'int32', 'float32'].indexOf(parseAsDtype) >= 0) {
        inferredDtype = parseAsDtype;
    }
    assertDtype(parseAsDtype, inferredDtype, argName, functionName);
    const isTensorLike = x != null &&
        (Object(_util__WEBPACK_IMPORTED_MODULE_3__[/* isTypedArray */ "A"])(x) || Array.isArray(x) ||
            typeof x === 'number' || typeof x === 'boolean' ||
            typeof x === 'string');
    if (!isTensorLike) {
        const type = x == null ? 'null' : x.constructor.name;
        throw new Error(`Argument '${argName}' passed to '${functionName}' must be a ` +
            `Tensor or TensorLike, but got '${type}'`);
    }
    const inferredShape = inferShape(x, inferredDtype);
    // Wrap bare primitives so the flattening helpers below always receive an
    // array-like value.
    if (!Object(_util__WEBPACK_IMPORTED_MODULE_3__[/* isTypedArray */ "A"])(x) && !Array.isArray(x)) {
        x = [x];
    }
    const skipTypedArray = true;
    const values = inferredDtype !== 'string' ?
        Object(_util__WEBPACK_IMPORTED_MODULE_4__["toTypedArray"])(x, inferredDtype) :
        Object(_util__WEBPACK_IMPORTED_MODULE_3__[/* flatten */ "m"])(x, [], skipTypedArray);
    return _engine__WEBPACK_IMPORTED_MODULE_0__[/* ENGINE */ "a"].makeTensor(values, inferredShape, inferredDtype);
}
/**
 * Converts an array of Tensors/TensorLikes element-wise via convertToTensor.
 * A non-array argument is rejected; single values are not auto-wrapped.
 */
function convertToTensorArray(arg, argName, functionName, parseAsDtype = 'numeric') {
    if (!Array.isArray(arg)) {
        throw new Error(`Argument ${argName} passed to ${functionName} must be a ` +
            '`Tensor[]` or `TensorLike[]`');
    }
    return arg.map((element, index) => convertToTensor(element, `${argName}[${index}]`, functionName, parseAsDtype));
}
//# sourceMappingURL=tensor_util_env.js.map
/***/ }),
/* 3 */
/***/ (function(module, __webpack_exports__, __webpack_require__) {
"use strict";
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "a", function() { return Abs; });
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "b", function() { return Acos; });
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "c", function() { return Acosh; });
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "d", function() { return Add; });
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "e", function() { return AddN; });
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "f", function() { return All; });
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "g", function() { return Any; });
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "h", function() { return ArgMax; });
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "i", function() { return ArgMin; });
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "j", function() { return Asin; });
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "k", function() { return Asinh; });
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "l", function() { return Atan; });
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "n", function() { return Atanh; });
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "m", function() { return Atan2; });
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "o", function() { return AvgPool; });
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "r", function() { return AvgPoolGrad; });
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "p", function() { return AvgPool3D; });
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "q", function() { return AvgPool3DGrad; });
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "s", function() { return BatchMatMul; });
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "t", function() { return BatchToSpaceND; });
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "u", function() { return Bincount; });
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "v", function() { return BroadcastTo; });
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "w", function() { return Cast; });
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "x", function() { return Ceil; });
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "y", function() { return ClipByValue; });
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "z", function() { return Complex; });
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "A", function() { return ComplexAbs; });
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "B", function() { return Concat; });
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "C", function() { return Conv2D; });
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "D", function() { return Conv2DBackpropFilter; });
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "E", function() { return Conv2DBackpropInput; });
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "F", function() { return Conv3D; });
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "G", function() { return Conv3DBackpropFilterV2; });
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "H", function() { return Conv3DBackpropInputV2; });
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "I", function() { return Cos; });
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "J", function() { return Cosh; });
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "L", function() { return Cumsum; });
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "K", function() { return CropAndResize; });
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "M", function() { return DenseBincount; });
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "N", function() { return DepthToSpace; });
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "O", function() { return DepthwiseConv2dNative; });
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "P", function() { return DepthwiseConv2dNativeBackpropFilter; });
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "Q", function() { return DepthwiseConv2dNativeBackpropInput; });
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "R", function() { return Diag; });
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "S", function() { return Dilation2D; });
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "U", function() { return Dilation2DBackpropInput; });
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "T", function() { return Dilation2DBackpropFilter; });
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "lc", function() { return RealDiv; });
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "V", function() { return Einsum; });
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "W", function() { return Elu; });
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "X", function() { return EluGrad; });
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "Z", function() { return Erf; });
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "Y", function() { return Equal; });
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "ab", function() { return Exp; });
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "bb", function() { return ExpandDims; });
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "cb", function() { return Expm1; });
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "db", function() { return FFT; });
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "eb", function() { return Fill; });
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "fb", function() { return FlipLeftRight; });
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "gb", function() { return Floor; });
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "hb", function() { return FloorDiv; });
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "jb", function() { return FusedBatchNorm; });
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "nb", function() { return GatherV2; });
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "mb", function() { return GatherNd; });
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "ob", function() { return Greater; });
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "pb", function() { return GreaterEqual; });
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "rb", function() { return Identity; });
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "qb", function() { return IFFT; });
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "sb", function() { return Imag; });
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "tb", function() { return IsFinite; });
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "ub", function() { return IsInf; });
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "vb", function() { return IsNan; });
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "yb", function() { return LeakyRelu; });
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "zb", function() { return Less; });
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "Ab", function() { return LessEqual; });
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "Bb", function() { return LinSpace; });
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "Cb", function() { return Log; });
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "Db", function() { return Log1p; });
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "Fb", function() { return LogicalAnd; });
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "Gb", function() { return LogicalNot; });
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "Hb", function() { return LogicalOr; });
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "Eb", function() { return LogSoftmax; });
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "wb", function() { return LRN; });
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "xb", function() { return LRNGrad; });
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "Ib", function() { return Max; });
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "Ob", function() { return Maximum; });
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "Jb", function() { return MaxPool; });
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "Mb", function() { return MaxPoolGrad; });
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "Kb", function() { return MaxPool3D; });
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "Lb", function() { return MaxPool3DGrad; });
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "Nb", function() { return MaxPoolWithArgmax; });
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "Pb", function() { return Mean; });
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "Qb", function() { return Min; });
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "Rb", function() { return Minimum; });
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "Sb", function() { return MirrorPad; });
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "Tb", function() { return Mod; });
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "Ub", function() { return Multinomial; });
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "Vb", function() { return Multiply; });
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "Wb", function() { return Neg; });
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "ac", function() { return NotEqual; });
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "Xb", function() { return NonMaxSuppressionV3; });
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "Yb", function() { return NonMaxSuppressionV4; });
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "Zb", function() { return NonMaxSuppressionV5; });
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "cc", function() { return OnesLike; });
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "bc", function() { return OneHot; });
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "dc", function() { return Pack; });
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "ec", function() { return PadV2; });
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "fc", function() { return Pool; });
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "gc", function() { return Pow; });
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "hc", function() { return Prelu; });
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "ic", function() { return Prod; });
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "jc", function() { return Range; });
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "kc", function() { return Real; });
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "mc", function() { return Reciprocal; });
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "nc", function() { return Relu; });
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "pc", function() { return Reshape; });
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "sc", function() { return ResizeNearestNeighbor; });
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "tc", function() { return ResizeNearestNeighborGrad; });
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "qc", function() { return ResizeBilinear; });
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "rc", function() { return ResizeBilinearGrad; });
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "oc", function() { return Relu6; });
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "uc", function() { return Reverse; });
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "wc", function() { return Round; });
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "xc", function() { return Rsqrt; });
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "yc", function() { return ScatterNd; });
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "zc", function() { return Select; });
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "Ac", function() { return Selu; });
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "Fc", function() { return Slice; });
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "Dc", function() { return Sin; });
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "Ec", function() { return Sinh; });
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "Cc", function() { return Sign; });
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "Bc", function() { return Sigmoid; });
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "Hc", function() { return Softplus; });
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "Mc", function() { return Sqrt; });
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "Sc", function() { return Sum; });
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "Ic", function() { return SpaceToBatchND; });
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "Lc", function() { return SplitV; });
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "Gc", function() { return Softmax; });
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "Jc", function() { return SparseReshape; });
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "Kc", function() { return SparseToDense; });
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "Oc", function() { return SquaredDifference; });
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "Nc", function() { return Square; });
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "Qc", function() { return StridedSlice; });
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "Rc", function() { return Sub; });
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "Tc", function() { return Tan; });
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "Uc", function() { return Tanh; });
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "Vc", function() { return Tile; });
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "Wc", function() { return TopK; });
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "Xc", function() { return Transform; });
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "Yc", function() { return Transpose; });
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "Zc", function() { return Unique; });
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "ad", function() { return Unpack; });
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "bd", function() { return UnsortedSegmentSum; });
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "cd", function() { return ZerosLike; });
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "Pc", function() { return Step; });
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "ib", function() { return FromPixels; });
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "vc", function() { return RotateWithOffset; });
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "dd", function() { return _FusedMatMul; });
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "kb", function() { return FusedConv2D; });
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "lb", function() { return FusedDepthwiseConv2D; });
const Abs = 'Abs';
const Acos = 'Acos';
const Acosh = 'Acosh';
const Add = 'Add';
const AddN = 'AddN';
const All = 'All';
const Any = 'Any';
const ArgMax = 'ArgMax';
const ArgMin = 'ArgMin';
const Asin = 'Asin';
const Asinh = 'Asinh';
const Atan = 'Atan';
const Atanh = 'Atanh';
const Atan2 = 'Atan2';
const AvgPool = 'AvgPool';
const AvgPoolGrad = 'AvgPoolGrad';
const AvgPool3D = 'AvgPool3D';
const AvgPool3DGrad = 'AvgPool3DGrad';
const BatchMatMul = 'BatchMatMul';
const BatchToSpaceND = 'BatchToSpaceND';
const Bincount = 'Bincount';
const BroadcastTo = 'BroadcastTo';
const Cast = 'Cast';
const Ceil = 'Ceil';
const ClipByValue = 'ClipByValue';
const Complex = 'Complex';
const ComplexAbs = 'ComplexAbs';
const Concat = 'Concat';
const Conv2D = 'Conv2D';
const Conv2DBackpropFilter = 'Conv2DBackpropFilter';
const Conv2DBackpropInput = 'Conv2DBackpropInput';
const Conv3D = 'Conv3D';
const Conv3DBackpropFilterV2 = 'Conv3DBackpropFilterV2';
const Conv3DBackpropInputV2 = 'Conv3DBackpropInputV2';
const Cos = 'Cos';
const Cosh = 'Cosh';
const Cumsum = 'Cumsum';
const CropAndResize = 'CropAndResize';
const DenseBincount = 'DenseBincount';
const DepthToSpace = 'DepthToSpace';
const DepthwiseConv2dNative = 'DepthwiseConv2dNative';
const DepthwiseConv2dNativeBackpropFilter = 'DepthwiseConv2dNativeBackpropFilter';
const DepthwiseConv2dNativeBackpropInput = 'DepthwiseConv2dNativeBackpropInput';
const Diag = 'Diag';
const Dilation2D = 'Dilation2D';
const Dilation2DBackpropInput = 'Dilation2DBackpropInput';
const Dilation2DBackpropFilter = 'Dilation2DBackpropFilter';
const RealDiv = 'RealDiv';
const Einsum = 'Einsum';
const Elu = 'Elu';
const EluGrad = 'EluGrad';
const Erf = 'Erf';
const Equal = 'Equal';
const Exp = 'Exp';
const ExpandDims = 'ExpandDims';
const Expm1 = 'Expm1';
const FFT = 'FFT';
const Fill = 'Fill';
const FlipLeftRight = 'FlipLeftRight';
const Floor = 'Floor';
const FloorDiv = 'FloorDiv';
const FusedBatchNorm = 'FusedBatchNorm';
const GatherV2 = 'GatherV2';
const GatherNd = 'GatherNd';
const Greater = 'Greater';
const GreaterEqual = 'GreaterEqual';
const Identity = 'Identity';
const IFFT = 'IFFT';
const Imag = 'Imag';
const IsFinite = 'IsFinite';
const IsInf = 'IsInf';
const IsNan = 'IsNan';
const LeakyRelu = 'LeakyRelu';
const Less = 'Less';
const LessEqual = 'LessEqual';
const LinSpace = 'LinSpace';
const Log = 'Log';
const Log1p = 'Log1p';
const LogicalAnd = 'LogicalAnd';
const LogicalNot = 'LogicalNot';
const LogicalOr = 'LogicalOr';
const LogSoftmax = 'LogSoftmax';
const LRN = 'LRN';
const LRNGrad = 'LRNGrad';
const Max = 'Max';
const Maximum = 'Maximum';
const MaxPool = 'MaxPool';
const MaxPoolGrad = 'MaxPoolGrad';
const MaxPool3D = 'MaxPool3D';
const MaxPool3DGrad = 'MaxPool3DGrad';
const MaxPoolWithArgmax = 'MaxPoolWithArgmax';
const Mean = 'Mean';
const Min = 'Min';
const Minimum = 'Minimum';
const MirrorPad = 'MirrorPad';
const Mod = 'Mod';
const Multinomial = 'Multinomial';
const Multiply = 'Multiply';
const Neg = 'Neg';
const NotEqual = 'NotEqual';
const NonMaxSuppressionV3 = 'NonMaxSuppressionV3';
const NonMaxSuppressionV4 = 'NonMaxSuppressionV4';
const NonMaxSuppressionV5 = 'NonMaxSuppressionV5';
const OnesLike = 'OnesLike';
const OneHot = 'OneHot';
const Pack = 'Pack';
const PadV2 = 'PadV2';
const Pool = 'Pool';
const Pow = 'Pow';
const Prelu = 'Prelu';
const Prod = 'Prod';
const Range = 'Range';
const Real = 'Real';
const Reciprocal = 'Reciprocal';
const Relu = 'Relu';
const Reshape = 'Reshape';
const ResizeNearestNeighbor = 'ResizeNearestNeighbor';
const ResizeNearestNeighborGrad = 'ResizeNearestNeighborGrad';
const ResizeBilinear = 'ResizeBilinear';
const ResizeBilinearGrad = 'ResizeBilinearGrad';
const Relu6 = 'Relu6';
const Reverse = 'Reverse';
const Round = 'Round';
const Rsqrt = 'Rsqrt';
const ScatterNd = 'ScatterNd';
const Select = 'Select';
const Selu = 'Selu';
const Slice = 'Slice';
const Sin = 'Sin';
const Sinh = 'Sinh';
const Sign = 'Sign';
const Sigmoid = 'Sigmoid';
const Softplus = 'Softplus';
const Sqrt = 'Sqrt';
const Sum = 'Sum';
const SpaceToBatchND = 'SpaceToBatchND';
const SplitV = 'SplitV';
const Softmax = 'Softmax';
const SparseReshape = 'SparseReshape';
const SparseToDense = 'SparseToDense';
const SquaredDifference = 'SquaredDifference';
const Square = 'Square';
const StridedSlice = 'StridedSlice';
const Sub = 'Sub';
const Tan = 'Tan';
const Tanh = 'Tanh';
const Tile = 'Tile';
const TopK = 'TopK';
const Transform = 'Transform';
const Transpose = 'Transpose';
const Unique = 'Unique';
const Unpack = 'Unpack';
const UnsortedSegmentSum = 'UnsortedSegmentSum';
const ZerosLike = 'ZerosLike';
/**
* TensorFlow.js-only kernels
*/
const Step = 'Step';
const FromPixels = 'FromPixels';
const RotateWithOffset = 'RotateWithOffset';
const _FusedMatMul = '_FusedMatMul';
const FusedConv2D = 'FusedConv2D';
const FusedDepthwiseConv2D = 'FusedDepthwiseConv2D';
//# sourceMappingURL=kernel_names.js.map
/***/ }),
/* 4 */
/***/ (function(module, __webpack_exports__, __webpack_require__) {
"use strict";
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "a", function() { return OP_SCOPE_SUFFIX; });
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "b", function() { return op; });
/* harmony import */ var _engine__WEBPACK_IMPORTED_MODULE_0__ = __webpack_require__(5);
/* harmony import */ var _util__WEBPACK_IMPORTED_MODULE_1__ = __webpack_require__(8);
/**
* @license
* Copyright 2018 Google LLC. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================================
*/
const OP_SCOPE_SUFFIX = '__op';
/**
* Used for wrapping functions that perform math operations on
* Tensors. The function will be wrapped in a named scope that cleans all
* memory usage after the function is done.
*/
/**
 * Wraps a math function in a named memory scope: tensors created inside the
 * call are cleaned up when the scope ends, except those reachable from the
 * returned result.
 */
function op(f) {
    const keys = Object.keys(f);
    if (keys.length !== 1) {
        throw new Error(`Please provide an object with a single key ` +
            `(operation name) mapping to a function. Got an object with ` +
            `${keys.length} keys.`);
    }
    const [rawName] = keys;
    const fn = f[rawName];
    // Internal implementations carry a trailing underscore; drop it for the
    // public-facing name.
    const baseName = rawName.endsWith('_') ?
        rawName.substring(0, rawName.length - 1) :
        rawName;
    // The __op suffix lets tf.profile distinguish ops from kernels.
    const opName = baseName + OP_SCOPE_SUFFIX;
    // tslint:disable-next-line:no-any
    const wrapped = (...args) => {
        _engine__WEBPACK_IMPORTED_MODULE_0__[/* ENGINE */ "a"].startScope(opName);
        try {
            const result = fn(...args);
            if (Object(_util__WEBPACK_IMPORTED_MODULE_1__[/* isPromise */ "x"])(result)) {
                console.error('Cannot return a Promise inside of tidy.');
            }
            _engine__WEBPACK_IMPORTED_MODULE_0__[/* ENGINE */ "a"].endScope(result);
            return result;
        }
        catch (ex) {
            // Close the scope without retaining anything, then propagate.
            _engine__WEBPACK_IMPORTED_MODULE_0__[/* ENGINE */ "a"].endScope(null);
            throw ex;
        }
    };
    Object.defineProperty(wrapped, 'name', { value: opName, configurable: true });
    // tslint:disable-next-line:no-any
    return wrapped;
}
//# sourceMappingURL=operation.js.map
/***/ }),
/* 5 */
/***/ (function(module, __webpack_exports__, __webpack_require__) {
"use strict";
// EXPORTS
__webpack_require__.d(__webpack_exports__, "b", function() { return /* binding */ getOrMakeEngine; });
__webpack_require__.d(__webpack_exports__, "a", function() { return /* binding */ ENGINE; });
// UNUSED EXPORTS: Engine, add
// EXTERNAL MODULE: ./node_modules/@tensorflow/tfjs-core/dist/backends/backend.js
var backends_backend = __webpack_require__(144);
// EXTERNAL MODULE: ./node_modules/@tensorflow/tfjs-core/dist/environment.js
var dist_environment = __webpack_require__(22);
// EXTERNAL MODULE: ./node_modules/@tensorflow/tfjs-core/dist/global_util.js
var global_util = __webpack_require__(109);
// EXTERNAL MODULE: ./node_modules/@tensorflow/tfjs-core/dist/kernel_names.js
var kernel_names = __webpack_require__(3);
// EXTERNAL MODULE: ./node_modules/@tensorflow/tfjs-core/dist/kernel_registry.js
var kernel_registry = __webpack_require__(62);
// EXTERNAL MODULE: ./node_modules/@tensorflow/tfjs-core/dist/util.js
var util = __webpack_require__(10);
// EXTERNAL MODULE: ./node_modules/@tensorflow/tfjs-core/dist/util_base.js
var util_base = __webpack_require__(8);
// CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/profiler.js
/**
* @license
* Copyright 2018 Google LLC. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================================
*/
/**
 * Measures per-kernel execution time via a backend timer and reports the
 * results through a logger.
 */
class profiler_Profiler {
    /**
     * @param backendTimer Backend-provided timer used to time kernel runs.
     * @param logger Optional sink for profile records; a new profiler_Logger
     *     is created when omitted.
     */
    constructor(backendTimer, logger) {
        this.backendTimer = backendTimer;
        this.logger = logger;
        if (logger == null) {
            this.logger = new profiler_Logger();
        }
    }
    /**
     * Runs `f` (which executes the kernel and returns its output tensors)
     * under a timer and returns a profile record whose `timeMs`/`extraInfo`
     * fields are promises resolving to the timing results.
     * @param kernelName Name stored in the profile record.
     * @param inputs Kernel inputs, stored verbatim in the record.
     * @param f Thunk that executes the kernel and returns its outputs.
     */
    profileKernel(kernelName, inputs, f) {
        let outputs;
        const holdResultWrapperFn = () => {
            outputs = f();
        };
        let timer;
        const start = util["now"]();
        if (this.backendTimer.timerAvailable()) {
            timer = this.backendTimer.time(holdResultWrapperFn);
        }
        else {
            holdResultWrapperFn();
            // No backend timer: synchronously read every output so the
            // wall-clock delta below covers the actual kernel work.
            for (const output of outputs) {
                output.dataSync();
            }
            timer = Promise.resolve({ kernelMs: util["now"]() - start });
        }
        if (Object(dist_environment["c" /* env */])().getBool('CHECK_COMPUTATION_FOR_ERRORS')) {
            for (let i = 0; i < outputs.length; i++) {
                const output = outputs[i];
                // Dangling promise here because we don't want to propagate up
                // asynchronicity.
                output.data().then(tensorVals => {
                    checkComputationForErrors(tensorVals, output.dtype, kernelName);
                });
            }
        }
        const kernelProfile = {
            kernelName,
            outputs,
            inputs,
            timeMs: timer.then(timing => timing.kernelMs),
            // getExtraProfileInfo is optional on the timer result; default to
            // an empty string when the backend does not provide it.
            extraInfo: timer.then(timing => timing.getExtraProfileInfo != null ?
                timing.getExtraProfileInfo() :
                '')
        };
        return kernelProfile;
    }
    /**
     * Logs one profile record per output tensor once the tensor's values and
     * the record's timing/extra-info promises have all resolved.
     */
    logKernelProfile(kernelProfile) {
        const { kernelName, outputs, timeMs, inputs, extraInfo } = kernelProfile;
        outputs.forEach(result => {
            Promise.all([result.data(), timeMs, extraInfo]).then(valueContainer => {
                this.logger.logKernelProfile(kernelName, result, valueContainer[0], valueContainer[1], inputs, valueContainer[2]);
            });
        });
    }
}
/**
 * Scans a kernel result's values for NaN/Infinity and warns when one is found.
 *
 * Only 'float32' results are checked, since non-float computations cannot
 * produce NaN or Infinity.
 *
 * @param vals Flat array of the kernel output's values.
 * @param dtype The dtype of the output tensor.
 * @param kernelName Name of the kernel that produced `vals` (for the warning).
 * @returns true if a non-finite value was found, false otherwise.
 */
function checkComputationForErrors(vals, dtype, kernelName) {
    if (dtype !== 'float32') {
        // Only floating point computations will generate NaN values
        return false;
    }
    for (let i = 0; i < vals.length; i++) {
        const num = vals[i];
        // Number.isFinite(num) is false exactly for NaN, Infinity and
        // -Infinity, so the previous `isNaN(num) || !isFinite(num)` collapses
        // to one call and avoids the coercion of the global isNaN/isFinite.
        if (!Number.isFinite(num)) {
            // Warning (not throwing) so behavior is testable.
            console.warn(`Found ${num} in the result of '${kernelName}'`);
            return true;
        }
    }
    return false;
}
class profiler_Logger {
    /**
     * Pretty-prints one profiled kernel invocation to the console.
     *
     * @param name The kernel name.
     * @param result One output tensor of the kernel (rank/size/shape shown).
     * @param vals The output's values (unused here; kept for interface parity).
     * @param timeMs Kernel time in ms, or an object with an `error` field.
     * @param inputs Map of input name -> tensor (or non-tensor input).
     * @param extraInfo Backend-specific extra profile text.
     */
    logKernelProfile(name, result, vals, timeMs, inputs, extraInfo) {
        const time = typeof timeMs === 'number' ? util_base["L" /* rightPad */](`${timeMs}ms`, 9) :
            timeMs['error'];
        const paddedName = util_base["L" /* rightPad */](name, 25);
        const rank = result.rank;
        const size = result.size;
        const shape = util_base["L" /* rightPad */](result.shape.toString(), 14);
        let inputShapesDescription = '';
        // Loop variable renamed from `name` to `inputName`: the original
        // shadowed the kernel-name parameter, inviting confusion.
        for (const inputName in inputs) {
            const input = inputs[inputName];
            if (input != null) {
                // The input might be a non-tensor (e.g HTMLImageElement), in which case
                // we claim the output shape as input shape.
                const inputShape = input.shape || result.shape;
                const inputRank = inputShape.length;
                inputShapesDescription +=
                    `${inputName}: ${inputRank}D ${inputRank > 0 ? inputShape : ''} `;
            }
        }
        console.log(`%c${paddedName}\t%c${time}\t%c${rank}D ${shape}\t%c${size}\t%c${inputShapesDescription}\t%c${extraInfo}`, 'font-weight:bold', 'color:red', 'color:blue', 'color: orange', 'color: green', 'color: steelblue');
    }
}
//# sourceMappingURL=profiler.js.map
// CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/tape.js
/**
* @license
* Copyright 2017 Google LLC. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================================
*/
/**
 * Computes a list of TapeNodes that connect x to y, filtering everything else
 * out and preserving the order of the original tape elements.
 *
 * @param tape The tape elements to filter.
 * @param xs The input Tensors.
 * @param y The output Tensor.
 * @returns The subset of `tape` on a path from some x to y, in original
 *     order, with each kept node's inputs pruned to those reachable from xs.
 */
function getFilteredNodesXToY(tape, xs, y) {
    // Forward pass to compute all the nodes and Tensors that are transitively a
    // function of x.
    const tensorsFromX = {};
    const nodesFromX = {};
    for (let i = 0; i < xs.length; i++) {
        tensorsFromX[xs[i].id] = true;
    }
    for (let i = 0; i < tape.length; i++) {
        const node = tape[i];
        const nodeInputs = node.inputs;
        for (const inputName in nodeInputs) {
            const input = nodeInputs[inputName];
            // The original code re-tested this same condition once per element
            // of `xs` in an inner loop whose index was never used; a single
            // check is equivalent.
            if (tensorsFromX[input.id]) {
                node.outputs.forEach(output => tensorsFromX[output.id] = true);
                nodesFromX[node.id] = true;
                // One input reachable from x is enough to mark this node.
                break;
            }
        }
    }
    // Backward pass to find all of the nodes and Tensors that lead to y.
    const tensorsLeadToY = {};
    tensorsLeadToY[y.id] = true;
    const nodesToY = {};
    for (let i = tape.length - 1; i >= 0; i--) {
        const node = tape[i];
        const nodeInputs = node.inputs;
        // If any of the outputs lead to y, mark all of the inputs as leading to y.
        for (let j = 0; j < node.outputs.length; j++) {
            if (tensorsLeadToY[node.outputs[j].id]) {
                for (const inputName in nodeInputs) {
                    tensorsLeadToY[nodeInputs[inputName].id] = true;
                    nodesToY[node.id] = true;
                }
                break;
            }
        }
    }
    // Return the paths that come from x and lead to y.
    const filteredTape = [];
    for (let i = 0; i < tape.length; i++) {
        const node = tape[i];
        if (nodesFromX[node.id] && nodesToY[node.id]) {
            // Prune the inputs from the node that aren't a function of x.
            const prunedInputs = {};
            for (const inputName in node.inputs) {
                const nodeInput = node.inputs[inputName];
                if (tensorsFromX[nodeInput.id]) {
                    prunedInputs[inputName] = nodeInput;
                }
            }
            // Copy the node and overwrite inputsAndArgs to the pruned version.
            const prunedNode = Object.assign({}, node);
            prunedNode.inputs = prunedInputs;
            prunedNode.outputs = node.outputs;
            filteredTape.push(prunedNode);
        }
    }
    return filteredTape;
}
/**
 * Backpropagate gradients through the filtered TapeNodes.
 *
 * @param tensorAccumulatedGradientMap A map of Tensor to its gradient. This map
 * is mutated by this method.
 * @param filteredTape The filtered TapeNodes to backprop through.
 * @param tidy Scoping function (engine.tidy) wrapped around each gradient
 * function call to contain intermediate tensors it creates.
 * @param add Binary op used to accumulate two gradients for the same input
 * tensor.
 * @throws If a node has no gradient function, if a gradient function omits an
 * input, or if a produced gradient has the wrong dtype or shape.
 */
function backpropagateGradients(tensorAccumulatedGradientMap, filteredTape, tidy, add) {
    // Walk the tape backward and keep a map of Tensor to its gradient.
    for (let i = filteredTape.length - 1; i >= 0; i--) {
        const node = filteredTape[i];
        const dys = [];
        node.outputs.forEach(o => {
            const gradTensor = tensorAccumulatedGradientMap[o.id];
            if (gradTensor != null) {
                dys.push(gradTensor);
            }
            else {
                // This particular output is not in the back-propagation subgraph, so it
                // does not affect the final output, thus we put null for its dy.
                dys.push(null);
            }
        });
        if (node.gradient == null) {
            throw new Error(`Cannot compute gradient: gradient function not found ` +
                `for ${node.kernelName}.`);
        }
        // Backprop dy through this node and accumulate gradients over the inputs.
        const inputGradients = node.gradient(dys);
        for (const inputName in node.inputs) {
            if (!(inputName in inputGradients)) {
                throw new Error(`Cannot backprop through input ${inputName}. ` +
                    `Available gradients found: ${Object.keys(inputGradients)}.`);
            }
            // Call the gradient function.
            const dx = tidy(() => inputGradients[inputName]());
            if (dx.dtype !== 'float32') {
                throw new Error(`Error in gradient for op ${node.kernelName}. The gradient of input ` +
                    `${inputName} must have 'float32' dtype, but has '${dx.dtype}'`);
            }
            const x = node.inputs[inputName];
            if (!util_base["a" /* arraysEqual */](dx.shape, x.shape)) {
                throw new Error(`Error in gradient for op ${node.kernelName}. The gradient of input ` +
                    `'${inputName}' has shape '${dx.shape}', which does not match ` +
                    `the shape of the input '${x.shape}'`);
            }
            if (tensorAccumulatedGradientMap[x.id] == null) {
                tensorAccumulatedGradientMap[x.id] = dx;
            }
            else {
                // Accumulate into the map, then dispose the superseded gradient
                // tensor (order matters: dispose only after `add` has read it).
                const curGradient = tensorAccumulatedGradientMap[x.id];
                tensorAccumulatedGradientMap[x.id] = add(curGradient, dx);
                curGradient.dispose();
            }
        }
    }
}
//# sourceMappingURL=tape.js.map
// EXTERNAL MODULE: ./node_modules/@tensorflow/tfjs-core/dist/tensor.js + 1 modules
var tensor = __webpack_require__(6);
// EXTERNAL MODULE: ./node_modules/@tensorflow/tfjs-core/dist/tensor_util.js
var tensor_util = __webpack_require__(23);
// CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/engine.js
/**
* @license
* Copyright 2018 Google LLC. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================================
*/
/**
 * Distinguishes a registered-kernel invocation (which carries a kernelName)
 * from a custom-grad invocation (forwardFunc/backwardsFunc, no kernelName).
 */
function isRegisteredKernelInvocation(kernelInvocation) {
    const { kernelName } = kernelInvocation;
    return kernelName !== null && kernelName !== undefined;
}
class EngineState {
    /** Per-engine mutable bookkeeping: variables, tape state and counters. */
    constructor() {
        // Public since optimizers will use it.
        this.registeredVariables = {};
        this.nextTapeNodeId = 0;
        // Memory accounting.
        this.numBytes = 0;
        this.numTensors = 0;
        this.numStringTensors = 0;
        this.numDataBuffers = 0;
        // Nesting depth of tf.grad() calls: 1 for first-order gradients, 2 for
        // second-order, and so on. Used to decide when the tape is removable
        // after a backprop.
        this.gradientDepth = 0;
        // Nesting depth of kernel calls; the tape is turned off while > 1.
        this.kernelDepth = 0;
        this.scopeStack = [];
        // Data moves per kernel execution, kept as a stack because kernels can
        // invoke other kernels recursively.
        this.numDataMovesStack = [];
        this.nextScopeId = 0;
        this.tensorInfo = new WeakMap();
        this.profiling = false;
        this.activeProfile = {
            newBytes: 0,
            newTensors: 0,
            peakBytes: 0,
            kernels: [],
            result: null,
            // Unique kernel names, in first-seen order.
            get kernelNames() {
                return Array.from(new Set(this.kernels.map(k => k.name)));
            }
        };
    }
    /** Disposes every registered variable. */
    dispose() {
        for (const variableName of Object.keys(this.registeredVariables)) {
            this.registeredVariables[variableName].dispose();
        }
    }
}
class engine_Engine {
    /**
     * @param ENV The environment (flag registry) consulted for settings such
     *     as DEBUG and IS_TEST.
     */
    constructor(ENV) {
        this.ENV = ENV;
        // Instantiated backends, keyed by name.
        this.registry = {};
        // Backend factories, keyed by name: {factory, priority}.
        this.registryFactory = {};
        // Monotonic id used to invalidate stale async backend initializations.
        this.pendingBackendInitId = 0;
        this.state = new EngineState();
    }
    /**
     * Resolves when a backend is initialized. Waits for any pending async
     * initialization; otherwise tries registered backends in priority order
     * and activates the first one that initializes successfully.
     */
    async ready() {
        if (this.pendingBackendInit != null) {
            return this.pendingBackendInit.then(() => { });
        }
        if (this.backendInstance != null) {
            return;
        }
        const sortedBackends = this.getSortedBackends();
        for (let i = 0; i < sortedBackends.length; i++) {
            const backendName = sortedBackends[i];
            const success = await this.initializeBackend(backendName).success;
            if (success) {
                await this.setBackend(backendName);
                return;
            }
        }
        throw new Error(`Could not initialize any backends, all backend initializations ` +
            `failed.`);
    }
    /**
     * The active backend instance. Lazily initializes the best available
     * backend on first access; throws when the only candidate initializes
     * asynchronously (callers must await tf.ready()/tf.setBackend() first).
     */
    get backend() {
        if (this.pendingBackendInit != null) {
            throw new Error(`Backend '${this.backendName}' has not yet been initialized. Make ` +
                `sure to await tf.ready() or await tf.setBackend() before calling ` +
                `other methods`);
        }
        if (this.backendInstance == null) {
            const { name, asyncInit } = this.initializeBackendsAndReturnBest();
            if (asyncInit) {
                throw new Error(`The highest priority backend '${name}' has not yet been ` +
                    `initialized. Make sure to await tf.ready() or ` +
                    `await tf.setBackend() before calling other methods`);
            }
            this.setBackend(name);
        }
        return this.backendInstance;
    }
    /** Returns the names of all registered backend factories. */
    backendNames() {
        return Object.keys(this.registryFactory);
    }
    /**
     * Returns the initialized backend with the given name, initializing it
     * from its factory if necessary. Returns null when the name is unknown or
     * the backend's initialization is asynchronous and still pending.
     */
    findBackend(backendName) {
        if (!(backendName in this.registry)) {
            // If the backend hasn't been initialized but we have a registry entry for
            // it, initialize it and return it.
            if (backendName in this.registryFactory) {
                const { asyncInit } = this.initializeBackend(backendName);
                if (asyncInit) {
                    // Backend is not ready yet.
                    return null;
                }
            }
            else {
                return null;
            }
        }
        return this.registry[backendName];
    }
    /** Returns the factory function registered under `backendName`, or null. */
    findBackendFactory(backendName) {
        if (!(backendName in this.registryFactory)) {
            return null;
        }
        return this.registryFactory[backendName].factory;
    }
    /**
     * Registers a backend factory under `backendName` with a selection
     * priority (higher wins). Returns false — keeping the existing factory —
     * when the name is already registered.
     */
    registerBackend(backendName, factory, priority = 1) {
        if (backendName in this.registryFactory) {
            console.warn(`${backendName} backend was already registered. ` +
                `Reusing existing backend factory.`);
            return false;
        }
        this.registryFactory[backendName] = { factory, priority };
        return true;
    }
    /**
     * Makes `backendName` the active backend, initializing it from its
     * factory if needed. Resolves to true on success, false when the
     * backend's initialization fails.
     */
    async setBackend(backendName) {
        if (this.registryFactory[backendName] == null) {
            throw new Error(`Backend name '${backendName}' not found in registry`);
        }
        this.backendName = backendName;
        if (this.registry[backendName] == null) {
            this.backendInstance = null;
            const { success, asyncInit } = this.initializeBackend(backendName);
            const result = asyncInit ? await success : success;
            if (!result) {
                return false;
            }
        }
        this.backendInstance = this.registry[backendName];
        this.setupRegisteredKernels();
        // Reset the profiler.
        this.profiler = new profiler_Profiler(this.backendInstance);
        return true;
    }
    /** Runs the setupFunc of every kernel registered for the active backend. */
    setupRegisteredKernels() {
        const kernels = Object(kernel_registry["d" /* getKernelsForBackend */])(this.backendName);
        kernels.forEach(kernel => {
            if (kernel.setupFunc != null) {
                kernel.setupFunc(this.backendInstance);
            }
        });
    }
    /** Runs the disposeFunc of every kernel registered for `backendName`. */
    disposeRegisteredKernels(backendName) {
        const kernels = Object(kernel_registry["d" /* getKernelsForBackend */])(backendName);
        kernels.forEach(kernel => {
            if (kernel.disposeFunc != null) {
                kernel.disposeFunc(this.registry[backendName]);
            }
        });
    }
    /**
     * Initializes a backend by looking up the backend name in the factory
     * registry and calling the factory method. Returns a boolean representing
     * whether the initialization of the backend succeeded. Throws an error if
     * there is no backend in the factory registry.
     *
     * @returns `{success, asyncInit}` — `success` is a boolean for synchronous
     *     factories or a Promise<boolean> when the factory returned a
     *     promise (asyncInit true).
     */
    initializeBackend(backendName) {
        const registryFactoryEntry = this.registryFactory[backendName];
        if (registryFactoryEntry == null) {
            throw new Error(`Cannot initialize backend ${backendName}, no registration found.`);
        }
        try {
            const backend = registryFactoryEntry.factory();
            /* Test if the factory returns a promise.
            Done in a more liberal way than
            previous 'Promise.resolve(backend)===backend'
            as we needed to account for custom Promise
            implementations (e.g. Angular) */
            if (backend && !(backend instanceof backends_backend["b" /* KernelBackend */]) &&
                typeof backend.then === 'function') {
                // Each async init gets a fresh id; a later setBackend/removeBackend
                // bumps the counter, making this promise's result stale.
                const promiseId = ++this.pendingBackendInitId;
                const success = backend
                    .then(backendInstance => {
                    // Outdated promise. Another backend was set in the meantime.
                    if (promiseId < this.pendingBackendInitId) {
                        return false;
                    }
                    this.registry[backendName] = backendInstance;
                    this.pendingBackendInit = null;
                    return true;
                })
                    .catch(err => {
                    // Outdated promise. Another backend was set in the meantime.
                    if (promiseId < this.pendingBackendInitId) {
                        return false;
                    }
                    this.pendingBackendInit = null;
                    console.warn(`Initialization of backend ${backendName} failed`);
                    console.warn(err.stack || err.message);
                    return false;
                });
                this.pendingBackendInit = success;
                return { success, asyncInit: true };
            }
            else {
                this.registry[backendName] = backend;
                return { success: true, asyncInit: false };
            }
        }
        catch (err) {
            // Synchronous factory failure: warn and report failure rather than
            // propagating, so other backends can still be tried.
            console.warn(`Initialization of backend ${backendName} failed`);
            console.warn(err.stack || err.message);
            return { success: false, asyncInit: false };
        }
    }
    /**
     * Unregisters `backendName`: disposes its kernels and instance (if
     * initialized), removes its factory, and clears the active backend if it
     * was the one removed. Any pending async init for it is invalidated.
     */
    removeBackend(backendName) {
        if (!(backendName in this.registryFactory)) {
            throw new Error(`${backendName} backend not found in registry`);
        }
        if (this.backendName === backendName && this.pendingBackendInit != null) {
            // There is a pending promise of the backend we want to remove. Make it
            // obsolete.
            this.pendingBackendInitId++;
        }
        if (backendName in this.registry) {
            this.disposeRegisteredKernels(backendName);
            this.registry[backendName].dispose();
            delete this.registry[backendName];
        }
        delete this.registryFactory[backendName];
        // Unset the backend if it is active.
        if (this.backendName === backendName) {
            this.pendingBackendInit = null;
            this.backendName = null;
            this.backendInstance = null;
        }
    }
    /** Registered backend names, highest priority first. */
    getSortedBackends() {
        if (Object.keys(this.registryFactory).length === 0) {
            throw new Error('No backend found in registry.');
        }
        return Object.keys(this.registryFactory).sort((a, b) => {
            // Highest priority comes first.
            return this.registryFactory[b].priority -
                this.registryFactory[a].priority;
        });
    }
    /**
     * Tries backends in priority order and returns the first whose
     * initialization succeeded (or is pending, when asyncInit is true).
     */
    initializeBackendsAndReturnBest() {
        const sortedBackends = this.getSortedBackends();
        for (let i = 0; i < sortedBackends.length; i++) {
            const backendName = sortedBackends[i];
            const { success, asyncInit } = this.initializeBackend(backendName);
            if (asyncInit || success) {
                return { name: backendName, asyncInit };
            }
        }
        throw new Error(`Could not initialize any backends, all backend initializations ` +
            `failed.`);
    }
    /**
     * Moves the data behind `dataId` from its current backend to `backend`:
     * reads the values synchronously, disposes them on the source backend and
     * writes them (with the preserved refCount) to the destination.
     */
    moveData(backend, dataId) {
        const info = this.state.tensorInfo.get(dataId);
        const srcBackend = info.backend;
        // readSync forces the values out of the source backend before disposal.
        const values = this.readSync(dataId);
        const refCount = srcBackend.refCount(dataId);
        // Delete the tensor from the old backend and move it to the new
        // backend.
        srcBackend.disposeData(dataId, true);
        info.backend = backend;
        backend.move(dataId, values, info.shape, info.dtype, refCount);
        if (this.shouldCheckForMemLeaks()) {
            // Track the number of moves during a kernel execution to correctly
            // detect memory leaks.
            this.state.numDataMovesStack[this.state.numDataMovesStack.length - 1]++;
        }
    }
    /**
     * Runs `fn` inside a new scope and disposes intermediate tensors created
     * in it once it returns (the returned tensors survive).
     *
     * @param nameOrFn Either the function to run, or a scope name when `fn`
     *     is also given.
     * @param fn The function to run (when a name was supplied).
     * @returns Whatever `fn` returns. Returning a Promise is unsupported and
     *     logs an error.
     */
    tidy(nameOrFn, fn) {
        let name = null;
        if (fn == null) {
            // Called with only 1 argument.
            if (typeof nameOrFn !== 'function') {
                throw new Error('Please provide a function to tidy()');
            }
            fn = nameOrFn;
        }
        else {
            // Called with 2 arguments.
            if (typeof nameOrFn !== 'string' && !(nameOrFn instanceof String)) {
                throw new Error('When calling with two arguments, the first argument ' +
                    'to tidy() must be a string');
            }
            if (typeof fn !== 'function') {
                throw new Error('When calling with two arguments, the 2nd argument ' +
                    'to tidy() must be a function');
            }
            name = nameOrFn;
            // TODO(nsthorat,smilkov): Do operation logging and performance
            // profiling.
        }
        let result;
        // endScope(result) keeps the returned tensors alive while everything
        // else created in the scope is released.
        return this.scopedRun(() => this.startScope(name), () => this.endScope(result), () => {
            result = fn();
            if (result instanceof Promise) {
                console.error('Cannot return a Promise inside of tidy.');
            }
            return result;
        });
    }
    /**
     * Runs `f` bracketed by `start()`/`end()`. `end()` runs whether `f`
     * returns or throws; exceptions are re-thrown after cleanup.
     */
    scopedRun(start, end, f) {
        start();
        try {
            const res = f();
            end();
            return res;
        }
        catch (ex) {
            end();
            throw ex;
        }
    }
    // Monotonically increasing ids shared across Engine instances (statics,
    // initialized outside this view — presumably to 0; verify below the class).
    nextTensorId() {
        return engine_Engine.nextTensorId++;
    }
    nextVariableId() {
        return engine_Engine.nextVariableId++;
    }
    /**
     * This method is called instead of the public-facing tensor.clone() when
     * saving a tensor for backwards pass. It makes sure to add the clone
     * operation to the tape regardless of being called inside a kernel
     * execution.
     *
     * @param x The tensor to clone.
     * @returns The cloned tensor (an Identity kernel output).
     */
    clone(x) {
        const y = ENGINE.runKernel(kernel_names["rb" /* Identity */], { x });
        const inputs = { x };
        // Gradient of Identity: pass dy through, cast to float32.
        const grad = (dy) => ({
            x: () => {
                const dtype = 'float32';
                const gradInputs = { x: dy };
                const attrs = { dtype };
                return ENGINE.runKernel(kernel_names["w" /* Cast */], gradInputs,
                // tslint:disable-next-line: no-unnecessary-type-assertion
                attrs);
            }
        });
        const saved = [];
        // Record the clone on the tape under the current scope's name.
        this.addTapeNode(this.state.activeScope.name, inputs, [y], grad, saved, {});
        return y;
    }
    /**
     * Execute a kernel with the given name and return the output tensor.
     *
     * @param kernelName The name of the kernel to execute.
     * @param inputs A map of input names to tensors.
     * @param attrs A map of attribute names to their values. An attribute is a
     *     primitive (non-tensor) input to the kernel.
     * @throws When no kernel with `kernelName` is registered for the active
     *     backend.
     */
    runKernel(kernelName, inputs, attrs) {
        const hasKernel = Object(kernel_registry["c" /* getKernel */])(kernelName, this.backendName) != null;
        if (!hasKernel) {
            throw new Error(`Kernel '${kernelName}' not registered for backend '${this.backendName}'`);
        }
        return this.runKernelFunc({ kernelName, inputs, attrs });
    }
    /** Memory-leak checking is enabled only under the IS_TEST flag. */
    shouldCheckForMemLeaks() {
        return this.ENV.getBool('IS_TEST');
    }
    /**
     * Verifies that a kernel execution did not leak data ids: the ids present
     * after the run must equal the ids before, plus the kernel's outputs,
     * plus any data moves that happened during the run.
     *
     * @throws When leaked data ids are detected.
     */
    checkKernelForMemLeak(kernelName, numDataIdsBefore, outInfos) {
        const numDataIdsAfter = this.backend.numDataIds();
        // Count the number of data ids associated with the result of the kernel.
        let numOutputDataIds = 0;
        outInfos.forEach(info => {
            // Complex numbers allocate 3 data ids, one for 'real', one for
            // 'imaginary', and one for the container that holds the former two.
            numOutputDataIds += (info.dtype === 'complex64' ? 3 : 1);
        });
        // Account for the number of moves during kernel execution. A "data move"
        // can happen in the middle of a kernel execution, placing a new (key,value)
        // pair in the data storage. Since data moves have net zero effect (we
        // always remove the data from the old backend), we have to cancel them out
        // when detecting memory leaks.
        const numMoves = this.state.numDataMovesStack[this.state.numDataMovesStack.length - 1];
        const dataIdsLeaked = numDataIdsAfter - numDataIdsBefore - numOutputDataIds - numMoves;
        if (dataIdsLeaked > 0) {
            throw new Error(`Backend '${this.backendName}' has an internal memory leak ` +
                `(${dataIdsLeaked} data ids) after running '${kernelName}'`);
        }
    }
    /**
     * Internal helper method to execute a kernel Func
     *
     * Use `runKernel` to execute kernels from outside of engine.
     *
     * Accepts either a registered-kernel invocation ({kernelName, inputs,
     * attrs}) or a custom-grad invocation ({forwardFunc, backwardsFunc,
     * inputs, attrs}); dispatches accordingly, optionally profiles the run,
     * records the op on the tape when recording, and returns the output
     * tensor (or array of tensors when the kernel produced an array).
     */
    runKernelFunc(kernelParams) {
        let outputs;
        let saved = [];
        const isTapeOn = this.isTapeOn();
        // Snapshots for the profiling entry pushed at the end.
        const startingBytecount = this.state.numBytes;
        const startingNumTensors = this.state.numTensors;
        if (this.shouldCheckForMemLeaks()) {
            this.state.numDataMovesStack.push(0);
        }
        let kernelFunc;
        if (this.backendName == null) {
            // backend has not been initialized yet (backend initialization is lazy
            // can be deferred until an op/ kernel is run).
            // The below getter has side effects that will try to initialize the
            // backend and set properties like this.backendName
            // tslint:disable-next-line: no-unused-expression
            this.backend;
        }
        let out;
        const kernelOrScopeName = isRegisteredKernelInvocation(kernelParams) ?
            kernelParams.kernelName :
            this.state.activeScope != null ? this.state.activeScope.name : '';
        // Create the kernelFunc from either a registered kernel OR passed in
        // forward/backward functions (used by custom grad). In this context a
        // kernelFunc wraps a kernel implementation with some bookkeeping.
        if (isRegisteredKernelInvocation(kernelParams)) {
            const { kernelName, inputs, attrs } = kernelParams;
            if (this.backendName == null) {
                // backend has not been initialized yet (backend initialization is lazy
                // can be deferred until an op/ kernel is run).
                // The below getter has side effects that will try to initialize the
                // backend and set properties like this.backendName
                // tslint:disable-next-line: no-unused-expression
                this.backend;
            }
            const kernel = Object(kernel_registry["c" /* getKernel */])(kernelName, this.backendName);
            util_base["b" /* assert */](kernel != null, () => `Cannot find registered kernel '${kernelName}' for backend '${this.backendName}'`);
            kernelFunc = () => {
                const numDataIdsBefore = this.backend.numDataIds();
                out = kernel.kernelFunc({ inputs, attrs, backend: this.backend });
                const outInfos = Array.isArray(out) ? out : [out];
                if (this.shouldCheckForMemLeaks()) {
                    this.checkKernelForMemLeak(kernelName, numDataIdsBefore, outInfos);
                }
                const outTensors = outInfos.map((outInfo) => {
                    // todo (yassogba) remove this option (Tensor) when node backend
                    // methods have been modularized and they all return tensorInfo.
                    // TensorInfos do not have a rank attribute.
                    if (outInfo.rank != null) {
                        return outInfo;
                    }
                    const { dataId, shape, dtype } = outInfo;
                    return this.makeTensorFromDataId(dataId, shape, dtype);
                });
                // Save any required inputs and outputs.
                // Do not save unless we are recording to the tape. Otherwise it would
                // cause a mem leak since there would be no backprop for these tensors
                // (which would otherwise dispose them).
                if (isTapeOn) {
                    const tensorsToSave = this.getTensorsForGradient(kernelName, inputs, outTensors);
                    saved = this.saveTensorsForBackwardMode(tensorsToSave);
                }
                return outTensors;
            };
        }
        else {
            const { forwardFunc } = kernelParams;
            // Running a customGrad op.
            const saveFunc = (tensors) => {
                // Do not save unless we are recording to the tape. Otherwise it would
                // cause a mem leak since we would never run backprop, which disposes
                // the kept tensors.
                if (!isTapeOn) {
                    return;
                }
                saved = tensors.map(tensor => this.keep(this.clone(tensor)));
            };
            kernelFunc = () => {
                const numDataIdsBefore = this.backend.numDataIds();
                out = this.tidy(() => forwardFunc(this.backend, saveFunc));
                const outs = (Array.isArray(out) ? out : [out]);
                if (this.shouldCheckForMemLeaks()) {
                    // Scope name is used to print a more helpful error message if needed.
                    this.checkKernelForMemLeak(kernelOrScopeName, numDataIdsBefore, outs);
                }
                return outs;
            };
        }
        //
        // Run the kernelFunc. Optionally profiling it.
        //
        const { inputs, attrs } = kernelParams;
        const backwardsFunc = isRegisteredKernelInvocation(kernelParams) ?
            null :
            kernelParams.backwardsFunc;
        let kernelProfile;
        this.scopedRun(
        // Stop recording to a tape when running a kernel.
        () => this.state.kernelDepth++, () => this.state.kernelDepth--, () => {
            if (!this.ENV.getBool('DEBUG') && !this.state.profiling) {
                outputs = kernelFunc();
            }
            else {
                kernelProfile = this.profiler.profileKernel(kernelOrScopeName, inputs, () => kernelFunc());
                if (this.ENV.getBool('DEBUG')) {
                    this.profiler.logKernelProfile(kernelProfile);
                }
                outputs = kernelProfile.outputs;
            }
        });
        if (isTapeOn) {
            this.addTapeNode(kernelOrScopeName, inputs, outputs, backwardsFunc, saved, attrs);
        }
        if (this.state.profiling) {
            // kernelProfile is guaranteed set here: profiling forces the
            // profileKernel branch above.
            this.state.activeProfile.kernels.push({
                name: kernelOrScopeName,
                bytesAdded: this.state.numBytes - startingBytecount,
                totalBytesSnapshot: this.state.numBytes,
                tensorsAdded: this.state.numTensors - startingNumTensors,
                totalTensorsSnapshot: this.state.numTensors,
                inputShapes: Object.keys(inputs).map(key => inputs[key] != null ? inputs[key].shape : null),
                outputShapes: outputs.map(item => item.shape),
                kernelTimeMs: kernelProfile.timeMs,
                extraInfo: kernelProfile.extraInfo
            });
        }
        // Mirror the kernel's own arity: array in, array out.
        return (Array.isArray(out) ? outputs : outputs[0]);
    }
    /**
     * Saves tensors used in forward mode for use in backward mode.
     *
     * @param tensors the list of tensors to save.
     * @returns Kept clones of `tensors` (exempt from scope disposal until
     *     backprop runs).
     */
    saveTensorsForBackwardMode(tensors) {
        const saved = tensors.map(tensor => this.keep(this.clone(tensor)));
        return saved;
    }
    /**
     * Returns a list of tensors to save for a given gradient calculation.
     *
     * @param kernelName name of kernel to look up gradient for.
     * @param inputs a map of input tensors.
     * @param outputs an array of output tensors from forward mode of kernel.
     * @returns Inputs (all, or those named in the grad config) followed by
     *     the outputs flagged in `outputsToSave`; empty when no grad config
     *     is registered.
     */
    getTensorsForGradient(kernelName, inputs, outputs) {
        const gradConfig = Object(kernel_registry["b" /* getGradient */])(kernelName);
        if (gradConfig != null) {
            const inputsToSave = gradConfig.inputsToSave || [];
            const outputsToSave = gradConfig.outputsToSave || [];
            // If saveAllInputs is true, all inputs will be saved. Otherwise, inputs
            // specified in inputsToSave will be saved.
            let inputTensorsToSave;
            if (gradConfig.saveAllInputs) {
                util_base["b" /* assert */](Array.isArray(inputs), () => 'saveAllInputs is true, expected inputs to be an array.');
                inputTensorsToSave = Object.keys(inputs).map((key) => inputs[key]);
            }
            else {
                inputTensorsToSave = inputsToSave.map((inputName) => inputs[inputName]);
            }
            const outputTensorsToSave = outputs.filter((_, i) => outputsToSave[i]);
            return inputTensorsToSave.concat(outputTensorsToSave);
        }
        // We return an empty list rather than throw an error because the kernel we
        // are looking up may not actually be relevant to backproping through the
        // overall function
        //
        // See 'does not error if irrelevant (pruned) ops are missing grads' test
        // in gradients_test.ts for an example.
        return [];
    }
    /**
     * Internal method used by public APIs for tensor creation. Makes a new
     * tensor with the provided shape, dtype and values. It always
     * creates a new data id and writes the values to the underlying backend.
     *
     * @param values The values to write (strings are UTF-8 encoded first).
     * @param shape The tensor's shape.
     * @param dtype Defaults to 'float32'.
     * @param backend Defaults to the active backend.
     */
    makeTensor(values, shape, dtype, backend) {
        if (values == null) {
            throw new Error('Values passed to engine.makeTensor() are null');
        }
        dtype = dtype || 'float32';
        backend = backend || this.backend;
        let backendVals = values;
        if (dtype === 'string' && util_base["z" /* isString */](values[0])) {
            backendVals = values.map(d => util["encodeString"](d));
        }
        const dataId = backend.write(backendVals, shape, dtype);
        const t = new tensor["a" /* Tensor */](shape, dtype, dataId, this.nextTensorId());
        this.trackTensor(t, backend);
        // Count bytes for string tensors.
        if (dtype === 'string') {
            const info = this.state.tensorInfo.get(dataId);
            const newBytes = Object(util_base["f" /* bytesFromStringArray */])(backendVals);
            this.state.numBytes += newBytes - info.bytes;
            info.bytes = newBytes;
        }
        return t;
    }
    /**
     * Internal method used by backends. Makes a new tensor
     * that is a wrapper around an existing data id. It doesn't create
     * a new data id, only increments the ref count used in memory tracking.
     */
    makeTensorFromDataId(dataId, shape, dtype, backend) {
        dtype = dtype || 'float32';
        const t = new tensor["a" /* Tensor */](shape, dtype, dataId, this.nextTensorId());
        this.trackTensor(t, backend);
        return t;
    }
    /**
     * Creates and registers a Variable wrapping `initialValue` (cast to
     * `dtype` when given). Throws when the (possibly auto-generated) name is
     * already registered.
     */
    makeVariable(initialValue, trainable = true, name, dtype) {
        name = name || this.nextVariableId().toString();
        if (dtype != null && dtype !== initialValue.dtype) {
            initialValue = initialValue.cast(dtype);
        }
        const v = new tensor["c" /* Variable */](initialValue, trainable, name, this.nextTensorId());
        if (this.state.registeredVariables[v.name] != null) {
            throw new Error(`Variable with name ${v.name} was already registered`);
        }
        this.state.registeredVariables[v.name] = v;
        this.incRef(v, this.backend);
        return v;
    }
    /**
     * Registers tensor `a` in the engine's accounting: bumps tensor/byte
     * counters, records its backing data id on first sight, and (for
     * non-Variables) adds it to the active scope for auto-disposal.
     */
    trackTensor(a, backend) {
        this.state.numTensors++;
        if (a.dtype === 'string') {
            this.state.numStringTensors++;
        }
        // Bytes for complex numbers are counted by their components. Bytes for
        // string tensors are counted when writing values.
        let bytes = 0;
        if (a.dtype !== 'complex64' && a.dtype !== 'string') {
            bytes = a.size * util_base["g" /* bytesPerElement */](a.dtype);
        }
        this.state.numBytes += bytes;
        if (!this.state.tensorInfo.has(a.dataId)) {
            this.state.numDataBuffers++;
            this.state.tensorInfo.set(a.dataId, {
                backend: backend || this.backend,
                dtype: a.dtype,
                shape: a.shape,
                bytes
            });
        }
        if (!(a instanceof tensor["c" /* Variable */])) {
            this.track(a);
        }
    }
    // Track the tensor by dataId and increase the refCount for the dataId in the
    // backend.
    // TODO(pyu10055): This is currently used by makeVariable method, to increase
    // refCount on the backend for the dataId. It can potentially be replaced with
    // Identity op indead of calling backend directly.
    incRef(a, backend) {
        this.trackTensor(a, backend);
        // NOTE(review): increments on `this.backend`, not the `backend`
        // parameter passed above — confirm this asymmetry is intentional.
        this.backend.incRef(a.dataId);
    }
    /**
     * Drops the bookkeeping entry for `dataId`, but only if it is still owned
     * by `backend` (a data move may have reassigned it).
     */
    removeDataId(dataId, backend) {
        if (this.state.tensorInfo.has(dataId) &&
            this.state.tensorInfo.get(dataId).backend === backend) {
            this.state.tensorInfo.delete(dataId);
            this.state.numDataBuffers--;
        }
    }
    /**
     * Disposes tensor `a`: decrements tensor/byte counters and asks the
     * owning backend to release the data; the data-id entry is removed only
     * when the backend reports the data is actually gone (ref count zero).
     * No-op when the tensor is already untracked.
     */
    disposeTensor(a) {
        if (!this.state.tensorInfo.has(a.dataId)) {
            return;
        }
        const info = this.state.tensorInfo.get(a.dataId);
        this.state.numTensors--;
        if (a.dtype === 'string') {
            this.state.numStringTensors--;
            this.state.numBytes -= info.bytes;
        }
        // Don't count bytes for complex numbers as they are counted by their
        // components.
        if (a.dtype !== 'complex64' && a.dtype !== 'string') {
            const bytes = a.size * util_base["g" /* bytesPerElement */](a.dtype);
            this.state.numBytes -= bytes;
        }
        // Remove the reference to dataId if backend dispose the data successfully
        if (info.backend.disposeData(a.dataId)) {
            this.removeDataId(a.dataId, info.backend);
        }
        // TODO(nsthorat): Construct an error and save the stack trace for
        // debugging when in debug mode. Creating a stack trace is too expensive
        // to do unconditionally.
    }
    /** Disposes and unregisters every registered variable. */
    disposeVariables() {
        for (const varName in this.state.registeredVariables) {
            const v = this.state.registeredVariables[varName];
            this.disposeVariable(v);
        }
    }
    /** Disposes variable `v` and removes it from the registry if present. */
    disposeVariable(v) {
        this.disposeTensor(v);
        if (this.state.registeredVariables[v.name] != null) {
            delete this.state.registeredVariables[v.name];
        }
    }
    /**
     * Returns the backend's memory info augmented with the engine's
     * tensor/buffer/byte counters. Flagged unreliable when string tensors
     * exist, since their byte count is approximate.
     */
    memory() {
        const info = this.backend.memory();
        info.numTensors = this.state.numTensors;
        info.numDataBuffers = this.state.numDataBuffers;
        info.numBytes = this.state.numBytes;
        if (this.state.numStringTensors > 0) {
            info.unreliable = true;
            if (info.reasons == null) {
                info.reasons = [];
            }
            info.reasons.push('Memory usage by string tensors is approximate ' +
                '(2 bytes per character)');
        }
        return info;
    }
async profile(query) {
    // Run `query` with kernel-level profiling enabled and return a summary
    // with per-kernel timings plus tensor/byte deltas for the whole query.
    this.state.profiling = true;
    const startBytes = this.state.numBytes;
    const startNumTensors = this.state.numTensors;
    this.state.activeProfile.kernels = [];
    this.state.activeProfile.result = await query();
    this.state.profiling = false;
    // Peak memory is the largest snapshot observed across recorded kernels.
    this.state.activeProfile.peakBytes = Math.max(...this.state.activeProfile.kernels.map(d => d.totalBytesSnapshot));
    this.state.activeProfile.newBytes = this.state.numBytes - startBytes;
    this.state.activeProfile.newTensors =
        this.state.numTensors - startNumTensors;
    // Kernel timings start out as promises; resolve them before returning.
    for (const kernel of this.state.activeProfile.kernels) {
        kernel.kernelTimeMs = await kernel.kernelTimeMs;
        kernel.extraInfo = await kernel.extraInfo;
    }
    return this.state.activeProfile;
}
isTapeOn() {
return this.state.gradientDepth > 0 && this.state.kernelDepth === 0;
}
addTapeNode(kernelName, inputs, outputs, gradientsFunc, saved, attrs) {
    // Record one executed kernel on the active gradient tape.
    const tapeNode = { id: this.state.nextTapeNodeId++, kernelName, inputs, outputs, saved };
    // A gradient registered in the kernel registry takes precedence over
    // the ad-hoc gradient function passed by the caller.
    const gradConfig = Object(kernel_registry["b" /* getGradient */])(kernelName);
    if (gradConfig != null) {
        gradientsFunc = gradConfig.gradFunc;
    }
    if (gradientsFunc != null) {
        tapeNode.gradient = (dys) => {
            // TODO(smilkov): To optimize back-prop, pass dys that are not used in
            // the backprop graph to the user as null instead of zeros
            dys = dys.map((dy, i) => {
                if (dy == null) {
                    // Missing upstream gradient: substitute a zeros tensor
                    // matching the corresponding output.
                    const output = outputs[i];
                    const vals = util_base["F" /* makeZerosTypedArray */](output.size, output.dtype);
                    return this.makeTensor(vals, output.shape, output.dtype);
                }
                return dy;
            });
            // Grad functions of ops with single outputs expect a dy, while ops
            // with multiple outputs expect dys (array of dy).
            return gradientsFunc(dys.length > 1 ? dys : dys[0], saved, attrs);
        };
    }
    this.state.activeTape.push(tapeNode);
}
keep(result) {
    // Mark the tensor as globally kept so scope cleanup never disposes it.
    result.kept = true;
    return result;
}
startTape() {
    // Entering the outermost gradient scope starts a fresh recording tape;
    // nested calls only increase the depth counter.
    if (this.state.gradientDepth === 0) {
        this.state.activeTape = [];
    }
    this.state.gradientDepth++;
}
endTape() {
    // Leave one level of gradient scope (pairs with startTape()).
    this.state.gradientDepth--;
}
/**
* Start a scope. Use this with endScope() to achieve the same functionality
* as scope() without the need for a function closure.
*/
startScope(name) {
const scopeInfo = {
track: [],
name: 'unnamed scope',
id: this.state.nextScopeId++
};
if (name) {
scopeInfo.name = name;
}
this.state.scopeStack.push(scopeInfo);
this.state.activeScope = scopeInfo;
}
/**
* End a scope. Use this with startScope() to achieve the same functionality
* as scope() without the need for a function closure.
*/
endScope(result) {
    // Close the innermost scope: dispose tensors allocated in it, except
    // those escaping via `result` or marked as globally kept.
    const tensorsToTrackInParent = Object(tensor_util["getTensorsInContainer"])(result);
    const tensorsToTrackInParentSet = new Set(tensorsToTrackInParent.map(t => t.id));
    // Dispose the arrays tracked in this scope.
    for (let i = 0; i < this.state.activeScope.track.length; i++) {
        const tensor = this.state.activeScope.track[i];
        if (!tensor.kept && !tensorsToTrackInParentSet.has(tensor.id)) {
            tensor.dispose();
        }
    }
    // Pop this scope and make the enclosing one (if any) active again.
    const oldScope = this.state.scopeStack.pop();
    this.state.activeScope = this.state.scopeStack.length === 0 ?
        null :
        this.state.scopeStack[this.state.scopeStack.length - 1];
    // Track the current result in the parent scope.
    tensorsToTrackInParent.forEach(tensor => {
        // Only track the tensor if was allocated in the inner scope and is not
        // globally kept.
        if (!tensor.kept && tensor.scopeId === oldScope.id) {
            this.track(tensor);
        }
    });
}
/**
* Returns gradients of `f` with respect to each of the `xs`. The gradients
* returned are of the same length as `xs`, but some might be null if `f`
* was not a function of that `x`. It also takes optional dy to multiply the
* gradient, which defaults to `1`.
*/
gradients(f, xs, dy, allowNoGradients = false) {
    // Run f() while recording a tape, then backpropagate from y = f() to
    // each x in xs. Returns { value: y, grads } with grads aligned to xs.
    util_base["b" /* assert */](xs.length > 0, () => 'gradients() received an empty list of xs.');
    if (dy != null && dy.dtype !== 'float32') {
        throw new Error(`dy must have 'float32' dtype, but has '${dy.dtype}'`);
    }
    const y = this.scopedRun(() => this.startTape(), () => this.endTape(), () => this.tidy('forward', f));
    util_base["b" /* assert */](y instanceof tensor["a" /* Tensor */], () => 'The result y returned by f() must be a tensor.');
    // Filter out the nodes that don't connect x => y.
    const filteredTape = getFilteredNodesXToY(this.state.activeTape, xs, y);
    if (!allowNoGradients && filteredTape.length === 0 && xs.length > 0) {
        throw new Error('Cannot compute gradient of y=f(x) with respect to x. Make sure ' +
            'that the f you passed encloses all operations that lead from x ' +
            'to y.');
    }
    return this.tidy('backward', () => {
        const accumulatedGradientMap = {};
        // Seed the output gradient with ones unless the caller supplied dy.
        accumulatedGradientMap[y.id] = (dy == null) ? ones(y.shape) : dy;
        // Backprop gradients through the filtered nodes.
        backpropagateGradients(accumulatedGradientMap, filteredTape,
        // Pass the tidy function to avoid circular dep with `tape.ts`.
        f => this.tidy(f),
        // Pass an add function to avoid a circular dep with `tape.ts`.
        engine_add);
        const grads = xs.map(x => accumulatedGradientMap[x.id]);
        if (this.state.gradientDepth === 0) {
            // This means that we are not computing higher-order gradients
            // and can clean up the tape.
            this.state.activeTape.forEach(node => {
                for (const tensor of node.saved) {
                    tensor.dispose();
                }
            });
            this.state.activeTape = null;
        }
        return { value: y, grads };
    });
}
customGrad(f) {
    // Wrap f (which returns { value, gradFunc }) into an op whose forward
    // pass is f's value and whose backward pass is f's gradFunc.
    util_base["b" /* assert */](util_base["u" /* isFunction */](f), () => 'The f passed in customGrad(f) must be a function.');
    return (...inputs) => {
        util_base["b" /* assert */](inputs.every(t => t instanceof tensor["a" /* Tensor */]), () => 'The args passed in customGrad(f)(x1, x2,...) must all be ' +
            'tensors');
        // `res` is captured so the backward pass can reach f's gradFunc.
        let res;
        const inputMap = {};
        inputs.forEach((input, i) => {
            inputMap[i] = input;
        });
        const forwardFunc = (_, save) => {
            res = f(...[...inputs, save]);
            util_base["b" /* assert */](res.value instanceof tensor["a" /* Tensor */], () => 'The function f passed in customGrad(f) must return an ' +
                'object where `obj.value` is a tensor');
            util_base["b" /* assert */](util_base["u" /* isFunction */](res.gradFunc), () => 'The function f passed in customGrad(f) must return an ' +
                'object where `obj.gradFunc` is a function.');
            return res.value;
        };
        const backwardsFunc = (dy, saved) => {
            const gradRes = res.gradFunc(dy, saved);
            // gradFunc may return a single tensor or an array of tensors;
            // normalize to an array aligned with `inputs`.
            const grads = Array.isArray(gradRes) ? gradRes : [gradRes];
            util_base["b" /* assert */](grads.length === inputs.length, () => 'The function f passed in customGrad(f) must return an ' +
                'object where `obj.gradFunc` is a function that returns ' +
                'the same number of tensors as inputs passed to f(...).');
            util_base["b" /* assert */](grads.every(t => t instanceof tensor["a" /* Tensor */]), () => 'The function f passed in customGrad(f) must return an ' +
                'object where `obj.gradFunc` is a function that returns ' +
                'a list of only tensors.');
            const gradMap = {};
            grads.forEach((grad, i) => {
                gradMap[i] = () => grad;
            });
            return gradMap;
        };
        return this.runKernelFunc({
            forwardFunc,
            backwardsFunc,
            inputs: inputMap,
        });
    };
}
readSync(dataId) {
// Route the read to the correct backend.
const info = this.state.tensorInfo.get(dataId);
return info.backend.readSync(dataId);
}
read(dataId) {
// Route the read to the correct backend.
const info = this.state.tensorInfo.get(dataId);
return info.backend.read(dataId);
}
async time(query) {
const start = Object(util["now"])();
const timingInfo = await this.backend.time(query);
timingInfo.wallMs = Object(util["now"])() - start;
return timingInfo;
}
/**
* Tracks a Tensor in the current scope to be automatically cleaned up
* when the current scope ends, and returns the value.
*
* @param result The Tensor to track in the current scope.
*/
track(result) {
    // Register the tensor with the innermost open scope (if any) so that
    // endScope() can dispose it automatically. Returns the same tensor.
    if (this.state.activeScope != null) {
        result.scopeId = this.state.activeScope.id;
        this.state.activeScope.track.push(result);
    }
    return result;
}
get registeredVariables() {
    // Expose the engine's variable registry (name -> Variable).
    return this.state.registeredVariables;
}
/**
* Resets the engine state. Removes all backends but does not remove
* registered backend factories.
*/
reset() {
    // Make any pending promise obsolete.
    this.pendingBackendInitId++;
    this.state.dispose();
    this.ENV.reset();
    this.state = new EngineState();
    // Dispose every registered backend and its kernels. Backend factories
    // survive, so backends can be re-created on demand afterwards.
    for (const backendName in this.registry) {
        this.disposeRegisteredKernels(backendName);
        this.registry[backendName].dispose();
        delete this.registry[backendName];
    }
    this.backendName = null;
    this.backendInstance = null;
    this.pendingBackendInit = null;
}
}
// Monotonic id counters shared by every Engine instance in this bundle.
engine_Engine.nextTensorId = 0;
engine_Engine.nextVariableId = 0;
// Engine-local `ones` used to seed default upstream gradients; duplicated
// here to avoid a circular dependency on the ops package.
function ones(shape) {
    const values = Object(util_base["D" /* makeOnesTypedArray */])(Object(util_base["O" /* sizeFromShape */])(shape), 'float32');
    return ENGINE.makeTensor(values, shape, 'float32');
}
function getOrMakeEngine() {
    // The engine is cached on a global namespace so that multiple bundled
    // copies of tfjs-core share a single engine instance.
    const ns = Object(global_util["b" /* getGlobalNamespace */])();
    if (ns._tfengine == null) {
        const environment = new dist_environment["b" /* Environment */](ns);
        ns._tfengine = new engine_Engine(environment);
    }
    Object(dist_environment["d" /* setEnvironmentGlobal */])(ns._tfengine.ENV);
    // Tell the current tensor interface that the global engine is responsible
    // for tracking.
    Object(tensor["g" /* setTensorTracker */])(() => ns._tfengine);
    return ns._tfengine;
}
// The singleton engine used by all ops in this module.
const ENGINE = getOrMakeEngine();
/**
* A implementation of the add op for use within engine and tape.
*
* This allows us to avoid a circular dependency between add.ts and engine.
* It is exported to be available in tape tests.
*/
function engine_add(a, b) {
    // We duplicate Add here to avoid a circular dependency with add.ts.
    // Dispatches the Add kernel directly on the global engine.
    const inputs = { a, b };
    return ENGINE.runKernel(kernel_names["d" /* Add */], inputs);
}
//# sourceMappingURL=engine.js.map
/***/ }),
/* 6 */
/***/ (function(module, __webpack_exports__, __webpack_require__) {
"use strict";
// EXPORTS
__webpack_require__.d(__webpack_exports__, "b", function() { return /* binding */ tensor_TensorBuffer; });
__webpack_require__.d(__webpack_exports__, "g", function() { return /* binding */ setTensorTracker; });
__webpack_require__.d(__webpack_exports__, "f", function() { return /* binding */ setOpHandler; });
__webpack_require__.d(__webpack_exports__, "e", function() { return /* binding */ setDeprecationWarningFn; });
__webpack_require__.d(__webpack_exports__, "a", function() { return /* binding */ tensor_Tensor; });
__webpack_require__.d(__webpack_exports__, "d", function() { return /* binding */ getGlobalTensorClass; });
__webpack_require__.d(__webpack_exports__, "c", function() { return /* binding */ tensor_Variable; });
// EXTERNAL MODULE: ./node_modules/@tensorflow/tfjs-core/dist/global_util.js
var global_util = __webpack_require__(109);
// EXTERNAL MODULE: ./node_modules/@tensorflow/tfjs-core/dist/util_base.js
var util_base = __webpack_require__(8);
// CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/tensor_format.js
/**
* @license
* Copyright 2018 Google LLC. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================================
*/
// Display tuning knobs used by tensorToString() below.
// Maximum number of values before we decide to show ellipsis.
const FORMAT_LIMIT_NUM_VALS = 20;
// Number of first and last values to show when displaying a, b,...,y, z.
const FORMAT_NUM_FIRST_LAST_VALS = 3;
// Number of significant digits to show.
const FORMAT_NUM_SIG_DIGITS = 7;
// Pretty-print a flat value array as a (possibly nested, possibly elided)
// tensor literal. Columns are padded to the widest value so rows align.
function tensorToString(vals, shape, dtype, verbose) {
    const strides = Object(util_base["j" /* computeStrides */])(shape);
    const padPerCol = computeMaxSizePerColumn(vals, shape, dtype, strides);
    const rank = shape.length;
    const valsLines = subTensorToString(vals, shape, dtype, strides, padPerCol);
    const lines = ['Tensor'];
    if (verbose) {
        // Verbose mode prefixes a metadata header before the values.
        lines.push(` dtype: ${dtype}`);
        lines.push(` rank: ${rank}`);
        lines.push(` shape: [${shape}]`);
        lines.push(` values:`);
    }
    lines.push(valsLines.map(l => ' ' + l).join('\n'));
    return lines.join('\n');
}
// Compute, per innermost-dimension column, the widest formatted value so
// that subTensorToString can pad columns for alignment. For rank <= 1 the
// pads stay 0 (no column alignment needed).
function computeMaxSizePerColumn(vals, shape, dtype, strides) {
    const n = Object(util_base["O" /* sizeFromShape */])(shape);
    const numCols = strides[strides.length - 1];
    const padPerCol = new Array(numCols).fill(0);
    const rank = shape.length;
    // complex64 values are formatted as [re, im] tuples, not raw numbers.
    const valuesOrTuples = dtype === 'complex64' ? createComplexTuples(vals) : vals;
    if (rank > 1) {
        for (let row = 0; row < n / numCols; row++) {
            const offset = row * numCols;
            for (let j = 0; j < numCols; j++) {
                padPerCol[j] = Math.max(padPerCol[j], valToString(valuesOrTuples[offset + j], 0, dtype).length);
            }
        }
    }
    return padPerCol;
}
// Format a single element (complex tuple, string, bool or number) and
// right-pad it to the column width `pad`.
function valToString(val, pad, dtype) {
    let valStr;
    if (Array.isArray(val)) {
        // Complex values arrive as [real, imag] tuples.
        valStr = `${parseFloat(val[0].toFixed(FORMAT_NUM_SIG_DIGITS))} + ` +
            `${parseFloat(val[1].toFixed(FORMAT_NUM_SIG_DIGITS))}j`;
    }
    else if (Object(util_base["z" /* isString */])(val)) {
        valStr = `'${val}'`;
    }
    else if (dtype === 'bool') {
        valStr = boolNumToString(val);
    }
    else {
        // parseFloat(toFixed()) rounds to the significant digits and then
        // trims trailing zeros introduced by the rounding.
        valStr = parseFloat(val.toFixed(FORMAT_NUM_SIG_DIGITS)).toString();
    }
    return Object(util_base["L" /* rightPad */])(valStr, pad);
}
// Render a numeric boolean (0 => 'false', anything else => 'true'),
// matching how bool tensors are displayed.
function boolNumToString(v) {
    if (v === 0) {
        return 'false';
    }
    return 'true';
}
// Recursively render the flat `vals` as a nested bracketed literal.
// Rank 0/1 are base cases; higher ranks slice along the first axis and
// recurse. Long axes are elided to the first/last few entries.
// `isLast` suppresses the trailing separator after the final sibling.
function subTensorToString(vals, shape, dtype, strides, padPerCol, isLast = true) {
    // complex64 stores two underlying numbers per logical element.
    const storagePerElement = dtype === 'complex64' ? 2 : 1;
    const size = shape[0];
    const rank = shape.length;
    if (rank === 0) {
        // Scalar: a single value, no brackets.
        if (dtype === 'complex64') {
            const complexTuple = createComplexTuples(vals);
            return [valToString(complexTuple[0], 0, dtype)];
        }
        if (dtype === 'bool') {
            return [boolNumToString(vals[0])];
        }
        return [vals[0].toString()];
    }
    if (rank === 1) {
        if (size > FORMAT_LIMIT_NUM_VALS) {
            // Elide the middle: show the first and last few values only.
            const firstValsSize = FORMAT_NUM_FIRST_LAST_VALS * storagePerElement;
            let firstVals = Array.from(vals.slice(0, firstValsSize));
            let lastVals = Array.from(vals.slice((size - FORMAT_NUM_FIRST_LAST_VALS) * storagePerElement, size * storagePerElement));
            if (dtype === 'complex64') {
                firstVals = createComplexTuples(firstVals);
                lastVals = createComplexTuples(lastVals);
            }
            return [
                '[' +
                    firstVals.map((x, i) => valToString(x, padPerCol[i], dtype))
                        .join(', ') +
                    ', ..., ' +
                    lastVals
                        .map((x, i) => valToString(x, padPerCol[size - FORMAT_NUM_FIRST_LAST_VALS + i], dtype))
                        .join(', ') +
                    ']'
            ];
        }
        const displayVals = dtype === 'complex64' ? createComplexTuples(vals) :
            Array.from(vals);
        return [
            '[' +
                displayVals.map((x, i) => valToString(x, padPerCol[i], dtype))
                    .join(', ') +
                ']'
        ];
    }
    // The array is rank 2 or more.
    const subshape = shape.slice(1);
    const substrides = strides.slice(1);
    // Underlying elements consumed per slice along the first axis.
    const stride = strides[0] * storagePerElement;
    const lines = [];
    if (size > FORMAT_LIMIT_NUM_VALS) {
        // Elide middle slices along this axis with a bare '...' line.
        for (let i = 0; i < FORMAT_NUM_FIRST_LAST_VALS; i++) {
            const start = i * stride;
            const end = start + stride;
            lines.push(...subTensorToString(vals.slice(start, end), subshape, dtype, substrides, padPerCol, false /* isLast */));
        }
        lines.push('...');
        for (let i = size - FORMAT_NUM_FIRST_LAST_VALS; i < size; i++) {
            const start = i * stride;
            const end = start + stride;
            lines.push(...subTensorToString(vals.slice(start, end), subshape, dtype, substrides, padPerCol, i === size - 1 /* isLast */));
        }
    }
    else {
        for (let i = 0; i < size; i++) {
            const start = i * stride;
            const end = start + stride;
            lines.push(...subTensorToString(vals.slice(start, end), subshape, dtype, substrides, padPerCol, i === size - 1 /* isLast */));
        }
    }
    // Wrap this level in brackets; rank 2 rows get a ',' separator.
    const sep = rank === 2 ? ',' : '';
    lines[0] = '[' + lines[0] + sep;
    for (let i = 1; i < lines.length - 1; i++) {
        lines[i] = ' ' + lines[i] + sep;
    }
    // Deeper nesting inserts extra blank lines between sibling blocks.
    let newLineSep = ',\n';
    for (let i = 2; i < rank; i++) {
        newLineSep += '\n';
    }
    lines[lines.length - 1] =
        ' ' + lines[lines.length - 1] + ']' + (isLast ? '' : newLineSep);
    return lines;
}
// Pair up a flat interleaved [re, im, re, im, ...] array into
// [[re, im], ...] tuples for display.
function createComplexTuples(vals) {
    const complexTuples = [];
    let i = 0;
    while (i < vals.length) {
        complexTuples.push([vals[i], vals[i + 1]]);
        i += 2;
    }
    return complexTuples;
}
//# sourceMappingURL=tensor_format.js.map
// EXTERNAL MODULE: ./node_modules/@tensorflow/tfjs-core/dist/util.js
var util = __webpack_require__(10);
// CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/tensor.js
/**
* @license
* Copyright 2017 Google LLC. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================================
*/
/**
* A mutable object, similar to `tf.Tensor`, that allows users to set values
* at locations before converting to an immutable `tf.Tensor`.
*
* See `tf.buffer` for creating a tensor buffer.
*
* @doc {heading: 'Tensors', subheading: 'Classes'}
*/
class tensor_TensorBuffer {
    /**
     * @param shape Buffer shape; copied defensively.
     * @param dtype Element dtype. 'complex64' is rejected.
     * @param values Optional backing array; length must match the shape.
     */
    constructor(shape, dtype, values) {
        this.dtype = dtype;
        this.shape = shape.slice();
        this.size = util_base["O" /* sizeFromShape */](shape);
        if (values != null) {
            const n = values.length;
            util_base["b" /* assert */](n === this.size, () => `Length of values '${n}' does not match the size ` +
                `inferred by the shape '${this.size}'.`);
        }
        if (dtype === 'complex64') {
            throw new Error(`complex64 dtype TensorBuffers are not supported. Please create ` +
                `a TensorBuffer for the real and imaginary parts separately and ` +
                `call tf.complex(real, imag).`);
        }
        // Allocate a fresh backing array when no values are supplied.
        this.values = values || util_base["n" /* getArrayFromDType */](dtype, this.size);
        this.strides = Object(util_base["j" /* computeStrides */])(shape);
    }
    /**
     * Sets a value in the buffer at a given location.
     *
     * @param value The value to set.
     * @param locs The location indices.
     *
     * @doc {heading: 'Tensors', subheading: 'Creation'}
     */
    set(value, ...locs) {
        // Scalar buffers are addressed as location [0].
        if (locs.length === 0) {
            locs = [0];
        }
        util_base["b" /* assert */](locs.length === this.rank, () => `The number of provided coordinates (${locs.length}) must ` +
            `match the rank (${this.rank})`);
        const index = this.locToIndex(locs);
        this.values[index] = value;
    }
    /**
     * Returns the value in the buffer at the provided location.
     *
     * @param locs The location indices.
     *
     * @doc {heading: 'Tensors', subheading: 'Creation'}
     */
    get(...locs) {
        if (locs.length === 0) {
            locs = [0];
        }
        // Bounds-check each coordinate against its dimension.
        let i = 0;
        for (const loc of locs) {
            if (loc < 0 || loc >= this.shape[i]) {
                const msg = `Requested out of range element at ${locs}. ` +
                    ` Buffer shape=${this.shape}`;
                throw new Error(msg);
            }
            i++;
        }
        // Row-major flat index: last coordinate plus stride-weighted rest.
        let index = locs[locs.length - 1];
        for (let i = 0; i < locs.length - 1; ++i) {
            index += this.strides[i] * locs[i];
        }
        return this.values[index];
    }
    // Convert a coordinate tuple into a flat row-major index.
    locToIndex(locs) {
        if (this.rank === 0) {
            return 0;
        }
        else if (this.rank === 1) {
            return locs[0];
        }
        let index = locs[locs.length - 1];
        for (let i = 0; i < locs.length - 1; ++i) {
            index += this.strides[i] * locs[i];
        }
        return index;
    }
    // Convert a flat row-major index back into a coordinate tuple
    // (inverse of locToIndex).
    indexToLoc(index) {
        if (this.rank === 0) {
            return [];
        }
        else if (this.rank === 1) {
            return [index];
        }
        const locs = new Array(this.shape.length);
        for (let i = 0; i < locs.length - 1; ++i) {
            locs[i] = Math.floor(index / this.strides[i]);
            index -= locs[i] * this.strides[i];
        }
        locs[locs.length - 1] = index;
        return locs;
    }
    // Number of dimensions of the buffer.
    get rank() {
        return this.shape.length;
    }
    /**
     * Creates an immutable `tf.Tensor` object from the buffer.
     *
     * @doc {heading: 'Tensors', subheading: 'Creation'}
     */
    toTensor() {
        return trackerFn().makeTensor(this.values, this.shape, this.dtype);
    }
}
// Module-level hooks injected from the engine/ops packages to avoid
// circular imports; each has a setter below.
// For tracking tensor creation and disposal.
let trackerFn = null;
// Used by chaining methods to call into ops.
let opHandler = null;
// Used to warn about deprecated methods.
let deprecationWarningFn = null;
// This here so that we can use this method on dev branches and keep the
// functionality at master.
// tslint:disable-next-line:no-unused-expression
[deprecationWarningFn];
/**
 * An external consumer can register itself as the tensor tracker. This way
 * the Tensor class can notify the tracker for every tensor created and
 * disposed.
 */
function setTensorTracker(fn) {
    trackerFn = fn;
}
/**
 * An external consumer can register itself as the op handler. This way the
 * Tensor class can have chaining methods that call into ops via the op
 * handler.
 */
function setOpHandler(handler) {
    opHandler = handler;
}
/**
 * Sets the deprecation warning function to be used by this file. This way the
 * Tensor class can be a leaf but still use the environment.
 */
function setDeprecationWarningFn(fn) {
    deprecationWarningFn = fn;
}
/**
* A `tf.Tensor` object represents an immutable, multidimensional array of
* numbers that has a shape and a data type.
*
* For performance reasons, functions that create tensors do not necessarily
* perform a copy of the data passed to them (e.g. if the data is passed as a
* `Float32Array`), and changes to the data will change the tensor. This is not
* a feature and is not supported. To avoid this behavior, use the tensor before
* changing the input data or create a copy with `copy = tf.add(yourTensor, 0)`.
*
* See `tf.tensor` for details on how to create a `tf.Tensor`.
*
* @doc {heading: 'Tensors', subheading: 'Classes'}
*/
class tensor_Tensor {
    /**
     * @param shape Tensor shape; copied defensively.
     * @param dtype Element dtype; defaults to 'float32'.
     * @param dataId Handle to the backing data owned by a backend.
     * @param id Unique tensor id assigned by the engine.
     */
    constructor(shape, dtype, dataId, id) {
        /** Whether this tensor has been globally kept. */
        this.kept = false;
        this.isDisposedInternal = false;
        this.shape = shape.slice();
        this.dtype = dtype || 'float32';
        this.size = util_base["O" /* sizeFromShape */](shape);
        this.strides = Object(util_base["j" /* computeStrides */])(shape);
        this.dataId = dataId;
        this.id = id;
        this.rankType = (this.rank < 5 ? this.rank.toString() : 'higher');
    }
    // Number of dimensions.
    get rank() {
        return this.shape.length;
    }
    /**
     * Returns a promise of `tf.TensorBuffer` that holds the underlying data.
     *
     * @doc {heading: 'Tensors', subheading: 'Classes'}
     */
    async buffer() {
        const vals = await this.data();
        return opHandler.buffer(this.shape, this.dtype, vals);
    }
    /**
     * Returns a `tf.TensorBuffer` that holds the underlying data.
     * @doc {heading: 'Tensors', subheading: 'Classes'}
     */
    bufferSync() {
        return opHandler.buffer(this.shape, this.dtype, this.dataSync());
    }
    /**
     * Returns the tensor data as a nested array. The transfer of data is done
     * asynchronously.
     *
     * @doc {heading: 'Tensors', subheading: 'Classes'}
     */
    async array() {
        const vals = await this.data();
        return Object(util_base["T" /* toNestedArray */])(this.shape, vals, this.dtype === 'complex64');
    }
    /**
     * Returns the tensor data as a nested array. The transfer of data is done
     * synchronously.
     *
     * @doc {heading: 'Tensors', subheading: 'Classes'}
     */
    arraySync() {
        return Object(util_base["T" /* toNestedArray */])(this.shape, this.dataSync(), this.dtype === 'complex64');
    }
    /**
     * Asynchronously downloads the values from the `tf.Tensor`. Returns a
     * promise of `TypedArray` that resolves when the computation has finished.
     *
     * @doc {heading: 'Tensors', subheading: 'Classes'}
     */
    async data() {
        this.throwIfDisposed();
        const data = trackerFn().read(this.dataId);
        if (this.dtype === 'string') {
            // String tensors store raw bytes; decode them to JS strings.
            const bytes = await data;
            try {
                return bytes.map(b => util["decodeString"](b));
            }
            catch (_a) {
                throw new Error('Failed to decode the string bytes into utf-8. ' +
                    'To get the original bytes, call tensor.bytes().');
            }
        }
        return data;
    }
    /**
     * Synchronously downloads the values from the `tf.Tensor`. This blocks the
     * UI thread until the values are ready, which can cause performance issues.
     *
     * @doc {heading: 'Tensors', subheading: 'Classes'}
     */
    dataSync() {
        this.throwIfDisposed();
        const data = trackerFn().readSync(this.dataId);
        if (this.dtype === 'string') {
            try {
                return data.map(b => util["decodeString"](b));
            }
            catch (_a) {
                throw new Error('Failed to decode the string bytes into utf-8. ' +
                    'To get the original bytes, call tensor.bytes().');
            }
        }
        return data;
    }
    /** Returns the underlying bytes of the tensor's data. */
    async bytes() {
        this.throwIfDisposed();
        const data = await trackerFn().read(this.dataId);
        if (this.dtype === 'string') {
            return data;
        }
        else {
            // Reinterpret the typed array's backing buffer as raw bytes.
            return new Uint8Array(data.buffer);
        }
    }
    /**
     * Disposes `tf.Tensor` from memory.
     *
     * @doc {heading: 'Tensors', subheading: 'Classes'}
     */
    dispose() {
        if (this.isDisposed) {
            return;
        }
        trackerFn().disposeTensor(this);
        this.isDisposedInternal = true;
    }
    get isDisposed() {
        return this.isDisposedInternal;
    }
    // Guard used by every accessor: disposed tensors must not be read.
    throwIfDisposed() {
        if (this.isDisposed) {
            throw new Error(`Tensor is disposed.`);
        }
    }
    /**
     * Prints the `tf.Tensor`. See `tf.print` for details.
     *
     * @param verbose Whether to print verbose information about the tensor,
     * including dtype and size.
     *
     * @doc {heading: 'Tensors', subheading: 'Classes'}
     */
    print(verbose = false) {
        return opHandler.print(this, verbose);
    }
    /**
     * Returns a copy of the tensor. See `tf.clone` for details.
     * @doc {heading: 'Tensors', subheading: 'Classes'}
     */
    clone() {
        this.throwIfDisposed();
        return opHandler.clone(this);
    }
    /**
     * Returns a human-readable description of the tensor. Useful for logging.
     *
     * @doc {heading: 'Tensors', subheading: 'Classes'}
     */
    toString(verbose = false) {
        const vals = this.dataSync();
        return tensorToString(vals, this.shape, this.dtype, verbose);
    }
    // Casts the tensor to a new dtype via the op handler.
    cast(dtype) {
        this.throwIfDisposed();
        return opHandler.cast(this, dtype);
    }
    // Converts this tensor into a (optionally named/trainable) Variable.
    variable(trainable = true, name, dtype) {
        this.throwIfDisposed();
        return trackerFn().makeVariable(this, trainable, name, dtype);
    }
}
// Duck-typed `instanceof Tensor`: objects with the core Tensor methods pass
// even if they come from a different bundled copy of this class.
Object.defineProperty(tensor_Tensor, Symbol.hasInstance, {
    value: (instance) => {
        // Implementation note: we should use properties of the object that will be
        // defined before the constructor body has finished executing (methods).
        // This is because when this code is transpiled by babel, babel will call
        // classCallCheck before the constructor body is run.
        // See https://github.com/tensorflow/tfjs/issues/3384 for backstory.
        return !!instance && instance.data != null && instance.dataSync != null &&
            instance.throwIfDisposed != null;
    }
});
function getGlobalTensorClass() {
    // Use getGlobal so that we can augment the Tensor class across package
    // boundaries because the node resolution algorithm may result in different
    // modules being returned for this file depending on the path they are
    // loaded from.
    return Object(global_util["a" /* getGlobal */])('Tensor', () => {
        return tensor_Tensor;
    });
}
// Global side effect. Cache global reference to Tensor class
getGlobalTensorClass();
/**
* A mutable `tf.Tensor`, useful for persisting state, e.g. for training.
*
* @doc {heading: 'Tensors', subheading: 'Classes'}
*/
class tensor_Variable extends tensor_Tensor {
    // Wraps an initial tensor's data/shape/dtype; the variable shares the
    // initial value's dataId until assign() is called.
    constructor(initialValue, trainable, name, tensorId) {
        super(initialValue.shape, initialValue.dtype, initialValue.dataId, tensorId);
        this.trainable = trainable;
        this.name = name;
    }
    /**
     * Assign a new `tf.Tensor` to this variable. The new `tf.Tensor` must have
     * the same shape and dtype as the old `tf.Tensor`.
     *
     * @param newValue New tensor to be assigned to this variable.
     *
     * @doc {heading: 'Tensors', subheading: 'Classes'}
     */
    assign(newValue) {
        if (newValue.dtype !== this.dtype) {
            throw new Error(`dtype of the new value (${newValue.dtype}) and ` +
                `previous value (${this.dtype}) must match`);
        }
        if (!util_base["a" /* arraysEqual */](newValue.shape, this.shape)) {
            throw new Error(`shape of the new value (${newValue.shape}) and ` +
                `previous value (${this.shape}) must match`);
        }
        // Release the old data, adopt the new dataId, and take a reference
        // so the data survives disposal of `newValue`.
        trackerFn().disposeTensor(this);
        this.dataId = newValue.dataId;
        trackerFn().incRef(this, null /* backend */);
    }
    // Variables are disposed through the variable registry, not the plain
    // tensor path (overrides Tensor.dispose).
    dispose() {
        trackerFn().disposeVariable(this);
        this.isDisposedInternal = true;
    }
}
// Duck-typed `instanceof Variable`: any Tensor carrying an assign() method
// counts, mirroring the Tensor hasInstance override above it in tfjs-core.
Object.defineProperty(tensor_Variable, Symbol.hasInstance, {
    value: (instance) => {
        return instance instanceof tensor_Tensor && instance.assign != null &&
            instance.assign instanceof Function;
    }
});
//# sourceMappingURL=tensor.js.map
/***/ }),
/* 7 */
/***/ (function(module, __webpack_exports__, __webpack_require__) {
"use strict";
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "a", function() { return reshape; });
/* harmony import */ var _engine__WEBPACK_IMPORTED_MODULE_0__ = __webpack_require__(5);
/* harmony import */ var _kernel_names__WEBPACK_IMPORTED_MODULE_1__ = __webpack_require__(3);
/* harmony import */ var _tensor_util_env__WEBPACK_IMPORTED_MODULE_2__ = __webpack_require__(2);
/* harmony import */ var _operation__WEBPACK_IMPORTED_MODULE_3__ = __webpack_require__(4);
/**
* @license
* Copyright 2020 Google LLC. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================================
*/
/**
* Reshapes a `tf.Tensor` to a given shape.
*
* Given an input tensor, returns a new tensor with the same values as the
* input tensor with shape `shape`.
*
* If one component of shape is the special value -1, the size of that
* dimension is computed so that the total size remains constant. In
* particular, a shape of [-1] flattens into 1-D. At most one component of
* shape can be -1.
*
* If shape is 1-D or higher, then the operation returns a tensor with shape
* shape filled with the values of tensor. In this case, the number of
* elements implied by shape must be the same as the number of elements in
* tensor.
*
* ```js
* const x = tf.tensor1d([1, 2, 3, 4]);
* x.reshape([2, 2]).print();
* ```
*
* @param x The input tensor to be reshaped.
* @param shape An array of integers defining the output tensor shape.
*
* @doc {heading: 'Tensors', subheading: 'Transformations'}
*/
function reshape_(x, shape) {
    // Accepts tensor-likes; 'string_or_numeric' permits any dtype.
    const $x = Object(_tensor_util_env__WEBPACK_IMPORTED_MODULE_2__[/* convertToTensor */ "a"])(x, 'x', 'reshape', 'string_or_numeric');
    const inputs = { x: $x };
    const attrs = { shape };
    // Size validation and -1 dimension inference happen inside the kernel.
    return _engine__WEBPACK_IMPORTED_MODULE_0__[/* ENGINE */ "a"].runKernel(_kernel_names__WEBPACK_IMPORTED_MODULE_1__[/* Reshape */ "pc"], inputs, attrs);
}
// Wrap in op() so the call gets a named scope and error context.
const reshape = Object(_operation__WEBPACK_IMPORTED_MODULE_3__[/* op */ "b"])({ reshape_ });
//# sourceMappingURL=reshape.js.map
/***/ }),
/* 8 */
/***/ (function(module, __webpack_exports__, __webpack_require__) {
"use strict";
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "M", function() { return shuffle; });
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "N", function() { return shuffleCombo; });
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "i", function() { return clamp; });
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "H", function() { return nearestLargerEven; });
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "R", function() { return sum; });
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "J", function() { return randUniform; });
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "l", function() { return distSquared; });
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "b", function() { return assert; });
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "e", function() { return assertShapesMatch; });
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "d", function() { return assertNonNull; });
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "m", function() { return flatten; });
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "O", function() { return sizeFromShape; });
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "y", function() { return isScalarShape; });
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "a", function() { return arraysEqual; });
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "v", function() { return isInt; });
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "S", function() { return tanh; });
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "P", function() { return sizeToSquarishShape; });
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "k", function() { return createShuffledIndices; });
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "L", function() { return rightPad; });
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "K", function() { return repeatedTry; });
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "s", function() { return inferFromImplicitShape; });
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "I", function() { return parseAxisParam; });
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "Q", function() { return squeezeShape; });
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "o", function() { return getTypedArrayFromDType; });
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "n", function() { return getArrayFromDType; });
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "h", function() { return checkConversionForErrors; });
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "B", function() { return isValidDtype; });
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "p", function() { return hasEncodingLoss; });
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "A", function() { return isTypedArray; });
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "g", function() { return bytesPerElement; });
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "f", function() { return bytesFromStringArray; });
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "z", function() { return isString; });
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "t", function() { return isBoolean; });
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "w", function() { return isNumber; });
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "r", function() { return inferDtype; });
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "u", function() { return isFunction; });
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "G", function() { return nearestDivisor; });
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "j", function() { return computeStrides; });
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "T", function() { return toNestedArray; });
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "D", function() { return makeOnesTypedArray; });
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "F", function() { return makeZerosTypedArray; });
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "E", function() { return makeZerosNestedTypedArray; });
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "c", function() { return assertNonNegativeIntegerDimensions; });
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "C", function() { return locToIndex; });
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "q", function() { return indexToLoc; });
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "x", function() { return isPromise; });
/**
* @license
* Copyright 2020 Google LLC. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================================
*/
/**
* Shuffles the array in-place using Fisher-Yates algorithm.
*
* ```js
* const a = [1, 2, 3, 4, 5];
* tf.util.shuffle(a);
* console.log(a);
* ```
*
* @param array The array to shuffle in-place.
*
* @doc {heading: 'Util', namespace: 'util'}
*/
// tslint:disable-next-line:no-any
/**
 * In-place Fisher-Yates shuffle: walk from the last slot down, swapping each
 * slot with a uniformly random slot at or before it.
 */
function shuffle(array) {
    for (let i = array.length - 1; i >= 0; i--) {
        // Random index in [0, i].
        const j = (Math.random() * (i + 1)) | 0;
        const held = array[i];
        array[i] = array[j];
        array[j] = held;
    }
}
/**
* Shuffles two arrays in-place the same way using Fisher-Yates algorithm.
*
* ```js
* const a = [1,2,3,4,5];
* const b = [11,22,33,44,55];
* tf.util.shuffleCombo(a, b);
* console.log(a, b);
* ```
*
* @param array The first array to shuffle in-place.
* @param array2 The second array to shuffle in-place with the same permutation
* as the first array.
*
* @doc {heading: 'Util', namespace: 'util'}
*/
function shuffleCombo(
// tslint:disable-next-line:no-any
array,
// tslint:disable-next-line:no-any
array2) {
    if (array.length !== array2.length) {
        throw new Error(`Array sizes must match to be shuffled together ` +
            `First array length was ${array.length}` +
            `Second array length was ${array2.length}`);
    }
    // Fisher-Yates with a single random draw per step, applied to both
    // arrays so they receive the identical permutation.
    for (let i = array.length - 1; i >= 0; i--) {
        const j = (Math.random() * (i + 1)) | 0;
        const heldA = array[i];
        const heldB = array2[i];
        array[i] = array[j];
        array2[i] = array2[j];
        array[j] = heldA;
        array2[j] = heldB;
    }
}
/** Clamps a value to a specified range. */
function clamp(min, x, max) {
    if (x < min) {
        return min;
    }
    return x > max ? max : x;
}
/** Rounds an integer up to the nearest even value (even inputs unchanged). */
function nearestLargerEven(val) {
    const isOdd = val % 2 !== 0;
    return isOdd ? val + 1 : val;
}
/** Sums the elements of a numeric array (or typed array). */
function sum(arr) {
    let total = 0;
    for (const v of arr) {
        total += v;
    }
    return total;
}
/**
* Returns a sample from a uniform [a, b) distribution.
*
* @param a The minimum support (inclusive).
* @param b The maximum support (exclusive).
* @return A pseudorandom number on the half-open interval [a,b).
*/
/**
 * Returns a sample from a uniform [a, b) distribution.
 *
 * @param a The minimum support (inclusive).
 * @param b The maximum support (exclusive).
 * @return A pseudorandom number on the half-open interval [a,b).
 */
function randUniform(a, b) {
    const t = Math.random();
    // Linear interpolation between a (t = 0) and b (t -> 1).
    return (b * t) + (1 - t) * a;
}
/** Returns the squared Euclidean distance between two vectors. */
/** Returns the squared Euclidean distance between two vectors. */
function distSquared(a, b) {
    let acc = 0;
    for (let k = 0; k < a.length; k++) {
        const delta = Number(a[k]) - Number(b[k]);
        acc += delta * delta;
    }
    return acc;
}
/**
* Asserts that the expression is true. Otherwise throws an error with the
* provided message.
*
* ```js
* const x = 2;
* tf.util.assert(x === 2, 'x is not 2');
* ```
*
* @param expr The expression to assert (as a boolean).
* @param msg A function that returns the message to report when throwing an
* error. We use a function for performance reasons.
*
* @doc {heading: 'Util', namespace: 'util'}
*/
/**
 * Asserts that `expr` is truthy; otherwise throws an Error whose message is
 * `msg` (a plain string) or the result of calling `msg` (a lazy message
 * factory, used so callers avoid building strings on the happy path).
 */
function assert(expr, msg) {
    if (expr) {
        return;
    }
    const message = typeof msg === 'string' ? msg : msg();
    throw new Error(message);
}
/** Throws (via assert) unless the two shape arrays are element-wise equal. */
function assertShapesMatch(shapeA, shapeB, errorMessagePrefix = '') {
    const buildMessage = () => errorMessagePrefix + ` Shapes ${shapeA} and ${shapeB} must match`;
    assert(arraysEqual(shapeA, shapeB), buildMessage);
}
/** Throws (via assert) when `a` is null or undefined. */
function assertNonNull(a) {
    const message = () => `The input to the tensor constructor must be a non-null value.`;
    assert(a != null, message);
}
// NOTE: We explicitly type out what T extends instead of any so that
// util.flatten on a nested array of number doesn't try to infer T as a
// number[][], causing us to explicitly type util.flatten<number>().
/**
* Flattens an arbitrarily nested array.
*
* ```js
* const a = [[1, 2], [3, 4], [5, [6, [7]]]];
* const flat = tf.util.flatten(a);
* console.log(flat);
* ```
*
* @param arr The nested array to flatten.
* @param result The destination array which holds the elements.
* @param skipTypedArray If true, avoids flattening the typed arrays. Defaults
* to false.
*
* @doc {heading: 'Util', namespace: 'util'}
*/
function flatten(arr, result = [], skipTypedArray = false) {
    if (result == null) {
        result = [];
    }
    // Typed arrays are flattened like plain arrays unless skipTypedArray is
    // set, in which case they are pushed whole as leaf values.
    const isTyped = arr instanceof Float32Array || arr instanceof Int32Array ||
        arr instanceof Uint8Array;
    if (Array.isArray(arr) || (isTyped && !skipTypedArray)) {
        for (const item of arr) {
            flatten(item, result, skipTypedArray);
        }
    }
    else {
        result.push(arr);
    }
    return result;
}
/**
* Returns the size (number of elements) of the tensor given its shape.
*
* ```js
* const shape = [3, 4, 2];
* const size = tf.util.sizeFromShape(shape);
* console.log(size);
* ```
*
* @doc {heading: 'Util', namespace: 'util'}
*/
function sizeFromShape(shape) {
    // Product of all dims; the empty shape (a scalar) has exactly 1 element,
    // which the initial accumulator of 1 handles naturally.
    return shape.reduce((acc, dim) => acc * dim, 1);
}
/** A scalar is represented by a shape with no dimensions. */
function isScalarShape(shape) {
    const rank = shape.length;
    return rank === 0;
}
/** Element-wise equality of two array-likes (same reference short-circuits). */
function arraysEqual(n1, n2) {
    if (n1 === n2) {
        return true;
    }
    if (n1 == null || n2 == null || n1.length !== n2.length) {
        return false;
    }
    for (let idx = n1.length - 1; idx >= 0; idx--) {
        if (n1[idx] !== n2[idx]) {
            return false;
        }
    }
    return true;
}
/** True when `a` has no fractional part. */
function isInt(a) {
    const fractional = a % 1;
    return fractional === 0;
}
/** Hyperbolic tangent, with a manual fallback for runtimes lacking Math.tanh. */
function tanh(x) {
    // tslint:disable-next-line:no-any
    if (Math.tanh != null) {
        // tslint:disable-next-line:no-any
        return Math.tanh(x);
    }
    // The infinities are special-cased because exp(2x) over/underflows there.
    if (x === Infinity) {
        return 1;
    }
    if (x === -Infinity) {
        return -1;
    }
    const e2x = Math.exp(2 * x);
    return (e2x - 1) / (e2x + 1);
}
/** Picks a near-square [width, height] that can hold `size` elements. */
function sizeToSquarishShape(size) {
    const cols = Math.ceil(Math.sqrt(size));
    const rows = Math.ceil(size / cols);
    return [cols, rows];
}
/**
* Creates a new array with randomized indicies to a given quantity.
*
* ```js
* const randomTen = tf.util.createShuffledIndices(10);
* console.log(randomTen);
* ```
*
* @param number Quantity of how many shuffled indicies to create.
*
* @doc {heading: 'Util', namespace: 'util'}
*/
/** Builds a Uint32Array of 0..n-1 and shuffles it in place. */
function createShuffledIndices(n) {
    const indices = new Uint32Array(n);
    for (let i = 0; i < n; ++i) {
        indices[i] = i;
    }
    shuffle(indices);
    return indices;
}
/** Pads string `a` with trailing spaces up to `size` characters. */
function rightPad(a, size) {
    if (size <= a.length) {
        return a;
    }
    const padding = ' '.repeat(size - a.length);
    return a + padding;
}
/**
 * Polls `checkFn` until it returns true, resolving the returned promise.
 * `delayFn(attempt)` supplies the backoff (ms) before each retry; if
 * `maxCounter` is given, the promise rejects once that many attempts fail.
 */
function repeatedTry(checkFn, delayFn = (counter) => 0, maxCounter) {
    return new Promise((resolve, reject) => {
        let attempts = 0;
        const poll = () => {
            if (checkFn()) {
                resolve();
                return;
            }
            attempts++;
            const backoffMs = delayFn(attempts);
            if (maxCounter != null && attempts >= maxCounter) {
                reject();
                return;
            }
            setTimeout(poll, backoffMs);
        };
        poll();
    });
}
/**
* Given the full size of the array and a shape that may contain -1 as the
* implicit dimension, returns the inferred shape where -1 is replaced.
* E.g. For shape=[2, -1, 3] and size=24, it will return [2, 4, 3].
*
* @param shape The shape, which may contain -1 in some dimension.
* @param size The full size (number of elements) of the array.
* @return The inferred shape where -1 is replaced with the inferred size.
*/
/**
 * Resolves a shape that may contain a single -1 ("infer this dim") so that
 * the product of dims equals `size`. E.g. shape=[2, -1, 3], size=24 -> [2, 4, 3].
 * Throws on multiple -1s, dims < -1, or when `size` is incompatible.
 */
function inferFromImplicitShape(shape, size) {
    let knownProduct = 1;
    let inferredDim = -1;
    for (let i = 0; i < shape.length; ++i) {
        const dim = shape[i];
        if (dim >= 0) {
            knownProduct *= dim;
        }
        else if (dim === -1) {
            if (inferredDim !== -1) {
                throw Error(`Shapes can only have 1 implicit size. ` +
                    `Found -1 at dim ${inferredDim} and dim ${i}`);
            }
            inferredDim = i;
        }
        else if (dim < 0) {
            throw Error(`Shapes can not be < 0. Found ${dim} at dim ${i}`);
        }
    }
    if (inferredDim === -1) {
        // Fully explicit shape: just validate the element count.
        if (size > 0 && size !== knownProduct) {
            throw Error(`Size(${size}) must match the product of shape ${shape}`);
        }
        return shape;
    }
    if (knownProduct === 0) {
        throw Error(`Cannot infer the missing size in [${shape}] when ` +
            `there are 0 elements`);
    }
    if (size % knownProduct !== 0) {
        throw Error(`The implicit shape can't be a fractional number. ` +
            `Got ${size} / ${knownProduct}`);
    }
    const resolved = shape.slice();
    resolved[inferredDim] = size / knownProduct;
    return resolved;
}
/**
 * Normalizes an axis argument: null -> all axes, scalar -> [scalar],
 * negative axes -> counted from the end. Validates range and integrality.
 */
function parseAxisParam(axis, shape) {
    const rank = shape.length;
    axis = axis == null ? shape.map((s, i) => i) : [].concat(axis);
    const inRange = (ax) => ax >= -rank && ax < rank;
    assert(axis.every(inRange), () => `All values in axis param must be in range [-${rank}, ${rank}) but ` +
        `got axis ${axis}`);
    assert(axis.every(ax => isInt(ax)), () => `All values in axis param must be integers but ` +
        `got axis ${axis}`);
    return axis.map(a => a >= 0 ? a : rank + a);
}
/** Reduces the shape by removing all dimensions of shape 1. */
function squeezeShape(shape, axis) {
    const newShape = [];
    const keptDims = [];
    // An explicitly-empty axis array behaves like no axis at all:
    // squeeze every size-1 dimension.
    const isEmptyArray = axis != null && Array.isArray(axis) && axis.length === 0;
    const axes = (axis == null || isEmptyArray) ?
        null :
        parseAxisParam(axis, shape).sort();
    let j = 0; // cursor into the sorted `axes` list
    for (let i = 0; i < shape.length; ++i) {
        if (axes != null) {
            // A dimension explicitly requested for squeezing must be 1.
            if (axes[j] === i && shape[i] !== 1) {
                throw new Error(`Can't squeeze axis ${i} since its dim '${shape[i]}' is not 1`);
            }
            // Keep a size-1 dimension that was NOT requested (cursor is past
            // the end, or points to a later axis).
            if ((axes[j] == null || axes[j] > i) && shape[i] === 1) {
                newShape.push(shape[i]);
                keptDims.push(i);
            }
            // Advance the cursor once the current dimension reaches it.
            if (axes[j] <= i) {
                j++;
            }
        }
        // Dimensions larger than 1 are always kept.
        if (shape[i] !== 1) {
            newShape.push(shape[i]);
            keptDims.push(i);
        }
    }
    // keptDims maps each entry of newShape back to its index in `shape`.
    return { newShape, keptDims };
}
/** Allocates a zeroed typed array of `size` for the given numeric dtype. */
function getTypedArrayFromDType(dtype, size) {
    switch (dtype) {
        case null:
        case undefined:
        case 'float32':
            return new Float32Array(size);
        case 'int32':
            return new Int32Array(size);
        case 'bool':
            return new Uint8Array(size);
        default:
            throw new Error(`Unknown data type ${dtype}`);
    }
}
/** Like getTypedArrayFromDType, but also supports 'string' (plain Array). */
function getArrayFromDType(dtype, size) {
    switch (dtype) {
        case null:
        case undefined:
        case 'float32':
            return new Float32Array(size);
        case 'int32':
            return new Int32Array(size);
        case 'bool':
            return new Uint8Array(size);
        case 'string':
            return new Array(size);
        default:
            throw new Error(`Unknown data type ${dtype}`);
    }
}
/** Rejects NaN/Infinity values before they are uploaded into a tensor. */
function checkConversionForErrors(vals, dtype) {
    // Deliberately uses the coercing global isNaN/isFinite, matching the
    // original's behavior for non-number inputs.
    for (const num of vals) {
        if (isNaN(num) || !isFinite(num)) {
            throw Error(`A tensor of type ${dtype} being uploaded contains ${num}.`);
        }
    }
}
/** Returns true if the dtype is valid. */
function isValidDtype(dtype) {
    switch (dtype) {
        case 'bool':
        case 'complex64':
        case 'float32':
        case 'int32':
        case 'string':
            return true;
        default:
            return false;
    }
}
/**
* Returns true if the new type can't encode the old type without loss of
* precision.
*/
/**
 * Returns true if the new type can't encode the old type without loss of
 * precision.
 */
function hasEncodingLoss(oldType, newType) {
    if (newType === 'complex64') {
        // complex64 can encode everything.
        return false;
    }
    if (newType === 'float32') {
        return oldType === 'complex64';
    }
    if (newType === 'int32') {
        return oldType === 'float32' || oldType === 'complex64';
    }
    if (newType === 'bool') {
        return oldType !== 'bool';
    }
    // Unrecognized target dtype: conservatively report loss.
    return true;
}
/** True for the three typed-array flavors tensors are stored in. */
function isTypedArray(a) {
    return [Float32Array, Int32Array, Uint8Array].some(ctor => a instanceof ctor);
}
/** Bytes used by one element of the given dtype. */
function bytesPerElement(dtype) {
    switch (dtype) {
        case 'float32':
        case 'int32':
            return 4;
        case 'complex64':
            return 8;
        case 'bool':
            return 1;
        default:
            throw new Error(`Unknown dtype ${dtype}`);
    }
}
/**
* Returns the approximate number of bytes allocated in the string array - 2
* bytes per character. Computing the exact bytes for a native string in JS is
* not possible since it depends on the encoding of the html page that serves
* the website.
*/
function bytesFromStringArray(arr) {
    if (arr == null) {
        return 0;
    }
    // Sum of each entry's length (assumes entries expose `.length`,
    // e.g. encoded byte arrays).
    let total = 0;
    for (const item of arr) {
        total += item.length;
    }
    return total;
}
/** Returns true if the value is a string. */
function isString(value) {
    return value instanceof String || typeof value === 'string';
}
/** Returns true if the value is a primitive boolean. */
function isBoolean(value) {
    const t = typeof value;
    return t === 'boolean';
}
/** Returns true if the value is a primitive number. */
function isNumber(value) {
    const t = typeof value;
    return t === 'number';
}
/**
 * Infers a tensor dtype from a value: arrays recurse into their first
 * element; typed arrays, numbers, strings and booleans map to the obvious
 * dtype; anything else defaults to 'float32'.
 */
function inferDtype(values) {
    if (Array.isArray(values)) {
        return inferDtype(values[0]);
    }
    if (values instanceof Float32Array) {
        return 'float32';
    }
    if (values instanceof Int32Array || values instanceof Uint8Array) {
        return 'int32';
    }
    if (typeof values === 'number') {
        return 'float32';
    }
    if (typeof values === 'string' || values instanceof String) {
        return 'string';
    }
    if (typeof values === 'boolean') {
        return 'bool';
    }
    return 'float32';
}
/** Duck-typed function check: requires constructor, call and apply. */
function isFunction(f) {
    return Boolean(f && f.constructor && f.call && f.apply);
}
/** Smallest divisor of `size` that is >= `start` (or `size` itself). */
function nearestDivisor(size, start) {
    for (let candidate = start; candidate < size; ++candidate) {
        if (size % candidate === 0) {
            return candidate;
        }
    }
    return size;
}
/**
 * Row-major strides for `shape`. The last dimension has an implicit stride
 * of 1 and is omitted, so the result has rank-1 entries.
 */
function computeStrides(shape) {
    const rank = shape.length;
    if (rank < 2) {
        return [];
    }
    const strides = new Array(rank - 1);
    // Build outward from the innermost dimension.
    strides[rank - 2] = shape[rank - 1];
    for (let d = rank - 3; d >= 0; --d) {
        strides[d] = strides[d + 1] * shape[d + 1];
    }
    return strides;
}
/**
 * Recursively copies a slice of flat storage `a` (starting at `offset`) into
 * a nested array of the given shape. Complex data doubles each leaf run.
 */
function createNestedArray(offset, shape, a, isComplex = false) {
    const mult = isComplex ? 2 : 1;
    if (shape.length === 1) {
        const count = shape[0] * mult;
        const leaf = new Array();
        for (let i = 0; i < count; i++) {
            leaf[i] = a[offset + i];
        }
        return leaf;
    }
    const [head, ...rest] = shape;
    const stride = rest.reduce((acc, c) => acc * c) * mult;
    const out = new Array();
    for (let i = 0; i < head; i++) {
        out[i] = createNestedArray(offset + i * stride, rest, a, isComplex);
    }
    return out;
}
// Provide a nested array of TypedArray in given shape.
function toNestedArray(shape, a, isComplex = false) {
    if (shape.length === 0) {
        // A scalar is represented by its lone value.
        return a[0];
    }
    const mult = isComplex ? 2 : 1;
    const size = shape.reduce((acc, c) => acc * c) * mult;
    if (size === 0) {
        // A tensor with zero elements becomes an empty list.
        return [];
    }
    if (size !== a.length) {
        throw new Error(`[${shape}] does not match the input size ${a.length}${isComplex ? ' for a complex tensor' : ''}.`);
    }
    return createNestedArray(0, shape, a, isComplex);
}
/** Typed array of the given dtype with every element set to 1. */
function makeOnesTypedArray(size, dtype) {
    const arr = makeZerosTypedArray(size, dtype);
    arr.fill(1);
    return arr;
}
/** Zeroed typed array for a dtype (complex64 shares Float32Array storage). */
function makeZerosTypedArray(size, dtype) {
    switch (dtype) {
        case null:
        case undefined:
        case 'float32':
        case 'complex64':
            return new Float32Array(size);
        case 'int32':
            return new Int32Array(size);
        case 'bool':
            return new Uint8Array(size);
        default:
            throw new Error(`Unknown data type ${dtype}`);
    }
}
/**
* Make nested `TypedArray` filled with zeros.
* @param shape The shape information for the nested array.
* @param dtype dtype of the array element.
*/
/**
 * Make nested `TypedArray` filled with zeros.
 * @param shape The shape information for the nested array.
 * @param dtype dtype of the array element.
 */
function makeZerosNestedTypedArray(shape, dtype) {
    const size = shape.reduce((prev, curr) => prev * curr, 1);
    let storage;
    if (dtype == null || dtype === 'float32') {
        storage = new Float32Array(size);
    }
    else if (dtype === 'int32') {
        storage = new Int32Array(size);
    }
    else if (dtype === 'bool') {
        storage = new Uint8Array(size);
    }
    else {
        // Note: complex64 is deliberately not supported here, matching the
        // original behavior.
        throw new Error(`Unknown data type ${dtype}`);
    }
    return toNestedArray(shape, storage);
}
/** Asserts every dimension of `shape` is a non-negative integer. */
function assertNonNegativeIntegerDimensions(shape) {
    for (const dimSize of shape) {
        assert(Number.isInteger(dimSize) && dimSize >= 0, () => `Tensor must have a shape comprised of positive integers but got ` +
            `shape [${shape}].`);
    }
}
/**
* Computes flat index for a given location (multidimentionsal index) in a
* Tensor/multidimensional array.
*
* @param locs Location in the tensor.
* @param rank Rank of the tensor.
* @param strides Tensor strides.
*/
/**
 * Computes flat index for a given location (multidimentionsal index) in a
 * Tensor/multidimensional array.
 *
 * @param locs Location in the tensor.
 * @param rank Rank of the tensor.
 * @param strides Tensor strides.
 */
function locToIndex(locs, rank, strides) {
    if (rank === 0) {
        return 0;
    }
    if (rank === 1) {
        return locs[0];
    }
    // Dot product of the leading coordinates with their strides, plus the
    // last coordinate (implicit stride 1).
    let flat = locs[locs.length - 1];
    for (let d = 0; d < locs.length - 1; ++d) {
        flat += strides[d] * locs[d];
    }
    return flat;
}
/**
* Computes the location (multidimensional index) in a tensor/multidimentional
* array for a given flat index.
*
* @param index Index in flat array.
* @param rank Rank of tensor.
* @param strides Strides of tensor.
*/
/**
 * Computes the location (multidimensional index) in a tensor/multidimentional
 * array for a given flat index.
 *
 * @param index Index in flat array.
 * @param rank Rank of tensor.
 * @param strides Strides of tensor.
 */
function indexToLoc(index, rank, strides) {
    if (rank === 0) {
        return [];
    }
    if (rank === 1) {
        return [index];
    }
    const coords = new Array(rank);
    // Peel off one coordinate per stride; the remainder is the last coord.
    for (let d = 0; d < coords.length - 1; ++d) {
        coords[d] = Math.floor(index / strides[d]);
        index -= coords[d] * strides[d];
    }
    coords[coords.length - 1] = index;
    return coords;
}
/**
* This method asserts whether an object is a Promise instance.
* @param object
*/
// tslint:disable-next-line: no-any
function isPromise(object) {
    // Duck-typing instead of `instanceof Promise`: works with non-native
    // Promise implementations and with zone.js-patched promises. Note the
    // result is truthy/falsy rather than strictly boolean, preserving the
    // original contract.
    const thenProp = object && object.then;
    return thenProp && typeof thenProp === 'function';
}
//# sourceMappingURL=util_base.js.map
/***/ }),
/* 9 */
/***/ (function(module, __webpack_exports__, __webpack_require__) {
"use strict";
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "a", function() { return mul; });
/* harmony import */ var _engine__WEBPACK_IMPORTED_MODULE_0__ = __webpack_require__(5);
/* harmony import */ var _kernel_names__WEBPACK_IMPORTED_MODULE_1__ = __webpack_require__(3);
/* harmony import */ var _tensor_util__WEBPACK_IMPORTED_MODULE_2__ = __webpack_require__(23);
/* harmony import */ var _tensor_util_env__WEBPACK_IMPORTED_MODULE_3__ = __webpack_require__(2);
/* harmony import */ var _operation__WEBPACK_IMPORTED_MODULE_4__ = __webpack_require__(4);
/**
* @license
* Copyright 2020 Google LLC. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================================
*/
/**
* Multiplies two `tf.Tensor`s element-wise, A * B. Supports broadcasting.
*
* We also expose `tf.mulStrict` which has the same signature as this op and
* asserts that `a` and `b` are the same shape (does not broadcast).
*
* ```js
* const a = tf.tensor1d([1, 2, 3, 4]);
* const b = tf.tensor1d([2, 3, 4, 5]);
*
* a.mul(b).print(); // or tf.mul(a, b)
* ```
*
* ```js
* // Broadcast mul a with b.
* const a = tf.tensor1d([1, 2, 3, 4]);
* const b = tf.scalar(5);
*
* a.mul(b).print(); // or tf.mul(a, b)
* ```
* @param a The first tensor to multiply.
* @param b The second tensor to multiply. Must have the same dtype as `a`.
*
* @doc {heading: 'Operations', subheading: 'Arithmetic'}
*/
function mul_(a, b) {
    // Accept tensor-likes for both operands.
    let $a = Object(_tensor_util_env__WEBPACK_IMPORTED_MODULE_3__[/* convertToTensor */ "a"])(a, 'a', 'mul');
    let $b = Object(_tensor_util_env__WEBPACK_IMPORTED_MODULE_3__[/* convertToTensor */ "a"])(b, 'b', 'mul');
    // Coerce both operands to a common dtype before dispatching the kernel.
    [$a, $b] = Object(_tensor_util__WEBPACK_IMPORTED_MODULE_2__["makeTypesMatch"])($a, $b);
    const inputs = { a: $a, b: $b };
    // Delegate to the registered Multiply kernel on the active backend.
    return _engine__WEBPACK_IMPORTED_MODULE_0__[/* ENGINE */ "a"].runKernel(_kernel_names__WEBPACK_IMPORTED_MODULE_1__[/* Multiply */ "Vb"], inputs);
}
const mul = Object(_operation__WEBPACK_IMPORTED_MODULE_4__[/* op */ "b"])({ mul_ });
//# sourceMappingURL=mul.js.map
/***/ }),
/* 10 */
/***/ (function(module, __webpack_exports__, __webpack_require__) {
"use strict";
__webpack_require__.r(__webpack_exports__);
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "createScalarValue", function() { return createScalarValue; });
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "toTypedArray", function() { return toTypedArray; });
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "now", function() { return now; });
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "fetch", function() { return fetch; });
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "encodeString", function() { return encodeString; });
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "decodeString", function() { return decodeString; });
/* harmony import */ var _environment__WEBPACK_IMPORTED_MODULE_0__ = __webpack_require__(22);
/* harmony import */ var _util_base__WEBPACK_IMPORTED_MODULE_1__ = __webpack_require__(8);
/* harmony reexport (safe) */ __webpack_require__.d(__webpack_exports__, "shuffle", function() { return _util_base__WEBPACK_IMPORTED_MODULE_1__["M"]; });
/* harmony reexport (safe) */ __webpack_require__.d(__webpack_exports__, "shuffleCombo", function() { return _util_base__WEBPACK_IMPORTED_MODULE_1__["N"]; });
/* harmony reexport (safe) */ __webpack_require__.d(__webpack_exports__, "clamp", function() { return _util_base__WEBPACK_IMPORTED_MODULE_1__["i"]; });
/* harmony reexport (safe) */ __webpack_require__.d(__webpack_exports__, "nearestLargerEven", function() { return _util_base__WEBPACK_IMPORTED_MODULE_1__["H"]; });
/* harmony reexport (safe) */ __webpack_require__.d(__webpack_exports__, "sum", function() { return _util_base__WEBPACK_IMPORTED_MODULE_1__["R"]; });
/* harmony reexport (safe) */ __webpack_require__.d(__webpack_exports__, "randUniform", function() { return _util_base__WEBPACK_IMPORTED_MODULE_1__["J"]; });
/* harmony reexport (safe) */ __webpack_require__.d(__webpack_exports__, "distSquared", function() { return _util_base__WEBPACK_IMPORTED_MODULE_1__["l"]; });
/* harmony reexport (safe) */ __webpack_require__.d(__webpack_exports__, "assert", function() { return _util_base__WEBPACK_IMPORTED_MODULE_1__["b"]; });
/* harmony reexport (safe) */ __webpack_require__.d(__webpack_exports__, "assertShapesMatch", function() { return _util_base__WEBPACK_IMPORTED_MODULE_1__["e"]; });
/* harmony reexport (safe) */ __webpack_require__.d(__webpack_exports__, "assertNonNull", function() { return _util_base__WEBPACK_IMPORTED_MODULE_1__["d"]; });
/* harmony reexport (safe) */ __webpack_require__.d(__webpack_exports__, "flatten", function() { return _util_base__WEBPACK_IMPORTED_MODULE_1__["m"]; });
/* harmony reexport (safe) */ __webpack_require__.d(__webpack_exports__, "sizeFromShape", function() { return _util_base__WEBPACK_IMPORTED_MODULE_1__["O"]; });
/* harmony reexport (safe) */ __webpack_require__.d(__webpack_exports__, "isScalarShape", function() { return _util_base__WEBPACK_IMPORTED_MODULE_1__["y"]; });
/* harmony reexport (safe) */ __webpack_require__.d(__webpack_exports__, "arraysEqual", function() { return _util_base__WEBPACK_IMPORTED_MODULE_1__["a"]; });
/* harmony reexport (safe) */ __webpack_require__.d(__webpack_exports__, "isInt", function() { return _util_base__WEBPACK_IMPORTED_MODULE_1__["v"]; });
/* harmony reexport (safe) */ __webpack_require__.d(__webpack_exports__, "tanh", function() { return _util_base__WEBPACK_IMPORTED_MODULE_1__["S"]; });
/* harmony reexport (safe) */ __webpack_require__.d(__webpack_exports__, "sizeToSquarishShape", function() { return _util_base__WEBPACK_IMPORTED_MODULE_1__["P"]; });
/* harmony reexport (safe) */ __webpack_require__.d(__webpack_exports__, "createShuffledIndices", function() { return _util_base__WEBPACK_IMPORTED_MODULE_1__["k"]; });
/* harmony reexport (safe) */ __webpack_require__.d(__webpack_exports__, "rightPad", function() { return _util_base__WEBPACK_IMPORTED_MODULE_1__["L"]; });
/* harmony reexport (safe) */ __webpack_require__.d(__webpack_exports__, "repeatedTry", function() { return _util_base__WEBPACK_IMPORTED_MODULE_1__["K"]; });
/* harmony reexport (safe) */ __webpack_require__.d(__webpack_exports__, "inferFromImplicitShape", function() { return _util_base__WEBPACK_IMPORTED_MODULE_1__["s"]; });
/* harmony reexport (safe) */ __webpack_require__.d(__webpack_exports__, "parseAxisParam", function() { return _util_base__WEBPACK_IMPORTED_MODULE_1__["I"]; });
/* harmony reexport (safe) */ __webpack_require__.d(__webpack_exports__, "squeezeShape", function() { return _util_base__WEBPACK_IMPORTED_MODULE_1__["Q"]; });
/* harmony reexport (safe) */ __webpack_require__.d(__webpack_exports__, "getTypedArrayFromDType", function() { return _util_base__WEBPACK_IMPORTED_MODULE_1__["o"]; });
/* harmony reexport (safe) */ __webpack_require__.d(__webpack_exports__, "getArrayFromDType", function() { return _util_base__WEBPACK_IMPORTED_MODULE_1__["n"]; });
/* harmony reexport (safe) */ __webpack_require__.d(__webpack_exports__, "checkConversionForErrors", function() { return _util_base__WEBPACK_IMPORTED_MODULE_1__["h"]; });
/* harmony reexport (safe) */ __webpack_require__.d(__webpack_exports__, "isValidDtype", function() { return _util_base__WEBPACK_IMPORTED_MODULE_1__["B"]; });
/* harmony reexport (safe) */ __webpack_require__.d(__webpack_exports__, "hasEncodingLoss", function() { return _util_base__WEBPACK_IMPORTED_MODULE_1__["p"]; });
/* harmony reexport (safe) */ __webpack_require__.d(__webpack_exports__, "isTypedArray", function() { return _util_base__WEBPACK_IMPORTED_MODULE_1__["A"]; });
/* harmony reexport (safe) */ __webpack_require__.d(__webpack_exports__, "bytesPerElement", function() { return _util_base__WEBPACK_IMPORTED_MODULE_1__["g"]; });
/* harmony reexport (safe) */ __webpack_require__.d(__webpack_exports__, "bytesFromStringArray", function() { return _util_base__WEBPACK_IMPORTED_MODULE_1__["f"]; });
/* harmony reexport (safe) */ __webpack_require__.d(__webpack_exports__, "isString", function() { return _util_base__WEBPACK_IMPORTED_MODULE_1__["z"]; });
/* harmony reexport (safe) */ __webpack_require__.d(__webpack_exports__, "isBoolean", function() { return _util_base__WEBPACK_IMPORTED_MODULE_1__["t"]; });
/* harmony reexport (safe) */ __webpack_require__.d(__webpack_exports__, "isNumber", function() { return _util_base__WEBPACK_IMPORTED_MODULE_1__["w"]; });
/* harmony reexport (safe) */ __webpack_require__.d(__webpack_exports__, "inferDtype", function() { return _util_base__WEBPACK_IMPORTED_MODULE_1__["r"]; });
/* harmony reexport (safe) */ __webpack_require__.d(__webpack_exports__, "isFunction", function() { return _util_base__WEBPACK_IMPORTED_MODULE_1__["u"]; });
/* harmony reexport (safe) */ __webpack_require__.d(__webpack_exports__, "nearestDivisor", function() { return _util_base__WEBPACK_IMPORTED_MODULE_1__["G"]; });
/* harmony reexport (safe) */ __webpack_require__.d(__webpack_exports__, "computeStrides", function() { return _util_base__WEBPACK_IMPORTED_MODULE_1__["j"]; });
/* harmony reexport (safe) */ __webpack_require__.d(__webpack_exports__, "toNestedArray", function() { return _util_base__WEBPACK_IMPORTED_MODULE_1__["T"]; });
/* harmony reexport (safe) */ __webpack_require__.d(__webpack_exports__, "makeOnesTypedArray", function() { return _util_base__WEBPACK_IMPORTED_MODULE_1__["D"]; });
/* harmony reexport (safe) */ __webpack_require__.d(__webpack_exports__, "makeZerosTypedArray", function() { return _util_base__WEBPACK_IMPORTED_MODULE_1__["F"]; });
/* harmony reexport (safe) */ __webpack_require__.d(__webpack_exports__, "makeZerosNestedTypedArray", function() { return _util_base__WEBPACK_IMPORTED_MODULE_1__["E"]; });
/* harmony reexport (safe) */ __webpack_require__.d(__webpack_exports__, "assertNonNegativeIntegerDimensions", function() { return _util_base__WEBPACK_IMPORTED_MODULE_1__["c"]; });
/* harmony reexport (safe) */ __webpack_require__.d(__webpack_exports__, "locToIndex", function() { return _util_base__WEBPACK_IMPORTED_MODULE_1__["C"]; });
/* harmony reexport (safe) */ __webpack_require__.d(__webpack_exports__, "indexToLoc", function() { return _util_base__WEBPACK_IMPORTED_MODULE_1__["q"]; });
/* harmony reexport (safe) */ __webpack_require__.d(__webpack_exports__, "isPromise", function() { return _util_base__WEBPACK_IMPORTED_MODULE_1__["x"]; });
/**
* @license
* Copyright 2017 Google LLC. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================================
*/
/**
* Create typed array for scalar value. Used for storing in `DataStorage`.
*/
/**
 * Create typed array for scalar value. Used for storing in `DataStorage`.
 * Strings are stored as UTF-8 encoded bytes; every other dtype goes through
 * the generic typed-array conversion as a single-element array.
 */
function createScalarValue(value, dtype) {
    return dtype === 'string' ? encodeString(value) :
                                toTypedArray([value], dtype);
}
/**
 * Returns true when `a` is already a typed array of the flavor that backs
 * `dtype`, so it can be stored directly without copying into a new buffer.
 */
function noConversionNeeded(a, dtype) {
    switch (dtype) {
        case 'float32':
            return a instanceof Float32Array;
        case 'int32':
            return a instanceof Int32Array;
        case 'bool':
            return a instanceof Uint8Array;
        default:
            return false;
    }
}
/**
 * Converts a flat or nested numeric array into the typed array that backs
 * `dtype`. A `dtype` of null/undefined defaults to float32; string dtypes are
 * rejected because strings are encoded separately.
 */
function toTypedArray(a, dtype) {
    if (dtype === 'string') {
        throw new Error('Cannot convert a string[] to a TypedArray');
    }
    // Nested arrays are flattened before conversion.
    if (Array.isArray(a)) {
        a = _util_base__WEBPACK_IMPORTED_MODULE_1__[/* flatten */ "m"](a);
    }
    // In DEBUG mode, validate that every value is representable in `dtype`.
    if (Object(_environment__WEBPACK_IMPORTED_MODULE_0__[/* env */ "c"])().getBool('DEBUG')) {
        _util_base__WEBPACK_IMPORTED_MODULE_1__[/* checkConversionForErrors */ "h"](a, dtype);
    }
    // Reuse the buffer when it already has the right typed-array flavor.
    if (noConversionNeeded(a, dtype)) {
        return a;
    }
    const targetDtype = dtype == null ? 'float32' : dtype;
    switch (targetDtype) {
        case 'float32':
        case 'complex64':
            return new Float32Array(a);
        case 'int32':
            return new Int32Array(a);
        case 'bool': {
            // Any value that rounds to a non-zero integer becomes true (1).
            const boolValues = new Uint8Array(a.length);
            for (let i = 0; i < boolValues.length; ++i) {
                if (Math.round(a[i]) !== 0) {
                    boolValues[i] = 1;
                }
            }
            return boolValues;
        }
        default:
            throw new Error(`Unknown data type ${dtype}`);
    }
}
/**
* Returns the current high-resolution time in milliseconds relative to an
* arbitrary time in the past. It works across different platforms (node.js,
* browsers).
*
* ```js
* console.log(tf.util.now());
* ```
*
* @doc {heading: 'Util', namespace: 'util'}
*/
/**
 * Returns the current high-resolution time in milliseconds relative to an
 * arbitrary time in the past, delegating to the active platform so it works
 * across node.js and browsers.
 */
function now() {
    const platform = Object(_environment__WEBPACK_IMPORTED_MODULE_0__[/* env */ "c"])().platform;
    return platform.now();
}
/**
* Returns a platform-specific implementation of
* [`fetch`](https://developer.mozilla.org/en-US/docs/Web/API/Fetch_API).
*
* If `fetch` is defined on the global object (`window`, `process`, etc.),
* `tf.util.fetch` returns that function.
*
* If not, `tf.util.fetch` returns a platform-specific solution.
*
* ```js
* const resource = await tf.util.fetch('https://unpkg.com/@tensorflow/tfjs');
* // handle response
* ```
*
* @doc {heading: 'Util'}
*/
/**
 * Platform-specific `fetch`: delegates to the active platform's
 * implementation (the global `fetch` when one exists, otherwise a
 * platform-provided substitute).
 *
 * @param path The resource URL.
 * @param requestInits Optional fetch init options.
 */
function fetch(path, requestInits) {
    const platform = Object(_environment__WEBPACK_IMPORTED_MODULE_0__[/* env */ "c"])().platform;
    return platform.fetch(path, requestInits);
}
/**
* Encodes the provided string into bytes using the provided encoding scheme.
*
* @param s The string to encode.
* @param encoding The encoding scheme. Defaults to utf-8.
*
* @doc {heading: 'Util'}
*/
/**
 * Encodes the provided string into bytes using the provided encoding scheme.
 *
 * @param s The string to encode.
 * @param encoding The encoding scheme. Defaults to utf-8.
 */
function encodeString(s, encoding = 'utf-8') {
    // Guard against an explicitly-passed null/undefined/'' encoding, which the
    // default parameter alone would not catch.
    if (!encoding) {
        encoding = 'utf-8';
    }
    return Object(_environment__WEBPACK_IMPORTED_MODULE_0__[/* env */ "c"])().platform.encode(s, encoding);
}
/**
* Decodes the provided bytes into a string using the provided encoding scheme.
* @param bytes The bytes to decode.
*
* @param encoding The encoding scheme. Defaults to utf-8.
*
* @doc {heading: 'Util'}
*/
/**
 * Decodes the provided bytes into a string using the provided encoding scheme.
 *
 * @param bytes The bytes to decode.
 * @param encoding The encoding scheme. Defaults to utf-8.
 */
function decodeString(bytes, encoding = 'utf-8') {
    // Guard against an explicitly-passed null/undefined/'' encoding, which the
    // default parameter alone would not catch.
    if (!encoding) {
        encoding = 'utf-8';
    }
    return Object(_environment__WEBPACK_IMPORTED_MODULE_0__[/* env */ "c"])().platform.decode(bytes, encoding);
}
//# sourceMappingURL=util.js.map
/***/ }),
/* 11 */
/***/ (function(module, __webpack_exports__, __webpack_require__) {
"use strict";
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "a", function() { return assertNotComplex; });
/* harmony import */ var _tensorflow_tfjs_core__WEBPACK_IMPORTED_MODULE_0__ = __webpack_require__(0);
/**
* @license
* Copyright 2019 Google LLC. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================================
*/
/**
 * Asserts that none of the given tensors has dtype complex64, since the CPU
 * backend cannot operate on complex tensors.
 *
 * @param tensor A single tensor or an array of tensors (null entries are
 *     skipped).
 * @param opName The op name used in the assertion message.
 */
function assertNotComplex(tensor, opName) {
    const tensors = Array.isArray(tensor) ? tensor : [tensor];
    for (const t of tensors) {
        if (t != null) {
            _tensorflow_tfjs_core__WEBPACK_IMPORTED_MODULE_0__["util"].assert(t.dtype !== 'complex64', () => `${opName} does not support complex64 tensors in the CPU backend.`);
        }
    }
}
//# sourceMappingURL=cpu_util.js.map
/***/ }),
/* 12 */
/***/ (function(module, __webpack_exports__, __webpack_require__) {
"use strict";
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "a", function() { return cast; });
/* harmony import */ var _engine__WEBPACK_IMPORTED_MODULE_0__ = __webpack_require__(5);
/* harmony import */ var _kernel_names__WEBPACK_IMPORTED_MODULE_1__ = __webpack_require__(3);
/* harmony import */ var _tensor_util_env__WEBPACK_IMPORTED_MODULE_2__ = __webpack_require__(2);
/* harmony import */ var _util__WEBPACK_IMPORTED_MODULE_3__ = __webpack_require__(8);
/* harmony import */ var _operation__WEBPACK_IMPORTED_MODULE_4__ = __webpack_require__(4);
/**
* @license
* Copyright 2020 Google Inc. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================================
*/
/**
* Casts a `tf.Tensor` to a new dtype.
*
* ```js
* const x = tf.tensor1d([1.5, 2.5, 3]);
* tf.cast(x, 'int32').print();
* ```
* @param x The input tensor to be casted.
* @param dtype The dtype to cast the input tensor to.
*
* @doc {heading: 'Tensors', subheading: 'Transformations'}
*/
/**
 * Casts a `tf.Tensor` to a new dtype.
 *
 * @param x The input tensor to be casted.
 * @param dtype The dtype to cast the input tensor to.
 * @throws If `dtype` is unknown, or if the cast crosses the string/non-string
 *     boundary in either direction.
 */
function cast_(x, dtype) {
    const $x = Object(_tensor_util_env__WEBPACK_IMPORTED_MODULE_2__[/* convertToTensor */ "a"])(x, 'x', 'cast');
    // Reject dtypes the engine does not know about.
    if (!_util__WEBPACK_IMPORTED_MODULE_3__[/* isValidDtype */ "B"](dtype)) {
        throw new Error(`Failed to cast to unknown dtype ${dtype}`);
    }
    // Casting into or out of the string dtype is not supported.
    const toString = dtype === 'string';
    const fromString = $x.dtype === 'string';
    if (toString !== fromString) {
        throw new Error('Only strings can be casted to strings');
    }
    return _engine__WEBPACK_IMPORTED_MODULE_0__[/* ENGINE */ "a"].runKernel(_kernel_names__WEBPACK_IMPORTED_MODULE_1__[/* Cast */ "w"], { x: $x }, { dtype });
}
const cast = Object(_operation__WEBPACK_IMPORTED_MODULE_4__[/* op */ "b"])({ cast_ });
//# sourceMappingURL=cast.js.map
/***/ }),
/* 13 */
/***/ (function(module, __webpack_exports__, __webpack_require__) {
"use strict";
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "a", function() { return add; });
/* harmony import */ var _engine__WEBPACK_IMPORTED_MODULE_0__ = __webpack_require__(5);
/* harmony import */ var _kernel_names__WEBPACK_IMPORTED_MODULE_1__ = __webpack_require__(3);
/* harmony import */ var _tensor_util__WEBPACK_IMPORTED_MODULE_2__ = __webpack_require__(23);
/* harmony import */ var _tensor_util_env__WEBPACK_IMPORTED_MODULE_3__ = __webpack_require__(2);
/* harmony import */ var _operation__WEBPACK_IMPORTED_MODULE_4__ = __webpack_require__(4);
/**
* @license
* Copyright 2020 Google LLC. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================================
*/
/**
* Adds two `tf.Tensor`s element-wise, A + B. Supports broadcasting.
*
*
* ```js
* const a = tf.tensor1d([1, 2, 3, 4]);
* const b = tf.tensor1d([10, 20, 30, 40]);
*
* a.add(b).print(); // or tf.add(a, b)
* ```
*
* ```js
* // Broadcast add a with b.
* const a = tf.scalar(5);
* const b = tf.tensor1d([10, 20, 30, 40]);
*
* a.add(b).print(); // or tf.add(a, b)
* ```
* @param a The first `tf.Tensor` to add.
* @param b The second `tf.Tensor` to add. Must have the same type as `a`.
*
* @doc {heading: 'Operations', subheading: 'Arithmetic'}
*/
/**
 * Adds two `tf.Tensor`s element-wise, A + B. Supports broadcasting.
 *
 * @param a The first `tf.Tensor` to add.
 * @param b The second `tf.Tensor` to add. Must have the same type as `a`.
 */
function add_(a, b) {
    // Upcast both operands to a common dtype before dispatching the kernel.
    const [$a, $b] = Object(_tensor_util__WEBPACK_IMPORTED_MODULE_2__["makeTypesMatch"])(
        Object(_tensor_util_env__WEBPACK_IMPORTED_MODULE_3__[/* convertToTensor */ "a"])(a, 'a', 'add'),
        Object(_tensor_util_env__WEBPACK_IMPORTED_MODULE_3__[/* convertToTensor */ "a"])(b, 'b', 'add'));
    return _engine__WEBPACK_IMPORTED_MODULE_0__[/* ENGINE */ "a"].runKernel(_kernel_names__WEBPACK_IMPORTED_MODULE_1__[/* Add */ "d"], { a: $a, b: $b });
}
const add = Object(_operation__WEBPACK_IMPORTED_MODULE_4__[/* op */ "b"])({ add_ });
//# sourceMappingURL=add.js.map
/***/ }),
/* 14 */
/***/ (function(module, __webpack_exports__, __webpack_require__) {
"use strict";
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "a", function() { return sub; });
/* harmony import */ var _engine__WEBPACK_IMPORTED_MODULE_0__ = __webpack_require__(5);
/* harmony import */ var _kernel_names__WEBPACK_IMPORTED_MODULE_1__ = __webpack_require__(3);
/* harmony import */ var _tensor_util__WEBPACK_IMPORTED_MODULE_2__ = __webpack_require__(23);
/* harmony import */ var _tensor_util_env__WEBPACK_IMPORTED_MODULE_3__ = __webpack_require__(2);
/* harmony import */ var _operation__WEBPACK_IMPORTED_MODULE_4__ = __webpack_require__(4);
/**
* @license
* Copyright 2020 Google LLC. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================================
*/
/**
* Subtracts two `tf.Tensor`s element-wise, A - B. Supports broadcasting.
*
* ```js
* const a = tf.tensor1d([10, 20, 30, 40]);
* const b = tf.tensor1d([1, 2, 3, 4]);
*
* a.sub(b).print(); // or tf.sub(a, b)
* ```
*
* ```js
* // Broadcast subtract a with b.
* const a = tf.tensor1d([10, 20, 30, 40]);
* const b = tf.scalar(5);
*
* a.sub(b).print(); // or tf.sub(a, b)
* ```
* @param a The first `tf.Tensor` to subtract from.
* @param b The second `tf.Tensor` to be subtracted. Must have the same dtype as
* `a`.
*
* @doc {heading: 'Operations', subheading: 'Arithmetic'}
*/
/**
 * Subtracts two `tf.Tensor`s element-wise, A - B. Supports broadcasting.
 *
 * @param a The first `tf.Tensor` to subtract from.
 * @param b The second `tf.Tensor` to be subtracted. Must have the same dtype
 *     as `a`.
 */
function sub_(a, b) {
    // Upcast both operands to a common dtype before dispatching the kernel.
    const [$a, $b] = Object(_tensor_util__WEBPACK_IMPORTED_MODULE_2__["makeTypesMatch"])(
        Object(_tensor_util_env__WEBPACK_IMPORTED_MODULE_3__[/* convertToTensor */ "a"])(a, 'a', 'sub'),
        Object(_tensor_util_env__WEBPACK_IMPORTED_MODULE_3__[/* convertToTensor */ "a"])(b, 'b', 'sub'));
    return _engine__WEBPACK_IMPORTED_MODULE_0__[/* ENGINE */ "a"].runKernel(_kernel_names__WEBPACK_IMPORTED_MODULE_1__[/* Sub */ "Rc"], { a: $a, b: $b });
}
const sub = Object(_operation__WEBPACK_IMPORTED_MODULE_4__[/* op */ "b"])({ sub_ });
//# sourceMappingURL=sub.js.map
/***/ }),
/* 15 */
/***/ (function(module, __webpack_exports__, __webpack_require__) {
"use strict";
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "a", function() { return div; });
/* harmony import */ var _engine__WEBPACK_IMPORTED_MODULE_0__ = __webpack_require__(5);
/* harmony import */ var _kernel_names__WEBPACK_IMPORTED_MODULE_1__ = __webpack_require__(3);
/* harmony import */ var _tensor_util__WEBPACK_IMPORTED_MODULE_2__ = __webpack_require__(23);
/* harmony import */ var _tensor_util_env__WEBPACK_IMPORTED_MODULE_3__ = __webpack_require__(2);
/* harmony import */ var _floorDiv__WEBPACK_IMPORTED_MODULE_4__ = __webpack_require__(114);
/* harmony import */ var _operation__WEBPACK_IMPORTED_MODULE_5__ = __webpack_require__(4);
/**
* @license
* Copyright 2020 Google LLC. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================================
*/
/**
* Divides two `tf.Tensor`s element-wise, A / B. Supports broadcasting.
*
* ```js
* const a = tf.tensor1d([1, 4, 9, 16]);
* const b = tf.tensor1d([1, 2, 3, 4]);
*
* a.div(b).print(); // or tf.div(a, b)
* ```
*
* ```js
* // Broadcast div a with b.
* const a = tf.tensor1d([2, 4, 6, 8]);
* const b = tf.scalar(2);
*
* a.div(b).print(); // or tf.div(a, b)
* ```
*
* @param a The first tensor as the numerator.
* @param b The second tensor as the denominator. Must have the same dtype as
* `a`.
*
* @doc {heading: 'Operations', subheading: 'Arithmetic'}
*/
/**
 * Divides two `tf.Tensor`s element-wise, A / B. Supports broadcasting.
 *
 * @param a The first tensor as the numerator.
 * @param b The second tensor as the denominator. Must have the same dtype as
 *     `a`.
 */
function div_(a, b) {
    const $aRaw = Object(_tensor_util_env__WEBPACK_IMPORTED_MODULE_3__[/* convertToTensor */ "a"])(a, 'a', 'div');
    const $bRaw = Object(_tensor_util_env__WEBPACK_IMPORTED_MODULE_3__[/* convertToTensor */ "a"])(b, 'b', 'div');
    const [$a, $b] = Object(_tensor_util__WEBPACK_IMPORTED_MODULE_2__["makeTypesMatch"])($aRaw, $bRaw);
    // Integer / integer division follows floor-division semantics.
    if ($a.dtype === 'int32' && $b.dtype === 'int32') {
        return Object(_floorDiv__WEBPACK_IMPORTED_MODULE_4__[/* floorDiv */ "a"])($a, $b);
    }
    // tslint:disable-next-line: no-unnecessary-type-assertion
    return _engine__WEBPACK_IMPORTED_MODULE_0__[/* ENGINE */ "a"].runKernel(_kernel_names__WEBPACK_IMPORTED_MODULE_1__[/* RealDiv */ "lc"], { a: $a, b: $b }, {});
}
const div = Object(_operation__WEBPACK_IMPORTED_MODULE_5__[/* op */ "b"])({ div_ });
//# sourceMappingURL=div.js.map
/***/ }),
/* 16 */
/***/ (function(module, __webpack_exports__, __webpack_require__) {
"use strict";
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "a", function() { return scalar; });
/* harmony import */ var _util__WEBPACK_IMPORTED_MODULE_0__ = __webpack_require__(8);
/* harmony import */ var _tensor_ops_util__WEBPACK_IMPORTED_MODULE_1__ = __webpack_require__(51);
/**
* @license
* Copyright 2018 Google LLC. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================================
*/
/**
* Creates rank-0 `tf.Tensor` (scalar) with the provided value and dtype.
*
* The same functionality can be achieved with `tf.tensor`, but in general
* we recommend using `tf.scalar` as it makes the code more readable.
*
* ```js
* tf.scalar(3.14).print();
* ```
*
* @param value The value of the scalar.
* @param dtype The data type.
*
* @doc {heading: 'Tensors', subheading: 'Creation'}
*/
/**
 * Creates rank-0 `tf.Tensor` (scalar) with the provided value and dtype.
 *
 * @param value The value of the scalar. Must be a primitive
 *     (number|boolean|string), except that a complex64 scalar may be a typed
 *     array and an encoded-string scalar must be a `Uint8Array`.
 * @param dtype The data type.
 */
function scalar(value, dtype) {
    const valueIsTypedArray = Object(_util__WEBPACK_IMPORTED_MODULE_0__[/* isTypedArray */ "A"])(value);
    // Non-primitive values are only allowed for complex64 (typed array) and
    // string (Uint8Array of encoded bytes) dtypes.
    if (dtype !== 'complex64' &&
        ((valueIsTypedArray && dtype !== 'string') || Array.isArray(value))) {
        throw new Error('Error creating a new Scalar: value must be a primitive ' +
            '(number|boolean|string)');
    }
    if (dtype === 'string' && valueIsTypedArray &&
        !(value instanceof Uint8Array)) {
        throw new Error('When making a scalar from encoded string, ' +
            'the value must be `Uint8Array`.');
    }
    // A scalar has an empty shape.
    return Object(_tensor_ops_util__WEBPACK_IMPORTED_MODULE_1__[/* makeTensor */ "a"])(value, [], [], dtype);
}
//# sourceMappingURL=scalar.js.map
/***/ }),
/* 17 */
/***/ (function(module, __webpack_exports__, __webpack_require__) {
"use strict";
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "b", function() { return getBroadcastDims; });
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "c", function() { return getReductionAxes; });
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "a", function() { return assertAndGetBroadcastShape; });
/**
* @license
* Copyright 2017 Google LLC. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================================
*/
/**
* Returns the dimensions in the input shape that are broadcasted to
* produce the provided output shape.
*
* The returned dimensions are 0-indexed and sorted. An example:
* inShape = [4, 1, 3]
* outShape = [5, 4, 3, 3]
* result = [1]. Dimension 1 (2nd dimension of input) gets broadcasted 1 => 3.
*/
/**
 * Returns the dimensions in the input shape that are broadcasted to
 * produce the provided output shape.
 *
 * The returned dimensions are 0-indexed and sorted. An example:
 * inShape = [4, 1, 3]
 * outShape = [5, 4, 3, 3]
 * result = [1]. Dimension 1 (2nd dimension of input) gets broadcasted 1 => 3.
 */
function getBroadcastDims(inShape, outShape) {
    const broadcastDims = [];
    // Walk both shapes from the trailing dimension backwards; an input
    // dimension is broadcast when it is 1 (or absent) while the aligned
    // output dimension is larger than 1.
    for (let offset = 0; offset < inShape.length; offset++) {
        const inAxis = inShape.length - 1 - offset;
        const inDim = inShape[inAxis] || 1;
        const outDim = outShape[outShape.length - 1 - offset] || 1;
        if (inDim === 1 && outDim > 1) {
            broadcastDims.unshift(inAxis);
        }
    }
    return broadcastDims;
}
/**
* Returns the axes in the output space that should be reduced to produce
* the input space.
*/
/**
 * Returns the axes in the output space that should be reduced to produce
 * the input space.
 */
function getReductionAxes(inShape, outShape) {
    const axes = [];
    // Align shapes at the trailing dimension; an output axis must be reduced
    // when the input had no dimension there, or had size 1 while the output
    // dimension is larger.
    for (let offset = 0; offset < outShape.length; offset++) {
        const inDim = inShape[inShape.length - 1 - offset];
        const outAxis = outShape.length - 1 - offset;
        const outDim = outShape[outAxis];
        const wasBroadcast = inDim == null || (inDim === 1 && outDim > 1);
        if (wasBroadcast) {
            axes.unshift(outAxis);
        }
    }
    return axes;
}
/**
 * Computes the broadcasted output shape of `shapeA` and `shapeB`, throwing
 * when the two shapes are incompatible under broadcasting rules.
 */
function assertAndGetBroadcastShape(shapeA, shapeB) {
    const outShape = [];
    const rank = Math.max(shapeA.length, shapeB.length);
    // Align shapes at the trailing dimension; a missing dimension acts as 1.
    for (let offset = 0; offset < rank; offset++) {
        const rawA = shapeA[shapeA.length - 1 - offset];
        const rawB = shapeB[shapeB.length - 1 - offset];
        const dimA = rawA == null ? 1 : rawA;
        const dimB = rawB == null ? 1 : rawB;
        // Two dimensions are compatible when they are equal or one of them is 1.
        if (dimA !== 1 && dimB !== 1 && dimA !== dimB) {
            const errMsg = `Operands could not be broadcast together with shapes ` +
                `${shapeA} and ${shapeB}.`;
            throw Error(errMsg);
        }
        outShape.unshift(dimA === 1 ? dimB : dimA);
    }
    return outShape;
}
//# sourceMappingURL=broadcast_util.js.map
/***/ }),
/* 18 */
/***/ (function(module, __webpack_exports__, __webpack_require__) {
"use strict";
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "a", function() { return unaryKernelFunc; });
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "b", function() { return unaryKernelFuncFromImpl; });
/* harmony import */ var _tensorflow_tfjs_core__WEBPACK_IMPORTED_MODULE_0__ = __webpack_require__(0);
/* harmony import */ var _cpu_util__WEBPACK_IMPORTED_MODULE_1__ = __webpack_require__(11);
/**
* @license
* Copyright 2020 Google LLC. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================================
*/
/**
* Template that creates a `KernelFunc` for unary ops.
* @param name Kernel name.
* @param op A `SimpleUnaryOperation` for the kernel.
* @param dtype Optional. If set, the result has this dtype. Otherwise, the
* result has the same dtype as the input. This is mainly used in certain
* kernels that return bool type, such as isFinite, isInf, etc.
*/
/**
 * Template that creates a `KernelFunc` for unary ops.
 * @param name Kernel name.
 * @param op A `SimpleUnaryOperation` for the kernel.
 * @param dtype Optional. If set, the result has this dtype. Otherwise, the
 *     result has the same dtype as the input. This is mainly used in certain
 *     kernels that return bool type, such as isFinite, isInf, etc.
 */
function unaryKernelFunc(name, op, dtype) {
    return ({ inputs, attrs, backend }) => {
        const { x } = inputs;
        Object(_cpu_util__WEBPACK_IMPORTED_MODULE_1__[/* assertNotComplex */ "a"])(x, name);
        if (x.dtype === 'string' || dtype === 'string') {
            throw new Error('unaryKernelFunc does not support string input/output');
        }
        const cpuBackend = backend;
        const inVals = cpuBackend.data.get(x.dataId).values;
        const numElements = _tensorflow_tfjs_core__WEBPACK_IMPORTED_MODULE_0__["util"].sizeFromShape(x.shape);
        const outDtype = dtype || x.dtype;
        const outVals = _tensorflow_tfjs_core__WEBPACK_IMPORTED_MODULE_0__["util"].getArrayFromDType(outDtype, numElements);
        // Apply the scalar op to every element independently.
        for (let i = 0; i < numElements; ++i) {
            outVals[i] = op(inVals[i], attrs);
        }
        return cpuBackend.makeTensorInfo(x.shape, outDtype, outVals);
    };
}
/**
* Template that creates a `KernelFunc` for unary ops from the given
* `SimpleUnaryImpl`..
* @param name Kernel name.
* @param unaryImpl A `SimpleUnaryImpl` that implements the op.
* @param dtype Optional. If set, the result has this dtype. Otherwise, the
* result has the same dtype as the input. This is mainly used in certain
* kernels that return bool type, such as isFinite, isInf, etc.
*/
/**
 * Template that creates a `KernelFunc` for unary ops from the given
 * `SimpleUnaryImpl`.
 * @param name Kernel name.
 * @param unaryImpl A `SimpleUnaryImpl` that implements the op over a whole
 *     values buffer at once.
 * @param dtype Optional. If set, the result has this dtype. Otherwise, the
 *     result has the same dtype as the input. This is mainly used in certain
 *     kernels that return bool type, such as isFinite, isInf, etc.
 */
function unaryKernelFuncFromImpl(name, unaryImpl, dtype) {
    return ({ inputs, attrs, backend }) => {
        const { x } = inputs;
        Object(_cpu_util__WEBPACK_IMPORTED_MODULE_1__[/* assertNotComplex */ "a"])(x, name);
        if (x.dtype === 'string' || dtype === 'string') {
            throw new Error('unaryKernelFunc does not support string input/output');
        }
        const cpuBackend = backend;
        const inVals = cpuBackend.data.get(x.dataId).values;
        const outDtype = dtype || x.dtype;
        // Delegate the whole buffer to the vectorized implementation.
        const outVals = unaryImpl(inVals, outDtype, attrs);
        return cpuBackend.makeTensorInfo(x.shape, outDtype, outVals);
    };
}
//# sourceMappingURL=unary_utils.js.map
/***/ }),
/* 19 */
/***/ (function(module, __webpack_exports__, __webpack_require__) {
"use strict";
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "a", function() { return sum; });
/* harmony import */ var _engine__WEBPACK_IMPORTED_MODULE_0__ = __webpack_require__(5);
/* harmony import */ var _kernel_names__WEBPACK_IMPORTED_MODULE_1__ = __webpack_require__(3);
/* harmony import */ var _tensor_util_env__WEBPACK_IMPORTED_MODULE_2__ = __webpack_require__(2);
/* harmony import */ var _cast__WEBPACK_IMPORTED_MODULE_3__ = __webpack_require__(12);
/* harmony import */ var _operation__WEBPACK_IMPORTED_MODULE_4__ = __webpack_require__(4);
/**
* @license
* Copyright 2018 Google LLC. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================================
*/
/**
* Computes the sum of elements across dimensions of a `tf.Tensor`.
*
* Reduces the input along the dimensions given in `axes`. Unless `keepDims`
* is true, the rank of the `tf.Tensor` is reduced by 1 for each entry in
* `axes`. If `keepDims` is true, the reduced dimensions are retained with
* length 1. If axes has no entries, all dimensions are reduced, and a
* `tf.Tensor` with a single element is returned.
*
* ```js
* const x = tf.tensor1d([1, 2, 3]);
*
* x.sum().print(); // or tf.sum(x)
* ```
*
* ```js
* const x = tf.tensor2d([1, 2, 3, 4], [2, 2]);
*
* const axis = 1;
* x.sum(axis).print(); // or tf.sum(x, axis)
* ```
*
* @param x The input tensor to compute the sum over. If the dtype is `bool`
* it will be converted to `int32` and the output dtype will be `int32`.
* @param axis The dimension(s) to reduce. By default it reduces
* all dimensions.
* @param keepDims If true, retains reduced dimensions with size 1.
*
* @doc {heading: 'Operations', subheading: 'Reduction'}
*/
/**
 * Computes the sum of elements across dimensions of a `tf.Tensor`.
 *
 * @param x The input tensor to compute the sum over. If the dtype is `bool`
 *     it will be converted to `int32` and the output dtype will be `int32`.
 * @param axis The dimension(s) to reduce. By default it reduces
 *     all dimensions.
 * @param keepDims If true, retains reduced dimensions with size 1.
 */
function sum_(x, axis = null, keepDims = false) {
    let $x = Object(_tensor_util_env__WEBPACK_IMPORTED_MODULE_2__[/* convertToTensor */ "a"])(x, 'x', 'sum');
    // Boolean tensors are accumulated as int32.
    if ($x.dtype === 'bool') {
        $x = Object(_cast__WEBPACK_IMPORTED_MODULE_3__[/* cast */ "a"])($x, 'int32');
    }
    return _engine__WEBPACK_IMPORTED_MODULE_0__[/* ENGINE */ "a"].runKernel(_kernel_names__WEBPACK_IMPORTED_MODULE_1__[/* Sum */ "Sc"], { x: $x }, { axis, keepDims });
}
const sum = Object(_operation__WEBPACK_IMPORTED_MODULE_4__[/* op */ "b"])({ sum_ });
//# sourceMappingURL=sum.js.map
/***/ }),
/* 20 */
/***/ (function(module, __webpack_exports__, __webpack_require__) {
"use strict";
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "a", function() { return zerosLike; });
/* harmony import */ var _engine__WEBPACK_IMPORTED_MODULE_0__ = __webpack_require__(5);
/* harmony import */ var _kernel_names__WEBPACK_IMPORTED_MODULE_1__ = __webpack_require__(3);
/* harmony import */ var _tensor_util_env__WEBPACK_IMPORTED_MODULE_2__ = __webpack_require__(2);
/* harmony import */ var _operation__WEBPACK_IMPORTED_MODULE_3__ = __webpack_require__(4);
/**
* @license
* Copyright 2018 Google LLC. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================================
*/
/**
* Creates a `tf.Tensor` with all elements set to 0 with the same shape as the
* given tensor.
*
* ```js
* const x = tf.tensor([1, 2]);
* tf.zerosLike(x).print();
* ```
*
* @param x The tensor of required shape.
*
* @doc {heading: 'Tensors', subheading: 'Creation'}
*/
/**
 * Creates a `tf.Tensor` with all elements set to 0 with the same shape as the
 * given tensor.
 *
 * @param x The tensor of required shape.
 */
function zerosLike_(x) {
    const $x = Object(_tensor_util_env__WEBPACK_IMPORTED_MODULE_2__[/* convertToTensor */ "a"])(x, 'x', 'zerosLike');
    return _engine__WEBPACK_IMPORTED_MODULE_0__[/* ENGINE */ "a"].runKernel(_kernel_names__WEBPACK_IMPORTED_MODULE_1__[/* ZerosLike */ "cd"], { x: $x });
}
const zerosLike = Object(_operation__WEBPACK_IMPORTED_MODULE_3__[/* op */ "b"])({ zerosLike_ });
//# sourceMappingURL=zeros_like.js.map
/***/ }),
/* 21 */
/***/ (function(module, __webpack_exports__, __webpack_require__) {
"use strict";
// EXPORTS
__webpack_require__.d(__webpack_exports__, "b", function() { return /* reexport */ abs["a" /* abs */]; });
__webpack_require__.d(__webpack_exports__, "c", function() { return /* reexport */ acos["a" /* acos */]; });
__webpack_require__.d(__webpack_exports__, "d", function() { return /* reexport */ acosh["a" /* acosh */]; });
__webpack_require__.d(__webpack_exports__, "e", function() { return /* reexport */ add["a" /* add */]; });
__webpack_require__.d(__webpack_exports__, "f", function() { return /* reexport */ add_n["a" /* addN */]; });
__webpack_require__.d(__webpack_exports__, "g", function() { return /* reexport */ ops_all["a" /* all */]; });
__webpack_require__.d(__webpack_exports__, "h", function() { return /* reexport */ any["a" /* any */]; });
__webpack_require__.d(__webpack_exports__, "i", function() { return /* reexport */ arg_max["a" /* argMax */]; });
__webpack_require__.d(__webpack_exports__, "j", function() { return /* reexport */ arg_min["a" /* argMin */]; });
__webpack_require__.d(__webpack_exports__, "k", function() { return /* reexport */ asin["a" /* asin */]; });
__webpack_require__.d(__webpack_exports__, "l", function() { return /* reexport */ asinh["a" /* asinh */]; });
__webpack_require__.d(__webpack_exports__, "m", function() { return /* reexport */ atan["a" /* atan */]; });
__webpack_require__.d(__webpack_exports__, "n", function() { return /* reexport */ atan2["a" /* atan2 */]; });
__webpack_require__.d(__webpack_exports__, "o", function() { return /* reexport */ atanh["a" /* atanh */]; });
__webpack_require__.d(__webpack_exports__, "p", function() { return /* reexport */ avg_pool["a" /* avgPool */]; });
__webpack_require__.d(__webpack_exports__, "q", function() { return /* reexport */ avg_pool_3d["a" /* avgPool3d */]; });
__webpack_require__.d(__webpack_exports__, "r", function() { return /* reexport */ basicLSTMCell; });
__webpack_require__.d(__webpack_exports__, "w", function() { return /* reexport */ batch_to_space_nd["a" /* batchToSpaceND */]; });
__webpack_require__.d(__webpack_exports__, "s", function() { return /* reexport */ batchnorm["a" /* batchNorm */]; });
__webpack_require__.d(__webpack_exports__, "t", function() { return /* reexport */ batchNorm2d; });
__webpack_require__.d(__webpack_exports__, "u", function() { return /* reexport */ batchNorm3d; });
__webpack_require__.d(__webpack_exports__, "v", function() { return /* reexport */ batchNorm4d; });
__webpack_require__.d(__webpack_exports__, "x", function() { return /* reexport */ bincount["a" /* bincount */]; });
__webpack_require__.d(__webpack_exports__, "z", function() { return /* reexport */ broadcast_to["a" /* broadcastTo */]; });
__webpack_require__.d(__webpack_exports__, "A", function() { return /* reexport */ buffer["a" /* buffer */]; });
__webpack_require__.d(__webpack_exports__, "B", function() { return /* reexport */ cast["a" /* cast */]; });
__webpack_require__.d(__webpack_exports__, "C", function() { return /* reexport */ ceil["a" /* ceil */]; });
__webpack_require__.d(__webpack_exports__, "D", function() { return /* reexport */ clip_by_value["a" /* clipByValue */]; });
__webpack_require__.d(__webpack_exports__, "E", function() { return /* reexport */ clone["a" /* clone */]; });
__webpack_require__.d(__webpack_exports__, "F", function() { return /* reexport */ complex["a" /* complex */]; });
__webpack_require__.d(__webpack_exports__, "G", function() { return /* reexport */ concat["a" /* concat */]; });
__webpack_require__.d(__webpack_exports__, "H", function() { return /* reexport */ concat1d; });
__webpack_require__.d(__webpack_exports__, "I", function() { return /* reexport */ concat2d; });
__webpack_require__.d(__webpack_exports__, "J", function() { return /* reexport */ concat3d; });
__webpack_require__.d(__webpack_exports__, "K", function() { return /* reexport */ concat4d; });
__webpack_require__.d(__webpack_exports__, "L", function() { return /* reexport */ conv1d["a" /* conv1d */]; });
__webpack_require__.d(__webpack_exports__, "M", function() { return /* reexport */ conv2d["a" /* conv2d */]; });
__webpack_require__.d(__webpack_exports__, "N", function() { return /* reexport */ conv2d_transpose["a" /* conv2dTranspose */]; });
__webpack_require__.d(__webpack_exports__, "O", function() { return /* reexport */ conv3d["a" /* conv3d */]; });
__webpack_require__.d(__webpack_exports__, "P", function() { return /* reexport */ conv3dTranspose; });
__webpack_require__.d(__webpack_exports__, "Q", function() { return /* reexport */ cos["a" /* cos */]; });
__webpack_require__.d(__webpack_exports__, "R", function() { return /* reexport */ cosh["a" /* cosh */]; });
__webpack_require__.d(__webpack_exports__, "T", function() { return /* reexport */ cumsum["a" /* cumsum */]; });
__webpack_require__.d(__webpack_exports__, "U", function() { return /* reexport */ dense_bincount["a" /* denseBincount */]; });
__webpack_require__.d(__webpack_exports__, "V", function() { return /* reexport */ depth_to_space["a" /* depthToSpace */]; });
__webpack_require__.d(__webpack_exports__, "W", function() { return /* reexport */ depthwise_conv2d["a" /* depthwiseConv2d */]; });
__webpack_require__.d(__webpack_exports__, "X", function() { return /* reexport */ diag; });
__webpack_require__.d(__webpack_exports__, "Y", function() { return /* reexport */ dilation2d["a" /* dilation2d */]; });
__webpack_require__.d(__webpack_exports__, "Z", function() { return /* reexport */ div["a" /* div */]; });
__webpack_require__.d(__webpack_exports__, "ab", function() { return /* reexport */ div_no_nan["a" /* divNoNan */]; });
__webpack_require__.d(__webpack_exports__, "bb", function() { return /* reexport */ dot["a" /* dot */]; });
__webpack_require__.d(__webpack_exports__, "db", function() { return /* reexport */ einsum["a" /* einsum */]; });
__webpack_require__.d(__webpack_exports__, "eb", function() { return /* reexport */ elu["a" /* elu */]; });
__webpack_require__.d(__webpack_exports__, "gb", function() { return /* reexport */ equal["a" /* equal */]; });
__webpack_require__.d(__webpack_exports__, "hb", function() { return /* reexport */ erf["a" /* erf */]; });
__webpack_require__.d(__webpack_exports__, "ib", function() { return /* reexport */ exp["a" /* exp */]; });
__webpack_require__.d(__webpack_exports__, "jb", function() { return /* reexport */ expand_dims["a" /* expandDims */]; });
__webpack_require__.d(__webpack_exports__, "kb", function() { return /* reexport */ expm1["a" /* expm1 */]; });
__webpack_require__.d(__webpack_exports__, "lb", function() { return /* reexport */ eye; });
__webpack_require__.d(__webpack_exports__, "nb", function() { return /* reexport */ fill["a" /* fill */]; });
__webpack_require__.d(__webpack_exports__, "ob", function() { return /* reexport */ floor["a" /* floor */]; });
__webpack_require__.d(__webpack_exports__, "pb", function() { return /* reexport */ floorDiv["a" /* floorDiv */]; });
__webpack_require__.d(__webpack_exports__, "rb", function() { return /* reexport */ gather["a" /* gather */]; });
__webpack_require__.d(__webpack_exports__, "tb", function() { return /* reexport */ greater["a" /* greater */]; });
__webpack_require__.d(__webpack_exports__, "ub", function() { return /* reexport */ greater_equal["a" /* greaterEqual */]; });
__webpack_require__.d(__webpack_exports__, "wb", function() { return /* reexport */ imag["a" /* imag */]; });
__webpack_require__.d(__webpack_exports__, "Ab", function() { return /* reexport */ is_finite["a" /* isFinite */]; });
__webpack_require__.d(__webpack_exports__, "Bb", function() { return /* reexport */ is_inf["a" /* isInf */]; });
__webpack_require__.d(__webpack_exports__, "Cb", function() { return /* reexport */ is_nan["a" /* isNaN */]; });
__webpack_require__.d(__webpack_exports__, "Db", function() { return /* reexport */ leaky_relu["a" /* leakyRelu */]; });
__webpack_require__.d(__webpack_exports__, "Eb", function() { return /* reexport */ less["a" /* less */]; });
__webpack_require__.d(__webpack_exports__, "Fb", function() { return /* reexport */ less_equal["a" /* lessEqual */]; });
__webpack_require__.d(__webpack_exports__, "Hb", function() { return /* reexport */ linspace["a" /* linspace */]; });
__webpack_require__.d(__webpack_exports__, "Ib", function() { return /* reexport */ local_response_normalization["a" /* localResponseNormalization */]; });
__webpack_require__.d(__webpack_exports__, "Jb", function() { return /* reexport */ log["a" /* log */]; });
__webpack_require__.d(__webpack_exports__, "Kb", function() { return /* reexport */ log1p["a" /* log1p */]; });
__webpack_require__.d(__webpack_exports__, "Lb", function() { return /* reexport */ log_sigmoid["a" /* logSigmoid */]; });
__webpack_require__.d(__webpack_exports__, "Mb", function() { return /* reexport */ log_softmax["a" /* logSoftmax */]; });
__webpack_require__.d(__webpack_exports__, "Nb", function() { return /* reexport */ log_sum_exp["a" /* logSumExp */]; });
__webpack_require__.d(__webpack_exports__, "Ob", function() { return /* reexport */ logical_and["a" /* logicalAnd */]; });
__webpack_require__.d(__webpack_exports__, "Pb", function() { return /* reexport */ logical_not["a" /* logicalNot */]; });
__webpack_require__.d(__webpack_exports__, "Qb", function() { return /* reexport */ logical_or["a" /* logicalOr */]; });
__webpack_require__.d(__webpack_exports__, "Rb", function() { return /* reexport */ logical_xor["a" /* logicalXor */]; });
__webpack_require__.d(__webpack_exports__, "Tb", function() { return /* reexport */ mat_mul["a" /* matMul */]; });
__webpack_require__.d(__webpack_exports__, "Ub", function() { return /* reexport */ max["a" /* max */]; });
__webpack_require__.d(__webpack_exports__, "Vb", function() { return /* reexport */ max_pool["a" /* maxPool */]; });
__webpack_require__.d(__webpack_exports__, "Wb", function() { return /* reexport */ max_pool_3d["a" /* maxPool3d */]; });
__webpack_require__.d(__webpack_exports__, "Xb", function() { return /* reexport */ max_pool_with_argmax["a" /* maxPoolWithArgmax */]; });
__webpack_require__.d(__webpack_exports__, "Yb", function() { return /* reexport */ maximum["a" /* maximum */]; });
__webpack_require__.d(__webpack_exports__, "Zb", function() { return /* reexport */ ops_mean["a" /* mean */]; });
__webpack_require__.d(__webpack_exports__, "ac", function() { return /* reexport */ meshgrid; });
__webpack_require__.d(__webpack_exports__, "bc", function() { return /* reexport */ min["a" /* min */]; });
__webpack_require__.d(__webpack_exports__, "cc", function() { return /* reexport */ minimum["a" /* minimum */]; });
__webpack_require__.d(__webpack_exports__, "dc", function() { return /* reexport */ mirror_pad["a" /* mirrorPad */]; });
__webpack_require__.d(__webpack_exports__, "ec", function() { return /* reexport */ mod["a" /* mod */]; });
__webpack_require__.d(__webpack_exports__, "fc", function() { return /* reexport */ moments; });
__webpack_require__.d(__webpack_exports__, "hc", function() { return /* reexport */ mul["a" /* mul */]; });
__webpack_require__.d(__webpack_exports__, "ic", function() { return /* reexport */ multiRNNCell; });
__webpack_require__.d(__webpack_exports__, "jc", function() { return /* reexport */ multinomial["a" /* multinomial */]; });
__webpack_require__.d(__webpack_exports__, "kc", function() { return /* reexport */ neg["a" /* neg */]; });
__webpack_require__.d(__webpack_exports__, "mc", function() { return /* reexport */ not_equal["a" /* notEqual */]; });
__webpack_require__.d(__webpack_exports__, "nc", function() { return /* reexport */ one_hot["a" /* oneHot */]; });
__webpack_require__.d(__webpack_exports__, "oc", function() { return /* reexport */ ones["a" /* ones */]; });
__webpack_require__.d(__webpack_exports__, "pc", function() { return /* reexport */ ones_like["a" /* onesLike */]; });
__webpack_require__.d(__webpack_exports__, "rc", function() { return /* reexport */ outerProduct; });
__webpack_require__.d(__webpack_exports__, "sc", function() { return /* reexport */ ops_pad["a" /* pad */]; });
__webpack_require__.d(__webpack_exports__, "tc", function() { return /* reexport */ pad1d; });
__webpack_require__.d(__webpack_exports__, "uc", function() { return /* reexport */ pad2d; });
__webpack_require__.d(__webpack_exports__, "vc", function() { return /* reexport */ pad3d; });
__webpack_require__.d(__webpack_exports__, "wc", function() { return /* reexport */ pad4d; });
__webpack_require__.d(__webpack_exports__, "xc", function() { return /* reexport */ pool["a" /* pool */]; });
__webpack_require__.d(__webpack_exports__, "yc", function() { return /* reexport */ pow["a" /* pow */]; });
__webpack_require__.d(__webpack_exports__, "zc", function() { return /* reexport */ prelu["a" /* prelu */]; });
__webpack_require__.d(__webpack_exports__, "Ac", function() { return /* reexport */ print["a" /* print */]; });
__webpack_require__.d(__webpack_exports__, "Bc", function() { return /* reexport */ prod["a" /* prod */]; });
__webpack_require__.d(__webpack_exports__, "Cc", function() { return /* reexport */ rand; });
__webpack_require__.d(__webpack_exports__, "Dc", function() { return /* reexport */ randomGamma; });
__webpack_require__.d(__webpack_exports__, "Ec", function() { return /* reexport */ randomNormal; });
__webpack_require__.d(__webpack_exports__, "Fc", function() { return /* reexport */ random_uniform["a" /* randomUniform */]; });
__webpack_require__.d(__webpack_exports__, "Gc", function() { return /* reexport */ range["a" /* range */]; });
__webpack_require__.d(__webpack_exports__, "Hc", function() { return /* reexport */ real["a" /* real */]; });
__webpack_require__.d(__webpack_exports__, "Ic", function() { return /* reexport */ reciprocal["a" /* reciprocal */]; });
__webpack_require__.d(__webpack_exports__, "Jc", function() { return /* reexport */ relu["a" /* relu */]; });
__webpack_require__.d(__webpack_exports__, "Kc", function() { return /* reexport */ relu6["a" /* relu6 */]; });
__webpack_require__.d(__webpack_exports__, "Lc", function() { return /* reexport */ reshape["a" /* reshape */]; });
__webpack_require__.d(__webpack_exports__, "Mc", function() { return /* reexport */ reverse["a" /* reverse */]; });
__webpack_require__.d(__webpack_exports__, "Nc", function() { return /* reexport */ reverse1d; });
__webpack_require__.d(__webpack_exports__, "Oc", function() { return /* reexport */ reverse2d; });
__webpack_require__.d(__webpack_exports__, "Pc", function() { return /* reexport */ reverse3d; });
__webpack_require__.d(__webpack_exports__, "Qc", function() { return /* reexport */ reverse4d; });
__webpack_require__.d(__webpack_exports__, "Sc", function() { return /* reexport */ round["a" /* round */]; });
__webpack_require__.d(__webpack_exports__, "Tc", function() { return /* reexport */ rsqrt["a" /* rsqrt */]; });
__webpack_require__.d(__webpack_exports__, "Uc", function() { return /* reexport */ scalar["a" /* scalar */]; });
__webpack_require__.d(__webpack_exports__, "Wc", function() { return /* reexport */ selu["a" /* selu */]; });
__webpack_require__.d(__webpack_exports__, "Xc", function() { return /* reexport */ separable_conv2d["a" /* separableConv2d */]; });
__webpack_require__.d(__webpack_exports__, "Yc", function() { return /* reexport */ setdiff1d_async["a" /* setdiff1dAsync */]; });
__webpack_require__.d(__webpack_exports__, "Zc", function() { return /* reexport */ sigmoid["a" /* sigmoid */]; });
__webpack_require__.d(__webpack_exports__, "ad", function() { return /* reexport */ sign["a" /* sign */]; });
__webpack_require__.d(__webpack_exports__, "cd", function() { return /* reexport */ sin["a" /* sin */]; });
__webpack_require__.d(__webpack_exports__, "dd", function() { return /* reexport */ sinh["a" /* sinh */]; });
__webpack_require__.d(__webpack_exports__, "ed", function() { return /* reexport */ slice["a" /* slice */]; });
__webpack_require__.d(__webpack_exports__, "fd", function() { return /* reexport */ slice1d; });
__webpack_require__.d(__webpack_exports__, "gd", function() { return /* reexport */ slice2d; });
__webpack_require__.d(__webpack_exports__, "hd", function() { return /* reexport */ slice3d; });
__webpack_require__.d(__webpack_exports__, "id", function() { return /* reexport */ slice4d; });
__webpack_require__.d(__webpack_exports__, "jd", function() { return /* reexport */ softmax["a" /* softmax */]; });
__webpack_require__.d(__webpack_exports__, "kd", function() { return /* reexport */ softplus["a" /* softplus */]; });
__webpack_require__.d(__webpack_exports__, "ld", function() { return /* reexport */ space_to_batch_nd["a" /* spaceToBatchND */]; });
__webpack_require__.d(__webpack_exports__, "mb", function() { return /* reexport */ fft["a" /* fft */]; });
__webpack_require__.d(__webpack_exports__, "vb", function() { return /* reexport */ ifft["a" /* ifft */]; });
__webpack_require__.d(__webpack_exports__, "zb", function() { return /* reexport */ irfft["a" /* irfft */]; });
__webpack_require__.d(__webpack_exports__, "Rc", function() { return /* reexport */ rfft["a" /* rfft */]; });
__webpack_require__.d(__webpack_exports__, "pd", function() { return /* reexport */ split["a" /* split */]; });
__webpack_require__.d(__webpack_exports__, "qd", function() { return /* reexport */ sqrt["a" /* sqrt */]; });
__webpack_require__.d(__webpack_exports__, "rd", function() { return /* reexport */ square["a" /* square */]; });
__webpack_require__.d(__webpack_exports__, "sd", function() { return /* reexport */ squared_difference["a" /* squaredDifference */]; });
__webpack_require__.d(__webpack_exports__, "td", function() { return /* reexport */ squeeze["a" /* squeeze */]; });
__webpack_require__.d(__webpack_exports__, "ud", function() { return /* reexport */ stack["a" /* stack */]; });
__webpack_require__.d(__webpack_exports__, "vd", function() { return /* reexport */ ops_step["a" /* step */]; });
__webpack_require__.d(__webpack_exports__, "wd", function() { return /* reexport */ strided_slice["a" /* stridedSlice */]; });
__webpack_require__.d(__webpack_exports__, "xd", function() { return /* reexport */ sub["a" /* sub */]; });
__webpack_require__.d(__webpack_exports__, "yd", function() { return /* reexport */ sum["a" /* sum */]; });
__webpack_require__.d(__webpack_exports__, "zd", function() { return /* reexport */ tan["a" /* tan */]; });
__webpack_require__.d(__webpack_exports__, "Ad", function() { return /* reexport */ tanh["a" /* tanh */]; });
__webpack_require__.d(__webpack_exports__, "Bd", function() { return /* reexport */ ops_tensor["a" /* tensor */]; });
__webpack_require__.d(__webpack_exports__, "Cd", function() { return /* reexport */ tensor1d["a" /* tensor1d */]; });
__webpack_require__.d(__webpack_exports__, "Dd", function() { return /* reexport */ tensor2d; });
__webpack_require__.d(__webpack_exports__, "Ed", function() { return /* reexport */ tensor3d["a" /* tensor3d */]; });
__webpack_require__.d(__webpack_exports__, "Fd", function() { return /* reexport */ tensor4d; });
__webpack_require__.d(__webpack_exports__, "Gd", function() { return /* reexport */ tensor5d; });
__webpack_require__.d(__webpack_exports__, "Hd", function() { return /* reexport */ tensor6d; });
__webpack_require__.d(__webpack_exports__, "Id", function() { return /* reexport */ tile["a" /* tile */]; });
__webpack_require__.d(__webpack_exports__, "Jd", function() { return /* reexport */ topk["a" /* topk */]; });
__webpack_require__.d(__webpack_exports__, "Ld", function() { return /* reexport */ truncated_normal["a" /* truncatedNormal */]; });
__webpack_require__.d(__webpack_exports__, "Md", function() { return /* reexport */ unique["a" /* unique */]; });
__webpack_require__.d(__webpack_exports__, "Nd", function() { return /* reexport */ unsorted_segment_sum["a" /* unsortedSegmentSum */]; });
__webpack_require__.d(__webpack_exports__, "Od", function() { return /* reexport */ unstack["a" /* unstack */]; });
__webpack_require__.d(__webpack_exports__, "Pd", function() { return /* reexport */ variable; });
__webpack_require__.d(__webpack_exports__, "Qd", function() { return /* reexport */ where["a" /* where */]; });
__webpack_require__.d(__webpack_exports__, "Rd", function() { return /* reexport */ where_async["a" /* whereAsync */]; });
__webpack_require__.d(__webpack_exports__, "Sd", function() { return /* reexport */ zeros["a" /* zeros */]; });
__webpack_require__.d(__webpack_exports__, "Td", function() { return /* reexport */ zeros_like["a" /* zerosLike */]; });
__webpack_require__.d(__webpack_exports__, "y", function() { return /* reexport */ booleanMaskAsync; });
__webpack_require__.d(__webpack_exports__, "Kd", function() { return /* reexport */ transpose["a" /* transpose */]; });
__webpack_require__.d(__webpack_exports__, "lc", function() { return /* reexport */ norm["a" /* norm */]; });
__webpack_require__.d(__webpack_exports__, "gc", function() { return /* reexport */ movingAverage; });
__webpack_require__.d(__webpack_exports__, "Vc", function() { return /* reexport */ scatter_nd["a" /* scatterND */]; });
__webpack_require__.d(__webpack_exports__, "nd", function() { return /* reexport */ sparse_to_dense["a" /* sparseToDense */]; });
__webpack_require__.d(__webpack_exports__, "sb", function() { return /* reexport */ gather_nd["a" /* gatherND */]; });
__webpack_require__.d(__webpack_exports__, "cb", function() { return /* reexport */ dropout; });
__webpack_require__.d(__webpack_exports__, "fb", function() { return /* reexport */ enclosingPowerOfTwo; });
__webpack_require__.d(__webpack_exports__, "S", function() { return /* reexport */ cosineWindow; });
__webpack_require__.d(__webpack_exports__, "yb", function() { return /* reexport */ inTopKAsync; });
__webpack_require__.d(__webpack_exports__, "qc", function() { return /* reexport */ operation["b" /* op */]; });
__webpack_require__.d(__webpack_exports__, "a", function() { return /* reexport */ operation["a" /* OP_SCOPE_SUFFIX */]; });
__webpack_require__.d(__webpack_exports__, "xb", function() { return /* binding */ ops_image; });
__webpack_require__.d(__webpack_exports__, "Gb", function() { return /* binding */ linalg; });
__webpack_require__.d(__webpack_exports__, "Sb", function() { return /* binding */ ops_losses; });
__webpack_require__.d(__webpack_exports__, "od", function() { return /* binding */ spectral; });
__webpack_require__.d(__webpack_exports__, "qb", function() { return /* reexport */ fused_ops_namespaceObject; });
__webpack_require__.d(__webpack_exports__, "bd", function() { return /* binding */ ops_signal; });
__webpack_require__.d(__webpack_exports__, "md", function() { return /* binding */ sparse; });
// NAMESPACE OBJECT: ./node_modules/@tensorflow/tfjs-core/dist/ops/fused_ops.js
var fused_ops_namespaceObject = {};
__webpack_require__.r(fused_ops_namespaceObject);
__webpack_require__.d(fused_ops_namespaceObject, "conv2d", function() { return conv2d_conv2d; });
__webpack_require__.d(fused_ops_namespaceObject, "depthwiseConv2d", function() { return depthwiseConv2d; });
__webpack_require__.d(fused_ops_namespaceObject, "matMul", function() { return matMul; });
// EXTERNAL MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/abs.js
var abs = __webpack_require__(43);
// EXTERNAL MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/acos.js
var acos = __webpack_require__(216);
// EXTERNAL MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/acosh.js
var acosh = __webpack_require__(217);
// EXTERNAL MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/add.js
var add = __webpack_require__(13);
// EXTERNAL MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/add_n.js
var add_n = __webpack_require__(218);
// EXTERNAL MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/all.js
var ops_all = __webpack_require__(151);
// EXTERNAL MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/any.js
var any = __webpack_require__(152);
// EXTERNAL MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/arg_max.js
var arg_max = __webpack_require__(153);
// EXTERNAL MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/arg_min.js
var arg_min = __webpack_require__(154);
// EXTERNAL MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/asin.js
var asin = __webpack_require__(219);
// EXTERNAL MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/asinh.js
var asinh = __webpack_require__(220);
// EXTERNAL MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/atan.js
var atan = __webpack_require__(221);
// EXTERNAL MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/atan2.js
var atan2 = __webpack_require__(155);
// EXTERNAL MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/atanh.js
var atanh = __webpack_require__(222);
// EXTERNAL MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/avg_pool.js
var avg_pool = __webpack_require__(116);
// EXTERNAL MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/avg_pool_3d.js
var avg_pool_3d = __webpack_require__(223);
// EXTERNAL MODULE: ./node_modules/@tensorflow/tfjs-core/dist/tensor_util_env.js
var tensor_util_env = __webpack_require__(2);
// EXTERNAL MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/concat.js
var concat = __webpack_require__(32);
// EXTERNAL MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/mat_mul.js
var mat_mul = __webpack_require__(24);
// EXTERNAL MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/mul.js
var mul = __webpack_require__(9);
// EXTERNAL MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/operation.js
var operation = __webpack_require__(4);
// EXTERNAL MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/sigmoid.js
var sigmoid = __webpack_require__(71);
// EXTERNAL MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/slice.js
var slice = __webpack_require__(28);
// EXTERNAL MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/tanh.js
var tanh = __webpack_require__(145);
// CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/basic_lstm_cell.js
/**
* @license
* Copyright 2020 Google LLC. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================================
*/
/**
 * Computes one step of a BasicLSTMCell.
 *
 * Returns `[newC, newH]`: the updated cell state and the cell output.
 *
 * Derived from tf.contrib.rnn.BasicLSTMCell.
 *
 * @param forgetBias Forget bias for the cell.
 * @param lstmKernel The weights for the cell.
 * @param lstmBias The bias for the cell.
 * @param data The input to the cell.
 * @param c Previous cell state.
 * @param h Previous cell output.
 *
 * @doc {heading: 'Operations', subheading: 'RNN'}
 */
function basicLSTMCell_(forgetBias, lstmKernel, lstmBias, data, c, h) {
    const $forgetBias = Object(tensor_util_env["a" /* convertToTensor */])(forgetBias, 'forgetBias', 'basicLSTMCell');
    const $lstmKernel = Object(tensor_util_env["a" /* convertToTensor */])(lstmKernel, 'lstmKernel', 'basicLSTMCell');
    const $lstmBias = Object(tensor_util_env["a" /* convertToTensor */])(lstmBias, 'lstmBias', 'basicLSTMCell');
    const $data = Object(tensor_util_env["a" /* convertToTensor */])(data, 'data', 'basicLSTMCell');
    const $c = Object(tensor_util_env["a" /* convertToTensor */])(c, 'c', 'basicLSTMCell');
    const $h = Object(tensor_util_env["a" /* convertToTensor */])(h, 'h', 'basicLSTMCell');
    // One fused affine transform computes all four gate pre-activations:
    // [data, h] * kernel + bias.
    const activations = Object(add["a" /* add */])(Object(mat_mul["a" /* matMul */])(Object(concat["a" /* concat */])([$data, $h], 1), $lstmKernel), $lstmBias);
    // The columns hold, side by side: input gate, new input (candidate),
    // forget gate, output gate — each a quarter of the width.
    const numRows = activations.shape[0];
    const gateWidth = activations.shape[1] / 4;
    const gate = (index) => Object(slice["a" /* slice */])(activations, [0, gateWidth * index], [numRows, gateWidth]);
    const i = gate(0);
    const j = gate(1);
    const f = gate(2);
    const o = gate(3);
    // Standard LSTM update: newC = sigmoid(i)*tanh(j) + c*sigmoid(f + bias).
    const inputContribution = Object(mul["a" /* mul */])(Object(sigmoid["a" /* sigmoid */])(i), Object(tanh["a" /* tanh */])(j));
    const retainedState = Object(mul["a" /* mul */])($c, Object(sigmoid["a" /* sigmoid */])(Object(add["a" /* add */])($forgetBias, f)));
    const newC = Object(add["a" /* add */])(inputContribution, retainedState);
    // Output is the squashed new state, gated by the output gate.
    const newH = Object(mul["a" /* mul */])(Object(tanh["a" /* tanh */])(newC), Object(sigmoid["a" /* sigmoid */])(o));
    return [newC, newH];
}
const basicLSTMCell = Object(operation["b" /* op */])({ basicLSTMCell_ });
//# sourceMappingURL=basic_lstm_cell.js.map
//# sourceMappingURL=basic_lstm_cell.js.map
// EXTERNAL MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/batch_to_space_nd.js
var batch_to_space_nd = __webpack_require__(91);
// EXTERNAL MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/batchnorm.js + 1 modules
var batchnorm = __webpack_require__(84);
// EXTERNAL MODULE: ./node_modules/@tensorflow/tfjs-core/dist/util_base.js
var util_base = __webpack_require__(8);
// CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/batchnorm2d.js
/**
 * Batch normalization, strictly for 2D. For the more relaxed version, see
 * `tf.batchNorm`.
 *
 * @param x The input Tensor; must be rank 2.
 * @param mean A mean Tensor; rank 2 or rank 1.
 * @param variance A variance Tensor; rank 2 or rank 1.
 * @param offset An optional offset Tensor; rank 2 or rank 1.
 * @param scale An optional scale Tensor; rank 2 or rank 1.
 * @param varianceEpsilon A small float number to avoid dividing by 0.
 */
function batchNorm2d_(x, mean, variance, offset, scale, varianceEpsilon) {
    const $x = Object(tensor_util_env["a" /* convertToTensor */])(x, 'x', 'batchNorm');
    const $mean = Object(tensor_util_env["a" /* convertToTensor */])(mean, 'mean', 'batchNorm');
    const $variance = Object(tensor_util_env["a" /* convertToTensor */])(variance, 'variance', 'batchNorm');
    // `scale` and `offset` are optional; convert them only when provided.
    const $scale = scale != null ?
        Object(tensor_util_env["a" /* convertToTensor */])(scale, 'scale', 'batchNorm') :
        undefined;
    const $offset = offset != null ?
        Object(tensor_util_env["a" /* convertToTensor */])(offset, 'offset', 'batchNorm') :
        undefined;
    // The input itself must be exactly rank 2.
    util_base["b" /* assert */]($x.rank === 2, () => `Error in batchNorm2D: x must be rank 2 but got rank ` +
        `${$x.rank}.`);
    // Statistics and affine tensors may be rank 2 or rank 1 (broadcast).
    const assertRank2or1 = (t, name) => {
        if (t != null) {
            util_base["b" /* assert */](t.rank === 2 || t.rank === 1, () => `Error in batchNorm2D: ${name} must be rank 2 or rank 1 ` +
                `but got rank ${t.rank}.`);
        }
    };
    assertRank2or1($mean, 'mean');
    assertRank2or1($variance, 'variance');
    assertRank2or1($scale, 'scale');
    assertRank2or1($offset, 'offset');
    // Delegate to the rank-generic implementation.
    return Object(batchnorm["a" /* batchNorm */])($x, $mean, $variance, $offset, $scale, varianceEpsilon);
}
const batchNorm2d = Object(operation["b" /* op */])({ batchNorm2d_ });
//# sourceMappingURL=batchnorm2d.js.map
// CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/batchnorm3d.js
/**
 * Batch normalization, strictly for 3D. For the more relaxed version, see
 * `tf.batchNorm`.
 *
 * @param x The input Tensor; must be rank 3.
 * @param mean A mean Tensor; rank 3 or rank 1.
 * @param variance A variance Tensor; rank 3 or rank 1.
 * @param offset An optional offset Tensor; rank 3 or rank 1.
 * @param scale An optional scale Tensor; rank 3 or rank 1.
 * @param varianceEpsilon A small float number to avoid dividing by 0.
 */
function batchNorm3d_(x, mean, variance, offset, scale, varianceEpsilon) {
    const $x = Object(tensor_util_env["a" /* convertToTensor */])(x, 'x', 'batchNorm');
    const $mean = Object(tensor_util_env["a" /* convertToTensor */])(mean, 'mean', 'batchNorm');
    const $variance = Object(tensor_util_env["a" /* convertToTensor */])(variance, 'variance', 'batchNorm');
    // `scale` and `offset` are optional; convert them only when provided.
    const $scale = scale != null ?
        Object(tensor_util_env["a" /* convertToTensor */])(scale, 'scale', 'batchNorm') :
        undefined;
    const $offset = offset != null ?
        Object(tensor_util_env["a" /* convertToTensor */])(offset, 'offset', 'batchNorm') :
        undefined;
    // The input itself must be exactly rank 3.
    util_base["b" /* assert */]($x.rank === 3, () => `Error in batchNorm3D: x must be rank 3 but got rank ` +
        `${$x.rank}.`);
    // Statistics and affine tensors may be rank 3 or rank 1 (broadcast).
    const assertRank3or1 = (t, name) => {
        if (t != null) {
            util_base["b" /* assert */](t.rank === 3 || t.rank === 1, () => `Error in batchNorm3D: ${name} must be rank 3 or rank 1 ` +
                `but got rank ${t.rank}.`);
        }
    };
    assertRank3or1($mean, 'mean');
    assertRank3or1($variance, 'variance');
    assertRank3or1($scale, 'scale');
    assertRank3or1($offset, 'offset');
    // Delegate to the rank-generic implementation.
    return Object(batchnorm["a" /* batchNorm */])($x, $mean, $variance, $offset, $scale, varianceEpsilon);
}
const batchNorm3d = Object(operation["b" /* op */])({ batchNorm3d_ });
//# sourceMappingURL=batchnorm3d.js.map
// CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/batchnorm4d.js
/**
* Batch normalization, strictly for 4D. For the more relaxed version, see
* `tf.batchNorm`.
*
* @param x The input Tensor.
* @param mean A mean Tensor.
* @param variance A variance Tensor.
* @param offset An offset Tensor.
* @param scale A scale Tensor.
* @param varianceEpsilon A small float number to avoid dividing by 0.
*/
function batchNorm4d_(x, mean, variance, offset, scale, varianceEpsilon) {
    // Convert every tensor-like argument under the shared op name 'batchNorm'.
    const asTensor = (t, name) => Object(tensor_util_env["a" /* convertToTensor */])(t, name, 'batchNorm');
    const $x = asTensor(x, 'x');
    const $mean = asTensor(mean, 'mean');
    const $variance = asTensor(variance, 'variance');
    const $scale = scale != null ? asTensor(scale, 'scale') : undefined;
    const $offset = offset != null ? asTensor(offset, 'offset') : undefined;
    // x must be exactly rank 4; statistics may be rank 4 or a broadcastable rank 1.
    util_base["b" /* assert */]($x.rank === 4, () => `Error in batchNorm4D: x must be rank 4 but got rank ${$x.rank}.`);
    const assertStatRank = (t, name) => util_base["b" /* assert */](t.rank === 4 || t.rank === 1, () => `Error in batchNorm4D: ${name} must be rank 4 or rank 1 but got rank ${t.rank}.`);
    assertStatRank($mean, 'mean');
    assertStatRank($variance, 'variance');
    if ($scale != null) {
        assertStatRank($scale, 'scale');
    }
    if ($offset != null) {
        assertStatRank($offset, 'offset');
    }
    return Object(batchnorm["a" /* batchNorm */])($x, $mean, $variance, $offset, $scale, varianceEpsilon);
}
const batchNorm4d = Object(operation["b" /* op */])({ batchNorm4d_ });
//# sourceMappingURL=batchnorm4d.js.map
// EXTERNAL MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/bincount.js
var bincount = __webpack_require__(224);
// EXTERNAL MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/broadcast_to.js
var broadcast_to = __webpack_require__(101);
// EXTERNAL MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/buffer.js
var buffer = __webpack_require__(47);
// EXTERNAL MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/cast.js
var cast = __webpack_require__(12);
// EXTERNAL MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/ceil.js
var ceil = __webpack_require__(225);
// EXTERNAL MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/clip_by_value.js
var clip_by_value = __webpack_require__(226);
// EXTERNAL MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/clone.js
var clone = __webpack_require__(70);
// EXTERNAL MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/complex.js
var complex = __webpack_require__(58);
// CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/concat_1d.js
/**
* Concatenates a list of`tf.Tensor1D`s along an axis. See `concat` for details.
*
* For example, if:
* A: shape(3) = |r1, g1, b1|
* B: shape(2) = |r2, g2|
* C = tf.concat1d([A, B]) == |r1, g1, b1, r2, g2|
*
* @param tensors A list of`tf.Tensor`s to concatenate.
* @return The concatenated array.
*/
function concat1d_(tensors) {
    // 1-D tensors can only be joined along their single axis, 0.
    const axis = 0;
    return Object(concat["a" /* concat */])(tensors, axis);
}
const concat1d = Object(operation["b" /* op */])({ concat1d_ });
//# sourceMappingURL=concat_1d.js.map
// CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/concat_2d.js
/**
* Concatenates a list of`tf.Tensor2D`s along an axis. See `concat` for details.
*
* For example, if:
* A: shape(2, 3) = | r1, g1, b1 |
* | r2, g2, b2 |
*
* B: shape(2, 3) = | r3, g3, b3 |
* | r4, g4, b4 |
*
* C = tf.concat2d([A, B], axis)
*
* if axis = 0:
* C: shape(4, 3) = | r1, g1, b1 |
* | r2, g2, b2 |
* | r3, g3, b3 |
* | r4, g4, b4 |
*
* if axis = 1:
* C = shape(2, 6) = | r1, g1, b1, r3, g3, b3 |
* | r2, g2, b2, r4, g4, b4 |
*
*
* @param tensors A list of `tf.Tensor`s to concatenate.
* @param axis The axis to concatenate along.
* @return The concatenated array.
*/
function concat2d_(tensors, axis) {
    // Thin rank-specific wrapper: the generic concat op handles the 2-D case.
    const joined = Object(concat["a" /* concat */])(tensors, axis);
    return joined;
}
const concat2d = Object(operation["b" /* op */])({ concat2d_ });
//# sourceMappingURL=concat_2d.js.map
// CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/concat_3d.js
/**
* Concatenates a list of `tf.Tensor3D`s along an axis.
* See `concat` for details.
*
* For example, if:
* A: shape(2, 1, 3) = | r1, g1, b1 |
* | r2, g2, b2 |
*
* B: shape(2, 1, 3) = | r3, g3, b3 |
* | r4, g4, b4 |
*
* C = tf.concat3d([A, B], axis)
*
* if axis = 0:
* C: shape(4, 1, 3) = | r1, g1, b1 |
* | r2, g2, b2 |
* | r3, g3, b3 |
* | r4, g4, b4 |
*
* if axis = 1:
* C: shape(2, 2, 3) = | r1, g1, b1, r3, g3, b3 |
* | r2, g2, b2, r4, g4, b4 |
*
* if axis = 2:
* C = shape(2, 1, 6) = | r1, g1, b1, r3, g3, b3 |
* | r2, g2, b2, r4, g4, b4 |
*
* @param tensors A list of`tf.Tensor`s to concatenate.
 * @param axis The axis to concatenate along.
* @return The concatenated array.
*/
function concat3d_(tensors, axis) {
    // Thin rank-specific wrapper: the generic concat op handles the 3-D case.
    const joined = Object(concat["a" /* concat */])(tensors, axis);
    return joined;
}
const concat3d = Object(operation["b" /* op */])({ concat3d_ });
//# sourceMappingURL=concat_3d.js.map
// CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/concat_4d.js
/**
* Concatenates a list of `tf.Tensor4D`s along an axis.
* See `concat` for details.
*
* @param tensors A list of `tf.Tensor`s to concatenate.
 * @param axis The axis to concatenate along.
* @return The concatenated array.
*/
function concat4d_(tensors, axis) {
    // Thin rank-specific wrapper: the generic concat op handles the 4-D case.
    const joined = Object(concat["a" /* concat */])(tensors, axis);
    return joined;
}
const concat4d = Object(operation["b" /* op */])({ concat4d_ });
//# sourceMappingURL=concat_4d.js.map
// EXTERNAL MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/conv1d.js
var conv1d = __webpack_require__(156);
// EXTERNAL MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/conv2d.js
var conv2d = __webpack_require__(65);
// EXTERNAL MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/conv2d_transpose.js
var conv2d_transpose = __webpack_require__(157);
// EXTERNAL MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/conv3d.js
var conv3d = __webpack_require__(227);
// EXTERNAL MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/conv3d_backprop_input.js
var conv3d_backprop_input = __webpack_require__(209);
// CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/conv3d_transpose.js
/**
* Computes the transposed 3D convolution of a volume, also known as a
* deconvolution.
*
* @param x The input image, of rank 5 or rank 4, of shape
* `[batch, depth, height, width, inDepth]`. If rank 4, batch of 1 is assumed.
* @param filter The filter, rank 4, of shape
* `[depth, filterHeight, filterWidth, outDepth, inDepth]`.
* `inDepth` must match `inDepth` in `x`.
* @param outputShape Output shape, of rank 5 or rank 4:
 * `[batch, depth, height, width, outDepth]`. If rank 4, batch of 1 is
* assumed.
* @param strides The strides of the original convolution:
* `[strideDepth, strideHeight, strideWidth]`.
* @param pad The type of padding algorithm used in the non-transpose version
* of the op.
*
* @doc {heading: 'Operations', subheading: 'Convolution'}
*/
function conv3dTranspose_(x, filter, outputShape, strides, pad) {
    // The transposed convolution is computed as the backprop-to-input of the
    // corresponding forward 3-D convolution with the given output shape.
    const $dy = Object(tensor_util_env["a" /* convertToTensor */])(x, 'x', 'conv3dTranspose');
    const $kernel = Object(tensor_util_env["a" /* convertToTensor */])(filter, 'filter', 'conv3dTranspose');
    return Object(conv3d_backprop_input["a" /* conv3DBackpropInput */])(outputShape, $dy, $kernel, strides, pad);
}
const conv3dTranspose = Object(operation["b" /* op */])({ conv3dTranspose_ });
//# sourceMappingURL=conv3d_transpose.js.map
// EXTERNAL MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/cos.js
var cos = __webpack_require__(117);
// EXTERNAL MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/cosh.js
var cosh = __webpack_require__(158);
// EXTERNAL MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/cumsum.js
var cumsum = __webpack_require__(118);
// EXTERNAL MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/dense_bincount.js
var dense_bincount = __webpack_require__(228);
// EXTERNAL MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/depth_to_space.js
var depth_to_space = __webpack_require__(159);
// EXTERNAL MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/depthwise_conv2d.js
var depthwise_conv2d = __webpack_require__(92);
// EXTERNAL MODULE: ./node_modules/@tensorflow/tfjs-core/dist/engine.js + 2 modules
var engine = __webpack_require__(5);
// EXTERNAL MODULE: ./node_modules/@tensorflow/tfjs-core/dist/kernel_names.js
var kernel_names = __webpack_require__(3);
// CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/diag.js
/**
* @license
* Copyright 2020 Google LLC. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================================
*/
/**
* Returns a diagonal tensor with a given diagonal values.
*
* Given a diagonal, this operation returns a tensor with the diagonal and
* everything else padded with zeros.
*
* Assume the input has dimensions `[D1,..., Dk]`, then the output is a tensor
* of rank 2k with dimensions `[D1,..., Dk, D1,..., Dk]`
*
* ```js
* const x = tf.tensor1d([1, 2, 3, 4]);
*
* tf.diag(x).print()
* ```
* ```js
 * const x = tf.tensor2d([1, 2, 3, 4, 5, 6, 6, 8], [4, 2])
*
* tf.diag(x).print()
* ```
* @param x The input tensor.
*
* @doc {heading: 'Tensors', subheading: 'Creation'}
*/
function diag_(x) {
    const $x = Object(tensor_util_env["a" /* convertToTensor */])(x, 'x', 'diag');
    // Delegate to the Diag kernel registered on the active backend.
    return engine["a" /* ENGINE */].runKernel(kernel_names["R" /* Diag */], { x: $x });
}
const diag = Object(operation["b" /* op */])({ diag_ });
//# sourceMappingURL=diag.js.map
// EXTERNAL MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/dilation2d.js
var dilation2d = __webpack_require__(160);
// EXTERNAL MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/div.js
var div = __webpack_require__(15);
// EXTERNAL MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/div_no_nan.js
var div_no_nan = __webpack_require__(161);
// EXTERNAL MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/dot.js
var dot = __webpack_require__(162);
// EXTERNAL MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/einsum.js
var einsum = __webpack_require__(229);
// EXTERNAL MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/elu.js
var elu = __webpack_require__(119);
// EXTERNAL MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/equal.js
var equal = __webpack_require__(93);
// EXTERNAL MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/erf.js
var erf = __webpack_require__(230);
// EXTERNAL MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/exp.js
var exp = __webpack_require__(41);
// EXTERNAL MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/expand_dims.js
var expand_dims = __webpack_require__(63);
// EXTERNAL MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/expm1.js
var expm1 = __webpack_require__(231);
// EXTERNAL MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/reshape.js
var reshape = __webpack_require__(7);
// EXTERNAL MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/tile.js
var tile = __webpack_require__(85);
// CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/eye.js
/**
* @license
* Copyright 2020 Google LLC. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================================
*/
/**
* Create an identity matrix.
*
* @param numRows Number of rows.
* @param numColumns Number of columns. Defaults to `numRows`.
* @param batchShape If provided, will add the batch shape to the beginning
* of the shape of the returned `tf.Tensor` by repeating the identity
* matrix.
* @param dtype Data type.
* @returns Identity matrix of the specified size and data type, possibly
* with batch repetition if `batchShape` is specified.
*
* @doc {heading: 'Tensors', subheading: 'Creation'}
*/
function eye_(numRows, numColumns, batchShape, dtype = 'float32') {
    // A square matrix is produced when only numRows is given.
    if (numColumns == null) {
        numColumns = numRows;
    }
    // Build the base identity matrix in a CPU-side buffer, setting ones on
    // the main diagonal up to min(numRows, numColumns).
    const buff = Object(buffer["a" /* buffer */])([numRows, numColumns], dtype);
    const n = numRows <= numColumns ? numRows : numColumns;
    for (let i = 0; i < n; ++i) {
        buff.set(1, i, i);
    }
    const out = Object(reshape["a" /* reshape */])(buff.toTensor(), [numRows, numColumns]);
    if (batchShape == null) {
        return out;
    }
    else {
        // Prepend the batch dimensions by expanding and tiling the identity.
        if (batchShape.length === 1) {
            return Object(tile["a" /* tile */])(Object(expand_dims["a" /* expandDims */])(out, 0), [batchShape[0], 1, 1]);
        }
        else if (batchShape.length === 2) {
            // tslint:disable-next-line:no-unnecessary-type-assertion
            return Object(tile["a" /* tile */])(Object(expand_dims["a" /* expandDims */])(Object(expand_dims["a" /* expandDims */])(out, 0), 0), [batchShape[0], batchShape[1], 1, 1]);
        }
        else if (batchShape.length === 3) {
            // tslint:disable-next-line:no-unnecessary-type-assertion
            return Object(tile["a" /* tile */])(Object(expand_dims["a" /* expandDims */])(Object(expand_dims["a" /* expandDims */])(Object(expand_dims["a" /* expandDims */])(out, 0), 0), 0), [
                batchShape[0], batchShape[1], batchShape[2], 1, 1
            ]);
        }
        else {
            // BUG FIX: the branches above handle batchShapes of length 1, 2
            // and 3, but the original message claimed only 1D and 2D were
            // supported. The message now matches the actual capability.
            throw new Error(`eye() currently supports only 1D, 2D and 3D ` +
                // tslint:disable-next-line:no-any
                `batchShapes, but received ${batchShape.length}D.`);
        }
    }
}
const eye = Object(operation["b" /* op */])({ eye_ });
//# sourceMappingURL=eye.js.map
// EXTERNAL MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/fill.js
var fill = __webpack_require__(115);
// EXTERNAL MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/floor.js
var floor = __webpack_require__(120);
// EXTERNAL MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/floorDiv.js
var floorDiv = __webpack_require__(114);
// EXTERNAL MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/gather.js
var gather = __webpack_require__(94);
// EXTERNAL MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/greater.js
var greater = __webpack_require__(50);
// EXTERNAL MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/greater_equal.js
var greater_equal = __webpack_require__(66);
// EXTERNAL MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/imag.js
var imag = __webpack_require__(121);
// EXTERNAL MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/is_finite.js
var is_finite = __webpack_require__(232);
// EXTERNAL MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/is_inf.js
var is_inf = __webpack_require__(233);
// EXTERNAL MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/is_nan.js
var is_nan = __webpack_require__(234);
// EXTERNAL MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/leaky_relu.js
var leaky_relu = __webpack_require__(122);
// EXTERNAL MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/less.js
var less = __webpack_require__(123);
// EXTERNAL MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/less_equal.js
var less_equal = __webpack_require__(67);
// EXTERNAL MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/linspace.js
var linspace = __webpack_require__(235);
// EXTERNAL MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/local_response_normalization.js
var local_response_normalization = __webpack_require__(163);
// EXTERNAL MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/log.js
var log = __webpack_require__(75);
// EXTERNAL MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/log1p.js
var log1p = __webpack_require__(164);
// EXTERNAL MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/log_sigmoid.js
var log_sigmoid = __webpack_require__(236);
// EXTERNAL MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/log_softmax.js
var log_softmax = __webpack_require__(237);
// EXTERNAL MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/log_sum_exp.js
var log_sum_exp = __webpack_require__(124);
// EXTERNAL MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/logical_and.js
var logical_and = __webpack_require__(59);
// EXTERNAL MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/logical_not.js
var logical_not = __webpack_require__(95);
// EXTERNAL MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/logical_or.js
var logical_or = __webpack_require__(125);
// EXTERNAL MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/logical_xor.js
var logical_xor = __webpack_require__(166);
// EXTERNAL MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/max.js
var max = __webpack_require__(72);
// EXTERNAL MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/max_pool.js
var max_pool = __webpack_require__(126);
// EXTERNAL MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/max_pool_3d.js
var max_pool_3d = __webpack_require__(238);
// EXTERNAL MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/max_pool_with_argmax.js
var max_pool_with_argmax = __webpack_require__(239);
// EXTERNAL MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/maximum.js
var maximum = __webpack_require__(90);
// EXTERNAL MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/mean.js
var ops_mean = __webpack_require__(88);
// EXTERNAL MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/ones.js
var ones = __webpack_require__(55);
// EXTERNAL MODULE: ./node_modules/@tensorflow/tfjs-core/dist/tensor.js + 1 modules
var dist_tensor = __webpack_require__(6);
// CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/meshgrid.js
/**
* @license
* Copyright 2021 Google LLC. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================================
*/
/**
* Broadcasts parameters for evaluation on an N-D grid.
*
* Given N one-dimensional coordinate arrays `*args`, returns a list `outputs`
* of N-D coordinate arrays for evaluating expressions on an N-D grid.
*
* Notes:
* `meshgrid` supports cartesian ('xy') and matrix ('ij') indexing conventions.
* When the `indexing` argument is set to 'xy' (the default), the broadcasting
* instructions for the first two dimensions are swapped.
* Examples:
* Calling `const [X, Y] = meshgrid(x, y)` with the tensors
*
* ```javascript
* const x = [1, 2, 3];
* const y = [4, 5, 6];
* const [X, Y] = tf.meshgrid(x, y);
* // X = [[1, 2, 3],
* // [1, 2, 3],
* // [1, 2, 3]]
* // Y = [[4, 4, 4],
* // [5, 5, 5],
* // [6, 6, 6]]
* ```
*
* @param x Tensor with rank geq 1.
* @param y Tensor with rank geq 1.
* @param indexing
*
* @doc {heading: 'Operations', subheading: 'Slicing and Joining'}
*/
function meshgrid(x, y, { indexing = 'xy' } = {}) {
    // Only cartesian ('xy') and matrix ('ij') indexing are supported.
    if (!(indexing === 'xy' || indexing === 'ij')) {
        throw new TypeError(`${indexing} is not a valid third argument to meshgrid`);
    }
    if (x === undefined) {
        return [];
    }
    // Preserve an existing tensor's dtype; plain arrays default to float32.
    const dtypeOf = (t) => t instanceof dist_tensor["a" /* Tensor */] ? t.dtype : 'float32';
    let $x = Object(tensor_util_env["a" /* convertToTensor */])(x, 'x', 'meshgrid', dtypeOf(x));
    if (y === undefined) {
        return [$x];
    }
    let $y = Object(tensor_util_env["a" /* convertToTensor */])(y, 'y', 'meshgrid', dtypeOf(y));
    const w = Object(util_base["O" /* sizeFromShape */])($x.shape);
    const h = Object(util_base["O" /* sizeFromShape */])($y.shape);
    if (indexing === 'xy') {
        // 'xy': x varies along columns, y varies along rows.
        $x = Object(reshape["a" /* reshape */])($x, [1, -1]);
        $y = Object(reshape["a" /* reshape */])($y, [-1, 1]);
        return [
            Object(mat_mul["a" /* matMul */])(Object(ones["a" /* ones */])([h, 1], $x.dtype), $x),
            Object(mat_mul["a" /* matMul */])($y, Object(ones["a" /* ones */])([1, w], $y.dtype)),
        ];
    }
    // 'ij': matrix indexing — x varies along rows, y varies along columns.
    $x = Object(reshape["a" /* reshape */])($x, [-1, 1]);
    $y = Object(reshape["a" /* reshape */])($y, [1, -1]);
    return [
        Object(mat_mul["a" /* matMul */])($x, Object(ones["a" /* ones */])([1, h], $x.dtype)),
        Object(mat_mul["a" /* matMul */])(Object(ones["a" /* ones */])([w, 1], $y.dtype), $y),
    ];
}
//# sourceMappingURL=meshgrid.js.map
// EXTERNAL MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/min.js
var min = __webpack_require__(105);
// EXTERNAL MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/minimum.js
var minimum = __webpack_require__(127);
// EXTERNAL MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/mirror_pad.js
var mirror_pad = __webpack_require__(167);
// EXTERNAL MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/mod.js
var mod = __webpack_require__(168);
// EXTERNAL MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/axis_util.js
var axis_util = __webpack_require__(39);
// EXTERNAL MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/square.js
var square = __webpack_require__(25);
// EXTERNAL MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/sub.js
var sub = __webpack_require__(14);
// CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/moments.js
/**
* @license
* Copyright 2020 Google LLC. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================================
*/
/**
* Calculates the mean and variance of `x`. The mean and variance are
* calculated by aggregating the contents of `x` across `axes`. If `x` is
* 1-D and `axes = [0]` this is just the mean and variance of a vector.
*
* @param x The input tensor.
* @param axis The dimension(s) along with to compute mean and
* variance. By default it reduces all dimensions.
* @param keepDims If true, the moments have the same dimensionality as the
* input.
* @return An object with two keys: `mean` and `variance`.
*
* @doc {heading: 'Operations', subheading: 'Normalization'}
*/
function moments_(x, axis = null, keepDims = false) {
    x = Object(tensor_util_env["a" /* convertToTensor */])(x, 'x', 'moments');
    const axes = Object(util_base["I" /* parseAxisParam */])(axis, x.shape);
    const xMean = Object(ops_mean["a" /* mean */])(x, axes, keepDims);
    // The mean must broadcast against x when the reduced dims were dropped.
    const keepDimsShape = keepDims
        ? xMean.shape
        : Object(axis_util["e" /* expandShapeToKeepDim */])(xMean.shape, axes);
    // Variance = mean of squared deviations from the mean (in float32).
    const deviation = Object(sub["a" /* sub */])(Object(cast["a" /* cast */])(x, 'float32'), Object(reshape["a" /* reshape */])(xMean, keepDimsShape));
    const variance = Object(ops_mean["a" /* mean */])(Object(square["a" /* square */])(deviation), axes, keepDims);
    return { mean: xMean, variance };
}
const moments = Object(operation["b" /* op */])({ moments_ });
//# sourceMappingURL=moments.js.map
// CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/multi_rnn_cell.js
/**
* Computes the next states and outputs of a stack of LSTMCells.
*
* Each cell output is used as input to the next cell.
*
* Returns `[cellState, cellOutput]`.
*
 * Derived from tf.contrib.rnn.MultiRNNCell.
*
* @param lstmCells Array of LSTMCell functions.
* @param data The input to the cell.
* @param c Array of previous cell states.
* @param h Array of previous cell outputs.
*
* @doc {heading: 'Operations', subheading: 'RNN'}
*/
function multiRNNCell_(lstmCells, data, c, h) {
    const $data = Object(tensor_util_env["a" /* convertToTensor */])(data, 'data', 'multiRNNCell');
    const $c = Object(tensor_util_env["b" /* convertToTensorArray */])(c, 'c', 'multiRNNCell');
    const $h = Object(tensor_util_env["b" /* convertToTensorArray */])(h, 'h', 'multiRNNCell');
    const newC = [];
    const newH = [];
    let input = $data;
    for (let i = 0; i < lstmCells.length; i++) {
        // Each cell returns [cellState, cellOutput]; the output feeds the
        // next cell in the stack.
        const [stateC, stateH] = lstmCells[i](input, $c[i], $h[i]);
        newC.push(stateC);
        newH.push(stateH);
        input = stateH;
    }
    return [newC, newH];
}
const multiRNNCell = Object(operation["b" /* op */])({ multiRNNCell_ });
//# sourceMappingURL=multi_rnn_cell.js.map
// EXTERNAL MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/multinomial.js
var multinomial = __webpack_require__(240);
// EXTERNAL MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/neg.js
var neg = __webpack_require__(30);
// EXTERNAL MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/not_equal.js
var not_equal = __webpack_require__(128);
// EXTERNAL MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/one_hot.js
var one_hot = __webpack_require__(106);
// EXTERNAL MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/ones_like.js
var ones_like = __webpack_require__(241);
// CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/outer_product.js
/**
* Computes the outer product of two vectors, `v1` and `v2`.
*
* ```js
* const a = tf.tensor1d([1, 2, 3]);
* const b = tf.tensor1d([3, 4, 5]);
*
* tf.outerProduct(a, b).print();
* ```
* @param v1 The first vector in the outer product operation.
* @param v2 The second vector in the outer product operation.
*
* @doc {heading: 'Operations', subheading: 'Matrices'}
*/
function outerProduct_(v1, v2) {
    const $v1 = Object(tensor_util_env["a" /* convertToTensor */])(v1, 'v1', 'outerProduct');
    const $v2 = Object(tensor_util_env["a" /* convertToTensor */])(v2, 'v2', 'outerProduct');
    util_base["b" /* assert */]($v1.rank === 1 && $v2.rank === 1, () => `Error in outerProduct: inputs must be rank 1, but got ranks ${$v1.rank} and ${$v2.rank}.`);
    // Outer product expressed as column-vector × row-vector matrix multiply.
    const col = Object(reshape["a" /* reshape */])($v1, [-1, 1]);
    const row = Object(reshape["a" /* reshape */])($v2, [1, -1]);
    return Object(mat_mul["a" /* matMul */])(col, row);
}
const outerProduct = Object(operation["b" /* op */])({ outerProduct_ });
//# sourceMappingURL=outer_product.js.map
// EXTERNAL MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/pad.js
var ops_pad = __webpack_require__(56);
// CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/pad1d.js
/**
* Pads a `tf.Tensor1D` with a given value and paddings. See `pad` for details.
*/
function pad1d_(x, paddings, constantValue = 0) {
    // A 1-D pad takes a single [before, after] pair.
    const pairOk = paddings.length === 2;
    Object(util_base["b" /* assert */])(pairOk, () => 'Invalid number of paddings. Must be length of 2.');
    return Object(ops_pad["a" /* pad */])(x, [paddings], constantValue);
}
const pad1d = Object(operation["b" /* op */])({ pad1d_ });
//# sourceMappingURL=pad1d.js.map
// CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/pad2d.js
/**
* Pads a `tf.Tensor2D` with a given value and paddings. See `pad` for details.
*/
function pad2d_(x, paddings, constantValue = 0) {
    // Expect exactly two [before, after] pairs, one per dimension.
    const shapeOk = paddings.length === 2 && paddings.every((p) => p.length === 2);
    Object(util_base["b" /* assert */])(shapeOk, () => 'Invalid number of paddings. Must be length of 2 each.');
    return Object(ops_pad["a" /* pad */])(x, paddings, constantValue);
}
const pad2d = Object(operation["b" /* op */])({ pad2d_ });
//# sourceMappingURL=pad2d.js.map
// CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/pad3d.js
/**
* Pads a `tf.Tensor3D` with a given value and paddings. See `pad` for details.
*/
function pad3d_(x, paddings, constantValue = 0) {
    // Expect exactly three [before, after] pairs, one per dimension.
    const shapeOk = paddings.length === 3 && paddings.every((p) => p.length === 2);
    Object(util_base["b" /* assert */])(shapeOk, () => 'Invalid number of paddings. Must be length of 2 each.');
    return Object(ops_pad["a" /* pad */])(x, paddings, constantValue);
}
const pad3d = Object(operation["b" /* op */])({ pad3d_ });
//# sourceMappingURL=pad3d.js.map
// CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/pad4d.js
/**
* Pads a `tf.Tensor4D` with a given value and paddings. See `pad` for details.
*/
function pad4d_(x, paddings, constantValue = 0) {
    // Expect exactly four [before, after] pairs, one per dimension.
    const shapeOk = paddings.length === 4 && paddings.every((p) => p.length === 2);
    Object(util_base["b" /* assert */])(shapeOk, () => 'Invalid number of paddings. Must be length of 2 each.');
    return Object(ops_pad["a" /* pad */])(x, paddings, constantValue);
}
const pad4d = Object(operation["b" /* op */])({ pad4d_ });
//# sourceMappingURL=pad4d.js.map
// EXTERNAL MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/pool.js
var pool = __webpack_require__(169);
// EXTERNAL MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/pow.js
var pow = __webpack_require__(53);
// EXTERNAL MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/prelu.js
var prelu = __webpack_require__(129);
// EXTERNAL MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/print.js
var print = __webpack_require__(150);
// EXTERNAL MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/prod.js
var prod = __webpack_require__(170);
// CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/rand.js
/**
* @license
* Copyright 2020 Google LLC. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================================
*/
/**
 * Creates a `tf.Tensor` whose elements are produced by a user-supplied
 * random number generator function.
 *
 * @param shape An array of integers defining the output tensor shape.
 * @param randFunction A random number generator function, invoked once per
 *     element of the output tensor.
 * @param dtype The data type of the output tensor. Defaults to 'float32'.
 *
 * @doc {heading: 'Tensors', subheading: 'Random'}
 */
function rand_(shape, randFunction, dtype) {
    const size = Object(util_base["O" /* sizeFromShape */])(shape);
    // Choose the backing typed array; a missing dtype means 'float32'.
    let values;
    switch (dtype == null ? 'float32' : dtype) {
        case 'float32':
            values = new Float32Array(size);
            break;
        case 'int32':
            values = new Int32Array(size);
            break;
        case 'bool':
            values = new Uint8Array(size);
            break;
        default:
            throw new Error(`Unknown data type ${dtype}`);
    }
    let i = 0;
    while (i < size) {
        values[i++] = randFunction();
    }
    // Pass the caller's dtype through unmodified so the engine applies its
    // own defaulting, exactly as before.
    return engine["a" /* ENGINE */].makeTensor(values, shape, dtype);
}
const rand = Object(operation["b" /* op */])({ rand_ });
//# sourceMappingURL=rand.js.map
// EXTERNAL MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/rand_util.js
var rand_util = __webpack_require__(102);
// CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/random_gamma.js
/**
* @license
* Copyright 2020 Google LLC. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================================
*/
/**
 * Creates a `tf.Tensor` with values sampled from a gamma distribution.
 *
 * ```js
 * tf.randomGamma([2, 2], 1).print();
 * ```
 *
 * @param shape An array of integers defining the output tensor shape.
 * @param alpha The shape parameter of the gamma distribution.
 * @param beta The inverse scale parameter of the gamma distribution. Defaults
 *     to 1.
 * @param dtype The data type of the output. Defaults to float32.
 * @param seed The seed for the random number generator.
 *
 * @doc {heading: 'Tensors', subheading: 'Random'}
 */
function randomGamma_(shape, alpha, beta = 1, dtype = 'float32', seed) {
    // Parameter defaults don't cover an explicit `null`, so normalize here.
    const scale = beta == null ? 1 : beta;
    const outType = dtype == null ? 'float32' : dtype;
    if (outType !== 'float32' && outType !== 'int32') {
        throw new Error(`Unsupported data type ${outType}`);
    }
    const sampler = new rand_util["b" /* RandGamma */](alpha, scale, outType, seed);
    const out = Object(buffer["a" /* buffer */])(shape, outType);
    for (let i = 0; i < out.values.length; i++) {
        out.values[i] = sampler.nextValue();
    }
    return out.toTensor();
}
const randomGamma = Object(operation["b" /* op */])({ randomGamma_ });
//# sourceMappingURL=random_gamma.js.map
// CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/random_normal.js
/**
* @license
* Copyright 2020 Google LLC. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================================
*/
/**
 * Creates a `tf.Tensor` with values sampled from a normal distribution.
 *
 * ```js
 * tf.randomNormal([2, 2]).print();
 * ```
 *
 * @param shape An array of integers defining the output tensor shape.
 * @param mean The mean of the normal distribution.
 * @param stdDev The standard deviation of the normal distribution.
 * @param dtype The data type of the output. 'bool' is rejected.
 * @param seed The seed for the random number generator.
 *
 * @doc {heading: 'Tensors', subheading: 'Random'}
 */
function randomNormal_(shape, mean = 0, stdDev = 1, dtype, seed) {
    // `dtype === 'bool'` already implies dtype is non-null, so the previous
    // `dtype != null &&` guard was redundant and has been dropped.
    if (dtype === 'bool') {
        throw new Error(`Unsupported data type ${dtype}`);
    }
    const randGauss = new rand_util["a" /* MPRandGauss */](mean, stdDev, dtype, false /* truncated */, seed);
    const res = Object(buffer["a" /* buffer */])(shape, dtype);
    // Fill the buffer element-by-element from the Gaussian sampler.
    for (let i = 0; i < res.values.length; i++) {
        res.values[i] = randGauss.nextValue();
    }
    return res.toTensor();
}
const randomNormal = Object(operation["b" /* op */])({ randomNormal_ });
//# sourceMappingURL=random_normal.js.map
// EXTERNAL MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/random_uniform.js
var random_uniform = __webpack_require__(171);
// EXTERNAL MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/range.js
var range = __webpack_require__(146);
// EXTERNAL MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/real.js
var real = __webpack_require__(107);
// EXTERNAL MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/reciprocal.js
var reciprocal = __webpack_require__(242);
// EXTERNAL MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/relu.js
var relu = __webpack_require__(81);
// EXTERNAL MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/relu6.js
var relu6 = __webpack_require__(130);
// EXTERNAL MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/reverse.js
var reverse = __webpack_require__(48);
// CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/reverse_1d.js
/**
* @license
* Copyright 2020 Google LLC. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================================
*/
/**
 * Reverses a `tf.Tensor1D`.
 *
 * @param x The input tensor; must be rank 1.
 */
function reverse1d_(x) {
    const $input = Object(tensor_util_env["a" /* convertToTensor */])(x, 'x', 'reverse');
    const rank = $input.rank;
    util_base["b" /* assert */](rank === 1, () => `Error in reverse1D: x must be rank 1 but got rank ${rank}.`);
    return Object(reverse["a" /* reverse */])($input, 0);
}
const reverse1d = Object(operation["b" /* op */])({ reverse1d_ });
//# sourceMappingURL=reverse_1d.js.map
// CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/reverse_2d.js
/**
* @license
* Copyright 2020 Google LLC. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================================
*/
/**
 * Reverses a `tf.Tensor2D` along a specified axis.
 *
 * @param x The input tensor; must be rank 2.
 * @param axis The set of dimensions to reverse. Must be in the
 *     range [-rank(x), rank(x)). Defaults to all axes.
 */
function reverse2d_(x, axis) {
    const $input = Object(tensor_util_env["a" /* convertToTensor */])(x, 'x', 'reverse');
    const rank = $input.rank;
    util_base["b" /* assert */](rank === 2, () => `Error in reverse2D: x must be rank 2 but got rank ${rank}.`);
    return Object(reverse["a" /* reverse */])($input, axis);
}
const reverse2d = Object(operation["b" /* op */])({ reverse2d_ });
//# sourceMappingURL=reverse_2d.js.map
// CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/reverse_3d.js
/**
* @license
* Copyright 2020 Google LLC. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================================
*/
/**
 * Reverses a `tf.Tensor3D` along a specified axis.
 *
 * @param x The input tensor; must be rank 3.
 * @param axis The set of dimensions to reverse. Must be in the
 *     range [-rank(x), rank(x)). Defaults to all axes.
 */
function reverse3d_(x, axis) {
    const $input = Object(tensor_util_env["a" /* convertToTensor */])(x, 'x', 'reverse');
    const rank = $input.rank;
    util_base["b" /* assert */](rank === 3, () => `Error in reverse3D: x must be rank 3 but got rank ${rank}.`);
    return Object(reverse["a" /* reverse */])($input, axis);
}
const reverse3d = Object(operation["b" /* op */])({ reverse3d_ });
//# sourceMappingURL=reverse_3d.js.map
// CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/reverse_4d.js
/**
* @license
* Copyright 2020 Google LLC. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================================
*/
/**
 * Reverses a `tf.Tensor4D` along a specified axis.
 *
 * @param x The input tensor; must be rank 4.
 * @param axis The set of dimensions to reverse. Must be in the
 *     range [-rank(x), rank(x)). Defaults to all axes.
 */
function reverse4d_(x, axis) {
    const $input = Object(tensor_util_env["a" /* convertToTensor */])(x, 'x', 'reverse');
    const rank = $input.rank;
    util_base["b" /* assert */](rank === 4, () => `Error in reverse4D: x must be rank 4 but got rank ${rank}.`);
    return Object(reverse["a" /* reverse */])($input, axis);
}
const reverse4d = Object(operation["b" /* op */])({ reverse4d_ });
//# sourceMappingURL=reverse_4d.js.map
// EXTERNAL MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/round.js
var round = __webpack_require__(243);
// EXTERNAL MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/rsqrt.js
var rsqrt = __webpack_require__(172);
// EXTERNAL MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/scalar.js
var scalar = __webpack_require__(16);
// EXTERNAL MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/selu.js
var selu = __webpack_require__(173);
// EXTERNAL MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/separable_conv2d.js
var separable_conv2d = __webpack_require__(174);
// EXTERNAL MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/setdiff1d_async.js
var setdiff1d_async = __webpack_require__(244);
// EXTERNAL MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/sign.js
var sign = __webpack_require__(245);
// EXTERNAL MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/sin.js
var sin = __webpack_require__(175);
// EXTERNAL MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/sinh.js
var sinh = __webpack_require__(176);
// CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/slice1d.js
/**
* @license
* Copyright 2018 Google LLC. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================================
*/
/**
 * Extracts a 1D slice from a 1D array starting at coordinate `begin` and of
 * length `size`. See `slice` for details.
 *
 * @param x The rank-1 input tensor.
 * @param begin Starting index of the slice.
 * @param size Number of elements to extract.
 */
function slice1d_(x, begin, size) {
    const $input = Object(tensor_util_env["a" /* convertToTensor */])(x, 'x', 'slice1d');
    const rank = $input.rank;
    util_base["b" /* assert */](rank === 1, () => `slice1d expects a rank-1 tensor, but got a rank-${rank} tensor`);
    // The generic slice op takes coordinate arrays, so wrap the scalars.
    return Object(slice["a" /* slice */])($input, [begin], [size]);
}
const slice1d = Object(operation["b" /* op */])({ slice1d_ });
//# sourceMappingURL=slice1d.js.map
// CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/slice2d.js
/**
* @license
* Copyright 2018 Google LLC. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================================
*/
/**
 * Extracts a 2D slice from a 2D array starting at coordinates `begin` and
 * is of size `size`. See `slice` for details.
 *
 * @param x The rank-2 input tensor.
 * @param begin Starting coordinates of the slice.
 * @param size Extent of the slice in each dimension.
 */
function slice2d_(x, begin, size) {
    const $input = Object(tensor_util_env["a" /* convertToTensor */])(x, 'x', 'slice2d');
    const rank = $input.rank;
    util_base["b" /* assert */](rank === 2, () => `slice2d expects a rank-2 tensor, but got a rank-${rank} tensor`);
    return Object(slice["a" /* slice */])($input, begin, size);
}
const slice2d = Object(operation["b" /* op */])({ slice2d_ });
//# sourceMappingURL=slice2d.js.map
// CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/slice3d.js
/**
* @license
* Copyright 2018 Google LLC. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================================
*/
/**
 * Extracts a 3D slice from a 3D array starting at coordinates `begin` and
 * is of size `size`. See `slice` for details.
 *
 * @param x The rank-3 input tensor.
 * @param begin Starting coordinates of the slice.
 * @param size Extent of the slice in each dimension.
 */
function slice3d_(x, begin, size) {
    const $input = Object(tensor_util_env["a" /* convertToTensor */])(x, 'x', 'slice3d');
    const rank = $input.rank;
    util_base["b" /* assert */](rank === 3, () => `slice3d expects a rank-3 tensor, but got a rank-${rank} tensor`);
    return Object(slice["a" /* slice */])($input, begin, size);
}
const slice3d = Object(operation["b" /* op */])({ slice3d_ });
//# sourceMappingURL=slice3d.js.map
// CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/slice4d.js
/**
* @license
* Copyright 2018 Google LLC. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================================
*/
/**
 * Extracts a 4D slice from a 4D array starting at coordinates `begin` and
 * is of size `size`. See `slice` for details.
 *
 * @param x The rank-4 input tensor.
 * @param begin Starting coordinates of the slice.
 * @param size Extent of the slice in each dimension.
 */
function slice4d_(x, begin, size) {
    const $input = Object(tensor_util_env["a" /* convertToTensor */])(x, 'x', 'slice4d');
    const rank = $input.rank;
    util_base["b" /* assert */](rank === 4, () => `slice4d expects a rank-4 tensor, but got a rank-${rank} tensor`);
    return Object(slice["a" /* slice */])($input, begin, size);
}
const slice4d = Object(operation["b" /* op */])({ slice4d_ });
//# sourceMappingURL=slice4d.js.map
// EXTERNAL MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/softmax.js
var softmax = __webpack_require__(246);
// EXTERNAL MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/softplus.js
var softplus = __webpack_require__(165);
// EXTERNAL MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/space_to_batch_nd.js
var space_to_batch_nd = __webpack_require__(96);
// EXTERNAL MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/spectral/fft.js
var fft = __webpack_require__(131);
// EXTERNAL MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/spectral/ifft.js
var ifft = __webpack_require__(108);
// EXTERNAL MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/spectral/irfft.js
var irfft = __webpack_require__(177);
// EXTERNAL MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/spectral/rfft.js
var rfft = __webpack_require__(132);
// EXTERNAL MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/split.js
var split = __webpack_require__(76);
// EXTERNAL MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/sqrt.js
var sqrt = __webpack_require__(37);
// EXTERNAL MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/squared_difference.js
var squared_difference = __webpack_require__(133);
// EXTERNAL MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/squeeze.js
var squeeze = __webpack_require__(97);
// EXTERNAL MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/stack.js
var stack = __webpack_require__(60);
// EXTERNAL MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/step.js
var ops_step = __webpack_require__(82);
// EXTERNAL MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/strided_slice.js
var strided_slice = __webpack_require__(247);
// EXTERNAL MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/sum.js
var sum = __webpack_require__(19);
// EXTERNAL MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/tan.js
var tan = __webpack_require__(248);
// EXTERNAL MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/tensor.js
var ops_tensor = __webpack_require__(100);
// EXTERNAL MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/tensor1d.js
var tensor1d = __webpack_require__(77);
// EXTERNAL MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/tensor_ops_util.js
var tensor_ops_util = __webpack_require__(51);
// CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/tensor2d.js
/**
* @license
* Copyright 2018 Google LLC. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================================
*/
/**
 * Creates a rank-2 `tf.Tensor` with the provided values, shape and dtype.
 *
 * The same functionality can be achieved with `tf.tensor`, but in general
 * we recommend using `tf.tensor2d` as it makes the code more readable.
 *
 * ```js
 * // Pass a nested array.
 * tf.tensor2d([[1, 2], [3, 4]]).print();
 * ```
 * ```js
 * // Pass a flat array and specify a shape.
 * tf.tensor2d([1, 2, 3, 4], [2, 2]).print();
 * ```
 *
 * @param values The values of the tensor. Can be a nested array of numbers,
 *     a flat array, or a `TypedArray`.
 * @param shape The shape of the tensor. If not provided, it is inferred from
 *     `values`.
 * @param dtype The data type.
 *
 * @doc {heading: 'Tensors', subheading: 'Creation'}
 */
function tensor2d(values, shape, dtype) {
    Object(util_base["d" /* assertNonNull */])(values);
    const hasShape = shape != null;
    if (hasShape && shape.length !== 2) {
        throw new Error('tensor2d() requires shape to have two numbers');
    }
    const deducedShape = Object(tensor_util_env["c" /* inferShape */])(values, dtype);
    const rank = deducedShape.length;
    if (rank !== 2 && rank !== 1) {
        throw new Error('tensor2d() requires values to be number[][] or flat/TypedArray');
    }
    // A flat values array carries no rank information, so the caller must
    // supply the shape explicitly.
    if (rank === 1 && !hasShape) {
        throw new Error('tensor2d() requires shape to be provided when `values` are a flat/TypedArray');
    }
    return Object(tensor_ops_util["a" /* makeTensor */])(values, shape, deducedShape, dtype);
}
//# sourceMappingURL=tensor2d.js.map
// EXTERNAL MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/tensor3d.js
var tensor3d = __webpack_require__(178);
// CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/tensor4d.js
/**
* @license
* Copyright 2018 Google LLC. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================================
*/
/**
 * Creates a rank-4 `tf.Tensor` with the provided values, shape and dtype.
 *
 * The same functionality can be achieved with `tf.tensor`, but in general
 * we recommend using `tf.tensor4d` as it makes the code more readable.
 *
 * ```js
 * // Pass a nested array.
 * tf.tensor4d([[[[1], [2]], [[3], [4]]]]).print();
 * ```
 * ```js
 * // Pass a flat array and specify a shape.
 * tf.tensor4d([1, 2, 3, 4], [1, 2, 2, 1]).print();
 * ```
 *
 * @param values The values of the tensor. Can be a nested array of numbers,
 *     a flat array, or a `TypedArray`.
 * @param shape The shape of the tensor. Optional. If not provided,
 *     it is inferred from `values`.
 * @param dtype The data type.
 *
 * @doc {heading: 'Tensors', subheading: 'Creation'}
 */
function tensor4d(values, shape, dtype) {
    Object(util_base["d" /* assertNonNull */])(values);
    const hasShape = shape != null;
    if (hasShape && shape.length !== 4) {
        throw new Error('tensor4d() requires shape to have four numbers');
    }
    const deducedShape = Object(tensor_util_env["c" /* inferShape */])(values, dtype);
    const rank = deducedShape.length;
    if (rank !== 4 && rank !== 1) {
        throw new Error('tensor4d() requires values to be number[][][][] or flat/TypedArray');
    }
    // A flat values array carries no rank information, so the caller must
    // supply the shape explicitly.
    if (rank === 1 && !hasShape) {
        throw new Error('tensor4d() requires shape to be provided when `values` are a flat array');
    }
    return Object(tensor_ops_util["a" /* makeTensor */])(values, shape, deducedShape, dtype);
}
//# sourceMappingURL=tensor4d.js.map
// CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/tensor5d.js
/**
* @license
* Copyright 2018 Google LLC. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================================
*/
/**
 * Creates a rank-5 `tf.Tensor` with the provided values, shape and dtype.
 *
 * The same functionality can be achieved with `tf.tensor`, but in general
 * we recommend using `tf.tensor5d` as it makes the code more readable.
 *
 * ```js
 * // Pass a nested array.
 * tf.tensor5d([[[[[1],[2]],[[3],[4]]],[[[5],[6]],[[7],[8]]]]]).print();
 * ```
 * ```js
 * // Pass a flat array and specify a shape.
 * tf.tensor5d([1, 2, 3, 4, 5, 6, 7, 8], [1, 2, 2, 2, 1]).print();
 * ```
 *
 * @param values The values of the tensor. Can be a nested array of numbers,
 *     a flat array, or a `TypedArray`.
 * @param shape The shape of the tensor. Optional. If not provided,
 *     it is inferred from `values`.
 * @param dtype The data type.
 *
 * @doc {heading: 'Tensors', subheading: 'Creation'}
 */
function tensor5d(values, shape, dtype) {
    Object(util_base["d" /* assertNonNull */])(values);
    const hasShape = shape != null;
    if (hasShape && shape.length !== 5) {
        throw new Error('tensor5d() requires shape to have five numbers');
    }
    const deducedShape = Object(tensor_util_env["c" /* inferShape */])(values, dtype);
    const rank = deducedShape.length;
    if (rank !== 5 && rank !== 1) {
        throw new Error('tensor5d() requires values to be number[][][][][] or flat/TypedArray');
    }
    // A flat values array carries no rank information, so the caller must
    // supply the shape explicitly.
    if (rank === 1 && !hasShape) {
        throw new Error('tensor5d() requires shape to be provided when `values` are a flat array');
    }
    return Object(tensor_ops_util["a" /* makeTensor */])(values, shape, deducedShape, dtype);
}
//# sourceMappingURL=tensor5d.js.map
// CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/tensor6d.js
/**
* @license
* Copyright 2018 Google LLC. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================================
*/
/**
 * Creates a rank-6 `tf.Tensor` with the provided values, shape and dtype.
 *
 * The same functionality can be achieved with `tf.tensor`, but in general
 * we recommend using `tf.tensor6d` as it makes the code more readable.
 *
 * ```js
 * // Pass a nested array.
 * tf.tensor6d([[[[[[1],[2]],[[3],[4]]],[[[5],[6]],[[7],[8]]]]]]).print();
 * ```
 * ```js
 * // Pass a flat array and specify a shape.
 * tf.tensor6d([1, 2, 3, 4, 5, 6, 7, 8], [1, 1, 2, 2, 2, 1]).print();
 * ```
 *
 * @param values The values of the tensor. Can be a nested array of numbers,
 *     a flat array, or a `TypedArray`.
 * @param shape The shape of the tensor. Optional. If not provided,
 *     it is inferred from `values`.
 * @param dtype The data type.
 *
 * @doc {heading: 'Tensors', subheading: 'Creation'}
 */
function tensor6d(values, shape, dtype) {
    Object(util_base["d" /* assertNonNull */])(values);
    const hasShape = shape != null;
    if (hasShape && shape.length !== 6) {
        throw new Error('tensor6d() requires shape to have six numbers');
    }
    const deducedShape = Object(tensor_util_env["c" /* inferShape */])(values, dtype);
    const rank = deducedShape.length;
    if (rank !== 6 && rank !== 1) {
        throw new Error('tensor6d() requires values to be number[][][][][][] or flat/TypedArray');
    }
    // A flat values array carries no rank information, so the caller must
    // supply the shape explicitly.
    if (rank === 1 && !hasShape) {
        throw new Error('tensor6d() requires shape to be provided when `values` are a flat array');
    }
    // This op fills in the inferred shape itself when none was given.
    const finalShape = shape || deducedShape;
    return Object(tensor_ops_util["a" /* makeTensor */])(values, finalShape, deducedShape, dtype);
}
//# sourceMappingURL=tensor6d.js.map
// EXTERNAL MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/topk.js
var topk = __webpack_require__(179);
// EXTERNAL MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/truncated_normal.js
var truncated_normal = __webpack_require__(249);
// EXTERNAL MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/unique.js
var unique = __webpack_require__(180);
// EXTERNAL MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/unsorted_segment_sum.js
var unsorted_segment_sum = __webpack_require__(134);
// EXTERNAL MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/unstack.js
var unstack = __webpack_require__(83);
// CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/variable.js
/**
* @license
* Copyright 2018 Google LLC. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================================
*/
/**
 * Creates a new variable with the provided initial value.
 * ```js
 * const x = tf.variable(tf.tensor([1, 2, 3]));
 * x.assign(tf.tensor([4, 5, 6]));
 *
 * x.print();
 * ```
 *
 * @param initialValue Initial value for the tensor.
 * @param trainable If true, optimizers are allowed to update it.
 * @param name Name of the variable. Defaults to a unique id.
 * @param dtype If set, initialValue will be converted to the given type.
 *
 * @doc {heading: 'Tensors', subheading: 'Creation'}
 */
function variable(initialValue, trainable = true, name, dtype) {
    // Variable creation is delegated entirely to the global engine.
    const eng = engine["a" /* ENGINE */];
    return eng.makeVariable(initialValue, trainable, name, dtype);
}
//# sourceMappingURL=variable.js.map
// EXTERNAL MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/where.js
var where = __webpack_require__(36);
// EXTERNAL MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/where_async.js
var where_async = __webpack_require__(181);
// EXTERNAL MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/zeros.js
var zeros = __webpack_require__(80);
// EXTERNAL MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/zeros_like.js
var zeros_like = __webpack_require__(20);
// CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/boolean_mask.js
/**
* @license
* Copyright 2018 Google LLC. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================================
*/
/**
* Apply boolean mask to tensor.
*
* ```js
* const tensor = tf.tensor2d([1, 2, 3, 4, 5, 6], [3, 2]);
* const mask = tf.tensor1d([1, 0, 1], 'bool');
* const result = await tf.booleanMaskAsync(tensor, mask);
* result.print();
* ```
*
* @param tensor N-D tensor.
* @param mask K-D boolean tensor, K <= N and K must be known statically.
* @param axis A 0-D int Tensor representing the axis in tensor to mask from.
* By default, axis is 0 which will mask from the first dimension.
* Otherwise K + axis <= N.
*
* @doc {heading: 'Tensors', subheading: 'Slicing and Joining'}
*/
/**
 * Implementation of `booleanMaskAsync`: collapses the masked dimensions into
 * one, finds the flat indices where the mask is true, and gathers those
 * slices from the input tensor.
 */
async function booleanMaskAsync_(tensor, mask, axis) {
    const srcTensor = Object(tensor_util_env["a" /* convertToTensor */])(tensor, 'tensor', 'boolMask');
    const boolMask = Object(tensor_util_env["a" /* convertToTensor */])(mask, 'mask', 'boolMask', 'bool');
    const startAxis = axis == null ? 0 : axis;
    const maskRank = boolMask.rank;
    const srcShape = srcTensor.shape;
    util_base["b" /* assert */](maskRank > 0, () => 'mask cannot be scalar');
    util_base["e" /* assertShapesMatch */](srcShape.slice(startAxis, startAxis + maskRank), boolMask.shape, `mask's shape must match the first K dimensions of tensor's shape,`);
    // Product of the masked dimensions: they collapse into a single axis.
    let flatDimSize = 1;
    for (let d = startAxis; d < startAxis + maskRank; d++) {
        flatDimSize *= srcShape[d];
    }
    const collapsedShape = [
        ...srcShape.slice(0, startAxis),
        flatDimSize,
        ...srcShape.slice(startAxis + maskRank)
    ];
    const flatTensor = Object(reshape["a" /* reshape */])(srcTensor, collapsedShape);
    const flatMask = Object(reshape["a" /* reshape */])(boolMask, [-1]);
    // whereAsync yields a 2-D tensor of coordinates of the true mask entries.
    const truePositions = await Object(where_async["a" /* whereAsync */])(flatMask);
    const gatherIndices = Object(squeeze["a" /* squeeze */])(truePositions, [1]);
    const res = Object(gather["a" /* gather */])(flatTensor, gatherIndices, startAxis);
    // Dispose every intermediate tensor so nothing leaks; inputs that were
    // converted (rather than passed in as tensors) are disposed too.
    if (tensor !== srcTensor) {
        srcTensor.dispose();
    }
    if (mask !== boolMask) {
        boolMask.dispose();
    }
    gatherIndices.dispose();
    flatTensor.dispose();
    flatMask.dispose();
    truePositions.dispose();
    return res;
}
const booleanMaskAsync = booleanMaskAsync_;
//# sourceMappingURL=boolean_mask.js.map
// EXTERNAL MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/transpose.js
var transpose = __webpack_require__(52);
// EXTERNAL MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/norm.js
var norm = __webpack_require__(135);
// EXTERNAL MODULE: ./node_modules/@tensorflow/tfjs-core/dist/tensor_util.js
var tensor_util = __webpack_require__(23);
// CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/moving_average.js
/**
* @license
* Copyright 2018 Google LLC. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================================
*/
/**
* Compute the moving average of a variable.
*
* Without zeroDebias, the moving average operation is defined by:
* `v += delta`
* where
* `delta = (1 - decay) * (x - v)`
*
* With zeroDebias (default), the `delta` term is scaled to debias the
* effect of the (assumed) zero-initialization of `v`.
* `delta /= (1 - decay ^ step)`
*
* For more details on the zero-debiasing algorithm, see:
* https://arxiv.org/abs/1412.6980
*
* Note that this function is completely stateless and does not keep track of
* step count. The step count needs to be maintained by the caller and passed
* in as `step`.
*
* @param v The current moving average value.
* @param x New input value, must have the same shape and dtype as `v`.
* @param decay The decay factor. Typical values are 0.95 and 0.99.
* @param step Step count.
* @param zeroDebias: Whether zeroDebias is to be performed (default: `true`).
* @returns The new moving average value.
*
* @doc {heading: 'Operations', subheading: 'Moving Average'}
*/
/**
 * Implementation of `movingAverage`: v' = v + delta where
 * delta = (1 - decay) * (x - v), optionally zero-debiased by
 * dividing delta by (1 - decay^step).
 */
function movingAverage_(v, x, decay, step, zeroDebias = true) {
    const currentAvg = Object(tensor_util_env["a" /* convertToTensor */])(v, 'v', 'movingAverage');
    const newValue = Object(tensor_util_env["a" /* convertToTensor */])(x, 'x', 'movingAverage');
    const decayT = Object(tensor_util_env["a" /* convertToTensor */])(decay, 'decay', 'movingAverage');
    Object(tensor_util["assertTypesMatch"])(currentAvg, newValue);
    util_base["b" /* assert */](util_base["a" /* arraysEqual */](currentAvg.shape, newValue.shape), () => 'Shape mismatch in v and x');
    const one = Object(scalar["a" /* scalar */])(1);
    const oneMinusDecay = Object(sub["a" /* sub */])(one, decayT);
    // delta = (x - v) * (1 - decay)
    let update = Object(mul["a" /* mul */])(Object(sub["a" /* sub */])(newValue, currentAvg), oneMinusDecay);
    if (zeroDebias) {
        // Correct for the assumed zero-initialization of v: delta /= 1 - decay^step.
        util_base["b" /* assert */](step != null, () => 'When using zeroDebias: true, step is required.');
        const stepT = Object(tensor_util_env["a" /* convertToTensor */])(step, 'step', 'movingAverage');
        update = Object(div["a" /* div */])(update, Object(sub["a" /* sub */])(one, Object(pow["a" /* pow */])(decayT, stepT)));
    }
    return Object(add["a" /* add */])(currentAvg, update);
}
const movingAverage = Object(operation["b" /* op */])({ movingAverage_ });
//# sourceMappingURL=moving_average.js.map
// EXTERNAL MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/scatter_nd.js
var scatter_nd = __webpack_require__(250);
// EXTERNAL MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/sparse_to_dense.js + 1 modules
var sparse_to_dense = __webpack_require__(253);
// EXTERNAL MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/gather_nd.js
var gather_nd = __webpack_require__(251);
// CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/dropout_util.js
/**
* @license
* Copyright 2019 Google LLC. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================================
*/
/**
* Normalize noise shape based on provided tensor and noise shape.
*
* @param x Tensor.
* @param noiseShape The shape for the randomly generated keep/drop flags, as
* an array of numbers. Optional.
* @returns Normalized noise shape.
*/
/**
 * Normalize a dropout noise shape against the input tensor's shape.
 *
 * - No noise shape given: use a copy of the input's shape.
 * - Shapes already equal: return the noise shape as-is.
 * - Same rank: fill each `null` entry of the noise shape with the
 *   corresponding input dimension.
 * - Otherwise: return the noise shape unchanged.
 */
function getNoiseShape(x, noiseShape) {
    if (noiseShape == null) {
        return x.shape.slice();
    }
    if (util_base["a" /* arraysEqual */](x.shape, noiseShape)) {
        return noiseShape;
    }
    if (x.shape.length === noiseShape.length) {
        return noiseShape.map((dim, i) => (dim == null && x.shape[i] != null) ? x.shape[i] : dim);
    }
    return noiseShape;
}
//# sourceMappingURL=dropout_util.js.map
// CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/dropout.js
/**
* @license
* Copyright 2018 Google LLC. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================================
*/
/**
* Computes dropout.
*
* ```js
* const x = tf.tensor1d([1, 2, 2, 1]);
* const rate = 0.75;
* const output = tf.dropout(x, rate);
* output.print();
* ```
*
* @param x A floating point Tensor or TensorLike.
* @param rate A float in the range [0, 1). The probability that each element
* of x is discarded.
* @param noiseShape An array of numbers of type int32, representing the
* shape for randomly generated keep/drop flags. If the noiseShape has null
* value, it will be automatically replaced with the x's relative dimension
* size. Optional.
* @param seed Used to create random seeds. Optional.
* @returns A Tensor of the same shape of x.
*
* @doc {heading: 'Operations', subheading: 'Dropout'}
*/
/**
 * Implementation of `dropout`: zeroes each element with probability `rate`
 * and rescales the kept elements by 1 / (1 - rate).
 */
function dropout_(x, rate, noiseShape, seed) {
    const $x = Object(tensor_util_env["a" /* convertToTensor */])(x, 'x', 'dropout');
    util_base["b" /* assert */]($x.dtype === 'float32', () => `x has to be a floating point tensor since it's going to be ` +
        `scaled, but got a ${$x.dtype} tensor instead.`);
    util_base["b" /* assert */](rate >= 0 && rate < 1, () => `rate must be a float in the range [0, 1), but got ${rate}.`);
    // A rate of 0 keeps everything: hand back the input (cloned if it was
    // already a tensor, so the caller always owns the result).
    if (rate === 0) {
        return x instanceof dist_tensor["a" /* Tensor */] ? $x.clone() : $x;
    }
    const $noiseShape = getNoiseShape($x, noiseShape);
    const keepProb = 1 - rate;
    // floor(uniform[0,1) + keepProb) is 1 with probability keepProb and 0
    // otherwise; dividing by keepProb rescales the surviving values.
    const uniform = Object(random_uniform["a" /* randomUniform */])($noiseShape, 0, 1, 'float32', seed);
    const keepMask = Object(floor["a" /* floor */])(Object(add["a" /* add */])(uniform, keepProb));
    const multiplier = Object(div["a" /* div */])(keepMask, keepProb);
    return Object(mul["a" /* mul */])($x, multiplier);
}
const dropout = Object(operation["b" /* op */])({ dropout_ });
//# sourceMappingURL=dropout.js.map
// CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/signal_ops_util.js
/**
* @license
* Copyright 2019 Google LLC. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================================
*/
function enclosingPowerOfTwo(value) {
    // Smallest power of two >= value: raise 2 to ceil(log2(value)).
    const exponent = Math.ceil(Math.log(value) / Math.log(2.0));
    return Math.floor(Math.pow(2, exponent));
}
/**
 * Build a generalized cosine window of the given length:
 * w[i] = a - b * cos(2*pi*i / (N + even - 1)).
 */
function cosineWindow(windowLength, a, b) {
    // 1 when the length is even, 0 when odd; keeps the window symmetric.
    const even = 1 - windowLength % 2;
    const values = new Float32Array(windowLength);
    const denominator = windowLength + even - 1;
    for (let i = 0; i < windowLength; ++i) {
        values[i] = a - b * Math.cos((2.0 * Math.PI * i) / denominator);
    }
    return Object(tensor1d["a" /* tensor1d */])(values, 'float32');
}
//# sourceMappingURL=signal_ops_util.js.map
// CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/in_top_k.js
/**
* @license
* Copyright 2019 Google LLC. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================================
*/
/**
* Returns whether the targets are in the top K predictions.
*
* ```js
* const predictions = tf.tensor2d([[20, 10, 40, 30], [30, 50, -20, 10]]);
* const targets = tf.tensor1d([2, 0]);
* const precision = await tf.inTopKAsync(predictions, targets);
* precision.print();
* ```
* @param predictions 2-D or higher `tf.Tensor` with last dimension being
* at least `k`.
* @param targets 1-D or higher `tf.Tensor`.
* @param k Optional Number of top elements to look at for computing precision,
* default to 1.
*
* @doc {heading: 'Operations', subheading: 'Evaluation'}
*/
/**
 * Implementation of `inTopKAsync`: for each row of the (flattened)
 * predictions, sorts the values descending and checks whether the target
 * index appears among the first k entries.
 */
async function inTopKAsync_(predictions, targets, k = 1) {
    const $predictions = Object(tensor_util_env["a" /* convertToTensor */])(predictions, 'predictions', 'inTopK');
    const $targets = Object(tensor_util_env["a" /* convertToTensor */])(targets, 'targets', 'inTopK');
    Object(util_base["b" /* assert */])($predictions.rank > 1, () => 'inTopK() expects the predictions to be of rank 2 or higher, ' +
        `but got ${$predictions.rank}`);
    Object(util_base["b" /* assert */])($predictions.rank - 1 === $targets.rank, () => `predictions rank should be 1 larger than ` +
        `targets rank, but got predictions rank ` +
        `${$predictions.rank} and targets rank ${$targets.rank}`);
    Object(util_base["e" /* assertShapesMatch */])($predictions.shape.slice(0, $predictions.shape.length - 1), $targets.shape, `predictions's shape should be align with the targets' shape, ` +
        'except the last dimension.');
    const lastDim = $predictions.shape[$predictions.shape.length - 1];
    Object(util_base["b" /* assert */])(k > 0 && k <= lastDim, () => `'k' passed to inTopK() must be > 0 && <= the predictions last ` +
        `dimension (${lastDim}), but got ${k}`);
    const predictionsVals = await $predictions.data();
    const targetsVals = await $targets.data();
    // Treat predictionsVals as a [batch, lastDim] matrix and scan each row.
    const batch = predictionsVals.length / lastDim;
    const size = lastDim;
    const precision = Object(util_base["o" /* getTypedArrayFromDType */])('bool', batch);
    for (let b = 0; b < batch; b++) {
        const offset = b * size;
        const vals = predictionsVals.subarray(offset, offset + size);
        // Pair each value with its index, then rank by value (descending).
        const ranked = Array.from(vals, (value, index) => ({ value, index }));
        ranked.sort((lhs, rhs) => rhs.value - lhs.value);
        precision[b] = 0;
        for (let i = 0; i < k; i++) {
            if (ranked[i].index === targetsVals[b]) {
                precision[b] = 1;
                break;
            }
        }
    }
    if (predictions !== $predictions) {
        $predictions.dispose();
    }
    if (targets !== $targets) {
        $targets.dispose();
    }
    // Output precision has the same shape as targets.
    return Object(ops_tensor["a" /* tensor */])(precision, $targets.shape, 'bool');
}
const inTopKAsync = inTopKAsync_;
//# sourceMappingURL=in_top_k.js.map
// EXTERNAL MODULE: ./node_modules/@tensorflow/tfjs-core/dist/gradients.js
var gradients = __webpack_require__(34);
// EXTERNAL MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/broadcast_util.js
var broadcast_util = __webpack_require__(17);
// EXTERNAL MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/conv2d_backprop_filter.js
var conv2d_backprop_filter = __webpack_require__(140);
// EXTERNAL MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/conv2d_backprop_input.js
var conv2d_backprop_input = __webpack_require__(138);
// EXTERNAL MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/conv_util.js
var conv_util = __webpack_require__(31);
// EXTERNAL MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/fused_util.js
var fused_util = __webpack_require__(44);
// CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/fused/conv2d.js
/**
* @license
* Copyright 2019 Google LLC. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================================
*/
/**
* Computes a 2D convolution over the input x, optionally fused with adding a
* bias and applying an activation.
*
* ```js
* const inputDepth = 2;
* const inShape = [2, 2, 2, inputDepth];
* const outputDepth = 2;
* const fSize = 1;
* const pad = 0;
* const strides = 1;
*
* const x = tf.tensor4d( [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
* 16], inShape);
* const w = tf.tensor4d([-1, 1, -2, 0.5], [fSize, fSize, inputDepth,
* outputDepth]);
*
* tf.fused.conv2d({ x, filter: w, strides, pad, dataFormat: 'NHWC',
* dilations: [1, 1], bias: tf.scalar(5), activation: 'relu' }).print();
* ```
*
* @param obj An object with the following properties:
* @param x The input tensor, of rank 4 or rank 3, of shape
* `[batch, height, width, inChannels]`. If rank 3, batch of 1 is
* assumed.
* @param filter The filter, rank 4, of shape
* `[filterHeight, filterWidth, inDepth, outDepth]`.
* @param strides The strides of the convolution: `[strideHeight,
* strideWidth]`.
* @param pad The type of padding algorithm.
* - `same` and stride 1: output will be of same size as input,
* regardless of filter size.
* - `valid` output will be smaller than input if filter is larger
* than 1x1.
* - For more info, see this guide:
* [https://www.tensorflow.org/api_guides/python/nn#Convolution](
* https://www.tensorflow.org/api_guides/python/nn#Convolution)
* @param dataFormat An optional string from: "NHWC", "NCHW". Defaults to
* "NHWC". Specify the data format of the input and output data. With the
* default format "NHWC", the data is stored in the order of: [batch,
* height, width, channels]. Only "NHWC" is currently supported.
* @param dilations The dilation rates: `[dilationHeight, dilationWidth]`
* in which we sample input values across the height and width dimensions
* in atrous convolution. Defaults to `[1, 1]`. If `dilations` is a single
* number, then `dilationHeight == dilationWidth`. If it is greater than
* 1, then all values of `strides` must be 1.
* @param dimRoundingMode A string from: 'ceil', 'round', 'floor'. If none is
* provided, it will default to truncate.
* @param bias Tensor to be added to the result.
* @param activation Name of activation kernel (defaults to `linear`) to be
* applied
* after biasAdd.
* @param preluActivationWeights Tensor of prelu weights to be applied as part
* of a `prelu` activation, typically the same shape as `x`.
* @param leakyreluAlpha Optional. Alpha to be applied as part of a `leakyrelu`
* activation.
*/
function fusedConv2d_({ x, filter, strides, pad, dataFormat = 'NHWC', dilations = [1, 1], dimRoundingMode, bias, activation = 'linear', preluActivationWeights, leakyreluAlpha }) {
    // Guard against callers passing a falsy activation (null/'') that would
    // bypass the destructuring default.
    activation = activation || 'linear';
    // When fusion is not applicable (per shouldFuse, which consults the current
    // gradient-tape depth and the activation), fall back to the unfused
    // sequence: conv2d -> bias add -> activation.
    if (Object(fused_util["d" /* shouldFuse */])(engine["a" /* ENGINE */].state.gradientDepth, activation) === false) {
        let result = Object(conv2d["a" /* conv2d */])(x, filter, strides, pad, dataFormat, dilations, dimRoundingMode);
        if (bias != null) {
            result = Object(add["a" /* add */])(result, bias);
        }
        return Object(fused_util["a" /* applyActivation */])(result, activation, preluActivationWeights, leakyreluAlpha);
    }
    const $x = Object(tensor_util_env["a" /* convertToTensor */])(x, 'x', 'conv2d');
    const $filter = Object(tensor_util_env["a" /* convertToTensor */])(filter, 'filter', 'conv2d');
    // A rank-3 input is treated as a batch of 1; remember that so the result
    // can be reshaped back to rank 3 before returning.
    let x4D = $x;
    let reshapedTo4D = false;
    if ($x.rank === 3) {
        reshapedTo4D = true;
        x4D = Object(reshape["a" /* reshape */])($x, [1, $x.shape[0], $x.shape[1], $x.shape[2]]);
    }
    util_base["b" /* assert */](x4D.rank === 4, () => `Error in fused conv2d: input must be rank 4, but got rank ` +
        `${x4D.rank}.`);
    util_base["b" /* assert */]($filter.rank === 4, () => `Error in fused conv2d: filter must be rank 4, but got rank ` +
        `${$filter.rank}.`);
    if (dimRoundingMode != null) {
        util_base["b" /* assert */](util_base["v" /* isInt */](pad), () => `Error in fused conv2d: pad must be an integer when using, ` +
            `dimRoundingMode ${dimRoundingMode} but got pad ${pad}.`);
    }
    util_base["b" /* assert */](x4D.shape[3] === $filter.shape[2], () => `Error in conv2d: depth of input (${x4D.shape[3]}) must match ` +
        `input depth for filter ${$filter.shape[2]}.`);
    util_base["b" /* assert */](conv_util["h" /* eitherStridesOrDilationsAreOne */](strides, dilations), () => 'Error in conv2D: Either strides or dilations must be 1. ' +
        `Got strides ${strides} and dilations '${dilations}'`);
    util_base["b" /* assert */](dataFormat === 'NHWC', () => `Error in conv2d: got dataFormat of ${dataFormat} but only NHWC is currently supported.`);
    const convInfo = conv_util["a" /* computeConv2DInfo */](x4D.shape, $filter.shape, strides, dilations, pad, dimRoundingMode);
    let $bias;
    if (bias != null) {
        $bias = Object(tensor_util_env["a" /* convertToTensor */])(bias, 'bias', 'fused conv2d');
        [$bias] = Object(tensor_util["makeTypesMatch"])($bias, $x);
        // The bias must be broadcastable to the convolution's output shape.
        broadcast_util["a" /* assertAndGetBroadcastShape */](convInfo.outShape, $bias.shape);
    }
    let $preluActivationWeights;
    if (preluActivationWeights != null) {
        $preluActivationWeights = Object(tensor_util_env["a" /* convertToTensor */])(preluActivationWeights, 'prelu weights', 'fused conv2d');
    }
    // Custom gradient. `saved` holds [filter, input, pre-reshape output(, bias)]
    // in exactly the order the forward closures below pass to `save`.
    const grad = (dy, saved) => {
        const [$filter, x4D, y, $bias] = saved;
        // Undo the fused activation on dy before computing the conv gradients.
        const dyActivation = Object(fused_util["c" /* getFusedDyActivation */])(dy, y, activation);
        util_base["b" /* assert */](conv_util["i" /* tupleValuesAreOne */](dilations), () => 'Error in gradient of fused conv2D: ' +
            `dilation rates greater than 1 ` +
            `are not yet supported in gradients. Got dilations '${dilations}'`);
        const xDer = Object(conv2d_backprop_input["a" /* conv2DBackpropInput */])(x4D.shape, dyActivation, $filter, strides, pad);
        const filterDer = Object(conv2d_backprop_filter["a" /* conv2DBackpropFilter */])(x4D, dyActivation, $filter.shape, strides, pad);
        const der = [xDer, filterDer];
        if ($bias != null) {
            const biasDer = Object(fused_util["b" /* getFusedBiasGradient */])($bias, dyActivation);
            der.push(biasDer);
        }
        return der;
    };
    const inputs = {
        x: x4D,
        filter: $filter,
        bias: $bias,
        preluActivationWeights: $preluActivationWeights
    };
    const attrs = {
        strides,
        pad,
        dataFormat,
        dilations,
        dimRoundingMode,
        activation,
        leakyreluAlpha
    };
    // Depending on the params passed in we will have a different number of
    // inputs and thus a different number of elements in the gradient.
    if (bias == null) {
        const customOp = Object(gradients["a" /* customGrad */])((x4D, filter, save) => {
            let res =
            // tslint:disable-next-line: no-unnecessary-type-assertion
            engine["a" /* ENGINE */].runKernel(kernel_names["kb" /* FusedConv2D */], inputs, attrs);
            // Save the pre-reshape output: grad needs it to invert the activation.
            save([filter, x4D, res]);
            if (reshapedTo4D) {
                // tslint:disable-next-line: no-unnecessary-type-assertion
                res = Object(reshape["a" /* reshape */])(res, [res.shape[1], res.shape[2], res.shape[3]]);
            }
            return { value: res, gradFunc: grad };
        });
        return customOp(x4D, $filter);
    }
    else {
        const customOpWithBias = Object(gradients["a" /* customGrad */])((x4D, filter, bias, save) => {
            let res = engine["a" /* ENGINE */].runKernel(kernel_names["kb" /* FusedConv2D */], inputs, attrs);
            // Same save order as above, with the bias appended.
            save([filter, x4D, res, bias]);
            if (reshapedTo4D) {
                // tslint:disable-next-line: no-unnecessary-type-assertion
                res = Object(reshape["a" /* reshape */])(res, [res.shape[1], res.shape[2], res.shape[3]]);
            }
            return { value: res, gradFunc: grad };
        });
        return customOpWithBias(x4D, $filter, $bias);
    }
}
const conv2d_conv2d = Object(operation["b" /* op */])({ fusedConv2d_ });
//# sourceMappingURL=conv2d.js.map
// EXTERNAL MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/depthwise_conv2d_native_backprop_filter.js
var depthwise_conv2d_native_backprop_filter = __webpack_require__(213);
// EXTERNAL MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/depthwise_conv2d_native_backprop_input.js
var depthwise_conv2d_native_backprop_input = __webpack_require__(212);
// CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/fused/depthwise_conv2d.js
/**
* @license
* Copyright 2019 Google LLC. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================================
*/
/**
* Computes depthwise 2D convolution, optionally fused with adding a
* bias and applying an activation.
*
* Given a 4D `input` array and a `filter` array of shape
* `[filterHeight, filterWidth, inChannels, channelMultiplier]` containing
* `inChannels` convolutional filters of depth 1, this op applies a
* different filter to each input channel (expanding from 1 channel to
* `channelMultiplier` channels for each), then concatenates the results
* together. The output has `inChannels * channelMultiplier` channels.
*
* See
* [https://www.tensorflow.org/api_docs/python/tf/nn/depthwise_conv2d](
* https://www.tensorflow.org/api_docs/python/tf/nn/depthwise_conv2d)
* for more details.
*
* @param obj An object with the following properties:
* @param x The input tensor, of rank 4 or rank 3, of shape
* `[batch, height, width, inChannels]`. If rank 3, batch of 1 is
* assumed.
* @param filter The filter tensor, rank 4, of shape
* `[filterHeight, filterWidth, inChannels, channelMultiplier]`.
* @param strides The strides of the convolution: `[strideHeight,
* strideWidth]`. If strides is a single number, then `strideHeight ==
* strideWidth`.
* @param pad The type of padding algorithm.
* - `same` and stride 1: output will be of same size as input,
* regardless of filter size.
* - `valid`: output will be smaller than input if filter is larger
* than 1x1.
* - For more info, see this guide:
* [https://www.tensorflow.org/api_guides/python/nn#Convolution](
* https://www.tensorflow.org/api_guides/python/nn#Convolution)
* @param dilations The dilation rates: `[dilationHeight, dilationWidth]`
* in which we sample input values across the height and width dimensions
* in atrous convolution. Defaults to `[1, 1]`. If `rate` is a single
* number, then `dilationHeight == dilationWidth`. If it is greater than
* 1, then all values of `strides` must be 1.
* @param dataFormat: An optional string from: "NHWC", "NCHW". Defaults to
* "NHWC". Specify the data format of the input and output data. With the
* default format "NHWC", the data is stored in the order of: [batch,
* height, width, channels]. Only "NHWC" is currently supported.
* @param dimRoundingMode A string from: 'ceil', 'round', 'floor'. If none is
* provided, it will default to truncate.
* @param bias Tensor to be added to the result.
* @param activation Name of activation kernel (defaults to `linear`).
* @param preluActivationWeights Tensor of prelu weights to be applied as part
* of a `prelu` activation, typically the same shape as `x`.
* @param leakyreluAlpha Optional. Alpha to be applied as part of a `leakyrelu`
* activation.
*/
function fusedDepthwiseConv2d_({ x, filter, strides, pad, dataFormat = 'NHWC', dilations = [1, 1], dimRoundingMode, bias, activation = 'linear', preluActivationWeights, leakyreluAlpha }) {
if (Object(fused_util["d" /* shouldFuse */])(engine["a" /* ENGINE */].state.gradientDepth, activation) === false) {
let result = Object(depthwise_conv2d["a" /* depthwiseConv2d */])(x, filter, strides, pad, dataFormat, dilations, dimRoundingMode);
if (bias != null) {
result = Object(add["a" /* add */])(result, bias);
}
return Object(fused_util["a" /* applyActivation */])(result, activation, preluActivationWeights, leakyreluAlpha);
}
const $x = Object(tensor_util_env["a" /* convertToTensor */])(x, 'x', 'depthwiseConv2d');
const $filter = Object(tensor_util_env["a" /* convertToTensor */])(filter, 'filter', 'depthwiseConv2d');
let x4D = $x;
let reshapedTo4D = false;
if ($x.rank === 3) {
reshapedTo4D = true;
x4D = Object(reshape["a" /* reshape */])($x, [1, $x.shape[0], $x.shape[1], $x.shape[2]]);
}
util_base["b" /* assert */](x4D.rank === 4, () => `Error in fused depthwiseConv2d: input must be rank 4, but got ` +
`rank ${x4D.rank}.`);
util_base["b" /* assert */]($filter.rank === 4, () => `Error in fused depthwiseConv2d: filter must be rank 4, ` +
`but got rank ${$filter.rank}.`);
util_base["b" /* assert */](x4D.shape[3] === $filter.shape[2], () => `Error in fused depthwiseConv2d: number of input channels ` +
`(${x4D.shape[3]}) must match the inChannels dimension in ` +
`filter ${$filter.shape[2]}.`);
if (dilations == null) {
dilations = [1, 1];
}
util_base["b" /* assert */](conv_util["h" /* eitherStridesOrDilationsAreOne */](strides, dilations), () => 'Error in fused depthwiseConv2d: Either strides or dilations must ' +
`be 1. Got strides ${strides} and dilations '${dilations}'`);
if (dimRoundingMode != null) {
util_base["b" /* assert */](util_base["v" /* isInt */](pad), () => `Error in fused depthwiseConv2d: pad must be an integer when ` +
`using dimRoundingMode ${dimRoundingMode} but got pad ${pad}.`);
}
const convInfo = conv_util["a" /* computeConv2DInfo */](x4D.shape, $filter.shape, strides, dilations, pad, dimRoundingMode, true /* depthwise */);
let $bias;
if (bias != null) {
$bias = Object(tensor_util_env["a" /* convertToTensor */])(bias, 'bias', 'fused conv2d');
[$bias] = Object(tensor_util["makeTypesMatch"])($bias, $x);
broadcast_util["a" /* assertAndGetBroadcastShape */](convInfo.outShape, $bias.shape);
}
let $preluActivationWeights;
if (preluActivationWeights != null) {
$preluActivationWeights = Object(tensor_util_env["a" /* convertToTensor */])(preluActivationWeights, 'prelu weights', 'fused depthwiseConv2d');
}
const grad = (dy, saved) => {
util_base["b" /* assert */](conv_util["i" /* tupleValuesAreOne */](dilations), () => 'Error in gradient of fused depthwiseConv2d: dilation rates ' +
`greater than 1 are not yet supported. Got dilations ` +
`'${dilations}'`);
const [$filter, x4D, y, bias] = saved;
const dyActivation = Object(fused_util["c" /* getFusedDyActivation */])(dy, y, activation);
const xDer = Object(depthwise_conv2d_native_backprop_input["a" /* depthwiseConv2dNativeBackpropInput */])(x4D.shape, dyActivation, $filter, strides, pad, dilations, dimRoundingMode);
const filterDer = Object(depthwise_conv2d_native_backprop_filter["a" /* depthwiseConv2dNativeBackpropFilter */])(x4D, dyActivation, $filter.shape, strides, pad, dilations, dimRoundingMode);
if (bias != null) {
const biasDer = Object(fused_util["b" /* getFusedBiasGradient */])($bias, dyActivation);
return [xDer, filterDer, biasDer];
}
return [xDer, filterDer];
};
const inputs = {
x: x4D,
filter: $filter,
bias: $bias,
preluActivationWeights: $preluActivationWeights
};
const attrs = {
strides,
pad,
dataFormat,
dilations,
dimRoundingMode,
activation,
leakyreluAlpha
};
// Depending on the the params passed in we will have different number of
// inputs and thus a a different number of elements in the gradient.
if (bias == null) {
const customOp = Object(gradients["a" /* customGrad */])((x4D, filter, save) => {
// tslint:disable-next-line: no-unnecessary-type-assertion
let res = engine["a" /* ENGINE */].runKernel(kernel_names["lb" /* FusedDepthwiseConv2D */], inputs, attrs);
save([filter, x4D, res]);
if (reshapedTo4D) {
// tslint:disable-next-line: no-unnecessary-type-assertion
res = Object(reshape["a" /* reshape */])(res, [res.shape[1], res.shape[2], res.shape[3]]);
}
return { value: res, gradFunc: grad };
});
return customOp(x4D, $filter);
}
else {
const customOpWithBias = Object(gradients["a" /* customGrad */])((x4D, filter, bias, save) => {
// tslint:disable-next-line: no-unnecessary-type-assertion
let res = engine["a" /* ENGINE */].runKernel(kernel_names["lb" /* FusedDepthwiseConv2D */], inputs, attrs);
save([filter, x4D, res, bias]);
if (reshapedTo4D) {
// tslint:disable-next-line: no-unnecessary-type-assertion
res = Object(reshape["a" /* reshape */])(res, [res.shape[1], res.shape[2], res.shape[3]]);
}
return { value: res, gradFunc: grad };
});
return customOpWithBias(x4D, $filter, $bias);
}
}
// Public fused depthwiseConv2d entry point: wraps fusedDepthwiseConv2d_ with
// the op() decorator defined elsewhere in this bundle.
const depthwiseConv2d = Object(operation["b" /* op */])({ fusedDepthwiseConv2d_ });
//# sourceMappingURL=depthwise_conv2d.js.map
// CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/fused/mat_mul.js
/**
* @license
* Copyright 2019 Google LLC. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================================
*/
/**
* Computes the dot product of two matrices with optional activation and bias.
*
* ```js
* const a = tf.tensor2d([-1, -2], [1, 2]);
* const b = tf.tensor2d([1, 2, 3, 4], [2, 2]);
* const bias = tf.tensor2d([1, 2], [1, 2]);
*
* tf.fused.matMul({a, b, bias, activation: 'relu'}).print();
* ```
*
* @param obj An object with the following properties:
* - `a` First matrix in dot product operation.
* - `b` Second matrix in dot product operation.
* - `transposeA` If true, `a` is transposed before multiplication.
* - `transposeB` If true, `b` is transposed before multiplication.
* - `bias` Matrix to be added to the result.
* - `activation` Name of activation kernel (defaults to `linear`).
* - `preluActivationWeights` Tensor of prelu weights.
* - `leakyreluAlpha` Alpha of leakyrelu.
*/
function fusedMatMul_({ a, b, transposeA = false, transposeB = false, bias, activation = 'linear', preluActivationWeights, leakyreluAlpha, }) {
    // Unfused fallback: when fusion is not applicable (decided by shouldFuse
    // from the current gradient depth and activation), run the equivalent
    // matMul -> add(bias) -> activation sequence as separate ops.
    if (Object(fused_util["d" /* shouldFuse */])(engine["a" /* ENGINE */].state.gradientDepth, activation) === false) {
        let result = Object(mat_mul["a" /* matMul */])(a, b, transposeA, transposeB);
        if (bias != null) {
            result = Object(add["a" /* add */])(result, bias);
        }
        return Object(fused_util["a" /* applyActivation */])(result, activation, preluActivationWeights, leakyreluAlpha);
    }
    let $a = Object(tensor_util_env["a" /* convertToTensor */])(a, 'a', 'fused matMul');
    let $b = Object(tensor_util_env["a" /* convertToTensor */])(b, 'b', 'fused matMul');
    [$a, $b] = Object(tensor_util["makeTypesMatch"])($a, $b);
    // Matrix dims live in the last two axes; everything before them is
    // treated as batch. Inner/outer sizes account for the optional transposes.
    const innerShapeA = transposeA ? $a.shape[$a.rank - 2] : $a.shape[$a.rank - 1];
    const innerShapeB = transposeB ? $b.shape[$b.rank - 1] : $b.shape[$b.rank - 2];
    const outerShapeA = transposeA ? $a.shape[$a.rank - 1] : $a.shape[$a.rank - 2];
    const outerShapeB = transposeB ? $b.shape[$b.rank - 2] : $b.shape[$b.rank - 1];
    const outerDimsA = $a.shape.slice(0, -2);
    const outerDimsB = $b.shape.slice(0, -2);
    const batchDimA = util_base["O" /* sizeFromShape */](outerDimsA);
    const batchDimB = util_base["O" /* sizeFromShape */](outerDimsB);
    util_base["b" /* assert */]($a.rank >= 2 && $b.rank >= 2 && $a.rank === $b.rank, () => `Error in fused matMul: inputs must have the same rank of at ` +
        `least 2, got ranks ${$a.rank} and ${$b.rank}.`);
    util_base["b" /* assert */](util_base["a" /* arraysEqual */](outerDimsA, outerDimsB), () => `Error in fused matMul: outer dimensions (${outerDimsA}) and (` +
        `${outerDimsB}) of Tensors with shapes ${$a.shape} and ` +
        `${$b.shape} must match.`);
    util_base["b" /* assert */](innerShapeA === innerShapeB, () => `Error in fused matMul: inner shapes (${innerShapeA}) and (` +
        `${innerShapeB}) of Tensors with shapes ${$a.shape} and ` +
        `${$b.shape} and transposeA=${transposeA}` +
        ` and transposeB=${transposeB} must match.`);
    const outShape = $a.shape.slice(0, -2).concat([outerShapeA, outerShapeB]);
    // Collapse all batch dims into one so the kernel sees rank-3 inputs.
    const a3D = transposeA ?
        Object(reshape["a" /* reshape */])($a, [batchDimA, innerShapeA, outerShapeA]) :
        Object(reshape["a" /* reshape */])($a, [batchDimA, outerShapeA, innerShapeA]);
    const b3D = transposeB ?
        Object(reshape["a" /* reshape */])($b, [batchDimB, outerShapeB, innerShapeB]) :
        Object(reshape["a" /* reshape */])($b, [batchDimB, innerShapeB, outerShapeB]);
    let $bias;
    if (bias != null) {
        $bias = Object(tensor_util_env["a" /* convertToTensor */])(bias, 'bias', 'fused matMul');
        [$bias] = Object(tensor_util["makeTypesMatch"])($bias, $a);
        broadcast_util["a" /* assertAndGetBroadcastShape */](outShape, $bias.shape);
    }
    let $preluActivationWeights;
    if (preluActivationWeights != null) {
        $preluActivationWeights = Object(tensor_util_env["a" /* convertToTensor */])(preluActivationWeights, 'prelu weights', 'fused matMul');
    }
    // Custom gradient for the fused kernel. `saved` holds exactly the tensors
    // stored by the forward pass via save([...]) below, in the same order.
    const grad = (dy, saved) => {
        const [a3D, b3D, y, $bias] = saved;
        // we reshape dy because the result of the forward is not
        // necessarily going to be a 3d tensor due to a reshape done at the end of
        // the customOp.
        const dyActivation = Object(fused_util["c" /* getFusedDyActivation */])(Object(reshape["a" /* reshape */])(dy, y.shape), y, activation);
        let aDer;
        let bDer;
        // Standard matMul gradients; one case per transpose combination.
        if (!transposeA && !transposeB) {
            aDer = Object(mat_mul["a" /* matMul */])(dyActivation, b3D, false, true);
            bDer = Object(mat_mul["a" /* matMul */])(a3D, dyActivation, true, false);
        }
        else if (!transposeA && transposeB) {
            aDer = Object(mat_mul["a" /* matMul */])(dyActivation, b3D, false, false);
            bDer = Object(mat_mul["a" /* matMul */])(dyActivation, a3D, true, false);
        }
        else if (transposeA && !transposeB) {
            aDer = Object(mat_mul["a" /* matMul */])(b3D, dyActivation, false, true);
            bDer = Object(mat_mul["a" /* matMul */])(a3D, dyActivation, false, false);
        }
        else {
            aDer = Object(mat_mul["a" /* matMul */])(b3D, dyActivation, true, true);
            bDer = Object(mat_mul["a" /* matMul */])(dyActivation, a3D, true, true);
        }
        if (bias != null) {
            const biasDer = Object(fused_util["b" /* getFusedBiasGradient */])($bias, dyActivation);
            return [aDer, bDer, biasDer];
        }
        else {
            return [aDer, bDer];
        }
    };
    const inputs = {
        a: a3D,
        b: b3D,
        bias: $bias,
        preluActivationWeights: $preluActivationWeights
    };
    const attrs = { transposeA, transposeB, activation, leakyreluAlpha };
    // Depending on the params passed in we will have a different number of
    // inputs and thus a different number of elements in the gradient.
    if (bias == null) {
        const customOp = Object(gradients["a" /* customGrad */])((a3D, b3D, save) => {
            const res =
            // tslint:disable-next-line: no-unnecessary-type-assertion
            engine["a" /* ENGINE */].runKernel(kernel_names["dd" /* _FusedMatMul */], inputs, attrs);
            save([a3D, b3D, res]);
            return { value: Object(reshape["a" /* reshape */])(res, outShape), gradFunc: grad };
        });
        return customOp(a3D, b3D);
    }
    else {
        const customOpWithBias = Object(gradients["a" /* customGrad */])((a3D, b3D, $bias, save) => {
            const res =
            // tslint:disable-next-line: no-unnecessary-type-assertion
            engine["a" /* ENGINE */].runKernel(kernel_names["dd" /* _FusedMatMul */], inputs, attrs);
            save([a3D, b3D, res, $bias]);
            return { value: Object(reshape["a" /* reshape */])(res, outShape), gradFunc: grad };
        });
        return customOpWithBias(a3D, b3D, $bias);
    }
}
const matMul = Object(operation["b" /* op */])({ fusedMatMul_ });
//# sourceMappingURL=mat_mul.js.map
// CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/fused_ops.js
/**
* @license
* Copyright 2019 Google LLC. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================================
*/
//# sourceMappingURL=fused_ops.js.map
// CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/signal/hamming_window.js
/**
* @license
* Copyright 2019 Google LLC. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================================
*/
/**
* Generate a hamming window.
*
* See: https://en.wikipedia.org/wiki/Window_function#Hann_and_Hamming_windows
*
* ```js
* tf.signal.hammingWindow(10).print();
* ```
* @param The length of window
*
* @doc {heading: 'Operations', subheading: 'Signal', namespace: 'signal'}
*/
function hammingWindow_(windowLength) {
    // The Hamming window is a raised cosine with alpha = 0.54, beta = 0.46:
    // w[n] = 0.54 - 0.46 * cos(2*pi*n / (N-1)).
    const alpha = 0.54;
    const beta = 0.46;
    return cosineWindow(windowLength, alpha, beta);
}
const hammingWindow = Object(operation["b" /* op */])({ hammingWindow_ });
//# sourceMappingURL=hamming_window.js.map
// CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/signal/hann_window.js
/**
* @license
* Copyright 2019 Google LLC. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================================
*/
/**
* Generate a Hann window.
*
* See: https://en.wikipedia.org/wiki/Window_function#Hann_and_Hamming_windows
*
* ```js
* tf.signal.hannWindow(10).print();
* ```
* @param The length of window
*
* @doc {heading: 'Operations', subheading: 'Signal', namespace: 'signal'}
*/
function hannWindow_(windowLength) {
    // The Hann window is a raised cosine with alpha = beta = 0.5:
    // w[n] = 0.5 - 0.5 * cos(2*pi*n / (N-1)).
    const half = 0.5;
    return cosineWindow(windowLength, half, half);
}
const hannWindow = Object(operation["b" /* op */])({ hannWindow_ });
//# sourceMappingURL=hann_window.js.map
// CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/signal/frame.js
/**
* @license
* Copyright 2019 Google LLC. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================================
*/
/**
* Expands input into frames of frameLength.
* Slides a window size with frameStep.
*
* ```js
* tf.signal.frame([1, 2, 3], 2, 1).print();
* ```
* @param signal The input tensor to be expanded
* @param frameLength Length of each frame
* @param frameStep The frame hop size in samples.
* @param padEnd Whether to pad the end of signal with padValue.
* @param padValue An number to use where the input signal does
* not exist when padEnd is True.
*
* @doc {heading: 'Operations', subheading: 'Signal', namespace: 'signal'}
*/
function frame_(signal, frameLength, frameStep, padEnd = false, padValue = 0) {
    // Collect every full-length window first, then (optionally) the padded
    // tail windows, and finally stack everything into a
    // [numFrames, frameLength] tensor.
    const frames = [];
    let offset = 0;
    // Full frames: a window of `frameLength` samples, advancing by
    // `frameStep` samples per frame.
    for (; offset + frameLength <= signal.size; offset += frameStep) {
        frames.push(Object(slice["a" /* slice */])(signal, offset, frameLength));
    }
    // Partial frames at the end of the signal, completed with `padValue`.
    if (padEnd) {
        for (; offset < signal.size; offset += frameStep) {
            const missing = (offset + frameLength) - signal.size;
            const head = Object(slice["a" /* slice */])(signal, offset, frameLength - missing);
            const tail = Object(fill["a" /* fill */])([missing], padValue);
            frames.push(Object(concat["a" /* concat */])([head, tail]));
        }
    }
    // No frames at all: return an empty [0, frameLength] tensor.
    if (frames.length === 0) {
        return tensor2d([], [0, frameLength]);
    }
    return Object(reshape["a" /* reshape */])(Object(concat["a" /* concat */])(frames), [frames.length, frameLength]);
}
const frame_frame = Object(operation["b" /* op */])({ frame_ });
//# sourceMappingURL=frame.js.map
// CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/signal/stft.js
/**
* @license
* Copyright 2019 Google LLC. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================================
*/
/**
* Computes the Short-time Fourier Transform of signals
* See: https://en.wikipedia.org/wiki/Short-time_Fourier_transform
*
* ```js
* const input = tf.tensor1d([1, 1, 1, 1, 1])
* tf.signal.stft(input, 3, 1).print();
* ```
* @param signal 1-dimensional real value tensor.
* @param frameLength The window length of samples.
* @param frameStep The number of samples to step.
* @param fftLength The size of the FFT to apply.
* @param windowFn A callable that takes a window length and returns 1-d tensor.
*
* @doc {heading: 'Operations', subheading: 'Signal', namespace: 'signal'}
*/
function stft_(signal, frameLength, frameStep, fftLength, windowFn = hannWindow) {
    // Default the FFT size to the enclosing power of two of the frame length.
    const fftSize = fftLength == null ? enclosingPowerOfTwo(frameLength) : fftLength;
    // Split the signal into overlapping frames, apply the window to each
    // frame, then take the real FFT of every windowed frame.
    const frames = frame_frame(signal, frameLength, frameStep);
    const windowed = Object(mul["a" /* mul */])(frames, windowFn(frameLength));
    return Object(rfft["a" /* rfft */])(windowed, fftSize);
}
const stft = Object(operation["b" /* op */])({ stft_ });
//# sourceMappingURL=stft.js.map
// CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/image/crop_and_resize.js
/**
* @license
* Copyright 2020 Google LLC. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================================
*/
/**
* Extracts crops from the input image tensor and resizes them using bilinear
* sampling or nearest neighbor sampling (possibly with aspect ratio change)
* to a common output size specified by cropSize.
*
* @param image 4d tensor of shape `[batch,imageHeight,imageWidth, depth]`,
* where imageHeight and imageWidth must be positive, specifying the
* batch of images from which to take crops
* @param boxes 2d float32 tensor of shape `[numBoxes, 4]`. Each entry is
* `[y1, x1, y2, x2]`, where `(y1, x1)` and `(y2, x2)` are the normalized
* coordinates of the box in the boxInd[i]'th image in the batch
* @param boxInd 1d int32 tensor of shape `[numBoxes]` with values in range
* `[0, batch)` that specifies the image that the `i`-th box refers to.
* @param cropSize 1d int32 tensor of 2 elements `[cropHeigh, cropWidth]`
* specifying the size to which all crops are resized to.
* @param method Optional string from `'bilinear' | 'nearest'`,
* defaults to bilinear, which specifies the sampling method for resizing
* @param extrapolationValue A threshold for deciding when to remove boxes based
* on score. Defaults to 0.
* @return A 4D tensor of the shape `[numBoxes,cropHeight,cropWidth,depth]`
*
* @doc {heading: 'Operations', subheading: 'Images', namespace: 'image'}
*/
function cropAndResize_(image, boxes, boxInd, cropSize, method = 'bilinear', extrapolationValue = 0) {
    // Coerce tensor-like inputs; boxes must be float32, box indices int32.
    const $image = Object(tensor_util_env["a" /* convertToTensor */])(image, 'image', 'cropAndResize');
    const $boxes = Object(tensor_util_env["a" /* convertToTensor */])(boxes, 'boxes', 'cropAndResize', 'float32');
    const $boxInd = Object(tensor_util_env["a" /* convertToTensor */])(boxInd, 'boxInd', 'cropAndResize', 'int32');
    const numBoxes = $boxes.shape[0];
    // Validate ranks/shapes before dispatching to the backend kernel.
    util_base["b" /* assert */]($image.rank === 4, () => 'Error in cropAndResize: image must be rank 4,' +
        `but got rank ${$image.rank}.`);
    util_base["b" /* assert */]($boxes.rank === 2 && $boxes.shape[1] === 4, () => `Error in cropAndResize: boxes must be have size [${numBoxes},4] ` +
        `but had shape ${$boxes.shape}.`);
    // Fix: this message previously interpolated $boxes.shape; it must report
    // the shape of $boxInd, the tensor actually being validated here.
    util_base["b" /* assert */]($boxInd.rank === 1 && $boxInd.shape[0] === numBoxes, () => `Error in cropAndResize: boxInd must be have size [${numBoxes}] ` +
        `but had shape ${$boxInd.shape}.`);
    util_base["b" /* assert */](cropSize.length === 2, () => `Error in cropAndResize: cropSize must be of length 2, but got ` +
        `length ${cropSize.length}.`);
    util_base["b" /* assert */](cropSize[0] >= 1 && cropSize[1] >= 1, () => `cropSize must be atleast [1,1], but was ${cropSize}`);
    util_base["b" /* assert */](method === 'bilinear' || method === 'nearest', () => `method must be bilinear or nearest, but was ${method}`);
    const inputs = { image: $image, boxes: $boxes, boxInd: $boxInd };
    const attrs = { method, extrapolationValue, cropSize };
    // Returns a [numBoxes, cropHeight, cropWidth, depth] tensor.
    const res = engine["a" /* ENGINE */].runKernel(kernel_names["K" /* CropAndResize */], inputs, attrs);
    return res;
}
const cropAndResize = Object(operation["b" /* op */])({ cropAndResize_ });
//# sourceMappingURL=crop_and_resize.js.map
// CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/image/flip_left_right.js
/**
* @license
* Copyright 2020 Google LLC. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================================
*/
/**
* Flips the image left to right. Currently available in the CPU, WebGL, and
* WASM backends.
*
* @param image 4d tensor of shape `[batch, imageHeight, imageWidth, depth]`.
*/
/** @doc {heading: 'Operations', subheading: 'Images', namespace: 'image'} */
function flipLeftRight_(image) {
    // Coerce the input (tensor or tensor-like) to a float32 tensor.
    const $image = Object(tensor_util_env["a" /* convertToTensor */])(image, 'image', 'flipLeftRight', 'float32');
    util_base["b" /* assert */]($image.rank === 4, () => `Error in flipLeftRight: image must be rank 4,but got rank ${$image.rank}.`);
    // Delegate the actual flip to the active backend's kernel.
    return engine["a" /* ENGINE */].runKernel(kernel_names["fb" /* FlipLeftRight */], { image: $image }, {});
}
const flipLeftRight = Object(operation["b" /* op */])({ flipLeftRight_ });
//# sourceMappingURL=flip_left_right.js.map
// CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/image/rotate_with_offset.js
/**
* @license
* Copyright 2020 Google LLC. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================================
*/
/**
* Rotates the input image tensor counter-clockwise with an optional offset
* center of rotation. Currently available in the CPU, WebGL, and WASM backends.
*
* @param image 4d tensor of shape `[batch, imageHeight, imageWidth, depth]`.
* @param radians The amount of rotation.
* @param fillValue The value to fill in the empty space leftover
* after rotation. Can be either a single grayscale value (0-255), or an
* array of three numbers `[red, green, blue]` specifying the red, green,
* and blue channels. Defaults to `0` (black).
* @param center The center of rotation. Can be either a single value (0-1), or
* an array of two numbers `[centerX, centerY]`. Defaults to `0.5` (rotates
* the image around its center).
*
* @doc {heading: 'Operations', subheading: 'Images', namespace: 'image'}
*/
function rotateWithOffset_(image, radians, fillValue = 0, center = 0.5) {
    // Coerce the input (tensor or tensor-like) to a float32 tensor.
    const $image = Object(tensor_util_env["a" /* convertToTensor */])(image, 'image', 'rotateWithOffset', 'float32');
    util_base["b" /* assert */]($image.rank === 4, () => `Error in rotateWithOffset: image must be rank 4,but got rank ${$image.rank}.`);
    // Rotation itself is performed by the active backend's kernel.
    return engine["a" /* ENGINE */].runKernel(kernel_names["vc" /* RotateWithOffset */], { image: $image }, { radians, fillValue, center });
}
const rotateWithOffset = Object(operation["b" /* op */])({ rotateWithOffset_ });
//# sourceMappingURL=rotate_with_offset.js.map
// CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/nonmax_util.js
/**
* @license
* Copyright 2020 Google LLC. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================================
*/
function nonMaxSuppSanityCheck(boxes, scores, maxOutputSize, iouThreshold, scoreThreshold, softNmsSigma) {
    // Fill in defaults for any unspecified thresholds.
    iouThreshold = iouThreshold == null ? 0.5 : iouThreshold;
    scoreThreshold = scoreThreshold == null ? Number.NEGATIVE_INFINITY : scoreThreshold;
    softNmsSigma = softNmsSigma == null ? 0.0 : softNmsSigma;
    // The number of selected boxes can never exceed the number of candidates.
    const numBoxes = boxes.shape[0];
    maxOutputSize = Math.min(maxOutputSize, numBoxes);
    // Validate each argument; assertion order matters for which error fires
    // first, so it matches the original sequence.
    util_base["b" /* assert */](0 <= iouThreshold && iouThreshold <= 1, () => `iouThreshold must be in [0, 1], but was '${iouThreshold}'`);
    util_base["b" /* assert */](boxes.rank === 2, () => `boxes must be a 2D tensor, but was of rank '${boxes.rank}'`);
    util_base["b" /* assert */](boxes.shape[1] === 4, () => `boxes must have 4 columns, but 2nd dimension was ${boxes.shape[1]}`);
    util_base["b" /* assert */](scores.rank === 1, () => 'scores must be a 1D tensor');
    util_base["b" /* assert */](scores.shape[0] === numBoxes, () => `scores has incompatible shape with boxes. Expected ${numBoxes}, ` +
        `but was ${scores.shape[0]}`);
    util_base["b" /* assert */](0 <= softNmsSigma && softNmsSigma <= 1, () => `softNmsSigma must be in [0, 1], but was '${softNmsSigma}'`);
    return { maxOutputSize, iouThreshold, scoreThreshold, softNmsSigma };
}
//# sourceMappingURL=nonmax_util.js.map
// CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/image/non_max_suppression.js
/**
* @license
* Copyright 2020 Google LLC. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================================
*/
/**
* Performs non maximum suppression of bounding boxes based on
* iou (intersection over union).
*
* @param boxes a 2d tensor of shape `[numBoxes, 4]`. Each entry is
* `[y1, x1, y2, x2]`, where `(y1, x1)` and `(y2, x2)` are the corners of
* the bounding box.
* @param scores a 1d tensor providing the box scores of shape `[numBoxes]`.
* @param maxOutputSize The maximum number of boxes to be selected.
* @param iouThreshold A float representing the threshold for deciding whether
* boxes overlap too much with respect to IOU. Must be between [0, 1].
* Defaults to 0.5 (50% box overlap).
* @param scoreThreshold A threshold for deciding when to remove boxes based
* on score. Defaults to -inf, which means any score is accepted.
* @return A 1D tensor with the selected box indices.
*
* @doc {heading: 'Operations', subheading: 'Images', namespace: 'image'}
*/
function nonMaxSuppression_(boxes, scores, maxOutputSize, iouThreshold = 0.5, scoreThreshold = Number.NEGATIVE_INFINITY) {
    const $boxes = Object(tensor_util_env["a" /* convertToTensor */])(boxes, 'boxes', 'nonMaxSuppression');
    const $scores = Object(tensor_util_env["a" /* convertToTensor */])(scores, 'scores', 'nonMaxSuppression');
    // Apply defaults, clamp maxOutputSize, and validate all arguments.
    const sanitized = nonMaxSuppSanityCheck($boxes, $scores, maxOutputSize, iouThreshold, scoreThreshold);
    const attrs = {
        maxOutputSize: sanitized.maxOutputSize,
        iouThreshold: sanitized.iouThreshold,
        scoreThreshold: sanitized.scoreThreshold
    };
    return engine["a" /* ENGINE */].runKernel(kernel_names["Xb" /* NonMaxSuppressionV3 */], { boxes: $boxes, scores: $scores }, attrs);
}
const nonMaxSuppression = Object(operation["b" /* op */])({ nonMaxSuppression_ });
//# sourceMappingURL=non_max_suppression.js.map
// EXTERNAL MODULE: ./node_modules/@tensorflow/tfjs-core/dist/backends/non_max_suppression_impl.js + 1 modules
var non_max_suppression_impl = __webpack_require__(79);
// CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/image/non_max_suppression_async.js
/**
* @license
* Copyright 2020 Google LLC. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================================
*/
/**
* Performs non maximum suppression of bounding boxes based on
* iou (intersection over union).
*
* This is the async version of `nonMaxSuppression`
*
* @param boxes a 2d tensor of shape `[numBoxes, 4]`. Each entry is
* `[y1, x1, y2, x2]`, where `(y1, x1)` and `(y2, x2)` are the corners of
* the bounding box.
* @param scores a 1d tensor providing the box scores of shape `[numBoxes]`.
* @param maxOutputSize The maximum number of boxes to be selected.
* @param iouThreshold A float representing the threshold for deciding whether
* boxes overlap too much with respect to IOU. Must be between [0, 1].
* Defaults to 0.5 (50% box overlap).
* @param scoreThreshold A threshold for deciding when to remove boxes based
* on score. Defaults to -inf, which means any score is accepted.
* @return A 1D tensor with the selected box indices.
*
* @doc {heading: 'Operations', subheading: 'Images', namespace: 'image'}
*/
async function nonMaxSuppressionAsync_(boxes, scores, maxOutputSize, iouThreshold = 0.5, scoreThreshold = Number.NEGATIVE_INFINITY) {
    const $boxes = Object(tensor_util_env["a" /* convertToTensor */])(boxes, 'boxes', 'nonMaxSuppressionAsync');
    const $scores = Object(tensor_util_env["a" /* convertToTensor */])(scores, 'scores', 'nonMaxSuppressionAsync');
    // Apply defaults, clamp maxOutputSize, and validate all arguments.
    const params = nonMaxSuppSanityCheck($boxes, $scores, maxOutputSize, iouThreshold, scoreThreshold);
    // Download both tensors in parallel. We call a cpu based impl directly
    // with the typedarray data here rather than a kernel because all kernels
    // are synchronous (and thus cannot await .data()).
    const [boxesVals, scoresVals] = await Promise.all([$boxes.data(), $scores.data()]);
    const { selectedIndices } = Object(non_max_suppression_impl["a" /* nonMaxSuppressionV3Impl */])(boxesVals, scoresVals, params.maxOutputSize, params.iouThreshold, params.scoreThreshold);
    // Free any tensors this op created while converting its inputs.
    if ($boxes !== boxes) {
        $boxes.dispose();
    }
    if ($scores !== scores) {
        $scores.dispose();
    }
    return Object(tensor1d["a" /* tensor1d */])(selectedIndices, 'int32');
}
const nonMaxSuppressionAsync = nonMaxSuppressionAsync_;
//# sourceMappingURL=non_max_suppression_async.js.map
// CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/image/non_max_suppression_with_score.js
/**
* @license
* Copyright 2020 Google LLC. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================================
*/
/**
* Performs non maximum suppression of bounding boxes based on
* iou (intersection over union).
*
* This op also supports a Soft-NMS mode (c.f.
* Bodla et al, https://arxiv.org/abs/1704.04503) where boxes reduce the score
* of other overlapping boxes, therefore favoring different regions of the image
* with high scores. To enable this Soft-NMS mode, set the `softNmsSigma`
* parameter to be larger than 0.
*
* @param boxes a 2d tensor of shape `[numBoxes, 4]`. Each entry is
* `[y1, x1, y2, x2]`, where `(y1, x1)` and `(y2, x2)` are the corners of
* the bounding box.
* @param scores a 1d tensor providing the box scores of shape `[numBoxes]`.
* @param maxOutputSize The maximum number of boxes to be selected.
* @param iouThreshold A float representing the threshold for deciding whether
* boxes overlap too much with respect to IOU. Must be between [0, 1].
* Defaults to 0.5 (50% box overlap).
* @param scoreThreshold A threshold for deciding when to remove boxes based
* on score. Defaults to -inf, which means any score is accepted.
* @param softNmsSigma A float representing the sigma parameter for Soft NMS.
* When sigma is 0, it falls back to nonMaxSuppression.
* @return A map with the following properties:
* - selectedIndices: A 1D tensor with the selected box indices.
* - selectedScores: A 1D tensor with the corresponding scores for each
* selected box.
*
* @doc {heading: 'Operations', subheading: 'Images', namespace: 'image'}
*/
/**
 * Runs (optionally Soft-)NMS via the NonMaxSuppressionV5 kernel and returns
 * both the selected box indices and their (possibly rescored) scores.
 * See the doc comment above for parameter details.
 */
function nonMaxSuppressionWithScore_(boxes, scores, maxOutputSize, iouThreshold = 0.5, scoreThreshold = Number.NEGATIVE_INFINITY, softNmsSigma = 0.0) {
    const $boxes = Object(tensor_util_env["a" /* convertToTensor */])(boxes, 'boxes', 'nonMaxSuppression');
    const $scores = Object(tensor_util_env["a" /* convertToTensor */])(scores, 'scores', 'nonMaxSuppression');
    // Validate and normalize all numeric parameters in one place.
    const params = nonMaxSuppSanityCheck($boxes, $scores, maxOutputSize, iouThreshold, scoreThreshold, softNmsSigma);
    ({ maxOutputSize, iouThreshold, scoreThreshold, softNmsSigma } = params);
    const inputs = { boxes: $boxes, scores: $scores };
    const attrs = { maxOutputSize, iouThreshold, scoreThreshold, softNmsSigma };
    // The V5 kernel returns [selectedIndices, selectedScores].
    const [selectedIndices, selectedScores] = engine["a" /* ENGINE */].runKernel(kernel_names["Zb" /* NonMaxSuppressionV5 */], inputs, attrs);
    return { selectedIndices, selectedScores };
}
const nonMaxSuppressionWithScore = Object(operation["b" /* op */])({ nonMaxSuppressionWithScore_ });
//# sourceMappingURL=non_max_suppression_with_score.js.map
// CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/image/non_max_suppression_with_score_async.js
/**
* @license
* Copyright 2020 Google LLC. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================================
*/
/**
* Asynchronously performs non maximum suppression of bounding boxes based on
* iou (intersection over union).
*
* This op also supports a Soft-NMS mode (c.f.
* Bodla et al, https://arxiv.org/abs/1704.04503) where boxes reduce the score
* of other overlapping boxes, therefore favoring different regions of the image
* with high scores. To enable this Soft-NMS mode, set the `softNmsSigma`
* parameter to be larger than 0.
*
* @param boxes a 2d tensor of shape `[numBoxes, 4]`. Each entry is
* `[y1, x1, y2, x2]`, where `(y1, x1)` and `(y2, x2)` are the corners of
* the bounding box.
* @param scores a 1d tensor providing the box scores of shape `[numBoxes]`.
* @param maxOutputSize The maximum number of boxes to be selected.
* @param iouThreshold A float representing the threshold for deciding whether
* boxes overlap too much with respect to IOU. Must be between [0, 1].
* Defaults to 0.5 (50% box overlap).
* @param scoreThreshold A threshold for deciding when to remove boxes based
* on score. Defaults to -inf, which means any score is accepted.
* @param softNmsSigma A float representing the sigma parameter for Soft NMS.
* When sigma is 0, it falls back to nonMaxSuppression.
* @return A map with the following properties:
* - selectedIndices: A 1D tensor with the selected box indices.
* - selectedScores: A 1D tensor with the corresponding scores for each
* selected box.
*
* @doc {heading: 'Operations', subheading: 'Images', namespace: 'image'}
*/
/**
 * Async variant of `nonMaxSuppressionWithScore_`: downloads the tensor data
 * first and then calls the CPU implementation directly, because kernels are
 * synchronous and therefore cannot await `.data()`.
 */
async function nonMaxSuppressionWithScoreAsync_(boxes, scores, maxOutputSize, iouThreshold = 0.5, scoreThreshold = Number.NEGATIVE_INFINITY, softNmsSigma = 0.0) {
    const $boxes = Object(tensor_util_env["a" /* convertToTensor */])(boxes, 'boxes', 'nonMaxSuppressionAsync');
    const $scores = Object(tensor_util_env["a" /* convertToTensor */])(scores, 'scores', 'nonMaxSuppressionAsync');
    // Validate and normalize all numeric parameters in one place.
    const params = nonMaxSuppSanityCheck($boxes, $scores, maxOutputSize, iouThreshold, scoreThreshold, softNmsSigma);
    ({ maxOutputSize, iouThreshold, scoreThreshold, softNmsSigma } = params);
    // Fetch both tensors' backing data in parallel.
    const [boxesVals, scoresVals] = await Promise.all([$boxes.data(), $scores.data()]);
    const { selectedIndices, selectedScores } = Object(non_max_suppression_impl["c" /* nonMaxSuppressionV5Impl */])(boxesVals, scoresVals, maxOutputSize, iouThreshold, scoreThreshold, softNmsSigma);
    // Only dispose tensors this function itself created via conversion.
    if ($boxes !== boxes) {
        $boxes.dispose();
    }
    if ($scores !== scores) {
        $scores.dispose();
    }
    return {
        selectedIndices: Object(tensor1d["a" /* tensor1d */])(selectedIndices, 'int32'),
        selectedScores: Object(tensor1d["a" /* tensor1d */])(selectedScores)
    };
}
const nonMaxSuppressionWithScoreAsync = nonMaxSuppressionWithScoreAsync_;
//# sourceMappingURL=non_max_suppression_with_score_async.js.map
// CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/image/non_max_suppression_padded.js
/**
* @license
* Copyright 2020 Google LLC. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================================
*/
/**
* Asynchronously performs non maximum suppression of bounding boxes based on
* iou (intersection over union), with an option to pad results.
*
* @param boxes a 2d tensor of shape `[numBoxes, 4]`. Each entry is
* `[y1, x1, y2, x2]`, where `(y1, x1)` and `(y2, x2)` are the corners of
* the bounding box.
* @param scores a 1d tensor providing the box scores of shape `[numBoxes]`.
* @param maxOutputSize The maximum number of boxes to be selected.
* @param iouThreshold A float representing the threshold for deciding whether
* boxes overlap too much with respect to IOU. Must be between [0, 1].
* Defaults to 0.5 (50% box overlap).
* @param scoreThreshold A threshold for deciding when to remove boxes based
* on score. Defaults to -inf, which means any score is accepted.
 * @param padToMaxOutputSize Defaults to false. If true, size of output
* `selectedIndices` is padded to maxOutputSize.
* @return A map with the following properties:
* - selectedIndices: A 1D tensor with the selected box indices.
* - validOutputs: A scalar denoting how many elements in `selectedIndices`
* are valid. Valid elements occur first, then padding.
*
* @doc {heading: 'Operations', subheading: 'Images', namespace: 'image'}
*/
/**
 * NMS with an optional padded output: when `padToMaxOutputSize` is true the
 * `selectedIndices` output is padded to `maxOutputSize`, and `validOutputs`
 * reports how many leading entries are real selections.
 */
function nonMaxSuppressionPadded_(boxes, scores, maxOutputSize, iouThreshold = 0.5, scoreThreshold = Number.NEGATIVE_INFINITY, padToMaxOutputSize = false) {
    const $boxes = Object(tensor_util_env["a" /* convertToTensor */])(boxes, 'boxes', 'nonMaxSuppression');
    const $scores = Object(tensor_util_env["a" /* convertToTensor */])(scores, 'scores', 'nonMaxSuppression');
    // The padded variant has no Soft-NMS mode, hence the null sigma.
    const params = nonMaxSuppSanityCheck($boxes, $scores, maxOutputSize, iouThreshold, scoreThreshold, null /* softNmsSigma */);
    const inputs = { boxes: $boxes, scores: $scores };
    const attrs = {
        maxOutputSize: params.maxOutputSize,
        iouThreshold: params.iouThreshold,
        scoreThreshold: params.scoreThreshold,
        padToMaxOutputSize
    };
    // The V4 kernel returns [selectedIndices, validOutputs].
    const [selectedIndices, validOutputs] = engine["a" /* ENGINE */].runKernel(kernel_names["Yb" /* NonMaxSuppressionV4 */], inputs, attrs);
    return { selectedIndices, validOutputs };
}
const nonMaxSuppressionPadded = Object(operation["b" /* op */])({ nonMaxSuppressionPadded_ });
//# sourceMappingURL=non_max_suppression_padded.js.map
// CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/image/non_max_suppression_padded_async.js
/**
* @license
* Copyright 2020 Google LLC. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================================
*/
/**
* Asynchronously performs non maximum suppression of bounding boxes based on
* iou (intersection over union), with an option to pad results.
*
* @param boxes a 2d tensor of shape `[numBoxes, 4]`. Each entry is
* `[y1, x1, y2, x2]`, where `(y1, x1)` and `(y2, x2)` are the corners of
* the bounding box.
* @param scores a 1d tensor providing the box scores of shape `[numBoxes]`.
* @param maxOutputSize The maximum number of boxes to be selected.
* @param iouThreshold A float representing the threshold for deciding whether
* boxes overlap too much with respect to IOU. Must be between [0, 1].
* Defaults to 0.5 (50% box overlap).
* @param scoreThreshold A threshold for deciding when to remove boxes based
* on score. Defaults to -inf, which means any score is accepted.
 * @param padToMaxOutputSize Defaults to false. If true, size of output
* `selectedIndices` is padded to maxOutputSize.
* @return A map with the following properties:
* - selectedIndices: A 1D tensor with the selected box indices.
* - validOutputs: A scalar denoting how many elements in `selectedIndices`
* are valid. Valid elements occur first, then padding.
*
* @doc {heading: 'Operations', subheading: 'Images', namespace: 'image'}
*/
/**
 * Async padded NMS: downloads the tensor data and runs the CPU V4
 * implementation directly, since kernels are synchronous and cannot
 * await `.data()`.
 */
async function nonMaxSuppressionPaddedAsync_(boxes, scores, maxOutputSize, iouThreshold = 0.5, scoreThreshold = Number.NEGATIVE_INFINITY, padToMaxOutputSize = false) {
    const $boxes = Object(tensor_util_env["a" /* convertToTensor */])(boxes, 'boxes', 'nonMaxSuppressionAsync');
    const $scores = Object(tensor_util_env["a" /* convertToTensor */])(scores, 'scores', 'nonMaxSuppressionAsync');
    // The padded variant has no Soft-NMS mode, hence the null sigma.
    const params = nonMaxSuppSanityCheck($boxes, $scores, maxOutputSize, iouThreshold, scoreThreshold, null /* softNmsSigma */);
    // Fetch both tensors' backing data in parallel.
    const dataPair = await Promise.all([$boxes.data(), $scores.data()]);
    const { selectedIndices, validOutputs } = Object(non_max_suppression_impl["b" /* nonMaxSuppressionV4Impl */])(dataPair[0], dataPair[1], params.maxOutputSize, params.iouThreshold, params.scoreThreshold, padToMaxOutputSize);
    // Only dispose tensors this function itself created via conversion.
    if ($boxes !== boxes) {
        $boxes.dispose();
    }
    if ($scores !== scores) {
        $scores.dispose();
    }
    return {
        selectedIndices: Object(tensor1d["a" /* tensor1d */])(selectedIndices, 'int32'),
        validOutputs: Object(scalar["a" /* scalar */])(validOutputs, 'int32')
    };
}
const nonMaxSuppressionPaddedAsync = nonMaxSuppressionPaddedAsync_;
//# sourceMappingURL=non_max_suppression_padded_async.js.map
// EXTERNAL MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/image/resize_bilinear.js
var resize_bilinear = __webpack_require__(211);
// EXTERNAL MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/image/resize_nearest_neighbor.js
var resize_nearest_neighbor = __webpack_require__(210);
// CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/image/transform.js
/**
* @license
* Copyright 2021 Google LLC. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================================
*/
/**
* Applies the given transform(s) to the image(s).
*
* @param image 4d tensor of shape `[batch, imageHeight, imageWidth, depth]`.
* @param transforms Projective transform matrix/matrices. A tensor1d of length
* 8 or tensor of size N x 8. If one row of transforms is [a0, a1, a2, b0
* b1, b2, c0, c1], then it maps the output point (x, y) to a transformed
* input point (x', y') = ((a0 x + a1 y + a2) / k, (b0 x + b1 y + b2) / k),
* where k = c0 x + c1 y + 1. The transforms are inverted compared to the
* transform mapping input points to output points.
* @param interpolation Interpolation mode.
* Supported values: 'nearest', 'bilinear'. Default to 'nearest'.
* @param fillMode Points outside the boundaries of the input are filled
* according to the given mode, one of 'constant', 'reflect', 'wrap',
* 'nearest'. Default to 'constant'.
* 'reflect': (d c b a | a b c d | d c b a ) The input is extended by
* reflecting about the edge of the last pixel.
* 'constant': (k k k k | a b c d | k k k k) The input is extended by
* filling all values beyond the edge with the same constant value k.
* 'wrap': (a b c d | a b c d | a b c d) The input is extended by
* wrapping around to the opposite edge.
* 'nearest': (a a a a | a b c d | d d d d) The input is extended by
* the nearest pixel.
* @param fillValue A float represents the value to be filled outside the
* boundaries when fillMode is 'constant'.
 * @param outputShape Output dimension after the transform, [height, width]. If undefined,
* output is the same size as input image.
*
* @doc {heading: 'Operations', subheading: 'Images', namespace: 'image'}
*/
/**
 * Applies projective transform(s) to a batch of images via the Transform
 * kernel. Validates that `image` is rank 4 and that `transforms` is a
 * [batch, 8] or [1, 8] matrix before dispatching.
 */
function transform_(image, transforms, interpolation = 'nearest', fillMode = 'constant', fillValue = 0, outputShape) {
    const $image = Object(tensor_util_env["a" /* convertToTensor */])(image, 'image', 'transform', 'float32');
    const $transforms = Object(tensor_util_env["a" /* convertToTensor */])(transforms, 'transforms', 'transform', 'float32');
    util_base["b" /* assert */]($image.rank === 4, () => 'Error in transform: image must be rank 4,' +
        `but got rank ${$image.rank}.`);
    // One transform per image, or a single transform shared by the batch.
    const transformsShapeOk = $transforms.rank === 2 &&
        ($transforms.shape[0] === $image.shape[0] || $transforms.shape[0] === 1) &&
        $transforms.shape[1] === 8;
    util_base["b" /* assert */](transformsShapeOk, () => `Error in transform: Input transform should be batch x 8 or 1 x 8`);
    util_base["b" /* assert */](outputShape == null || outputShape.length === 2, () => 'Error in transform: outputShape must be [height, width] or null, ' +
        `but got ${outputShape}.`);
    const inputs = { image: $image, transforms: $transforms };
    const attrs = { interpolation, fillMode, fillValue, outputShape };
    return engine["a" /* ENGINE */].runKernel(kernel_names["Xc" /* Transform */], inputs, attrs);
}
const transform = Object(operation["b" /* op */])({ transform_ });
//# sourceMappingURL=transform.js.map
// CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/linalg/band_part.js
/**
* @license
* Copyright 2020 Google LLC. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================================
*/
/**
* Copy a tensor setting everything outside a central band in each innermost
* matrix to zero.
*
* The band part is computed as follows: Assume input has `k` dimensions
* `[I, J, K, ..., M, N]`, then the output is a tensor with the same shape where
* `band[i, j, k, ..., m, n] = in_band(m, n) * input[i, j, k, ..., m, n]`.
* The indicator function
 * `in_band(m, n) = (num_lower < 0 || (m-n) <= num_lower)`
* `&& (num_upper < 0 || (n-m) <= num_upper)`
*
* ```js
* const x = tf.tensor2d([[ 0, 1, 2, 3],
* [-1, 0, 1, 2],
* [-2, -1, 0, 1],
* [-3, -2, -1, 0]]);
* let y = tf.linalg.bandPart(x, 1, -1);
* y.print(); // [[ 0, 1, 2, 3],
* // [-1, 0, 1, 2],
* // [ 0, -1, 0, 1],
* // [ 0, 0 , -1, 0]]
* let z = tf.linalg.bandPart(x, 2, 1);
* z.print(); // [[ 0, 1, 0, 0],
* // [-1, 0, 1, 0],
* // [-2, -1, 0, 1],
* // [ 0, -2, -1, 0]]
* ```
*
* @param x Rank `k` tensor
* @param numLower Number of subdiagonals to keep.
* If negative, keep entire lower triangle.
 * @param numUpper Number of superdiagonals to keep.
* If negative, keep entire upper triangle.
* @returns Rank `k` tensor of the same shape as input.
* The extracted banded tensor.
*
* @doc {heading:'Operations', subheading:'Linear Algebra', namespace:'linalg'}
*/
/**
 * Zeroes out everything outside a central band of each innermost matrix:
 * entry (m, n) is kept iff -numUpper <= m - n <= numLower. A negative bound
 * keeps the entire triangle on that side.
 */
function bandPart_(a, numLower, numUpper) {
    Object(util_base["b" /* assert */])(numLower % 1 === 0, () => `bandPart(): numLower must be an integer, got ${numLower}.`);
    Object(util_base["b" /* assert */])(numUpper % 1 === 0, () => `bandPart(): numUpper must be an integer, got ${numUpper}.`);
    const $a = Object(tensor_util_env["a" /* convertToTensor */])(a, 'a', 'bandPart');
    Object(util_base["b" /* assert */])($a.rank >= 2, () => `bandPart(): Rank must be at least 2, got ${$a.rank}.`);
    const shape = $a.shape;
    const [M, N] = $a.shape.slice(-2);
    if (!(numLower <= M)) {
        throw new Error(`bandPart(): numLower (${numLower})` +
            ` must not be greater than the number of rows (${M}).`);
    }
    if (!(numUpper <= N)) {
        throw new Error(`bandPart(): numUpper (${numUpper})` +
            ` must not be greater than the number of columns (${N}).`);
    }
    // A negative bound means "keep that whole triangle".
    numLower = numLower < 0 ? M : numLower;
    numUpper = numUpper < 0 ? N : numUpper;
    // Build an [M, N] boolean mask over (row - col) differences.
    const rowIdx = Object(reshape["a" /* reshape */])(Object(range["a" /* range */])(0, M, 1, 'int32'), [-1, 1]);
    const colIdx = Object(range["a" /* range */])(0, N, 1, 'int32');
    const rowMinusCol = Object(sub["a" /* sub */])(rowIdx, colIdx);
    const keepMask = Object(logical_and["a" /* logicalAnd */])(Object(less_equal["a" /* lessEqual */])(rowMinusCol, Object(scalar["a" /* scalar */])(+numLower, 'int32')), Object(greater_equal["a" /* greaterEqual */])(rowMinusCol, Object(scalar["a" /* scalar */])(-numUpper, 'int32')));
    const zeroMat = Object(zeros["a" /* zeros */])([M, N], $a.dtype);
    // Mask every innermost matrix, then restore the original shape.
    const banded = Object(unstack["a" /* unstack */])(Object(reshape["a" /* reshape */])($a, [-1, M, N]))
        .map(mat => Object(where["a" /* where */])(keepMask, mat, zeroMat));
    return Object(reshape["a" /* reshape */])(Object(stack["a" /* stack */])(banded), shape);
}
const bandPart = Object(operation["b" /* op */])({ bandPart_ });
//# sourceMappingURL=band_part.js.map
// CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/linalg/gram_schmidt.js
/**
* @license
* Copyright 2020 Google LLC. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================================
*/
/**
* Gram-Schmidt orthogonalization.
*
* ```js
* const x = tf.tensor2d([[1, 2], [3, 4]]);
* let y = tf.linalg.gramSchmidt(x);
* y.print();
 * console.log('Orthogonalized:');
* y.dot(y.transpose()).print(); // should be nearly the identity matrix.
* console.log('First row direction maintained:');
* const data = await y.array();
* console.log(data[0][1] / data[0][0]); // should be nearly 2.
* ```
*
* @param xs The vectors to be orthogonalized, in one of the two following
* formats:
* - An Array of `tf.Tensor1D`.
* - A `tf.Tensor2D`, i.e., a matrix, in which case the vectors are the rows
* of `xs`.
* In each case, all the vectors must have the same length and the length
* must be greater than or equal to the number of vectors.
* @returns The orthogonalized and normalized vectors or matrix.
* Orthogonalization means that the vectors or the rows of the matrix
* are orthogonal (zero inner products). Normalization means that each
* vector or each row of the matrix has an L2 norm that equals `1`.
*
* @doc {heading:'Operations', subheading:'Linear Algebra', namespace:'linalg'}
*/
/**
 * Gram-Schmidt orthogonalization of a list of 1D vectors or of the rows of
 * a 2D matrix. Each output vector has unit L2 norm and is orthogonal to all
 * previously produced vectors; a matrix input yields a stacked matrix output.
 */
function gramSchmidt_(xs) {
    let inputIsTensor2D;
    if (Array.isArray(xs)) {
        inputIsTensor2D = false;
        Object(util_base["b" /* assert */])(xs != null && xs.length > 0, () => 'Gram-Schmidt process: input must not be null, undefined, or ' +
            'empty');
        // All input vectors must share the same length.
        const dim = xs[0].shape[0];
        for (let i = 1; i < xs.length; ++i) {
            Object(util_base["b" /* assert */])(xs[i].shape[0] === dim, () => 'Gram-Schmidt: Non-unique lengths found in the input vectors: ' +
                `(${xs[i].shape[0]} vs. ${dim})`);
        }
    }
    else {
        inputIsTensor2D = true;
        // Slice the matrix into its row vectors.
        xs = Object(split["a" /* split */])(xs, xs.shape[0], 0).map(x => Object(squeeze["a" /* squeeze */])(x, [0]));
    }
    Object(util_base["b" /* assert */])(xs.length <= xs[0].shape[0], () => `Gram-Schmidt: Number of vectors (${xs.length}) exceeds ` +
        `number of dimensions (${xs[0].shape[0]}).`);
    const ys = [];
    for (let i = 0; i < xs.length; ++i) {
        const orthogonalized = engine["a" /* ENGINE */].tidy(() => {
            let x = xs[i];
            // Subtract the projection of x onto every vector produced so far.
            for (const basis of ys) {
                const proj = Object(mul["a" /* mul */])(Object(sum["a" /* sum */])(Object(mul["a" /* mul */])(basis, x)), basis);
                x = Object(sub["a" /* sub */])(x, proj);
            }
            // Normalize the remainder to unit L2 norm.
            return Object(div["a" /* div */])(x, Object(norm["a" /* norm */])(x, 'euclidean'));
        });
        ys.push(orthogonalized);
    }
    return inputIsTensor2D ? Object(stack["a" /* stack */])(ys, 0) : ys;
}
const gramSchmidt = Object(operation["b" /* op */])({ gramSchmidt_ });
//# sourceMappingURL=gram_schmidt.js.map
// EXTERNAL MODULE: ./node_modules/@tensorflow/tfjs-core/dist/globals.js
var globals = __webpack_require__(26);
// CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/linalg/qr.js
/**
* @license
* Copyright 2020 Google LLC. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================================
*/
/**
* Compute QR decomposition of m-by-n matrix using Householder transformation.
*
* Implementation based on
* [http://www.cs.cornell.edu/~bindel/class/cs6210-f09/lec18.pdf]
* (http://www.cs.cornell.edu/~bindel/class/cs6210-f09/lec18.pdf)
*
* ```js
* const a = tf.tensor2d([[1, 2], [3, 4]]);
* let [q, r] = tf.linalg.qr(a);
* console.log('Q');
* q.print();
* console.log('R');
* r.print();
* console.log('Orthogonalized');
* q.dot(q.transpose()).print() // should be nearly the identity matrix.
* console.log('Reconstructed');
* q.dot(r).print(); // should be nearly [[1, 2], [3, 4]];
* ```
*
* @param x The `tf.Tensor` to be QR-decomposed. Must have rank >= 2. Suppose
* it has the shape `[..., M, N]`.
* @param fullMatrices An optional boolean parameter. Defaults to `false`.
* If `true`, compute full-sized `Q`. If `false` (the default),
* compute only the leading N columns of `Q` and `R`.
* @returns An `Array` of two `tf.Tensor`s: `[Q, R]`. `Q` is a unitary matrix,
* i.e., its columns all have unit norm and are mutually orthogonal.
* If `M >= N`,
* If `fullMatrices` is `false` (default),
* - `Q` has a shape of `[..., M, N]`,
* - `R` has a shape of `[..., N, N]`.
 *     If `fullMatrices` is `true`,
* - `Q` has a shape of `[..., M, M]`,
* - `R` has a shape of `[..., M, N]`.
* If `M < N`,
* - `Q` has a shape of `[..., M, M]`,
* - `R` has a shape of `[..., M, N]`.
* @throws If the rank of `x` is less than 2.
*
* @doc {heading:'Operations',
* subheading:'Linear Algebra',
* namespace:'linalg'}
*/
/**
 * QR decomposition entry point for tensors of rank >= 2. Rank-2 inputs go
 * straight to qr2d; higher ranks are flattened into a batch of matrices,
 * decomposed one by one, and stacked back to the input shape.
 */
function qr_(x, fullMatrices = false) {
    Object(util_base["b" /* assert */])(x.rank >= 2, () => `qr() requires input tensor to have a rank >= 2, but got rank ${x.rank}`);
    if (x.rank === 2) {
        return qr2d(x, fullMatrices);
    }
    // Rank > 2.
    // TODO(cais): Below we split the input into individual 2D tensors,
    // perform QR decomposition on them and then stack the results back
    // together. We should explore whether this can be parallelized.
    const batch = x.shape.slice(0, x.shape.length - 2)
        .reduce((value, prev) => value * prev);
    const matrices = Object(unstack["a" /* unstack */])(Object(reshape["a" /* reshape */])(x, [
        batch, x.shape[x.shape.length - 2],
        x.shape[x.shape.length - 1]
    ]), 0);
    const qParts = [];
    const rParts = [];
    for (const mat of matrices) {
        const [q2d, r2d] = qr2d(mat, fullMatrices);
        qParts.push(q2d);
        rParts.push(r2d);
    }
    const q = Object(reshape["a" /* reshape */])(Object(stack["a" /* stack */])(qParts, 0), x.shape);
    const r = Object(reshape["a" /* reshape */])(Object(stack["a" /* stack */])(rParts, 0), x.shape);
    return [q, r];
}
/**
 * QR decomposition of a single 2D tensor `x` (shape [m, n]) using
 * Householder reflections, eliminating one column per iteration.
 *
 * Returns `[q, r]` where `q` accumulates the orthogonal transforms and `r`
 * is the transformed (upper-triangularized) matrix. When `fullMatrices` is
 * false and m > n, the outputs are trimmed to economy sizes
 * (q: [m, n], r: [n, n]).
 */
function qr2d(x, fullMatrices = false) {
    return engine["a" /* ENGINE */].tidy(() => {
        Object(util_base["b" /* assert */])(x.shape.length === 2, () => `qr2d() requires a 2D Tensor, but got a ${x.shape.length}D Tensor.`);
        const m = x.shape[0];
        const n = x.shape[1];
        let q = eye(m); // Orthogonal transform so far.
        let r = Object(clone["a" /* clone */])(x); // Transformed matrix so far.
        const one2D = tensor2d([[1]], [1, 1]);
        let w = Object(clone["a" /* clone */])(one2D);
        // One Householder step per column (or per row if the matrix is wide).
        const iters = m >= n ? n : m;
        for (let j = 0; j < iters; ++j) {
            // This tidy within the for-loop ensures we clean up temporary
            // tensors as soon as they are no longer needed.
            const rTemp = r;
            const wTemp = w;
            const qTemp = q;
            [w, r, q] = engine["a" /* ENGINE */].tidy(() => {
                // Find H = I - tau * w * w', to put zeros below R(j, j).
                const rjEnd1 = Object(slice["a" /* slice */])(r, [j, j], [m - j, 1]);
                const normX = Object(norm["a" /* norm */])(rjEnd1);
                const rjj = Object(slice["a" /* slice */])(r, [j, j], [1, 1]);
                // The sign() function returns 0 on 0, which causes division by zero.
                const s = Object(where["a" /* where */])(Object(greater["a" /* greater */])(rjj, 0), tensor2d([[-1]]), tensor2d([[1]]));
                const u1 = Object(sub["a" /* sub */])(rjj, Object(mul["a" /* mul */])(s, normX));
                const wPre = Object(div["a" /* div */])(rjEnd1, u1);
                // The Householder vector always has 1 as its first entry.
                if (wPre.shape[0] === 1) {
                    w = Object(clone["a" /* clone */])(one2D);
                }
                else {
                    w = Object(concat["a" /* concat */])([
                        one2D,
                        Object(slice["a" /* slice */])(wPre, [1, 0], [wPre.shape[0] - 1, wPre.shape[1]])
                    ], 0);
                }
                const tau = Object(neg["a" /* neg */])(Object(div["a" /* div */])(Object(mat_mul["a" /* matMul */])(s, u1), normX));
                // -- R := HR, Q := QH.
                const rjEndAll = Object(slice["a" /* slice */])(r, [j, 0], [m - j, n]);
                const tauTimesW = Object(mul["a" /* mul */])(tau, w);
                const wT = Object(transpose["a" /* transpose */])(w);
                // For j > 0 only rows j.. of R change; the already-finished top
                // rows are re-attached unchanged via concat.
                if (j === 0) {
                    r = Object(sub["a" /* sub */])(rjEndAll, Object(mat_mul["a" /* matMul */])(tauTimesW, Object(mat_mul["a" /* matMul */])(wT, rjEndAll)));
                }
                else {
                    const rTimesTau = Object(sub["a" /* sub */])(rjEndAll, Object(mat_mul["a" /* matMul */])(tauTimesW, Object(mat_mul["a" /* matMul */])(wT, rjEndAll)));
                    r = Object(concat["a" /* concat */])([Object(slice["a" /* slice */])(r, [0, 0], [j, n]), rTimesTau], 0);
                }
                const tawTimesWT = Object(transpose["a" /* transpose */])(tauTimesW);
                const qAllJEnd = Object(slice["a" /* slice */])(q, [0, j], [m, q.shape[1] - j]);
                // Similarly, only columns j.. of Q change each iteration.
                if (j === 0) {
                    q = Object(sub["a" /* sub */])(qAllJEnd, Object(mat_mul["a" /* matMul */])(Object(mat_mul["a" /* matMul */])(qAllJEnd, w), tawTimesWT));
                }
                else {
                    const qTimesTau = Object(sub["a" /* sub */])(qAllJEnd, Object(mat_mul["a" /* matMul */])(Object(mat_mul["a" /* matMul */])(qAllJEnd, w), tawTimesWT));
                    q = Object(concat["a" /* concat */])([Object(slice["a" /* slice */])(q, [0, 0], [m, j]), qTimesTau], 1);
                }
                return [w, r, q];
            });
            // Free the previous iteration's w/r/q now that they were replaced.
            Object(globals["d" /* dispose */])([rTemp, wTemp, qTemp]);
        }
        if (!fullMatrices && m > n) {
            // Economy-size outputs: q -> [m, n], r -> [n, n].
            q = Object(slice["a" /* slice */])(q, [0, 0], [m, n]);
            r = Object(slice["a" /* slice */])(r, [0, 0], [n, n]);
        }
        return [q, r];
    });
}
const qr = Object(operation["b" /* op */])({ qr_ });
//# sourceMappingURL=qr.js.map
// EXTERNAL MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/loss_ops_utils.js
var loss_ops_utils = __webpack_require__(35);
// CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/losses/compute_weighted_loss.js
/**
* Computes the weighted loss between two tensors.
*
* @param losses Tensor of shape `[batch_size, d1, ... dN]`.
* @param weights Tensor whose rank is either 0, or the same rank as
* `losses`, and must be broadcastable to `losses` (i.e., all
* dimensions must be either `1`, or the same as the corresponding
* `losses` dimension).
*
* @doc {heading: 'Training', subheading: 'Losses', namespace: 'losses'}
*/
/**
 * Computes the weighted loss between two tensors.
 *
 * Multiplies `losses` element-wise by `weights` (when provided) and then
 * applies the requested `Reduction`: NONE returns the weighted losses as-is,
 * SUM totals them, MEAN divides the total by the weight total (adjusted for
 * broadcasting), and SUM_BY_NONZERO_WEIGHTS divides by the count of nonzero
 * weights (or by the number of elements when no weights are given).
 */
function computeWeightedLoss_(losses, weights, reduction = loss_ops_utils["a" /* Reduction */].SUM_BY_NONZERO_WEIGHTS) {
    const $losses = Object(tensor_util_env["a" /* convertToTensor */])(losses, 'losses', 'computeWeightedLoss');
    const $weights = weights == null ?
        null :
        Object(tensor_util_env["a" /* convertToTensor */])(weights, 'weights', 'computeWeightedLoss');
    const weightedLoss = $weights == null ? $losses : Object(mul["a" /* mul */])($losses, $weights);
    switch (reduction) {
        case loss_ops_utils["a" /* Reduction */].NONE:
            return weightedLoss;
        case loss_ops_utils["a" /* Reduction */].SUM:
            return Object(sum["a" /* sum */])(weightedLoss);
        case loss_ops_utils["a" /* Reduction */].MEAN: {
            if ($weights == null) {
                return Object(ops_mean["a" /* mean */])(weightedLoss);
            }
            // Account for weights that were broadcast across losses.
            const broadcastFactor = $losses.size / $weights.size;
            const result = Object(div["a" /* div */])(Object(sum["a" /* sum */])(weightedLoss), Object(sum["a" /* sum */])($weights));
            return broadcastFactor > 1 ?
                Object(div["a" /* div */])(result, Object(scalar["a" /* scalar */])(broadcastFactor)) :
                result;
        }
        case loss_ops_utils["a" /* Reduction */].SUM_BY_NONZERO_WEIGHTS: {
            if ($weights == null) {
                return Object(div["a" /* div */])(Object(sum["a" /* sum */])(weightedLoss), Object(scalar["a" /* scalar */])($losses.size));
            }
            // Broadcast the weights to the loss shape and count nonzeros.
            const broadcastedWeights = Object(mul["a" /* mul */])($weights, Object(ones["a" /* ones */])($losses.shape));
            const numNonZeros = Object(cast["a" /* cast */])(Object(sum["a" /* sum */])(Object(not_equal["a" /* notEqual */])(broadcastedWeights, Object(scalar["a" /* scalar */])(0))), 'float32');
            return Object(div["a" /* div */])(Object(sum["a" /* sum */])(weightedLoss), numNonZeros);
        }
        default:
            throw Error(`Unknown reduction: ${reduction}`);
    }
}
const computeWeightedLoss = Object(operation["b" /* op */])({ computeWeightedLoss_ });
//# sourceMappingURL=compute_weighted_loss.js.map
// CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/losses/absolute_difference.js
/**
* @license
* Copyright 2020 Google LLC. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================================
*/
/**
* Computes the absolute difference loss between two tensors.
*
* @param labels The ground truth output tensor, same dimensions as
* 'predictions'.
* @param predictions The predicted outputs.
* @param weights Tensor whose rank is either 0, or the same rank as
* `labels`, and must be broadcastable to `labels` (i.e., all dimensions
* must be either `1`, or the same as the corresponding `losses`
* dimension).
* @param reduction Type of reduction to apply to loss. Should be of type
* `Reduction`
*
* @doc {heading: 'Training', subheading: 'Losses', namespace: 'losses'}
*/
function absoluteDifference_(labels, predictions, weights, reduction = loss_ops_utils["a" /* Reduction */].SUM_BY_NONZERO_WEIGHTS) {
    const $labels = Object(tensor_util_env["a" /* convertToTensor */])(labels, 'labels', 'absoluteDifference');
    const $predictions = Object(tensor_util_env["a" /* convertToTensor */])(predictions, 'predictions', 'absoluteDifference');
    // Weights are optional; `null` means unweighted.
    const $weights = weights == null ?
        null :
        Object(tensor_util_env["a" /* convertToTensor */])(weights, 'weights', 'absoluteDifference');
    Object(util_base["e" /* assertShapesMatch */])($labels.shape, $predictions.shape, 'Error in absoluteDifference: ');
    // Per-element loss: |labels - predictions|.
    const elementLoss = Object(abs["a" /* abs */])(Object(sub["a" /* sub */])($labels, $predictions));
    return computeWeightedLoss(elementLoss, $weights, reduction);
}
const absoluteDifference = Object(operation["b" /* op */])({ absoluteDifference_ });
//# sourceMappingURL=absolute_difference.js.map
// CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/losses/cosine_distance.js
/**
* Computes the cosine distance loss between two tensors.
*
* @param labels The ground truth output tensor, same dimensions as
* 'predictions'.
* @param predictions The predicted outputs.
* @param axis The dimension along which the cosine distance is computed.
* @param weights Tensor whose rank is either 0, or the same rank as
* `labels`, and must be broadcastable to `labels` (i.e., all dimensions
* must be either `1`, or the same as the corresponding `losses`
* dimension).
* @param reduction Type of reduction to apply to loss. Should be of type
* `Reduction`
*
* @doc {heading: 'Training', subheading: 'Losses', namespace: 'losses'}
*/
function cosineDistance_(labels, predictions, axis, weights, reduction = loss_ops_utils["a" /* Reduction */].SUM_BY_NONZERO_WEIGHTS) {
    const $labels = Object(tensor_util_env["a" /* convertToTensor */])(labels, 'labels', 'cosineDistance');
    const $predictions = Object(tensor_util_env["a" /* convertToTensor */])(predictions, 'predictions', 'cosineDistance');
    // Weights are optional; `null` means unweighted.
    const $weights = weights == null ?
        null :
        Object(tensor_util_env["a" /* convertToTensor */])(weights, 'weights', 'cosineDistance');
    Object(util_base["e" /* assertShapesMatch */])($labels.shape, $predictions.shape, 'Error in cosineDistance: ');
    // Per-element loss: 1 - sum(labels * predictions) along `axis`,
    // keeping the reduced dimension so weights can still broadcast.
    const one = Object(scalar["a" /* scalar */])(1);
    const dotProduct = Object(sum["a" /* sum */])(Object(mul["a" /* mul */])($labels, $predictions), axis, true);
    const elementLoss = Object(sub["a" /* sub */])(one, dotProduct);
    return computeWeightedLoss(elementLoss, $weights, reduction);
}
const cosineDistance = Object(operation["b" /* op */])({ cosineDistance_ });
//# sourceMappingURL=cosine_distance.js.map
// CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/losses/hinge_loss.js
/**
* Computes the Hinge loss between two tensors.
*
* @param labels The ground truth output tensor, same dimensions as
* 'predictions'.
* @param predictions The predicted outputs.
* @param weights Tensor whose rank is either 0, or the same rank as
* `labels`, and must be broadcastable to `labels` (i.e., all dimensions
* must be either `1`, or the same as the corresponding `losses`
* dimension).
* @param reduction Type of reduction to apply to loss. Should be of type
* `Reduction`
*
* @doc {heading: 'Training', subheading: 'Losses', namespace: 'losses'}
*/
function hingeLoss_(labels, predictions, weights, reduction = loss_ops_utils["a" /* Reduction */].SUM_BY_NONZERO_WEIGHTS) {
    let $labels = Object(tensor_util_env["a" /* convertToTensor */])(labels, 'labels', 'hingeLoss');
    const $predictions = Object(tensor_util_env["a" /* convertToTensor */])(predictions, 'predictions', 'hingeLoss');
    // Weights are optional; `null` means unweighted.
    const $weights = weights == null ?
        null :
        Object(tensor_util_env["a" /* convertToTensor */])(weights, 'weights', 'hingeLoss');
    Object(util_base["e" /* assertShapesMatch */])($labels.shape, $predictions.shape, 'Error in hingeLoss: ');
    const one = Object(scalar["a" /* scalar */])(1);
    // Map binary {0, 1} labels onto {-1, 1}: labels <- 2 * labels - 1.
    $labels = Object(sub["a" /* sub */])(Object(mul["a" /* mul */])(Object(scalar["a" /* scalar */])(2), $labels), one);
    // Standard hinge: max(0, 1 - labels * predictions).
    const elementLoss = Object(relu["a" /* relu */])(Object(sub["a" /* sub */])(one, Object(mul["a" /* mul */])($labels, $predictions)));
    return computeWeightedLoss(elementLoss, $weights, reduction);
}
const hingeLoss = Object(operation["b" /* op */])({ hingeLoss_ });
//# sourceMappingURL=hinge_loss.js.map
// CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/losses/huber_loss.js
/**
* @license
* Copyright 2020 Google LLC. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================================
*/
/**
* Computes the huber loss between two tensors.
*
* @param labels The ground truth output tensor, same dimensions as
* 'predictions'.
* @param predictions The predicted outputs.
* @param weights Tensor whose rank is either 0, or the same rank as
* `labels`, and must be broadcastable to `labels` (i.e., all dimensions
* must be either `1`, or the same as the corresponding `losses`
* dimension).
* @param delta Point where huber loss changes from quadratic to linear.
* @param reduction Type of reduction to apply to loss. Should be of type
* `Reduction`.
*
* @doc {heading: 'Training', subheading: 'Losses', namespace: 'losses'}
*/
function huberLoss_(labels, predictions, weights, delta = 1.0, reduction = loss_ops_utils["a" /* Reduction */].SUM_BY_NONZERO_WEIGHTS) {
    const $labels = Object(tensor_util_env["a" /* convertToTensor */])(labels, 'labels', 'huberLoss');
    const $predictions = Object(tensor_util_env["a" /* convertToTensor */])(predictions, 'predictions', 'huberLoss');
    // Weights are optional; `null` means unweighted.
    const $weights = weights == null ?
        null :
        Object(tensor_util_env["a" /* convertToTensor */])(weights, 'weights', 'huberLoss');
    Object(util_base["e" /* assertShapesMatch */])($labels.shape, $predictions.shape, 'Error in huberLoss: ');
    const deltaScalar = Object(scalar["a" /* scalar */])(delta);
    // Split |predictions - labels| into the part at or below delta
    // (quadratic regime) and the remainder above delta (linear regime).
    const absError = Object(abs["a" /* abs */])(Object(sub["a" /* sub */])($predictions, $labels));
    const quadraticPart = Object(minimum["a" /* minimum */])(absError, deltaScalar);
    const linearPart = Object(sub["a" /* sub */])(absError, quadraticPart);
    // Huber: 0.5 * quadratic^2 + delta * linear.
    const elementLoss = Object(add["a" /* add */])(Object(mul["a" /* mul */])(Object(scalar["a" /* scalar */])(0.5), Object(square["a" /* square */])(quadraticPart)), Object(mul["a" /* mul */])(deltaScalar, linearPart));
    return computeWeightedLoss(elementLoss, $weights, reduction);
}
const huberLoss = Object(operation["b" /* op */])({ huberLoss_ });
//# sourceMappingURL=huber_loss.js.map
// CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/losses/log_loss.js
/**
* @license
* Copyright 2020 Google LLC. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================================
*/
/**
* Computes the log loss between two tensors.
*
* @param labels The ground truth output tensor, same dimensions as
* 'predictions'.
* @param predictions The predicted outputs.
* @param weights Tensor whose rank is either 0, or the same rank as
* `labels`, and must be broadcastable to `labels` (i.e., all dimensions
* must be either `1`, or the same as the corresponding `losses`
* dimension).
* @param epsilon A small increment to avoid taking log of zero
* @param reduction Type of reduction to apply to loss. Should be of type
* `Reduction`
*
* @doc {heading: 'Training', subheading: 'Losses', namespace: 'losses'}
*/
function logLoss_(labels, predictions, weights, epsilon = 1e-7, reduction = loss_ops_utils["a" /* Reduction */].SUM_BY_NONZERO_WEIGHTS) {
    const $labels = Object(tensor_util_env["a" /* convertToTensor */])(labels, 'labels', 'logLoss');
    const $predictions = Object(tensor_util_env["a" /* convertToTensor */])(predictions, 'predictions', 'logLoss');
    // Weights are optional; `null` means unweighted.
    const $weights = weights == null ?
        null :
        Object(tensor_util_env["a" /* convertToTensor */])(weights, 'weights', 'logLoss');
    Object(util_base["e" /* assertShapesMatch */])($labels.shape, $predictions.shape, 'Error in logLoss: ');
    const one = Object(scalar["a" /* scalar */])(1);
    const epsilonScalar = Object(scalar["a" /* scalar */])(epsilon);
    // -labels * log(predictions + epsilon); epsilon guards log(0).
    const labelTerm = Object(neg["a" /* neg */])(Object(mul["a" /* mul */])($labels, Object(log["a" /* log */])(Object(add["a" /* add */])($predictions, epsilonScalar))));
    // (1 - labels) * log(1 - predictions + epsilon).
    const complementTerm = Object(mul["a" /* mul */])(Object(sub["a" /* sub */])(one, $labels), Object(log["a" /* log */])(Object(add["a" /* add */])(Object(sub["a" /* sub */])(one, $predictions), epsilonScalar)));
    const elementLoss = Object(sub["a" /* sub */])(labelTerm, complementTerm);
    return computeWeightedLoss(elementLoss, $weights, reduction);
}
const logLoss = Object(operation["b" /* op */])({ logLoss_ });
//# sourceMappingURL=log_loss.js.map
// CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/losses/mean_squared_error.js
/**
* @license
* Copyright 2020 Google LLC. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================================
*/
/**
* Computes the mean squared error between two tensors.
*
* @param labels The ground truth output tensor, same dimensions as
* 'predictions'.
* @param predictions The predicted outputs.
* @param weights Tensor whose rank is either 0, or the same rank as
* `labels`, and must be broadcastable to `labels` (i.e., all dimensions
* must be either `1`, or the same as the corresponding `losses`
* dimension).
* @param reduction Type of reduction to apply to loss. Should be of type
* `Reduction`
*
* @doc {heading: 'Training', subheading: 'Losses', namespace: 'losses'}
*/
function meanSquaredError_(labels, predictions, weights, reduction = loss_ops_utils["a" /* Reduction */].SUM_BY_NONZERO_WEIGHTS) {
    const $labels = Object(tensor_util_env["a" /* convertToTensor */])(labels, 'labels', 'meanSquaredError');
    const $predictions = Object(tensor_util_env["a" /* convertToTensor */])(predictions, 'predictions', 'meanSquaredError');
    // Weights are optional; `null` means unweighted.
    const $weights = weights == null ?
        null :
        Object(tensor_util_env["a" /* convertToTensor */])(weights, 'weights', 'meanSquaredError');
    Object(util_base["e" /* assertShapesMatch */])($labels.shape, $predictions.shape, 'Error in meanSquaredError: ');
    // Per-element loss: (labels - predictions)^2.
    const elementLoss = Object(squared_difference["a" /* squaredDifference */])($labels, $predictions);
    return computeWeightedLoss(elementLoss, $weights, reduction);
}
const meanSquaredError = Object(operation["b" /* op */])({ meanSquaredError_ });
//# sourceMappingURL=mean_squared_error.js.map
// CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/losses/sigmoid_cross_entropy.js
/**
* @license
* Copyright 2020 Google LLC. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================================
*/
/**
 * Computes the element-wise sigmoid cross entropy between `labels` and
 * unscaled `logits`, using a numerically stable reformulation (see below).
 *
 * @param labels Target tensor, same shape as `logits`.
 *     NOTE(review): presumably probabilities in [0, 1] — not validated here.
 * @param logits Unscaled log-odds tensor.
 * @returns Tensor of per-element logistic losses (no reduction applied).
 */
function sigmoidCrossEntropyWithLogits_(labels, logits) {
    const $labels = Object(tensor_util_env["a" /* convertToTensor */])(labels, 'labels', 'sigmoidCrossEntropyWithLogits');
    const $logits = Object(tensor_util_env["a" /* convertToTensor */])(logits, 'logits', 'sigmoidCrossEntropyWithLogits');
    Object(util_base["e" /* assertShapesMatch */])($labels.shape, $logits.shape, 'Error in sigmoidCrossEntropyWithLogits: ');
    /**
     * Implementation Details:
     *
     * For brevity, let `x = logits`, `z = labels`. The logistic loss is
     * z * -log(sigmoid(x)) + (1 - z) * -log(1 - sigmoid(x))
     * = z * -log(1 / (1 + exp(-x))) + (1 - z) * -log(exp(-x) / (1 + exp(-x)))
     * = z * log(1 + exp(-x)) + (1 - z) * (-log(exp(-x)) + log(1 + exp(-x)))
     * = z * log(1 + exp(-x)) + (1 - z) * (x + log(1 + exp(-x))
     * = (1 - z) * x + log(1 + exp(-x))
     * = x - x * z + log(1 + exp(-x))
     *
     * For x < 0, to avoid overflow in exp(-x), we reformulate the above
     * x - x * z + log(1 + exp(-x))
     * = log(exp(x)) - x * z + log(1 + exp(-x))
     * = - x * z + log(1 + exp(x))
     *
     * Hence, to ensure stability and avoid overflow, the implementation uses
     * this equivalent formulation:
     * max(x, 0) - x * z + log(1 + exp(-abs(x)))
     */
    // max(x, 0) via relu.
    const maxOutput = Object(relu["a" /* relu */])($logits);
    // x * z.
    const outputXTarget = Object(mul["a" /* mul */])($logits, $labels);
    // log(1 + exp(-|x|)) — log1p keeps precision for small exp(-|x|).
    const sigmoidOutput = Object(log1p["a" /* log1p */])(Object(exp["a" /* exp */])(Object(neg["a" /* neg */])(Object(abs["a" /* abs */])($logits))));
    return Object(add["a" /* add */])(Object(sub["a" /* sub */])(maxOutput, outputXTarget), sigmoidOutput);
}
/**
* Computes the sigmoid cross entropy loss between two tensors.
*
* If labelSmoothing is nonzero, smooth the labels towards 1/2:
*
* newMulticlassLabels = multiclassLabels * (1 - labelSmoothing)
* + 0.5 * labelSmoothing
*
* @param multiClassLabels The ground truth output tensor of shape
* [batch_size, num_classes], same dimensions as 'predictions'.
* @param logits The predicted outputs.
* @param weights Tensor whose rank is either 0, or the same rank as
* `labels`, and must be broadcastable to `labels` (i.e., all dimensions
* must be either `1`, or the same as the corresponding `losses`
* dimension).
* @param labelSmoothing If greater than 0, then smooth the labels.
* @param reduction Type of reduction to apply to loss. Should be of type
* `Reduction`
*
* @doc { heading: 'Training', subheading: 'Losses', namespace: 'losses' }
*/
function sigmoidCrossEntropy_(multiClassLabels, logits, weights, labelSmoothing = 0, reduction = loss_ops_utils["a" /* Reduction */].SUM_BY_NONZERO_WEIGHTS) {
    let $multiClassLabels = Object(tensor_util_env["a" /* convertToTensor */])(multiClassLabels, 'multiClassLabels', 'sigmoidCrossEntropy');
    const $logits = Object(tensor_util_env["a" /* convertToTensor */])(logits, 'logits', 'sigmoidCrossEntropy');
    // Weights are optional; `null` means unweighted.
    const $weights = weights == null ?
        null :
        Object(tensor_util_env["a" /* convertToTensor */])(weights, 'weights', 'sigmoidCrossEntropy');
    Object(util_base["e" /* assertShapesMatch */])($multiClassLabels.shape, $logits.shape, 'Error in sigmoidCrossEntropy: ');
    if (labelSmoothing > 0) {
        // Smooth labels towards 1/2:
        // labels <- labels * (1 - smoothing) + 0.5 * smoothing.
        const smoothing = Object(scalar["a" /* scalar */])(labelSmoothing);
        const one = Object(scalar["a" /* scalar */])(1);
        const half = Object(scalar["a" /* scalar */])(0.5);
        const scaledLabels = Object(mul["a" /* mul */])($multiClassLabels, Object(sub["a" /* sub */])(one, smoothing));
        $multiClassLabels = Object(add["a" /* add */])(scaledLabels, Object(mul["a" /* mul */])(half, smoothing));
    }
    const elementLoss = sigmoidCrossEntropyWithLogits_($multiClassLabels, $logits);
    return computeWeightedLoss(elementLoss, $weights, reduction);
}
const sigmoidCrossEntropy = Object(operation["b" /* op */])({ sigmoidCrossEntropy_ });
//# sourceMappingURL=sigmoid_cross_entropy.js.map
// CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/losses/softmax_cross_entropy.js
/**
* @license
* Copyright 2020 Google LLC. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================================
*/
/**
* Computes softmax cross entropy between logits and labels.
*
* Measures the probability error in discrete classification tasks in which
* the classes are mutually exclusive (each entry is in exactly one class).
* For example, each CIFAR-10 image is labeled with one and only one label: an
* image can be a dog or a truck, but not both.
*
* `NOTE`: While the classes are mutually exclusive, their probabilities need
* not be. All that is required is that each row of labels is a valid
* probability distribution. If they are not, the computation of the gradient
* will be incorrect.
*
* `WARNING`: This op expects unscaled logits, since it performs a softmax on
* logits internally for efficiency. Do not call this op with the output of
* softmax, as it will produce incorrect results.
*
* logits and labels must have the same shape, e.g. [batch_size, num_classes]
* and the same dtype.
* @param labels The labels array.
* @param logits The logits array.
* @param dim The dimension softmax would be performed on. Defaults to `-1`
* which indicates the last dimension.
*/
function softmaxCrossEntropyWithLogits_(labels, logits, dim = -1) {
    // -1 means "last axis".
    if (dim === -1) {
        dim = logits.rank - 1;
    }
    // Only a softmax along the last dimension is implemented.
    if (dim !== logits.rank - 1) {
        throw Error(`Softmax cross entropy along a non-last dimension is not yet ` +
            `supported. Labels / logits was rank ${logits.rank} ` +
            `and dim was ${dim}`);
    }
    // Use a custom gradient for numerical stability.
    const customOp = Object(gradients["a" /* customGrad */])((labels, logits, save) => {
        // Reference:
        // 1. http://cs231n.github.io/linear-classify/#softmax
        // 2. https://blog.feedly.com/tricks-of-the-trade-logsumexp/
        const keepDims = true;
        // log-softmax = logits - logSumExp(logits), computed stably.
        const lse = Object(log_sum_exp["a" /* logSumExp */])(logits, [dim], keepDims);
        const logResult = Object(sub["a" /* sub */])(Object(cast["a" /* cast */])(logits, 'float32'), lse);
        // Saved tensors are replayed to gradFunc in this exact order.
        save([labels, logResult]);
        // Cross entropy: -sum(labels * log-softmax) along `dim`.
        const costVector = Object(neg["a" /* neg */])(Object(mul["a" /* mul */])(logResult, labels));
        const value = Object(sum["a" /* sum */])(costVector, [dim]);
        const gradFunc = (dy, saved) => {
            const [labels, logResult] = saved;
            // Re-expand dy so it broadcasts over the reduced `dim`.
            const dyShape = Object(axis_util["e" /* expandShapeToKeepDim */])(dy.shape, [dim]);
            // Gradients w.r.t. [labels, logits]:
            //   d/dlabels = dy * (labels - softmax(logits))   [as written below]
            //   d/dlogits = dy * (softmax(logits) - labels)
            // where softmax(logits) = exp(logResult).
            return [
                Object(mul["a" /* mul */])(Object(reshape["a" /* reshape */])(dy, dyShape), Object(sub["a" /* sub */])(Object(cast["a" /* cast */])(labels, 'float32'), Object(exp["a" /* exp */])(logResult))),
                Object(mul["a" /* mul */])(Object(reshape["a" /* reshape */])(dy, dyShape), Object(sub["a" /* sub */])(Object(exp["a" /* exp */])(logResult), Object(cast["a" /* cast */])(labels, 'float32'))),
            ];
        };
        return { value, gradFunc };
    });
    return customOp(labels, logits);
}
/**
* Computes the softmax cross entropy loss between two tensors.
*
* If labelSmoothing is nonzero, smooth the labels towards 1/2:
*
* newOnehotLabels = onehotLabels * (1 - labelSmoothing)
* + labelSmoothing / numClasses
*
* @param onehotLabels One hot encoded labels
* [batch_size, num_classes], same dimensions as 'predictions'.
* @param logits The predicted outputs.
* @param weights Tensor whose rank is either 0, or 1, and must be
* broadcastable to `loss` of shape [batch_size]
* @param labelSmoothing If greater than 0, then smooth the labels.
* @param reduction Type of reduction to apply to loss. Should be of type
* `Reduction`
*
* @doc { heading: 'Training', subheading: 'Losses', namespace: 'losses' }
*/
function softmaxCrossEntropy_(onehotLabels, logits, weights, labelSmoothing = 0, reduction = loss_ops_utils["a" /* Reduction */].SUM_BY_NONZERO_WEIGHTS) {
    let $onehotLabels = Object(tensor_util_env["a" /* convertToTensor */])(onehotLabels, 'onehotLabels', 'softmaxCrossEntropy');
    const $logits = Object(tensor_util_env["a" /* convertToTensor */])(logits, 'logits', 'softmaxCrossEntropy');
    // Weights are optional; `null` means unweighted.
    const $weights = weights == null ?
        null :
        Object(tensor_util_env["a" /* convertToTensor */])(weights, 'weights', 'softmaxCrossEntropy');
    Object(util_base["e" /* assertShapesMatch */])($onehotLabels.shape, $logits.shape, 'Error in softmaxCrossEntropy: ');
    if (labelSmoothing > 0) {
        // Smooth one-hot labels towards the uniform distribution:
        // labels <- labels * (1 - smoothing) + smoothing / numClasses.
        const smoothing = Object(scalar["a" /* scalar */])(labelSmoothing);
        const one = Object(scalar["a" /* scalar */])(1);
        const numClasses = Object(scalar["a" /* scalar */])($onehotLabels.shape[1]);
        const scaledLabels = Object(mul["a" /* mul */])($onehotLabels, Object(sub["a" /* sub */])(one, smoothing));
        $onehotLabels = Object(add["a" /* add */])(scaledLabels, Object(div["a" /* div */])(smoothing, numClasses));
    }
    const elementLoss = softmaxCrossEntropyWithLogits_($onehotLabels, $logits);
    return computeWeightedLoss(elementLoss, $weights, reduction);
}
const softmaxCrossEntropy = Object(operation["b" /* op */])({ softmaxCrossEntropy_ });
//# sourceMappingURL=softmax_cross_entropy.js.map
// CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/sparse/sparse_reshape.js
/**
* @license
* Copyright 2021 Google LLC. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================================
*/
/**
* This operation has the same semantics as reshape on the represented dense
* tensor. The `inputIndices` are recomputed based on the requested `newShape`.
* If one component of `newShape` is the special value -1, the size of that
* dimension is computed so that the total dense size remains constant. At most
* one component of `newShape` can be -1. The number of dense elements implied
* by `newShape` must be the same as the number of dense elements originally
* implied by `inputShape`. Reshaping does not affect the order of values in the
* SparseTensor. If the input tensor has rank R_in and N non-empty values, and
* `newShape` has length R_out, then `inputIndices` has shape [N, R_in],
* `inputShape` has length R_in, `outputIndices` has shape [N, R_out], and
* `outputShape` has length R_out.
*
* ```js
* const result = tf.sparse.sparseReshape(
* [[0, 0, 0], [0, 0, 1], [0, 1, 0], [1, 0, 0], [1, 2, 3]],
* [2, 3, 6], [9, -1]);
* console.log(result);
* result['outputIndices'].print(); //[[0, 0], [0, 1], [1, 2], [4, 2], [8, 1]]
* result['outputShape'].print(); // [9, 4]
* ```
* @param inputIndices: 2-D. N x R_in matrix with the indices of non-empty
* values in a SparseTensor.
* @param inputShape: 1-D. R_in Tensor1D with the input SparseTensor's dense
* shape.
* @param newShape: 1-D. R_out Tensor1D with the requested new dense shape.
* @return A map with the following properties:
* - outputIndices: 2-D. N x R_out matrix with the updated indices of
* non-empty values in the output SparseTensor.
* - outputShape: 1-D. R_out vector with the full dense shape of the output
* SparseTensor. This is the same as newShape but with any -1 dimensions
* filled in.
* @doc {heading: 'Operations', subheading: 'Sparse'}
*/
function sparseReshape_(inputIndices, inputShape, newShape) {
    const $inputIndices = Object(tensor_util_env["a" /* convertToTensor */])(inputIndices, 'inputIndices', 'sparseReshape');
    const $inputShape = Object(tensor_util_env["a" /* convertToTensor */])(inputShape, 'inputShape', 'sparseReshape');
    const $newShape = Object(tensor_util_env["a" /* convertToTensor */])(newShape, 'newShape', 'sparseReshape');
    // Rank validation: indices must be an [N, R_in] matrix; both shape
    // arguments must be rank-1 vectors.
    if ($inputIndices.rank !== 2) {
        throw new Error(`Input indices should be Tensor2D but received shape
        ${$inputIndices.shape}`);
    }
    if ($inputShape.rank !== 1) {
        throw new Error(`Input shape should be Tensor1D but received shape ${$inputShape.shape}`);
    }
    if ($newShape.rank !== 1) {
        throw new Error(`New shape should be Tensor1D but received shape ${$newShape.shape}`);
    }
    const inputs = {
        inputIndices: $inputIndices,
        inputShape: $inputShape,
        newShape: $newShape
    };
    // The kernel returns a two-tensor array: [outputIndices, outputShape].
    const result = engine["a" /* ENGINE */].runKernel(kernel_names["Jc" /* SparseReshape */], inputs);
    return { outputIndices: result[0], outputShape: result[1] };
}
const sparseReshape = Object(operation["b" /* op */])({ sparseReshape_ });
//# sourceMappingURL=sparse_reshape.js.map
// CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-core/dist/ops/ops.js
/**
* @license
* Copyright 2020 Google LLC. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================================
*/
// Modularized ops.
// Spectral (Fourier transform) ops namespace.
const spectral = {
    fft: fft["a" /* fft */],
    ifft: ifft["a" /* ifft */],
    rfft: rfft["a" /* rfft */],
    irfft: irfft["a" /* irfft */]
};
// Signal-processing ops namespace (windowing / framing / STFT).
const ops_signal = {
    hammingWindow: hammingWindow,
    hannWindow: hannWindow,
    frame: frame_frame,
    stft: stft,
};
// Image ops namespace.
const ops_image = {
    flipLeftRight: flipLeftRight,
    resizeNearestNeighbor: resize_nearest_neighbor["a" /* resizeNearestNeighbor */],
    resizeBilinear: resize_bilinear["a" /* resizeBilinear */],
    rotateWithOffset: rotateWithOffset,
    cropAndResize: cropAndResize,
    nonMaxSuppression: nonMaxSuppression,
    nonMaxSuppressionAsync: nonMaxSuppressionAsync,
    nonMaxSuppressionWithScore: nonMaxSuppressionWithScore,
    nonMaxSuppressionWithScoreAsync: nonMaxSuppressionWithScoreAsync,
    nonMaxSuppressionPadded: nonMaxSuppressionPadded,
    nonMaxSuppressionPaddedAsync: nonMaxSuppressionPaddedAsync,
    transform: transform
};
// Linear-algebra ops namespace.
const linalg = {
    bandPart: bandPart,
    gramSchmidt: gramSchmidt,
    qr: qr
};
// Losses ops namespace.
const ops_losses = {
    absoluteDifference: absoluteDifference,
    computeWeightedLoss: computeWeightedLoss,
    cosineDistance: cosineDistance,
    hingeLoss: hingeLoss,
    huberLoss: huberLoss,
    logLoss: logLoss,
    meanSquaredError: meanSquaredError,
    sigmoidCrossEntropy: sigmoidCrossEntropy,
    softmaxCrossEntropy: softmaxCrossEntropy
};
// Sparse-tensor ops namespace.
const sparse = { sparseReshape: sparseReshape };
// Second level exports.
//# sourceMappingURL=ops.js.map
/***/ }),
/* 22 */
/***/ (function(module, __webpack_exports__, __webpack_require__) {
"use strict";
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "b", function() { return Environment; });
/* unused harmony export getQueryParams */
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "c", function() { return env; });
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "a", function() { return ENV; });
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "d", function() { return setEnvironmentGlobal; });
/* harmony import */ var _util_base__WEBPACK_IMPORTED_MODULE_0__ = __webpack_require__(8);
/**
* @license
* Copyright 2017 Google LLC. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================================
*/
// Expects flags from URL in the format ?tfjsflags=FLAG1:1,FLAG2:true.
const TENSORFLOWJS_FLAGS_PREFIX = 'tfjsflags';
/**
* The environment contains evaluated flags as well as the registered platform.
* This is always used as a global singleton and can be retrieved with
* `tf.env()`.
*
* @doc {heading: 'Environment'}
*/
class Environment {
    // tslint:disable-next-line: no-any
    constructor(global) {
        // Host global object (e.g. `window` or Node's `global`); only
        // `global.location.search` is read here, for URL flag overrides.
        this.global = global;
        this.flags = {};          // cache of evaluated flag values
        this.flagRegistry = {};   // flagName -> {evaluationFn, setHook}
        this.urlFlags = {};       // overrides parsed from the URL query string
        // Jasmine spies on this in 'environment_test.ts'
        this.getQueryParams = getQueryParams;
        this.populateURLFlags();
    }
    // Installs the platform implementation; warns when overwriting one.
    setPlatform(platformName, platform) {
        if (this.platform != null) {
            console.warn(`Platform ${this.platformName} has already been set. ` +
                `Overwriting the platform with ${platform}.`);
        }
        this.platformName = platformName;
        this.platform = platform;
    }
    // Registers a flag with its lazy evaluation function and optional
    // set-hook, then immediately applies any URL override for it.
    registerFlag(flagName, evaluationFn, setHook) {
        this.flagRegistry[flagName] = { evaluationFn, setHook };
        // Override the flag value from the URL. This has to happen here because the
        // environment is initialized before flags get registered.
        if (this.urlFlags[flagName] != null) {
            const flagValue = this.urlFlags[flagName];
            console.warn(`Setting feature override from URL ${flagName}: ${flagValue}.`);
            this.set(flagName, flagValue);
        }
    }
    // Returns the flag value, evaluating (and caching) it asynchronously
    // if it has not been evaluated yet.
    async getAsync(flagName) {
        if (flagName in this.flags) {
            return this.flags[flagName];
        }
        this.flags[flagName] = await this.evaluateFlag(flagName);
        return this.flags[flagName];
    }
    // Synchronous variant of getAsync; throws if the flag's evaluation
    // function returns a Promise.
    get(flagName) {
        if (flagName in this.flags) {
            return this.flags[flagName];
        }
        const flagValue = this.evaluateFlag(flagName);
        if (Object(_util_base__WEBPACK_IMPORTED_MODULE_0__[/* isPromise */ "x"])(flagValue)) {
            throw new Error(`Flag ${flagName} cannot be synchronously evaluated. ` +
                `Please use getAsync() instead.`);
        }
        this.flags[flagName] = flagValue;
        return this.flags[flagName];
    }
    // Typed conveniences over get(); no runtime type checking is performed.
    getNumber(flagName) {
        return this.get(flagName);
    }
    getBool(flagName) {
        return this.get(flagName);
    }
    // Returns the raw cache of evaluated flags (not a copy).
    getFlags() {
        return this.flags;
    }
    // For backwards compatibility.
    get features() {
        return this.flags;
    }
    // Sets a registered flag's value and fires its setHook, if any.
    // Throws for unregistered flags.
    set(flagName, value) {
        if (this.flagRegistry[flagName] == null) {
            throw new Error(`Cannot set flag ${flagName} as it has not been registered.`);
        }
        this.flags[flagName] = value;
        if (this.flagRegistry[flagName].setHook != null) {
            this.flagRegistry[flagName].setHook(value);
        }
    }
    // Runs the registered evaluation function for a flag; may return a
    // Promise (see get()/getAsync()).
    evaluateFlag(flagName) {
        if (this.flagRegistry[flagName] == null) {
            throw new Error(`Cannot evaluate flag '${flagName}': no evaluation function found.`);
        }
        return this.flagRegistry[flagName].evaluationFn();
    }
    // Replaces the evaluated-flag cache wholesale (shallow copy of `flags`).
    setFlags(flags) {
        this.flags = Object.assign({}, flags);
    }
    // Clears all cached flags and URL overrides, then re-parses the URL.
    reset() {
        this.flags = {};
        this.urlFlags = {};
        this.populateURLFlags();
    }
    // Parses `?tfjsflags=FLAG1:1,FLAG2:true` style overrides from the URL
    // query string into this.urlFlags. No-op outside browser-like globals.
    populateURLFlags() {
        if (typeof this.global === 'undefined' ||
            typeof this.global.location === 'undefined' ||
            typeof this.global.location.search === 'undefined') {
            return;
        }
        const urlParams = this.getQueryParams(this.global.location.search);
        if (TENSORFLOWJS_FLAGS_PREFIX in urlParams) {
            const keyValues = urlParams[TENSORFLOWJS_FLAGS_PREFIX].split(',');
            keyValues.forEach(keyValue => {
                const [key, value] = keyValue.split(':');
                this.urlFlags[key] = parseValue(key, value);
            });
        }
    }
}
// Parses a location.search string into a {name: value} map. Keys with no
// '=' part decode to ''. Uses String.replace purely for its match callback.
function getQueryParams(queryString) {
    const params = {};
    const pairPattern = /[?&]([^=?&]+)(?:=([^&]*))?/g;
    queryString.replace(pairPattern, (match, ...captured) => {
        // captured = [name, value, offset, fullString]; only [0]/[1] matter.
        decodeParam(params, captured[0], captured[1]);
        return captured.join('=');
    });
    return params;
}
// Decodes one query-string pair into `params`; a missing value becomes ''.
function decodeParam(params, name, value) {
    const decodedName = decodeURIComponent(name);
    const decodedValue = decodeURIComponent(value || '');
    params[decodedName] = decodedValue;
}
/**
 * Converts a raw URL flag string into a boolean or number.
 * 'true'/'false' (any case) become booleans; strings that round-trip
 * through Number unchanged become numbers; anything else throws.
 *
 * Fix: the original error message read "Could not parse value flag value
 * ..." — a garbled duplication; it now reads "Could not parse flag value".
 */
function parseValue(flagName, value) {
    value = value.toLowerCase();
    if (value === 'true' || value === 'false') {
        return value === 'true';
    }
    else if (`${+value}` === value) {
        // Exact round-trip means the string is a canonical numeric literal.
        return +value;
    }
    throw new Error(`Could not parse flag value ${value} for flag ${flagName}.`);
}
/**
* Returns the current environment (a global singleton).
*
* The environment object contains the evaluated feature values as well as the
* active platform.
*
* @doc {heading: 'Environment'}
*/
function env() {
    return ENV;
}
// Module-level singleton holding the active Environment instance.
let ENV = null;
// Installs `environment` as the global singleton returned by `env()`.
function setEnvironmentGlobal(environment) {
    ENV = environment;
}
//# sourceMappingURL=environment.js.map
/***/ }),
/* 23 */
/***/ (function(module, __webpack_exports__, __webpack_require__) {
"use strict";
__webpack_require__.r(__webpack_exports__);
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "makeTypesMatch", function() { return makeTypesMatch; });
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "assertTypesMatch", function() { return assertTypesMatch; });
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "isTensorInList", function() { return isTensorInList; });
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "getTensorsInContainer", function() { return getTensorsInContainer; });
/* harmony import */ var _tensor__WEBPACK_IMPORTED_MODULE_0__ = __webpack_require__(6);
/* harmony import */ var _types__WEBPACK_IMPORTED_MODULE_1__ = __webpack_require__(89);
/* harmony import */ var _util__WEBPACK_IMPORTED_MODULE_2__ = __webpack_require__(8);
/**
* @license
* Copyright 2018 Google LLC. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================================
*/
// Returns [a, b] with matching dtypes, casting both to the common
// (upcast) dtype when they differ. Same-dtype inputs pass through as-is.
function makeTypesMatch(a, b) {
    // Fast path: nothing to do when the dtypes already agree.
    if (a.dtype === b.dtype) {
        return [a, b];
    }
    const commonDtype = Object(_types__WEBPACK_IMPORTED_MODULE_1__[/* upcastType */ "c"])(a.dtype, b.dtype);
    const castA = a.cast(commonDtype);
    const castB = b.cast(commonDtype);
    return [castA, castB];
}
// Throws (via util.assert) when the two tensors' dtypes differ; the
// message closure is only evaluated on failure.
function assertTypesMatch(a, b) {
    Object(_util__WEBPACK_IMPORTED_MODULE_2__[/* assert */ "b"])(a.dtype === b.dtype, () => `The dtypes of the first(${a.dtype}) and` +
        ` second(${b.dtype}) input must match`);
}
// True when `tensorList` contains a tensor with the same id as `tensor`.
// Membership is decided by tensor id, not by object identity.
function isTensorInList(tensor, tensorList) {
    for (const candidate of tensorList) {
        if (candidate.id === tensor.id) {
            return true;
        }
    }
    return false;
}
/**
* Extracts any `Tensor`s found within the provided object.
*
* @param container an object that may be a `Tensor` or may directly contain
* `Tensor`s, such as a `Tensor[]` or `{key: Tensor, ...}`. In general it
* is safe to pass any object here, except that `Promise`s are not
* supported.
* @returns An array of `Tensors` found within the passed object. If the
* argument is simply a `Tensor', a list containing that `Tensor` is
* returned. If the object is not a `Tensor` or does not
* contain `Tensors`, an empty list is returned.
*/
// Collects every Tensor reachable inside `result` via a depth-first walk.
// The visited set guards against cycles and repeated references.
function getTensorsInContainer(result) {
    const found = [];
    const visited = new Set();
    walkTensorContainer(result, found, visited);
    return found;
}
// Recursive helper for getTensorsInContainer: pushes Tensors onto `list`,
// descends into arrays/objects, and skips anything already in `seen`.
function walkTensorContainer(container, list, seen) {
    // Null/undefined leaves are ignored outright.
    if (container == null) {
        return;
    }
    // A Tensor is collected and never descended into.
    if (container instanceof _tensor__WEBPACK_IMPORTED_MODULE_0__[/* Tensor */ "a"]) {
        list.push(container);
        return;
    }
    if (!isIterable(container)) {
        return;
    }
    // for...in covers both array indices and plain-object keys.
    for (const key in container) {
        const child = container[key];
        if (seen.has(child)) {
            continue;
        }
        seen.add(child);
        walkTensorContainer(child, list, seen);
    }
}
// tslint:disable-next-line:no-any
// True for arrays and objects (anything for-in can walk). Note that
// `typeof null === 'object'`; callers filter null before calling this.
function isIterable(obj) {
    if (Array.isArray(obj)) {
        return true;
    }
    return typeof obj === 'object';
}
//# sourceMappingURL=tensor_util.js.map
/***/ }),
/* 24 */
/***/ (function(module, __webpack_exports__, __webpack_require__) {
"use strict";
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "a", function() { return matMul; });
/* harmony import */ var _engine__WEBPACK_IMPORTED_MODULE_0__ = __webpack_require__(5);
/* harmony import */ var _kernel_names__WEBPACK_IMPORTED_MODULE_1__ = __webpack_require__(3);
/* harmony import */ var _tensor_util__WEBPACK_IMPORTED_MODULE_2__ = __webpack_require__(23);
/* harmony import */ var _tensor_util_env__WEBPACK_IMPORTED_MODULE_3__ = __webpack_require__(2);
/* harmony import */ var _operation__WEBPACK_IMPORTED_MODULE_4__ = __webpack_require__(4);
/**
* @license
* Copyright 2020 Google LLC. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================================
*/
/**
* Computes the dot product of two matrices, A * B. These must be matrices.
*
* ```js
* const a = tf.tensor2d([1, 2], [1, 2]);
* const b = tf.tensor2d([1, 2, 3, 4], [2, 2]);
*
* a.matMul(b).print(); // or tf.matMul(a, b)
* ```
* @param a First matrix in dot product operation.
* @param b Second matrix in dot product operation.
* @param transposeA If true, `a` is transposed before multiplication.
* @param transposeB If true, `b` is transposed before multiplication.
*
* @doc {heading: 'Operations', subheading: 'Matrices'}
*/
// Converts both operands to tensors, unifies their dtypes, then dispatches
// the BatchMatMul kernel. Wrapped by op() below to produce tf.matMul.
function matMul_(a, b, transposeA = false, transposeB = false) {
    let $a = Object(_tensor_util_env__WEBPACK_IMPORTED_MODULE_3__[/* convertToTensor */ "a"])(a, 'a', 'matMul');
    let $b = Object(_tensor_util_env__WEBPACK_IMPORTED_MODULE_3__[/* convertToTensor */ "a"])(b, 'b', 'matMul');
    // Both inputs must share a dtype before hitting the kernel.
    [$a, $b] = Object(_tensor_util__WEBPACK_IMPORTED_MODULE_2__["makeTypesMatch"])($a, $b);
    return _engine__WEBPACK_IMPORTED_MODULE_0__[/* ENGINE */ "a"].runKernel(_kernel_names__WEBPACK_IMPORTED_MODULE_1__[/* BatchMatMul */ "s"], { a: $a, b: $b }, { transposeA, transposeB });
}
const matMul = Object(_operation__WEBPACK_IMPORTED_MODULE_4__[/* op */ "b"])({ matMul_ });
//# sourceMappingURL=mat_mul.js.map
/***/ }),
/* 25 */
/***/ (function(module, __webpack_exports__, __webpack_require__) {
"use strict";
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "a", function() { return square; });
/* harmony import */ var _engine__WEBPACK_IMPORTED_MODULE_0__ = __webpack_require__(5);
/* harmony import */ var _tensor_util_env__WEBPACK_IMPORTED_MODULE_1__ = __webpack_require__(2);
/* harmony import */ var _operation__WEBPACK_IMPORTED_MODULE_2__ = __webpack_require__(4);
/**
* @license
* Copyright 2019 Google LLC. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================================
*/
/**
* Computes square of `x` element-wise: `x ^ 2`
*
* ```js
* const x = tf.tensor1d([1, 2, Math.sqrt(2), -1]);
*
* x.square().print(); // or tf.square(x)
* ```
* @param x The input Tensor.
*
* @doc {heading: 'Operations', subheading: 'Basic math'}
*/
// Validates/converts the input, then dispatches the 'Square' kernel with
// no attributes. Wrapped by op() below to produce tf.square.
function square_(x) {
    const $x = Object(_tensor_util_env__WEBPACK_IMPORTED_MODULE_1__[/* convertToTensor */ "a"])(x, 'x', 'square');
    return _engine__WEBPACK_IMPORTED_MODULE_0__[/* ENGINE */ "a"].runKernel('Square', { x: $x }, {});
}
const square = Object(_operation__WEBPACK_IMPORTED_MODULE_2__[/* op */ "b"])({ square_ });
//# sourceMappingURL=square.js.map
/***/ }),
/* 26 */
/***/ (function(module, __webpack_exports__, __webpack_require__) {
"use strict";
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "g", function() { return enableProdMode; });
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "f", function() { return enableDebugMode; });
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "c", function() { return disableDeprecationWarnings; });
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "b", function() { return deprecationWarn; });
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "e", function() { return disposeVariables; });
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "h", function() { return engine; });
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "m", function() { return memory; });
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "n", function() { return profile; });
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "t", function() { return tidy; });
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "d", function() { return dispose; });
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "l", function() { return keep; });
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "u", function() { return time; });
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "r", function() { return setBackend; });
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "o", function() { return ready; });
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "k", function() { return getBackend; });
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "q", function() { return removeBackend; });
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "i", function() { return findBackend; });
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "j", function() { return findBackendFactory; });
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "p", function() { return registerBackend; });
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "a", function() { return backend; });
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "s", function() { return setPlatform; });
/* harmony import */ var _engine__WEBPACK_IMPORTED_MODULE_0__ = __webpack_require__(5);
/* harmony import */ var _environment__WEBPACK_IMPORTED_MODULE_1__ = __webpack_require__(22);
/* harmony import */ var _tensor__WEBPACK_IMPORTED_MODULE_2__ = __webpack_require__(6);
/* harmony import */ var _tensor_util__WEBPACK_IMPORTED_MODULE_3__ = __webpack_require__(23);
/**
* @license
* Copyright 2018 Google LLC. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================================
*/
/**
* Enables production mode which disables correctness checks in favor of
* performance.
*
* @doc {heading: 'Environment'}
*/
function enableProdMode() {
    // Flip the global 'PROD' flag; correctness checks are skipped when set.
    Object(_environment__WEBPACK_IMPORTED_MODULE_1__[/* env */ "c"])().set('PROD', true);
}
/**
* Enables debug mode which will log information about all executed kernels:
* the elapsed time of the kernel execution, as well as the rank, shape, and
* size of the output tensor.
*
* Debug mode will significantly slow down your application as it will
* download the result of every operation to the CPU. This should not be used in
* production. Debug mode does not affect the timing information of the kernel
* execution as we do not measure download time in the kernel execution time.
*
* See also: `tf.profile`, `tf.memory`.
*
* @doc {heading: 'Environment'}
*/
function enableDebugMode() {
    // Flip the global 'DEBUG' flag; kernels then log timing/shape info.
    Object(_environment__WEBPACK_IMPORTED_MODULE_1__[/* env */ "c"])().set('DEBUG', true);
}
/** Globally disables deprecation warnings */
function disableDeprecationWarnings() {
    // Clear the flag consulted by deprecationWarn(), then tell the user once.
    Object(_environment__WEBPACK_IMPORTED_MODULE_1__[/* env */ "c"])().set('DEPRECATION_WARNINGS_ENABLED', false);
    console.warn(`TensorFlow.js deprecation warnings have been disabled.`);
}
/** Warn users about deprecated functionality. */
function deprecationWarn(msg) {
    // No-op when warnings were disabled via tf.disableDeprecationWarnings().
    if (Object(_environment__WEBPACK_IMPORTED_MODULE_1__[/* env */ "c"])().getBool('DEPRECATION_WARNINGS_ENABLED')) {
        console.warn(msg + ' You can disable deprecation warnings with ' +
            'tf.disableDeprecationWarnings().');
    }
}
// Register this function as the warning channel used by the Tensor class.
Object(_tensor__WEBPACK_IMPORTED_MODULE_2__[/* setDeprecationWarningFn */ "e"])(deprecationWarn);
/**
* Dispose all variables kept in backend engine.
*
* @doc {heading: 'Environment'}
*/
function disposeVariables() {
    // Delegates to the global engine singleton.
    _engine__WEBPACK_IMPORTED_MODULE_0__[/* ENGINE */ "a"].disposeVariables();
}
/**
* It returns the global engine that keeps track of all tensors and backends.
*
* @doc {heading: 'Environment'}
*/
function engine() {
    // Exposes the global engine singleton directly.
    return _engine__WEBPACK_IMPORTED_MODULE_0__[/* ENGINE */ "a"];
}
/**
* Returns memory info at the current time in the program. The result is an
* object with the following properties:
*
* - `numBytes`: Number of bytes allocated (undisposed) at this time.
* - `numTensors`: Number of unique tensors allocated.
* - `numDataBuffers`: Number of unique data buffers allocated
* (undisposed) at this time, which is ≤ the number of tensors
* (e.g. `a.reshape(newShape)` makes a new Tensor that shares the same
* data buffer with `a`).
* - `unreliable`: True if the memory usage is unreliable. See `reasons` when
* `unreliable` is true.
* - `reasons`: `string[]`, reasons why the memory is unreliable, present if
* `unreliable` is true.
*
* WebGL Properties:
* - `numBytesInGPU`: Number of bytes allocated (undisposed) in the GPU only at
* this time.
*
* @doc {heading: 'Performance', subheading: 'Memory'}
*/
function memory() {
    // Snapshot of tensor/byte allocation counts from the global engine.
    return _engine__WEBPACK_IMPORTED_MODULE_0__[/* ENGINE */ "a"].memory();
}
/**
* Executes the provided function `f()` and returns a promise that resolves
* with information about the function's memory use:
* - `newBytes`: the number of new bytes allocated
* - `newTensors`: the number of new tensors created
* - `peakBytes`: the peak number of bytes allocated
* - `kernels`: an array of objects for each kernel involved that reports
* their input and output shapes, number of bytes used, and number of new
* tensors created.
* - `kernelNames`: an array of unique strings with just the names of the
* kernels in the `kernels` array.
*
* ```js
* const profile = await tf.profile(() => {
* const x = tf.tensor1d([1, 2, 3]);
* let x2 = x.square();
* x2.dispose();
* x2 = x.square();
* x2.dispose();
* return x;
* });
*
* console.log(`newBytes: ${profile.newBytes}`);
* console.log(`newTensors: ${profile.newTensors}`);
* console.log(`byte usage over all kernels: ${profile.kernels.map(k =>
* k.totalBytesSnapshot)}`);
* ```
*
*
* @doc {heading: 'Performance', subheading: 'Profile'}
*/
function profile(f) {
    // Runs `f` under the engine's profiler; resolves with allocation stats.
    return _engine__WEBPACK_IMPORTED_MODULE_0__[/* ENGINE */ "a"].profile(f);
}
/**
* Executes the provided function `fn` and after it is executed, cleans up all
* intermediate tensors allocated by `fn` except those returned by `fn`.
* `fn` must not return a Promise (async functions not allowed). The returned
* result can be a complex object.
*
* Using this method helps avoid memory leaks. In general, wrap calls to
* operations in `tf.tidy` for automatic memory cleanup.
*
* NOTE: Variables do *not* get cleaned up when inside a tidy(). If you want to
* dispose variables, please use `tf.disposeVariables` or call dispose()
* directly on variables.
*
* ```js
* // y = 2 ^ 2 + 1
* const y = tf.tidy(() => {
* // a, b, and one will be cleaned up when the tidy ends.
* const one = tf.scalar(1);
* const a = tf.scalar(2);
* const b = a.square();
*
* console.log('numTensors (in tidy): ' + tf.memory().numTensors);
*
* // The value returned inside the tidy function will return
* // through the tidy, in this case to the variable y.
* return b.add(one);
* });
*
* console.log('numTensors (outside tidy): ' + tf.memory().numTensors);
* y.print();
* ```
*
* @param nameOrFn The name of the closure, or the function to execute.
* If a name is provided, the 2nd argument should be the function.
* If debug mode is on, the timing and the memory usage of the function
* will be tracked and displayed on the console using the provided name.
* @param fn The function to execute.
*
* @doc {heading: 'Performance', subheading: 'Memory'}
*/
function tidy(nameOrFn, fn) {
    // Delegates scoped-disposal bookkeeping to the global engine.
    return _engine__WEBPACK_IMPORTED_MODULE_0__[/* ENGINE */ "a"].tidy(nameOrFn, fn);
}
/**
* Disposes any `tf.Tensor`s found within the provided object.
*
* @param container an object that may be a `tf.Tensor` or may directly
* contain `tf.Tensor`s, such as a `Tensor[]` or `{key: Tensor, ...}`. If
* the object is not a `tf.Tensor` or does not contain `Tensors`, nothing
* happens. In general it is safe to pass any object here, except that
* `Promise`s are not supported.
*
* @doc {heading: 'Performance', subheading: 'Memory'}
*/
function dispose(container) {
    // Collect every tensor nested in `container`, then dispose each one.
    const tensors = Object(_tensor_util__WEBPACK_IMPORTED_MODULE_3__["getTensorsInContainer"])(container);
    tensors.forEach(tensor => tensor.dispose());
}
/**
* Keeps a `tf.Tensor` generated inside a `tf.tidy` from being disposed
* automatically.
*
* ```js
* let b;
* const y = tf.tidy(() => {
* const one = tf.scalar(1);
* const a = tf.scalar(2);
*
* // b will not be cleaned up by the tidy. a and one will be cleaned up
* // when the tidy ends.
* b = tf.keep(a.square());
*
* console.log('numTensors (in tidy): ' + tf.memory().numTensors);
*
* // The value returned inside the tidy function will return
* // through the tidy, in this case to the variable y.
* return b.add(one);
* });
*
* console.log('numTensors (outside tidy): ' + tf.memory().numTensors);
* console.log('y:');
* y.print();
* console.log('b:');
* b.print();
* ```
*
* @param result The tensor to keep from being disposed.
*
* @doc {heading: 'Performance', subheading: 'Memory'}
*/
function keep(result) {
    // Marks `result` so the enclosing tidy() will not dispose it.
    return _engine__WEBPACK_IMPORTED_MODULE_0__[/* ENGINE */ "a"].keep(result);
}
/**
* Executes `f()` and returns a promise that resolves with timing
* information.
*
* The result is an object with the following properties:
*
* - `wallMs`: Wall execution time.
* - `kernelMs`: Kernel execution time, ignoring data transfer. If using the
* WebGL backend and the query timer extension is not available, this will
* return an error object.
* - On `WebGL` The following additional properties exist:
* - `uploadWaitMs`: CPU blocking time on texture uploads.
* - `downloadWaitMs`: CPU blocking time on texture downloads (readPixels).
*
* ```js
* const x = tf.randomNormal([20, 20]);
* const time = await tf.time(() => x.matMul(x));
*
* console.log(`kernelMs: ${time.kernelMs}, wallTimeMs: ${time.wallMs}`);
* ```
*
* @param f The function to execute and time.
*
* @doc {heading: 'Performance', subheading: 'Timing'}
*/
function time(f) {
    // Delegates timing (wall + kernel time) to the global engine.
    return _engine__WEBPACK_IMPORTED_MODULE_0__[/* ENGINE */ "a"].time(f);
}
/**
* Sets the backend (cpu, webgl, wasm, etc) responsible for creating tensors and
* executing operations on those tensors. Returns a promise that resolves
* to a boolean if the backend initialization was successful.
*
* Note this disposes the current backend, if any, as well as any tensors
* associated with it. A new backend is initialized, even if it is of the
* same type as the previous one.
*
* @param backendName The name of the backend. Currently supports
* `'webgl'|'cpu'` in the browser, `'tensorflow'` under node.js
* (requires tfjs-node), and `'wasm'` (requires tfjs-backend-wasm).
*
* @doc {heading: 'Backends'}
*/
function setBackend(backendName) {
    // Returns a promise resolving to whether initialization succeeded.
    return _engine__WEBPACK_IMPORTED_MODULE_0__[/* ENGINE */ "a"].setBackend(backendName);
}
/**
* Returns a promise that resolves when the currently selected backend (or the
* highest priority one) has initialized. Await this promise when you are using
* a backend that has async initialization.
*
* @doc {heading: 'Backends'}
*/
function ready() {
    // Resolves once the selected backend has finished (async) initialization.
    return _engine__WEBPACK_IMPORTED_MODULE_0__[/* ENGINE */ "a"].ready();
}
/**
* Returns the current backend name (cpu, webgl, etc). The backend is
* responsible for creating tensors and executing operations on those tensors.
*
* @doc {heading: 'Backends'}
*/
function getBackend() {
    // Name string of the currently active backend (e.g. 'webgl', 'cpu').
    return _engine__WEBPACK_IMPORTED_MODULE_0__[/* ENGINE */ "a"].backendName;
}
/**
* Removes a backend and the registered factory.
*
* @doc {heading: 'Backends'}
*/
function removeBackend(name) {
    // Delegates to the global engine; drops the backend and its factory.
    _engine__WEBPACK_IMPORTED_MODULE_0__[/* ENGINE */ "a"].removeBackend(name);
}
/**
* Finds the backend registered under the provided name. Returns null if the
* name is not in the registry, or the registration hasn't finished yet.
*/
function findBackend(name) {
    // Returns the backend instance registered under `name`, or null.
    return _engine__WEBPACK_IMPORTED_MODULE_0__[/* ENGINE */ "a"].findBackend(name);
}
/**
* Finds the backend factory registered under the provided name. Returns a
* function that produces a new backend when called. Returns null if the name
* is not in the registry.
*/
function findBackendFactory(name) {
    // Returns the factory function registered under `name`, or null.
    return _engine__WEBPACK_IMPORTED_MODULE_0__[/* ENGINE */ "a"].findBackendFactory(name);
}
/**
* Registers a global backend. The registration should happen when importing
* a module file (e.g. when importing `backend_webgl.ts`), and is used for
* modular builds (e.g. custom tfjs bundle with only webgl support).
*
* @param factory The backend factory function. When called, it should
* return a backend instance, or a promise of an instance.
* @param priority The priority of the backend (higher = more important).
* In case multiple backends are registered, the priority is used to find
* the best backend. Defaults to 1.
* @return False if there is already a registered backend under this name, true
* if not.
*
* @doc {heading: 'Backends'}
*/
function registerBackend(name, factory, priority = 1) {
    // Returns false when a backend is already registered under this name.
    return _engine__WEBPACK_IMPORTED_MODULE_0__[/* ENGINE */ "a"].registerBackend(name, factory, priority);
}
/**
* Gets the current backend. If no backends have been initialized, this will
* attempt to initialize the best backend. Will throw an error if the highest
* priority backend has async initialization, in which case, you should call
* 'await tf.ready()' before running other code.
*
* @doc {heading: 'Backends'}
*/
function backend() {
    // Property access on the engine; per the JSDoc above, this may trigger
    // initialization of the highest-priority backend.
    return _engine__WEBPACK_IMPORTED_MODULE_0__[/* ENGINE */ "a"].backend;
}
/**
* Sets the global platform.
*
* @param platformName The name of this platform.
* @param platform A platform implementation.
*/
function setPlatform(platformName, platform) {
    // Forwards to the Environment singleton's setPlatform.
    Object(_environment__WEBPACK_IMPORTED_MODULE_1__[/* env */ "c"])().setPlatform(platformName, platform);
}
//# sourceMappingURL=globals.js.map
/***/ }),
/* 27 */
/***/ (function(module, __webpack_exports__, __webpack_require__) {
"use strict";
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "a", function() { return createSimpleBinaryKernelImpl; });
/* harmony import */ var _tensorflow_tfjs_core__WEBPACK_IMPORTED_MODULE_0__ = __webpack_require__(0);
/**
* @license
* Copyright 2020 Google LLC. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================================
*/
/**
* Template that creates implementation for binary ops. Supports broadcast.
*/
function createSimpleBinaryKernelImpl(op) {
    // Returns a kernel impl: given both operand shapes/values and the result
    // dtype, produces [resultValues, resultShape] with broadcasting applied.
    return (aShape, bShape, aVals, bVals, dtype) => {
        const newShape = _tensorflow_tfjs_core__WEBPACK_IMPORTED_MODULE_0__["backend_util"].assertAndGetBroadcastShape(aShape, bShape);
        const resultRank = newShape.length;
        const resultStrides = _tensorflow_tfjs_core__WEBPACK_IMPORTED_MODULE_0__["util"].computeStrides(newShape);
        const resultSize = _tensorflow_tfjs_core__WEBPACK_IMPORTED_MODULE_0__["util"].sizeFromShape(newShape);
        const result = _tensorflow_tfjs_core__WEBPACK_IMPORTED_MODULE_0__["util"].getTypedArrayFromDType(dtype, resultSize);
        const aRank = aShape.length;
        const bRank = bShape.length;
        const aStrides = _tensorflow_tfjs_core__WEBPACK_IMPORTED_MODULE_0__["util"].computeStrides(aShape);
        const bStrides = _tensorflow_tfjs_core__WEBPACK_IMPORTED_MODULE_0__["util"].computeStrides(bShape);
        const aBroadcastDims = _tensorflow_tfjs_core__WEBPACK_IMPORTED_MODULE_0__["backend_util"].getBroadcastDims(aShape, newShape);
        const bBroadcastDims = _tensorflow_tfjs_core__WEBPACK_IMPORTED_MODULE_0__["backend_util"].getBroadcastDims(bShape, newShape);
        if (aBroadcastDims.length + bBroadcastDims.length === 0) {
            // Fast path: no dimension is broadcast, so flat indices line up
            // directly (the modulo handles equal-size inputs cheaply).
            for (let i = 0; i < result.length; ++i) {
                result[i] = op(aVals[i % aVals.length], bVals[i % bVals.length]);
            }
        }
        else {
            // General path: map each flat output index to a coordinate, zero
            // out the broadcast dimensions for each operand, and convert the
            // adjusted coordinate back to that operand's flat index.
            for (let i = 0; i < result.length; ++i) {
                const loc = _tensorflow_tfjs_core__WEBPACK_IMPORTED_MODULE_0__["util"].indexToLoc(i, resultRank, resultStrides);
                const aLoc = loc.slice(-aRank);
                aBroadcastDims.forEach(d => aLoc[d] = 0);
                const aIndex = _tensorflow_tfjs_core__WEBPACK_IMPORTED_MODULE_0__["util"].locToIndex(aLoc, aRank, aStrides);
                const bLoc = loc.slice(-bRank);
                bBroadcastDims.forEach(d => bLoc[d] = 0);
                const bIndex = _tensorflow_tfjs_core__WEBPACK_IMPORTED_MODULE_0__["util"].locToIndex(bLoc, bRank, bStrides);
                result[i] = op(aVals[aIndex], bVals[bIndex]);
            }
        }
        return [result, newShape];
    };
}
//# sourceMappingURL=binary_impl.js.map
/***/ }),
/* 28 */
/***/ (function(module, __webpack_exports__, __webpack_require__) {
"use strict";
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "a", function() { return slice; });
/* harmony import */ var _engine__WEBPACK_IMPORTED_MODULE_0__ = __webpack_require__(5);
/* harmony import */ var _kernel_names__WEBPACK_IMPORTED_MODULE_1__ = __webpack_require__(3);
/* harmony import */ var _tensor_util_env__WEBPACK_IMPORTED_MODULE_2__ = __webpack_require__(2);
/* harmony import */ var _operation__WEBPACK_IMPORTED_MODULE_3__ = __webpack_require__(4);
/**
* @license
* Copyright 2018 Google LLC. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================================
*/
/**
* Extracts a slice from a `tf.Tensor` starting at coordinates `begin`
* and is of size `size`.
*
* Also available are stricter rank-specific methods with the same signature
* as this method that assert that `x` is of the given rank:
* - `tf.slice1d`
* - `tf.slice2d`
* - `tf.slice3d`
* - `tf.slice4d`
*
* ```js
* const x = tf.tensor1d([1, 2, 3, 4]);
*
* x.slice([1], [2]).print();
* ```
*
* ```js
* const x = tf.tensor2d([1, 2, 3, 4], [2, 2]);
*
* x.slice([1, 0], [1, 2]).print();
* ```
* @param x The input `tf.Tensor` to slice from.
* @param begin The coordinates to start the slice from. The length can be
* less than the rank of x - the rest of the axes will have implicit 0 as
* start. Can also be a single number, in which case it specifies the
* first axis.
* @param size The size of the slice. The length can be less than the rank of
* x - the rest of the axes will have implicit -1. A value of -1 requests
* the rest of the dimensions in the axis. Can also be a single number,
* in which case it specifies the size of the first axis.
*
* @doc {heading: 'Tensors', subheading: 'Slicing and Joining'}
*/
// Converts the input, rejects rank-0 tensors (nothing to slice), then
// dispatches the Slice kernel. Wrapped by op() below to produce tf.slice.
function slice_(x, begin, size) {
    const $x = Object(_tensor_util_env__WEBPACK_IMPORTED_MODULE_2__[/* convertToTensor */ "a"])(x, 'x', 'slice', 'string_or_numeric');
    if ($x.rank === 0) {
        throw new Error('Slicing scalar is not possible');
    }
    return _engine__WEBPACK_IMPORTED_MODULE_0__[/* ENGINE */ "a"].runKernel(_kernel_names__WEBPACK_IMPORTED_MODULE_1__[/* Slice */ "Fc"], { x: $x }, { begin, size });
}
const slice = Object(_operation__WEBPACK_IMPORTED_MODULE_3__[/* op */ "b"])({ slice_ });
//# sourceMappingURL=slice.js.map
/***/ }),
/* 29 */
/***/ (function(module, __webpack_exports__, __webpack_require__) {
"use strict";
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "a", function() { return binaryKernelFunc; });
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "b", function() { return createComplexBinaryKernelImpl; });
/* harmony import */ var _tensorflow_tfjs_core__WEBPACK_IMPORTED_MODULE_0__ = __webpack_require__(0);
/* harmony import */ var _cpu_util__WEBPACK_IMPORTED_MODULE_1__ = __webpack_require__(11);
/* harmony import */ var _kernels_Cast__WEBPACK_IMPORTED_MODULE_2__ = __webpack_require__(78);
/* harmony import */ var _kernels_Complex__WEBPACK_IMPORTED_MODULE_3__ = __webpack_require__(38);
/**
* @license
* Copyright 2020 Google LLC. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================================
*/
/**
* Template that creates a `KernelFunc` for binary ops.
* @param name Kernel name.
* @param binaryKernelImpl A `SimpleBinaryKernelImpl` for the kernel.
* @param binaryKernelComplexImpl Optional. If exists, represents a
* `ComplexBinaryKernelImpl` for the kernel, will be used when input dtype
* is `complex64`.
* @param dtype Optional. If set, the result has this dtype. Otherwise, the
* result has the same dtype as the first input. This is mainly used in
* comparison kernels, such as Equal, Less, Greater, etc.
*/
// Builds a CPU `KernelFunc` for a binary op named `name`.
// `simpleImpl` computes the op on real-valued typed arrays;
// `complexImpl` (optional) computes it on separated real/imag parts;
// `dtype` (optional) forces the output dtype — otherwise the output
// takes the first operand's dtype.
function binaryKernelFunc(name, simpleImpl, complexImpl, dtype) {
// No complex implementation supplied: the kernel rejects complex64
// inputs (assertNotComplex throws) and always runs the simple path.
if (complexImpl == null) {
return ({ inputs, backend }) => {
const { a, b } = inputs;
const cpuBackend = backend;
Object(_cpu_util__WEBPACK_IMPORTED_MODULE_1__[/* assertNotComplex */ "a"])([a, b], name);
const aVals = cpuBackend.data.get(a.dataId).values;
const bVals = cpuBackend.data.get(b.dataId).values;
// Output dtype defaults to the first operand's dtype.
const $dtype = dtype || a.dtype;
const [resultData, resultShape] = simpleImpl(a.shape, b.shape, aVals, bVals, $dtype);
return cpuBackend.makeTensorInfo(resultShape, $dtype, resultData);
};
}
return ({ inputs, backend }) => {
const { a, b } = inputs;
const cpuBackend = backend;
// If either operand is complex64, cast BOTH to complex64 and run the
// complex implementation on their real/imag component tensors.
if (a.dtype === 'complex64' || b.dtype === 'complex64') {
const $aComplex = Object(_kernels_Cast__WEBPACK_IMPORTED_MODULE_2__[/* cast */ "a"])({ inputs: { x: a }, backend: cpuBackend, attrs: { dtype: 'complex64' } });
const $aComplexVals = cpuBackend.data.get($aComplex.dataId);
const aReal = $aComplexVals.complexTensorInfos.real;
const aImag = $aComplexVals.complexTensorInfos.imag;
const aRealVals = cpuBackend.data.get(aReal.dataId).values;
const aImagVals = cpuBackend.data.get(aImag.dataId).values;
const $bComplex = Object(_kernels_Cast__WEBPACK_IMPORTED_MODULE_2__[/* cast */ "a"])({ inputs: { x: b }, backend: cpuBackend, attrs: { dtype: 'complex64' } });
const $bComplexVals = cpuBackend.data.get($bComplex.dataId);
const bReal = $bComplexVals.complexTensorInfos.real;
const bImag = $bComplexVals.complexTensorInfos.imag;
const bRealVals = cpuBackend.data.get(bReal.dataId).values;
const bImagVals = cpuBackend.data.get(bImag.dataId).values;
const [resultRealData, resultImagData, resultShape] = complexImpl(a.shape, b.shape, aRealVals, aImagVals, bRealVals, bImagVals);
// Re-assemble the complex output from the two float32 component tensors.
const resultReal = cpuBackend.makeTensorInfo(resultShape, 'float32', resultRealData);
const resultImag = cpuBackend.makeTensorInfo(resultShape, 'float32', resultImagData);
const result = Object(_kernels_Complex__WEBPACK_IMPORTED_MODULE_3__[/* complex */ "a"])({ inputs: { real: resultReal, imag: resultImag }, backend: cpuBackend });
// Release the casts and the component tensors; `complex` holds its own
// references, so only the intermediates created here are disposed.
cpuBackend.disposeIntermediateTensorInfo($aComplex);
cpuBackend.disposeIntermediateTensorInfo($bComplex);
cpuBackend.disposeIntermediateTensorInfo(resultReal);
cpuBackend.disposeIntermediateTensorInfo(resultImag);
return result;
}
else {
// Real-valued path: same as the no-complexImpl kernel above.
const aVals = cpuBackend.data.get(a.dataId).values;
const bVals = cpuBackend.data.get(b.dataId).values;
const $dtype = dtype || a.dtype;
const [resultData, resultShape] = simpleImpl(a.shape, b.shape, aVals, bVals, $dtype);
return cpuBackend.makeTensorInfo(resultShape, $dtype, resultData);
}
};
}
/**
* Template that creates the complex type implementation for binary ops.
* Supports broadcast.
*/
// Wraps a scalar complex op `op(aReal, aImag, bReal, bImag) -> {real, imag}`
// into a broadcasting kernel implementation over flat component arrays.
function createComplexBinaryKernelImpl(op) {
return (aShape, bShape, aRealVals, aImagVals, bRealVals, bImagVals) => {
const resultShape = _tensorflow_tfjs_core__WEBPACK_IMPORTED_MODULE_0__["backend_util"].assertAndGetBroadcastShape(aShape, bShape);
const resultSize = _tensorflow_tfjs_core__WEBPACK_IMPORTED_MODULE_0__["util"].sizeFromShape(resultShape);
const resultRank = resultShape.length;
const resultStrides = _tensorflow_tfjs_core__WEBPACK_IMPORTED_MODULE_0__["util"].computeStrides(resultShape);
const resultRealVals = _tensorflow_tfjs_core__WEBPACK_IMPORTED_MODULE_0__["util"].getTypedArrayFromDType('float32', resultSize);
const resultImagVals = _tensorflow_tfjs_core__WEBPACK_IMPORTED_MODULE_0__["util"].getTypedArrayFromDType('float32', resultSize);
const aBroadcastDims = _tensorflow_tfjs_core__WEBPACK_IMPORTED_MODULE_0__["backend_util"].getBroadcastDims(aShape, resultShape);
const bBroadcastDims = _tensorflow_tfjs_core__WEBPACK_IMPORTED_MODULE_0__["backend_util"].getBroadcastDims(bShape, resultShape);
// mergeRealAndImagArrays interleaves components, so element i lives at
// [2*i] (real) and [2*i + 1] (imag) in aVals/bVals below.
const aVals = _tensorflow_tfjs_core__WEBPACK_IMPORTED_MODULE_0__["backend_util"].mergeRealAndImagArrays(aRealVals, aImagVals);
const bVals = _tensorflow_tfjs_core__WEBPACK_IMPORTED_MODULE_0__["backend_util"].mergeRealAndImagArrays(bRealVals, bImagVals);
const aRank = aShape.length;
const aStrides = _tensorflow_tfjs_core__WEBPACK_IMPORTED_MODULE_0__["util"].computeStrides(aShape);
const bRank = bShape.length;
const bStrides = _tensorflow_tfjs_core__WEBPACK_IMPORTED_MODULE_0__["util"].computeStrides(bShape);
// Fast path: no per-dimension broadcasting needed, so simple modulo
// indexing cycles the smaller operand over the larger one.
if (aBroadcastDims.length + bBroadcastDims.length === 0) {
for (let i = 0; i < resultRealVals.length; i++) {
const aIdx = i % aVals.length;
const bIdx = i % bVals.length;
const result = op(aVals[aIdx * 2], aVals[aIdx * 2 + 1], bVals[bIdx * 2], bVals[bIdx * 2 + 1]);
resultRealVals[i] = result.real;
resultImagVals[i] = result.imag;
}
}
else {
// General path: map each flat output index to a multi-dim location,
// zero out the broadcast dimensions, and convert back to each
// operand's flat index.
for (let i = 0; i < resultRealVals.length; i++) {
const loc = _tensorflow_tfjs_core__WEBPACK_IMPORTED_MODULE_0__["util"].indexToLoc(i, resultRank, resultStrides);
const aLoc = loc.slice(-aRank);
aBroadcastDims.forEach(d => aLoc[d] = 0);
const aIndex = _tensorflow_tfjs_core__WEBPACK_IMPORTED_MODULE_0__["util"].locToIndex(aLoc, aRank, aStrides);
const bLoc = loc.slice(-bRank);
bBroadcastDims.forEach(d => bLoc[d] = 0);
const bIndex = _tensorflow_tfjs_core__WEBPACK_IMPORTED_MODULE_0__["util"].locToIndex(bLoc, bRank, bStrides);
const opResult = op(aVals[aIndex * 2], aVals[aIndex * 2 + 1], bVals[bIndex * 2], bVals[bIndex * 2 + 1]);
resultRealVals[i] = opResult.real;
resultImagVals[i] = opResult.imag;
}
}
return [resultRealVals, resultImagVals, resultShape];
};
}
//# sourceMappingURL=binary_utils.js.map
/***/ }),
/* 30 */
/***/ (function(module, __webpack_exports__, __webpack_require__) {
"use strict";
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "a", function() { return neg; });
/* harmony import */ var _engine__WEBPACK_IMPORTED_MODULE_0__ = __webpack_require__(5);
/* harmony import */ var _kernel_names__WEBPACK_IMPORTED_MODULE_1__ = __webpack_require__(3);
/* harmony import */ var _tensor_util_env__WEBPACK_IMPORTED_MODULE_2__ = __webpack_require__(2);
/* harmony import */ var _operation__WEBPACK_IMPORTED_MODULE_3__ = __webpack_require__(4);
/**
* @license
* Copyright 2018 Google LLC. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================================
*/
/**
* Computes `-1 * x` element-wise.
*
* ```js
* const x = tf.tensor2d([1, 2, -2, 0], [2, 2]);
*
* x.neg().print(); // or tf.neg(x)
* ```
*
* @param x The input tensor.
*
* @doc {heading: 'Operations', subheading: 'Basic math'}
*/
/**
 * Implementation behind `tf.neg`: converts the input to a tensor and
 * dispatches the Neg kernel. The public `neg` op is produced by the
 * `op()` wrapper below.
 */
function neg_(x) {
    const $x = Object(_tensor_util_env__WEBPACK_IMPORTED_MODULE_2__[/* convertToTensor */ "a"])(x, 'x', 'neg');
    // Pass the kernel inputs inline instead of via a named local.
    return _engine__WEBPACK_IMPORTED_MODULE_0__[/* ENGINE */ "a"].runKernel(_kernel_names__WEBPACK_IMPORTED_MODULE_1__[/* Neg */ "Wb"], { x: $x });
}
const neg = Object(_operation__WEBPACK_IMPORTED_MODULE_3__[/* op */ "b"])({ neg_ });
//# sourceMappingURL=neg.js.map
/***/ }),
/* 31 */
/***/ (function(module, __webpack_exports__, __webpack_require__) {
"use strict";
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "d", function() { return computeDilation2DInfo; });
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "e", function() { return computePool2DInfo; });
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "f", function() { return computePool3DInfo; });
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "a", function() { return computeConv2DInfo; });
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "b", function() { return computeConv3DInfo; });
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "c", function() { return computeDefaultPad; });
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "i", function() { return tupleValuesAreOne; });
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "h", function() { return eitherStridesOrDilationsAreOne; });
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "g", function() { return convertConv2DDataFormat; });
/**
* @license
* Copyright 2020 Google LLC. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================================
*/
/**
*
* @param inputShape Input tensor shape is of the following dimensions:
* `[batch, height, width, inChannels]`.
* @param filterShape The filter shape is of the following dimensions:
* `[filterHeight, filterWidth, depth]`.
* @param strides The strides of the sliding window for each dimension of the
* input tensor: `[strideHeight, strideWidth]`.
* If `strides` is a single number,
* then `strideHeight == strideWidth`.
* @param pad The type of padding algorithm.
* - `same` and stride 1: output will be of same size as input,
* regardless of filter size.
* - `valid`: output will be smaller than input if filter is larger
* than 1*1x1.
* - For more info, see this guide:
* [https://www.tensorflow.org/api_guides/python/nn#Convolution](
* https://www.tensorflow.org/api_guides/python/nn#Convolution)
* @param dataFormat The data format of the input and output data.
* Defaults to 'NHWC'.
* @param dilations The dilation rates: `[dilationHeight, dilationWidth]`.
* Defaults to `[1, 1]`. If `dilations` is a single number, then
* `dilationHeight == dilationWidth`.
*/
/**
 * Builds conv info for dilation2d by reusing computeConv2DInfo.
 * computeConv2DInfo expects the filter shape as
 * `[filterHeight, filterWidth, depth, outDepth]`; dilation2d has no
 * separate outDepth, so the input's channel count is appended.
 */
function computeDilation2DInfo(inputShape, filterShape, strides, pad, dataFormat = 'NHWC', dilations) {
    // Input shape is [batch, height, width, inChannels].
    const fullFilterShape = filterShape.concat([inputShape[3]]);
    return computeConv2DInfo(inputShape, fullFilterShape, strides, dilations, pad, null /* roundingMode */, null /* depthWise */, convertConv2DDataFormat(dataFormat));
}
/**
 * Computes the information for a forward pass of a pooling2D operation.
 * Pooling is modeled as a convolution whose filter has the same channel
 * count in and out, so the channel dimension of `inShape` is duplicated
 * into the filter shape.
 */
function computePool2DInfo(inShape, filterSize, strides, dilations, pad, roundingMode, dataFormat = 'channelsLast') {
    const [filterHeight, filterWidth] = parseTupleParam(filterSize);
    let channelAxis;
    if (dataFormat === 'channelsLast') {
        channelAxis = 3;
    }
    else if (dataFormat === 'channelsFirst') {
        channelAxis = 1;
    }
    else {
        throw new Error(`Unknown dataFormat ${dataFormat}`);
    }
    const channels = inShape[channelAxis];
    const filterShape = [filterHeight, filterWidth, channels, channels];
    return computeConv2DInfo(inShape, filterShape, strides, dilations, pad, roundingMode, false, dataFormat);
}
/**
* Computes the information for a forward pass of a pooling3D operation.
*/
/**
 * Computes the information for a forward pass of a pooling3D operation.
 * As with 2D pooling, the channel count of `inShape` is duplicated into
 * the filter shape before delegating to computeConv3DInfo.
 */
function computePool3DInfo(inShape, filterSize, strides, dilations, pad, roundingMode, dataFormat = 'NDHWC') {
    const [filterDepth, filterHeight, filterWidth] = parse3TupleParam(filterSize);
    let channelAxis;
    let $dataFormat;
    if (dataFormat === 'NDHWC') {
        $dataFormat = 'channelsLast';
        channelAxis = 4;
    }
    else if (dataFormat === 'NCDHW') {
        $dataFormat = 'channelsFirst';
        channelAxis = 1;
    }
    else {
        throw new Error(`Unknown dataFormat ${dataFormat}`);
    }
    const channels = inShape[channelAxis];
    const filterShape = [filterDepth, filterHeight, filterWidth, channels, channels];
    return computeConv3DInfo(inShape, filterShape, strides, dilations, pad, false, $dataFormat, roundingMode);
}
/**
* Computes the information for a forward pass of a convolution/pooling
* operation.
*/
/**
 * Computes the information for a forward pass of a convolution/pooling
 * operation.
 */
function computeConv2DInfo(inShape, filterShape, strides, dilations, pad, roundingMode, depthwise = false, dataFormat = 'channelsLast') {
let [batchSize, inHeight, inWidth, inChannels] = [-1, -1, -1, -1];
// Unpack the input shape according to the data format.
if (dataFormat === 'channelsLast') {
[batchSize, inHeight, inWidth, inChannels] = inShape;
}
else if (dataFormat === 'channelsFirst') {
[batchSize, inChannels, inHeight, inWidth] = inShape;
}
else {
throw new Error(`Unknown dataFormat ${dataFormat}`);
}
// filterShape is [filterHeight, filterWidth, inDepth, outDepth]; the
// third entry (input depth) is unused here.
const [filterHeight, filterWidth, , filterChannels] = filterShape;
const [strideHeight, strideWidth] = parseTupleParam(strides);
const [dilationHeight, dilationWidth] = parseTupleParam(dilations);
// Effective sizes account for dilation (zeros inserted between taps).
const effectiveFilterHeight = getEffectiveFilterSize(filterHeight, dilationHeight);
const effectiveFilterWidth = getEffectiveFilterSize(filterWidth, dilationWidth);
const { padInfo, outHeight, outWidth } = getPadAndOutInfo(pad, inHeight, inWidth, strideHeight, strideWidth, effectiveFilterHeight, effectiveFilterWidth, roundingMode, dataFormat);
// Depthwise convs multiply channels; regular convs take the filter's
// output-channel count directly.
const outChannels = depthwise ? filterChannels * inChannels : filterChannels;
let outShape;
if (dataFormat === 'channelsFirst') {
outShape = [batchSize, outChannels, outHeight, outWidth];
}
else if (dataFormat === 'channelsLast') {
outShape = [batchSize, outHeight, outWidth, outChannels];
}
return {
batchSize,
dataFormat,
inHeight,
inWidth,
inChannels,
outHeight,
outWidth,
outChannels,
padInfo,
strideHeight,
strideWidth,
filterHeight,
filterWidth,
effectiveFilterHeight,
effectiveFilterWidth,
dilationHeight,
dilationWidth,
inShape,
outShape,
filterShape
};
}
/**
* Computes the information for a forward pass of a 3D convolution/pooling
* operation.
*/
/**
 * Computes the information for a forward pass of a 3D convolution/pooling
 * operation.
 */
function computeConv3DInfo(inShape, filterShape, strides, dilations, pad, depthwise = false, dataFormat = 'channelsLast', roundingMode) {
let [batchSize, inDepth, inHeight, inWidth, inChannels] = [-1, -1, -1, -1, -1];
// Unpack the 5D input shape according to the data format.
if (dataFormat === 'channelsLast') {
[batchSize, inDepth, inHeight, inWidth, inChannels] = inShape;
}
else if (dataFormat === 'channelsFirst') {
[batchSize, inChannels, inDepth, inHeight, inWidth] = inShape;
}
else {
throw new Error(`Unknown dataFormat ${dataFormat}`);
}
// filterShape is [fd, fh, fw, inDepth, outDepth]; input depth is unused.
const [filterDepth, filterHeight, filterWidth, , filterChannels] = filterShape;
const [strideDepth, strideHeight, strideWidth] = parse3TupleParam(strides);
const [dilationDepth, dilationHeight, dilationWidth] = parse3TupleParam(dilations);
// Effective sizes account for dilation (zeros inserted between taps).
const effectiveFilterDepth = getEffectiveFilterSize(filterDepth, dilationDepth);
const effectiveFilterHeight = getEffectiveFilterSize(filterHeight, dilationHeight);
const effectiveFilterWidth = getEffectiveFilterSize(filterWidth, dilationWidth);
const { padInfo, outDepth, outHeight, outWidth } = get3DPadAndOutInfo(pad, inDepth, inHeight, inWidth, strideDepth, strideHeight, strideWidth, effectiveFilterDepth, effectiveFilterHeight, effectiveFilterWidth, roundingMode);
// Depthwise convs multiply channels; regular convs take the filter's
// output-channel count directly.
const outChannels = depthwise ? filterChannels * inChannels : filterChannels;
let outShape;
if (dataFormat === 'channelsFirst') {
outShape = [batchSize, outChannels, outDepth, outHeight, outWidth];
}
else if (dataFormat === 'channelsLast') {
outShape = [batchSize, outDepth, outHeight, outWidth, outChannels];
}
return {
batchSize,
dataFormat,
inDepth,
inHeight,
inWidth,
inChannels,
outDepth,
outHeight,
outWidth,
outChannels,
padInfo,
strideDepth,
strideHeight,
strideWidth,
filterDepth,
filterHeight,
filterWidth,
effectiveFilterDepth,
effectiveFilterHeight,
effectiveFilterWidth,
dilationDepth,
dilationHeight,
dilationWidth,
inShape,
outShape,
filterShape
};
}
/**
 * Computes the [rows, cols] output shape of a 2D conv/pool given numeric
 * zero padding. When `zeroPad` is null/undefined, the default padding is
 * derived from the input shape.
 */
function computeOutputShape2D(inShape, fieldSize, stride, zeroPad, roundingMode) {
    if (zeroPad == null) {
        zeroPad = computeDefaultPad(inShape, fieldSize, stride);
    }
    const [inputRows, inputCols] = inShape;
    // Standard conv output size: (in - field + 2*pad) / stride + 1.
    const outputDim = (inputSize) => round((inputSize - fieldSize + 2 * zeroPad) / stride + 1, roundingMode);
    return [outputDim(inputRows), outputDim(inputCols)];
}
/**
 * Computes the [depth, rows, cols, channels] output shape of a 3D
 * conv/pool given numeric zero padding. When `zeroPad` is
 * null/undefined, the default padding is derived from the input shape.
 */
function computeOutputShape4D(inShape, fieldSize, outChannels, stride, zeroPad, roundingMode) {
    if (zeroPad == null) {
        zeroPad = computeDefaultPad(inShape, fieldSize, stride);
    }
    // Standard conv output size: (in - field + 2*pad) / stride + 1,
    // applied to the depth, row and column dimensions respectively.
    const outputDim = (inputSize) => round((inputSize - fieldSize + 2 * zeroPad) / stride + 1, roundingMode);
    return [outputDim(inShape[0]), outputDim(inShape[1]), outputDim(inShape[2]), outChannels];
}
/**
 * Default zero padding for a conv/pool: the amount that keeps the output
 * the same size as the input for stride 1, based on the dilation-adjusted
 * filter size and the first input dimension.
 */
function computeDefaultPad(inputShape, fieldSize, stride, dilation = 1) {
    const effectiveField = getEffectiveFilterSize(fieldSize, dilation);
    const totalPad = inputShape[0] * (stride - 1) - stride + effectiveField;
    return Math.floor(totalPad / 2);
}
/**
 * Normalizes a size/stride/dilation parameter to a 3-tuple.
 * A bare number is replicated to all three slots; a 2-element array gets
 * an implicit 1 appended; anything else is returned unchanged.
 */
function parseTupleParam(param) {
    if (typeof param === 'number') {
        return [param, param, param];
    }
    return param.length === 2 ? [param[0], param[1], 1] : param;
}
/**
 * Normalizes a 3D size/stride/dilation parameter: a bare number is
 * replicated into a 3-tuple, an array is returned as-is.
 */
function parse3TupleParam(param) {
    if (typeof param === 'number') {
        return [param, param, param];
    }
    return param;
}
/* See https://www.tensorflow.org/api_docs/python/tf/nn/atrous_conv2d
* Atrous convolution is equivalent to standard convolution with upsampled
* filters with effective_filter_height =
* filter_height + (filter_height - 1) * (dilation - 1)
* and effective_filter_width =
* filter_width + (filter_width - 1) * (dilation - 1),
* produced by inserting dilation - 1 zeros along consecutive elements across
* the filters' spatial dimensions.
* When there is a dilation, this converts a filter dimension to the
* effective filter dimension, so it can be used in a standard convolution.
*/
/**
 * Converts a filter dimension to its dilation-adjusted ("effective")
 * size: filterSize + (filterSize - 1) * (dilation - 1). A dilation of 1
 * (or less) leaves the size unchanged.
 */
function getEffectiveFilterSize(filterSize, dilation) {
    return dilation <= 1
        ? filterSize
        : filterSize + (filterSize - 1) * (dilation - 1);
}
// Resolves the padding specification for a 2D conv/pool into explicit
// per-side amounts plus the resulting output height/width. `pad` may be a
// number (uniform), 'same', 'valid', or an explicit per-dimension array.
function getPadAndOutInfo(pad, inHeight, inWidth, strideHeight, strideWidth, filterHeight, filterWidth, roundingMode, dataFormat) {
let padInfo;
let outHeight;
let outWidth;
if (typeof pad === 'number') {
// Uniform numeric padding on all four sides; 0 behaves like 'valid'.
const padType = (pad === 0) ? 'VALID' : 'NUMBER';
padInfo = { top: pad, bottom: pad, left: pad, right: pad, type: padType };
const outShape = computeOutputShape2D([inHeight, inWidth], filterHeight, strideHeight, pad, roundingMode);
outHeight = outShape[0];
outWidth = outShape[1];
}
else if (pad === 'same') {
// 'same': output is ceil(in/stride); total padding is whatever makes
// the filter cover the input, clamped at 0, split with the extra
// pixel going to bottom/right.
outHeight = Math.ceil(inHeight / strideHeight);
outWidth = Math.ceil(inWidth / strideWidth);
const padAlongHeight = Math.max(0, (outHeight - 1) * strideHeight + filterHeight - inHeight);
const padAlongWidth = Math.max(0, (outWidth - 1) * strideWidth + filterWidth - inWidth);
const top = Math.floor(padAlongHeight / 2);
const bottom = padAlongHeight - top;
const left = Math.floor(padAlongWidth / 2);
const right = padAlongWidth - left;
padInfo = { top, bottom, left, right, type: 'SAME' };
}
else if (pad === 'valid') {
// 'valid': no padding; only full filter placements count.
padInfo = { top: 0, bottom: 0, left: 0, right: 0, type: 'VALID' };
outHeight = Math.ceil((inHeight - filterHeight + 1) / strideHeight);
outWidth = Math.ceil((inWidth - filterWidth + 1) / strideWidth);
}
else if (typeof pad === 'object') {
// Explicit padding: pad is indexed per input dimension, so the
// height pair is pad[1] for channelsLast and pad[2] for
// channelsFirst (width is the following dimension).
const top = dataFormat === 'channelsLast' ? pad[1][0] : pad[2][0];
const bottom = dataFormat === 'channelsLast' ? pad[1][1] : pad[2][1];
const left = dataFormat === 'channelsLast' ? pad[2][0] : pad[3][0];
const right = dataFormat === 'channelsLast' ? pad[2][1] : pad[3][1];
const padType = (top === 0 && bottom === 0 && left === 0 && right === 0) ?
'VALID' :
'EXPLICIT';
padInfo = { top, bottom, left, right, type: padType };
outHeight = round((inHeight - filterHeight + top + bottom) / strideHeight + 1, roundingMode);
outWidth = round((inWidth - filterWidth + left + right) / strideWidth + 1, roundingMode);
}
else {
throw Error(`Unknown padding parameter: ${pad}`);
}
return { padInfo, outHeight, outWidth };
}
// Resolves the padding specification for a 3D conv/pool into explicit
// per-side amounts plus the resulting output depth/height/width. `pad`
// may be a number (uniform), 'same', or 'valid'.
//
// Fix: the 'same' branch now clamps the per-axis padding at 0, matching
// the 2D getPadAndOutInfo above. Without the clamp, an effective filter
// smaller than the stride produced negative top/bottom/left/right/
// front/back values.
function get3DPadAndOutInfo(pad, inDepth, inHeight, inWidth, strideDepth, strideHeight, strideWidth, filterDepth, filterHeight, filterWidth, roundingMode) {
    let padInfo;
    let outDepth;
    let outHeight;
    let outWidth;
    if (typeof pad === 'number') {
        // Uniform numeric padding on all six sides; 0 behaves like 'valid'.
        const padType = (pad === 0) ? 'VALID' : 'NUMBER';
        padInfo = {
            top: pad,
            bottom: pad,
            left: pad,
            right: pad,
            front: pad,
            back: pad,
            type: padType
        };
        const outShape = computeOutputShape4D([inDepth, inHeight, inWidth, 1], filterDepth, 1, strideDepth, pad, roundingMode);
        outDepth = outShape[0];
        outHeight = outShape[1];
        outWidth = outShape[2];
    }
    else if (pad === 'same') {
        // 'same': output is ceil(in/stride); total padding per axis is
        // whatever makes the filter cover the input, clamped at 0, split
        // with the extra pixel going to back/bottom/right.
        outDepth = Math.ceil(inDepth / strideDepth);
        outHeight = Math.ceil(inHeight / strideHeight);
        outWidth = Math.ceil(inWidth / strideWidth);
        const padAlongDepth = Math.max(0, (outDepth - 1) * strideDepth + filterDepth - inDepth);
        const padAlongHeight = Math.max(0, (outHeight - 1) * strideHeight + filterHeight - inHeight);
        const padAlongWidth = Math.max(0, (outWidth - 1) * strideWidth + filterWidth - inWidth);
        const front = Math.floor(padAlongDepth / 2);
        const back = padAlongDepth - front;
        const top = Math.floor(padAlongHeight / 2);
        const bottom = padAlongHeight - top;
        const left = Math.floor(padAlongWidth / 2);
        const right = padAlongWidth - left;
        padInfo = { top, bottom, left, right, front, back, type: 'SAME' };
    }
    else if (pad === 'valid') {
        // 'valid': no padding; only full filter placements count.
        padInfo = {
            top: 0,
            bottom: 0,
            left: 0,
            right: 0,
            front: 0,
            back: 0,
            type: 'VALID'
        };
        outDepth = Math.ceil((inDepth - filterDepth + 1) / strideDepth);
        outHeight = Math.ceil((inHeight - filterHeight + 1) / strideHeight);
        outWidth = Math.ceil((inWidth - filterWidth + 1) / strideWidth);
    }
    else {
        throw Error(`Unknown padding parameter: ${pad}`);
    }
    return { padInfo, outDepth, outHeight, outWidth };
}
/**
* Rounds a value depending on the rounding mode
* @param value
* @param roundingMode A string from: 'ceil', 'round', 'floor'. If none is
* provided, it will default to truncate.
*/
/**
 * Rounds a value depending on the rounding mode
 * @param value
 * @param roundingMode A string from: 'ceil', 'round', 'floor'. If none is
 * provided, it will default to truncate.
 */
function round(value, roundingMode) {
    if (!roundingMode) {
        return Math.trunc(value);
    }
    if (roundingMode === 'round') {
        // used for Caffe Conv
        return Math.round(value);
    }
    if (roundingMode === 'ceil') {
        // used for Caffe Pool
        return Math.ceil(value);
    }
    if (roundingMode === 'floor') {
        return Math.floor(value);
    }
    throw new Error(`Unknown roundingMode ${roundingMode}`);
}
/**
 * True when the (normalized) 3-tuple is all ones — i.e. a stride or
 * dilation that has no effect.
 */
function tupleValuesAreOne(param) {
    const [d0, d1, d2] = parseTupleParam(param);
    return d0 === 1 && d1 === 1 && d2 === 1;
}
/**
 * True when at least one of strides/dilations is all ones (TF requires
 * that strides and dilations are not both non-unit).
 */
function eitherStridesOrDilationsAreOne(strides, dilations) {
    if (tupleValuesAreOne(strides)) {
        return true;
    }
    return tupleValuesAreOne(dilations);
}
/**
* Convert Conv2D dataFormat from 'NHWC'|'NCHW' to
* 'channelsLast'|'channelsFirst'
* @param dataFormat in 'NHWC'|'NCHW' mode
* @return dataFormat in 'channelsLast'|'channelsFirst' mode
* @throws unknown dataFormat
*/
/**
 * Convert Conv2D dataFormat from 'NHWC'|'NCHW' to
 * 'channelsLast'|'channelsFirst'
 * @param dataFormat in 'NHWC'|'NCHW' mode
 * @return dataFormat in 'channelsLast'|'channelsFirst' mode
 * @throws unknown dataFormat
 */
function convertConv2DDataFormat(dataFormat) {
    switch (dataFormat) {
        case 'NHWC':
            return 'channelsLast';
        case 'NCHW':
            return 'channelsFirst';
        default:
            throw new Error(`Unknown dataFormat ${dataFormat}`);
    }
}
//# sourceMappingURL=conv_util.js.map
/***/ }),
/* 32 */
/***/ (function(module, __webpack_exports__, __webpack_require__) {
"use strict";
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "a", function() { return concat; });
/* harmony import */ var _engine__WEBPACK_IMPORTED_MODULE_0__ = __webpack_require__(5);
/* harmony import */ var _kernel_names__WEBPACK_IMPORTED_MODULE_1__ = __webpack_require__(3);
/* harmony import */ var _tensor_util_env__WEBPACK_IMPORTED_MODULE_2__ = __webpack_require__(2);
/* harmony import */ var _util__WEBPACK_IMPORTED_MODULE_3__ = __webpack_require__(8);
/* harmony import */ var _clone__WEBPACK_IMPORTED_MODULE_4__ = __webpack_require__(70);
/* harmony import */ var _operation__WEBPACK_IMPORTED_MODULE_5__ = __webpack_require__(4);
/**
* @license
* Copyright 2020 Google LLC. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================================
*/
/**
* Concatenates a list of `tf.Tensor`s along a given axis.
*
* The tensors ranks and types must match, and their sizes must match in all
* dimensions except `axis`.
*
* Also available are stricter rank-specific methods that assert that
* `tensors` are of the given rank:
* - `tf.concat1d`
* - `tf.concat2d`
* - `tf.concat3d`
* - `tf.concat4d`
*
* Except `tf.concat1d` (which does not have axis param), all methods have
* same signature as this method.
*
* ```js
* const a = tf.tensor1d([1, 2]);
* const b = tf.tensor1d([3, 4]);
* a.concat(b).print(); // or a.concat(b)
* ```
*
* ```js
* const a = tf.tensor1d([1, 2]);
* const b = tf.tensor1d([3, 4]);
* const c = tf.tensor1d([5, 6]);
* tf.concat([a, b, c]).print();
* ```
*
* ```js
* const a = tf.tensor2d([[1, 2], [10, 20]]);
* const b = tf.tensor2d([[3, 4], [30, 40]]);
* const axis = 1;
* tf.concat([a, b], axis).print();
* ```
* @param tensors A list of tensors to concatenate.
* @param axis The axis to concate along. Defaults to 0 (the first dim).
*
* @doc {heading: 'Tensors', subheading: 'Slicing and Joining'}
*/
// Implementation behind `tf.concat`: validates the tensor list, enforces
// dtype consistency for complex64, short-circuits single-tensor input to
// a clone, and otherwise dispatches the Concat kernel.
function concat_(tensors, axis = 0) {
Object(_util__WEBPACK_IMPORTED_MODULE_3__[/* assert */ "b"])(tensors.length >= 1, () => 'Pass at least one tensor to concat');
const $tensors = Object(_tensor_util_env__WEBPACK_IMPORTED_MODULE_2__[/* convertToTensorArray */ "b"])(tensors, 'tensors', 'concat', 'string_or_numeric');
// complex64 tensors can only be concatenated with other complex64
// tensors; the check is keyed off the first tensor's dtype.
if ($tensors[0].dtype === 'complex64') {
$tensors.forEach(tensor => {
if (tensor.dtype !== 'complex64') {
throw new Error(`Cannot concatenate complex64 tensors with a tensor
with dtype ${tensor.dtype}. `);
}
});
}
// A single tensor needs no kernel dispatch — return a copy.
if ($tensors.length === 1) {
return Object(_clone__WEBPACK_IMPORTED_MODULE_4__[/* clone */ "a"])($tensors[0]);
}
const inputs = $tensors;
const attr = { axis };
return _engine__WEBPACK_IMPORTED_MODULE_0__[/* ENGINE */ "a"].runKernel(_kernel_names__WEBPACK_IMPORTED_MODULE_1__[/* Concat */ "B"], inputs, attr);
}
const concat = Object(_operation__WEBPACK_IMPORTED_MODULE_5__[/* op */ "b"])({ concat_ });
//# sourceMappingURL=concat.js.map
/***/ }),
/* 33 */
/***/ (function(module, __webpack_exports__, __webpack_require__) {
"use strict";
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "a", function() { return transpose; });
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "b", function() { return transposeConfig; });
/* harmony import */ var _tensorflow_tfjs_core__WEBPACK_IMPORTED_MODULE_0__ = __webpack_require__(0);
/* harmony import */ var _cpu_util__WEBPACK_IMPORTED_MODULE_1__ = __webpack_require__(11);
/* harmony import */ var _Transpose_impl__WEBPACK_IMPORTED_MODULE_2__ = __webpack_require__(137);
/**
* @license
* Copyright 2020 Google LLC. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================================
*/
/**
 * CPU kernel for Transpose: permutes the input's shape by `perm` and
 * delegates the data shuffle to transposeImpl. Complex inputs are
 * rejected up front.
 */
function transpose(args) {
    const { inputs, attrs, backend } = args;
    const { x } = inputs;
    const { perm } = attrs;
    Object(_cpu_util__WEBPACK_IMPORTED_MODULE_1__[/* assertNotComplex */ "a"])(x, 'transpose');
    // Output shape is the input shape re-ordered by the permutation.
    const newShape = x.shape.map((_, axis) => x.shape[perm[axis]]);
    const values = backend.data.get(x.dataId).values;
    const result = Object(_Transpose_impl__WEBPACK_IMPORTED_MODULE_2__[/* transposeImpl */ "a"])(values, x.shape, x.dtype, perm, newShape);
    const dataId = backend.write(result, newShape, x.dtype);
    return { dataId, shape: newShape, dtype: x.dtype };
}
const transposeConfig = {
    kernelName: _tensorflow_tfjs_core__WEBPACK_IMPORTED_MODULE_0__["Transpose"],
    backendName: 'cpu',
    kernelFunc: transpose
};
//# sourceMappingURL=Transpose.js.map
/***/ }),
/* 34 */
/***/ (function(module, __webpack_exports__, __webpack_require__) {
"use strict";
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "a", function() { return customGrad; });
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "f", function() { return variableGrads; });
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "d", function() { return valueAndGrad; });
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "e", function() { return valueAndGrads; });
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "b", function() { return grad; });
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "c", function() { return grads; });
/* harmony import */ var _engine__WEBPACK_IMPORTED_MODULE_0__ = __webpack_require__(5);
/* harmony import */ var _tensor__WEBPACK_IMPORTED_MODULE_1__ = __webpack_require__(6);
/* harmony import */ var _tensor_util_env__WEBPACK_IMPORTED_MODULE_2__ = __webpack_require__(2);
/* harmony import */ var _util__WEBPACK_IMPORTED_MODULE_3__ = __webpack_require__(8);
/**
* @license
* Copyright 2018 Google LLC. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================================
*/
/**
* Provided `f(x)`, returns another function `g(x, dy?)`, which gives the
* gradient of `f(x)` with respect to `x`.
*
* If `dy` is provided, the gradient of `f(x).mul(dy).sum()` with respect to
* `x` is computed instead. `f(x)` must take a single tensor `x` and return a
* single tensor `y`. If `f()` takes multiple inputs, use `tf.grads` instead.
*
* ```js
* // f(x) = x ^ 2
* const f = x => x.square();
* // f'(x) = 2x
* const g = tf.grad(f);
*
* const x = tf.tensor1d([2, 3]);
* g(x).print();
* ```
*
* ```js
* // f(x) = x ^ 3
* const f = x => x.pow(tf.scalar(3, 'int32'));
* // f'(x) = 3x ^ 2
* const g = tf.grad(f);
* // f''(x) = 6x
* const gg = tf.grad(g);
*
* const x = tf.tensor1d([2, 3]);
* gg(x).print();
* ```
*
* @param f The function f(x), to compute gradient for.
*
* @doc {heading: 'Training', subheading: 'Gradients'}
*/
/**
 * Given `f(x)`, builds `g(x, dy?)` returning df/dx. When `dy` is supplied,
 * the gradient of `f(x).mul(dy).sum()` w.r.t. `x` is returned instead.
 * Intermediate tensors are cleaned up via ENGINE.tidy.
 */
function grad(f) {
    _util__WEBPACK_IMPORTED_MODULE_3__[/* assert */ "b"](_util__WEBPACK_IMPORTED_MODULE_3__[/* isFunction */ "u"](f), () => 'The f passed in grad(f) must be a function');
    return (x, dy) => {
        // x may have any dtype, hence the 'string_or_numeric' marker.
        const xTensor = Object(_tensor_util_env__WEBPACK_IMPORTED_MODULE_2__[/* convertToTensor */ "a"])(x, 'x', 'tf.grad', 'string_or_numeric');
        let dyTensor = null;
        if (dy != null) {
            dyTensor = Object(_tensor_util_env__WEBPACK_IMPORTED_MODULE_2__[/* convertToTensor */ "a"])(dy, 'dy', 'tf.grad');
        }
        return _engine__WEBPACK_IMPORTED_MODULE_0__[/* ENGINE */ "a"].tidy(() => {
            const { value, grads } = _engine__WEBPACK_IMPORTED_MODULE_0__[/* ENGINE */ "a"].gradients(() => f(xTensor), [xTensor], dyTensor);
            if (dyTensor != null) {
                _util__WEBPACK_IMPORTED_MODULE_3__[/* assertShapesMatch */ "e"](value.shape, dyTensor.shape, 'The shape of dy passed in grad(f)(x, dy) must match the shape returned by f(x)');
            }
            checkGrads(grads);
            return grads[0];
        });
    };
}
/**
* Provided `f(x1, x2,...)`, returns another function `g([x1, x2,...], dy?)`,
* which gives an array of gradients of `f()` with respect to each input
* [`x1`,`x2`,...].
*
* If `dy` is passed when calling `g()`, the gradient of
* `f(x1,...).mul(dy).sum()` with respect to each input is computed instead.
* The provided `f` must take one or more tensors and return a single tensor
* `y`. If `f()` takes a single input, we recommend using `tf.grad` instead.
*
* ```js
* // f(a, b) = a * b
* const f = (a, b) => a.mul(b);
* // df / da = b, df / db = a
* const g = tf.grads(f);
*
* const a = tf.tensor1d([2, 3]);
* const b = tf.tensor1d([-2, -3]);
* const [da, db] = g([a, b]);
* console.log('da');
* da.print();
* console.log('db');
* db.print();
* ```
*
* @param f The function `f(x1, x2,...)` to compute gradients for.
*
* @doc {heading: 'Training', subheading: 'Gradients'}
*/
/**
 * Given `f(x1, x2, ...)`, builds `g([x1, x2, ...], dy?)` returning one
 * gradient per input. When `dy` is supplied, gradients are of
 * `f(...).mul(dy).sum()` instead.
 */
function grads(f) {
    _util__WEBPACK_IMPORTED_MODULE_3__[/* assert */ "b"](_util__WEBPACK_IMPORTED_MODULE_3__[/* isFunction */ "u"](f), () => 'The f passed in grads(f) must be a function');
    return (args, dy) => {
        _util__WEBPACK_IMPORTED_MODULE_3__[/* assert */ "b"](Array.isArray(args), () => 'The args passed in grads(f)(args) must be an array of `Tensor`s or `TensorLike`s');
        // Inputs may have any dtype, hence the 'string_or_numeric' marker.
        const argTensors = Object(_tensor_util_env__WEBPACK_IMPORTED_MODULE_2__[/* convertToTensorArray */ "b"])(args, 'args', 'tf.grads', 'string_or_numeric');
        let dyTensor = null;
        if (dy != null) {
            dyTensor = Object(_tensor_util_env__WEBPACK_IMPORTED_MODULE_2__[/* convertToTensor */ "a"])(dy, 'dy', 'tf.grads');
        }
        return _engine__WEBPACK_IMPORTED_MODULE_0__[/* ENGINE */ "a"].tidy(() => {
            const { value, grads } = _engine__WEBPACK_IMPORTED_MODULE_0__[/* ENGINE */ "a"].gradients(() => f(...argTensors), argTensors, dyTensor);
            if (dyTensor != null) {
                _util__WEBPACK_IMPORTED_MODULE_3__[/* assertShapesMatch */ "e"](value.shape, dyTensor.shape, 'The shape of dy passed in grads(f)([x1,...], dy) must match the shape returned by f([x1,...])');
            }
            checkGrads(grads);
            return grads;
        });
    };
}
/**
* Like `tf.grad`, but also returns the value of `f()`. Useful when `f()`
* returns a metric you want to show.
*
* The result is a rich object with the following properties:
* - grad: The gradient of `f(x)` w.r.t `x` (result of `tf.grad`).
* - value: The value returned by `f(x)`.
*
* ```js
* // f(x) = x ^ 2
* const f = x => x.square();
* // f'(x) = 2x
* const g = tf.valueAndGrad(f);
*
* const x = tf.tensor1d([2, 3]);
* const {value, grad} = g(x);
*
* console.log('value');
* value.print();
* console.log('grad');
* grad.print();
* ```
*
* @doc {heading: 'Training', subheading: 'Gradients'}
*/
/**
 * Like `grad`, but the returned function also exposes the value of `f(x)`:
 * it returns `{value, grad}`. Unlike `grad`, inputs must already be tensors.
 */
function valueAndGrad(f) {
    _util__WEBPACK_IMPORTED_MODULE_3__[/* assert */ "b"](_util__WEBPACK_IMPORTED_MODULE_3__[/* isFunction */ "u"](f), () => 'The f passed in valueAndGrad(f) must be a function');
    return (x, dy) => {
        const assertFn = _util__WEBPACK_IMPORTED_MODULE_3__[/* assert */ "b"];
        const TensorClass = _tensor__WEBPACK_IMPORTED_MODULE_1__[/* Tensor */ "a"];
        assertFn(x instanceof TensorClass, () => 'The x passed in valueAndGrad(f)(x) must be a tensor');
        assertFn(dy == null || dy instanceof TensorClass, () => 'The dy passed in valueAndGrad(f)(x, dy) must be a tensor');
        const { grads, value } = _engine__WEBPACK_IMPORTED_MODULE_0__[/* ENGINE */ "a"].gradients(() => f(x), [x], dy);
        checkGrads(grads);
        return { grad: grads[0], value };
    };
}
/**
* Like `tf.grads`, but returns also the value of `f()`. Useful when `f()`
* returns a metric you want to show.
*
* The result is a rich object with the following properties:
* - grads: The gradients of `f()` w.r.t each input (result of `tf.grads`).
* - value: The value returned by `f(x)`.
*
* ```js
* // f(a, b) = a * b
* const f = (a, b) => a.mul(b);
* // df/da = b, df/db = a
* const g = tf.valueAndGrads(f);
*
* const a = tf.tensor1d([2, 3]);
* const b = tf.tensor1d([-2, -3]);
* const {value, grads} = g([a, b]);
*
* const [da, db] = grads;
*
* console.log('value');
* value.print();
*
* console.log('da');
* da.print();
* console.log('db');
* db.print();
* ```
*
* @doc {heading: 'Training', subheading: 'Gradients'}
*/
/**
 * Like `grads`, but the returned function also exposes the value of `f()`:
 * it returns `{value, grads}`. All entries of `args` must be tensors.
 */
function valueAndGrads(f) {
    const assertFn = _util__WEBPACK_IMPORTED_MODULE_3__[/* assert */ "b"];
    assertFn(_util__WEBPACK_IMPORTED_MODULE_3__[/* isFunction */ "u"](f), () => 'The f passed in valueAndGrads(f) must be a function');
    return (args, dy) => {
        const TensorClass = _tensor__WEBPACK_IMPORTED_MODULE_1__[/* Tensor */ "a"];
        assertFn(Array.isArray(args) && args.every(arg => arg instanceof TensorClass), () => 'The args passed in valueAndGrads(f)(args) must be array of tensors');
        assertFn(dy == null || dy instanceof TensorClass, () => 'The dy passed in valueAndGrads(f)(args, dy) must be a tensor');
        const res = _engine__WEBPACK_IMPORTED_MODULE_0__[/* ENGINE */ "a"].gradients(() => f(...args), args, dy);
        if (dy != null) {
            _util__WEBPACK_IMPORTED_MODULE_3__[/* assertShapesMatch */ "e"](res.value.shape, dy.shape, 'The shape of dy passed in valueAndGrads(f)([x1,...], dy) must match the shape returned by f([x1,...])');
        }
        checkGrads(res.grads);
        return res;
    };
}
/**
* Computes and returns the gradient of f(x) with respect to the list of
* trainable variables provided by `varList`. If no list is provided, it
* defaults to all trainable variables.
*
* ```js
* const a = tf.variable(tf.tensor1d([3, 4]));
* const b = tf.variable(tf.tensor1d([5, 6]));
* const x = tf.tensor1d([1, 2]);
*
* // f(a, b) = a * x ^ 2 + b * x
* const f = () => a.mul(x.square()).add(b.mul(x)).sum();
* // df/da = x ^ 2, df/db = x
* const {value, grads} = tf.variableGrads(f);
*
* Object.keys(grads).forEach(varName => grads[varName].print());
* ```
*
* @param f The function to execute. f() should return a scalar.
* @param varList The list of variables to compute the gradients with respect
* to. Defaults to all trainable variables.
* @returns An object with the following keys and values:
* - `value`: The value of the function `f`.
* - `grads`: A map from the names of the variables to the gradients.
* If the `varList` argument is provided explicitly and contains a subset of
* non-trainable variables, this map in the return value will contain keys
* that map the names of the non-trainable variables to `null`.
*
* @doc {heading: 'Training', subheading: 'Gradients'}
*/
/**
 * Computes the value of `f()` and its gradients w.r.t. trainable variables.
 * When `varList` is omitted, every variable registered with the engine is
 * used. Returns `{value, grads}` where `grads` maps variable name to
 * gradient tensor; explicitly-passed non-trainable variables map to `null`.
 */
function variableGrads(f, varList) {
_util__WEBPACK_IMPORTED_MODULE_3__[/* assert */ "b"](_util__WEBPACK_IMPORTED_MODULE_3__[/* isFunction */ "u"](f), () => 'The f passed in variableGrads(f) must be a function');
// varList, if given, must contain only Variable instances.
_util__WEBPACK_IMPORTED_MODULE_3__[/* assert */ "b"](varList == null ||
Array.isArray(varList) && varList.every(v => v instanceof _tensor__WEBPACK_IMPORTED_MODULE_1__[/* Variable */ "c"]), () => 'The varList passed in variableGrads(f, varList) must be an array ' +
'of variables');
// Remember whether the caller chose the variables explicitly: only then do
// non-trainable entries get `null` placeholders in the result (see below).
const specifiedVarList = varList != null;
if (!specifiedVarList) {
// Get all of the trainable variables.
varList = [];
for (const varName in _engine__WEBPACK_IMPORTED_MODULE_0__[/* ENGINE */ "a"].registeredVariables) {
varList.push(_engine__WEBPACK_IMPORTED_MODULE_0__[/* ENGINE */ "a"].registeredVariables[varName]);
}
}
// Keep the caller's non-trainable variables aside so their names can be
// reported (with null gradients) in the returned map.
const specifiedNonTrainable = specifiedVarList ? varList.filter(variable => !variable.trainable) : null;
// Prune non-trainable variables.
const originalVarCount = varList.length;
varList = varList.filter(variable => variable.trainable);
_util__WEBPACK_IMPORTED_MODULE_3__[/* assert */ "b"](varList.length > 0, () => `variableGrads() expects at least one of the input variables to ` +
`be trainable, but none of the ${originalVarCount} variables is ` +
`trainable.`);
// Allow null gradients here so we can emit a clearer error message below.
const allowNoGradients = true;
const { value, grads } = _engine__WEBPACK_IMPORTED_MODULE_0__[/* ENGINE */ "a"].gradients(f, varList, null, allowNoGradients);
_util__WEBPACK_IMPORTED_MODULE_3__[/* assert */ "b"](grads.some(g => g != null), () => 'Cannot find a connection between any variable and the result of ' +
'the loss function y=f(x). Please make sure the operations that ' +
'use variables are inside the function f passed to minimize().');
// Gradients are only defined for scalar losses.
_util__WEBPACK_IMPORTED_MODULE_3__[/* assert */ "b"](value.rank === 0, () => `The f passed in variableGrads(f) must return a scalar, but it ` +
`returned a rank-${value.rank} tensor`);
// Re-key the gradient list by variable name, skipping disconnected ones.
const namedGrads = {};
varList.forEach((v, i) => {
if (grads[i] != null) {
namedGrads[v.name] = grads[i];
}
});
if (specifiedNonTrainable != null) {
// If varList is explicitly provided and contains non-trainable values,
// add them to the returned gradients with `null` values.
specifiedNonTrainable.forEach(v => namedGrads[v.name] = null);
}
return { value, grads: namedGrads };
}
/**
* Overrides the gradient computation of a function `f`.
*
* Takes a function
* `f(...inputs, save) => {value: Tensor, gradFunc: (dy, saved) => Tensor[]}`
* and returns another function `g(...inputs)` which takes the same inputs as
* `f`. When called, `g` returns `f().value`. In backward mode, custom gradients
* with respect to each input of `f` are computed using `f().gradFunc`.
*
* The `save` function passsed to `f` should be used for saving tensors needed
* in the gradient. And the `saved` passed to the `gradFunc` is a
* `NamedTensorMap`, which contains those saved tensor.
*
* ```js
* const customOp = tf.customGrad((x, save) => {
* // Save x to make sure it's available later for the gradient.
* save([x]);
* // Override gradient of our custom x ^ 2 op to be dy * abs(x);
* return {
* value: x.square(),
* // Note `saved.x` which points to the `x` we saved earlier.
* gradFunc: (dy, saved) => [dy.mul(saved[0].abs())]
* };
* });
*
* const x = tf.tensor1d([-1, -2, 3]);
* const dx = tf.grad(x => customOp(x));
*
* console.log(`f(x):`);
* customOp(x).print();
* console.log(`f'(x):`);
* dx(x).print();
* ```
*
* @param f The function to evaluate in forward mode, which should return
* `{value: Tensor, gradFunc: (dy, saved) => Tensor[]}`, where `gradFunc`
* returns the custom gradients of `f` with respect to its inputs.
*
* @doc {heading: 'Training', subheading: 'Gradients'}
*/
/**
 * Wraps `f` so that, in backward mode, the gradients come from the
 * `gradFunc` that `f` returns rather than from the autodiff engine.
 */
function customGrad(f) {
    const engine = _engine__WEBPACK_IMPORTED_MODULE_0__[/* ENGINE */ "a"];
    return engine.customGrad(f);
}
/**
 * Throws when any entry of `grads` is null/undefined, i.e. when no
 * differentiable path connected the corresponding input to the output.
 */
function checkGrads(grads) {
    const hasMissingGradient = grads.some(g => g == null);
    if (hasMissingGradient) {
        throw new Error(`Cannot compute gradient of y=f(x) with respect to x. Make sure that
the f you passed encloses all operations that lead from x to y.`);
    }
}
//# sourceMappingURL=gradients.js.map
/***/ }),
/* 35 */
/***/ (function(module, __webpack_exports__, __webpack_require__) {
"use strict";
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "a", function() { return Reduction; });
/**
* @license
* Copyright 2020 Google LLC. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================================
*/
/**
 * Loss-reduction modes. Built in the compiled-TypeScript-enum shape:
 * each member maps name -> numeric value and value -> name.
 */
var Reduction;
(function (Reduction) {
    const members = ['NONE', 'MEAN', 'SUM', 'SUM_BY_NONZERO_WEIGHTS'];
    members.forEach((name, value) => {
        Reduction[Reduction[name] = value] = name;
    });
})(Reduction || (Reduction = {}));
//# sourceMappingURL=loss_ops_utils.js.map
/***/ }),
/* 36 */
/***/ (function(module, __webpack_exports__, __webpack_require__) {
"use strict";
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "a", function() { return where; });
/* harmony import */ var _engine__WEBPACK_IMPORTED_MODULE_0__ = __webpack_require__(5);
/* harmony import */ var _kernel_names__WEBPACK_IMPORTED_MODULE_1__ = __webpack_require__(3);
/* harmony import */ var _tensor_util_env__WEBPACK_IMPORTED_MODULE_2__ = __webpack_require__(2);
/* harmony import */ var _broadcast_to__WEBPACK_IMPORTED_MODULE_3__ = __webpack_require__(101);
/* harmony import */ var _broadcast_util__WEBPACK_IMPORTED_MODULE_4__ = __webpack_require__(17);
/* harmony import */ var _operation__WEBPACK_IMPORTED_MODULE_5__ = __webpack_require__(4);
/**
* @license
* Copyright 2020 Google LLC. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================================
*/
/**
* Returns the elements, either `a` or `b` depending on the `condition`.
*
* If the condition is true, select from `a`, otherwise select from `b`.
*
* ```js
* const cond = tf.tensor1d([false, false, true], 'bool');
* const a = tf.tensor1d([1 , 2, 3]);
* const b = tf.tensor1d([-1, -2, -3]);
*
* a.where(cond, b).print();
* ```
*
* @param condition The input condition. Must be of dtype bool.
* @param a If `condition` is rank 1, `a` may have a higher rank but
* its first dimension must match the size of `condition`.
* @param b A tensor with the same dtype as `a` and with shape that is
* compatible with `a`.
* @return A tensor with same dtype as `a` and `b`, and shape that is
* broadcastable from `a` and `b`.
*
* @doc {heading: 'Operations', subheading: 'Logical'}
*/
/**
 * Element-wise select: picks from `a` where `condition` is true and from
 * `b` elsewhere. All three operands are broadcast to one common shape
 * before the Select kernel runs.
 */
function where_(condition, a, b) {
    const aTensor = Object(_tensor_util_env__WEBPACK_IMPORTED_MODULE_2__[/* convertToTensor */ "a"])(a, 'a', 'where');
    const bTensor = Object(_tensor_util_env__WEBPACK_IMPORTED_MODULE_2__[/* convertToTensor */ "a"])(b, 'b', 'where');
    const condTensor = Object(_tensor_util_env__WEBPACK_IMPORTED_MODULE_2__[/* convertToTensor */ "a"])(condition, 'condition', 'where', 'bool');
    // TODO: move this logic to forward function when the broadcastTo op is
    // implemented in WASM.
    // Find the broadcastable shape for condition, a, and b.
    const getBroadcastShape = _broadcast_util__WEBPACK_IMPORTED_MODULE_4__[/* assertAndGetBroadcastShape */ "a"];
    const broadcastShape = getBroadcastShape(getBroadcastShape(condTensor.shape, aTensor.shape), bTensor.shape);
    const toBroadcastShape = t => Object(_broadcast_to__WEBPACK_IMPORTED_MODULE_3__[/* broadcastTo */ "a"])(t, broadcastShape);
    const inputs = {
        condition: toBroadcastShape(condTensor),
        t: toBroadcastShape(aTensor),
        e: toBroadcastShape(bTensor)
    };
    return _engine__WEBPACK_IMPORTED_MODULE_0__[/* ENGINE */ "a"].runKernel(_kernel_names__WEBPACK_IMPORTED_MODULE_1__[/* Select */ "zc"], inputs);
}
const where = Object(_operation__WEBPACK_IMPORTED_MODULE_5__[/* op */ "b"])({ where_ });
//# sourceMappingURL=where.js.map
/***/ }),
/* 37 */
/***/ (function(module, __webpack_exports__, __webpack_require__) {
"use strict";
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "a", function() { return sqrt; });
/* harmony import */ var _engine__WEBPACK_IMPORTED_MODULE_0__ = __webpack_require__(5);
/* harmony import */ var _kernel_names__WEBPACK_IMPORTED_MODULE_1__ = __webpack_require__(3);
/* harmony import */ var _tensor_util_env__WEBPACK_IMPORTED_MODULE_2__ = __webpack_require__(2);
/* harmony import */ var _operation__WEBPACK_IMPORTED_MODULE_3__ = __webpack_require__(4);
/**
* @license
* Copyright 2018 Google LLC. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================================
*/
/**
* Computes square root of the input `tf.Tensor` element-wise: `y = sqrt(x)`
*
* ```js
* const x = tf.tensor1d([1, 2, 4, -1]);
*
* x.sqrt().print(); // or tf.sqrt(x)
* ```
* @param x The input tensor.
*
* @doc {heading: 'Operations', subheading: 'Basic math'}
*/
/**
 * Element-wise square root: `y = sqrt(x)`. Converts the input to a tensor
 * and delegates to the Sqrt kernel.
 */
function sqrt_(x) {
    const xTensor = Object(_tensor_util_env__WEBPACK_IMPORTED_MODULE_2__[/* convertToTensor */ "a"])(x, 'x', 'sqrt');
    return _engine__WEBPACK_IMPORTED_MODULE_0__[/* ENGINE */ "a"].runKernel(_kernel_names__WEBPACK_IMPORTED_MODULE_1__[/* Sqrt */ "Mc"], { x: xTensor });
}
const sqrt = Object(_operation__WEBPACK_IMPORTED_MODULE_3__[/* op */ "b"])({ sqrt_ });
//# sourceMappingURL=sqrt.js.map
/***/ }),
/* 38 */
/***/ (function(module, __webpack_exports__, __webpack_require__) {
"use strict";
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "a", function() { return complex; });
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "b", function() { return complexConfig; });
/* harmony import */ var _tensorflow_tfjs_core__WEBPACK_IMPORTED_MODULE_0__ = __webpack_require__(0);
/**
* @license
* Copyright 2020 Google LLC. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================================
*/
/**
 * CPU kernel for Complex: packs separate real and imaginary float tensors
 * into a single complex64 TensorInfo.
 */
function complex(args) {
    const { inputs, backend } = args;
    const { real, imag } = inputs;
    const realValues = backend.data.get(real.dataId).values;
    const imagValues = backend.data.get(imag.dataId).values;
    const complexInfo = backend.makeTensorInfo(real.shape, 'complex64');
    const complexData = backend.data.get(complexInfo.dataId);
    // The complex tensor owns the underlying real and imag tensorInfos, only the
    // complex tensor tracks refCount, when complexData is disposed the
    // underlying tensorData will be disposed.
    complexData.complexTensorInfos = {
        real: backend.makeTensorInfo(real.shape, 'float32', realValues),
        imag: backend.makeTensorInfo(imag.shape, 'float32', imagValues)
    };
    return complexInfo;
}
// Registration descriptor binding this kernel to the 'cpu' backend.
const complexConfig = {
    kernelName: _tensorflow_tfjs_core__WEBPACK_IMPORTED_MODULE_0__["Complex"],
    backendName: 'cpu',
    kernelFunc: complex
};
//# sourceMappingURL=Complex.js.map
/***/ }),
/* 39 */
/***/ (function(module, __webpack_exports__, __webpack_require__) {
"use strict";
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "b", function() { return axesAreInnerMostDims; });
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "c", function() { return combineLocations; });
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "d", function() { return computeOutAndReduceShapes; });
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "e", function() { return expandShapeToKeepDim; });
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "a", function() { return assertAxesAreInnerMostDims; });
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "f", function() { return getAxesPermutation; });
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "h", function() { return getUndoAxesPermutation; });
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "g", function() { return getInnerMostAxes; });
/* harmony import */ var _util__WEBPACK_IMPORTED_MODULE_0__ = __webpack_require__(8);
/**
* @license
* Copyright 2017 Google LLC. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================================
*/
/**
* Returns true if the axis specifies the inner most dimensions of the
* array.
*/
/**
 * Returns true if `axes` name exactly the trailing (inner-most) dimensions
 * of a rank-`rank` tensor, in order.
 */
function axesAreInnerMostDims(axes, rank) {
    // Walking from the last axis backwards, entries must be rank-1, rank-2, ...
    return axes.every((_, i) => axes[axes.length - 1 - i] === rank - 1 - i);
}
/**
 * Interleaves a reduced-output location with a within-reduction location
 * to recover the full-rank coordinate: reduced dims (those listed in
 * `axes`) come from `reduceLoc`, the rest from `outputLoc`, in order.
 */
function combineLocations(outputLoc, reduceLoc, axes) {
    const totalRank = outputLoc.length + reduceLoc.length;
    const combined = [];
    let outCursor = 0;
    let reduceCursor = 0;
    for (let dim = 0; dim < totalRank; dim++) {
        combined.push(axes.indexOf(dim) === -1 ?
            outputLoc[outCursor++] :
            reduceLoc[reduceCursor++]);
    }
    return combined;
}
/**
 * Splits `aShape` into [outShape, reduceShape] for a reduction over `axes`:
 * outShape keeps the non-reduced dims (in order), reduceShape lists the
 * sizes of the reduced dims in the order given by `axes`.
 */
function computeOutAndReduceShapes(aShape, axes) {
    const outShape = aShape.filter((_, dim) => axes.indexOf(dim) === -1);
    const reduceShape = axes.map(dim => aShape[dim]);
    return [outShape, reduceShape];
}
/**
 * Re-inserts size-1 entries at the reduced positions `axes` into the
 * reduced `shape` (the keepDims-style output shape).
 */
function expandShapeToKeepDim(shape, axes) {
    const onesForReducedDims = axes.map(() => 1);
    return combineLocations(shape, onesForReducedDims, axes);
}
/**
 * Asserts that `axes` name only the inner-most dims of a rank-`rank`
 * tensor; `msg` identifies the calling op in the error text.
 */
function assertAxesAreInnerMostDims(msg, axes, rank) {
    const assertFn = _util__WEBPACK_IMPORTED_MODULE_0__[/* assert */ "b"];
    assertFn(axesAreInnerMostDims(axes, rank), () => `${msg} supports only inner-most axes for now. ` +
        `Got axes ${axes} and rank-${rank} input.`);
}
/**
* Returns the axes permutation to be used with `tf.transpose`, if such
* permutation is necessary. Otherwise it returns null. This method is used by
* operations that operate only on inner-most axes.
*/
/**
 * Returns the axes permutation to be used with `tf.transpose`, if such
 * permutation is necessary (non-reduced dims first, then `axes`).
 * Returns null when `axes` are already the inner-most dims.
 */
function getAxesPermutation(axes, rank) {
    if (axesAreInnerMostDims(axes, rank)) {
        return null;
    }
    const nonReducedDims = [];
    for (let dim = 0; dim < rank; ++dim) {
        if (!axes.includes(dim)) {
            nonReducedDims.push(dim);
        }
    }
    return nonReducedDims.concat(axes);
}
/** Returns the axes permutation that undoes the original permutation. */
/** Returns the axes permutation that undoes the original permutation. */
function getUndoAxesPermutation(axes) {
    // Pair each destination index with its source axis, order by source
    // axis, then read back the destination indices.
    const indexed = Array.from(axes, (axis, i) => [i, axis]);
    indexed.sort((lhs, rhs) => lhs[1] - rhs[1]);
    return indexed.map(([originalIndex]) => originalIndex);
}
/**
 * Returns the last `numAxes` axis indices of a rank-`rank` tensor,
 * i.e. [rank - numAxes, ..., rank - 1].
 */
function getInnerMostAxes(numAxes, rank) {
    return Array.from({ length: numAxes }, (_, i) => rank - numAxes + i);
}
//# sourceMappingURL=axis_util.js.map
/***/ }),
/* 40 */
/***/ (function(module, __webpack_exports__, __webpack_require__) {
"use strict";
/* WEBPACK VAR INJECTION */(function(Buffer) {/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "f", function() { return encodeWeights; });
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "e", function() { return decodeWeights; });
/* unused harmony export concatenateTypedArrays */
/* unused harmony export stringByteLength */
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "a", function() { return arrayBufferToBase64String; });
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "b", function() { return base64StringToArrayBuffer; });
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "d", function() { return concatenateArrayBuffers; });
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "c", function() { return basename; });
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "g", function() { return getModelArtifactsInfoForJSON; });
/* unused harmony export getFloat16Decoder */
/* harmony import */ var _ops_complex__WEBPACK_IMPORTED_MODULE_0__ = __webpack_require__(58);
/* harmony import */ var _ops_tensor__WEBPACK_IMPORTED_MODULE_1__ = __webpack_require__(100);
/* harmony import */ var _util__WEBPACK_IMPORTED_MODULE_2__ = __webpack_require__(8);
/* harmony import */ var _types__WEBPACK_IMPORTED_MODULE_3__ = __webpack_require__(148);
/**
* @license
* Copyright 2018 Google LLC. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================================
*/
/** Number of bytes reserved for the length of the string. (32bit integer). */
const NUM_BYTES_STRING_LENGTH = 4;
/**
* Encode a map from names to weight values as an ArrayBuffer, along with an
* `Array` of `WeightsManifestEntry` as specification of the encoded weights.
*
* This function does not perform sharding.
*
* This function is the reverse of `decodeWeights`.
*
* @param tensors A map ("dict") from names to tensors.
* @param group Group to which the weights belong (optional).
* @returns A `Promise` of
* - A flat `ArrayBuffer` with all the binary values of the `Tensor`s
* concatenated.
* - An `Array` of `WeightManifestEntry`s, carrying information including
* tensor names, `dtype`s and shapes.
* @throws Error: on unsupported tensor `dtype`.
*/
async function encodeWeights(tensors, group) {
    // TODO(adarob, cais): Support quantization.
    const specs = [];
    const dataPromises = [];
    const names = Array.isArray(tensors) ?
        tensors.map(tensor => tensor.name) :
        Object.keys(tensors);
    for (let i = 0; i < names.length; ++i) {
        const name = names[i];
        const t = Array.isArray(tensors) ? tensors[i].tensor : tensors[name];
        if (t.dtype !== 'float32' && t.dtype !== 'int32' && t.dtype !== 'bool' &&
            t.dtype !== 'string' && t.dtype !== 'complex64') {
            throw new Error(`Unsupported dtype in weight '${name}': ${t.dtype}`);
        }
        const spec = { name, shape: t.shape, dtype: t.dtype };
        if (t.dtype === 'string') {
            // String tensors serialize each element as a 4-byte (uint32)
            // length prefix followed by its UTF-8 bytes. Use an async IIFE
            // rather than `new Promise(async (resolve) => ...)`: with the
            // latter, a rejection from t.bytes() never settles the promise
            // (Promise.all below would hang and the error would surface only
            // as an unhandled rejection). The IIFE's promise rejects properly.
            const utf8bytes = (async () => {
                const vals = await t.bytes();
                const totalNumBytes = vals.reduce((p, c) => p + c.length, 0) +
                    NUM_BYTES_STRING_LENGTH * vals.length;
                const bytes = new Uint8Array(totalNumBytes);
                let offset = 0;
                // `j` avoids shadowing the outer tensor index `i`.
                for (let j = 0; j < vals.length; j++) {
                    const val = vals[j];
                    // Length prefix uses the platform's Uint32Array byte order.
                    const bytesOfLength = new Uint8Array(new Uint32Array([val.length]).buffer);
                    bytes.set(bytesOfLength, offset);
                    offset += NUM_BYTES_STRING_LENGTH;
                    bytes.set(val, offset);
                    offset += val.length;
                }
                return bytes;
            })();
            dataPromises.push(utf8bytes);
        }
        else {
            // Numeric/bool/complex tensors: fetch the backing TypedArray.
            dataPromises.push(t.data());
        }
        if (group != null) {
            spec.group = group;
        }
        specs.push(spec);
    }
    const tensorValues = await Promise.all(dataPromises);
    return { data: concatenateTypedArrays(tensorValues), specs };
}
/**
* Decode flat ArrayBuffer as weights.
*
* This function does not handle sharding.
*
* This function is the reverse of `encodeWeights`.
*
* @param buffer A flat ArrayBuffer carrying the binary values of the tensors
* concatenated in the order specified in `specs`.
* @param specs Specifications of the names, dtypes and shapes of the tensors
* whose value are encoded by `buffer`.
* @return A map from tensor name to tensor value, with the names corresponding
* to names in `specs`.
* @throws Error, if any of the tensors has unsupported dtype.
*/
function decodeWeights(buffer, specs) {
    // TODO(adarob, cais): Support quantization.
    // `out` maps tensor name -> decoded tensor. `offset` is the read cursor
    // into `buffer`; each spec consumes one contiguous byte region, in the
    // order the specs are listed.
    const out = {};
    // Float16->Float32 decoder, built lazily on first use (only needed when
    // some spec is float16-quantized).
    let float16Decode;
    let offset = 0;
    for (const spec of specs) {
        const name = spec.name;
        const dtype = spec.dtype;
        const shape = spec.shape;
        // Element count (not byte count) of this tensor.
        const size = Object(_util__WEBPACK_IMPORTED_MODULE_2__[/* sizeFromShape */ "O"])(shape);
        let values;
        if ('quantization' in spec) {
            // ---- Quantized weights ----
            const quantization = spec.quantization;
            if (quantization.dtype === 'uint8' || quantization.dtype === 'uint16') {
                // Affine quantization needs min/scale metadata to map integer
                // codes back to real values.
                if (!('min' in quantization && 'scale' in quantization)) {
                    throw new Error(`Weight ${spec.name} with quantization ${quantization.dtype} ` +
                        `doesn't have corresponding metadata min and scale.`);
                }
            }
            else if (quantization.dtype === 'float16') {
                if (dtype !== 'float32') {
                    throw new Error(`Weight ${spec.name} is quantized with ${quantization.dtype} ` +
                        `which only supports weights of type float32 not ${dtype}.`);
                }
            }
            else {
                throw new Error(`Weight ${spec.name} has unknown ` +
                    `quantization dtype ${quantization.dtype}. ` +
                    `Supported quantization dtypes are: ` +
                    `'uint8', 'uint16', and 'float16'.`);
            }
            const quantizationSizeFactor = _types__WEBPACK_IMPORTED_MODULE_3__[/* DTYPE_VALUE_SIZE_MAP */ "a"][quantization.dtype];
            const byteBuffer = buffer.slice(offset, offset + size * quantizationSizeFactor);
            const quantizedArray = (quantization.dtype === 'uint8') ?
                new Uint8Array(byteBuffer) :
                new Uint16Array(byteBuffer);
            if (dtype === 'float32') {
                if (quantization.dtype === 'uint8' || quantization.dtype === 'uint16') {
                    // Dequantize: value = code * scale + min.
                    values = new Float32Array(quantizedArray.length);
                    for (let i = 0; i < quantizedArray.length; i++) {
                        const v = quantizedArray[i];
                        values[i] = v * quantization.scale + quantization.min;
                    }
                }
                else if (quantization.dtype === 'float16') {
                    if (float16Decode === undefined) {
                        float16Decode = getFloat16Decoder();
                    }
                    values = float16Decode(quantizedArray);
                }
                else {
                    throw new Error(`Unsupported quantization type ${quantization.dtype} ` +
                        `for weight type float32.`);
                }
            }
            else if (dtype === 'int32') {
                if (quantization.dtype !== 'uint8' && quantization.dtype !== 'uint16') {
                    throw new Error(`Unsupported quantization type ${quantization.dtype} ` +
                        `for weight type int32.`);
                }
                // Dequantize, then round back to integer values.
                values = new Int32Array(quantizedArray.length);
                for (let i = 0; i < quantizedArray.length; i++) {
                    const v = quantizedArray[i];
                    values[i] = Math.round(v * quantization.scale + quantization.min);
                }
            }
            else {
                throw new Error(`Unsupported dtype in weight '${name}': ${dtype}`);
            }
            offset += size * quantizationSizeFactor;
        }
        else if (dtype === 'string') {
            // ---- String weights ----
            // Each string is stored as a NUM_BYTES_STRING_LENGTH-byte length
            // prefix followed by the raw bytes (mirrors encodeWeights).
            const size = Object(_util__WEBPACK_IMPORTED_MODULE_2__[/* sizeFromShape */ "O"])(spec.shape);
            values = [];
            for (let i = 0; i < size; i++) {
                const byteLength = new Uint32Array(buffer.slice(offset, offset + NUM_BYTES_STRING_LENGTH))[0];
                offset += NUM_BYTES_STRING_LENGTH;
                const bytes = new Uint8Array(buffer.slice(offset, offset + byteLength));
                values.push(bytes);
                offset += byteLength;
            }
        }
        else {
            // ---- Plain (unquantized) numeric weights ----
            const dtypeFactor = _types__WEBPACK_IMPORTED_MODULE_3__[/* DTYPE_VALUE_SIZE_MAP */ "a"][dtype];
            const byteBuffer = buffer.slice(offset, offset + size * dtypeFactor);
            if (dtype === 'float32') {
                values = new Float32Array(byteBuffer);
            }
            else if (dtype === 'int32') {
                values = new Int32Array(byteBuffer);
            }
            else if (dtype === 'bool') {
                values = new Uint8Array(byteBuffer);
            }
            else if (dtype === 'complex64') {
                // complex64 is stored interleaved [re0, im0, re1, im1, ...];
                // split into real/imaginary float32 tensors and recombine.
                values = new Float32Array(byteBuffer);
                const real = new Float32Array(values.length / 2);
                const image = new Float32Array(values.length / 2);
                for (let i = 0; i < real.length; i++) {
                    real[i] = values[i * 2];
                    image[i] = values[i * 2 + 1];
                }
                const realTensor = Object(_ops_tensor__WEBPACK_IMPORTED_MODULE_1__[/* tensor */ "a"])(real, shape, 'float32');
                const imageTensor = Object(_ops_tensor__WEBPACK_IMPORTED_MODULE_1__[/* tensor */ "a"])(image, shape, 'float32');
                out[name] = Object(_ops_complex__WEBPACK_IMPORTED_MODULE_0__[/* complex */ "a"])(realTensor, imageTensor);
                // The intermediates are no longer needed once combined.
                realTensor.dispose();
                imageTensor.dispose();
            }
            else {
                throw new Error(`Unsupported dtype in weight '${name}': ${dtype}`);
            }
            offset += size * dtypeFactor;
        }
        // complex64 was already placed into `out` above.
        if (dtype !== 'complex64') {
            out[name] = Object(_ops_tensor__WEBPACK_IMPORTED_MODULE_1__[/* tensor */ "a"])(values, shape, dtype);
        }
    }
    return out;
}
/**
 * Concatenate TypedArrays into an ArrayBuffer.
 *
 * Only Float32Array, Int32Array and Uint8Array elements are supported.
 *
 * @param xs The typed arrays to concatenate, in order.
 * @returns An ArrayBuffer holding the bytes of all inputs, back to back.
 * @throws Error if `xs` is null/undefined or contains an unsupported
 *   TypedArray subtype.
 */
function concatenateTypedArrays(xs) {
    // TODO(adarob, cais): Support quantization.
    // `== null` also rejects `undefined` (the original `=== null` let
    // `undefined` fall through to a confusing forEach TypeError).
    if (xs == null) {
        throw new Error(`Invalid input value: ${JSON.stringify(xs)}`);
    }
    let totalByteLength = 0;
    // A TypedArray's `buffer` can be longer than the view itself (e.g. when
    // the view was created with a byte offset/length). `normalizedXs` holds
    // views whose backing buffer matches them exactly — copying only when
    // necessary — so the raw buffers can be block-copied below.
    const normalizedXs = [];
    xs.forEach((x) => {
        // Validate before accumulating any state, so unsupported inputs fail
        // fast (the original pushed first and threw afterwards).
        if (!(x instanceof Float32Array || x instanceof Int32Array ||
            x instanceof Uint8Array)) {
            throw new Error(`Unsupported TypedArray subtype: ${x.constructor.name}`);
        }
        totalByteLength += x.byteLength;
        // tslint:disable-next-line:no-any
        normalizedXs.push(x.byteLength === x.buffer.byteLength ? x : new x.constructor(x));
    });
    const y = new Uint8Array(totalByteLength);
    let offset = 0;
    normalizedXs.forEach((x) => {
        y.set(new Uint8Array(x.buffer), offset);
        offset += x.byteLength;
    });
    return y.buffer;
}
// Use Buffer on Node.js instead of Blob/atob/btoa.
// True when a Node-style `Buffer` global exists AND at least one of the
// browser primitives (Blob / atob / btoa) is missing — i.e. we are most
// likely running under Node.js rather than in a browser.
const useNodeBuffer = typeof Buffer !== 'undefined' &&
    (typeof Blob === 'undefined' || typeof atob === 'undefined' ||
        typeof btoa === 'undefined');
/**
* Calculate the byte length of a JavaScript string.
*
* Note that a JavaScript string can contain wide characters, therefore the
* length of the string is not necessarily equal to the byte length.
*
* @param str Input string.
* @returns Byte length.
*/
function stringByteLength(str) {
    // Node path: Buffer.byteLength counts UTF-8 bytes directly.
    // Browser path: a Blob's size is its content's byte length.
    return useNodeBuffer ? Buffer.byteLength(str) : new Blob([str]).size;
}
/**
* Encode an ArrayBuffer as a base64 encoded string.
*
* @param buffer `ArrayBuffer` to be converted.
* @returns A string that base64-encodes `buffer`.
*/
function arrayBufferToBase64String(buffer) {
    if (useNodeBuffer) {
        return Buffer.from(buffer).toString('base64');
    }
    // Browser path: build a byte-per-char binary string, then base64 it.
    let binary = '';
    for (const byte of new Uint8Array(buffer)) {
        binary += String.fromCharCode(byte);
    }
    return btoa(binary);
}
/**
* Decode a base64 string as an ArrayBuffer.
*
* @param str Base64 string.
* @returns Decoded `ArrayBuffer`.
*/
function base64StringToArrayBuffer(str) {
    if (useNodeBuffer) {
        const buf = Buffer.from(str, 'base64');
        // Slice so the result is exactly the decoded bytes (a Buffer may be
        // a view into a larger pooled ArrayBuffer).
        return buf.buffer.slice(buf.byteOffset, buf.byteOffset + buf.byteLength);
    }
    // Browser path: atob yields a byte-per-char binary string.
    const decoded = atob(str);
    const bytes = new Uint8Array(decoded.length);
    for (let i = 0; i < decoded.length; ++i) {
        bytes[i] = decoded.charCodeAt(i);
    }
    return bytes.buffer;
}
/**
* Concatenate a number of ArrayBuffers into one.
*
* @param buffers A number of array buffers to concatenate.
* @returns Result of concatenating `buffers` in order.
*/
function concatenateArrayBuffers(buffers) {
    // Single input: return it as-is (no copy), matching long-standing
    // caller expectations.
    if (buffers.length === 1) {
        return buffers[0];
    }
    const totalByteLength = buffers.reduce((total, buffer) => total + buffer.byteLength, 0);
    const combined = new Uint8Array(totalByteLength);
    let offset = 0;
    for (const buffer of buffers) {
        combined.set(new Uint8Array(buffer), offset);
        offset += buffer.byteLength;
    }
    return combined.buffer;
}
/**
* Get the basename of a path.
*
* Behaves in a way analogous to Linux's basename command.
*
* @param path
*/
function basename(path) {
    // Trim whitespace and strip trailing separators, then return the final
    // path segment (mirrors Linux `basename`).
    let trimmed = path.trim();
    while (trimmed.endsWith('/')) {
        trimmed = trimmed.slice(0, -1);
    }
    const segments = trimmed.split('/');
    return segments[segments.length - 1];
}
/**
* Populate ModelArtifactsInfo fields for a model with JSON topology.
* @param modelArtifacts
* @returns A ModelArtifactsInfo object.
*/
function getModelArtifactsInfoForJSON(modelArtifacts) {
    // This helper only describes JSON-topology models; binary topologies are
    // rejected up front.
    if (modelArtifacts.modelTopology instanceof ArrayBuffer) {
        throw new Error('Expected JSON model topology, received ArrayBuffer.');
    }
    // Byte size of a value's JSON serialization; 0 when the value is absent.
    const jsonByteLength = (value) =>
        value == null ? 0 : stringByteLength(JSON.stringify(value));
    return {
        dateSaved: new Date(),
        modelTopologyType: 'JSON',
        modelTopologyBytes: jsonByteLength(modelArtifacts.modelTopology),
        weightSpecsBytes: jsonByteLength(modelArtifacts.weightSpecs),
        weightDataBytes: modelArtifacts.weightData == null ?
            0 :
            modelArtifacts.weightData.byteLength,
    };
}
/**
 * Computes the mantissa lookup table for casting Float16 to Float32
* See http://www.fox-toolkit.org/ftp/fasthalffloatconversion.pdf
*
* @returns Uint32Array, 2048 mantissa lookup values.
*/
function computeFloat16MantisaTable() {
    // Converts a subnormal half-precision mantissa to a float32 bit pattern:
    // shift left until the implicit leading bit (bit 23) appears, lowering
    // the exponent for every shift, then drop that bit and rebias.
    const convertMantissa = (i) => {
        let m = i << 13;
        let e = 0;
        while ((m & 0x00800000) === 0) {
            e -= 0x00800000;
            m <<= 1;
        }
        m &= ~0x00800000;
        e += 0x38800000;
        return m | e;
    };
    const table = new Uint32Array(2048);
    // Index 0 stays 0; 1..1023 are subnormals, 1024..2047 are normals.
    table[0] = 0;
    for (let i = 1; i < 2048; i++) {
        table[i] = i < 1024 ? convertMantissa(i) : 0x38000000 + ((i - 1024) << 13);
    }
    return table;
}
/**
* Computes exponent table for casting Float16 to Float32
* See http://www.fox-toolkit.org/ftp/fasthalffloatconversion.pdf
*
* @returns Uint32Array, 64 exponent lookup values.
*/
function computeFloat16ExponentTable() {
    const table = new Uint32Array(64);
    // Regular positive (1..30) and negative (33..62) exponent buckets.
    for (let i = 1; i < 31; i++) {
        table[i] = i << 23;
    }
    for (let i = 33; i < 63; i++) {
        table[i] = 0x80000000 + ((i - 32) << 23);
    }
    // Special buckets: zero/subnormal (0, 32) and inf/NaN (31, 63).
    table[0] = 0;
    table[31] = 0x47800000;
    table[32] = 0x80000000;
    table[63] = 0xc7800000;
    return table;
}
/**
* Computes offset table for casting Float16 to Float32
* See http://www.fox-toolkit.org/ftp/fasthalffloatconversion.pdf
*
 * @returns Uint32Array, 64 offset lookup values.
*/
function computeFloat16OffsetTable() {
    // Every exponent bucket points at the second (normalized) half of the
    // mantissa table, except buckets 0 and 32 which use offset 0.
    const table = new Uint32Array(64).fill(1024);
    table[0] = 0;
    table[32] = 0;
    return table;
}
/**
* Retrieve a Float16 decoder which will decode a ByteArray of Float16 values
* to a Float32Array.
*
* @returns Function (buffer: Uint16Array) => Float32Array which decodes
* the Uint16Array of Float16 bytes to a Float32Array.
*/
function getFloat16Decoder() {
    // Table-driven conversion, based on
    // http://www.fox-toolkit.org/ftp/fasthalffloatconversion.pdf
    // The three lookup tables are computed once and captured by the closure.
    const mantisaTable = computeFloat16MantisaTable();
    const exponentTable = computeFloat16ExponentTable();
    const offsetTable = computeFloat16OffsetTable();
    return (quantizedArray) => {
        const out = new ArrayBuffer(4 * quantizedArray.length);
        const outBits = new Uint32Array(out);
        for (let i = 0; i < quantizedArray.length; i++) {
            const half = quantizedArray[i];
            // Top 6 bits (sign + exponent) select the table bucket; the low
            // 10 bits index into the mantissa table.
            const bucket = half >> 10;
            outBits[i] = mantisaTable[offsetTable[bucket] + (half & 0x3ff)] +
                exponentTable[bucket];
        }
        // Reinterpret the assembled float32 bit patterns as floats.
        return new Float32Array(out);
    };
}
//# sourceMappingURL=io_utils.js.map
/* WEBPACK VAR INJECTION */}.call(this, __webpack_require__(215).Buffer))
/***/ }),
/* 41 */
/***/ (function(module, __webpack_exports__, __webpack_require__) {
"use strict";
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "a", function() { return exp; });
/* harmony import */ var _engine__WEBPACK_IMPORTED_MODULE_0__ = __webpack_require__(5);
/* harmony import */ var _kernel_names__WEBPACK_IMPORTED_MODULE_1__ = __webpack_require__(3);
/* harmony import */ var _tensor_util_env__WEBPACK_IMPORTED_MODULE_2__ = __webpack_require__(2);
/* harmony import */ var _operation__WEBPACK_IMPORTED_MODULE_3__ = __webpack_require__(4);
/**
* @license
* Copyright 2018 Google LLC. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================================
*/
/**
* Computes exponential of the input `tf.Tensor` element-wise. `e ^ x`
*
* ```js
* const x = tf.tensor1d([1, 2, -3]);
*
* x.exp().print(); // or tf.exp(x)
* ```
* @param x The input tensor.
*
* @doc {heading: 'Operations', subheading: 'Basic math'}
*/
function exp_(x) {
    // Convert the input (tensor or tensor-like) to a tensor, then dispatch
    // to the registered Exp kernel.
    const $x = Object(_tensor_util_env__WEBPACK_IMPORTED_MODULE_2__[/* convertToTensor */ "a"])(x, 'x', 'exp');
    return _engine__WEBPACK_IMPORTED_MODULE_0__[/* ENGINE */ "a"].runKernel(_kernel_names__WEBPACK_IMPORTED_MODULE_1__[/* Exp */ "ab"], { x: $x });
}
// Public op wrapper around exp_.
const exp = Object(_operation__WEBPACK_IMPORTED_MODULE_3__[/* op */ "b"])({ exp_ });
//# sourceMappingURL=exp.js.map
/***/ }),
/* 42 */
/***/ (function(module, __webpack_exports__, __webpack_require__) {
"use strict";
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "a", function() { return identity; });
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "b", function() { return identityConfig; });
/* harmony import */ var _tensorflow_tfjs_core__WEBPACK_IMPORTED_MODULE_0__ = __webpack_require__(0);
/**
* @license
* Copyright 2020 Google LLC. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================================
*/
function identity(args) {
    const { x } = args.inputs;
    // Identity shares the input's underlying data id rather than copying,
    // so the backend's reference count on that buffer must be bumped.
    args.backend.incRef(x.dataId);
    return { dataId: x.dataId, shape: x.shape, dtype: x.dtype };
}
// Kernel registration record: binds the core `Identity` kernel name to the
// CPU implementation above.
const identityConfig = {
    kernelName: _tensorflow_tfjs_core__WEBPACK_IMPORTED_MODULE_0__["Identity"],
    backendName: 'cpu',
    kernelFunc: identity
};
//# sourceMappingURL=Identity.js.map
/***/ }),
/* 43 */
/***/ (function(module, __webpack_exports__, __webpack_require__) {
"use strict";
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "a", function() { return abs; });
/* harmony import */ var _engine__WEBPACK_IMPORTED_MODULE_0__ = __webpack_require__(5);
/* harmony import */ var _kernel_names__WEBPACK_IMPORTED_MODULE_1__ = __webpack_require__(3);
/* harmony import */ var _tensor_util_env__WEBPACK_IMPORTED_MODULE_2__ = __webpack_require__(2);
/* harmony import */ var _operation__WEBPACK_IMPORTED_MODULE_3__ = __webpack_require__(4);
/**
* @license
* Copyright 2018 Google LLC. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================================
*/
/**
* Computes absolute value element-wise: `abs(x)`
*
* ```js
* const x = tf.tensor1d([-1, 2, -3, 4]);
*
* x.abs().print(); // or tf.abs(x)
* ```
* @param x The input `tf.Tensor`.
*
* @doc {heading: 'Operations', subheading: 'Basic math'}
*/
function abs_(x) {
    // Accept tensor-likes as well as tensors.
    const $x = Object(_tensor_util_env__WEBPACK_IMPORTED_MODULE_2__[/* convertToTensor */ "a"])(x, 'x', 'abs');
    const inputs = { x: $x };
    // Complex inputs are routed to the ComplexAbs kernel; everything else
    // uses the plain Abs kernel.
    const kernelName = $x.dtype === 'complex64' ?
        _kernel_names__WEBPACK_IMPORTED_MODULE_1__[/* ComplexAbs */ "A"] :
        _kernel_names__WEBPACK_IMPORTED_MODULE_1__[/* Abs */ "a"];
    return _engine__WEBPACK_IMPORTED_MODULE_0__[/* ENGINE */ "a"].runKernel(kernelName, inputs);
}
// Public op wrapper around abs_.
const abs = Object(_operation__WEBPACK_IMPORTED_MODULE_3__[/* op */ "b"])({ abs_ });
//# sourceMappingURL=abs.js.map
/***/ }),
/* 44 */
/***/ (function(module, __webpack_exports__, __webpack_require__) {
"use strict";
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "c", function() { return getFusedDyActivation; });
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "b", function() { return getFusedBiasGradient; });
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "a", function() { return applyActivation; });
/* harmony export (binding) */ __webpack_require__.d(__webpack_exports__, "d", function() { return shouldFuse; });
/* harmony import */ var _broadcast_util__WEBPACK_IMPORTED_MODULE_0__ = __webpack_require__(17);
/* harmony import */ var _elu__WEBPACK_IMPORTED_MODULE_1__ = __webpack_require__(119);
/* harmony import */ var _leaky_relu__WEBPACK_IMPORTED_MODULE_2__ = __webpack_require__(122);
/* harmony import */ var _mul__WEBPACK_IMPORTED_MODULE_3__ = __webpack_require__(9);
/* harmony import */ var _prelu__WEBPACK_IMPORTED_MODULE_4__ = __webpack_require__(129);
/* harmony import */ var _relu__WEBPACK_IMPORTED_MODULE_5__ = __webpack_require__(81);
/* harmony import */ var _relu6__WEBPACK_IMPORTED_MODULE_6__ = __webpack_require__(130);
/* harmony import */ var _reshape__WEBPACK_IMPORTED_MODULE_7__ = __webpack_require__(7);
/* harmony import */ var _sigmoid__WEBPACK_IMPORTED_MODULE_8__ = __webpack_require__(71);
/* harmony import */ var _step__WEBPACK_IMPORTED_MODULE_9__ = __webpack_require__(82);
/* harmony import */ var _sum__WEBPACK_IMPORTED_MODULE_10__ = __webpack_require__(19);
/**
* @license
* Copyright 2019 Google LLC. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================================
*/
// Returns gradient for fused activation.
function getFusedDyActivation(dy, y, activation) {
    switch (activation) {
        case null:
        case undefined:
        case 'linear':
            // No activation (or linear): the upstream gradient passes through.
            return dy;
        case 'relu':
            // Gate dy by step(y), the relu derivative w.r.t. the output.
            return Object(_mul__WEBPACK_IMPORTED_MODULE_3__[/* mul */ "a"])(dy, Object(_step__WEBPACK_IMPORTED_MODULE_9__[/* step */ "a"])(y));
        default:
            throw new Error(`Cannot compute gradient for fused activation ${activation}.`);
    }
}
// Returns gradient for fused bias.
function getFusedBiasGradient(bias, dyActivation) {
    // If the bias was broadcast during the forward pass, sum the gradient
    // over the broadcast axes before reshaping back to the bias shape.
    const reduceAxes = _broadcast_util__WEBPACK_IMPORTED_MODULE_0__[/* getReductionAxes */ "c"](bias.shape, dyActivation.shape);
    const reduced = reduceAxes.length > 0 ?
        Object(_sum__WEBPACK_IMPORTED_MODULE_10__[/* sum */ "a"])(dyActivation, reduceAxes) :
        dyActivation;
    return Object(_reshape__WEBPACK_IMPORTED_MODULE_7__[/* reshape */ "a"])(reduced, bias.shape);
}
// Applies the named activation function to `x`. `preluActivationWeights`
// and `leakyreluAlpha` are only consulted for 'prelu' / 'leakyrelu'.
function applyActivation(x, activation, preluActivationWeights, leakyreluAlpha) {
    switch (activation) {
        case 'linear':
            return x;
        case 'relu':
            return Object(_relu__WEBPACK_IMPORTED_MODULE_5__[/* relu */ "a"])(x);
        case 'elu':
            return Object(_elu__WEBPACK_IMPORTED_MODULE_1__[/* elu */ "a"])(x);
        case 'relu6':
            return Object(_relu6__WEBPACK_IMPORTED_MODULE_6__[/* relu6 */ "a"])(x);
        case 'prelu':
            return Object(_prelu__WEBPACK_IMPORTED_MODULE_4__[/* prelu */ "a"])(x, preluActivationWeights);
        case 'leakyrelu':
            return Object(_leaky_relu__WEBPACK_IMPORTED_MODULE_2__[/* leakyRelu */ "a"])(x, leakyreluAlpha);
        case 'sigmoid':
            return Object(_sigmoid__WEBPACK_IMPORTED_MODULE_8__[/* sigmoid */ "a"])(x);
        default:
            throw new Error(`Unknown fused activation ${activation}.`);
    }
}
// Whether we should call fused ops.
// When a gradient is being recorded (depth > 0), only the 'linear'
// activation is allowed to fuse; otherwise fusing is always permitted.
const shouldFuse = (gradientDepth, activation) =>
    !(gradientDepth > 0) || activation === 'linear';
//# sourceMappingURL=fused_util.js.map
/***/ }),
/* 45 */
/***/ (function(module, __webpack_exports__, __webpack_require__) {
"use strict";
// EXPORTS
__webpack_require__.d(__webpack_exports__, "f", function() { return /* binding */ iteratorFromItems; });
__webpack_require__.d(__webpack_exports__, "e", function() { return /* binding */ iteratorFromFunction; });
__webpack_require__.d(__webpack_exports__, "d", function() { return /* binding */ iteratorFromConcatenated; });
__webpack_require__.d(__webpack_exports__, "g", function() { return /* binding */ iteratorFromZipped; });
__webpack_require__.d(__webpack_exports__, "a", function() { return /* binding */ lazy_iterator_LazyIterator; });
__webpack_require__.d(__webpack_exports__, "b", function() { return /* binding */ lazy_iterator_OneToManyIterator; });
__webpack_require__.d(__webpack_exports__, "c", function() { return /* binding */ ZipMismatchMode; });
// UNUSED EXPORTS: iteratorFromIncrementing, iteratorFromConcatenatedFunction, ChainedIterator, PrefetchIterator, ShuffleIterator
// EXTERNAL MODULE: ./node_modules/@tensorflow/tfjs-core/dist/index.js + 41 modules
var dist = __webpack_require__(0);
// EXTERNAL MODULE: ./node_modules/seedrandom/index.js
var seedrandom = __webpack_require__(86);
// EXTERNAL MODULE: ./node_modules/@tensorflow/tfjs-data/dist/util/deep_map.js
var deep_map = __webpack_require__(74);
// CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-data/dist/util/deep_clone.js
/**
* @license
* Copyright 2018 Google LLC. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* =============================================================================
*/
// Deep-copies a (possibly nested) container, cloning any tensors found at
// the leaves while leaving other leaf values shared.
function deepClone(container) {
    return Object(deep_map["b" /* deepMap */])(container, cloneIfTensor);
}
// Mapper for deepMap: clone tensors, recurse into iterables, pass through
// everything else untouched.
// tslint:disable-next-line: no-any
function cloneIfTensor(item) {
    if (item instanceof dist["Tensor"]) {
        return ({ value: item.clone(), recurse: false });
    }
    if (Object(deep_map["e" /* isIterable */])(item)) {
        return { value: null, recurse: true };
    }
    return { value: item, recurse: false };
}
//# sourceMappingURL=deep_clone.js.map
// CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-data/dist/util/ring_buffer.js
/**
* @license
* Copyright 2018 Google LLC. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* =============================================================================
*/
/**
* A ring buffer, providing O(1) FIFO, LIFO, and related operations.
*/
class RingBuffer {
    /**
     * Constructs a `RingBuffer`.
     * @param capacity The number of items that the buffer can accommodate.
     */
    constructor(capacity) {
        this.capacity = capacity;
        // Indices are kept in the range 0 <= index < 2*capacity so that a
        // full buffer can be told apart from an empty one.
        // See https://www.snellman.net/blog/archive/2016-12-13-ring-buffers/
        this.begin = 0; // inclusive
        this.end = 0; // exclusive
        if (capacity == null) {
            throw new RangeError('Can\'t create a ring buffer of unknown capacity.');
        }
        if (capacity < 1) {
            throw new RangeError('Can\'t create ring buffer of capacity < 1.');
        }
        this.data = new Array(capacity);
        this.doubledCapacity = 2 * capacity;
    }
    /**
     * Maps any index into the range 0 <= index < 2*capacity.
     */
    wrap(index) {
        // The % operator is unreliable on negative operands, so shift the
        // index into non-negative territory first.
        let wrapped = index;
        while (wrapped < 0) {
            wrapped += this.doubledCapacity;
        }
        return wrapped % this.doubledCapacity;
    }
    /** Reads the storage slot backing the given (doubled-range) index. */
    get(index) {
        if (index < 0) {
            throw new RangeError('Can\'t get item at a negative index.');
        }
        const slot = index % this.capacity;
        return this.data[slot];
    }
    /** Writes the storage slot backing the given (doubled-range) index. */
    set(index, value) {
        if (index < 0) {
            throw new RangeError('Can\'t set item at a negative index.');
        }
        const slot = index % this.capacity;
        this.data[slot] = value;
    }
    /**
     * Returns the current number of items in the buffer.
     */
    length() {
        const span = this.end - this.begin;
        return span < 0 ? this.doubledCapacity + span : span;
    }
    /**
     * Reports whether the buffer is full.
     * @returns true if the number of items in the buffer equals its
     *   capacity, and false otherwise.
     */
    isFull() {
        return this.length() === this.capacity;
    }
    /**
     * Reports whether the buffer is empty.
     * @returns true if the number of items in the buffer equals zero, and
     *   false otherwise.
     */
    isEmpty() {
        return this.length() === 0;
    }
    /**
     * Adds an item to the end of the buffer.
     */
    push(value) {
        if (this.isFull()) {
            throw new RangeError('Ring buffer is full.');
        }
        this.set(this.end, value);
        this.end = this.wrap(this.end + 1);
    }
    /**
     * Adds many items to the end of the buffer, in order.
     */
    pushAll(values) {
        for (const item of values) {
            this.push(item);
        }
    }
    /**
     * Removes and returns the last item in the buffer.
     */
    pop() {
        if (this.isEmpty()) {
            throw new RangeError('Ring buffer is empty.');
        }
        // Step the end marker back, read the vacated slot, then clear it so
        // the removed item can be garbage-collected.
        this.end = this.wrap(this.end - 1);
        const item = this.get(this.end);
        this.set(this.end, undefined);
        return item;
    }
    /**
     * Adds an item to the beginning of the buffer.
     */
    unshift(value) {
        if (this.isFull()) {
            throw new RangeError('Ring buffer is full.');
        }
        this.begin = this.wrap(this.begin - 1);
        this.set(this.begin, value);
    }
    /**
     * Removes and returns the first item in the buffer.
     */
    shift() {
        if (this.isEmpty()) {
            throw new RangeError('Ring buffer is empty.');
        }
        const item = this.get(this.begin);
        this.set(this.begin, undefined);
        this.begin = this.wrap(this.begin + 1);
        return item;
    }
    /**
     * Removes and returns a specific item in the buffer, and moves the last
     * item to the vacated slot. This is useful for implementing a shuffling
     * stream. Note that this operation necessarily scrambles the original
     * order.
     *
     * @param relativeIndex: the index of the item to remove, relative to the
     *   first item in the buffer (e.g., hiding the ring nature of the
     *   underlying storage).
     */
    shuffleExcise(relativeIndex) {
        if (this.isEmpty()) {
            throw new RangeError('Ring buffer is empty.');
        }
        const absoluteIndex = this.wrap(this.begin + relativeIndex);
        const item = this.get(absoluteIndex);
        // Backfill the hole with the popped last element; order is not
        // preserved, which is fine for shuffling use cases.
        this.set(absoluteIndex, this.pop());
        return item;
    }
}
//# sourceMappingURL=ring_buffer.js.map
// CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-data/dist/util/growing_ring_buffer.js
/**
* @license
* Copyright 2018 Google LLC. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* =============================================================================
*/
class growing_ring_buffer_GrowingRingBuffer extends RingBuffer {
    /**
     * Constructs a `GrowingRingBuffer` at the initial capacity.
     */
    constructor() {
        super(growing_ring_buffer_GrowingRingBuffer.INITIAL_CAPACITY);
    }
    /** A growing buffer never reports full: it expands instead. */
    isFull() {
        return false;
    }
    push(value) {
        if (super.isFull()) {
            this.expand();
        }
        super.push(value);
    }
    unshift(value) {
        if (super.isFull()) {
            this.expand();
        }
        super.unshift(value);
    }
    /**
     * Doubles the capacity of the buffer.
     */
    expand() {
        const grownCapacity = this.capacity * 2;
        const grownData = new Array(grownCapacity);
        const itemCount = this.length();
        // Copy in logical order so the contents start at slot 0 of the new
        // storage; the live region may wrap around the end of the old array,
        // so raw slots cannot simply be extended in place.
        for (let i = 0; i < itemCount; i++) {
            grownData[i] = this.get(this.wrap(this.begin + i));
        }
        this.data = grownData;
        this.capacity = grownCapacity;
        this.doubledCapacity = 2 * this.capacity;
        this.begin = 0;
        this.end = itemCount;
    }
}
growing_ring_buffer_GrowingRingBuffer.INITIAL_CAPACITY = 32;
//# sourceMappingURL=growing_ring_buffer.js.map
// CONCATENATED MODULE: ./node_modules/@tensorflow/tfjs-data/dist/iterators/lazy_iterator.js
/**
* @license
* Copyright 2018 Google LLC. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* =============================================================================
*/
// Here we implement a simple asynchronous iterator.
// This lets us avoid using either third-party stream libraries or
// recent TypeScript language support requiring polyfills.
/**
* Create a `LazyIterator` from an array of items.
*/
function iteratorFromItems(items) {
    // Delegates to lazy_iterator_ArrayIterator (defined elsewhere in this
    // bundle), which serves the given array as a LazyIterator.
    return new lazy_iterator_ArrayIterator(items);
}
/**
* Create a `LazyIterator` of incrementing integers.
*/
function iteratorFromIncrementing(start) {
    // Infinite iterator: never reports done; each pull yields the next
    // integer, beginning at `start`.
    let next = start;
    return iteratorFromFunction(() => ({ value: next++, done: false }));
}
/**
* Create a `LazyIterator` from a function.
*
* ```js
* let i = -1;
* const func = () =>
* ++i < 5 ? {value: i, done: false} : {value: null, done: true};
* const iter = tf.data.iteratorFromFunction(func);
* await iter.forEachAsync(e => console.log(e));
* ```
*
* @param func A function that produces data on each call.
*/
function iteratorFromFunction(func) {
    // Wraps `func` in a FunctionCallIterator (defined elsewhere in this
    // bundle); each pull on the iterator invokes `func` once.
    return new FunctionCallIterator(func);
}
/**
 * Create a `LazyIterator` by concatenating underlying streams, which are
 * themselves provided as a stream.
 *
 * This can also be thought of as a "stream flatten" operation.
 *
 * @param baseIterators A stream of streams to be concatenated.
 * @param baseErrorHandler An optional function that can intercept `Error`s
 *   raised during a `next()` call on the base stream. This function can decide
 *   whether the error should be propagated, whether the error should be
 *   ignored, or whether the base stream should be terminated.
 */
function iteratorFromConcatenated(baseIterators, baseErrorHandler) {
    const chained = new ChainedIterator(baseIterators, baseErrorHandler);
    return chained;
}
/**
 * Create a `LazyIterator` by concatenating streams produced by calling a
 * stream-generating function a given number of times.
 *
 * Since a `LazyIterator` is read-once, it cannot be repeated, but this
 * function can be used to achieve a similar effect:
 *
 *   LazyIterator.ofConcatenatedFunction(() => new MyIterator(), 6);
 *
 * @param iteratorFunc A function that produces a new stream on each call.
 * @param count The number of times to call the function.
 * @param baseErrorHandler An optional function that can intercept `Error`s
 *   raised during a `next()` call on the base stream. This function can decide
 *   whether the error should be propagated, whether the error should be
 *   ignored, or whether the base stream should be terminated.
 */
function iteratorFromConcatenatedFunction(iteratorFunc, count, baseErrorHandler) {
    // Build a bounded stream of iterator factories, then flatten it.
    const limitedFactoryStream = iteratorFromFunction(iteratorFunc).take(count);
    return iteratorFromConcatenated(limitedFactoryStream, baseErrorHandler);
}
/**
 * Create a `LazyIterator` by zipping together an array, dict, or nested
 * structure of `LazyIterator`s (and perhaps additional constants).
 *
 * The underlying streams must provide elements in a consistent order such
 * that they correspond.
 *
 * Typically, the underlying streams should have the same number of
 * elements. If they do not, the behavior is determined by the
 * `mismatchMode` argument.
 *
 * The nested structure of the `iterators` argument determines the
 * structure of elements in the resulting iterator.
 *
 * @param iterators An array or object containing LazyIterators at the
 *   leaves.
 * @param mismatchMode Determines what to do when one underlying iterator
 *   is exhausted before the others. `ZipMismatchMode.FAIL` (the default)
 *   causes an error to be thrown in this case. `ZipMismatchMode.SHORTEST`
 *   causes the zipped iterator to terminate with the first underlying
 *   streams, so elements remaining on the longer streams are ignored.
 *   `ZipMismatchMode.LONGEST` causes the zipped stream to continue, filling
 *   in nulls for the exhausted streams, until all streams are exhausted.
 */
function iteratorFromZipped(iterators, mismatchMode = ZipMismatchMode.FAIL) {
    const zipped = new lazy_iterator_ZipIterator(iterators, mismatchMode);
    return zipped;
}
/**
 * An asynchronous iterator, providing lazy access to a potentially
 * unbounded stream of elements.
 *
 * Iterator can be obtained from a dataset:
 * `const iter = await dataset.iterator();`
 */
class lazy_iterator_LazyIterator {
    /**
     * Collect all remaining elements of a bounded stream into an array.
     * Obviously this will succeed only for small streams that fit in memory.
     * Useful for testing.
     *
     * @returns A Promise for an array of stream elements, which will resolve
     *   when the stream is exhausted.
     */
    async toArray() {
        const result = [];
        let x = await this.next();
        while (!x.done) {
            result.push(x.value);
            x = await this.next();
        }
        return result;
    }
    /**
     * Collect all elements of this dataset into an array with prefetching 100
     * elements. This is useful for testing, because the prefetch changes the
     * order in which the Promises are resolved along the processing pipeline.
     * This may help expose bugs where results are dependent on the order of
     * Promise resolution rather than on the logical order of the stream (i.e.,
     * due to hidden mutable state).
     *
     * @returns A Promise for an array of stream elements, which will resolve
     *   when the stream is exhausted.
     */
    async toArrayForTest() {
        const stream = this.prefetch(100);
        const result = [];
        let x = await stream.next();
        while (!x.done) {
            result.push(x.value);
            x = await stream.next();
        }
        return result;
    }
    /**
     * Draw items from the stream until it is exhausted.
     *
     * This can be useful when the stream has side effects but no output.  In
     * that case, calling this function guarantees that the stream will be
     * fully processed.
     */
    async resolveFully() {
        let x = await this.next();
        while (!x.done) {
            x = await this.next();
        }
    }
    /**
     * Draw items from the stream until it is exhausted, or a predicate fails.
     *
     * This can be useful when the stream has side effects but no output.  In
     * that case, calling this function guarantees that the stream will be
     * fully processed.
     */
    async resolveWhile(predicate) {
        let x = await this.next();
        let shouldContinue = predicate(x.value);
        while ((!x.done) && shouldContinue) {
            x = await this.next();
            shouldContinue = predicate(x.value);
        }
    }
    /**
     * Handles errors thrown on this stream using a provided handler function.
     *
     * @param handler A function that handles any `Error` thrown during a `next()`
     *   call and returns true if the stream should continue (dropping the failed
     *   call) or false if the stream should quietly terminate.  If the handler
     *   itself throws (or rethrows) an `Error`, that will be propagated.
     *
     * @returns A `LazyIterator` of elements passed through from upstream,
     *   possibly filtering or terminating on upstream `next()` calls that
     *   throw an `Error`.
     */
    handleErrors(handler) {
        return new ErrorHandlingLazyIterator(this, handler);
    }
    // TODO(soergel): Implement reduce() etc.
    /**
     * Filters this stream according to `predicate`.
     *
     * @param predicate A function mapping a stream element to a boolean or a
     * `Promise` for one.
     *
     * @returns A `LazyIterator` of elements for which the predicate was true.
     */
    filter(predicate) {
        return new lazy_iterator_FilterIterator(this, predicate);
    }
    /**
     * Maps this stream through a 1-to-1 transform.
     *
     * @param transform A function mapping a stream element to a transformed
     *   element.
     *
     * @returns A `LazyIterator` of transformed elements.
     */
    map(transform) {
        return new lazy_iterator_MapIterator(this, transform);
    }
    /**
     * Maps this stream through an async 1-to-1 transform.
     *
     * @param transform A function mapping a stream element to a `Promise` for a
     *   transformed stream element.
     *
     * @returns A `LazyIterator` of transformed elements.
     */
    mapAsync(transform) {
        return new lazy_iterator_AsyncMapIterator(this, transform);
    }
    /**
     * Maps this stream through a 1-to-1 transform, forcing serial execution.
     *
     * @param transform A function mapping a stream element to a transformed
     *   element.
     *
     * @returns A `LazyIterator` of transformed elements.
     */
    serialMapAsync(transform) {
        return new lazy_iterator_AsyncMapIterator(this, transform).serial();
    }
    /**
     * Maps this stream through a 1-to-many transform.
     *
     * @param transform A function mapping a stream element to an array of
     *   transformed elements.
     *
     * @returns A `DataStream` of transformed elements.
     */
    flatmap(transform) {
        return new lazy_iterator_FlatmapIterator(this, transform);
    }
    /**
     * Apply a function to every element of the stream.
     *
     * @param f A function to apply to each stream element.
     */
    async forEachAsync(f) {
        return this.map(f).resolveFully();
    }
    /**
     * Apply a function to every element of the stream, forcing serial execution.
     *
     * @param f A function to apply to each stream element.  Should return 'true'
     *   to indicate that the stream should continue, or 'false' to cause it to
     *   terminate.
     */
    async serialForEach(f) {
        return this.serialMapAsync(f).resolveWhile(x => (x === true));
    }
    /**
     * Groups elements into batches, represented as arrays of elements.
     *
     * We can think of the elements of this iterator as 'rows' (even if they are
     * nested structures).  By the same token, consecutive values for a given
     * key within the elements form a 'column'.  This matches the usual sense of
     * 'row' and 'column' when processing tabular data (e.g., parsing a CSV).
     *
     * Thus, "Row-major" means that the resulting batch is simply a collection of
     * rows: `[row1, row2, row3, ...]`.  This is in contrast to the column-major
     * form, which is needed for vectorized computation.
     *
     * @param batchSize The number of elements desired per batch.
     * @param smallLastBatch Whether to emit the final batch when it has fewer
     *   than batchSize elements. Default true.
     * @returns A `LazyIterator` of batches of elements, represented as arrays
     *   of the original element type.
     */
    rowMajorBatch(batchSize, smallLastBatch = true) {
        return new RowMajorBatchIterator(this, batchSize, smallLastBatch);
    }
    /**
     * Groups elements into batches, represented in column-major form.
     *
     * We can think of the elements of this iterator as 'rows' (even if they are
     * nested structures).  By the same token, consecutive values for a given
     * key within the elements form a 'column'.  This matches the usual sense of
     * 'row' and 'column' when processing tabular data (e.g., parsing a CSV).
     *
     * Thus, "column-major" means that the resulting batch is a (potentially
     * nested) structure representing the columns.  Each column entry, then,
     * contains a collection of the values found in that column for a range of
     * input elements.  This representation allows for vectorized computation, in
     * contrast to the row-major form.
     *
     * The inputs should all have the same nested structure (i.e., of arrays and
     * dicts).  The result is a single object with the same nested structure,
     * where the leaves are arrays collecting the values of the inputs at that
     * location (or, optionally, the result of a custom function applied to those
     * arrays).
     *
     * @param batchSize The number of elements desired per batch.
     * @param smallLastBatch Whether to emit the final batch when it has fewer
     *   than batchSize elements. Default true.
     * @param zipFn: (optional) A function that expects an array of elements at a
     *   single node of the object tree, and returns a `DeepMapResult`.  The
     *   `DeepMapResult` either provides a result value for that node (i.e.,
     *   representing the subtree), or indicates that the node should be processed
     *   recursively.  The default zipFn recurses as far as possible and places
     *   arrays at the leaves.
     * @returns A `LazyIterator` of batches of elements, represented as an object
     *   with collections at the leaves.
     */
    columnMajorBatch(batchSize, smallLastBatch = true, 
    // tslint:disable-next-line:no-any
    zipFn = deep_map["f" /* zipToList */]) {
        // First collect the desired number of input elements as a row-major batch.
        const rowBatches = this.rowMajorBatch(batchSize, smallLastBatch);
        // Now 'rotate' or 'pivot' the data, collecting all values from each column
        // in the batch (i.e., for each key within the elements) into an array.
        return rowBatches.map(x => Object(deep_map["d" /* deepZip */])(x, zipFn));
    }
    /**
     * Concatenate this `LazyIterator` with another.
     *
     * @param iterator A `LazyIterator` to be concatenated onto this one.
     * @param baseErrorHandler An optional function that can intercept `Error`s
     *   raised during a `next()` call on the base stream.  This function can
     *   decide whether the error should be propagated, whether the error should
     *   be ignored, or whether the base stream should be terminated.
     * @returns A `LazyIterator`.
     */
    concatenate(iterator, baseErrorHandler) {
        return new ChainedIterator(iteratorFromItems([this, iterator]), baseErrorHandler);
    }
    /**
     * Limits this stream to return at most `count` items.
     *
     * @param count The maximum number of items to provide from the stream. If
     * a negative or undefined value is given, the entire stream is returned
     *   unaltered.
     */
    take(count) {
        // `count == null` deliberately uses loose equality to match both null
        // and undefined; `null < 0` is false, so the order of checks is safe.
        if (count < 0 || count == null) {
            return this;
        }
        return new TakeIterator(this, count);
    }
    /**
     * Skips the first `count` items in this stream.
     *
     * @param count The number of items to skip.  If a negative or undefined
     *   value is given, the entire stream is returned unaltered.
     */
    skip(count) {
        // Loose `== null` matches both null and undefined, as in take().
        if (count < 0 || count == null) {
            return this;
        }
        return new lazy_iterator_SkipIterator(this, count);
    }
    /**
     * Prefetch the first `bufferSize` items in this stream.
     *
     * Note this prefetches Promises, but makes no guarantees about when those
     * Promises resolve.
     *
     * @param bufferSize: An integer specifying the number of elements to be
     *   prefetched.
     */
    prefetch(bufferSize) {
        return new lazy_iterator_PrefetchIterator(this, bufferSize);
    }
    // TODO(soergel): deep sharded shuffle, where supported
    /**
     * Randomly shuffles the elements of this stream.
     *
     * @param windowSize: An integer specifying the number of elements from
     *   this stream from which the new stream will sample.
     * @param seed: (Optional.) An integer specifying the random seed that
     *   will be used to create the distribution.
     */
    shuffle(windowSize, seed) {
        return new lazy_iterator_ShuffleIterator(this, windowSize, seed);
    }
    /**
     * Force an iterator to execute serially: each next() call will await the
     * prior one, so that they cannot execute concurrently.
     */
    serial() {
        return new SerialIterator(this);
    }
}
// ============================================================================
// The following private classes serve to implement the chainable methods
// on LazyIterator. Unfortunately they can't be placed in separate files,
// due to resulting trouble with circular imports.
// ============================================================================
// Iterators that just extend LazyIterator directly
// ============================================================================
/** A `LazyIterator` that yields deep clones of an array's items in order. */
class lazy_iterator_ArrayIterator extends lazy_iterator_LazyIterator {
    constructor(items) {
        super();
        this.items = items;
        // Index of the next item to emit.
        this.trav = 0;
    }
    summary() {
        return `Array of ${this.items.length} items`;
    }
    async next() {
        const exhausted = this.trav >= this.items.length;
        if (exhausted) {
            return { value: null, done: true };
        }
        const current = this.items[this.trav];
        this.trav += 1;
        // Clone so downstream consumers cannot mutate the backing array.
        return { value: deepClone(current), done: false };
    }
}
/**
 * A `LazyIterator` whose elements are produced by repeatedly invoking a
 * user-supplied function.
 */
class FunctionCallIterator extends lazy_iterator_LazyIterator {
    constructor(nextFn) {
        super();
        this.nextFn = nextFn;
    }
    summary() {
        return `Function call`;
    }
    async next() {
        try {
            return this.nextFn();
        }
        catch (err) {
            // Prefix context onto the message while keeping the original
            // stack trace intact.
            err.message =
                `Error thrown while iterating through a dataset: ${err.message}`;
            throw err;
        }
    }
}
/**
 * Forces serial execution of an upstream iterator: each next() call awaits
 * the previous one, so upstream reads can never run concurrently.
 */
class SerialIterator extends lazy_iterator_LazyIterator {
    constructor(upstream) {
        super();
        this.upstream = upstream;
        // Seed the chain with an already-resolved sentinel so the first
        // next() call proceeds immediately.
        this.lastRead = Promise.resolve({ value: null, done: false });
    }
    summary() {
        return `${this.upstream.summary()} -> Serial`;
    }
    async next() {
        // This sets this.lastRead to a new Promise right away, as opposed to
        // saying `await this.lastRead; this.lastRead = this.serialNext();` which
        // would not work because this.nextRead would be updated only after the
        // promise resolves.
        this.lastRead = this.lastRead.then(() => this.serialNext());
        return this.lastRead;
    }
    async serialNext() {
        return this.upstream.next();
    }
}
/**
 * Skips the first `maxCount` elements of an upstream iterator, disposing
 * any Tensors they contain, then passes the rest through unchanged.
 */
class lazy_iterator_SkipIterator extends lazy_iterator_LazyIterator {
    constructor(upstream, maxCount) {
        super();
        this.upstream = upstream;
        this.maxCount = maxCount;
        // Local state that should not be clobbered by out-of-order execution.
        this.count = 0;
        this.lastRead = Promise.resolve({ value: null, done: false });
    }
    summary() {
        return `${this.upstream.summary()} -> Skip`;
    }
    async next() {
        // This sets this.lastRead to a new Promise right away, as opposed to
        // saying `await this.lastRead; this.lastRead = this.serialNext();` which
        // would not work because this.nextRead would be updated only after the
        // promise resolves.
        this.lastRead = this.lastRead.then(() => this.serialNext());
        return this.lastRead;
    }
    async serialNext() {
        // TODO(soergel): consider tradeoffs of reading in parallel, eg.
        // collecting next() promises in an Array and then waiting for
        // Promise.all() of those. Benefit: pseudo-parallel execution. Drawback:
        // maybe delayed GC.
        while (this.count++ < this.maxCount) {
            const skipped = await this.upstream.next();
            // short-circuit if upstream is already empty
            if (skipped.done) {
                return skipped;
            }
            // Dispose any Tensors in the skipped element so they don't leak.
            dist["dispose"](skipped.value);
        }
        return this.upstream.next();
    }
}
/** Passes through at most `maxCount` elements from an upstream iterator. */
class TakeIterator extends lazy_iterator_LazyIterator {
    constructor(upstream, maxCount) {
        super();
        this.upstream = upstream;
        this.maxCount = maxCount;
        this.count = 0;
    }
    summary() {
        return `${this.upstream.summary()} -> Take`;
    }
    async next() {
        // Post-increment: the element numbered maxCount is the first to be cut.
        const emittedSoFar = this.count++;
        if (emittedSoFar >= this.maxCount) {
            return { value: null, done: true };
        }
        return this.upstream.next();
    }
}
// Note this batch just groups items into row-wise element arrays.
// Rotating these to a column-wise representation happens only at the dataset
// level.
/**
 * Groups upstream elements into arrays of `batchSize` elements, reading
 * the upstream serially so batches preserve stream order.
 */
class RowMajorBatchIterator extends lazy_iterator_LazyIterator {
    constructor(upstream, batchSize, enableSmallLastBatch = true) {
        super();
        this.upstream = upstream;
        this.batchSize = batchSize;
        // Whether a final, partially-filled batch is emitted (vs. discarded).
        this.enableSmallLastBatch = enableSmallLastBatch;
        this.lastRead = Promise.resolve({ value: null, done: false });
    }
    summary() {
        return `${this.upstream.summary()} -> RowMajorBatch`;
    }
    async next() {
        // This sets this.lastRead to a new Promise right away, as opposed to
        // saying `await this.lastRead; this.lastRead = this.serialNext();` which
        // would not work because this.nextRead would be updated only after the
        // promise resolves.
        this.lastRead = this.lastRead.then(() => this.serialNext());
        return this.lastRead;
    }
    async serialNext() {
        const batch = [];
        while (batch.length < this.batchSize) {
            const item = await this.upstream.next();
            if (item.done) {
                // Upstream exhausted mid-batch: emit the partial batch, or end.
                if (this.enableSmallLastBatch && batch.length > 0) {
                    return { value: batch, done: false };
                }
                return { value: null, done: true };
            }
            batch.push(item.value);
        }
        return { value: batch, done: false };
    }
}
/**
 * Yields only the upstream elements for which `predicate` returns true,
 * disposing Tensors contained in rejected elements. Reads serially.
 */
class lazy_iterator_FilterIterator extends lazy_iterator_LazyIterator {
    constructor(upstream, predicate) {
        super();
        this.upstream = upstream;
        this.predicate = predicate;
        this.lastRead = Promise.resolve({ value: null, done: false });
    }
    summary() {
        return `${this.upstream.summary()} -> Filter`;
    }
    async next() {
        // This sets this.lastRead to a new Promise right away, as opposed to
        // saying `await this.lastRead; this.lastRead = this.serialNext();` which
        // would not work because this.nextRead would be updated only after the
        // promise resolves.
        this.lastRead = this.lastRead.then(() => this.serialNext());
        return this.lastRead;
    }
    async serialNext() {
        while (true) {
            const item = await this.upstream.next();
            if (item.done || this.predicate(item.value)) {
                return item;
            }
            // Rejected element: free any Tensors it holds before moving on.
            dist["dispose"](item.value);
        }
    }
}
/**
 * Maps each upstream element through a synchronous 1-to-1 transform,
 * disposing any input Tensors that do not survive into the output.
 */
class lazy_iterator_MapIterator extends lazy_iterator_LazyIterator {
    constructor(upstream, transform) {
        super();
        this.upstream = upstream;
        this.transform = transform;
    }
    summary() {
        return `${this.upstream.summary()} -> Map`;
    }
    async next() {
        const upstreamResult = await this.upstream.next();
        if (upstreamResult.done) {
            return { value: null, done: true };
        }
        // Remember the Tensors contained in the input before transforming:
        // the transform may mutate the element in place, and it is responsible
        // for tidying its own intermediates, so afterwards we dispose only
        // those inputs that were not passed through to the output.
        const inputTensors = dist["tensor_util"].getTensorsInContainer(upstreamResult.value);
        const mapped = this.transform(upstreamResult.value);
        const outputTensors = dist["tensor_util"].getTensorsInContainer(mapped);
        // TODO(soergel) faster intersection
        // TODO(soergel) move to tf.disposeExcept(in, out)?
        for (const tensor of inputTensors) {
            if (!dist["tensor_util"].isTensorInList(tensor, outputTensors)) {
                tensor.dispose();
            }
        }
        return { value: mapped, done: false };
    }
}
/**
 * Wraps an upstream iterator, routing any `Error` thrown by its next()
 * through a handler that decides whether to skip the failed read (handler
 * returns true) or quietly end the stream (handler returns false).
 */
class ErrorHandlingLazyIterator extends lazy_iterator_LazyIterator {
    constructor(upstream, handler) {
        super();
        this.upstream = upstream;
        this.handler = handler;
        this.count = 0;
        this.lastRead = Promise.resolve({ value: null, done: false });
    }
    summary() {
        return `${this.upstream.summary()} -> handleErrors`;
    }
    async next() {
        // This sets this.lastRead to a new Promise right away, as opposed to
        // saying `await this.lastRead; this.lastRead = this.serialNext();` which
        // would not work because this.nextRead would be updated only after the
        // promise resolves.
        this.lastRead = this.lastRead.then(() => this.serialNext());
        return this.lastRead;
    }
    async serialNext() {
        while (true) {
            try {
                return await this.upstream.next();
            }
            catch (e) {
                if (!this.handler(e)) {
                    return { value: null, done: true };
                }
                // If the handler returns true, loop and fetch the next upstream item.
                // If the upstream iterator throws an endless stream of errors, and if
                // the handler says to ignore them, then we loop forever here. That is
                // the correct behavior-- it's up to the handler to decide when to stop.
            }
        }
    }
}
/**
 * Maps each upstream element through an asynchronous 1-to-1 transform,
 * disposing any input Tensors that do not survive into the output.
 */
class lazy_iterator_AsyncMapIterator extends lazy_iterator_LazyIterator {
    constructor(upstream, transform) {
        super();
        this.upstream = upstream;
        this.transform = transform;
    }
    summary() {
        return `${this.upstream.summary()} -> AsyncMap`;
    }
    async next() {
        const upstreamResult = await this.upstream.next();
        if (upstreamResult.done) {
            return { value: null, done: true };
        }
        // Remember the Tensors contained in the input before transforming:
        // the transform may mutate the element in place, and it is responsible
        // for tidying its own intermediates, so afterwards we dispose only
        // those inputs that were not passed through to the output.
        const inputTensors = dist["tensor_util"].getTensorsInContainer(upstreamResult.value);
        const mapped = await this.transform(upstreamResult.value);
        const outputTensors = dist["tensor_util"].getTensorsInContainer(mapped);
        // TODO(soergel) faster intersection
        // TODO(soergel) move to tf.disposeExcept(in, out)?
        for (const tensor of inputTensors) {
            if (!dist["tensor_util"].isTensorInList(tensor, outputTensors)) {
                tensor.dispose();
            }
        }
        return { value: mapped, done: false };
    }
}
// Iterators that maintain a queue of pending items
// ============================================================================
/**
 * A base class for transforming streams that operate by maintaining an
 * output queue of elements that are ready to return via next(). This is
 * commonly required when the transformation is 1-to-many: A call to next()
 * may trigger a call to the underlying stream, which will produce many
 * mapped elements of this stream-- of which we need to return only one, so
 * we have to queue the rest.
 */
class lazy_iterator_OneToManyIterator extends lazy_iterator_LazyIterator {
    constructor() {
        super();
        // Queue of ready-to-emit elements produced by pump().
        this.outputQueue = new growing_ring_buffer_GrowingRingBuffer();
        this.lastRead = Promise.resolve({ value: null, done: false });
    }
    async next() {
        // This sets this.lastRead to a new Promise right away, as opposed to
        // saying `await this.lastRead; this.lastRead = this.serialNext();` which
        // would not work because this.nextRead would be updated only after the
        // promise resolves.
        this.lastRead = this.lastRead.then(() => this.serialNext());
        return this.lastRead;
    }
    async serialNext() {
        // Fetch so that the queue contains at least one item if possible.
        // If the upstream source is exhausted, AND there are no items left in
        // the output queue, then this stream is also exhausted.
        // Subclasses implement pump(), which enqueues zero or more elements
        // and resolves to false once the upstream source is exhausted.
        while (this.outputQueue.length() === 0) {
            // TODO(soergel): consider parallel reads.
            if (!await this.pump()) {
                return { value: null, done: true };
            }
        }
        return { value: this.outputQueue.shift(), done: false };
    }
}
/**
 * Maps each upstream element to an array of elements, flattening the
 * results into a single stream via the inherited output queue.
 */
class lazy_iterator_FlatmapIterator extends lazy_iterator_OneToManyIterator {
    constructor(upstream, transform) {
        super();
        this.upstream = upstream;
        this.transform = transform;
    }
    summary() {
        return `${this.upstream.summary()} -> Flatmap`;
    }
    async pump() {
        const upstreamResult = await this.upstream.next();
        if (upstreamResult.done) {
            return false;
        }
        // Remember the Tensors contained in the input before transforming:
        // the transform may mutate the element in place, and it is responsible
        // for tidying its own intermediates, so afterwards we dispose only
        // those inputs that were not passed through to the output.
        const inputTensors = dist["tensor_util"].getTensorsInContainer(upstreamResult.value);
        const mappedArray = this.transform(upstreamResult.value);
        const outputTensors = dist["tensor_util"].getTensorsInContainer(mappedArray);
        this.outputQueue.pushAll(mappedArray);
        // TODO(soergel) faster intersection, and deduplicate outputTensors
        // TODO(soergel) move to tf.disposeExcept(in, out)?
        for (const tensor of inputTensors) {
            if (!dist["tensor_util"].isTensorInList(tensor, outputTensors)) {
                tensor.dispose();
            }
        }
        return true;
    }
}
/**
 * Provides a `LazyIterator` that concatenates a stream of underlying
 * streams.
 *
 * Doing this in a concurrency-safe way requires some trickery.  In
 * particular, we want this stream to return the elements from the
 * underlying streams in the correct order according to when next() was
 * called, even if the resulting Promises resolve in a different order.
 */
class ChainedIterator extends lazy_iterator_LazyIterator {
    constructor(iterators, baseErrorHandler) {
        super();
        this.baseErrorHandler = baseErrorHandler;
        // Strict Promise execution order:
        // a next() call may not even begin until the previous one completes.
        this.lastRead = null;
        // Local state that should not be clobbered by out-of-order execution.
        // The stream currently being read from (null until the first read,
        // and again whenever the current stream is exhausted).
        this.iterator = null;
        this.moreIterators = iterators;
    }
    summary() {
        const upstreamSummaries = 'TODO: fill in upstream of chained summaries';
        return `${upstreamSummaries} -> Chained`;
    }
    async next() {
        this.lastRead = this.readFromChain(this.lastRead);
        return this.lastRead;
    }
    async readFromChain(lastRead) {
        // Must await on the previous read since the previous read may have advanced
        // the stream of streams, from which we need to read.
        // This is unfortunate since we can't parallelize reads. Which means
        // prefetching of chained streams is a no-op.
        // One solution is to prefetch immediately upstream of this.
        await lastRead;
        if (this.iterator == null) {
            const iteratorResult = await this.moreIterators.next();
            if (iteratorResult.done) {
                // No more streams to stream from.
                return { value: null, done: true };
            }
            this.iterator = iteratorResult.value;
            // Wrap each underlying stream with the error handler, if any.
            if (this.baseErrorHandler != null) {
                this.iterator = this.iterator.handleErrors(this.baseErrorHandler);
            }
        }
        const itemResult = await this.iterator.next();
        if (itemResult.done) {
            // Current stream exhausted: recurse to advance to the next one.
            this.iterator = null;
            return this.readFromChain(lastRead);
        }
        return itemResult;
    }
}
var ZipMismatchMode;
// Numeric enum with reverse lookup (name -> ordinal and ordinal -> name),
// matching TypeScript's compiled enum shape.
// FAIL: throw when streams have mismatched lengths.
// SHORTEST: terminate with the shortest stream.
// LONGEST: use nulls for exhausted streams; use up the longest stream.
(function (enumObj) {
    ["FAIL", "SHORTEST", "LONGEST"].forEach((name, ordinal) => {
        enumObj[enumObj[name] = ordinal] = name;
    });
})(ZipMismatchMode || (ZipMismatchMode = {}));
/**
 * Provides a `LazyIterator` that zips together an array, dict, or nested
 * structure of `LazyIterator`s (and perhaps additional constants).
 *
 * The underlying streams must provide elements in a consistent order such
 * that they correspond.
 *
 * Typically, the underlying streams should have the same number of
 * elements. If they do not, the behavior is determined by the
 * `mismatchMode` argument.
 *
 * The nested structure of the `iterators` argument determines the
 * structure of elements in the resulting iterator.
 *
 * Doing this in a concurrency-safe way requires some trickery.  In
 * particular, we want this stream to return the elements from the
 * underlying streams in the correct order according to when next() was
 * called, even if the resulting Promises resolve in a different order.
 *
 * @param iterators: An array or object containing LazyIterators at the
 * leaves.
 * @param mismatchMode: Determines what to do when one underlying iterator
 * is exhausted before the others.  `ZipMismatchMode.FAIL` (the default)
 * causes an error to be thrown in this case.  `ZipMismatchMode.SHORTEST`
 * causes the zipped iterator to terminate with the first underlying
 * streams, so elements remaining on the longer streams are ignored.
 * `ZipMismatchMode.LONGEST` causes the zipped stream to continue, filling
 * in nulls for the exhausted streams, until all streams are exhausted.
 */
class lazy_iterator_ZipIterator extends lazy_iterator_LazyIterator {
    constructor(iterators, mismatchMode = ZipMismatchMode.FAIL) {
        super();
        this.iterators = iterators;
        this.mismatchMode = mismatchMode;
        // Number of zipped elements emitted so far (used in error messages).
        this.count = 0;
        this.currentPromise = null;
    }
    summary() {
        const upstreamSummaries = 'TODO: fill in upstream of zip summaries';
        return `{${upstreamSummaries}} -> Zip`;
    }
    async nextState(afterState) {
        // This chaining ensures that the underlying next() are not even called
        // before the previous ones have resolved.
        await afterState;
        // Collect underlying iterator "done" signals as a side effect in
        // getNext()
        let numIterators = 0;
        let iteratorsDone = 0;
        function getNext(container) {
            if (container instanceof lazy_iterator_LazyIterator) {
                const result = container.next();
                return {
                    value: result.then(x => {
                        numIterators++;
                        if (x.done) {
                            iteratorsDone++;
                        }
                        return x.value;
                    }),
                    recurse: false
                };
            }
            else {
                // Not an iterator leaf: tell deepMap to recurse into it.
                return { value: null, recurse: true };
            }
        }
        const mapped = await Object(deep_map["c" /* deepMapAndAwaitAll */])(this.iterators, getNext);
        if (numIterators === iteratorsDone) {
            // The streams have all ended.
            return { value: null, done: true };
        }
        if (iteratorsDone > 0) {
            switch (this.mismatchMode) {
                case ZipMismatchMode.FAIL:
                    throw new Error('Zipped streams should have the same length. ' +
                        `Mismatched at element ${this.count}.`);
                case ZipMismatchMode.SHORTEST:
                    return { value: null, done: true };
                case ZipMismatchMode.LONGEST:
                default:
                // Continue.  The exhausted streams already produced value: null.
            }
        }
        this.count++;
        return { value: mapped, done: false };
    }
    async next() {
        this.currentPromise = this.nextState(this.currentPromise);
        return this.currentPromise;
    }
}
// Iterators that maintain a ring buffer of pending promises
// ============================================================================
/**
 * A stream that prefetches a given number of items from an upstream source,
 * returning them in FIFO order.
 *
 * Note this prefetches Promises, but makes no guarantees about when those
 * Promises resolve.
 */
class lazy_iterator_PrefetchIterator extends lazy_iterator_LazyIterator {
    constructor(upstream, bufferSize) {
        super();
        this.upstream = upstream;
        this.bufferSize = bufferSize;
        // FIFO ring buffer of pending Promises for upstream results.
        this.buffer = new RingBuffer(bufferSize);
    }
    summary() {
        return `${this.upstream.summary()} -> Prefetch`;
    }
    /**
     * Refill the prefetch buffer. Returns only after the buffer is full, or
     * the upstream source is exhausted.
     */
    refill() {
        while (!this.buffer.isFull()) {
            this.buffer.push(this.upstream.next());
        }
    }
    next() {
        // Top up the buffer, then hand out the oldest pending Promise. The
        // shift cannot throw: after refill() the buffer is always full (if
        // the stream is exhausted, it is full of Promises resolving to the
        // end-of-stream signal).
        this.refill();
        return this.buffer.shift();
    }
}
/**
 * A stream that performs a sliding-window random shuffle on an upstream
 * source. This is like a `PrefetchIterator` except that the items are
 * returned in randomized order.  Mixing naturally improves as the buffer
 * size increases.
 */
class lazy_iterator_ShuffleIterator extends lazy_iterator_PrefetchIterator {
    constructor(upstream, windowSize, seed) {
        super(upstream, windowSize);
        this.upstream = upstream;
        this.windowSize = windowSize;
        // Local state that should not be clobbered by out-of-order execution.
        this.upstreamExhausted = false;
        // Seeded PRNG (seedrandom's alea); falls back to a time-based seed so
        // unseeded shuffles differ between runs but remain reproducible when
        // a seed is supplied.
        this.random = seedrandom["alea"](seed || dist["util"].now().toString());
        this.lastRead = Promise.resolve({ value: null, done: false });
    }
    async next() {
        // This sets this.lastRead to a new Promise right away, as opposed to
        // saying `await this.lastRead; this.lastRead = this.serialNext();` which
        // would not work because this.nextRead would be updated only after the
        // promise resolves.
        this.lastRead = this.lastRead.then(() => this.serialNext());
        return this.lastRead;
    }
    randomInt(max) {
        return Math.floor(this.random() * max);
    }
    chooseIndex() {
        return this.randomInt(this.buffer.length());
    }
    async serialNext() {
        // TODO(soergel): consider performance
        if (!this.upstreamExhausted) {
            this.refill();
        }
        while (!this.buffer.isEmpty()) {
            const chosenIndex = this.chooseIndex();
            // NOTE(review): shuffleExcise appears to remove and return the
            // buffered Promise at the chosen index — confirm against the
            // RingBuffer implementation.
            const result = await this.buffer.shuffleExcise(chosenIndex);
            if (result.done) {
                this.upstreamExhausted = true;
            }
            else {
                this.refill();
                return result;
            }
        }
        return { value: null, done: true };
    }
}
//# sourceMappingURL=lazy_iterator.js.map
/***/ }),
/* 46 */
/***/ (function(module, exports, __webpack_require__) {
"use strict";
/* WEBPACK VAR INJECTION */(function(global) {
var numeric = ( false)?(undefined):(exports);
if(typeof global !== "undefined") { global.numeric = numeric; }
numeric.version = "1.2.6";
// 1. Utility functions
// Crude micro-benchmark: doubles the iteration count n until one batch of
// calls to f takes longer than `interval` ms, then times a final batch and
// returns approximate calls per second. Loops are unrolled 4x to reduce
// loop overhead; the post-loop `while(i>0)` handles the remainder.
numeric.bench = function bench (f,interval) {
  var t1,t2,n,i;
  if(typeof interval === "undefined") { interval = 15; }
  n = 0.5;
  t1 = new Date();
  while(1) {
    n*=2;
    for(i=n;i>3;i-=4) { f(); f(); f(); f(); }
    while(i>0) { f(); i--; }
    t2 = new Date();
    if(t2-t1 > interval) break;
  }
  for(i=n;i>3;i-=4) { f(); f(); f(); f(); }
  while(i>0) { f(); i--; }
  t2 = new Date();
  // 3*n-1 ~ total calls across the doubling phase plus the final batch.
  return 1000*(3*n-1)/(t2-t1);
}
// Fallback linear-scan indexOf for pre-ES5 environments; called with `this`
// bound to the array being searched (see myIndexOf below).
numeric._myIndexOf = (function _myIndexOf(w) {
  var n = this.length,k;
  for(k=0;k<n;++k) if(this[k]===w) return k;
  return -1;
});
// Prefer the native Array.prototype.indexOf when present.
numeric.myIndexOf = (Array.prototype.indexOf)?Array.prototype.indexOf:numeric._myIndexOf;
numeric.Function = Function;
// Significant digits used by prettyPrint's number formatting.
numeric.precision = 4;
// Arrays longer than this are elided as '...Large Array...' by prettyPrint.
numeric.largeArray = 50;
// Human-readable, column-aligned rendering of numbers, nested arrays,
// objects and functions, using numeric.precision significant digits.
numeric.prettyPrint = function prettyPrint(x) {
  // Format one non-negative finite number in normalized 'm e s' notation.
  function fmtnum(x) {
    if(x === 0) { return '0'; }
    if(isNaN(x)) { return 'NaN'; }
    if(x<0) { return '-'+fmtnum(-x); }
    if(isFinite(x)) {
      var scale = Math.floor(Math.log(x) / Math.log(10));
      var normalized = x / Math.pow(10,scale);
      var basic = normalized.toPrecision(numeric.precision);
      // Rounding in toPrecision can bump the mantissa to 10; renormalize.
      if(parseFloat(basic) === 10) { scale++; normalized = 1; basic = normalized.toPrecision(numeric.precision); }
      return parseFloat(basic).toString()+'e'+scale.toString();
    }
    return 'Infinity';
  }
  var ret = [];
  // Recursive walker; returns true when it emitted a multi-line structure
  // (so callers know to insert newlines between siblings).
  function foo(x) {
    var k;
    if(typeof x === "undefined") { ret.push(Array(numeric.precision+8).join(' ')); return false; }
    if(typeof x === "string") { ret.push('"'+x+'"'); return false; }
    if(typeof x === "boolean") { ret.push(x.toString()); return false; }
    if(typeof x === "number") {
      // Try several renderings and keep the shortest; right-pad to a fixed
      // column width of precision+7 characters.
      var a = fmtnum(x);
      var b = x.toPrecision(numeric.precision);
      var c = parseFloat(x.toString()).toString();
      var d = [a,b,c,parseFloat(b).toString(),parseFloat(c).toString()];
      for(k=1;k<d.length;k++) { if(d[k].length < a.length) a = d[k]; }
      ret.push(Array(numeric.precision+8-a.length).join(' ')+a);
      return false;
    }
    if(x === null) { ret.push("null"); return false; }
    if(typeof x === "function") {
      ret.push(x.toString());
      var flag = false;
      for(k in x) { if(x.hasOwnProperty(k)) {
        if(flag) ret.push(',\n');
        else ret.push('\n{');
        flag = true;
        ret.push(k);
        ret.push(': \n');
        foo(x[k]);
      } }
      if(flag) ret.push('}\n');
      return true;
    }
    if(x instanceof Array) {
      if(x.length > numeric.largeArray) { ret.push('...Large Array...'); return true; }
      var flag = false;
      ret.push('[');
      for(k=0;k<x.length;k++) { if(k>0) { ret.push(','); if(flag) ret.push('\n '); } flag = foo(x[k]); }
      ret.push(']');
      return true;
    }
    // Plain object: enumerate own properties.
    ret.push('{');
    var flag = false;
    for(k in x) { if(x.hasOwnProperty(k)) { if(flag) ret.push(',\n'); flag = true; ret.push(k); ret.push(': \n'); foo(x[k]); } }
    ret.push('}');
    return true;
  }
  foo(x);
  return ret.join('');
}
// Convert a date string — or an arbitrarily nested array of date strings —
// into epoch milliseconds via Date.parse. Dashes are replaced with slashes
// so "YYYY-MM-DD" parses consistently across engines.
numeric.parseDate = function parseDate(d) {
  function walk(node) {
    if (typeof node === 'string') {
      return Date.parse(node.replace(/-/g, '/'));
    }
    if (!(node instanceof Array)) {
      throw new Error("parseDate: parameter must be arrays of strings");
    }
    var out = [];
    for (var idx = 0; idx < node.length; idx++) {
      out[idx] = walk(node[idx]);
    }
    return out;
  }
  return walk(d);
}
// Convert a numeric string — or an arbitrarily nested array of numeric
// strings — into numbers using the global parseFloat (the outer function
// is named parseFloat_ precisely so the global remains reachable).
numeric.parseFloat = function parseFloat_(d) {
  function walk(node) {
    if (typeof node === 'string') {
      return parseFloat(node);
    }
    if (!(node instanceof Array)) {
      throw new Error("parseFloat: parameter must be arrays of strings");
    }
    var out = [];
    for (var idx = 0; idx < node.length; idx++) {
      out[idx] = walk(node[idx]);
    }
    return out;
  }
  return walk(d);
}
// Parse CSV text into a 2-D array. Fields matching `patnum` become numbers;
// everything else stays a string. Quoted fields ('...' or "...") are kept
// verbatim, quotes included. Lines with no fields are skipped.
numeric.parseCSV = function parseCSV(t) {
  var foo = t.split('\n');
  var j,k;
  var ret = [];
  // One field plus its trailing comma (a ',' is appended to each line below
  // so every field is terminated).
  var pat = /(([^'",]*)|('[^']*')|("[^"]*")),/g;
  // Accepts integers/decimals with optional sign and exponent.
  var patnum = /^\s*(([+-]?[0-9]+(\.[0-9]*)?(e[+-]?[0-9]+)?)|([+-]?[0-9]*(\.[0-9]+)?(e[+-]?[0-9]+)?))\s*$/;
  // Drop the trailing comma captured by `pat`.
  var stripper = function(n) { return n.substr(0,n.length-1); }
  var count = 0;
  for(k=0;k<foo.length;k++) {
    var bar = (foo[k]+",").match(pat),baz;
    if(bar.length>0) {
      ret[count] = [];
      for(j=0;j<bar.length;j++) {
        baz = stripper(bar[j]);
        if(patnum.test(baz)) { ret[count][j] = parseFloat(baz); }
        else ret[count][j] = baz;
      }
      count++;
    }
  }
  return ret;
}
// Serialize a 2-D array A (m rows by n columns) to CSV text, one line per
// row, fields joined by ', ', with a trailing newline.
numeric.toCSV = function toCSV(A) {
  var s = numeric.dim(A);
  var i, j, m, n, row, ret;
  m = s[0]; // row count
  n = s[1]; // column count
  ret = [];
  for (i = 0; i < m; i++) {
    row = [];
    // BUG FIX: the column loop previously ran `j < m` (the ROW count)
    // instead of `j < n`, truncating wide matrices and reading undefined
    // cells (throwing on .toString()) for tall ones. Square matrices were
    // unaffected, which is why this went unnoticed.
    for (j = 0; j < n; j++) { row[j] = A[i][j].toString(); }
    ret[i] = row.join(', ');
  }
  return ret.join('\n') + '\n';
}
// Fetch a URL with a SYNCHRONOUS XMLHttpRequest and return the client.
// NOTE(review): sync XHR blocks the main thread and is deprecated in
// browsers — kept as-is for compatibility with numeric.js callers.
numeric.getURL = function getURL(url) {
  var client = new XMLHttpRequest();
  client.open("GET",url,false);
  client.send();
  return client;
}
// Encode img — an array of three h-by-w channel planes [R,G,B] — as an
// uncompressed ("stored" deflate blocks) PNG and return it as a data: URL.
numeric.imageURL = function imageURL(img) {
  // Minimal base64 encoder over an array of bytes.
  function base64(A) {
    var n = A.length, i,x,y,z,p,q,r,s;
    var key = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/=";
    var ret = "";
    for(i=0;i<n;i+=3) {
      x = A[i];
      y = A[i+1];
      z = A[i+2];
      p = x >> 2;
      q = ((x & 3) << 4) + (y >> 4);
      r = ((y & 15) << 2) + (z >> 6);
      s = z & 63;
      // '=' padding when fewer than 3 bytes remain.
      if(i+1>=n) { r = s = 64; }
      else if(i+2>=n) { s = 64; }
      ret += key.charAt(p) + key.charAt(q) + key.charAt(r) + key.charAt(s);
    }
    return ret;
  }
  // Standard table-driven CRC-32 over a[from..to), as required for PNG
  // chunk checksums.
  function crc32Array (a,from,to) {
    if(typeof from === "undefined") { from = 0; }
    if(typeof to === "undefined") { to = a.length; }
    var table = [0x00000000, 0x77073096, 0xEE0E612C, 0x990951BA, 0x076DC419, 0x706AF48F, 0xE963A535, 0x9E6495A3,
    0x0EDB8832, 0x79DCB8A4, 0xE0D5E91E, 0x97D2D988, 0x09B64C2B, 0x7EB17CBD, 0xE7B82D07, 0x90BF1D91,
    0x1DB71064, 0x6AB020F2, 0xF3B97148, 0x84BE41DE, 0x1ADAD47D, 0x6DDDE4EB, 0xF4D4B551, 0x83D385C7,
    0x136C9856, 0x646BA8C0, 0xFD62F97A, 0x8A65C9EC, 0x14015C4F, 0x63066CD9, 0xFA0F3D63, 0x8D080DF5,
    0x3B6E20C8, 0x4C69105E, 0xD56041E4, 0xA2677172, 0x3C03E4D1, 0x4B04D447, 0xD20D85FD, 0xA50AB56B,
    0x35B5A8FA, 0x42B2986C, 0xDBBBC9D6, 0xACBCF940, 0x32D86CE3, 0x45DF5C75, 0xDCD60DCF, 0xABD13D59,
    0x26D930AC, 0x51DE003A, 0xC8D75180, 0xBFD06116, 0x21B4F4B5, 0x56B3C423, 0xCFBA9599, 0xB8BDA50F,
    0x2802B89E, 0x5F058808, 0xC60CD9B2, 0xB10BE924, 0x2F6F7C87, 0x58684C11, 0xC1611DAB, 0xB6662D3D,
    0x76DC4190, 0x01DB7106, 0x98D220BC, 0xEFD5102A, 0x71B18589, 0x06B6B51F, 0x9FBFE4A5, 0xE8B8D433,
    0x7807C9A2, 0x0F00F934, 0x9609A88E, 0xE10E9818, 0x7F6A0DBB, 0x086D3D2D, 0x91646C97, 0xE6635C01,
    0x6B6B51F4, 0x1C6C6162, 0x856530D8, 0xF262004E, 0x6C0695ED, 0x1B01A57B, 0x8208F4C1, 0xF50FC457,
    0x65B0D9C6, 0x12B7E950, 0x8BBEB8EA, 0xFCB9887C, 0x62DD1DDF, 0x15DA2D49, 0x8CD37CF3, 0xFBD44C65,
    0x4DB26158, 0x3AB551CE, 0xA3BC0074, 0xD4BB30E2, 0x4ADFA541, 0x3DD895D7, 0xA4D1C46D, 0xD3D6F4FB,
    0x4369E96A, 0x346ED9FC, 0xAD678846, 0xDA60B8D0, 0x44042D73, 0x33031DE5, 0xAA0A4C5F, 0xDD0D7CC9,
    0x5005713C, 0x270241AA, 0xBE0B1010, 0xC90C2086, 0x5768B525, 0x206F85B3, 0xB966D409, 0xCE61E49F,
    0x5EDEF90E, 0x29D9C998, 0xB0D09822, 0xC7D7A8B4, 0x59B33D17, 0x2EB40D81, 0xB7BD5C3B, 0xC0BA6CAD,
    0xEDB88320, 0x9ABFB3B6, 0x03B6E20C, 0x74B1D29A, 0xEAD54739, 0x9DD277AF, 0x04DB2615, 0x73DC1683,
    0xE3630B12, 0x94643B84, 0x0D6D6A3E, 0x7A6A5AA8, 0xE40ECF0B, 0x9309FF9D, 0x0A00AE27, 0x7D079EB1,
    0xF00F9344, 0x8708A3D2, 0x1E01F268, 0x6906C2FE, 0xF762575D, 0x806567CB, 0x196C3671, 0x6E6B06E7,
    0xFED41B76, 0x89D32BE0, 0x10DA7A5A, 0x67DD4ACC, 0xF9B9DF6F, 0x8EBEEFF9, 0x17B7BE43, 0x60B08ED5,
    0xD6D6A3E8, 0xA1D1937E, 0x38D8C2C4, 0x4FDFF252, 0xD1BB67F1, 0xA6BC5767, 0x3FB506DD, 0x48B2364B,
    0xD80D2BDA, 0xAF0A1B4C, 0x36034AF6, 0x41047A60, 0xDF60EFC3, 0xA867DF55, 0x316E8EEF, 0x4669BE79,
    0xCB61B38C, 0xBC66831A, 0x256FD2A0, 0x5268E236, 0xCC0C7795, 0xBB0B4703, 0x220216B9, 0x5505262F,
    0xC5BA3BBE, 0xB2BD0B28, 0x2BB45A92, 0x5CB36A04, 0xC2D7FFA7, 0xB5D0CF31, 0x2CD99E8B, 0x5BDEAE1D,
    0x9B64C2B0, 0xEC63F226, 0x756AA39C, 0x026D930A, 0x9C0906A9, 0xEB0E363F, 0x72076785, 0x05005713,
    0x95BF4A82, 0xE2B87A14, 0x7BB12BAE, 0x0CB61B38, 0x92D28E9B, 0xE5D5BE0D, 0x7CDCEFB7, 0x0BDBDF21,
    0x86D3D2D4, 0xF1D4E242, 0x68DDB3F8, 0x1FDA836E, 0x81BE16CD, 0xF6B9265B, 0x6FB077E1, 0x18B74777,
    0x88085AE6, 0xFF0F6A70, 0x66063BCA, 0x11010B5C, 0x8F659EFF, 0xF862AE69, 0x616BFFD3, 0x166CCF45,
    0xA00AE278, 0xD70DD2EE, 0x4E048354, 0x3903B3C2, 0xA7672661, 0xD06016F7, 0x4969474D, 0x3E6E77DB,
    0xAED16A4A, 0xD9D65ADC, 0x40DF0B66, 0x37D83BF0, 0xA9BCAE53, 0xDEBB9EC5, 0x47B2CF7F, 0x30B5FFE9,
    0xBDBDF21C, 0xCABAC28A, 0x53B39330, 0x24B4A3A6, 0xBAD03605, 0xCDD70693, 0x54DE5729, 0x23D967BF,
    0xB3667A2E, 0xC4614AB8, 0x5D681B02, 0x2A6F2B94, 0xB40BBE37, 0xC30C8EA1, 0x5A05DF1B, 0x2D02EF8D];
    var crc = -1, y = 0, n = a.length,i;
    for (i = from; i < to; i++) {
      y = (crc ^ a[i]) & 0xFF;
      crc = (crc >>> 8) ^ table[y];
    }
    return crc ^ (-1);
  }
  var h = img[0].length, w = img[0][0].length, s1, s2, next,k,length,a,b,i,j,adler32,crc32;
  // PNG header + IHDR + start of the single IDAT chunk; the -1..-8
  // placeholders are patched with the CRC and IDAT length below.
  var stream = [
    137, 80, 78, 71, 13, 10, 26, 10, // 0: PNG signature
    0,0,0,13, // 8: IHDR Chunk length
    73, 72, 68, 82, // 12: "IHDR"
    (w >> 24) & 255, (w >> 16) & 255, (w >> 8) & 255, w&255, // 16: Width
    (h >> 24) & 255, (h >> 16) & 255, (h >> 8) & 255, h&255, // 20: Height
    8, // 24: bit depth
    2, // 25: RGB
    0, // 26: deflate
    0, // 27: no filter
    0, // 28: no interlace
    -1,-2,-3,-4, // 29: CRC
    -5,-6,-7,-8, // 33: IDAT Chunk length
    73, 68, 65, 84, // 37: "IDAT"
    // RFC 1950 header starts here
    8, // 41: RFC1950 CMF
    29 // 42: RFC1950 FLG
  ];
  // IHDR CRC covers bytes 12..28 (type + data).
  crc32 = crc32Array(stream,12,29);
  stream[29] = (crc32>>24)&255;
  stream[30] = (crc32>>16)&255;
  stream[31] = (crc32>>8)&255;
  stream[32] = (crc32)&255;
  // Adler-32 running state for the zlib stream.
  s1 = 1;
  s2 = 0;
  // One "stored" (uncompressed) deflate block per scanline; the last block
  // carries the BFINAL bit (1).
  for(i=0;i<h;i++) {
    if(i<h-1) { stream.push(0); }
    else { stream.push(1); }
    // LEN / NLEN of the stored block: 3 bytes per pixel + 1 filter byte
    // (+1 extra on the first row — see the extra 0 pushed below).
    a = (3*w+1+(i===0))&255; b = ((3*w+1+(i===0))>>8)&255;
    stream.push(a); stream.push(b);
    stream.push((~a)&255); stream.push((~b)&255);
    if(i===0) stream.push(0);
    for(j=0;j<w;j++) {
      for(k=0;k<3;k++) {
        // Clamp channel values to [0,255] and round.
        a = img[k][i][j];
        if(a>255) a = 255;
        else if(a<0) a=0;
        else a = Math.round(a);
        s1 = (s1 + a )%65521;
        s2 = (s2 + s1)%65521;
        stream.push(a);
      }
    }
    // Filter byte (0 = None) for the next scanline.
    stream.push(0);
  }
  adler32 = (s2<<16)+s1;
  stream.push((adler32>>24)&255);
  stream.push((adler32>>16)&255);
  stream.push((adler32>>8)&255);
  stream.push((adler32)&255);
  // Patch the IDAT chunk length (data starts at byte 41).
  length = stream.length - 41;
  stream[33] = (length>>24)&255;
  stream[34] = (length>>16)&255;
  stream[35] = (length>>8)&255;
  stream[36] = (length)&255;
  // IDAT CRC covers type + data (bytes 37..end).
  crc32 = crc32Array(stream,37);
  stream.push((crc32>>24)&255);
  stream.push((crc32>>16)&255);
  stream.push((crc32>>8)&255);
  stream.push((crc32)&255);
  // IEND chunk: zero length, type "IEND", fixed CRC.
  stream.push(0);
  stream.push(0);
  stream.push(0);
  stream.push(0);
  // a = stream.length;
  stream.push(73); // I
  stream.push(69); // E
  stream.push(78); // N
  stream.push(68); // D
  stream.push(174); // CRC1
  stream.push(66); // CRC2
  stream.push(96); // CRC3
  stream.push(130); // CRC4
  return 'data:image/png;base64,'+base64(stream);
}
// 2. Linear algebra with Arrays.
// Walk nested arrays collecting lengths along the first index at each
// level. Assumes a rectangular (non-ragged) array.
numeric._dim = function _dim(x) {
  var ret = [];
  while(typeof x === "object") { ret.push(x.length); x = x[0]; }
  return ret;
}
// Shape of x as an array of dimension lengths: [] for scalars, [n] for
// vectors, [m,n] for matrices; deeper nesting falls through to _dim.
// The first two levels are unrolled as a fast path.
numeric.dim = function dim(x) {
  var y,z;
  if(typeof x === "object") {
    y = x[0];
    if(typeof y === "object") {
      z = y[0];
      if(typeof z === "object") {
        return numeric._dim(x);
      }
      return [x.length,y.length];
    }
    return [x.length];
  }
  return [];
}
// Build a recursive reduction function from a code fragment `body` (which
// consumes `xi` into `accum`) and an initializer expression `init`.
// The generated function handles scalars, vectors (with a 2x-unrolled
// loop) and nested arrays (via arguments.callee recursion).
numeric.mapreduce = function mapreduce(body,init) {
  return Function('x','accum','_s','_k',
    'if(typeof accum === "undefined") accum = '+init+';\n'+
    'if(typeof x === "number") { var xi = x; '+body+'; return accum; }\n'+
    'if(typeof _s === "undefined") _s = numeric.dim(x);\n'+
    'if(typeof _k === "undefined") _k = 0;\n'+
    'var _n = _s[_k];\n'+
    'var i,xi;\n'+
    'if(_k < _s.length-1) {\n'+
    ' for(i=_n-1;i>=0;i--) {\n'+
    ' accum = arguments.callee(x[i],accum,_s,_k+1);\n'+
    ' }'+
    ' return accum;\n'+
    '}\n'+
    'for(i=_n-1;i>=1;i-=2) { \n'+
    ' xi = x[i];\n'+
    ' '+body+';\n'+
    ' xi = x[i-1];\n'+
    ' '+body+';\n'+
    '}\n'+
    'if(i === 0) {\n'+
    ' xi = x[i];\n'+
    ' '+body+'\n'+
    '}\n'+
    'return accum;'
  );
}
// Flat-vector variant of mapreduce: `setup` declares/initializes `accum`,
// `body` folds each `xi` into it. No recursion, single reverse loop.
numeric.mapreduce2 = function mapreduce2(body,setup) {
  return Function('x',
    'var n = x.length;\n'+
    'var i,xi;\n'+setup+';\n'+
    'for(i=n-1;i!==-1;--i) { \n'+
    ' xi = x[i];\n'+
    ' '+body+';\n'+
    '}\n'+
    'return accum;'
  );
}
// Deep structural equality for (possibly nested) Arrays. Non-array
// arguments compare unequal; leaves are compared with ===.
numeric.same = function same(x, y) {
  if (!(x instanceof Array) || !(y instanceof Array)) {
    return false;
  }
  var len = x.length;
  if (len !== y.length) {
    return false;
  }
  for (var idx = 0; idx < len; idx++) {
    if (x[idx] === y[idx]) {
      continue;
    }
    // Unequal leaves that are not objects can never match.
    if (typeof x[idx] !== "object") {
      return false;
    }
    // Recurse into nested structure.
    if (!same(x[idx], y[idx])) {
      return false;
    }
  }
  return true;
}
// Build an array of shape s (from dimension k onward) with every leaf set
// to v. The innermost level is filled with a 2x-unrolled loop; the
// post-loop `if(i===-1)` handles the odd-length remainder.
numeric.rep = function rep(s,v,k) {
  if(typeof k === "undefined") { k=0; }
  var n = s[k], ret = Array(n), i;
  if(k === s.length-1) {
    for(i=n-2;i>=0;i-=2) { ret[i+1] = v; ret[i] = v; }
    if(i===-1) { ret[0] = v; }
    return ret;
  }
  for(i=n-1;i>=0;i--) { ret[i] = numeric.rep(s,v,k+1); }
  return ret;
}
// Matrix-matrix product for small matrices: straightforward triple loop
// with the inner dot product 2x-unrolled. The post-loop `if(j===0)` picks
// up the final term when q is even.
numeric.dotMMsmall = function dotMMsmall(x,y) {
  var i,j,k,p,q,r,ret,foo,bar,woo,i0,k0,p0,r0;
  p = x.length; q = y.length; r = y[0].length;
  ret = Array(p);
  for(i=p-1;i>=0;i--) {
    foo = Array(r);
    bar = x[i];
    for(k=r-1;k>=0;k--) {
      woo = bar[q-1]*y[q-1][k];
      for(j=q-2;j>=1;j-=2) {
        i0 = j-1;
        woo += bar[j]*y[j][k] + bar[i0]*y[i0][k];
      }
      if(j===0) { woo += bar[0]*y[0][k]; }
      foo[k] = woo;
    }
    ret[i] = foo;
  }
  return ret;
}
// Copy column j of A into the preallocated vector x (2x-unrolled).
numeric._getCol = function _getCol(A,j,x) {
  var n = A.length, i;
  for(i=n-1;i>0;--i) {
    x[i] = A[i][j];
    --i;
    x[i] = A[i][j];
  }
  if(i===0) x[0] = A[0][j];
}
// Matrix-matrix product for large matrices: extracts each column of y once
// into a scratch vector so the inner product runs over two contiguous
// arrays (better cache behavior than column-major indexing).
numeric.dotMMbig = function dotMMbig(x,y){
  var gc = numeric._getCol, p = y.length, v = Array(p);
  var m = x.length, n = y[0].length, A = new Array(m), xj;
  var VV = numeric.dotVV;
  var i,j,k,z;
  --p;
  --m;
  for(i=m;i!==-1;--i) A[i] = Array(n);
  --n;
  for(i=n;i!==-1;--i) {
    gc(y,i,v);
    for(j=m;j!==-1;--j) {
      z=0; // NOTE(review): z appears unused here; kept for fidelity.
      xj = x[j];
      A[j][i] = VV(xj,v);
    }
  }
  return A;
}
// Matrix-vector product: one dotVV per row.
numeric.dotMV = function dotMV(x,y) {
  var p = x.length, q = y.length,i;
  var ret = Array(p), dotVV = numeric.dotVV;
  for(i=p-1;i>=0;i--) { ret[i] = dotVV(x[i],y); }
  return ret;
}
// Vector-matrix product (row vector times matrix), 2x-unrolled over rows.
numeric.dotVM = function dotVM(x,y) {
  var i,j,k,p,q,r,ret,foo,bar,woo,i0,k0,p0,r0,s1,s2,s3,baz,accum;
  p = x.length; q = y[0].length;
  ret = Array(q);
  for(k=q-1;k>=0;k--) {
    woo = x[p-1]*y[p-1][k];
    for(j=p-2;j>=1;j-=2) {
      i0 = j-1;
      woo += x[j]*y[j][k] + x[i0]*y[i0][k];
    }
    if(j===0) { woo += x[0]*y[0][k]; }
    ret[k] = woo;
  }
  return ret;
}
// Inner (dot) product of two vectors, 2x-unrolled with an odd-length
// remainder handled after the loop.
numeric.dotVV = function dotVV(x,y) {
  var i,n=x.length,i1,ret = x[n-1]*y[n-1];
  for(i=n-2;i>=1;i-=2) {
    i1 = i-1;
    ret += x[i]*y[i] + x[i1]*y[i1];
  }
  if(i===0) { ret += x[0]*y[0]; }
  return ret;
}
// Generic product dispatcher: encodes (rank of x, rank of y) as
// rank(x)*1000 + rank(y) and routes to the appropriate kernel.
numeric.dot = function dot(x,y) {
  var d = numeric.dim;
  switch(d(x).length*1000+d(y).length) {
    case 2002:
      if(y.length < 10) return numeric.dotMMsmall(x,y);
      else return numeric.dotMMbig(x,y);
    case 2001: return numeric.dotMV(x,y);
    case 1002: return numeric.dotVM(x,y);
    case 1001: return numeric.dotVV(x,y);
    case 1000: return numeric.mulVS(x,y);
    case 1: return numeric.mulSV(x,y);
    case 0: return x*y;
    default: throw new Error('numeric.dot only works on vectors and matrices');
  }
}
// Square diagonal matrix with d on the diagonal; off-diagonal zeros are
// written with 2x-unrolled loops on either side of the diagonal entry.
numeric.diag = function diag(d) {
  var i,i1,j,n = d.length, A = Array(n), Ai;
  for(i=n-1;i>=0;i--) {
    Ai = Array(n);
    i1 = i+2;
    for(j=n-1;j>=i1;j-=2) {
      Ai[j] = 0;
      Ai[j-1] = 0;
    }
    if(j>i) { Ai[j] = 0; }
    Ai[i] = d[i];
    for(j=i-1;j>=1;j-=2) {
      Ai[j] = 0;
      Ai[j-1] = 0;
    }
    if(j===0) { Ai[0] = 0; }
    A[i] = Ai;
  }
  return A;
}
// Main diagonal of A as a vector (handles rectangular A via min(m,n)).
numeric.getDiag = function(A) {
  var n = Math.min(A.length,A[0].length),i,ret = Array(n);
  for(i=n-1;i>=1;--i) {
    ret[i] = A[i][i];
    --i;
    ret[i] = A[i][i];
  }
  if(i===0) {
    ret[0] = A[0][0];
  }
  return ret;
}
// n-by-n identity matrix.
numeric.identity = function identity(n) { return numeric.diag(numeric.rep([n],1)); }
// Code generator for elementwise operations over nested arrays. Params
// ending in '[i]' mark vector arguments; `body` is executed per element
// with `ret[i]` as the output slot; `setup` runs once per innermost loop.
// Recursion over outer dimensions uses arguments.callee.
numeric.pointwise = function pointwise(params,body,setup) {
  if(typeof setup === "undefined") { setup = ""; }
  var fun = [];
  var k;
  var avec = /\[i\]$/,p,thevec = '';
  var haveret = false;
  for(k=0;k<params.length;k++) {
    if(avec.test(params[k])) {
      // Strip the '[i]' suffix; remember one vector param to size loops by.
      p = params[k].substring(0,params[k].length-3);
      thevec = p;
    } else { p = params[k]; }
    if(p==='ret') haveret = true;
    fun.push(p);
  }
  fun[params.length] = '_s';
  fun[params.length+1] = '_k';
  fun[params.length+2] = (
    'if(typeof _s === "undefined") _s = numeric.dim('+thevec+');\n'+
    'if(typeof _k === "undefined") _k = 0;\n'+
    'var _n = _s[_k];\n'+
    'var i'+(haveret?'':', ret = Array(_n)')+';\n'+
    'if(_k < _s.length-1) {\n'+
    ' for(i=_n-1;i>=0;i--) ret[i] = arguments.callee('+params.join(',')+',_s,_k+1);\n'+
    ' return ret;\n'+
    '}\n'+
    setup+'\n'+
    'for(i=_n-1;i!==-1;--i) {\n'+
    ' '+body+'\n'+
    '}\n'+
    'return ret;'
  );
  return Function.apply(null,fun);
}
// Flat (1-D only) variant of pointwise: no shape recursion, single loop.
numeric.pointwise2 = function pointwise2(params,body,setup) {
  if(typeof setup === "undefined") { setup = ""; }
  var fun = [];
  var k;
  var avec = /\[i\]$/,p,thevec = '';
  var haveret = false;
  for(k=0;k<params.length;k++) {
    if(avec.test(params[k])) {
      p = params[k].substring(0,params[k].length-3);
      thevec = p;
    } else { p = params[k]; }
    if(p==='ret') haveret = true;
    fun.push(p);
  }
  fun[params.length] = (
    'var _n = '+thevec+'.length;\n'+
    'var i'+(haveret?'':', ret = Array(_n)')+';\n'+
    setup+'\n'+
    'for(i=_n-1;i!==-1;--i) {\n'+
    body+'\n'+
    '}\n'+
    'return ret;'
  );
  return Function.apply(null,fun);
}
// Apply f(xLeaf, yLeaf) over two broadcast-compatible nested structures,
// for side effects only. Scalars broadcast against arrays at each level.
numeric._biforeach = (function _biforeach(x,y,s,k,f) {
  if(k === s.length-1) { f(x,y); return; }
  var i,n=s[k];
  for(i=n-1;i>=0;i--) { _biforeach(typeof x==="object"?x[i]:x,typeof y==="object"?y[i]:y,s,k+1,f); }
});
// Like _biforeach, but collects f's results into a new nested array.
numeric._biforeach2 = (function _biforeach2(x,y,s,k,f) {
  if(k === s.length-1) { return f(x,y); }
  var i,n=s[k],ret = Array(n);
  for(i=n-1;i>=0;--i) { ret[i] = _biforeach2(typeof x==="object"?x[i]:x,typeof y==="object"?y[i]:y,s,k+1,f); }
  return ret;
});
// Apply f to each innermost vector of x, for side effects only.
numeric._foreach = (function _foreach(x,s,k,f) {
  if(k === s.length-1) { f(x); return; }
  var i,n=s[k];
  for(i=n-1;i>=0;i--) { _foreach(x[i],s,k+1,f); }
});
// Like _foreach, but collects f's results into a new nested array.
numeric._foreach2 = (function _foreach2(x,s,k,f) {
  if(k === s.length-1) { return f(x); }
  var i,n=s[k], ret = Array(n);
  for(i=n-1;i>=0;i--) { ret[i] = _foreach2(x[i],s,k+1,f); }
  return ret;
});
/*numeric.anyV = numeric.mapreduce('if(xi) return true;','false');
numeric.allV = numeric.mapreduce('if(!xi) return false;','true');
numeric.any = function(x) { if(typeof x.length === "undefined") return x; return numeric.anyV(x); }
numeric.all = function(x) { if(typeof x.length === "undefined") return x; return numeric.allV(x); }*/
// Binary operators: numeric name -> JS operator token. The IIFE below
// generates numeric.add, numeric.addVV, numeric.addeq, ... from this table.
numeric.ops2 = {
  add: '+',
  sub: '-',
  mul: '*',
  div: '/',
  mod: '%',
  and: '&&',
  or: '||',
  eq: '===',
  neq: '!==',
  lt: '<',
  gt: '>',
  leq: '<=',
  geq: '>=',
  band: '&',
  bor: '|',
  bxor: '^',
  lshift: '<<',
  rshift: '>>',
  rrshift: '>>>'
};
// Compound-assignment forms; consulted so the generated in-place code can
// use `x op= y` when the operator supports it.
numeric.opseq = {
  addeq: '+=',
  subeq: '-=',
  muleq: '*=',
  diveq: '/=',
  modeq: '%=',
  lshifteq: '<<=',
  rshifteq: '>>=',
  rrshifteq: '>>>=',
  bandeq: '&=',
  boreq: '|=',
  bxoreq: '^='
};
// One-argument Math functions lifted to elementwise array operations.
numeric.mathfuns = ['abs','acos','asin','atan','ceil','cos',
  'exp','floor','log','round','sin','sqrt','tan',
  'isNaN','isFinite'];
// Two-argument Math functions, lifted via the ops2 machinery.
numeric.mathfuns2 = ['atan2','pow','max','min'];
// Unary prefix operators (clone is the empty operator: identity copy).
numeric.ops1 = {
  neg: '-',
  not: '!',
  bnot: '~',
  clone: ''
};
// Reductions: name -> [fold body over xi, accumulator setup].
numeric.mapreducers = {
  any: ['if(xi) return true;','var accum = false;'],
  all: ['if(!xi) return false;','var accum = true;'],
  sum: ['accum += xi;','var accum = 0;'],
  prod: ['accum *= xi;','var accum = 1;'],
  norm2Squared: ['accum += xi*xi;','var accum = 0;'],
  norminf: ['accum = max(accum,abs(xi));','var accum = 0, max = Math.max, abs = Math.abs;'],
  norm1: ['accum += abs(xi)','var accum = 0, abs = Math.abs;'],
  sup: ['accum = max(accum,xi);','var accum = -Infinity, max = Math.max;'],
  inf: ['accum = min(accum,xi);','var accum = Infinity, min = Math.min;']
};
// Generate the public elementwise API (numeric.add, numeric.abs, numeric.
// addeq, numeric.sum, ...) from the tables above.
(function () {
  var i,o;
  // Temporarily promote two-argument Math functions into ops2 so the
  // binary-operator generation below covers them too.
  for(i=0;i<numeric.mathfuns2.length;++i) {
    o = numeric.mathfuns2[i];
    numeric.ops2[o] = o;
  }
  for(i in numeric.ops2) {
    if(numeric.ops2.hasOwnProperty(i)) {
      o = numeric.ops2[i];
      var code, codeeq, setup = '';
      if(numeric.myIndexOf.call(numeric.mathfuns2,i)!==-1) {
        // Math function: bind it locally and emit `r = fn(x,y)`.
        setup = 'var '+o+' = Math.'+o+';\n';
        code = function(r,x,y) { return r+' = '+o+'('+x+','+y+')'; };
        codeeq = function(x,y) { return x+' = '+o+'('+x+','+y+')'; };
      } else {
        // Plain operator: emit `r = x op y`, preferring `x op= y` in place.
        code = function(r,x,y) { return r+' = '+x+' '+o+' '+y; };
        if(numeric.opseq.hasOwnProperty(i+'eq')) {
          codeeq = function(x,y) { return x+' '+o+'= '+y; };
        } else {
          codeeq = function(x,y) { return x+' = '+x+' '+o+' '+y; };
        }
      }
      // Vector-vector, scalar-vector and vector-scalar kernels.
      numeric[i+'VV'] = numeric.pointwise2(['x[i]','y[i]'],code('ret[i]','x[i]','y[i]'),setup);
      numeric[i+'SV'] = numeric.pointwise2(['x','y[i]'],code('ret[i]','x','y[i]'),setup);
      numeric[i+'VS'] = numeric.pointwise2(['x[i]','y'],code('ret[i]','x[i]','y'),setup);
      // Variadic public entry point, broadcasting scalars against arrays.
      numeric[i] = Function(
        'var n = arguments.length, i, x = arguments[0], y;\n'+
        'var VV = numeric.'+i+'VV, VS = numeric.'+i+'VS, SV = numeric.'+i+'SV;\n'+
        'var dim = numeric.dim;\n'+
        'for(i=1;i!==n;++i) { \n'+
        ' y = arguments[i];\n'+
        ' if(typeof x === "object") {\n'+
        ' if(typeof y === "object") x = numeric._biforeach2(x,y,dim(x),0,VV);\n'+
        ' else x = numeric._biforeach2(x,y,dim(x),0,VS);\n'+
        ' } else if(typeof y === "object") x = numeric._biforeach2(x,y,dim(y),0,SV);\n'+
        ' else '+codeeq('x','y')+'\n'+
        '}\nreturn x;\n');
      numeric[o] = numeric[i];
      // In-place variants.
      numeric[i+'eqV'] = numeric.pointwise2(['ret[i]','x[i]'], codeeq('ret[i]','x[i]'),setup);
      numeric[i+'eqS'] = numeric.pointwise2(['ret[i]','x'], codeeq('ret[i]','x'),setup);
      numeric[i+'eq'] = Function(
        'var n = arguments.length, i, x = arguments[0], y;\n'+
        'var V = numeric.'+i+'eqV, S = numeric.'+i+'eqS\n'+
        'var s = numeric.dim(x);\n'+
        'for(i=1;i!==n;++i) { \n'+
        ' y = arguments[i];\n'+
        ' if(typeof y === "object") numeric._biforeach(x,y,s,0,V);\n'+
        ' else numeric._biforeach(x,y,s,0,S);\n'+
        '}\nreturn x;\n');
    }
  }
  // Undo the temporary ops2 promotion.
  for(i=0;i<numeric.mathfuns2.length;++i) {
    o = numeric.mathfuns2[i];
    delete numeric.ops2[o];
  }
  // Promote one-argument Math functions into the unary table.
  for(i=0;i<numeric.mathfuns.length;++i) {
    o = numeric.mathfuns[i];
    numeric.ops1[o] = o;
  }
  for(i in numeric.ops1) {
    if(numeric.ops1.hasOwnProperty(i)) {
      setup = '';
      o = numeric.ops1[i];
      if(numeric.myIndexOf.call(numeric.mathfuns,i)!==-1) {
        // isNaN/isFinite are globals, not Math members; leave setup empty
        // for them so the generated code falls back to the globals.
        if(Math.hasOwnProperty(o)) setup = 'var '+o+' = Math.'+o+';\n';
      }
      numeric[i+'eqV'] = numeric.pointwise2(['ret[i]'],'ret[i] = '+o+'(ret[i]);',setup);
      // BUG FIX: the scalar fast path used to emit `return '+o+'x` without
      // the setup prelude, producing invalid code like `return absx` (a
      // ReferenceError) for every Math-backed function when called with a
      // scalar. Prepend `setup` and parenthesize the operand — this is
      // exactly how the array path and numeric[i] below already bind the
      // Math function, and is a no-op for prefix operators (`-(x)`).
      numeric[i+'eq'] = Function('x',
        setup+
        'if(typeof x !== "object") return '+o+'(x);\n'+
        'var i;\n'+
        'var V = numeric.'+i+'eqV;\n'+
        'var s = numeric.dim(x);\n'+
        'numeric._foreach(x,s,0,V);\n'+
        'return x;\n');
      numeric[i+'V'] = numeric.pointwise2(['x[i]'],'ret[i] = '+o+'(x[i]);',setup);
      // BUG FIX (same issue): the scalar branch here referenced the local
      // Math binding without ever declaring it; include `setup` so e.g.
      // numeric.abs(3) works.
      numeric[i] = Function('x',
        setup+
        'if(typeof x !== "object") return '+o+'(x)\n'+
        'var i;\n'+
        'var V = numeric.'+i+'V;\n'+
        'var s = numeric.dim(x);\n'+
        'return numeric._foreach2(x,s,0,V);\n');
    }
  }
  // Undo the temporary ops1 promotion.
  for(i=0;i<numeric.mathfuns.length;++i) {
    o = numeric.mathfuns[i];
    delete numeric.ops1[o];
  }
  // Reductions: scalar fast path, flat-vector kernel, recursive wrapper.
  for(i in numeric.mapreducers) {
    if(numeric.mapreducers.hasOwnProperty(i)) {
      o = numeric.mapreducers[i];
      numeric[i+'V'] = numeric.mapreduce2(o[0],o[1]);
      numeric[i] = Function('x','s','k',
        o[1]+
        'if(typeof x !== "object") {'+
        ' xi = x;\n'+
        o[0]+';\n'+
        ' return accum;\n'+
        '}'+
        'if(typeof s === "undefined") s = numeric.dim(x);\n'+
        'if(typeof k === "undefined") k = 0;\n'+
        'if(k === s.length-1) return numeric.'+i+'V(x);\n'+
        'var xi;\n'+
        'var n = x.length, i;\n'+
        'for(i=n-1;i!==-1;--i) {\n'+
        ' xi = arguments.callee(x[i]);\n'+
        o[0]+';\n'+
        '}\n'+
        'return accum;\n');
    }
  }
}());
// Elementwise "round to nearest multiple of y" kernels and dispatcher.
numeric.truncVV = numeric.pointwise(['x[i]','y[i]'],'ret[i] = round(x[i]/y[i])*y[i];','var round = Math.round;');
numeric.truncVS = numeric.pointwise(['x[i]','y'],'ret[i] = round(x[i]/y)*y;','var round = Math.round;');
numeric.truncSV = numeric.pointwise(['x','y[i]'],'ret[i] = round(x/y[i])*y[i];','var round = Math.round;');
// Dispatch on (array|scalar) x and y.
numeric.trunc = function trunc(x,y) {
  if(typeof x === "object") {
    if(typeof y === "object") return numeric.truncVV(x,y);
    return numeric.truncVS(x,y);
  }
  if (typeof y === "object") return numeric.truncSV(x,y);
  return Math.round(x/y)*y;
}
// Matrix inverse by Gauss-Jordan elimination with partial pivoting:
// row-reduce a copy of x while applying the same operations to an identity
// matrix I, which becomes the inverse. Note `var x` shadows the parameter
// after the clone — x is reused as the pivot/scale scalar below.
numeric.inv = function inv(x) {
  var s = numeric.dim(x), abs = Math.abs, m = s[0], n = s[1];
  var A = numeric.clone(x), Ai, Aj;
  var I = numeric.identity(m), Ii, Ij;
  var i,j,k,x;
  for(j=0;j<n;++j) {
    // Partial pivot: pick the row with the largest |A[i][j]| at or below j.
    var i0 = -1;
    var v0 = -1;
    for(i=j;i!==m;++i) { k = abs(A[i][j]); if(k>v0) { i0 = i; v0 = k; } }
    // Swap pivot row into place in both A and I.
    Aj = A[i0]; A[i0] = A[j]; A[j] = Aj;
    Ij = I[i0]; I[i0] = I[j]; I[j] = Ij;
    // Normalize the pivot row.
    x = Aj[j];
    for(k=j;k!==n;++k) Aj[k] /= x;
    for(k=n-1;k!==-1;--k) Ij[k] /= x;
    // Eliminate column j from every other row (I-side loop is 2x-unrolled).
    for(i=m-1;i!==-1;--i) {
      if(i!==j) {
        Ai = A[i];
        Ii = I[i];
        x = Ai[j];
        for(k=j+1;k!==n;++k) Ai[k] -= Aj[k]*x;
        for(k=n-1;k>0;--k) { Ii[k] -= Ij[k]*x; --k; Ii[k] -= Ij[k]*x; }
        if(k===0) Ii[0] -= Ij[0]*x;
      }
    }
  }
  return I;
}
// Determinant via LU-style elimination with partial pivoting on a clone of
// x. Each row swap flips the sign; the determinant is the product of the
// pivots. Returns 0 early when a pivot is exactly zero.
numeric.det = function det(x) {
  var s = numeric.dim(x);
  if(s.length !== 2 || s[0] !== s[1]) { throw new Error('numeric: det() only works on square matrices'); }
  var n = s[0], ret = 1,i,j,k,A = numeric.clone(x),Aj,Ai,alpha,temp,k1,k2,k3;
  for(j=0;j<n-1;j++) {
    // Partial pivot: largest |A[i][j]| at or below row j.
    k=j;
    for(i=j+1;i<n;i++) { if(Math.abs(A[i][j]) > Math.abs(A[k][j])) { k = i; } }
    if(k !== j) {
      temp = A[k]; A[k] = A[j]; A[j] = temp;
      ret *= -1;
    }
    Aj = A[j];
    // Eliminate column j below the pivot; inner loop 2x-unrolled with a
    // post-loop fix-up for the odd remainder (k===n-1 on exit).
    for(i=j+1;i<n;i++) {
      Ai = A[i];
      alpha = Ai[j]/Aj[j];
      for(k=j+1;k<n-1;k+=2) {
        k1 = k+1;
        Ai[k] -= Aj[k]*alpha;
        Ai[k1] -= Aj[k1]*alpha;
      }
      if(k!==n) { Ai[k] -= Aj[k]*alpha; }
    }
    if(Aj[j] === 0) { return 0; }
    ret *= Aj[j];
  }
  // After the loop j===n-1: multiply in the final pivot.
  return ret*A[j][j];
}
// Matrix transpose; both the row loop and the column loop are 2x-unrolled,
// with post-loop branches handling the odd remainders.
numeric.transpose = function transpose(x) {
  var i,j,m = x.length,n = x[0].length, ret=Array(n),A0,A1,Bj;
  for(j=0;j<n;j++) ret[j] = Array(m);
  // Process rows in pairs (A1 = row i, A0 = row i-1).
  for(i=m-1;i>=1;i-=2) {
    A1 = x[i];
    A0 = x[i-1];
    for(j=n-1;j>=1;--j) {
      Bj = ret[j]; Bj[i] = A1[j]; Bj[i-1] = A0[j];
      --j;
      Bj = ret[j]; Bj[i] = A1[j]; Bj[i-1] = A0[j];
    }
    if(j===0) {
      Bj = ret[0]; Bj[i] = A1[0]; Bj[i-1] = A0[0];
    }
  }
  // Odd row count: the remaining first row.
  if(i===0) {
    A0 = x[0];
    for(j=n-1;j>=1;--j) {
      ret[j][0] = A0[j];
      --j;
      ret[j][0] = A0[j];
    }
    if(j===0) { ret[0][0] = A0[0]; }
  }
  return ret;
}
// Transpose with every entry negated (-x^T); same unrolled structure as
// transpose above.
numeric.negtranspose = function negtranspose(x) {
  var i,j,m = x.length,n = x[0].length, ret=Array(n),A0,A1,Bj;
  for(j=0;j<n;j++) ret[j] = Array(m);
  for(i=m-1;i>=1;i-=2) {
    A1 = x[i];
    A0 = x[i-1];
    for(j=n-1;j>=1;--j) {
      Bj = ret[j]; Bj[i] = -A1[j]; Bj[i-1] = -A0[j];
      --j;
      Bj = ret[j]; Bj[i] = -A1[j]; Bj[i-1] = -A0[j];
    }
    if(j===0) {
      Bj = ret[0]; Bj[i] = -A1[0]; Bj[i-1] = -A0[0];
    }
  }
  if(i===0) {
    A0 = x[0];
    for(j=n-1;j>=1;--j) {
      ret[j][0] = -A0[j];
      --j;
      ret[j][0] = -A0[j];
    }
    if(j===0) { ret[0][0] = -A0[0]; }
  }
  return ret;
}
// Recursive helper for numeric.random: fill an array of shape s (from
// dimension k on) with Math.random() values; innermost loop 2x-unrolled.
// Not seedable and not cryptographically secure.
numeric._random = function _random(s,k) {
  var i,n=s[k],ret=Array(n), rnd;
  if(k === s.length-1) {
    rnd = Math.random;
    for(i=n-1;i>=1;i-=2) {
      ret[i] = rnd();
      ret[i-1] = rnd();
    }
    if(i===0) { ret[0] = rnd(); }
    return ret;
  }
  for(i=n-1;i>=0;i--) ret[i] = _random(s,k+1);
  return ret;
}
// Array of shape s filled with uniform [0,1) random numbers.
numeric.random = function random(s) { return numeric._random(s,0); }
// Euclidean norm.
numeric.norm2 = function norm2(x) { return Math.sqrt(numeric.norm2Squared(x)); }
// n evenly spaced values from a to b inclusive; n defaults to roughly one
// point per unit. Returns [a] for n===1 and [] for n<1.
numeric.linspace = function linspace(a,b,n) {
  if(typeof n === "undefined") n = Math.max(Math.round(b-a)+1,1);
  if(n<2) { return n===1?[a]:[]; }
  var i,ret = Array(n);
  n--;
  for(i=n;i>=0;i--) { ret[i] = (i*b+(n-i)*a)/n; }
  return ret;
}
// Extract the sub-array of x between multi-indices `from` and `to`
// (both INCLUSIVE — note the loops run i=n down to 0, copying n+1
// entries per dimension).
numeric.getBlock = function getBlock(x,from,to) {
  var s = numeric.dim(x);
  function foo(x,k) {
    var i,a = from[k], n = to[k]-a, ret = Array(n);
    if(k === s.length-1) {
      for(i=n;i>=0;i--) { ret[i] = x[i+a]; }
      return ret;
    }
    for(i=n;i>=0;i--) { ret[i] = foo(x[i+a],k+1); }
    return ret;
  }
  return foo(x,0);
}
// Copy the array B into x between multi-indices `from` and `to` (both
// inclusive), mutating and returning x.
numeric.setBlock = function setBlock(x,from,to,B) {
  var s = numeric.dim(x);
  function foo(x,y,k) {
    var i,a = from[k], n = to[k]-a;
    if(k === s.length-1) {
      // Innermost dimension: copy the values and stop.
      // BUG FIX: this branch previously fell through into the recursion
      // below, descending one level past the leaves on every call (with
      // from[k+1]/to[k+1] undefined, so n was NaN and the loop body never
      // ran — same output, but a wasted recursive pass over every scalar).
      for(i=n;i>=0;i--) { x[i+a] = y[i]; }
      return;
    }
    for(i=n;i>=0;i--) { foo(x[i+a],y[i],k+1); }
  }
  foo(x,B,0);
  return x;
}
// Gather the submatrix of A whose row indices come from I and whose
// column indices come from J: result[r][c] === A[I[r]][J[c]].
numeric.getRange = function getRange(A,I,J) {
  var rows = I.length;
  var cols = J.length;
  var out = Array(rows);
  for (var r = rows - 1; r !== -1; --r) {
    var srcRow = A[I[r]];
    var dstRow = Array(cols);
    for (var c = cols - 1; c !== -1; --c) {
      dstRow[c] = srcRow[J[c]];
    }
    out[r] = dstRow;
  }
  return out;
}
// Assemble a block matrix: X is an m-by-n grid of matrices; the blocks are
// concatenated into a single M-by-N matrix. Block row heights are taken
// from column 0 and block column widths from row 0, so blocks must be
// dimensionally consistent.
numeric.blockMatrix = function blockMatrix(X) {
  var s = numeric.dim(X);
  // Fewer than 4 dims means X is a single row of blocks; wrap it.
  if(s.length<4) return numeric.blockMatrix([X]);
  var m=s[0],n=s[1],M,N,i,j,Xij;
  M = 0; N = 0;
  for(i=0;i<m;++i) M+=X[i][0].length;
  for(j=0;j<n;++j) N+=X[0][j][0].length;
  var Z = Array(M);
  for(i=0;i<M;++i) Z[i] = Array(N);
  var I=0,J,ZI,k,l,Xijk;
  for(i=0;i<m;++i) {
    // Fill block-columns right-to-left; J tracks the column offset.
    J=N;
    for(j=n-1;j!==-1;--j) {
      Xij = X[i][j];
      J -= Xij[0].length;
      for(k=Xij.length-1;k!==-1;--k) {
        Xijk = Xij[k];
        ZI = Z[I+k];
        for(l = Xijk.length-1;l!==-1;--l) ZI[J+l] = Xijk[l];
      }
    }
    I += X[i][0].length;
  }
  return Z;
}
// Outer (tensor) product of two vectors: A[i][j] = x[i]*y[j]. Scalars are
// delegated to numeric.mul; the inner loop is 4x-unrolled.
numeric.tensor = function tensor(x,y) {
  if(typeof x === "number" || typeof y === "number") return numeric.mul(x,y);
  var s1 = numeric.dim(x), s2 = numeric.dim(y);
  if(s1.length !== 1 || s2.length !== 1) {
    throw new Error('numeric: tensor product is only defined for vectors');
  }
  var m = s1[0], n = s2[0], A = Array(m), Ai, i,j,xi;
  for(i=m-1;i>=0;i--) {
    Ai = Array(n);
    xi = x[i];
    for(j=n-1;j>=3;--j) {
      Ai[j] = xi * y[j];
      --j;
      Ai[j] = xi * y[j];
      --j;
      Ai[j] = xi * y[j];
      --j;
      Ai[j] = xi * y[j];
    }
    // Remainder (up to 3 trailing entries).
    while(j>=0) { Ai[j] = xi * y[j]; --j; }
    A[i] = Ai;
  }
  return A;
}
// 3. The Tensor type T
// Complex-capable tensor: x holds the real part, y the (optional)
// imaginary part; y undefined means the value is purely real.
numeric.T = function T(x,y) { this.x = x; this.y = y; }
// Convenience constructor.
numeric.t = function t(x,y) { return new numeric.T(x,y); }
// Code generator for binary T operations. rr/rc/cr/cc are expression
// strings for the four real/complex operand combinations; `setup` (or an
// auto-generated prelude binding every numeric.* helper mentioned in the
// expressions) is prepended to the generated method.
numeric.Tbinop = function Tbinop(rr,rc,cr,cc,setup) {
  // NOTE(review): numeric.indexOf is not defined anywhere in this file
  // (numeric.myIndexOf is) — `io` is assigned undefined and never used.
  var io = numeric.indexOf;
  if(typeof setup !== "string") {
    var k;
    setup = '';
    for(k in numeric) {
      if(numeric.hasOwnProperty(k) && (rr.indexOf(k)>=0 || rc.indexOf(k)>=0 || cr.indexOf(k)>=0 || cc.indexOf(k)>=0) && k.length>1) {
        setup += 'var '+k+' = numeric.'+k+';\n';
      }
    }
  }
  return Function(['y'],
    'var x = this;\n'+
    'if(!(y instanceof numeric.T)) { y = new numeric.T(y); }\n'+
    setup+'\n'+
    'if(x.y) {'+
    ' if(y.y) {'+
    ' return new numeric.T('+cc+');\n'+
    ' }\n'+
    ' return new numeric.T('+cr+');\n'+
    '}\n'+
    'if(y.y) {\n'+
    ' return new numeric.T('+rc+');\n'+
    '}\n'+
    'return new numeric.T('+rr+');\n'
  );
}
// Complex-aware elementwise addition.
numeric.T.prototype.add = numeric.Tbinop(
  'add(x.x,y.x)',
  'add(x.x,y.x),y.y',
  'add(x.x,y.x),x.y',
  'add(x.x,y.x),add(x.y,y.y)');
// Complex-aware elementwise subtraction.
numeric.T.prototype.sub = numeric.Tbinop(
  'sub(x.x,y.x)',
  'sub(x.x,y.x),neg(y.y)',
  'sub(x.x,y.x),x.y',
  'sub(x.x,y.x),sub(x.y,y.y)');
// Complex-aware elementwise multiplication:
// (a+bi)(c+di) = (ac-bd) + (ad+bc)i.
numeric.T.prototype.mul = numeric.Tbinop(
  'mul(x.x,y.x)',
  'mul(x.x,y.x),mul(x.x,y.y)',
  'mul(x.x,y.x),mul(x.y,y.x)',
  'sub(mul(x.x,y.x),mul(x.y,y.y)),add(mul(x.x,y.y),mul(x.y,y.x))');
// Elementwise reciprocal 1/z. For complex z this is conj(z)/|z|^2,
// computed pointwise.
numeric.T.prototype.reciprocal = function reciprocal() {
  var mul = numeric.mul, div = numeric.div;
  if(this.y) {
    var d = numeric.add(mul(this.x,this.x),mul(this.y,this.y));
    return new numeric.T(div(this.x,d),div(numeric.neg(this.y),d));
  }
  // BUG FIX: this branch used `new T(...)`, but `T` is the NAME of the
  // function expression assigned to numeric.T and is only in scope inside
  // that function — so taking the reciprocal of a real-valued tensor threw
  // a ReferenceError. Qualify the constructor as numeric.T.
  return new numeric.T(div(1,this.x));
}
// Elementwise division: complex divisors go through mul(reciprocal);
// real divisors divide both parts directly.
numeric.T.prototype.div = function div(y) {
  if(!(y instanceof numeric.T)) y = new numeric.T(y);
  if(y.y) { return this.mul(y.reciprocal()); }
  var div = numeric.div;
  if(this.y) { return new numeric.T(div(this.x,y.x),div(this.y,y.x)); }
  return new numeric.T(div(this.x,y.x));
}
// Complex-aware matrix/vector product, built on numeric.dot.
numeric.T.prototype.dot = numeric.Tbinop(
  'dot(x.x,y.x)',
  'dot(x.x,y.x),dot(x.x,y.y)',
  'dot(x.x,y.x),dot(x.y,y.x)',
  'sub(dot(x.x,y.x),dot(x.y,y.y)),add(dot(x.x,y.y),dot(x.y,y.x))'
);
// Transpose of both parts (no conjugation).
numeric.T.prototype.transpose = function transpose() {
  var t = numeric.transpose, x = this.x, y = this.y;
  if(y) { return new numeric.T(t(x),t(y)); }
  return new numeric.T(t(x));
}
// Conjugate transpose: transpose the real part, negate-transpose the
// imaginary part.
numeric.T.prototype.transjugate = function transjugate() {
  var t = numeric.transpose, x = this.x, y = this.y;
  if(y) { return new numeric.T(t(x),numeric.negtranspose(y)); }
  return new numeric.T(t(x));
}
// Code generator for unary T operations: `r` is the real-only body, `c`
// the complex body, `s` an optional shared prelude. Note the complex
// branch is expected to return, otherwise control falls through to `r`.
numeric.Tunop = function Tunop(r,c,s) {
  if(typeof s !== "string") { s = ''; }
  return Function(
    'var x = this;\n'+
    s+'\n'+
    'if(x.y) {'+
    ' '+c+';\n'+
    '}\n'+
    r+';\n'
  );
}
// exp(a+bi) = e^a (cos b + i sin b).
numeric.T.prototype.exp = numeric.Tunop(
  'return new numeric.T(ex)',
  'return new numeric.T(mul(cos(x.y),ex),mul(sin(x.y),ex))',
  'var ex = numeric.exp(x.x), cos = numeric.cos, sin = numeric.sin, mul = numeric.mul;');
// Complex conjugate.
numeric.T.prototype.conj = numeric.Tunop(
  'return new numeric.T(x.x);',
  'return new numeric.T(x.x,numeric.neg(x.y));');
// Elementwise negation.
numeric.T.prototype.neg = numeric.Tunop(
  'return new numeric.T(neg(x.x));',
  'return new numeric.T(neg(x.x),neg(x.y));',
  'var neg = numeric.neg;');
// sin z = (e^{iz} - e^{-iz}) / 2i, implemented via exp for complex input.
numeric.T.prototype.sin = numeric.Tunop(
  'return new numeric.T(numeric.sin(x.x))',
  'return x.exp().sub(x.neg().exp()).div(new numeric.T(0,2));');
// cos z = (e^{iz} + e^{-iz}) / 2.
numeric.T.prototype.cos = numeric.Tunop(
  'return new numeric.T(numeric.cos(x.x))',
  'return x.exp().add(x.neg().exp()).div(2);');
// Elementwise magnitude: |a+bi| = sqrt(a^2+b^2).
numeric.T.prototype.abs = numeric.Tunop(
  'return new numeric.T(numeric.abs(x.x));',
  'return new numeric.T(numeric.sqrt(numeric.add(mul(x.x,x.x),mul(x.y,x.y))));',
  'var mul = numeric.mul;');
// Principal complex logarithm: log r + i*theta.
numeric.T.prototype.log = numeric.Tunop(
  'return new numeric.T(numeric.log(x.x));',
  'var theta = new numeric.T(numeric.atan2(x.y,x.x)), r = x.abs();\n'+
  'return new numeric.T(numeric.log(r.x),theta.x);');
// Frobenius/Euclidean norm over both parts.
numeric.T.prototype.norm2 = numeric.Tunop(
  'return numeric.norm2(x.x);',
  'var f = numeric.norm2Squared;\n'+
  'return Math.sqrt(f(x.x)+f(x.y));');
// Complex matrix inverse via Gauss-Jordan elimination with partial
// pivoting on the squared magnitude, carrying real (x) and imaginary (y)
// parts in parallel. Real-only matrices delegate to numeric.inv.
numeric.T.prototype.inv = function inv() {
  var A = this;
  if(typeof A.y === "undefined") { return new numeric.T(numeric.inv(A.x)); }
  var n = A.x.length, i, j, k;
  // R starts as the identity and is transformed into the inverse.
  var Rx = numeric.identity(n),Ry = numeric.rep([n,n],0);
  var Ax = numeric.clone(A.x), Ay = numeric.clone(A.y);
  var Aix, Aiy, Ajx, Ajy, Rix, Riy, Rjx, Rjy;
  var i,j,k,d,d1,ax,ay,bx,by,temp;
  for(i=0;i<n;i++) {
    // Pivot on the largest |a|^2 = ax^2+ay^2 in column i, rows i..n-1.
    ax = Ax[i][i]; ay = Ay[i][i];
    d = ax*ax+ay*ay;
    k = i;
    for(j=i+1;j<n;j++) {
      ax = Ax[j][i]; ay = Ay[j][i];
      d1 = ax*ax+ay*ay;
      if(d1 > d) { k=j; d = d1; }
    }
    if(k!==i) {
      temp = Ax[i]; Ax[i] = Ax[k]; Ax[k] = temp;
      temp = Ay[i]; Ay[i] = Ay[k]; Ay[k] = temp;
      temp = Rx[i]; Rx[i] = Rx[k]; Rx[k] = temp;
      temp = Ry[i]; Ry[i] = Ry[k]; Ry[k] = temp;
    }
    Aix = Ax[i]; Aiy = Ay[i];
    Rix = Rx[i]; Riy = Ry[i];
    ax = Aix[i]; ay = Aiy[i];
    // Divide row i by the pivot: b/a = b*conj(a)/|a|^2.
    for(j=i+1;j<n;j++) {
      bx = Aix[j]; by = Aiy[j];
      Aix[j] = (bx*ax+by*ay)/d;
      Aiy[j] = (by*ax-bx*ay)/d;
    }
    for(j=0;j<n;j++) {
      bx = Rix[j]; by = Riy[j];
      Rix[j] = (bx*ax+by*ay)/d;
      Riy[j] = (by*ax-bx*ay)/d;
    }
    // Eliminate column i from the rows below (complex multiply-subtract).
    for(j=i+1;j<n;j++) {
      Ajx = Ax[j]; Ajy = Ay[j];
      Rjx = Rx[j]; Rjy = Ry[j];
      ax = Ajx[i]; ay = Ajy[i];
      for(k=i+1;k<n;k++) {
        bx = Aix[k]; by = Aiy[k];
        Ajx[k] -= bx*ax-by*ay;
        Ajy[k] -= by*ax+bx*ay;
      }
      for(k=0;k<n;k++) {
        bx = Rix[k]; by = Riy[k];
        Rjx[k] -= bx*ax-by*ay;
        Rjy[k] -= by*ax+bx*ay;
      }
    }
  }
  // Back-substitution: clear the strict upper triangle.
  for(i=n-1;i>0;i--) {
    Rix = Rx[i]; Riy = Ry[i];
    for(j=i-1;j>=0;j--) {
      Rjx = Rx[j]; Rjy = Ry[j];
      ax = Ax[j][i]; ay = Ay[j][i];
      for(k=n-1;k>=0;k--) {
        bx = Rix[k]; by = Riy[k];
        Rjx[k] -= ax*bx - ay*by;
        Rjy[k] -= ax*by + ay*bx;
      }
    }
  }
  return new numeric.T(Rx,Ry);
}
// Drill down through the multi-index array i and return the addressed
// element wrapped in a new numeric.T.  An empty index array returns the
// whole tensor (the underlying arrays are shared, not copied).
numeric.T.prototype.get = function get(i) {
    var x = this.x, y = this.y, n = i.length, p, step;
    if(y) {
        for(p=0;p<n;p++) {
            step = i[p];
            x = x[step];
            y = y[step];
        }
        return new numeric.T(x,y);
    }
    for(p=0;p<n;p++) { x = x[i[p]]; }
    return new numeric.T(x);
}
// Set the element addressed by the multi-index array i to the value v
// (a numeric.T); an empty index array replaces the whole tensor.
// Promotes this tensor to complex when v is complex, and zeroes the
// matching imaginary slot when a real value lands in a complex tensor.
// Mutates and returns `this` for chaining.
numeric.T.prototype.set = function set(i,v) {
    var x = this.x, y = this.y, k = 0, ik, n = i.length, vx = v.x, vy = v.y;
    if(n===0) {
        // Empty index: replace the entire tensor with v.
        // Bug fix: the real part was previously re-assigned from the old
        // value (`this.x = x`) instead of taken from v (`vx`), so an
        // empty-index set never updated the real part.
        if(vy) { this.y = vy; }
        else if(y) { this.y = undefined; }
        this.x = vx;
        return this;
    }
    if(vy) {
        // Storing a complex value: make sure an imaginary part exists.
        if(!y) {
            y = numeric.rep(numeric.dim(x),0);
            this.y = y;
        }
        while(k<n-1) {
            ik = i[k];
            x = x[ik];
            y = y[ik];
            k++;
        }
        ik = i[k];
        x[ik] = vx;
        y[ik] = vy;
        return this;
    }
    if(y) {
        // Storing a real value into a complex tensor: zero the imaginary
        // slot (elementwise when vx is an array).
        while(k<n-1) {
            ik = i[k];
            x = x[ik];
            y = y[ik];
            k++;
        }
        ik = i[k];
        x[ik] = vx;
        if(vx instanceof Array) y[ik] = numeric.rep(numeric.dim(vx),0);
        else y[ik] = 0;
        return this;
    }
    // Purely real tensor and value.
    while(k<n-1) {
        ik = i[k];
        x = x[ik];
        k++;
    }
    ik = i[k];
    x[ik] = vx;
    return this;
}
// Return rows i0..i1 (inclusive) as a new numeric.T; row arrays are
// shared with this tensor, not copied.
numeric.T.prototype.getRows = function getRows(i0,i1) {
    var count = i1-i0+1, k;
    var x = this.x, y = this.y;
    var rx = Array(count);
    for(k=0;k<count;k++) { rx[k] = x[i0+k]; }
    if(!y) { return new numeric.T(rx); }
    var ry = Array(count);
    for(k=0;k<count;k++) { ry[k] = y[i0+k]; }
    return new numeric.T(rx,ry);
}
// Overwrite rows i0..i1 of this tensor with the rows of A (a numeric.T).
// If A is complex, this tensor gains an imaginary part as needed; if A is
// real but this tensor is complex, the affected imaginary rows are zeroed.
// Mutates and returns `this`.
numeric.T.prototype.setRows = function setRows(i0,i1,A) {
    var destX = this.x, destY = this.y;
    var srcX = A.x, srcY = A.y;
    var row;
    for(row=i0;row<=i1;row++) { destX[row] = srcX[row-i0]; }
    if(srcY) {
        if(!destY) {
            destY = numeric.rep(numeric.dim(destX),0);
            this.y = destY;
        }
        for(row=i0;row<=i1;row++) { destY[row] = srcY[row-i0]; }
    } else if(destY) {
        for(row=i0;row<=i1;row++) {
            destY[row] = numeric.rep([srcX[row-i0].length],0);
        }
    }
    return this;
}
// Return row k as a new numeric.T (complex when this tensor is complex).
numeric.T.prototype.getRow = function getRow(k) {
    var y = this.y;
    return y ? new numeric.T(this.x[k], y[k]) : new numeric.T(this.x[k]);
}
numeric.T.prototype.setRow = function setRow(i,
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment