Created
December 8, 2023 19:46
-
-
Save plmok61/8f390264e46341e20741d78a8531aa15 to your computer and use it in GitHub Desktop.
TS Bundle
This file has been truncated, but you can view the full file.
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
/******/ (() => { // webpackBootstrap | |
/******/ var __webpack_modules__ = ({ | |
/***/ "./src/App.tsx": | |
/*!*********************!*\ | |
!*** ./src/App.tsx ***! | |
\*********************/ | |
/***/ ((module, __webpack_exports__, __webpack_require__) => { | |
"use strict"; | |
__webpack_require__.r(__webpack_exports__); | |
/* harmony export */ __webpack_require__.d(__webpack_exports__, { | |
/* harmony export */ "default": () => (__WEBPACK_DEFAULT_EXPORT__) | |
/* harmony export */ }); | |
/* harmony import */ var react__WEBPACK_IMPORTED_MODULE_0__ = __webpack_require__(/*! react */ "./node_modules/react/index.js"); | |
/* harmony import */ var react__WEBPACK_IMPORTED_MODULE_0___default = /*#__PURE__*/__webpack_require__.n(react__WEBPACK_IMPORTED_MODULE_0__); | |
/* harmony import */ var _vladmandic_human__WEBPACK_IMPORTED_MODULE_5__ = __webpack_require__(/*! @vladmandic/human */ "./node_modules/@vladmandic/human/dist/human.esm.js"); | |
/* harmony import */ var _config__WEBPACK_IMPORTED_MODULE_1__ = __webpack_require__(/*! ./config */ "./src/config.ts"); | |
/* harmony import */ var _setVideoSource__WEBPACK_IMPORTED_MODULE_2__ = __webpack_require__(/*! ./setVideoSource */ "./src/setVideoSource.ts"); | |
/* harmony import */ var _App_css__WEBPACK_IMPORTED_MODULE_3__ = __webpack_require__(/*! ./App.css */ "./src/App.css"); | |
/* harmony import */ var react_jsx_dev_runtime__WEBPACK_IMPORTED_MODULE_4__ = __webpack_require__(/*! react/jsx-dev-runtime */ "./node_modules/react/jsx-dev-runtime.js"); | |
/* provided dependency */ var __react_refresh_utils__ = __webpack_require__(/*! ./node_modules/@pmmmwh/react-refresh-webpack-plugin/lib/runtime/RefreshUtils.js */ "./node_modules/@pmmmwh/react-refresh-webpack-plugin/lib/runtime/RefreshUtils.js"); | |
__webpack_require__.$Refresh$.runtime = __webpack_require__(/*! ./node_modules/react-refresh/runtime.js */ "./node_modules/react-refresh/runtime.js"); | |
var _jsxFileName = "/Users/philipmok/Desktop/test-human-ts/src/App.tsx", | |
_s = __webpack_require__.$Refresh$.signature(); | |
// App — webcam face-detection demo (compiled from src/App.tsx by a webpack dev build;
// the __WEBPACK_* indirection and _s() calls are bundler/react-refresh scaffolding).
// Flow: initialize a @vladmandic/human instance, attach the user-facing camera to the
// <video> element, then run either a recursive detection loop (100 attempts) or a
// single one-shot detection, rendering results below the video.
function App() {
  _s(); // react-refresh signature tracking (generated)
  const videoRef = (0,react__WEBPACK_IMPORTED_MODULE_0__.useRef)(null); // the <video> element receiving the camera stream
  const [videoElReady, setVideoElReady] = (0,react__WEBPACK_IMPORTED_MODULE_0__.useState)(false); // video metadata has loaded
  const [human, setHuman] = (0,react__WEBPACK_IMPORTED_MODULE_0__.useState)(null); // initialized Human instance (null until ready / after "Re-init")
  const [loading, setLoading] = (0,react__WEBPACK_IMPORTED_MODULE_0__.useState)(true); // true while the camera stream is being attached
  const [faces, setFaces] = (0,react__WEBPACK_IMPORTED_MODULE_0__.useState)([]); // faces collected by the recursive loop (set once, at loop end)
  const [faceDetected, setFaceDetected] = (0,react__WEBPACK_IMPORTED_MODULE_0__.useState)(false); // whether the last recursive attempt found a face
  const [face, setFace] = (0,react__WEBPACK_IMPORTED_MODULE_0__.useState)(null); // most recent face result from the recursive loop
  const [progress, setProgress] = (0,react__WEBPACK_IMPORTED_MODULE_0__.useState)(0); // attempt counter shown as "Faces: n / 100"
  const [status, setStatus] = (0,react__WEBPACK_IMPORTED_MODULE_0__.useState)('complete'); // 'complete' | 'in progress' for the recursive loop
  const [error, setError] = (0,react__WEBPACK_IMPORTED_MODULE_0__.useState)(null); // last detect() error reported by the loop, if any
  const [averageAge, setAverageAge] = (0,react__WEBPACK_IMPORTED_MODULE_0__.useState)(null); // mean age across collected faces
  const [recursiveDetect, setRecursiveDetect] = (0,react__WEBPACK_IMPORTED_MODULE_0__.useState)(false); // toggle for the recursive loop
  const [detectOnceFace, setDetectOnceFace] = (0,react__WEBPACK_IMPORTED_MODULE_0__.useState)(false); // one-shot detect found a face
  // Fires when the <video> has metadata; detection effects wait on videoElReady.
  const handleLoadedMetadata = (0,react__WEBPACK_IMPORTED_MODULE_0__.useCallback)(() => {
    setVideoElReady(true);
  }, [setVideoElReady]);
  // Initialization effect: construct Human, load models, warm up. Setting `human`
  // back to null (the "Re-init" button) re-runs this effect.
  (0,react__WEBPACK_IMPORTED_MODULE_0__.useEffect)(() => {
    if (human) {
      return;
    }
    const init = async () => {
      const h = new _vladmandic_human__WEBPACK_IMPORTED_MODULE_5__["default"](_config__WEBPACK_IMPORTED_MODULE_1__["default"]);
      console.log('load start');
      await h.load();
      console.log('warmup start');
      await h.warmup();
      console.log('done initializing');
      console.log(h.config);
      console.log(h.env);
      const models = h.models.list();
      console.log(models);
      setHuman(h);
    };
    init();
  }, [human]);
  // Recursive detection loop: up to 100 attempts spaced 25ms apart, accumulating
  // successful face results in facesArray. `count` counts attempts, not faces, so
  // facesArray (and the "Faces: n / 100" display) may hold fewer than 100 entries.
  // NOTE(review): there is no cleanup — toggling recursiveDetect off does not stop
  // an in-flight loop; confirm this is intentional.
  (0,react__WEBPACK_IMPORTED_MODULE_0__.useEffect)(() => {
    if (!videoElReady || !human) {
      return;
    }
    const videoEl = videoRef.current;
    if (videoEl === null) {
      return;
    }
    if (recursiveDetect) {
      const recursive = async () => {
        console.log('Starting recursive detect');
        setStatus('in progress');
        let count = 0;
        let facesArray = [];
        setError(null);
        setFaces([]);
        setFace(null);
        setFaceDetected(false);
        const detect = async () => {
          count += 1;
          if (count >= 100) {
            setFaces(facesArray);
            setStatus('complete');
            return;
          }
          try {
            const res = await human.detect(videoEl);
            if (res !== null && res !== void 0 && res.error) {
              console.error(res.error);
              setFaceDetected(false);
              // NOTE(review): the next attempt is scheduled even on error, so a
              // persistent detect() error keeps retrying until the attempt cap.
              setTimeout(detect, 25);
              setError(res.error);
              return;
            }
            console.log('face response', res);
            // NOTE(review): faceScore < 1 rejects anything below a perfect score —
            // confirm this threshold is intended rather than e.g. 0.9.
            if (!res.face.length || res.face[0].faceScore < 1) {
              console.warn('No face detected');
              setFaceDetected(false);
              setTimeout(detect, 25);
              return;
            }
            const face = res.face[0];
            if (face) {
              setFaceDetected(true);
              facesArray.push(face);
              setFace(face);
              setProgress(count);
              setTimeout(detect, 25);
            }
          } catch (error) {
            // NOTE(review): a thrown detect() error ends the loop silently — no retry
            // is scheduled and status stays 'in progress' (button remains disabled).
            console.error(error);
            setFaceDetected(false);
          }
        };
        detect();
      };
      recursive();
    }
  }, [videoElReady, human, videoRef, recursiveDetect]);
  // Mount-time effect: attach the user-facing camera stream to the <video> element.
  (0,react__WEBPACK_IMPORTED_MODULE_0__.useEffect)(() => {
    const awaitSetVideoSource = async () => {
      if (!videoRef.current) {
        return;
      }
      await (0,_setVideoSource__WEBPACK_IMPORTED_MODULE_2__["default"])(videoRef.current, 'user', err => {
        if (err.name === 'NotAllowedError') {
          console.warn('Camera permission denied.', {
            error: err.message
          });
          return;
        }
        console.error('Error setting video source', err);
      });
      setLoading(false);
    };
    awaitSetVideoSource();
  }, []);
  // Recompute the mean age whenever the collected-faces array is replaced.
  // Faces without an age value are skipped in the sum but still count in the divisor.
  (0,react__WEBPACK_IMPORTED_MODULE_0__.useEffect)(() => {
    if (!faces.length) {
      return;
    }
    const averageAge = faces.reduce((acc, face) => {
      if (!face.age) {
        return acc;
      }
      return acc + face.age;
    }, 0) / faces.length;
    setAverageAge(averageAge);
  }, [faces]);
  // One-shot detection: turns the recursive loop toggle off, runs a single detect,
  // and logs TF memory/flags plus the raw result for inspection.
  const detectOnce = (0,react__WEBPACK_IMPORTED_MODULE_0__.useCallback)(async () => {
    if (!videoRef.current) return;
    if (!human) return;
    setRecursiveDetect(false);
    const videoEl = videoRef.current;
    const res = await human.detect(videoEl);
    console.log('Memory: ', human.tf.engine().memory());
    console.log('TensorFlow Flags: ', human.tf.ENV.flags);
    console.log('Performance: ', res.performance);
    console.log('Faces: ', res.face);
    // Same perfect-score threshold as the recursive loop (see NOTE above).
    if (!res.face.length || res.face[0].faceScore < 1) {
      console.warn('No face detected');
      return setDetectOnceFace(false);
    }
    setDetectOnceFace(true);
  }, [human]);
  // Render: mirrored video preview, init status, then the two detection panels.
  // (jsxDEV calls are the compiled form of JSX; the trailing fileName/lineNumber
  // objects are dev-mode source metadata.)
  return /*#__PURE__*/(0,react_jsx_dev_runtime__WEBPACK_IMPORTED_MODULE_4__.jsxDEV)("div", {
    className: "App",
    children: [/*#__PURE__*/(0,react_jsx_dev_runtime__WEBPACK_IMPORTED_MODULE_4__.jsxDEV)("div", {
      style: {
        width: '50%'
      },
      children: /*#__PURE__*/(0,react_jsx_dev_runtime__WEBPACK_IMPORTED_MODULE_4__.jsxDEV)("video", {
        ref: videoRef,
        "data-testid": "age-prediction-video",
        style: {
          transform: 'rotateY(180deg)', // mirror the preview like a selfie camera
          objectFit: 'cover',
          width: '100%',
          height: '100%'
        },
        onLoadedMetadata: handleLoadedMetadata,
        playsInline: true,
        autoPlay: true,
        muted: true
      }, void 0, false, {
        fileName: _jsxFileName,
        lineNumber: 163,
        columnNumber: 9
      }, this)
    }, void 0, false, {
      fileName: _jsxFileName,
      lineNumber: 162,
      columnNumber: 7
    }, this), loading && /*#__PURE__*/(0,react_jsx_dev_runtime__WEBPACK_IMPORTED_MODULE_4__.jsxDEV)("p", {
      children: "Setting video source"
    }, void 0, false, {
      fileName: _jsxFileName,
      lineNumber: 178,
      columnNumber: 19
    }, this), /*#__PURE__*/(0,react_jsx_dev_runtime__WEBPACK_IMPORTED_MODULE_4__.jsxDEV)("p", {
      children: "Typescript"
    }, void 0, false, {
      fileName: _jsxFileName,
      lineNumber: 179,
      columnNumber: 7
    }, this), human ? /*#__PURE__*/(0,react_jsx_dev_runtime__WEBPACK_IMPORTED_MODULE_4__.jsxDEV)(react_jsx_dev_runtime__WEBPACK_IMPORTED_MODULE_4__.Fragment, {
      children: [/*#__PURE__*/(0,react_jsx_dev_runtime__WEBPACK_IMPORTED_MODULE_4__.jsxDEV)("div", {
        style: {
          border: '1px solid black',
          padding: '1rem',
          margin: '1rem'
        },
        children: [/*#__PURE__*/(0,react_jsx_dev_runtime__WEBPACK_IMPORTED_MODULE_4__.jsxDEV)("button", {
          onClick: () => setRecursiveDetect(prev => !prev),
          disabled: status === 'in progress',
          children: `Recursive Detect ${recursiveDetect ? 'On' : 'Off'}`
        }, void 0, false, {
          fileName: _jsxFileName,
          lineNumber: 190,
          columnNumber: 13
        }, this), /*#__PURE__*/(0,react_jsx_dev_runtime__WEBPACK_IMPORTED_MODULE_4__.jsxDEV)("div", {
          children: [/*#__PURE__*/(0,react_jsx_dev_runtime__WEBPACK_IMPORTED_MODULE_4__.jsxDEV)("p", {
            children: ["Face Detected: ", faceDetected ? 'Yes' : 'No']
          }, void 0, true, {
            fileName: _jsxFileName,
            lineNumber: 198,
            columnNumber: 15
          }, this), /*#__PURE__*/(0,react_jsx_dev_runtime__WEBPACK_IMPORTED_MODULE_4__.jsxDEV)("p", {
            children: ["Faces: ", progress, " / 100"]
          }, void 0, true, {
            fileName: _jsxFileName,
            lineNumber: 199,
            columnNumber: 15
          }, this), /*#__PURE__*/(0,react_jsx_dev_runtime__WEBPACK_IMPORTED_MODULE_4__.jsxDEV)("p", {
            children: ["Error: ", error ? error.message : 'none']
          }, void 0, true, {
            fileName: _jsxFileName,
            lineNumber: 200,
            columnNumber: 15
          }, this), face && /*#__PURE__*/(0,react_jsx_dev_runtime__WEBPACK_IMPORTED_MODULE_4__.jsxDEV)(react_jsx_dev_runtime__WEBPACK_IMPORTED_MODULE_4__.Fragment, {
            children: [/*#__PURE__*/(0,react_jsx_dev_runtime__WEBPACK_IMPORTED_MODULE_4__.jsxDEV)("h3", {
              children: "Most Recent Face"
            }, void 0, false, {
              fileName: _jsxFileName,
              lineNumber: 203,
              columnNumber: 19
            }, this), /*#__PURE__*/(0,react_jsx_dev_runtime__WEBPACK_IMPORTED_MODULE_4__.jsxDEV)("p", {
              children: ["Age: ", face.age || 'null']
            }, void 0, true, {
              fileName: _jsxFileName,
              lineNumber: 204,
              columnNumber: 19
            }, this), /*#__PURE__*/(0,react_jsx_dev_runtime__WEBPACK_IMPORTED_MODULE_4__.jsxDEV)("p", {
              children: ["Live Score: ", face.live || 'null']
            }, void 0, true, {
              fileName: _jsxFileName,
              lineNumber: 205,
              columnNumber: 19
            }, this), /*#__PURE__*/(0,react_jsx_dev_runtime__WEBPACK_IMPORTED_MODULE_4__.jsxDEV)("p", {
              children: ["Real Score: ", face.real || 'null']
            }, void 0, true, {
              fileName: _jsxFileName,
              lineNumber: 206,
              columnNumber: 19
            }, this)]
          }, void 0, true), averageAge && /*#__PURE__*/(0,react_jsx_dev_runtime__WEBPACK_IMPORTED_MODULE_4__.jsxDEV)("p", {
            children: ["Average Age: ", averageAge]
          }, void 0, true, {
            fileName: _jsxFileName,
            lineNumber: 210,
            columnNumber: 17
          }, this)]
        }, void 0, true, {
          fileName: _jsxFileName,
          lineNumber: 197,
          columnNumber: 13
        }, this)]
      }, void 0, true, {
        fileName: _jsxFileName,
        lineNumber: 183,
        columnNumber: 11
      }, this), /*#__PURE__*/(0,react_jsx_dev_runtime__WEBPACK_IMPORTED_MODULE_4__.jsxDEV)("div", {
        style: {
          border: '1px solid black',
          padding: '1rem',
          margin: '1rem'
        },
        children: [/*#__PURE__*/(0,react_jsx_dev_runtime__WEBPACK_IMPORTED_MODULE_4__.jsxDEV)("button", {
          onClick: detectOnce,
          children: "Detect Once"
        }, void 0, false, {
          fileName: _jsxFileName,
          lineNumber: 222,
          columnNumber: 13
        }, this), /*#__PURE__*/(0,react_jsx_dev_runtime__WEBPACK_IMPORTED_MODULE_4__.jsxDEV)("p", {
          children: ["Face Detected: ", detectOnceFace ? 'Yes' : 'No']
        }, void 0, true, {
          fileName: _jsxFileName,
          lineNumber: 227,
          columnNumber: 13
        }, this)]
      }, void 0, true, {
        fileName: _jsxFileName,
        lineNumber: 215,
        columnNumber: 11
      }, this), /*#__PURE__*/(0,react_jsx_dev_runtime__WEBPACK_IMPORTED_MODULE_4__.jsxDEV)("button", {
        onClick: () => setHuman(null),
        children: "Re-init"
      }, void 0, false, {
        fileName: _jsxFileName,
        lineNumber: 230,
        columnNumber: 11
      }, this)]
    }, void 0, true) : /*#__PURE__*/(0,react_jsx_dev_runtime__WEBPACK_IMPORTED_MODULE_4__.jsxDEV)("p", {
      children: "Initializing..."
    }, void 0, false, {
      fileName: _jsxFileName,
      lineNumber: 236,
      columnNumber: 12
    }, this)]
  }, void 0, true, {
    fileName: _jsxFileName,
    lineNumber: 161,
    columnNumber: 5
  }, this);
}
_s(App, "10rxijvImZlPjjQhHkwdj/tJS8c="); | |
_c = App; | |
/* harmony default export */ const __WEBPACK_DEFAULT_EXPORT__ = (App); | |
var _c; | |
__webpack_require__.$Refresh$.register(_c, "App"); | |
const $ReactRefreshModuleId$ = __webpack_require__.$Refresh$.moduleId; | |
const $ReactRefreshCurrentExports$ = __react_refresh_utils__.getModuleExports( | |
$ReactRefreshModuleId$ | |
); | |
function $ReactRefreshModuleRuntime$(exports) { | |
if (true) { | |
let errorOverlay; | |
if (true) { | |
errorOverlay = false; | |
} | |
let testMode; | |
if (typeof __react_refresh_test__ !== 'undefined') { | |
testMode = __react_refresh_test__; | |
} | |
return __react_refresh_utils__.executeRuntime( | |
exports, | |
$ReactRefreshModuleId$, | |
module.hot, | |
errorOverlay, | |
testMode | |
); | |
} | |
} | |
if (typeof Promise !== 'undefined' && $ReactRefreshCurrentExports$ instanceof Promise) { | |
$ReactRefreshCurrentExports$.then($ReactRefreshModuleRuntime$); | |
} else { | |
$ReactRefreshModuleRuntime$($ReactRefreshCurrentExports$); | |
} | |
/***/ }), | |
/***/ "./src/config.ts": | |
/*!***********************!*\ | |
!*** ./src/config.ts ***! | |
\***********************/ | |
/***/ ((module, __webpack_exports__, __webpack_require__) => { | |
"use strict"; | |
__webpack_require__.r(__webpack_exports__); | |
/* harmony export */ __webpack_require__.d(__webpack_exports__, { | |
/* harmony export */ "default": () => (__WEBPACK_DEFAULT_EXPORT__) | |
/* harmony export */ }); | |
/* provided dependency */ var __react_refresh_utils__ = __webpack_require__(/*! ./node_modules/@pmmmwh/react-refresh-webpack-plugin/lib/runtime/RefreshUtils.js */ "./node_modules/@pmmmwh/react-refresh-webpack-plugin/lib/runtime/RefreshUtils.js"); | |
__webpack_require__.$Refresh$.runtime = __webpack_require__(/*! ./node_modules/react-refresh/runtime.js */ "./node_modules/react-refresh/runtime.js"); | |
// Human library configuration: face-only pipeline. Gesture/hand/body/segmentation
// are disabled to keep the model load small; face analysis enables mesh, iris,
// emotion, liveness, and antispoof (the App reads face.live / face.real / face.age).
const humanConfig = {
  debug: true, // verbose logging from the Human runtime
  gesture: {
    enabled: false
  },
  hand: {
    enabled: false
  },
  body: {
    enabled: false
  },
  segmentation: {
    enabled: false
  },
  face: {
    antispoof: {
      enabled: true // presumably feeds the face.real score shown in App — confirm against library docs
    },
    liveness: {
      enabled: true // presumably feeds the face.live score shown in App — confirm against library docs
    },
    iris: {
      enabled: true
    },
    emotion: {
      enabled: true
    },
    mesh: {
      enabled: true
    },
    detector: {
      rotation: true
    }
  }
};
/* harmony default export */ const __WEBPACK_DEFAULT_EXPORT__ = (humanConfig); | |
const $ReactRefreshModuleId$ = __webpack_require__.$Refresh$.moduleId; | |
const $ReactRefreshCurrentExports$ = __react_refresh_utils__.getModuleExports( | |
$ReactRefreshModuleId$ | |
); | |
function $ReactRefreshModuleRuntime$(exports) { | |
if (true) { | |
let errorOverlay; | |
if (true) { | |
errorOverlay = false; | |
} | |
let testMode; | |
if (typeof __react_refresh_test__ !== 'undefined') { | |
testMode = __react_refresh_test__; | |
} | |
return __react_refresh_utils__.executeRuntime( | |
exports, | |
$ReactRefreshModuleId$, | |
module.hot, | |
errorOverlay, | |
testMode | |
); | |
} | |
} | |
if (typeof Promise !== 'undefined' && $ReactRefreshCurrentExports$ instanceof Promise) { | |
$ReactRefreshCurrentExports$.then($ReactRefreshModuleRuntime$); | |
} else { | |
$ReactRefreshModuleRuntime$($ReactRefreshCurrentExports$); | |
} | |
/***/ }), | |
/***/ "./src/index.tsx": | |
/*!***********************!*\ | |
!*** ./src/index.tsx ***! | |
\***********************/ | |
/***/ ((module, __webpack_exports__, __webpack_require__) => { | |
"use strict"; | |
__webpack_require__.r(__webpack_exports__); | |
/* harmony import */ var react__WEBPACK_IMPORTED_MODULE_0__ = __webpack_require__(/*! react */ "./node_modules/react/index.js"); | |
/* harmony import */ var react__WEBPACK_IMPORTED_MODULE_0___default = /*#__PURE__*/__webpack_require__.n(react__WEBPACK_IMPORTED_MODULE_0__); | |
/* harmony import */ var react_dom_client__WEBPACK_IMPORTED_MODULE_1__ = __webpack_require__(/*! react-dom/client */ "./node_modules/react-dom/client.js"); | |
/* harmony import */ var _index_css__WEBPACK_IMPORTED_MODULE_2__ = __webpack_require__(/*! ./index.css */ "./src/index.css"); | |
/* harmony import */ var _App__WEBPACK_IMPORTED_MODULE_3__ = __webpack_require__(/*! ./App */ "./src/App.tsx"); | |
/* harmony import */ var _reportWebVitals__WEBPACK_IMPORTED_MODULE_4__ = __webpack_require__(/*! ./reportWebVitals */ "./src/reportWebVitals.ts"); | |
/* harmony import */ var react_jsx_dev_runtime__WEBPACK_IMPORTED_MODULE_5__ = __webpack_require__(/*! react/jsx-dev-runtime */ "./node_modules/react/jsx-dev-runtime.js"); | |
/* provided dependency */ var __react_refresh_utils__ = __webpack_require__(/*! ./node_modules/@pmmmwh/react-refresh-webpack-plugin/lib/runtime/RefreshUtils.js */ "./node_modules/@pmmmwh/react-refresh-webpack-plugin/lib/runtime/RefreshUtils.js"); | |
__webpack_require__.$Refresh$.runtime = __webpack_require__(/*! ./node_modules/react-refresh/runtime.js */ "./node_modules/react-refresh/runtime.js"); | |
var _jsxFileName = "/Users/philipmok/Desktop/test-human-ts/src/index.tsx";
// Entry point: mount <App /> at #root. Assumes the host page provides the
// #root element — if it is missing, createRoot receives null and will throw.
const root = react_dom_client__WEBPACK_IMPORTED_MODULE_1__.createRoot(document.getElementById('root'));
root.render( /*#__PURE__*/(0,react_jsx_dev_runtime__WEBPACK_IMPORTED_MODULE_5__.jsxDEV)(_App__WEBPACK_IMPORTED_MODULE_3__["default"], {}, void 0, false, {
  fileName: _jsxFileName,
  lineNumber: 11,
  columnNumber: 3
}, undefined));
// If you want to start measuring performance in your app, pass a function
// to log results (for example: reportWebVitals(console.log))
// or send to an analytics endpoint. Learn more: https://bit.ly/CRA-vitals
(0,_reportWebVitals__WEBPACK_IMPORTED_MODULE_4__["default"])();
const $ReactRefreshModuleId$ = __webpack_require__.$Refresh$.moduleId; | |
const $ReactRefreshCurrentExports$ = __react_refresh_utils__.getModuleExports( | |
$ReactRefreshModuleId$ | |
); | |
function $ReactRefreshModuleRuntime$(exports) { | |
if (true) { | |
let errorOverlay; | |
if (true) { | |
errorOverlay = false; | |
} | |
let testMode; | |
if (typeof __react_refresh_test__ !== 'undefined') { | |
testMode = __react_refresh_test__; | |
} | |
return __react_refresh_utils__.executeRuntime( | |
exports, | |
$ReactRefreshModuleId$, | |
module.hot, | |
errorOverlay, | |
testMode | |
); | |
} | |
} | |
if (typeof Promise !== 'undefined' && $ReactRefreshCurrentExports$ instanceof Promise) { | |
$ReactRefreshCurrentExports$.then($ReactRefreshModuleRuntime$); | |
} else { | |
$ReactRefreshModuleRuntime$($ReactRefreshCurrentExports$); | |
} | |
/***/ }), | |
/***/ "./src/reportWebVitals.ts": | |
/*!********************************!*\ | |
!*** ./src/reportWebVitals.ts ***! | |
\********************************/ | |
/***/ ((module, __webpack_exports__, __webpack_require__) => { | |
"use strict"; | |
__webpack_require__.r(__webpack_exports__); | |
/* harmony export */ __webpack_require__.d(__webpack_exports__, { | |
/* harmony export */ "default": () => (__WEBPACK_DEFAULT_EXPORT__) | |
/* harmony export */ }); | |
/* provided dependency */ var __react_refresh_utils__ = __webpack_require__(/*! ./node_modules/@pmmmwh/react-refresh-webpack-plugin/lib/runtime/RefreshUtils.js */ "./node_modules/@pmmmwh/react-refresh-webpack-plugin/lib/runtime/RefreshUtils.js"); | |
__webpack_require__.$Refresh$.runtime = __webpack_require__(/*! ./node_modules/react-refresh/runtime.js */ "./node_modules/react-refresh/runtime.js"); | |
// Lazily loads the web-vitals package (as a separate webpack chunk) and subscribes
// the provided callback to the five Core Web Vitals metrics. No-op unless a
// function is passed.
// NOTE(review): the dynamic import chain has no .catch() — a failed chunk load
// surfaces as an unhandled promise rejection.
const reportWebVitals = onPerfEntry => {
  if (onPerfEntry && onPerfEntry instanceof Function) {
    __webpack_require__.e(/*! import() */ "node_modules_web-vitals_dist_web-vitals_js").then(__webpack_require__.bind(__webpack_require__, /*! web-vitals */ "./node_modules/web-vitals/dist/web-vitals.js")).then(({
      getCLS,
      getFID,
      getFCP,
      getLCP,
      getTTFB
    }) => {
      getCLS(onPerfEntry);
      getFID(onPerfEntry);
      getFCP(onPerfEntry);
      getLCP(onPerfEntry);
      getTTFB(onPerfEntry);
    });
  }
};
/* harmony default export */ const __WEBPACK_DEFAULT_EXPORT__ = (reportWebVitals); | |
const $ReactRefreshModuleId$ = __webpack_require__.$Refresh$.moduleId; | |
const $ReactRefreshCurrentExports$ = __react_refresh_utils__.getModuleExports( | |
$ReactRefreshModuleId$ | |
); | |
function $ReactRefreshModuleRuntime$(exports) { | |
if (true) { | |
let errorOverlay; | |
if (true) { | |
errorOverlay = false; | |
} | |
let testMode; | |
if (typeof __react_refresh_test__ !== 'undefined') { | |
testMode = __react_refresh_test__; | |
} | |
return __react_refresh_utils__.executeRuntime( | |
exports, | |
$ReactRefreshModuleId$, | |
module.hot, | |
errorOverlay, | |
testMode | |
); | |
} | |
} | |
if (typeof Promise !== 'undefined' && $ReactRefreshCurrentExports$ instanceof Promise) { | |
$ReactRefreshCurrentExports$.then($ReactRefreshModuleRuntime$); | |
} else { | |
$ReactRefreshModuleRuntime$($ReactRefreshCurrentExports$); | |
} | |
/***/ }), | |
/***/ "./src/setVideoSource.ts": | |
/*!*******************************!*\ | |
!*** ./src/setVideoSource.ts ***! | |
\*******************************/ | |
/***/ ((module, __webpack_exports__, __webpack_require__) => { | |
"use strict"; | |
__webpack_require__.r(__webpack_exports__); | |
/* harmony export */ __webpack_require__.d(__webpack_exports__, { | |
/* harmony export */ "default": () => (__WEBPACK_DEFAULT_EXPORT__) | |
/* harmony export */ }); | |
/* provided dependency */ var __react_refresh_utils__ = __webpack_require__(/*! ./node_modules/@pmmmwh/react-refresh-webpack-plugin/lib/runtime/RefreshUtils.js */ "./node_modules/@pmmmwh/react-refresh-webpack-plugin/lib/runtime/RefreshUtils.js"); | |
__webpack_require__.$Refresh$.runtime = __webpack_require__(/*! ./node_modules/react-refresh/runtime.js */ "./node_modules/react-refresh/runtime.js"); | |
/**
 * Attaches a camera MediaStream to a <video> element and reports the camera's
 * actual facing mode.
 *
 * @param {HTMLVideoElement|null} videoEl Target video element (null is tolerated).
 * @param {string} facingMode Requested facing mode constraint (e.g. 'user').
 * @param {(err: Error) => void} handleError Callback invoked on any failure.
 * @returns {Promise<string|null>} The first reported facingMode capability,
 *   'unknown' when getCapabilities is unsupported (Firefox), or null on
 *   failure / no tracks / no capability info.
 */
async function setVideoSource(videoEl, facingMode, handleError) {
  try {
    if (videoEl === null) {
      return null;
    }
    // Fail fast with a descriptive error when the Media Capture API is absent
    // (insecure context, old browser). Previously this fell through and threw a
    // confusing TypeError on `tracks.length` because `stream` was undefined.
    if (!navigator.mediaDevices?.getUserMedia) {
      handleError(new Error('navigator.mediaDevices.getUserMedia is not available'));
      return null;
    }
    const stream = await navigator.mediaDevices.getUserMedia({
      video: {
        facingMode
      }
    });
    videoEl.srcObject = stream;
    const tracks = stream?.getTracks() ?? [];
    if (tracks.length === 0) {
      return null;
    }
    /**
     * getCapabilities is not supported in Firefox
     */
    if (tracks[0].getCapabilities === undefined) {
      return 'unknown';
    }
    const capabilities = tracks[0].getCapabilities();
    console.log(capabilities);
    const f = capabilities?.facingMode || [];
    return f.length ? f[0] : null;
  } catch (err) {
    handleError(err);
    return null;
  }
}
/* harmony default export */ const __WEBPACK_DEFAULT_EXPORT__ = (setVideoSource); | |
const $ReactRefreshModuleId$ = __webpack_require__.$Refresh$.moduleId; | |
const $ReactRefreshCurrentExports$ = __react_refresh_utils__.getModuleExports( | |
$ReactRefreshModuleId$ | |
); | |
function $ReactRefreshModuleRuntime$(exports) { | |
if (true) { | |
let errorOverlay; | |
if (true) { | |
errorOverlay = false; | |
} | |
let testMode; | |
if (typeof __react_refresh_test__ !== 'undefined') { | |
testMode = __react_refresh_test__; | |
} | |
return __react_refresh_utils__.executeRuntime( | |
exports, | |
$ReactRefreshModuleId$, | |
module.hot, | |
errorOverlay, | |
testMode | |
); | |
} | |
} | |
if (typeof Promise !== 'undefined' && $ReactRefreshCurrentExports$ instanceof Promise) { | |
$ReactRefreshCurrentExports$.then($ReactRefreshModuleRuntime$); | |
} else { | |
$ReactRefreshModuleRuntime$($ReactRefreshCurrentExports$); | |
} | |
/***/ }), | |
/***/ "./node_modules/@pmmmwh/react-refresh-webpack-plugin/lib/runtime/RefreshUtils.js": | |
/*!***************************************************************************************!*\ | |
!*** ./node_modules/@pmmmwh/react-refresh-webpack-plugin/lib/runtime/RefreshUtils.js ***! | |
\***************************************************************************************/ | |
/***/ ((module, __unused_webpack_exports, __webpack_require__) => { | |
/* global __webpack_require__ */ | |
var Refresh = __webpack_require__(/*! react-refresh/runtime */ "./node_modules/react-refresh/runtime.js"); | |
/**
 * Extracts exports from a webpack module object.
 * Returns an empty object when the module cannot be resolved, so callers can
 * treat unrefreshable modules uniformly.
 * @param {string} moduleId A Webpack module ID.
 * @returns {*} An exports object from the module (or a Promise of one, for async modules).
 */
function getModuleExports(moduleId) {
  if (typeof moduleId === 'undefined') {
    // `moduleId` is unavailable, which indicates that this module is not in the cache,
    // which means we won't be able to capture any exports,
    // and thus they cannot be refreshed safely.
    // These are likely runtime or dynamically generated modules.
    return {};
  }
  var maybeModule = __webpack_require__.c[moduleId];
  if (typeof maybeModule === 'undefined') {
    // `moduleId` is available but the module in cache is unavailable,
    // which indicates the module is somehow corrupted (e.g. broken Webpack `module` globals).
    // We will warn the user (as this is likely a mistake) and assume they cannot be refreshed.
    console.warn('[React Refresh] Failed to get exports for module: ' + moduleId + '.');
    return {};
  }
  var exportsOrPromise = maybeModule.exports;
  if (typeof Promise !== 'undefined' && exportsOrPromise instanceof Promise) {
    // Async modules expose exports behind a Promise; unwrap once resolved.
    return exportsOrPromise.then(function (exports) {
      return exports;
    });
  }
  return exportsOrPromise;
}
/**
 * Computes the refresh-boundary signature of a module's exports: the family of
 * the exports object itself followed by, for object exports, each export key
 * paired with its family. If this signature changes between updates, the
 * boundary is unsafe to accept.
 *
 * Based on the equivalent implementation in [Metro](https://github.com/facebook/metro/blob/907d6af22ac6ebe58572be418e9253a90665ecbd/packages/metro/src/lib/polyfills/require.js#L795-L816).
 * @param {*} moduleExports A Webpack module exports object.
 * @returns {string[]} A React refresh boundary signature array.
 */
function getReactRefreshBoundarySignature(moduleExports) {
  var parts = [Refresh.getFamilyByType(moduleExports)];
  if (moduleExports == null || typeof moduleExports !== 'object') {
    // Non-object exports have no keys to enumerate.
    return parts;
  }
  for (var exportKey in moduleExports) {
    if (exportKey !== '__esModule') {
      parts.push(exportKey, Refresh.getFamilyByType(moduleExports[exportKey]));
    }
  }
  return parts;
}
/**
 * Builds the data payload retained across hot updates for a module. Only the
 * signature and the boundary flag are kept — not the exports themselves — so
 * previous module objects are not transitively retained across refreshes
 * (which would pressure RAM).
 *
 * @param {*} moduleExports A Webpack module exports object.
 * @returns {*} The hot data object ({ signature, isReactRefreshBoundary }).
 */
function getWebpackHotData(moduleExports) {
  var signature = getReactRefreshBoundarySignature(moduleExports);
  var isBoundary = isReactRefreshBoundary(moduleExports);
  return {
    signature: signature,
    isReactRefreshBoundary: isBoundary
  };
}
/**
 * Creates a helper that performs a delayed, debounced React refresh: calls made
 * while a refresh is already queued are coalesced (their callbacks dropped),
 * and the queued refresh fires once after 30ms.
 * @returns {function(function(): void): void} A debounced React refresh function.
 */
function createDebounceUpdate() {
  // setTimeout handle; undefined whenever no refresh is currently queued.
  var pendingTimeout;
  /**
   * Schedules a React refresh (plus the given callback) unless one is pending.
   * @param {function(): void} callback
   * @returns {void}
   */
  return function enqueueUpdate(callback) {
    if (typeof pendingTimeout !== 'undefined') {
      return; // a refresh is already scheduled; this update rides along with it
    }
    pendingTimeout = setTimeout(function () {
      pendingTimeout = undefined;
      Refresh.performReactRefresh();
      callback();
    }, 30);
  };
}
/**
 * Checks whether a module's exports look like React components: either the
 * exports object itself is component-like, or it is a non-empty object whose
 * every export (ignoring the `__esModule` marker) is component-like.
 *
 * Based on the equivalent implementation in [Metro](https://github.com/facebook/metro/blob/febdba2383113c88296c61e28e4ef6a7f4939fda/packages/metro/src/lib/polyfills/require.js#L748-L774).
 * @param {*} moduleExports A Webpack module exports object.
 * @returns {boolean} Whether the exports are React component like.
 */
function isReactRefreshBoundary(moduleExports) {
  if (Refresh.isLikelyComponentType(moduleExports)) {
    return true;
  }
  if (moduleExports === undefined || moduleExports === null || typeof moduleExports !== 'object') {
    // Non-object exports cannot be enumerated.
    return false;
  }
  var sawAnyKey = false;
  var onlyComponents = true;
  for (var exportKey in moduleExports) {
    sawAnyKey = true;
    if (exportKey === '__esModule') {
      continue; // ES module marker, not a real export
    }
    // Reading here is safe: Webpack assigns harmony exports to side-effect-free
    // getters. Ref: https://github.com/webpack/webpack/blob/b93048643fe74de2a6931755911da1212df55897/lib/MainTemplate.js#L281
    if (!Refresh.isLikelyComponentType(moduleExports[exportKey])) {
      onlyComponents = false;
    }
  }
  return sawAnyKey && onlyComponents;
}
/**
 * Registers component-like exports with the React refresh runtime: the exports
 * object itself (if component-like) under `<moduleId> %exports%`, and each
 * component-like named export under `<moduleId> %exports% <key>`.
 *
 * Based on the equivalent implementation in [Metro](https://github.com/facebook/metro/blob/febdba2383113c88296c61e28e4ef6a7f4939fda/packages/metro/src/lib/polyfills/require.js#L818-L835).
 * @param {*} moduleExports A Webpack module exports object.
 * @param {string} moduleId A Webpack module ID.
 * @returns {void}
 */
function registerExportsForReactRefresh(moduleExports, moduleId) {
  if (Refresh.isLikelyComponentType(moduleExports)) {
    // module.exports itself is likely a component — register it directly.
    Refresh.register(moduleExports, moduleId + ' %exports%');
  }
  if (moduleExports === undefined || moduleExports === null || typeof moduleExports !== 'object') {
    // Non-object exports cannot be enumerated.
    return;
  }
  for (var exportKey in moduleExports) {
    if (exportKey === '__esModule') {
      continue; // skip the ES module marker
    }
    var candidate = moduleExports[exportKey];
    if (Refresh.isLikelyComponentType(candidate)) {
      Refresh.register(candidate, moduleId + ' %exports% ' + exportKey);
    }
  }
}
/**
 * Decides whether a refresh boundary must be invalidated by comparing the
 * previous and next export signatures element-wise; any length or item
 * mismatch invalidates the boundary.
 *
 * Based on the equivalent implementation in [Metro](https://github.com/facebook/metro/blob/907d6af22ac6ebe58572be418e9253a90665ecbd/packages/metro/src/lib/polyfills/require.js#L776-L792).
 * @param {*} prevSignature The signature of the current Webpack module exports object.
 * @param {*} nextSignature The signature of the next Webpack module exports object.
 * @returns {boolean} Whether the React refresh boundary should be invalidated.
 */
function shouldInvalidateReactRefreshBoundary(prevSignature, nextSignature) {
  if (prevSignature.length !== nextSignature.length) {
    return true;
  }
  return nextSignature.some(function (item, index) {
    return prevSignature[index] !== item;
  });
}
var enqueueUpdate = createDebounceUpdate(); | |
function executeRuntime(moduleExports, moduleId, webpackHot, refreshOverlay, isTest) { | |
registerExportsForReactRefresh(moduleExports, moduleId); | |
if (webpackHot) { | |
var isHotUpdate = !!webpackHot.data; | |
var prevData; | |
if (isHotUpdate) { | |
prevData = webpackHot.data.prevData; | |
} | |
if (isReactRefreshBoundary(moduleExports)) { | |
webpackHot.dispose( | |
/** | |
* A callback to performs a full refresh if React has unrecoverable errors, | |
* and also caches the to-be-disposed module. | |
* @param {*} data A hot module data object from Webpack HMR. | |
* @returns {void} | |
*/ | |
function hotDisposeCallback(data) { | |
// We have to mutate the data object to get data registered and cached | |
data.prevData = getWebpackHotData(moduleExports); | |
}); | |
webpackHot.accept( | |
/** | |
* An error handler to allow self-recovering behaviours. | |
* @param {Error} error An error occurred during evaluation of a module. | |
* @returns {void} | |
*/ | |
function hotErrorHandler(error) { | |
if (typeof refreshOverlay !== 'undefined' && refreshOverlay) { | |
refreshOverlay.handleRuntimeError(error); | |
} | |
if (typeof isTest !== 'undefined' && isTest) { | |
if (window.onHotAcceptError) { | |
window.onHotAcceptError(error.message); | |
} | |
} | |
__webpack_require__.c[moduleId].hot.accept(hotErrorHandler); | |
}); | |
if (isHotUpdate) { | |
if (prevData && prevData.isReactRefreshBoundary && shouldInvalidateReactRefreshBoundary(prevData.signature, getReactRefreshBoundarySignature(moduleExports))) { | |
webpackHot.invalidate(); | |
} else { | |
enqueueUpdate( | |
/** | |
* A function to dismiss the error overlay after performing React refresh. | |
* @returns {void} | |
*/ | |
function updateCallback() { | |
if (typeof refreshOverlay !== 'undefined' && refreshOverlay) { | |
refreshOverlay.clearRuntimeErrors(); | |
} | |
}); | |
} | |
} | |
} else { | |
if (isHotUpdate && typeof prevData !== 'undefined') { | |
webpackHot.invalidate(); | |
} | |
} | |
} | |
} | |
module.exports = Object.freeze({ | |
enqueueUpdate: enqueueUpdate, | |
executeRuntime: executeRuntime, | |
getModuleExports: getModuleExports, | |
isReactRefreshBoundary: isReactRefreshBoundary, | |
registerExportsForReactRefresh: registerExportsForReactRefresh | |
}); | |
/***/ }), | |
/***/ "./node_modules/@vladmandic/human/dist/human.esm.js": | |
/*!**********************************************************!*\ | |
!*** ./node_modules/@vladmandic/human/dist/human.esm.js ***! | |
\**********************************************************/ | |
/***/ ((__unused_webpack_module, __webpack_exports__, __webpack_require__) => { | |
"use strict"; | |
var __filename = "/index.js"; | |
var __dirname = "/"; | |
__webpack_require__.r(__webpack_exports__); | |
/* harmony export */ __webpack_require__.d(__webpack_exports__, { | |
/* harmony export */ Env: () => (/* binding */ Env), | |
/* harmony export */ Human: () => (/* binding */ Human), | |
/* harmony export */ "default": () => (/* binding */ Human), | |
/* harmony export */ defaults: () => (/* binding */ config), | |
/* harmony export */ draw: () => (/* binding */ draw_exports), | |
/* harmony export */ empty: () => (/* binding */ empty), | |
/* harmony export */ env: () => (/* binding */ env), | |
/* harmony export */ match: () => (/* binding */ match_exports), | |
/* harmony export */ models: () => (/* binding */ models_exports2) | |
/* harmony export */ }); | |
/* | |
Human | |
homepage: <https://github.com/vladmandic/human> | |
author: <https://github.com/vladmandic>' | |
*/ | |
var __defProp = Object.defineProperty; | |
var __defNormalProp = (obj, key, value) => key in obj ? __defProp(obj, key, { | |
enumerable: true, | |
configurable: true, | |
writable: true, | |
value | |
}) : obj[key] = value; | |
var __export = (target, all2) => { | |
for (var name in all2) __defProp(target, name, { | |
get: all2[name], | |
enumerable: true | |
}); | |
}; | |
var __publicField = (obj, key, value) => { | |
__defNormalProp(obj, typeof key !== "symbol" ? key + "" : key, value); | |
return value; | |
}; | |
var __accessCheck = (obj, member, msg) => { | |
if (!member.has(obj)) throw TypeError("Cannot " + msg); | |
}; | |
var __privateGet = (obj, member, getter) => { | |
__accessCheck(obj, member, "read from private field"); | |
return getter ? getter.call(obj) : member.get(obj); | |
}; | |
var __privateAdd = (obj, member, value) => { | |
if (member.has(obj)) throw TypeError("Cannot add the same private member more than once"); | |
member instanceof WeakSet ? member.add(obj) : member.set(obj, value); | |
}; | |
var __privateSet = (obj, member, value, setter) => { | |
__accessCheck(obj, member, "write to private field"); | |
setter ? setter.call(obj, value) : member.set(obj, value); | |
return value; | |
}; | |
// dist/tfjs.esm.js | |
var tfjs_esm_exports = {}; | |
__export(tfjs_esm_exports, { | |
Abs: () => Xs, | |
Acos: () => Wo, | |
Acosh: () => Uo, | |
AdadeltaOptimizer: () => np, | |
AdagradOptimizer: () => sp, | |
AdamOptimizer: () => ap, | |
AdamaxOptimizer: () => ip, | |
Add: () => uo, | |
AddN: () => Go, | |
All: () => Ho, | |
Any: () => Ko, | |
ArgMax: () => Ys, | |
ArgMin: () => Qs, | |
Asin: () => qo, | |
Asinh: () => jo, | |
Atan: () => Xo, | |
Atan2: () => Qo, | |
Atanh: () => Yo, | |
AvgPool: () => Zo, | |
AvgPool3D: () => Zs, | |
AvgPool3DGrad: () => Ai, | |
AvgPoolGrad: () => Di, | |
BackendWasm: () => pm, | |
BatchMatMul: () => Jo, | |
BatchToSpaceND: () => Js, | |
Bincount: () => en, | |
BitwiseAnd: () => ja, | |
BroadcastArgs: () => ea, | |
BroadcastTo: () => Bce, | |
Cast: () => bo, | |
Ceil: () => tn, | |
ClipByValue: () => Co, | |
Complex: () => Fi, | |
ComplexAbs: () => Pi, | |
Concat: () => ta, | |
Conv2D: () => rn, | |
Conv2DBackpropFilter: () => Oi, | |
Conv2DBackpropInput: () => on, | |
Conv3D: () => nn, | |
Conv3DBackpropFilterV2: () => Xa, | |
Conv3DBackpropInputV2: () => sn, | |
Cos: () => an, | |
Cosh: () => un, | |
CropAndResize: () => ln, | |
Cumprod: () => pn, | |
Cumsum: () => cn, | |
DataStorage: () => zo, | |
DenseBincount: () => ra, | |
DepthToSpace: () => mn, | |
DepthwiseConv2dNative: () => dn, | |
DepthwiseConv2dNativeBackpropFilter: () => Mi, | |
DepthwiseConv2dNativeBackpropInput: () => Li, | |
Diag: () => oa, | |
Dilation2D: () => fn, | |
Dilation2DBackpropFilter: () => zi, | |
Dilation2DBackpropInput: () => Bi, | |
Draw: () => Pu, | |
ENV: () => nw, | |
Einsum: () => Vi, | |
Elu: () => gn, | |
EluGrad: () => Ya, | |
Environment: () => hl, | |
Equal: () => yn, | |
Erf: () => xn, | |
Exp: () => bn, | |
ExpandDims: () => na, | |
Expm1: () => Cn, | |
FFT: () => Wi, | |
Fill: () => sa, | |
FlipLeftRight: () => wn, | |
Floor: () => Sn, | |
FloorDiv: () => In, | |
FromPixels: () => Mu, | |
FusedBatchNorm: () => vn, | |
FusedConv2D: () => vo, | |
FusedDepthwiseConv2D: () => ko, | |
GPGPUContext: () => kp, | |
GatherNd: () => kn, | |
GatherV2: () => aa, | |
GraphModel: () => Bl, | |
Greater: () => Nn, | |
GreaterEqual: () => Tn, | |
IFFT: () => Ui, | |
Identity: () => wo, | |
Imag: () => Gi, | |
IsFinite: () => _n, | |
IsInf: () => $n, | |
IsNan: () => En, | |
KernelBackend: () => ao, | |
LRN: () => zn, | |
LRNGrad: () => Qa, | |
LeakyRelu: () => Rn, | |
Less: () => Dn, | |
LessEqual: () => An, | |
LinSpace: () => Fn, | |
Log: () => Pn, | |
Log1p: () => On, | |
LogSoftmax: () => zce, | |
LogicalAnd: () => Mn, | |
LogicalNot: () => Ln, | |
LogicalOr: () => Bn, | |
LogicalXor: () => $0, | |
LowerBound: () => Vce, | |
MathBackendCPU: () => xu, | |
MathBackendWebGL: () => wu, | |
MatrixBandPart: () => Wce, | |
Max: () => Vn, | |
MaxPool: () => Un, | |
MaxPool3D: () => ia, | |
MaxPool3DGrad: () => Ki, | |
MaxPoolGrad: () => Hi, | |
MaxPoolWithArgmax: () => ua, | |
Maximum: () => Wn, | |
Mean: () => Gn, | |
Min: () => Hn, | |
Minimum: () => Kn, | |
MirrorPad: () => qn, | |
Mod: () => jn, | |
MomentumOptimizer: () => up, | |
Multinomial: () => Xn, | |
Multiply: () => Yn, | |
Neg: () => pa, | |
NonMaxSuppressionV3: () => Zn, | |
NonMaxSuppressionV4: () => Za, | |
NonMaxSuppressionV5: () => Jn, | |
NotEqual: () => Qn, | |
OP_SCOPE_SUFFIX: () => kw, | |
OneHot: () => es, | |
OnesLike: () => ca, | |
Optimizer: () => Nr, | |
OptimizerConstructors: () => Pl, | |
Pack: () => la, | |
PadV2: () => ts, | |
Pool: () => Uce, | |
Pow: () => rs, | |
Prelu: () => os, | |
Prod: () => ns, | |
RMSPropOptimizer: () => pp, | |
RaggedGather: () => Qp, | |
RaggedRange: () => Zp, | |
RaggedTensorToTensor: () => Jp, | |
Range: () => ma, | |
Rank: () => hw, | |
Real: () => qi, | |
RealDiv: () => hn, | |
Reciprocal: () => ss, | |
Reduction: () => Rt, | |
Relu: () => as, | |
Relu6: () => ps, | |
Reshape: () => da, | |
ResizeBilinear: () => us, | |
ResizeBilinearGrad: () => ei, | |
ResizeNearestNeighbor: () => is, | |
ResizeNearestNeighborGrad: () => Ja, | |
Reverse: () => cs, | |
RotateWithOffset: () => As, | |
Round: () => ls, | |
Rsqrt: () => ms, | |
SGDOptimizer: () => mi, | |
ScatterNd: () => ds, | |
SearchSorted: () => hs, | |
Select: () => fa, | |
Selu: () => gs, | |
Sigmoid: () => Cs, | |
Sign: () => bs, | |
Sin: () => xs, | |
Sinh: () => ys, | |
Slice: () => ha, | |
Softmax: () => vs, | |
Softplus: () => ws, | |
SpaceToBatchND: () => ga, | |
SparseFillEmptyRows: () => ji, | |
SparseReshape: () => ti, | |
SparseSegmentMean: () => ya, | |
SparseSegmentSum: () => ba, | |
SparseToDense: () => ks, | |
SplitV: () => xa, | |
Sqrt: () => Ss, | |
Square: () => Xi, | |
SquaredDifference: () => Ns, | |
StaticRegexReplace: () => Ou, | |
Step: () => So, | |
StridedSlice: () => Ts, | |
StringNGrams: () => Ca, | |
StringSplit: () => Yi, | |
StringToHashBucketFast: () => Qi, | |
Sub: () => _s, | |
Sum: () => Is, | |
Tan: () => $s, | |
Tanh: () => Es, | |
Tensor: () => ut, | |
TensorBuffer: () => tt, | |
TensorScatterUpdate: () => fs, | |
Tile: () => po, | |
TopK: () => Rs, | |
Transform: () => Ds, | |
Transpose: () => co, | |
Unique: () => Zi, | |
Unpack: () => wa, | |
UnsortedSegmentSum: () => Ji, | |
UpperBound: () => Gce, | |
Variable: () => oi, | |
WebGPUBackend: () => Tu, | |
ZerosLike: () => Sa, | |
_FusedMatMul: () => Io, | |
abs: () => Jt, | |
acos: () => kk, | |
acosh: () => Nk, | |
add: () => Ce, | |
addN: () => Tk, | |
all: () => _k, | |
any: () => $k, | |
argMax: () => Ek, | |
argMin: () => Rk, | |
asin: () => Dk, | |
asinh: () => Ak, | |
atan: () => Fk, | |
atan2: () => Pk, | |
atanh: () => Ok, | |
avgPool: () => fd, | |
avgPool3d: () => Bk, | |
backend: () => vde, | |
backend_util: () => w, | |
basicLSTMCell: () => zk, | |
batchNorm: () => au, | |
batchNorm2d: () => Wk, | |
batchNorm3d: () => Uk, | |
batchNorm4d: () => Gk, | |
batchToSpaceND: () => hd, | |
bincount: () => gd, | |
bitwiseAnd: () => Hk, | |
booleanMaskAsync: () => E6, | |
broadcastArgs: () => Kk, | |
broadcastTo: () => iu, | |
broadcast_util: () => Ir, | |
browser: () => oT, | |
buffer: () => me, | |
cast: () => We, | |
ceil: () => qk, | |
clipByValue: () => jk, | |
clone: () => Ur, | |
complex: () => Er, | |
concat: () => bt, | |
concat1d: () => Xk, | |
concat2d: () => Yk, | |
concat3d: () => Qk, | |
concat4d: () => Zk, | |
conv1d: () => Jk, | |
conv2d: () => uu, | |
conv2dTranspose: () => e2, | |
conv3d: () => t2, | |
conv3dTranspose: () => o2, | |
copyRegisteredKernels: () => Zce, | |
cos: () => n2, | |
cosh: () => s2, | |
cosineWindow: () => Rl, | |
cumprod: () => a2, | |
cumsum: () => i2, | |
customGrad: () => vr, | |
denseBincount: () => u2, | |
deprecationWarn: () => Pw, | |
depthToSpace: () => p2, | |
depthwiseConv2d: () => lc, | |
deregisterOp: () => A5, | |
device_util: () => ou, | |
diag: () => c2, | |
dilation2d: () => l2, | |
disableDeprecationWarnings: () => dde, | |
dispose: () => Mt, | |
disposeVariables: () => fde, | |
div: () => je, | |
divNoNan: () => d2, | |
dot: () => f2, | |
dropout: () => W6, | |
einsum: () => pu, | |
elu: () => Cd, | |
enableDebugMode: () => mde, | |
enableProdMode: () => lde, | |
enclosingPowerOfTwo: () => Qw, | |
engine: () => pr, | |
ensureShape: () => h2, | |
env: () => A, | |
equal: () => bd, | |
erf: () => g2, | |
euclideanNorm: () => b2, | |
exp: () => $o, | |
expandDims: () => Ms, | |
expm1: () => C2, | |
eye: () => wd, | |
fft: () => fc, | |
fill: () => Ea, | |
findBackend: () => Sde, | |
findBackendFactory: () => Ide, | |
floor: () => Sd, | |
floorDiv: () => dd, | |
forceHalfFloat: () => MD, | |
fused: () => Zw, | |
gather: () => Id, | |
gatherND: () => z6, | |
gather_util: () => af, | |
getBackend: () => Cde, | |
getGradient: () => iw, | |
getKernel: () => tc, | |
getKernelsForBackend: () => Ym, | |
getThreadsCount: () => Zse, | |
gpgpu_util: () => cv, | |
grad: () => AK, | |
grads: () => FK, | |
greater: () => qu, | |
greaterEqual: () => vd, | |
ifft: () => Ju, | |
imag: () => lu, | |
image: () => Kj, | |
inTopKAsync: () => G6, | |
io: () => fi, | |
irfft: () => Kd, | |
isFinite: () => w2, | |
isInf: () => S2, | |
isNaN: () => I2, | |
keep: () => Rr, | |
kernel_impls: () => Wt, | |
leakyRelu: () => kd, | |
less: () => _l, | |
lessEqual: () => mc, | |
linalg: () => qj, | |
linspace: () => v2, | |
loadGraphModel: () => $8, | |
loadGraphModelSync: () => E8, | |
localResponseNormalization: () => k2, | |
log: () => pi, | |
log1p: () => Nd, | |
logSigmoid: () => N2, | |
logSoftmax: () => T2, | |
logSumExp: () => $d, | |
logicalAnd: () => ju, | |
logicalNot: () => Ed, | |
logicalOr: () => Rd, | |
logicalXor: () => _2, | |
losses: () => jj, | |
lowerBound: () => $2, | |
matMul: () => Ze, | |
math: () => JN, | |
max: () => Ra, | |
maxPool: () => Ad, | |
maxPool3d: () => E2, | |
maxPoolWithArgmax: () => R2, | |
maximum: () => Fd, | |
mean: () => Xu, | |
memory: () => hde, | |
meshgrid: () => D2, | |
min: () => Tl, | |
minimum: () => Yu, | |
mirrorPad: () => A2, | |
mod: () => F2, | |
moments: () => P2, | |
movingAverage: () => A6, | |
mul: () => se, | |
multiRNNCell: () => O2, | |
multinomial: () => M2, | |
neg: () => cr, | |
nextFrame: () => pS, | |
norm: () => Ku, | |
notEqual: () => Pd, | |
oneHot: () => El, | |
ones: () => Da, | |
onesLike: () => L2, | |
op: () => N, | |
outerProduct: () => B2, | |
pad: () => Aa, | |
pad1d: () => z2, | |
pad2d: () => V2, | |
pad3d: () => W2, | |
pad4d: () => U2, | |
pool: () => G2, | |
pow: () => ui, | |
prelu: () => Md, | |
print: () => md, | |
prod: () => H2, | |
profile: () => gde, | |
raggedGather: () => K2, | |
raggedRange: () => q2, | |
raggedTensorToTensor: () => j2, | |
rand: () => X2, | |
randomGamma: () => g1, | |
randomNormal: () => Ud, | |
randomStandardNormal: () => x1, | |
randomUniform: () => dc, | |
randomUniformInt: () => y1, | |
range: () => mu, | |
ready: () => bde, | |
real: () => ci, | |
reciprocal: () => b1, | |
registerBackend: () => su, | |
registerGradient: () => Xce, | |
registerKernel: () => ri, | |
registerOp: () => D5, | |
relu: () => du, | |
relu6: () => Gd, | |
removeBackend: () => wde, | |
reshape: () => W, | |
reverse: () => mo, | |
reverse1d: () => C1, | |
reverse2d: () => w1, | |
reverse3d: () => S1, | |
reverse4d: () => I1, | |
rfft: () => hc, | |
round: () => Hd, | |
rsqrt: () => v1, | |
scalar: () => ke, | |
scatterND: () => P6, | |
scatter_util: () => hu, | |
searchSorted: () => $l, | |
selu: () => k1, | |
separableConv2d: () => N1, | |
serialization: () => WN, | |
setBackend: () => yde, | |
setPlatform: () => kde, | |
setThreadsCount: () => Qse, | |
setWasmPath: () => Xse, | |
setWasmPaths: () => Yse, | |
setWebGLContext: () => vI, | |
setdiff1dAsync: () => T1, | |
shared: () => Tc, | |
sigmoid: () => $a, | |
sign: () => _1, | |
signal: () => Hj, | |
sin: () => $1, | |
sinh: () => E1, | |
slice: () => Xe, | |
slice1d: () => R1, | |
slice2d: () => D1, | |
slice3d: () => A1, | |
slice4d: () => F1, | |
slice_util: () => ct, | |
softmax: () => P1, | |
softplus: () => _d, | |
spaceToBatchND: () => Od, | |
sparse: () => Xj, | |
sparseToDense: () => L6, | |
spectral: () => Gj, | |
split: () => li, | |
sqrt: () => Dr, | |
square: () => er, | |
squaredDifference: () => qd, | |
squeeze: () => gc, | |
stack: () => kr, | |
step: () => jd, | |
stridedSlice: () => O1, | |
string: () => Yj, | |
sub: () => Te, | |
sum: () => ot, | |
sumOutType: () => ni, | |
tan: () => M1, | |
tanh: () => Nl, | |
tensor: () => ur, | |
tensor1d: () => tr, | |
tensor2d: () => fu, | |
tensor3d: () => Xd, | |
tensor4d: () => L1, | |
tensor5d: () => B1, | |
tensor6d: () => z1, | |
tensorScatterUpdate: () => W1, | |
tensor_util: () => ek, | |
test_util: () => h1, | |
tidy: () => De, | |
tile: () => cu, | |
time: () => xde, | |
topk: () => U1, | |
train: () => TGe, | |
transpose: () => yc, | |
truncatedNormal: () => G1, | |
unique: () => H1, | |
unregisterGradient: () => Qce, | |
unregisterKernel: () => Yce, | |
unsortedSegmentSum: () => K1, | |
unstack: () => fo, | |
upcastType: () => dt, | |
upperBound: () => q1, | |
util: () => y, | |
valueAndGrad: () => PK, | |
valueAndGrads: () => OK, | |
variable: () => j1, | |
variableGrads: () => zw, | |
version: () => Ace, | |
version_converter: () => D8, | |
version_core: () => _X, | |
version_cpu: () => cY, | |
version_wasm: () => Jse, | |
version_webgl: () => s9, | |
webgl: () => Cat, | |
webgl_util: () => Ac, | |
webgpu_util: () => Yv, | |
where: () => lo, | |
whereAsync: () => Qd, | |
zeros: () => Gr, | |
zerosLike: () => Ht | |
}); | |
var wG = Object.create; | |
var QC = Object.defineProperty; | |
var SG = Object.getOwnPropertyDescriptor; | |
var IG = Object.getOwnPropertyNames; | |
var vG = Object.getPrototypeOf; | |
var kG = Object.prototype.hasOwnProperty; | |
var qt = (r, t8) => () => (t8 || r((t8 = { | |
exports: {} | |
}).exports, t8), t8.exports); | |
var qe = (r, t8) => { | |
for (var e in t8) QC(r, e, { | |
get: t8[e], | |
enumerable: true | |
}); | |
}; | |
var NG = (r, t8, e, o) => { | |
if (t8 && typeof t8 == "object" || typeof t8 == "function") for (let n of IG(t8)) !kG.call(r, n) && n !== e && QC(r, n, { | |
get: () => t8[n], | |
enumerable: !(o = SG(t8, n)) || o.enumerable | |
}); | |
return r; | |
}; | |
var Kp = (r, t8, e) => (e = r != null ? wG(vG(r)) : {}, NG(t8 || !r || !r.__esModule ? QC(e, "default", { | |
value: r, | |
enumerable: true | |
}) : e, r)); | |
var V0 = qt((tle, z0) => { | |
z0.exports = Nt; | |
var No = null; | |
try { | |
No = new WebAssembly.Instance(new WebAssembly.Module(new Uint8Array([0, 97, 115, 109, 1, 0, 0, 0, 1, 13, 2, 96, 0, 1, 127, 96, 4, 127, 127, 127, 127, 1, 127, 3, 7, 6, 0, 1, 1, 1, 1, 1, 6, 6, 1, 127, 1, 65, 0, 11, 7, 50, 6, 3, 109, 117, 108, 0, 1, 5, 100, 105, 118, 95, 115, 0, 2, 5, 100, 105, 118, 95, 117, 0, 3, 5, 114, 101, 109, 95, 115, 0, 4, 5, 114, 101, 109, 95, 117, 0, 5, 8, 103, 101, 116, 95, 104, 105, 103, 104, 0, 0, 10, 191, 1, 6, 4, 0, 35, 0, 11, 36, 1, 1, 126, 32, 0, 173, 32, 1, 173, 66, 32, 134, 132, 32, 2, 173, 32, 3, 173, 66, 32, 134, 132, 126, 34, 4, 66, 32, 135, 167, 36, 0, 32, 4, 167, 11, 36, 1, 1, 126, 32, 0, 173, 32, 1, 173, 66, 32, 134, 132, 32, 2, 173, 32, 3, 173, 66, 32, 134, 132, 127, 34, 4, 66, 32, 135, 167, 36, 0, 32, 4, 167, 11, 36, 1, 1, 126, 32, 0, 173, 32, 1, 173, 66, 32, 134, 132, 32, 2, 173, 32, 3, 173, 66, 32, 134, 132, 128, 34, 4, 66, 32, 135, 167, 36, 0, 32, 4, 167, 11, 36, 1, 1, 126, 32, 0, 173, 32, 1, 173, 66, 32, 134, 132, 32, 2, 173, 32, 3, 173, 66, 32, 134, 132, 129, 34, 4, 66, 32, 135, 167, 36, 0, 32, 4, 167, 11, 36, 1, 1, 126, 32, 0, 173, 32, 1, 173, 66, 32, 134, 132, 32, 2, 173, 32, 3, 173, 66, 32, 134, 132, 130, 34, 4, 66, 32, 135, 167, 36, 0, 32, 4, 167, 11])), {}).exports; | |
} catch (r) {} | |
function Nt(r, t8, e) { | |
this.low = r | 0, this.high = t8 | 0, this.unsigned = !!e; | |
} | |
Nt.prototype.__isLong__; | |
Object.defineProperty(Nt.prototype, "__isLong__", { | |
value: true | |
}); | |
function Wr(r) { | |
return (r && r.__isLong__) === true; | |
} | |
Nt.isLong = Wr; | |
var R0 = {}, | |
D0 = {}; | |
function Bu(r, t8) { | |
var e, o, n; | |
return t8 ? (r >>>= 0, (n = 0 <= r && r < 256) && (o = D0[r], o) ? o : (e = Tt(r, (r | 0) < 0 ? -1 : 0, true), n && (D0[r] = e), e)) : (r |= 0, (n = -128 <= r && r < 128) && (o = R0[r], o) ? o : (e = Tt(r, r < 0 ? -1 : 0, false), n && (R0[r] = e), e)); | |
} | |
Nt.fromInt = Bu; | |
function To(r, t8) { | |
if (isNaN(r)) return t8 ? Lu : _o; | |
if (t8) { | |
if (r < 0) return Lu; | |
if (r >= O0) return B0; | |
} else { | |
if (r <= -F0) return Vr; | |
if (r + 1 >= F0) return L0; | |
} | |
return r < 0 ? To(-r, t8).neg() : Tt(r % oc | 0, r / oc | 0, t8); | |
} | |
Nt.fromNumber = To; | |
function Tt(r, t8, e) { | |
return new Nt(r, t8, e); | |
} | |
Nt.fromBits = Tt; | |
var Zm = Math.pow; | |
function cw(r, t8, e) { | |
if (r.length === 0) throw Error("empty string"); | |
if (r === "NaN" || r === "Infinity" || r === "+Infinity" || r === "-Infinity") return _o; | |
if (typeof t8 == "number" ? (e = t8, t8 = false) : t8 = !!t8, e = e || 10, e < 2 || 36 < e) throw RangeError("radix"); | |
var o; | |
if ((o = r.indexOf("-")) > 0) throw Error("interior hyphen"); | |
if (o === 0) return cw(r.substring(1), t8, e).neg(); | |
for (var n = To(Zm(e, 8)), s = _o, a = 0; a < r.length; a += 8) { | |
var i = Math.min(8, r.length - a), | |
p = parseInt(r.substring(a, a + i), e); | |
if (i < 8) { | |
var u = To(Zm(e, i)); | |
s = s.mul(u).add(To(p)); | |
} else s = s.mul(n), s = s.add(To(p)); | |
} | |
return s.unsigned = t8, s; | |
} | |
Nt.fromString = cw; | |
function Fs(r, t8) { | |
return typeof r == "number" ? To(r, t8) : typeof r == "string" ? cw(r, t8) : Tt(r.low, r.high, typeof t8 == "boolean" ? t8 : r.unsigned); | |
} | |
Nt.fromValue = Fs; | |
var A0 = 65536, | |
XG = 1 << 24, | |
oc = A0 * A0, | |
O0 = oc * oc, | |
F0 = O0 / 2, | |
P0 = Bu(XG), | |
_o = Bu(0); | |
Nt.ZERO = _o; | |
var Lu = Bu(0, true); | |
Nt.UZERO = Lu; | |
var rc = Bu(1); | |
Nt.ONE = rc; | |
var M0 = Bu(1, true); | |
Nt.UONE = M0; | |
var pw = Bu(-1); | |
Nt.NEG_ONE = pw; | |
var L0 = Tt(-1, 2147483647, false); | |
Nt.MAX_VALUE = L0; | |
var B0 = Tt(-1, -1, true); | |
Nt.MAX_UNSIGNED_VALUE = B0; | |
var Vr = Tt(0, -2147483648, false); | |
Nt.MIN_VALUE = Vr; | |
var de = Nt.prototype; | |
de.toInt = function () { | |
return this.unsigned ? this.low >>> 0 : this.low; | |
}; | |
de.toNumber = function () { | |
return this.unsigned ? (this.high >>> 0) * oc + (this.low >>> 0) : this.high * oc + (this.low >>> 0); | |
}; | |
de.toString = function (t8) { | |
if (t8 = t8 || 10, t8 < 2 || 36 < t8) throw RangeError("radix"); | |
if (this.isZero()) return "0"; | |
if (this.isNegative()) if (this.eq(Vr)) { | |
var e = To(t8), | |
o = this.div(e), | |
n = o.mul(e).sub(this); | |
return o.toString(t8) + n.toInt().toString(t8); | |
} else return "-" + this.neg().toString(t8); | |
for (var s = To(Zm(t8, 6), this.unsigned), a = this, i = "";;) { | |
var p = a.div(s), | |
u = a.sub(p.mul(s)).toInt() >>> 0, | |
c = u.toString(t8); | |
if (a = p, a.isZero()) return c + i; | |
for (; c.length < 6;) c = "0" + c; | |
i = "" + c + i; | |
} | |
}; | |
de.getHighBits = function () { | |
return this.high; | |
}; | |
de.getHighBitsUnsigned = function () { | |
return this.high >>> 0; | |
}; | |
de.getLowBits = function () { | |
return this.low; | |
}; | |
de.getLowBitsUnsigned = function () { | |
return this.low >>> 0; | |
}; | |
de.getNumBitsAbs = function () { | |
if (this.isNegative()) return this.eq(Vr) ? 64 : this.neg().getNumBitsAbs(); | |
for (var t8 = this.high != 0 ? this.high : this.low, e = 31; e > 0 && !(t8 & 1 << e); e--); | |
return this.high != 0 ? e + 33 : e + 1; | |
}; | |
de.isZero = function () { | |
return this.high === 0 && this.low === 0; | |
}; | |
de.eqz = de.isZero; | |
de.isNegative = function () { | |
return !this.unsigned && this.high < 0; | |
}; | |
de.isPositive = function () { | |
return this.unsigned || this.high >= 0; | |
}; | |
de.isOdd = function () { | |
return (this.low & 1) === 1; | |
}; | |
de.isEven = function () { | |
return (this.low & 1) === 0; | |
}; | |
de.equals = function (t8) { | |
return Wr(t8) || (t8 = Fs(t8)), this.unsigned !== t8.unsigned && this.high >>> 31 === 1 && t8.high >>> 31 === 1 ? false : this.high === t8.high && this.low === t8.low; | |
}; | |
de.eq = de.equals; | |
de.notEquals = function (t8) { | |
return !this.eq(t8); | |
}; | |
de.neq = de.notEquals; | |
de.ne = de.notEquals; | |
de.lessThan = function (t8) { | |
return this.comp(t8) < 0; | |
}; | |
de.lt = de.lessThan; | |
de.lessThanOrEqual = function (t8) { | |
return this.comp(t8) <= 0; | |
}; | |
de.lte = de.lessThanOrEqual; | |
de.le = de.lessThanOrEqual; | |
de.greaterThan = function (t8) { | |
return this.comp(t8) > 0; | |
}; | |
de.gt = de.greaterThan; | |
de.greaterThanOrEqual = function (t8) { | |
return this.comp(t8) >= 0; | |
}; | |
de.gte = de.greaterThanOrEqual; | |
de.ge = de.greaterThanOrEqual; | |
de.compare = function (t8) { | |
if (Wr(t8) || (t8 = Fs(t8)), this.eq(t8)) return 0; | |
var e = this.isNegative(), | |
o = t8.isNegative(); | |
return e && !o ? -1 : !e && o ? 1 : this.unsigned ? t8.high >>> 0 > this.high >>> 0 || t8.high === this.high && t8.low >>> 0 > this.low >>> 0 ? -1 : 1 : this.sub(t8).isNegative() ? -1 : 1; | |
}; | |
de.comp = de.compare; | |
de.negate = function () { | |
return !this.unsigned && this.eq(Vr) ? Vr : this.not().add(rc); | |
}; | |
de.neg = de.negate; | |
de.add = function (t8) { | |
Wr(t8) || (t8 = Fs(t8)); | |
var e = this.high >>> 16, | |
o = this.high & 65535, | |
n = this.low >>> 16, | |
s = this.low & 65535, | |
a = t8.high >>> 16, | |
i = t8.high & 65535, | |
p = t8.low >>> 16, | |
u = t8.low & 65535, | |
c = 0, | |
l = 0, | |
m = 0, | |
d = 0; | |
return d += s + u, m += d >>> 16, d &= 65535, m += n + p, l += m >>> 16, m &= 65535, l += o + i, c += l >>> 16, l &= 65535, c += e + a, c &= 65535, Tt(m << 16 | d, c << 16 | l, this.unsigned); | |
}; | |
de.subtract = function (t8) { | |
return Wr(t8) || (t8 = Fs(t8)), this.add(t8.neg()); | |
}; | |
de.sub = de.subtract; | |
de.multiply = function (t8) { | |
if (this.isZero()) return _o; | |
if (Wr(t8) || (t8 = Fs(t8)), No) { | |
var e = No.mul(this.low, this.high, t8.low, t8.high); | |
return Tt(e, No.get_high(), this.unsigned); | |
} | |
if (t8.isZero()) return _o; | |
if (this.eq(Vr)) return t8.isOdd() ? Vr : _o; | |
if (t8.eq(Vr)) return this.isOdd() ? Vr : _o; | |
if (this.isNegative()) return t8.isNegative() ? this.neg().mul(t8.neg()) : this.neg().mul(t8).neg(); | |
if (t8.isNegative()) return this.mul(t8.neg()).neg(); | |
if (this.lt(P0) && t8.lt(P0)) return To(this.toNumber() * t8.toNumber(), this.unsigned); | |
var o = this.high >>> 16, | |
n = this.high & 65535, | |
s = this.low >>> 16, | |
a = this.low & 65535, | |
i = t8.high >>> 16, | |
p = t8.high & 65535, | |
u = t8.low >>> 16, | |
c = t8.low & 65535, | |
l = 0, | |
m = 0, | |
d = 0, | |
f = 0; | |
return f += a * c, d += f >>> 16, f &= 65535, d += s * c, m += d >>> 16, d &= 65535, d += a * u, m += d >>> 16, d &= 65535, m += n * c, l += m >>> 16, m &= 65535, m += s * u, l += m >>> 16, m &= 65535, m += a * p, l += m >>> 16, m &= 65535, l += o * c + n * u + s * p + a * i, l &= 65535, Tt(d << 16 | f, l << 16 | m, this.unsigned); | |
}; | |
de.mul = de.multiply; | |
de.divide = function (t8) { | |
if (Wr(t8) || (t8 = Fs(t8)), t8.isZero()) throw Error("division by zero"); | |
if (No) { | |
if (!this.unsigned && this.high === -2147483648 && t8.low === -1 && t8.high === -1) return this; | |
var e = (this.unsigned ? No.div_u : No.div_s)(this.low, this.high, t8.low, t8.high); | |
return Tt(e, No.get_high(), this.unsigned); | |
} | |
if (this.isZero()) return this.unsigned ? Lu : _o; | |
var o, n, s; | |
if (this.unsigned) { | |
if (t8.unsigned || (t8 = t8.toUnsigned()), t8.gt(this)) return Lu; | |
if (t8.gt(this.shru(1))) return M0; | |
s = Lu; | |
} else { | |
if (this.eq(Vr)) { | |
if (t8.eq(rc) || t8.eq(pw)) return Vr; | |
if (t8.eq(Vr)) return rc; | |
var a = this.shr(1); | |
return o = a.div(t8).shl(1), o.eq(_o) ? t8.isNegative() ? rc : pw : (n = this.sub(t8.mul(o)), s = o.add(n.div(t8)), s); | |
} else if (t8.eq(Vr)) return this.unsigned ? Lu : _o; | |
if (this.isNegative()) return t8.isNegative() ? this.neg().div(t8.neg()) : this.neg().div(t8).neg(); | |
if (t8.isNegative()) return this.div(t8.neg()).neg(); | |
s = _o; | |
} | |
for (n = this; n.gte(t8);) { | |
o = Math.max(1, Math.floor(n.toNumber() / t8.toNumber())); | |
for (var i = Math.ceil(Math.log(o) / Math.LN2), p = i <= 48 ? 1 : Zm(2, i - 48), u = To(o), c = u.mul(t8); c.isNegative() || c.gt(n);) o -= p, u = To(o, this.unsigned), c = u.mul(t8); | |
u.isZero() && (u = rc), s = s.add(u), n = n.sub(c); | |
} | |
return s; | |
}; | |
de.div = de.divide; | |
de.modulo = function (t8) { | |
if (Wr(t8) || (t8 = Fs(t8)), No) { | |
var e = (this.unsigned ? No.rem_u : No.rem_s)(this.low, this.high, t8.low, t8.high); | |
return Tt(e, No.get_high(), this.unsigned); | |
} | |
return this.sub(this.div(t8).mul(t8)); | |
}; | |
de.mod = de.modulo; | |
de.rem = de.modulo; | |
de.not = function () { | |
return Tt(~this.low, ~this.high, this.unsigned); | |
}; | |
de.and = function (t8) { | |
return Wr(t8) || (t8 = Fs(t8)), Tt(this.low & t8.low, this.high & t8.high, this.unsigned); | |
}; | |
de.or = function (t8) { | |
return Wr(t8) || (t8 = Fs(t8)), Tt(this.low | t8.low, this.high | t8.high, this.unsigned); | |
}; | |
de.xor = function (t8) { | |
return Wr(t8) || (t8 = Fs(t8)), Tt(this.low ^ t8.low, this.high ^ t8.high, this.unsigned); | |
}; | |
de.shiftLeft = function (t8) { | |
return Wr(t8) && (t8 = t8.toInt()), (t8 &= 63) === 0 ? this : t8 < 32 ? Tt(this.low << t8, this.high << t8 | this.low >>> 32 - t8, this.unsigned) : Tt(0, this.low << t8 - 32, this.unsigned); | |
}; | |
de.shl = de.shiftLeft; | |
de.shiftRight = function (t8) { | |
return Wr(t8) && (t8 = t8.toInt()), (t8 &= 63) === 0 ? this : t8 < 32 ? Tt(this.low >>> t8 | this.high << 32 - t8, this.high >> t8, this.unsigned) : Tt(this.high >> t8 - 32, this.high >= 0 ? 0 : -1, this.unsigned); | |
}; | |
de.shr = de.shiftRight; | |
de.shiftRightUnsigned = function (t8) { | |
if (Wr(t8) && (t8 = t8.toInt()), t8 &= 63, t8 === 0) return this; | |
var e = this.high; | |
if (t8 < 32) { | |
var o = this.low; | |
return Tt(o >>> t8 | e << 32 - t8, e >>> t8, this.unsigned); | |
} else return t8 === 32 ? Tt(e, 0, this.unsigned) : Tt(e >>> t8 - 32, 0, this.unsigned); | |
}; | |
de.shru = de.shiftRightUnsigned; | |
de.shr_u = de.shiftRightUnsigned; | |
de.toSigned = function () { | |
return this.unsigned ? Tt(this.low, this.high, false) : this; | |
}; | |
de.toUnsigned = function () { | |
return this.unsigned ? this : Tt(this.low, this.high, true); | |
}; | |
de.toBytes = function (t8) { | |
return t8 ? this.toBytesLE() : this.toBytesBE(); | |
}; | |
de.toBytesLE = function () { | |
var t8 = this.high, | |
e = this.low; | |
return [e & 255, e >>> 8 & 255, e >>> 16 & 255, e >>> 24, t8 & 255, t8 >>> 8 & 255, t8 >>> 16 & 255, t8 >>> 24]; | |
}; | |
de.toBytesBE = function () { | |
var t8 = this.high, | |
e = this.low; | |
return [t8 >>> 24, t8 >>> 16 & 255, t8 >>> 8 & 255, t8 & 255, e >>> 24, e >>> 16 & 255, e >>> 8 & 255, e & 255]; | |
}; | |
Nt.fromBytes = function (t8, e, o) { | |
return o ? Nt.fromBytesLE(t8, e) : Nt.fromBytesBE(t8, e); | |
}; | |
Nt.fromBytesLE = function (t8, e) { | |
return new Nt(t8[0] | t8[1] << 8 | t8[2] << 16 | t8[3] << 24, t8[4] | t8[5] << 8 | t8[6] << 16 | t8[7] << 24, e); | |
}; | |
Nt.fromBytesBE = function (t8, e) { | |
return new Nt(t8[4] << 24 | t8[5] << 16 | t8[6] << 8 | t8[7], t8[0] << 24 | t8[1] << 16 | t8[2] << 8 | t8[3], e); | |
}; | |
}); | |
var Ik = qt(() => {}); | |
var vk = qt(() => {}); | |
// Module "Q2": the "Alea" seeded PRNG from the bundled seedrandom family
// (registered as `alea` by the export plumbing at the bottom of the IIFE).
var Q2 = qt((Y2, Vw) => {
  (function (r, t8, e) {
    // Generator state: three fractions s0/s1/s2 plus integer carry c,
    // seeded through the string hasher a() ("Mash"-style).
    function o(i) {
      var p = this,
        u = a();
      p.next = function () {
        var c = 2091639 * p.s0 + p.c * 23283064365386963e-26;
        return p.s0 = p.s1, p.s1 = p.s2, p.s2 = c - (p.c = c | 0);
      }, p.c = 1, p.s0 = u(" "), p.s1 = u(" "), p.s2 = u(" "), p.s0 -= u(i), p.s0 < 0 && (p.s0 += 1), p.s1 -= u(i), p.s1 < 0 && (p.s1 += 1), p.s2 -= u(i), p.s2 < 0 && (p.s2 += 1), u = null;
    }
    // Copy generator state from i into p (state save/restore support).
    function n(i, p) {
      return p.c = i.c, p.s0 = i.s0, p.s1 = i.s1, p.s2 = i.s2, p;
    }
    // Factory: returns a () => [0,1) function carrying int32/double/quick/state helpers.
    function s(i, p) {
      var u = new o(i),
        c = p && p.state,
        l = u.next;
      return l.int32 = function () {
        return u.next() * 4294967296 | 0;
      }, l.double = function () {
        return l() + (l() * 2097152 | 0) * 11102230246251565e-32;
      }, l.quick = l, c && (typeof c == "object" && n(c, u), l.state = function () {
        return n(u, {});
      }), l;
    }
    // Incremental string hasher used for seeding; returns a closure over
    // accumulator i that maps any value to a fraction in [0,1).
    function a() {
      var i = 4022871197,
        p = function (u) {
          u = String(u);
          for (var c = 0; c < u.length; c++) {
            i += u.charCodeAt(c);
            var l = 0.02519603282416938 * i;
            i = l >>> 0, l -= i, l *= i, i = l >>> 0, l -= i, i += l * 4294967296;
          }
          return (i >>> 0) * 23283064365386963e-26;
        };
      return p;
    }
    // Export as CommonJS, AMD, or global `alea`, whichever is available.
    t8 && t8.exports ? t8.exports = s : e && e.amd ? e(function () {
      return s;
    }) : this.alea = s;
  })(Y2, typeof Vw == "object" && Vw, typeof define == "function" && define);
});
// Module "J2": xor128 xorshift PRNG (128-bit state; registered as `xor128`).
var J2 = qt((Z2, Ww) => {
  (function (r, t8, e) {
    // State is four 32-bit words x/y/z/w, seeded from an integer or string a.
    function o(a) {
      var i = this,
        p = "";
      i.x = 0, i.y = 0, i.z = 0, i.w = 0, i.next = function () {
        var c = i.x ^ i.x << 11;
        return i.x = i.y, i.y = i.z, i.z = i.w, i.w ^= i.w >>> 19 ^ c ^ c >>> 8;
      }, a === (a | 0) ? i.x = a : p += a;
      // Fold a string seed into state with 64 extra warm-up rounds.
      for (var u = 0; u < p.length + 64; u++) i.x ^= p.charCodeAt(u) | 0, i.next();
    }
    // Copy generator state (state save/restore support).
    function n(a, i) {
      return i.x = a.x, i.y = a.y, i.z = a.z, i.w = a.w, i;
    }
    // Factory: returns a () => [0,1) function with double/int32/quick/state helpers.
    function s(a, i) {
      var p = new o(a),
        u = i && i.state,
        c = function () {
          return (p.next() >>> 0) / 4294967296;
        };
      return c.double = function () {
        do var l = p.next() >>> 11,
          m = (p.next() >>> 0) / 4294967296,
          d = (l + m) / (1 << 21); while (d === 0);
        return d;
      }, c.int32 = p.next, c.quick = c, u && (typeof u == "object" && n(u, p), c.state = function () {
        return n(p, {});
      }), c;
    }
    // Export as CommonJS, AMD, or global `xor128`.
    t8 && t8.exports ? t8.exports = s : e && e.amd ? e(function () {
      return s;
    }) : this.xor128 = s;
  })(Z2, typeof Ww == "object" && Ww, typeof define == "function" && define);
});
// Module "t1": xorwow PRNG — xorshift over x/y/z/w/v plus an additive counter d
// (stepped by 362437 each call); registered as `xorwow`.
var t1 = qt((e1, Uw) => {
  (function (r, t8, e) {
    function o(a) {
      var i = this,
        p = "";
      i.next = function () {
        var c = i.x ^ i.x >>> 2;
        return i.x = i.y, i.y = i.z, i.z = i.w, i.w = i.v, (i.d = i.d + 362437 | 0) + (i.v = i.v ^ i.v << 4 ^ (c ^ c << 1)) | 0;
      }, i.x = 0, i.y = 0, i.z = 0, i.w = 0, i.v = 0, a === (a | 0) ? i.x = a : p += a;
      // Fold a string seed into state; the counter d is derived from x exactly
      // once, at the end of the seed characters, then 64 warm-up rounds run.
      for (var u = 0; u < p.length + 64; u++) i.x ^= p.charCodeAt(u) | 0, u == p.length && (i.d = i.x << 10 ^ i.x >>> 4), i.next();
    }
    // Copy generator state (state save/restore support).
    function n(a, i) {
      return i.x = a.x, i.y = a.y, i.z = a.z, i.w = a.w, i.v = a.v, i.d = a.d, i;
    }
    // Factory: returns a () => [0,1) function with double/int32/quick/state helpers.
    function s(a, i) {
      var p = new o(a),
        u = i && i.state,
        c = function () {
          return (p.next() >>> 0) / 4294967296;
        };
      return c.double = function () {
        do var l = p.next() >>> 11,
          m = (p.next() >>> 0) / 4294967296,
          d = (l + m) / (1 << 21); while (d === 0);
        return d;
      }, c.int32 = p.next, c.quick = c, u && (typeof u == "object" && n(u, p), c.state = function () {
        return n(p, {});
      }), c;
    }
    // Export as CommonJS, AMD, or global `xorwow`.
    t8 && t8.exports ? t8.exports = s : e && e.amd ? e(function () {
      return s;
    }) : this.xorwow = s;
  })(e1, typeof Uw == "object" && Uw, typeof define == "function" && define);
});
// Module "o1": xorshift7 PRNG — 8-word circular state buffer with multiple
// xorshift taps per step; registered as `xorshift7`.
var o1 = qt((r1, Gw) => {
  (function (r, t8, e) {
    function o(a) {
      var i = this;
      // Advance: combine five taps of the circular buffer i.x at cursor i.i,
      // write the result back, and step the cursor mod 8.
      i.next = function () {
        var u = i.x,
          c = i.i,
          l,
          m,
          d;
        return l = u[c], l ^= l >>> 7, m = l ^ l << 24, l = u[c + 1 & 7], m ^= l ^ l >>> 10, l = u[c + 3 & 7], m ^= l ^ l >>> 3, l = u[c + 4 & 7], m ^= l ^ l << 7, l = u[c + 7 & 7], l = l ^ l << 13, m ^= l ^ l << 9, u[c] = m, i.i = c + 1 & 7, m;
      };
      // Seeder: build the 8-word state from an int or string seed c.
      function p(u, c) {
        var l,
          m,
          d = [];
        if (c === (c | 0)) m = d[0] = c;else for (c = "" + c, l = 0; l < c.length; ++l) d[l & 7] = d[l & 7] << 15 ^ c.charCodeAt(l) + d[l + 1 & 7] << 13;
        for (; d.length < 8;) d.push(0);
        for (l = 0; l < 8 && d[l] === 0; ++l);
        // An all-zero state would be a fixed point, so force a nonzero word;
        // then discard 256 outputs as warm-up.
        for (l == 8 ? m = d[7] = -1 : m = d[l], u.x = d, u.i = 0, l = 256; l > 0; --l) u.next();
      }
      p(i, a);
    }
    // Copy generator state (state save/restore support).
    function n(a, i) {
      return i.x = a.x.slice(), i.i = a.i, i;
    }
    // Factory; a null seed falls back to the current time.
    function s(a, i) {
      a == null && (a = + /* @__PURE__ */new Date());
      var p = new o(a),
        u = i && i.state,
        c = function () {
          return (p.next() >>> 0) / 4294967296;
        };
      return c.double = function () {
        do var l = p.next() >>> 11,
          m = (p.next() >>> 0) / 4294967296,
          d = (l + m) / (1 << 21); while (d === 0);
        return d;
      }, c.int32 = p.next, c.quick = c, u && (u.x && n(u, p), c.state = function () {
        return n(p, {});
      }), c;
    }
    // Export as CommonJS, AMD, or global `xorshift7`.
    t8 && t8.exports ? t8.exports = s : e && e.amd ? e(function () {
      return s;
    }) : this.xorshift7 = s;
  })(r1, typeof Gw == "object" && Gw, typeof define == "function" && define);
});
// Module "s1": xor4096 PRNG — 128-word xorshift state X combined with an
// additive counter w (stepped by 1640531527 per call); registered as `xor4096`.
var s1 = qt((n1, Hw) => {
  (function (r, t8, e) {
    function o(a) {
      var i = this;
      // Advance: xorshift-mix two taps of the 128-word buffer, then blend in
      // the stepped counter for the returned 32-bit value.
      i.next = function () {
        var u = i.w,
          c = i.X,
          l = i.i,
          m,
          d;
        return i.w = u = u + 1640531527 | 0, d = c[l + 34 & 127], m = c[l = l + 1 & 127], d ^= d << 13, m ^= m << 17, d ^= d >>> 15, m ^= m >>> 12, d = c[l] = d ^ m, i.i = l, d + (u ^ u >>> 16) | 0;
      };
      // Seeder: expand an int or string seed into the 128-word state g,
      // tracking zero runs to avoid a degenerate all-zero state, then run
      // 4*128 = 512 mixing rounds before use.
      function p(u, c) {
        var l,
          m,
          d,
          f,
          h,
          g = [],
          x = 128;
        for (c === (c | 0) ? (m = c, c = null) : (c = c + "\0", m = 0, x = Math.max(x, c.length)), d = 0, f = -32; f < x; ++f) c && (m ^= c.charCodeAt((f + 32) % c.length)), f === 0 && (h = m), m ^= m << 10, m ^= m >>> 15, m ^= m << 4, m ^= m >>> 13, f >= 0 && (h = h + 1640531527 | 0, l = g[f & 127] ^= m + h, d = l == 0 ? d + 1 : 0);
        for (d >= 128 && (g[(c && c.length || 0) & 127] = -1), d = 127, f = 4 * 128; f > 0; --f) m = g[d + 34 & 127], l = g[d = d + 1 & 127], m ^= m << 13, l ^= l << 17, m ^= m >>> 15, l ^= l >>> 12, g[d] = m ^ l;
        u.w = h, u.X = g, u.i = d;
      }
      p(i, a);
    }
    // Copy generator state (state save/restore support).
    function n(a, i) {
      return i.i = a.i, i.w = a.w, i.X = a.X.slice(), i;
    }
    // Factory; a null seed falls back to the current time.
    function s(a, i) {
      a == null && (a = + /* @__PURE__ */new Date());
      var p = new o(a),
        u = i && i.state,
        c = function () {
          return (p.next() >>> 0) / 4294967296;
        };
      return c.double = function () {
        do var l = p.next() >>> 11,
          m = (p.next() >>> 0) / 4294967296,
          d = (l + m) / (1 << 21); while (d === 0);
        return d;
      }, c.int32 = p.next, c.quick = c, u && (u.X && n(u, p), c.state = function () {
        return n(p, {});
      }), c;
    }
    // Export as CommonJS, AMD, or global `xor4096`.
    t8 && t8.exports ? t8.exports = s : e && e.amd ? e(function () {
      return s;
    }) : this.xor4096 = s;
  })(n1, typeof Hw == "object" && Hw, typeof define == "function" && define);
});
// Module "i1": "tychei" PRNG — add/rotate/xor mixing over four 32-bit state
// words a/b/c/d; registered as `tychei`.
var i1 = qt((a1, Kw) => {
  (function (r, t8, e) {
    function o(a) {
      var i = this,
        p = "";
      // Advance: one mixing step over the four state words; returns i.a.
      i.next = function () {
        var c = i.b,
          l = i.c,
          m = i.d,
          d = i.a;
        return c = c << 25 ^ c >>> 7 ^ l, l = l - m | 0, m = m << 24 ^ m >>> 8 ^ d, d = d - c | 0, i.b = c = c << 20 ^ c >>> 12 ^ l, i.c = l = l - m | 0, i.d = m << 16 ^ l >>> 16 ^ d, i.a = d - c | 0;
      }, i.a = 0, i.b = 0, i.c = -1640531527, i.d = 1367130551, a === Math.floor(a) ? (i.a = a / 4294967296 | 0, i.b = a | 0) : p += a;
      // Integer seeds load directly into a/b (high/low halves); string seeds
      // are folded into b with 20 warm-up rounds.
      for (var u = 0; u < p.length + 20; u++) i.b ^= p.charCodeAt(u) | 0, i.next();
    }
    // Copy generator state (state save/restore support).
    function n(a, i) {
      return i.a = a.a, i.b = a.b, i.c = a.c, i.d = a.d, i;
    }
    // Factory: returns a () => [0,1) function with double/int32/quick/state helpers.
    function s(a, i) {
      var p = new o(a),
        u = i && i.state,
        c = function () {
          return (p.next() >>> 0) / 4294967296;
        };
      return c.double = function () {
        do var l = p.next() >>> 11,
          m = (p.next() >>> 0) / 4294967296,
          d = (l + m) / (1 << 21); while (d === 0);
        return d;
      }, c.int32 = p.next, c.quick = c, u && (typeof u == "object" && n(u, p), c.state = function () {
        return n(p, {});
      }), c;
    }
    // Export as CommonJS, AMD, or global `tychei`.
    t8 && t8.exports ? t8.exports = s : e && e.amd ? e(function () {
      return s;
    }) : this.tychei = s;
  })(a1, typeof Kw == "object" && Kw, typeof define == "function" && define);
});
var u1 = qt(() => {}); | |
// Module "c1": ARC4-based seedrandom core. Depending on environment it
// exports the factory via CommonJS/AMD or installs it globally as
// `Math.seedrandom` (e["seed" + a] with a = "random", e = Math).
var c1 = qt((p1, Ld) => {
  (function (r, t8, e) {
    // o = key width (256), n = output chunks per draw, s = significant bits
    // (52, a JS double mantissa), i/p/u = rescaling constants, c = byte mask,
    // l = lazily-loaded node crypto module (if any).
    var o = 256,
      n = 6,
      s = 52,
      a = "random",
      i = e.pow(o, n),
      p = e.pow(2, s),
      u = p * 2,
      c = o - 1,
      l;
    // Entry point: seed C (null => gathered entropy), options S, callback k.
    function m(C, S, k) {
      var _ = [];
      S = S == true ? {
        entropy: true
      } : S || {};
      // Flatten the seed (optionally mixed with pooled entropy) into key _,
      // then build the ARC4 generator R over it.
      var E = g(h(S.entropy ? [C, b(t8)] : C == null ? x() : C, 3), _),
        R = new d(_),
        D = function () {
          // Draw bytes until 52 significant bits are available, then rescale
          // the integer into [0,1).
          for (var P = R.g(n), O = i, M = 0; P < p;) P = (P + M) * o, O *= o, M = R.g(1);
          for (; P >= u;) P /= 2, O /= 2, M >>>= 1;
          return (P + M) / O;
        };
      return D.int32 = function () {
        return R.g(4) | 0;
      }, D.quick = function () {
        return R.g(4) / 4294967296;
      }, D.double = D, g(b(R.S), t8), (S.pass || k || function (P, O, M, L) {
        // Default dispatcher: restore saved state if provided, optionally
        // expose state(), and either install globally (M) or return P.
        return L && (L.S && f(L, R), P.state = function () {
          return f(R, {});
        }), M ? (e[a] = P, O) : P;
      })(D, E, "global" in S ? S.global : this == e, S.state);
    }
    // ARC4: key-scheduling over key C, then generator _.g(P) which returns
    // P consecutive output bytes combined into one integer.
    function d(C) {
      var S,
        k = C.length,
        _ = this,
        E = 0,
        R = _.i = _.j = 0,
        D = _.S = [];
      for (k || (C = [k++]); E < o;) D[E] = E++;
      for (E = 0; E < o; E++) D[E] = D[R = c & R + C[E % k] + (S = D[E])], D[R] = S;
      // Drop the first `o` bytes of keystream immediately after scheduling.
      (_.g = function (P) {
        for (var O, M = 0, L = _.i, B = _.j, z = _.S; P--;) O = z[L = c & L + 1], M = M * o + z[c & (z[L] = z[B = c & B + O]) + (z[B] = O)];
        return _.i = L, _.j = B, M;
      })(o);
    }
    // Copy ARC4 state (for the state() save/restore option).
    function f(C, S) {
      return S.i = C.i, S.j = C.j, S.S = C.S.slice(), S;
    }
    // Recursively flatten a seed object/array (to depth S) into a string or
    // array of primitive values; errors on inaccessible properties are ignored.
    function h(C, S) {
      var k = [],
        _ = typeof C,
        E;
      if (S && _ == "object") for (E in C) try {
        k.push(h(C[E], S - 1));
      } catch (R) {}
      return k.length ? k : _ == "string" ? C : C + "\0";
    }
    // Mix string C into key array S; returns the key rendered as a string.
    function g(C, S) {
      for (var k = C + "", _, E = 0; E < k.length;) S[c & E] = c & (_ ^= S[c & E] * 19) + k.charCodeAt(E++);
      return b(S);
    }
    // Gather entropy: node crypto.randomBytes or browser getRandomValues,
    // falling back to time/navigator/screen data if neither is available.
    function x() {
      try {
        var C;
        return l && (C = l.randomBytes) ? C = C(o) : (C = new Uint8Array(o), (r.crypto || r.msCrypto).getRandomValues(C)), b(C);
      } catch (_) {
        var S = r.navigator,
          k = S && S.plugins;
        return [+ /* @__PURE__ */new Date(), r, k, r.screen, b(t8)];
      }
    }
    // Render an array of byte values as a string.
    function b(C) {
      return String.fromCharCode.apply(0, C);
    }
    // Seed the shared entropy pool t8 from Math.random(), then export.
    // u1() is the bundler's (empty) shim for node "crypto";
    // __webpack_require__.amdO is webpack's substitute for define.amd.
    if (g(e.random(), t8), typeof Ld == "object" && Ld.exports) {
      Ld.exports = m;
      try {
        l = u1();
      } catch (C) {}
    } else typeof define == "function" && __webpack_require__.amdO ? define(function () {
      return m;
    }) : e["seed" + a] = m;
  })(typeof self != "undefined" ? self : p1, [], Math);
});
// Module "qw": seedrandom package entry point — exposes the ARC4 default
// generator (c1) with each alternative PRNG attached as a named property.
var qw = qt((Y2e, l1) => {
  var vq = Q2(),
    kq = J2(),
    Nq = t1(),
    Tq = o1(),
    _q = s1(),
    $q = i1(),
    Qu = c1();
  Qu.alea = vq;
  Qu.xor128 = kq;
  Qu.xorwow = Nq;
  Qu.xorshift7 = Tq;
  Qu.xor4096 = _q;
  Qu.tychei = $q;
  l1.exports = Qu;
});
// Empty shims for Node-only modules used by the wasm loader below; based on
// how their return values are used there they correspond to fs (Bv: readFileSync),
// path (zv: dirname/normalize), worker_threads (RB), perf_hooks (DB: .performance)
// and os (AB: .cpus()) — confirm against the unminified build.
var Bv = qt(() => {});
var zv = qt(() => {});
var RB = qt(() => {});
var DB = qt(() => {});
var AB = qt(() => {});
var FB = qt((Wg, Wv) => { | |
var Vv = (() => { | |
var r = typeof document != "undefined" && document.currentScript ? document.currentScript.src : void 0; | |
return true && (r = r || __filename), function (t8) { | |
t8 = t8 || {}; | |
function e() { | |
return oe.buffer != Ge && _t(oe.buffer), mt; | |
} | |
function o() { | |
return oe.buffer != Ge && _t(oe.buffer), it; | |
} | |
function n() { | |
return oe.buffer != Ge && _t(oe.buffer), gt; | |
} | |
function s() { | |
return oe.buffer != Ge && _t(oe.buffer), Lr; | |
} | |
function a() { | |
return oe.buffer != Ge && _t(oe.buffer), Lt; | |
} | |
function i() { | |
return oe.buffer != Ge && _t(oe.buffer), to; | |
} | |
function p() { | |
return oe.buffer != Ge && _t(oe.buffer), nr; | |
} | |
var u = typeof t8 != "undefined" ? t8 : {}, | |
c, | |
l; | |
u.ready = new Promise(function (F, V) { | |
c = F, l = V; | |
}); | |
var m; | |
typeof process != "undefined" && process.listeners && (m = { | |
uncaughtException: process.listeners("uncaughtException"), | |
unhandledRejection: process.listeners("unhandledRejection") | |
}); | |
var d = Object.assign({}, u), | |
f = [], | |
h = "./this.program", | |
g = (F, V) => { | |
throw V; | |
}, | |
x = typeof window == "object", | |
b = typeof importScripts == "function", | |
C = typeof process == "object" && typeof process.versions == "object" && typeof process.versions.node == "string", | |
S = u.ENVIRONMENT_IS_PTHREAD || false, | |
k = ""; | |
function _(F) { | |
return u.locateFile ? u.locateFile(F, k) : k + F; | |
} | |
var E, R, D, P; | |
function O(F) { | |
if (F instanceof Eu) return; | |
j("exiting due to exception: " + F); | |
} | |
if (C) { | |
var M = Bv(), | |
L = zv(); | |
b ? k = L.dirname(k) + "/" : k = __dirname + "/", E = (V, ue) => (V = zp(V) ? new URL(V) : L.normalize(V), M.readFileSync(V, ue ? void 0 : "utf8")), D = V => { | |
var ue = E(V, true); | |
return ue.buffer || (ue = new Uint8Array(ue)), ue; | |
}, R = (V, ue, Ee) => { | |
V = zp(V) ? new URL(V) : L.normalize(V), M.readFile(V, function (Be, Le) { | |
Be ? Ee(Be) : ue(Le.buffer); | |
}); | |
}, process.argv.length > 1 && (h = process.argv[1].replace(/\\/g, "/")), f = process.argv.slice(2), process.on("uncaughtException", function (V) { | |
if (!(V instanceof Eu)) throw V; | |
}), process.on("unhandledRejection", function (V) { | |
throw V; | |
}), g = (V, ue) => { | |
if (Bo()) throw process.exitCode = V, ue; | |
O(ue), process.exit(V); | |
}, u.inspect = function () { | |
return "[Emscripten Module object]"; | |
}; | |
let F; | |
try { | |
F = RB(); | |
} catch (V) { | |
throw console.error('The "worker_threads" module is not supported in this node.js build - perhaps a newer version is needed?'), V; | |
} | |
__webpack_require__.g.Worker = F.Worker; | |
} else (x || b) && (b ? k = self.location.href : typeof document != "undefined" && document.currentScript && (k = document.currentScript.src), typeof r != "undefined" && r && (k = r), k.indexOf("blob:") !== 0 ? k = k.substr(0, k.replace(/[?#].*/, "").lastIndexOf("/") + 1) : k = "", C || (E = F => { | |
var V = new XMLHttpRequest(); | |
return V.open("GET", F, false), V.send(null), V.responseText; | |
}, b && (D = F => { | |
var V = new XMLHttpRequest(); | |
return V.open("GET", F, false), V.responseType = "arraybuffer", V.send(null), new Uint8Array(V.response); | |
}), R = (F, V, ue) => { | |
var Ee = new XMLHttpRequest(); | |
Ee.open("GET", F, true), Ee.responseType = "arraybuffer", Ee.onload = () => { | |
if (Ee.status == 200 || Ee.status == 0 && Ee.response) { | |
V(Ee.response); | |
return; | |
} | |
ue(); | |
}, Ee.onerror = ue, Ee.send(null); | |
}), P = F => document.title = F); | |
C && typeof performance == "undefined" && (__webpack_require__.g.performance = DB().performance); | |
var B = console.log.bind(console), | |
z = console.warn.bind(console); | |
C && (B = F => M.writeSync(1, F + ` | |
`), z = F => M.writeSync(2, F + ` | |
`)); | |
var U = u.print || B, | |
j = u.printErr || z; | |
Object.assign(u, d), d = null, u.arguments && (f = u.arguments), u.thisProgram && (h = u.thisProgram), u.quit && (g = u.quit); | |
var q = 4, | |
Y = Atomics.load, | |
J = Atomics.store, | |
re = Atomics.compareExchange, | |
ne; | |
u.wasmBinary && (ne = u.wasmBinary); | |
var ee = u.noExitRuntime || true; | |
typeof WebAssembly != "object" && $u("no native wasm support detected"); | |
var oe, | |
ie, | |
le = false, | |
be; | |
function _e(F, V) { | |
F || $u(V); | |
} | |
var ve = typeof TextDecoder != "undefined" ? new TextDecoder("utf8") : void 0; | |
function Fe(F, V, ue) { | |
V >>>= 0; | |
for (var Ee = V + ue, Be = V; F[Be] && !(Be >= Ee);) ++Be; | |
if (Be - V > 16 && F.buffer && ve) return ve.decode(F.buffer instanceof SharedArrayBuffer ? F.slice(V, Be) : F.subarray(V, Be)); | |
for (var Le = ""; V < Be;) { | |
var ge = F[V++]; | |
if (!(ge & 128)) { | |
Le += String.fromCharCode(ge); | |
continue; | |
} | |
var Ne = F[V++] & 63; | |
if ((ge & 224) == 192) { | |
Le += String.fromCharCode((ge & 31) << 6 | Ne); | |
continue; | |
} | |
var Pt = F[V++] & 63; | |
if ((ge & 240) == 224 ? ge = (ge & 15) << 12 | Ne << 6 | Pt : ge = (ge & 7) << 18 | Ne << 12 | Pt << 6 | F[V++] & 63, ge < 65536) Le += String.fromCharCode(ge);else { | |
var so = ge - 65536; | |
Le += String.fromCharCode(55296 | so >> 10, 56320 | so & 1023); | |
} | |
} | |
return Le; | |
} | |
function Pe(F, V) { | |
return F >>>= 0, F ? Fe(o(), F, V) : ""; | |
} | |
function st(F, V, ue, Ee) { | |
if (ue >>>= 0, !(Ee > 0)) return 0; | |
for (var Be = ue, Le = ue + Ee - 1, ge = 0; ge < F.length; ++ge) { | |
var Ne = F.charCodeAt(ge); | |
if (Ne >= 55296 && Ne <= 57343) { | |
var Pt = F.charCodeAt(++ge); | |
Ne = 65536 + ((Ne & 1023) << 10) | Pt & 1023; | |
} | |
if (Ne <= 127) { | |
if (ue >= Le) break; | |
V[ue++ >>> 0] = Ne; | |
} else if (Ne <= 2047) { | |
if (ue + 1 >= Le) break; | |
V[ue++ >>> 0] = 192 | Ne >> 6, V[ue++ >>> 0] = 128 | Ne & 63; | |
} else if (Ne <= 65535) { | |
if (ue + 2 >= Le) break; | |
V[ue++ >>> 0] = 224 | Ne >> 12, V[ue++ >>> 0] = 128 | Ne >> 6 & 63, V[ue++ >>> 0] = 128 | Ne & 63; | |
} else { | |
if (ue + 3 >= Le) break; | |
V[ue++ >>> 0] = 240 | Ne >> 18, V[ue++ >>> 0] = 128 | Ne >> 12 & 63, V[ue++ >>> 0] = 128 | Ne >> 6 & 63, V[ue++ >>> 0] = 128 | Ne & 63; | |
} | |
} | |
return V[ue >>> 0] = 0, ue - Be; | |
} | |
function lt(F, V, ue) { | |
return st(F, o(), V, ue); | |
} | |
var Ge, mt, it, gt, xt, Lr, Lt, to, nr; | |
S && (Ge = u.buffer); | |
function _t(F) { | |
Ge = F, u.HEAP8 = mt = new Int8Array(F), u.HEAP16 = gt = new Int16Array(F), u.HEAP32 = Lr = new Int32Array(F), u.HEAPU8 = it = new Uint8Array(F), u.HEAPU16 = xt = new Uint16Array(F), u.HEAPU32 = Lt = new Uint32Array(F), u.HEAPF32 = to = new Float32Array(F), u.HEAPF64 = nr = new Float64Array(F); | |
} | |
var sr = u.INITIAL_MEMORY || 16777216; | |
if (S) oe = u.wasmMemory, Ge = u.buffer;else if (u.wasmMemory) oe = u.wasmMemory;else if (oe = new WebAssembly.Memory({ | |
initial: sr / 65536, | |
maximum: 65536, | |
shared: true | |
}), !(oe.buffer instanceof SharedArrayBuffer)) throw j("requested a shared WebAssembly.Memory but the returned buffer is not a SharedArrayBuffer, indicating that while the browser has SharedArrayBuffer it does not have WebAssembly threads support - you may need to set a flag"), C && j("(on node you may need: --experimental-wasm-threads --experimental-wasm-bulk-memory and/or recent version)"), Error("bad memory"); | |
oe && (Ge = oe.buffer), sr = Ge.byteLength, _t(Ge); | |
var ar, | |
ro = [], | |
oo = [], | |
hr = [], | |
Wa = false; | |
function Bo() { | |
return ee; | |
} | |
function Ks() { | |
if (u.preRun) for (typeof u.preRun == "function" && (u.preRun = [u.preRun]); u.preRun.length;) sl(u.preRun.shift()); | |
ul(ro); | |
} | |
function Yt() { | |
Wa = true, !S && ul(oo); | |
} | |
function Ua() { | |
if (!S) { | |
if (u.postRun) for (typeof u.postRun == "function" && (u.postRun = [u.postRun]); u.postRun.length;) l0(u.postRun.shift()); | |
ul(hr); | |
} | |
} | |
function sl(F) { | |
ro.unshift(F); | |
} | |
function al(F) { | |
oo.unshift(F); | |
} | |
function l0(F) { | |
hr.unshift(F); | |
} | |
var Ti = 0, | |
Bp = null, | |
Ga = null; | |
function Cy(F) { | |
Ti++, u.monitorRunDependencies && u.monitorRunDependencies(Ti); | |
} | |
function wm(F) { | |
if (Ti--, u.monitorRunDependencies && u.monitorRunDependencies(Ti), Ti == 0 && (Bp !== null && (clearInterval(Bp), Bp = null), Ga)) { | |
var V = Ga; | |
Ga = null, V(); | |
} | |
} | |
function $u(F) { | |
u.onAbort && u.onAbort(F), F = "Aborted(" + F + ")", j(F), le = true, be = 1, F += ". Build with -sASSERTIONS for more info."; | |
var V = new WebAssembly.RuntimeError(F); | |
throw l(V), V; | |
} | |
var wy = "data:application/octet-stream;base64,"; | |
function Sm(F) { | |
return F.startsWith(wy); | |
} | |
function zp(F) { | |
return F.startsWith("file://"); | |
} | |
var gr; | |
gr = "tfjs-backend-wasm-threaded-simd.wasm", Sm(gr) || (gr = _(gr)); | |
function Im(F) { | |
try { | |
if (F == gr && ne) return new Uint8Array(ne); | |
if (D) return D(F); | |
throw "both async and sync fetching of the wasm failed"; | |
} catch (V) { | |
$u(V); | |
} | |
} | |
function Sy() { | |
if (!ne && (x || b)) { | |
if (typeof fetch == "function" && !zp(gr)) return fetch(gr, { | |
credentials: "same-origin" | |
}).then(function (F) { | |
if (!F.ok) throw "failed to load wasm binary file at '" + gr + "'"; | |
return F.arrayBuffer(); | |
}).catch(function () { | |
return Im(gr); | |
}); | |
if (R) return new Promise(function (F, V) { | |
R(gr, function (ue) { | |
F(new Uint8Array(ue)); | |
}, V); | |
}); | |
} | |
return Promise.resolve().then(function () { | |
return Im(gr); | |
}); | |
} | |
function Iy() { | |
var F = { | |
env: Om, | |
wasi_snapshot_preview1: Om | |
}; | |
function V(ge, Ne) { | |
var Pt = ge.exports; | |
if (u.asm = Pt, Dy(u.asm._emscripten_tls_init), ar = u.asm.__indirect_function_table, al(u.asm.__wasm_call_ctors), ie = Ne, !S) { | |
var so = Me.unusedWorkers.length; | |
Me.unusedWorkers.forEach(function (Ka) { | |
Me.loadWasmModuleToWorker(Ka, function () { | |
--so || wm("wasm-instantiate"); | |
}); | |
}); | |
} | |
} | |
S || Cy("wasm-instantiate"); | |
function ue(ge) { | |
V(ge.instance, ge.module); | |
} | |
function Ee(ge) { | |
return Sy().then(function (Ne) { | |
return WebAssembly.instantiate(Ne, F); | |
}).then(function (Ne) { | |
return Ne; | |
}).then(ge, function (Ne) { | |
j("failed to asynchronously prepare wasm: " + Ne), $u(Ne); | |
}); | |
} | |
function Be() { | |
return !ne && typeof WebAssembly.instantiateStreaming == "function" && !Sm(gr) && !zp(gr) && !C && typeof fetch == "function" ? fetch(gr, { | |
credentials: "same-origin" | |
}).then(function (ge) { | |
var Ne = WebAssembly.instantiateStreaming(ge, F); | |
return Ne.then(ue, function (Pt) { | |
return j("wasm streaming compile failed: " + Pt), j("falling back to ArrayBuffer instantiation"), Ee(ue); | |
}); | |
}) : Ee(ue); | |
} | |
if (u.instantiateWasm) try { | |
var Le = u.instantiateWasm(F, V); | |
return Le; | |
} catch (ge) { | |
j("Module.instantiateWasm callback failed with error: " + ge), l(ge); | |
} | |
return Be().catch(l), {}; | |
} | |
var m0, | |
d0, | |
vm = {}; | |
function Eu(F) { | |
this.name = "ExitStatus", this.message = "Program terminated with exit(" + F + ")", this.status = F; | |
} | |
function vy(F) { | |
var V = Me.pthreads[F]; | |
delete Me.pthreads[F], V.terminate(), jC(F), Me.runningWorkers.splice(Me.runningWorkers.indexOf(V), 1), V.pthread_ptr = 0; | |
} | |
function ky(F) { | |
var V = Me.pthreads[F]; | |
V.postMessage({ | |
cmd: "cancel" | |
}); | |
} | |
function il(F) { | |
var V = Me.pthreads[F]; | |
_e(V), Me.returnWorkerToPool(V); | |
} | |
function Ny(F) { | |
var V = Me.getNewWorker(); | |
if (!V) return 6; | |
Me.runningWorkers.push(V), Me.pthreads[F.pthread_ptr] = V, V.pthread_ptr = F.pthread_ptr; | |
var ue = { | |
cmd: "run", | |
start_routine: F.startRoutine, | |
arg: F.arg, | |
pthread_ptr: F.pthread_ptr | |
}; | |
return V.runPthread = () => { | |
C && V.ref(), V.postMessage(ue, F.transferList), delete V.runPthread; | |
}, V.loaded && V.runPthread(), 0; | |
} | |
var km = { | |
varargs: void 0, | |
get: function () { | |
km.varargs += 4; | |
var F = s()[km.varargs - 4 >>> 2]; | |
return F; | |
}, | |
getStr: function (F) { | |
var V = Pe(F); | |
return V; | |
} | |
}; | |
function Nm(F) { | |
if (S) return _i(1, 1, F); | |
be = F, Bo() || (Me.terminateAllThreads(), u.onExit && u.onExit(F), le = true), g(F, new Eu(F)); | |
} | |
function Ty(F, V) { | |
if (be = F, !V && S) throw _m(F), "unwind"; | |
Nm(F); | |
} | |
var Tm = Ty; | |
function _y(F) { | |
if (F instanceof Eu || F == "unwind") return be; | |
g(1, F); | |
} | |
var Me = { | |
unusedWorkers: [], | |
runningWorkers: [], | |
tlsInitFunctions: [], | |
pthreads: {}, | |
init: function () { | |
S ? Me.initWorker() : Me.initMainThread(); | |
}, | |
initMainThread: function () { | |
for (var F = 8; F--;) Me.allocateUnusedWorker(); | |
}, | |
initWorker: function () { | |
ee = false; | |
}, | |
setExitStatus: function (F) { | |
be = F; | |
}, | |
terminateAllThreads: function () { | |
for (var F of Object.values(Me.pthreads)) Me.returnWorkerToPool(F); | |
for (var F of Me.unusedWorkers) F.terminate(); | |
Me.unusedWorkers = []; | |
}, | |
returnWorkerToPool: function (F) { | |
var V = F.pthread_ptr; | |
delete Me.pthreads[V], Me.unusedWorkers.push(F), Me.runningWorkers.splice(Me.runningWorkers.indexOf(F), 1), F.pthread_ptr = 0, C && F.unref(), jC(V); | |
}, | |
receiveObjectTransfer: function (F) {}, | |
threadInitTLS: function () { | |
Me.tlsInitFunctions.forEach(F => F()); | |
}, | |
loadWasmModuleToWorker: function (F, V) { | |
F.onmessage = Le => { | |
var ge = Le.data, | |
Ne = ge.cmd; | |
if (F.pthread_ptr && (Me.currentProxiedOperationCallerThread = F.pthread_ptr), ge.targetThread && ge.targetThread != Wm()) { | |
var Pt = Me.pthreads[ge.targetThread]; | |
Pt ? Pt.postMessage(ge, ge.transferList) : j('Internal error! Worker sent a message "' + Ne + '" to target pthread ' + ge.targetThread + ", but that thread no longer exists!"), Me.currentProxiedOperationCallerThread = void 0; | |
return; | |
} | |
Ne === "processProxyingQueue" ? pl(ge.queue) : Ne === "spawnThread" ? Ny(ge) : Ne === "cleanupThread" ? il(ge.thread) : Ne === "killThread" ? vy(ge.thread) : Ne === "cancelThread" ? ky(ge.thread) : Ne === "loaded" ? (F.loaded = true, C && F.unref(), V && V(F), F.runPthread && F.runPthread()) : Ne === "print" ? U("Thread " + ge.threadId + ": " + ge.text) : Ne === "printErr" ? j("Thread " + ge.threadId + ": " + ge.text) : Ne === "alert" ? alert("Thread " + ge.threadId + ": " + ge.text) : ge.target === "setimmediate" ? F.postMessage(ge) : Ne === "callHandler" ? u[ge.handler](...ge.args) : Ne && j("worker sent an unknown command " + Ne), Me.currentProxiedOperationCallerThread = void 0; | |
}, F.onerror = Le => { | |
var ge = "worker sent an error!"; | |
throw j(ge + " " + Le.filename + ":" + Le.lineno + ": " + Le.message), Le; | |
}, C && (F.on("message", function (Le) { | |
F.onmessage({ | |
data: Le | |
}); | |
}), F.on("error", function (Le) { | |
F.onerror(Le); | |
}), F.on("detachedExit", function () {})); | |
var ue = [], | |
Ee = ["onExit", "onAbort", "print", "printErr"]; | |
for (var Be of Ee) u.hasOwnProperty(Be) && ue.push(Be); | |
F.postMessage({ | |
cmd: "load", | |
handlers: ue, | |
urlOrBlob: u.mainScriptUrlOrBlob || r, | |
wasmMemory: oe, | |
wasmModule: ie | |
}); | |
}, | |
allocateUnusedWorker: function () { | |
var F, | |
V = _("tfjs-backend-wasm-threaded-simd.worker.js"); | |
F = new Worker(V), Me.unusedWorkers.push(F); | |
}, | |
getNewWorker: function () { | |
return Me.unusedWorkers.length == 0 && (Me.allocateUnusedWorker(), Me.loadWasmModuleToWorker(Me.unusedWorkers[0])), Me.unusedWorkers.pop(); | |
} | |
}; | |
u.PThread = Me; | |
function ul(F) { | |
for (; F.length > 0;) F.shift()(u); | |
} | |
function $y() { | |
var F = Wm(), | |
V = s()[F + 52 >>> 2], | |
ue = s()[F + 56 >>> 2], | |
Ee = V - ue; | |
b0(V, Ee), Um(V); | |
} | |
u.establishStackSpace = $y; | |
function _m(F) { | |
if (S) return _i(2, 0, F); | |
try { | |
Tm(F); | |
} catch (V) { | |
_y(V); | |
} | |
} | |
var Vp = []; | |
function Ey(F) { | |
var V = Vp[F]; | |
return V || (F >= Vp.length && (Vp.length = F + 1), Vp[F] = V = ar.get(F)), V; | |
} | |
function Ry(F, V) { | |
var ue = Ey(F)(V); | |
Bo() ? Me.setExitStatus(ue) : y0(ue); | |
} | |
u.invokeEntryPoint = Ry; | |
function Dy(F) { | |
Me.tlsInitFunctions.push(F); | |
} | |
function Ay(F) { | |
h0(F, !b, 1, !x), Me.threadInitTLS(); | |
} | |
function Fy(F) { | |
S ? postMessage({ | |
cmd: "cleanupThread", | |
thread: F | |
}) : il(F); | |
} | |
function $m(F, V, ue, Ee) { | |
return S ? _i(3, 1, F, V, ue, Ee) : Em(F, V, ue, Ee); | |
} | |
function Em(F, V, ue, Ee) { | |
if (typeof SharedArrayBuffer == "undefined") return j("Current environment does not support SharedArrayBuffer, pthreads are not available!"), 6; | |
var Be = [], | |
Le = 0; | |
if (S && (Be.length === 0 || Le)) return $m(F, V, ue, Ee); | |
if (Le) return Le; | |
var ge = { | |
startRoutine: ue, | |
pthread_ptr: F, | |
arg: Ee, | |
transferList: Be | |
}; | |
return S ? (ge.cmd = "spawnThread", postMessage(ge, Be), 0) : Ny(ge); | |
} | |
function Py() { | |
return 65536; | |
} | |
var Oy = true; | |
function My() { | |
return Oy; | |
} | |
function pl(F) { | |
Atomics.store(s(), F >> 2, 1), Wm() && x0(F), Atomics.compareExchange(s(), F >> 2, 1, 0); | |
} | |
u.executeNotifiedProxyingQueue = pl; | |
function Ly(F, V, ue, Ee) { | |
if (F == V) setTimeout(() => pl(Ee));else if (S) postMessage({ | |
targetThread: F, | |
cmd: "processProxyingQueue", | |
queue: Ee | |
});else { | |
var Be = Me.pthreads[F]; | |
if (!Be) return; | |
Be.postMessage({ | |
cmd: "processProxyingQueue", | |
queue: Ee | |
}); | |
} | |
return 1; | |
} | |
function By(F, V, ue) { | |
return -1; | |
} | |
function zy() { | |
$u(""); | |
} | |
function Ru(F) { | |
Ru.shown || (Ru.shown = {}), Ru.shown[F] || (Ru.shown[F] = 1, C && (F = "warning: " + F), j(F)); | |
} | |
function Vy() { | |
C || b || Ru("Blocking on the main thread is very dangerous, see https://emscripten.org/docs/porting/pthreads.html#blocking-on-the-main-browser-thread"); | |
} | |
function Wy() { | |
return Date.now(); | |
} | |
function Rm() { | |
return 4294901760; | |
} | |
function Uy() { | |
return Rm(); | |
} | |
var cl; | |
C ? cl = () => { | |
var F = process.hrtime(); | |
return F[0] * 1e3 + F[1] / 1e6; | |
} : cl = () => performance.timeOrigin + performance.now(); | |
function Gy(F, V, ue) { | |
o().copyWithin(F >>> 0, V >>> 0, V + ue >>> 0); | |
} | |
function Hy() { | |
return C ? AB().cpus().length : navigator.hardwareConcurrency; | |
} | |
function Ky(F) { | |
var V = XC(), | |
ue = F(); | |
return Um(V), ue; | |
} | |
function _i(F, V) { | |
var ue = arguments.length - 2, | |
Ee = arguments; | |
return Ky(() => { | |
for (var Be = ue, Le = Gm(Be * 8), ge = Le >> 3, Ne = 0; Ne < ue; Ne++) { | |
var Pt = Ee[2 + Ne]; | |
p()[ge + Ne >>> 0] = Pt; | |
} | |
return g0(F, Be, Le, V); | |
}); | |
} | |
var ll = []; | |
function qy(F, V, ue) { | |
ll.length = V; | |
for (var Ee = ue >> 3, Be = 0; Be < V; Be++) ll[Be] = p()[Ee + Be >>> 0]; | |
var Le = F < 0, | |
ge = Le ? vm[-F - 1] : rb[F]; | |
return ge.apply(null, ll); | |
} | |
function jy(F) { | |
try { | |
return oe.grow(F - Ge.byteLength + 65535 >>> 16), _t(oe.buffer), 1; | |
} catch (V) {} | |
} | |
function Xy(F) { | |
var V = o().length; | |
if (F = F >>> 0, F <= V) return false; | |
var ue = Rm(); | |
if (F > ue) return false; | |
let Ee = (Pt, so) => Pt + (so - Pt % so) % so; | |
for (var Be = 1; Be <= 4; Be *= 2) { | |
var Le = V * (1 + 0.2 / Be); | |
Le = Math.min(Le, F + 100663296); | |
var ge = Math.min(ue, Ee(Math.max(F, Le), 65536)), | |
Ne = jy(ge); | |
if (Ne) return true; | |
} | |
return false; | |
} | |
function Yy() { | |
throw "unwind"; | |
} | |
function Dm(F) { | |
return S ? _i(4, 1, F) : 52; | |
} | |
function Am(F, V, ue, Ee, Be) { | |
return S ? _i(5, 1, F, V, ue, Ee, Be) : 70; | |
} | |
var Qy = [null, [], []]; | |
function Zy(F, V) { | |
var ue = Qy[F]; | |
V === 0 || V === 10 ? ((F === 1 ? U : j)(Fe(ue, 0)), ue.length = 0) : ue.push(V); | |
} | |
// fd_write: minimal WASI write supporting only the buffered stdout/stderr
// sinks. Walks `ue` iovecs at address `V` (each an 8-byte ptr,len pair of
// 32-bit words), emits every byte through Zy, and stores the total byte
// count at address `Ee`. Proxied to the main thread on pthread workers.
function Fm(F, V, ue, Ee) {
  if (S) return _i(6, 1, F, V, ue, Ee);
  for (var Be = 0, Le = 0; Le < ue; Le++) {
    var ge = a()[V >>> 2],      // iov_base
      Ne = a()[V + 4 >>> 2];    // iov_len
    V += 8;
    for (var Pt = 0; Pt < Ne; Pt++) Zy(F, o()[ge + Pt >>> 0]);
    Be += Ne;
  }
  return a()[Ee >>> 2] = Be, 0; // 0 == success
}
function Pm(F) {
  // Look up the exported C function named F; wasm exports live on the
  // Module object under an underscore-prefixed key.
  var exported = u["_" + F];
  return exported;
}
function Jy(F, V) {
  // Copy the byte array F into heap memory at address V.
  var heap8 = e();
  heap8.set(F, V >>> 0);
}
// ccall: invoke exported C function `F` with return type `V`, argument
// types `ue`, argument values `Ee`, and options `Be`. "string" and
// "array" arguments are copied onto the wasm stack for the duration of
// the call; the stack pointer is restored before the return value is
// converted.
function eb(F, V, ue, Ee, Be) {
  // Marshal a JS value into wasm memory, returning a heap pointer.
  var Le = {
    string: Br => {
      var Hp = 0;
      if (Br != null && Br !== 0) {
        // Worst case: 4 UTF-8 bytes per UTF-16 unit plus NUL terminator.
        var S0 = (Br.length << 2) + 1;
        Hp = Gm(S0), lt(Br, Hp, S0);
      }
      return Hp;
    },
    array: Br => {
      var Hp = Gm(Br.length);
      return Jy(Br, Hp), Hp;
    }
  };
  // Convert the raw wasm return value into the requested JS type.
  function ge(Br) {
    return V === "string" ? Pe(Br) : V === "boolean" ? !!Br : Br;
  }
  var Ne = Pm(F),
    Pt = [],
    so = 0; // saved stack pointer; 0 means nothing was stack-allocated
  if (Ee) for (var Ka = 0; Ka < Ee.length; Ka++) {
    var w0 = Le[ue[Ka]];
    // Save the stack lazily, only when the first pointer arg is marshalled.
    w0 ? (so === 0 && (so = XC()), Pt[Ka] = w0(Ee[Ka])) : Pt[Ka] = Ee[Ka];
  }
  var YC = Ne.apply(null, Pt);
  function CG(Br) {
    // Restore the stack (if used) and then convert the return value.
    return so !== 0 && Um(so), ge(Br);
  }
  return YC = CG(YC), YC;
}
function tb(F, V, ue, Ee) {
  // cwrap: return a JS wrapper for exported C function F with return type
  // V, argument types ue, and options Ee. When every argument is numeric,
  // the return is not a string, and no options are given, the raw export
  // is returned directly (fast path); otherwise calls go through eb/ccall.
  var argTypes = ue || [];
  var allNumeric = argTypes.every(function (kind) {
    return kind === "number" || kind === "boolean";
  });
  var numericReturn = V !== "string";
  if (numericReturn && allNumeric && !Ee) {
    return Pm(F);
  }
  return function () {
    return eb(F, V, argTypes, arguments, Ee);
  };
}
Me.init(); // initialize PThread helper state
// Table of JS functions callable cross-thread via the proxying mechanism;
// indexed by the first argument passed to _i / received by qy.
var rb = [null, Nm, _m, $m, Dm, Am, Fm],
  // wasm import object: the env/wasi functions this module links against.
  Om = {
    __emscripten_init_main_thread_js: Ay,
    __emscripten_thread_cleanup: Fy,
    __pthread_create_js: Em,
    _emscripten_default_pthread_stack_size: Py,
    _emscripten_get_now_is_monotonic: My,
    _emscripten_notify_task_queue: Ly,
    _emscripten_set_offscreencanvas_size: By,
    abort: zy,
    emscripten_check_blocking_allowed: Vy,
    emscripten_date_now: Wy,
    emscripten_get_heap_max: Uy,
    emscripten_get_now: cl,
    emscripten_memcpy_big: Gy,
    emscripten_num_logical_cores: Hy,
    emscripten_receive_on_main_thread_js: qy,
    emscripten_resize_heap: Xy,
    emscripten_unwind_to_js_event_loop: Yy,
    exit: Tm,
    fd_close: Dm,
    fd_seek: Am,
    fd_write: Fm,
    memory: oe || u.wasmMemory
  },
  f0 = Iy(), // kick off wasm instantiation
  // Lazy self-replacing wrappers for every wasm export: on first call each
  // wrapper resolves the real export from u.asm and rebinds both its local
  // binding and the Module property, so subsequent calls dispatch straight
  // to the wasm function. Generated code — kept verbatim.
  ob = u.___wasm_call_ctors = function () {
    return (ob = u.___wasm_call_ctors = u.asm.__wasm_call_ctors).apply(null, arguments);
  },
  nb = u._init = function () {
    return (nb = u._init = u.asm.init).apply(null, arguments);
  },
  sb = u._init_with_threads_count = function () {
    return (sb = u._init_with_threads_count = u.asm.init_with_threads_count).apply(null, arguments);
  },
  ab = u._get_threads_count = function () {
    return (ab = u._get_threads_count = u.asm.get_threads_count).apply(null, arguments);
  },
  ib = u._register_tensor = function () {
    return (ib = u._register_tensor = u.asm.register_tensor).apply(null, arguments);
  },
  ub = u._dispose_data = function () {
    return (ub = u._dispose_data = u.asm.dispose_data).apply(null, arguments);
  },
  pb = u._dispose = function () {
    return (pb = u._dispose = u.asm.dispose).apply(null, arguments);
  },
  // Kernel entry points (one per TFJS wasm kernel), alphabetical.
  cb = u._Abs = function () {
    return (cb = u._Abs = u.asm.Abs).apply(null, arguments);
  },
  lb = u._Acos = function () {
    return (lb = u._Acos = u.asm.Acos).apply(null, arguments);
  },
  mb = u._Acosh = function () {
    return (mb = u._Acosh = u.asm.Acosh).apply(null, arguments);
  },
  db = u._Add = function () {
    return (db = u._Add = u.asm.Add).apply(null, arguments);
  },
  fb = u._AddN = function () {
    return (fb = u._AddN = u.asm.AddN).apply(null, arguments);
  },
  hb = u._All = function () {
    return (hb = u._All = u.asm.All).apply(null, arguments);
  },
  gb = u._Any = function () {
    return (gb = u._Any = u.asm.Any).apply(null, arguments);
  },
  xb = u._ArgMax = function () {
    return (xb = u._ArgMax = u.asm.ArgMax).apply(null, arguments);
  },
  yb = u._ArgMin = function () {
    return (yb = u._ArgMin = u.asm.ArgMin).apply(null, arguments);
  },
  bb = u._Asin = function () {
    return (bb = u._Asin = u.asm.Asin).apply(null, arguments);
  },
  Cb = u._Asinh = function () {
    return (Cb = u._Asinh = u.asm.Asinh).apply(null, arguments);
  },
  wb = u._Atan = function () {
    return (wb = u._Atan = u.asm.Atan).apply(null, arguments);
  },
  Sb = u._Atan2 = function () {
    return (Sb = u._Atan2 = u.asm.Atan2).apply(null, arguments);
  },
  Ib = u._Atanh = function () {
    return (Ib = u._Atanh = u.asm.Atanh).apply(null, arguments);
  },
  vb = u._AvgPool = function () {
    return (vb = u._AvgPool = u.asm.AvgPool).apply(null, arguments);
  },
  kb = u._AvgPool3D = function () {
    return (kb = u._AvgPool3D = u.asm.AvgPool3D).apply(null, arguments);
  },
  Nb = u._AvgPool3DGrad = function () {
    return (Nb = u._AvgPool3DGrad = u.asm.AvgPool3DGrad).apply(null, arguments);
  },
  Tb = u._AvgPoolGrad = function () {
    return (Tb = u._AvgPoolGrad = u.asm.AvgPoolGrad).apply(null, arguments);
  },
  _b = u._BatchMatMul = function () {
    return (_b = u._BatchMatMul = u.asm.BatchMatMul).apply(null, arguments);
  },
  $b = u._Bincount = function () {
    return ($b = u._Bincount = u.asm.Bincount).apply(null, arguments);
  },
  Eb = u._BitwiseAnd = function () {
    return (Eb = u._BitwiseAnd = u.asm.BitwiseAnd).apply(null, arguments);
  },
  Rb = u._Ceil = function () {
    return (Rb = u._Ceil = u.asm.Ceil).apply(null, arguments);
  },
  Db = u._ClipByValue = function () {
    return (Db = u._ClipByValue = u.asm.ClipByValue).apply(null, arguments);
  },
  Ab = u._Conv2D = function () {
    return (Ab = u._Conv2D = u.asm.Conv2D).apply(null, arguments);
  },
  Fb = u._Conv2DBackpropInput = function () {
    return (Fb = u._Conv2DBackpropInput = u.asm.Conv2DBackpropInput).apply(null, arguments);
  },
  Pb = u._Conv3D = function () {
    return (Pb = u._Conv3D = u.asm.Conv3D).apply(null, arguments);
  },
  Ob = u._Conv3DBackpropFilterV2 = function () {
    return (Ob = u._Conv3DBackpropFilterV2 = u.asm.Conv3DBackpropFilterV2).apply(null, arguments);
  },
  Mb = u._Conv3DBackpropInputV2 = function () {
    return (Mb = u._Conv3DBackpropInputV2 = u.asm.Conv3DBackpropInputV2).apply(null, arguments);
  },
  Lb = u._Cos = function () {
    return (Lb = u._Cos = u.asm.Cos).apply(null, arguments);
  },
  Bb = u._Cosh = function () {
    return (Bb = u._Cosh = u.asm.Cosh).apply(null, arguments);
  },
  zb = u._CropAndResize = function () {
    return (zb = u._CropAndResize = u.asm.CropAndResize).apply(null, arguments);
  },
  Vb = u._Cumprod = function () {
    return (Vb = u._Cumprod = u.asm.Cumprod).apply(null, arguments);
  },
  Wb = u._Cumsum = function () {
    return (Wb = u._Cumsum = u.asm.Cumsum).apply(null, arguments);
  },
  Ub = u._DenseBincount = function () {
    return (Ub = u._DenseBincount = u.asm.DenseBincount).apply(null, arguments);
  },
  Gb = u._DepthToSpace = function () {
    return (Gb = u._DepthToSpace = u.asm.DepthToSpace).apply(null, arguments);
  },
  Hb = u._DepthwiseConv2dNative = function () {
    return (Hb = u._DepthwiseConv2dNative = u.asm.DepthwiseConv2dNative).apply(null, arguments);
  },
  Kb = u._Diag = function () {
    return (Kb = u._Diag = u.asm.Diag).apply(null, arguments);
  },
  qb = u._Dilation2D = function () {
    return (qb = u._Dilation2D = u.asm.Dilation2D).apply(null, arguments);
  },
  jb = u._Dilation2DBackpropFilter = function () {
    return (jb = u._Dilation2DBackpropFilter = u.asm.Dilation2DBackpropFilter).apply(null, arguments);
  },
  Xb = u._Dilation2DBackpropInput = function () {
    return (Xb = u._Dilation2DBackpropInput = u.asm.Dilation2DBackpropInput).apply(null, arguments);
  },
  Yb = u._Elu = function () {
    return (Yb = u._Elu = u.asm.Elu).apply(null, arguments);
  },
  Qb = u._EluGrad = function () {
    return (Qb = u._EluGrad = u.asm.EluGrad).apply(null, arguments);
  },
  Zb = u._Equal = function () {
    return (Zb = u._Equal = u.asm.Equal).apply(null, arguments);
  },
  Jb = u._Erf = function () {
    return (Jb = u._Erf = u.asm.Erf).apply(null, arguments);
  },
  eC = u._Exp = function () {
    return (eC = u._Exp = u.asm.Exp).apply(null, arguments);
  },
  tC = u._Expm1 = function () {
    return (tC = u._Expm1 = u.asm.Expm1).apply(null, arguments);
  },
  rC = u._FlipLeftRight = function () {
    return (rC = u._FlipLeftRight = u.asm.FlipLeftRight).apply(null, arguments);
  },
  oC = u._Floor = function () {
    return (oC = u._Floor = u.asm.Floor).apply(null, arguments);
  },
  nC = u._FloorDiv = function () {
    return (nC = u._FloorDiv = u.asm.FloorDiv).apply(null, arguments);
  },
  sC = u._FusedBatchNorm = function () {
    return (sC = u._FusedBatchNorm = u.asm.FusedBatchNorm).apply(null, arguments);
  },
  aC = u._FusedConv2D = function () {
    return (aC = u._FusedConv2D = u.asm.FusedConv2D).apply(null, arguments);
  },
  iC = u._FusedDepthwiseConv2D = function () {
    return (iC = u._FusedDepthwiseConv2D = u.asm.FusedDepthwiseConv2D).apply(null, arguments);
  },
  uC = u._Gather = function () {
    return (uC = u._Gather = u.asm.Gather).apply(null, arguments);
  },
  pC = u._GatherNd = function () {
    return (pC = u._GatherNd = u.asm.GatherNd).apply(null, arguments);
  },
  cC = u._Greater = function () {
    return (cC = u._Greater = u.asm.Greater).apply(null, arguments);
  },
  lC = u._GreaterEqual = function () {
    return (lC = u._GreaterEqual = u.asm.GreaterEqual).apply(null, arguments);
  },
  mC = u._IsFinite = function () {
    return (mC = u._IsFinite = u.asm.IsFinite).apply(null, arguments);
  },
  dC = u._IsInf = function () {
    return (dC = u._IsInf = u.asm.IsInf).apply(null, arguments);
  },
  fC = u._IsNan = function () {
    return (fC = u._IsNan = u.asm.IsNan).apply(null, arguments);
  },
  hC = u._LRN = function () {
    return (hC = u._LRN = u.asm.LRN).apply(null, arguments);
  },
  gC = u._LRNGrad = function () {
    return (gC = u._LRNGrad = u.asm.LRNGrad).apply(null, arguments);
  },
  xC = u._LeakyRelu = function () {
    return (xC = u._LeakyRelu = u.asm.LeakyRelu).apply(null, arguments);
  },
  yC = u._Less = function () {
    return (yC = u._Less = u.asm.Less).apply(null, arguments);
  },
  bC = u._LessEqual = function () {
    return (bC = u._LessEqual = u.asm.LessEqual).apply(null, arguments);
  },
  CC = u._LinSpace = function () {
    return (CC = u._LinSpace = u.asm.LinSpace).apply(null, arguments);
  },
  wC = u._Log = function () {
    return (wC = u._Log = u.asm.Log).apply(null, arguments);
  },
  SC = u._Log1p = function () {
    return (SC = u._Log1p = u.asm.Log1p).apply(null, arguments);
  },
  IC = u._LogicalAnd = function () {
    return (IC = u._LogicalAnd = u.asm.LogicalAnd).apply(null, arguments);
  },
  vC = u._LogicalNot = function () {
    return (vC = u._LogicalNot = u.asm.LogicalNot).apply(null, arguments);
  },
  kC = u._LogicalOr = function () {
    return (kC = u._LogicalOr = u.asm.LogicalOr).apply(null, arguments);
  },
  NC = u._LogicalXor = function () {
    return (NC = u._LogicalXor = u.asm.LogicalXor).apply(null, arguments);
  },
  TC = u._Max = function () {
    return (TC = u._Max = u.asm.Max).apply(null, arguments);
  },
  _C = u._MaxPool = function () {
    return (_C = u._MaxPool = u.asm.MaxPool).apply(null, arguments);
  },
  $C = u._MaxPool3D = function () {
    return ($C = u._MaxPool3D = u.asm.MaxPool3D).apply(null, arguments);
  },
  EC = u._MaxPool3DGrad = function () {
    return (EC = u._MaxPool3DGrad = u.asm.MaxPool3DGrad).apply(null, arguments);
  },
  RC = u._MaxPoolGrad = function () {
    return (RC = u._MaxPoolGrad = u.asm.MaxPoolGrad).apply(null, arguments);
  },
  DC = u._MaxPoolWithArgmax = function () {
    return (DC = u._MaxPoolWithArgmax = u.asm.MaxPoolWithArgmax).apply(null, arguments);
  },
  AC = u._Maximum = function () {
    return (AC = u._Maximum = u.asm.Maximum).apply(null, arguments);
  },
  FC = u._Mean = function () {
    return (FC = u._Mean = u.asm.Mean).apply(null, arguments);
  },
  PC = u._Min = function () {
    return (PC = u._Min = u.asm.Min).apply(null, arguments);
  },
  OC = u._Minimum = function () {
    return (OC = u._Minimum = u.asm.Minimum).apply(null, arguments);
  },
  MC = u._MirrorPad = function () {
    return (MC = u._MirrorPad = u.asm.MirrorPad).apply(null, arguments);
  },
  LC = u._Mod = function () {
    return (LC = u._Mod = u.asm.Mod).apply(null, arguments);
  },
  BC = u._Multinomial = function () {
    return (BC = u._Multinomial = u.asm.Multinomial).apply(null, arguments);
  },
  zC = u._Multiply = function () {
    return (zC = u._Multiply = u.asm.Multiply).apply(null, arguments);
  },
  VC = u._Neg = function () {
    return (VC = u._Neg = u.asm.Neg).apply(null, arguments);
  },
  WC = u._NonMaxSuppressionV3 = function () {
    return (WC = u._NonMaxSuppressionV3 = u.asm.NonMaxSuppressionV3).apply(null, arguments);
  },
  UC = u._NonMaxSuppressionV4 = function () {
    return (UC = u._NonMaxSuppressionV4 = u.asm.NonMaxSuppressionV4).apply(null, arguments);
  },
  Mm = u._NonMaxSuppressionV5 = function () {
    return (Mm = u._NonMaxSuppressionV5 = u.asm.NonMaxSuppressionV5).apply(null, arguments);
  },
  Lm = u._NotEqual = function () {
    return (Lm = u._NotEqual = u.asm.NotEqual).apply(null, arguments);
  },
  ml = u._OneHot = function () {
    return (ml = u._OneHot = u.asm.OneHot).apply(null, arguments);
  },
  GC = u._PadV2 = function () {
    return (GC = u._PadV2 = u.asm.PadV2).apply(null, arguments);
  },
  HC = u._Pow = function () {
    return (HC = u._Pow = u.asm.Pow).apply(null, arguments);
  },
  Wp = u._Prelu = function () {
    return (Wp = u._Prelu = u.asm.Prelu).apply(null, arguments);
  },
  Bm = u._Prod = function () {
    return (Bm = u._Prod = u.asm.Prod).apply(null, arguments);
  },
  Up = u._RealDiv = function () {
    return (Up = u._RealDiv = u.asm.RealDiv).apply(null, arguments);
  },
  Gp = u._Reciprocal = function () {
    return (Gp = u._Reciprocal = u.asm.Reciprocal).apply(null, arguments);
  },
  KC = u._Relu = function () {
    return (KC = u._Relu = u.asm.Relu).apply(null, arguments);
  },
  K = u._Relu6 = function () {
    return (K = u._Relu6 = u.asm.Relu6).apply(null, arguments);
  },
  ae = u._ResizeBilinear = function () {
    return (ae = u._ResizeBilinear = u.asm.ResizeBilinear).apply(null, arguments);
  },
  $e = u._ResizeBilinearGrad = function () {
    return ($e = u._ResizeBilinearGrad = u.asm.ResizeBilinearGrad).apply(null, arguments);
  },
  at = u._ResizeNearestNeighbor = function () {
    return (at = u._ResizeNearestNeighbor = u.asm.ResizeNearestNeighbor).apply(null, arguments);
  },
  $t = u._ResizeNearestNeighborGrad = function () {
    return ($t = u._ResizeNearestNeighborGrad = u.asm.ResizeNearestNeighborGrad).apply(null, arguments);
  },
  Et = u._Reverse = function () {
    return (Et = u._Reverse = u.asm.Reverse).apply(null, arguments);
  },
  Qe = u._RotateWithOffset = function () {
    return (Qe = u._RotateWithOffset = u.asm.RotateWithOffset).apply(null, arguments);
  },
  Ke = u._Round = function () {
    return (Ke = u._Round = u.asm.Round).apply(null, arguments);
  },
  Gt = u._Rsqrt = function () {
    return (Gt = u._Rsqrt = u.asm.Rsqrt).apply(null, arguments);
  },
  no = u._ScatterNd = function () {
    return (no = u._ScatterNd = u.asm.ScatterNd).apply(null, arguments);
  },
  Ha = u._SearchSorted = function () {
    return (Ha = u._SearchSorted = u.asm.SearchSorted).apply(null, arguments);
  },
  zm = u._SelectV2 = function () {
    return (zm = u._SelectV2 = u.asm.SelectV2).apply(null, arguments);
  },
  dl = u._Selu = function () {
    return (dl = u._Selu = u.asm.Selu).apply(null, arguments);
  },
  qC = u._Sigmoid = function () {
    return (qC = u._Sigmoid = u.asm.Sigmoid).apply(null, arguments);
  },
  br = u._Sign = function () {
    return (br = u._Sign = u.asm.Sign).apply(null, arguments);
  },
  $i = u._Sin = function () {
    return ($i = u._Sin = u.asm.Sin).apply(null, arguments);
  },
  Vm = u._Sinh = function () {
    return (Vm = u._Sinh = u.asm.Sinh).apply(null, arguments);
  },
  WU = u._Softmax = function () {
    return (WU = u._Softmax = u.asm.Softmax).apply(null, arguments);
  },
  UU = u._Softplus = function () {
    return (UU = u._Softplus = u.asm.Softplus).apply(null, arguments);
  },
  GU = u._SparseFillEmptyRows = function () {
    return (GU = u._SparseFillEmptyRows = u.asm.SparseFillEmptyRows).apply(null, arguments);
  },
  HU = u._SparseReshape = function () {
    return (HU = u._SparseReshape = u.asm.SparseReshape).apply(null, arguments);
  },
  KU = u._SparseSegmentReduction = function () {
    return (KU = u._SparseSegmentReduction = u.asm.SparseSegmentReduction).apply(null, arguments);
  },
  qU = u._SparseToDense = function () {
    return (qU = u._SparseToDense = u.asm.SparseToDense).apply(null, arguments);
  },
  jU = u._Sqrt = function () {
    return (jU = u._Sqrt = u.asm.Sqrt).apply(null, arguments);
  },
  XU = u._Square = function () {
    return (XU = u._Square = u.asm.Square).apply(null, arguments);
  },
  YU = u._SquaredDifference = function () {
    return (YU = u._SquaredDifference = u.asm.SquaredDifference).apply(null, arguments);
  },
  QU = u._Step = function () {
    return (QU = u._Step = u.asm.Step).apply(null, arguments);
  },
  ZU = u._StridedSlice = function () {
    return (ZU = u._StridedSlice = u.asm.StridedSlice).apply(null, arguments);
  },
  JU = u._Sub = function () {
    return (JU = u._Sub = u.asm.Sub).apply(null, arguments);
  },
  eG = u._Sum = function () {
    return (eG = u._Sum = u.asm.Sum).apply(null, arguments);
  },
  tG = u._Tan = function () {
    return (tG = u._Tan = u.asm.Tan).apply(null, arguments);
  },
  rG = u._Tanh = function () {
    return (rG = u._Tanh = u.asm.Tanh).apply(null, arguments);
  },
  oG = u._TensorScatterUpdate = function () {
    return (oG = u._TensorScatterUpdate = u.asm.TensorScatterUpdate).apply(null, arguments);
  },
  nG = u._Tile = function () {
    return (nG = u._Tile = u.asm.Tile).apply(null, arguments);
  },
  sG = u._TopK = function () {
    return (sG = u._TopK = u.asm.TopK).apply(null, arguments);
  },
  aG = u._Transform = function () {
    return (aG = u._Transform = u.asm.Transform).apply(null, arguments);
  },
  iG = u._Transpose = function () {
    return (iG = u._Transpose = u.asm.Transpose).apply(null, arguments);
  },
  uG = u.__FusedMatMul = function () {
    return (uG = u.__FusedMatMul = u.asm._FusedMatMul).apply(null, arguments);
  },
  // Runtime/support exports (allocator, pthread plumbing, stack helpers).
  pG = u._malloc = function () {
    return (pG = u._malloc = u.asm.malloc).apply(null, arguments);
  },
  cG = u._free = function () {
    return (cG = u._free = u.asm.free).apply(null, arguments);
  },
  lG = u.__emscripten_tls_init = function () {
    return (lG = u.__emscripten_tls_init = u.asm._emscripten_tls_init).apply(null, arguments);
  },
  Wm = u._pthread_self = function () {
    return (Wm = u._pthread_self = u.asm.pthread_self).apply(null, arguments);
  },
  mG = u.___errno_location = function () {
    return (mG = u.___errno_location = u.asm.__errno_location).apply(null, arguments);
  },
  h0 = u.__emscripten_thread_init = function () {
    return (h0 = u.__emscripten_thread_init = u.asm._emscripten_thread_init).apply(null, arguments);
  },
  dG = u.__emscripten_thread_crashed = function () {
    return (dG = u.__emscripten_thread_crashed = u.asm._emscripten_thread_crashed).apply(null, arguments);
  },
  fG = u._emscripten_main_thread_process_queued_calls = function () {
    return (fG = u._emscripten_main_thread_process_queued_calls = u.asm.emscripten_main_thread_process_queued_calls).apply(null, arguments);
  },
  hG = u._emscripten_main_browser_thread_id = function () {
    return (hG = u._emscripten_main_browser_thread_id = u.asm.emscripten_main_browser_thread_id).apply(null, arguments);
  },
  g0 = u._emscripten_run_in_main_runtime_thread_js = function () {
    return (g0 = u._emscripten_run_in_main_runtime_thread_js = u.asm.emscripten_run_in_main_runtime_thread_js).apply(null, arguments);
  },
  gG = u._emscripten_dispatch_to_thread_ = function () {
    return (gG = u._emscripten_dispatch_to_thread_ = u.asm.emscripten_dispatch_to_thread_).apply(null, arguments);
  },
  x0 = u.__emscripten_proxy_execute_task_queue = function () {
    return (x0 = u.__emscripten_proxy_execute_task_queue = u.asm._emscripten_proxy_execute_task_queue).apply(null, arguments);
  },
  jC = u.__emscripten_thread_free_data = function () {
    return (jC = u.__emscripten_thread_free_data = u.asm._emscripten_thread_free_data).apply(null, arguments);
  },
  y0 = u.__emscripten_thread_exit = function () {
    return (y0 = u.__emscripten_thread_exit = u.asm._emscripten_thread_exit).apply(null, arguments);
  },
  b0 = u._emscripten_stack_set_limits = function () {
    return (b0 = u._emscripten_stack_set_limits = u.asm.emscripten_stack_set_limits).apply(null, arguments);
  },
  XC = u.stackSave = function () {
    return (XC = u.stackSave = u.asm.stackSave).apply(null, arguments);
  },
  Um = u.stackRestore = function () {
    return (Um = u.stackRestore = u.asm.stackRestore).apply(null, arguments);
  },
  Gm = u.stackAlloc = function () {
    return (Gm = u.stackAlloc = u.asm.stackAlloc).apply(null, arguments);
  },
  xG = u.dynCall_iijjiiii = function () {
    return (xG = u.dynCall_iijjiiii = u.asm.dynCall_iijjiiii).apply(null, arguments);
  },
  yG = u.dynCall_jiji = function () {
    return (yG = u.dynCall_jiji = u.asm.dynCall_jiji).apply(null, arguments);
  };
// Publish runtime helpers on the Module object (used by worker.js and by
// the backend's post.js glue).
u.keepRuntimeAlive = Bo, u.wasmMemory = oe, u.cwrap = tb, u.ExitStatus = Eu, u.PThread = Me;
var Hm; // latch: set once the runtime has actually been started by C0
// Dependencies-fulfilled hook: retry run() whenever pending run
// dependencies resolve, until it succeeds.
Ga = function F() {
  Hm || C0(), Hm || (Ga = F);
};
// run(): start the runtime once all run dependencies (Ti) have resolved.
// On pthread workers (S) the main-thread bootstrap is skipped; the worker
// just initializes and signals readiness to the spawning thread.
function C0(F) {
  if (F = F || f, Ti > 0) return; // still waiting on async loads
  if (S) {
    c(u), Yt(), startWorker(u);
    return;
  }
  if (Ks(), Ti > 0) return; // preRun callbacks may add new dependencies
  function V() {
    // Hm guards double entry; le (abort flag) suppresses the callbacks.
    Hm || (Hm = true, u.calledRun = true, !le && (Yt(), c(u), u.onRuntimeInitialized && u.onRuntimeInitialized(), Ua()));
  }
  // If a status UI is installed, yield to let "Running..." paint first.
  u.setStatus ? (u.setStatus("Running..."), setTimeout(function () {
    setTimeout(function () {
      u.setStatus("");
    }, 1), V();
  }, 1)) : V();
}
// Drain user-supplied preInit callbacks (LIFO), then start the runtime.
if (u.preInit) for (typeof u.preInit == "function" && (u.preInit = [u.preInit]); u.preInit.length > 0;) u.preInit.pop()();
C0();
var Km;
// Snapshot the process listeners that Emscripten added, so _dispose can
// remove them later (Node only).
// NOTE(review): `!m.uncaughtException.indexOf(F) > -1` parses as
// `(!indexOf(F)) > -1`, which is always true, so these filters keep every
// listener. This is upstream emscripten's long-standing precedence quirk;
// left untouched because this generated bundle is regenerated wholesale.
m && (Km = {
  uncaughtException: process.listeners("uncaughtException").filter(function (F) {
    return !m.uncaughtException.indexOf(F) > -1;
  }),
  unhandledRejection: process.listeners("unhandledRejection").filter(function (F) {
    return !m.unhandledRejection.indexOf(F) > -1;
  })
});
var qm;
// Resolve the module object created by the surrounding factory.
if (typeof WasmBackendModule != "undefined") qm = WasmBackendModule;else if (typeof t8 != "undefined") qm = t8;else throw new Error("Could not find wasm module in post.js");
if (Km) {
  // Wrap _dispose so tearing down the backend also detaches the process
  // listeners captured above.
  var bG = qm._dispose;
  qm._dispose = function () {
    bG(), Km.uncaughtException.forEach(function (F) {
      process.removeListener("uncaughtException", F);
    }), Km.unhandledRejection.forEach(function (F) {
      process.removeListener("unhandledRejection", F);
    });
  };
}
return t8.ready; | |
}; | |
})(); | |
// UMD-style export of the threaded-SIMD wasm factory (Vv): CommonJS
// module.exports, then AMD define, then a global property on Wg.
typeof Wg == "object" && typeof Wv == "object" ? Wv.exports = Vv : typeof define == "function" && __webpack_require__.amdO ? define([], function () {
  return Vv;
}) : typeof Wg == "object" && (Wg.WasmBackendModuleThreadedSimd = Vv);
}); | |
// Source of the pthread worker script, exported as a string so the backend
// can spawn workers from a Blob URL. The template literal below is
// generated emscripten worker.js, kept byte-for-byte verbatim — it
// deliberately contains literal newlines inside the string.
var OB = qt((UFt, PB) => {
  PB.exports.wasmWorkerContents = `"use strict";var Module={};var ENVIRONMENT_IS_NODE=typeof process=="object"&&typeof process.versions=="object"&&typeof process.versions.node=="string";if(ENVIRONMENT_IS_NODE){var nodeWorkerThreads=require("worker_threads");var parentPort=nodeWorkerThreads.parentPort;parentPort.on("message",data=>onmessage({data:data}));var fs=require("fs");Object.assign(global,{self:global,require:require,Module:Module,location:{href:__filename},Worker:nodeWorkerThreads.Worker,importScripts:function(f){(0,eval)(fs.readFileSync(f,"utf8")+"//# sourceURL="+f)},postMessage:function(msg){parentPort.postMessage(msg)},performance:global.performance||{now:function(){return Date.now()}}})}var initializedJS=false;var pendingNotifiedProxyingQueues=[];function threadPrintErr(){var text=Array.prototype.slice.call(arguments).join(" ");if(ENVIRONMENT_IS_NODE){fs.writeSync(2,text+"
");return}console.error(text)}function threadAlert(){var text=Array.prototype.slice.call(arguments).join(" ");postMessage({cmd:"alert",text:text,threadId:Module["_pthread_self"]()})}var err=threadPrintErr;self.alert=threadAlert;Module["instantiateWasm"]=(info,receiveInstance)=>{var instance=new WebAssembly.Instance(Module["wasmModule"],info);receiveInstance(instance);Module["wasmModule"]=null;return instance.exports};self.onunhandledrejection=e=>{throw e.reason??e};self.startWorker=instance=>{Module=instance;postMessage({"cmd":"loaded"})};self.onmessage=e=>{try{if(e.data.cmd==="load"){Module["wasmModule"]=e.data.wasmModule;for(const handler of e.data.handlers){Module[handler]=function(){postMessage({cmd:"callHandler",handler:handler,args:[...arguments]})}}Module["wasmMemory"]=e.data.wasmMemory;Module["buffer"]=Module["wasmMemory"].buffer;Module["ENVIRONMENT_IS_PTHREAD"]=true;if(typeof e.data.urlOrBlob=="string"){importScripts(e.data.urlOrBlob)}else{var objectUrl=URL.createObjectURL(e.data.urlOrBlob);importScripts(objectUrl);URL.revokeObjectURL(objectUrl)}WasmBackendModuleThreadedSimd(Module)}else if(e.data.cmd==="run"){Module["__emscripten_thread_init"](e.data.pthread_ptr,0,0,1);Module["establishStackSpace"]();Module["PThread"].receiveObjectTransfer(e.data);Module["PThread"].threadInitTLS();if(!initializedJS){pendingNotifiedProxyingQueues.forEach(queue=>{Module["executeNotifiedProxyingQueue"](queue)});pendingNotifiedProxyingQueues=[];initializedJS=true}try{Module["invokeEntryPoint"](e.data.start_routine,e.data.arg)}catch(ex){if(ex!="unwind"){if(ex instanceof Module["ExitStatus"]){if(Module["keepRuntimeAlive"]()){}else{Module["__emscripten_thread_exit"](ex.status)}}else{throw ex}}}}else if(e.data.cmd==="cancel"){if(Module["_pthread_self"]()){Module["__emscripten_thread_exit"](-1)}}else if(e.data.target==="setimmediate"){}else 
if(e.data.cmd==="processProxyingQueue"){if(initializedJS){Module["executeNotifiedProxyingQueue"](e.data.queue)}else{pendingNotifiedProxyingQueues.push(e.data.queue)}}else if(e.data.cmd){err("worker.js received unknown command "+e.data.cmd);err(e.data)}}catch(ex){if(Module["__emscripten_thread_crashed"]){Module["__emscripten_thread_crashed"]()}throw ex}};`;
});
var MB = qt((Ug, Gv) => { | |
var Uv = (() => { | |
var r = typeof document != "undefined" && document.currentScript ? document.currentScript.src : void 0; | |
return true && (r = r || __filename), function (t8) { | |
t8 = t8 || {}; | |
var e = typeof t8 != "undefined" ? t8 : {}, | |
o, | |
n; | |
e.ready = new Promise(function (K, ae) { | |
o = K, n = ae; | |
}); | |
var s; | |
typeof process != "undefined" && process.listeners && (s = { | |
uncaughtException: process.listeners("uncaughtException"), | |
unhandledRejection: process.listeners("unhandledRejection") | |
}); | |
var a = Object.assign({}, e), | |
i = [], | |
p = "./this.program", | |
u = (K, ae) => { | |
throw ae; | |
}, | |
c = typeof window == "object", | |
l = typeof importScripts == "function", | |
m = typeof process == "object" && typeof process.versions == "object" && typeof process.versions.node == "string", | |
d = ""; | |
function f(K) {
  // Resolve a runtime file path, honoring a user-supplied
  // Module.locateFile hook; otherwise prefix the script directory `d`.
  if (e.locateFile) {
    return e.locateFile(K, d);
  }
  return d + K;
}
var h, g, x, b; | |
// Log unexpected exceptions during exit; ExitStatus (Bp) is the normal
// quit path and is silently ignored.
function C(K) {
  if (K instanceof Bp) return;
  E("exiting due to exception: " + K);
}
// Environment-specific file loading and process wiring.
// Node (m): read files via fs/path, forward fatal events, and route quit
// through process.exit. Browser or worker (c || l): derive the script
// directory and read files via XMLHttpRequest (sync for text/binary,
// async with callbacks for g).
if (m) {
  var S = Bv(),
    k = zv();
  l ? d = k.dirname(d) + "/" : d = __dirname + "/", h = (K, ae) => (K = Ks(K) ? new URL(K) : k.normalize(K), S.readFileSync(K, ae ? void 0 : "utf8")), x = K => {
    var ae = h(K, true);
    return ae.buffer || (ae = new Uint8Array(ae)), ae;
  }, g = (K, ae, $e) => {
    K = Ks(K) ? new URL(K) : k.normalize(K), S.readFile(K, function (at, $t) {
      at ? $e(at) : ae($t.buffer);
    });
  }, process.argv.length > 1 && (p = process.argv[1].replace(/\\/g, "/")), i = process.argv.slice(2), process.on("uncaughtException", function (K) {
    if (!(K instanceof Bp)) throw K;
  }), process.on("unhandledRejection", function (K) {
    throw K;
  }), u = (K, ae) => {
    if (it()) throw process.exitCode = K, ae;
    C(ae), process.exit(K);
  }, e.inspect = function () {
    return "[Emscripten Module object]";
  };
} else (c || l) && (l ? d = self.location.href : typeof document != "undefined" && document.currentScript && (d = document.currentScript.src), r && (d = r), d.indexOf("blob:") !== 0 ? d = d.substr(0, d.replace(/[?#].*/, "").lastIndexOf("/") + 1) : d = "", h = K => {
  var ae = new XMLHttpRequest();
  return ae.open("GET", K, false), ae.send(null), ae.responseText;
}, l && (x = K => {
  var ae = new XMLHttpRequest();
  return ae.open("GET", K, false), ae.responseType = "arraybuffer", ae.send(null), new Uint8Array(ae.response);
}), g = (K, ae, $e) => {
  var at = new XMLHttpRequest();
  at.open("GET", K, true), at.responseType = "arraybuffer", at.onload = () => {
    if (at.status == 200 || at.status == 0 && at.response) {
      ae(at.response);
      return;
    }
    $e();
  }, at.onerror = $e, at.send(null);
}, b = K => document.title = K);
// stdout/stderr sinks, overridable via Module.print / Module.printErr.
var _ = e.print || console.log.bind(console),
  E = e.printErr || console.warn.bind(console);
// Apply user overrides captured before the factory ran, then drop them.
Object.assign(e, a), a = null, e.arguments && (i = e.arguments), e.thisProgram && (p = e.thisProgram), e.quit && (u = e.quit);
var R = 4,
  D;
e.wasmBinary && (D = e.wasmBinary);
// NOTE(review): `e.noExitRuntime || true` is always true (upstream
// emscripten artifact for -sEXIT_RUNTIME=0 builds); kept as generated.
var P = e.noExitRuntime || true;
typeof WebAssembly != "object" && hr("no native wasm support detected");
var O,
  M = false, // ABORT flag, set by hr()
  L; // exit status
// Abort with message `ae` when the condition K is falsy.
function B(K, ae) {
  K || hr(ae);
}
// Shared UTF-8 decoder, used by U() for long strings when available.
var z = typeof TextDecoder != "undefined" ? new TextDecoder("utf8") : void 0;
// Decode a UTF-8 byte sequence from heap array K starting at offset `ae`
// (at most $e bytes, stopping at NUL). Long runs (> 16 bytes) use
// TextDecoder when available; otherwise a manual 1–4 byte decoder that
// emits surrogate pairs for code points above U+FFFF.
function U(K, ae, $e) {
  ae >>>= 0;
  // Find the end: first NUL byte, capped at ae + $e.
  for (var at = ae + $e, $t = ae; K[$t] && !($t >= at);) ++$t;
  if ($t - ae > 16 && K.buffer && z) return z.decode(K.subarray(ae, $t));
  for (var Et = ""; ae < $t;) {
    var Qe = K[ae++];
    if (!(Qe & 128)) {
      // 1-byte sequence (ASCII).
      Et += String.fromCharCode(Qe);
      continue;
    }
    var Ke = K[ae++] & 63;
    if ((Qe & 224) == 192) {
      // 2-byte sequence.
      Et += String.fromCharCode((Qe & 31) << 6 | Ke);
      continue;
    }
    // 3- or 4-byte sequence; 4-byte results become a surrogate pair.
    var Gt = K[ae++] & 63;
    if ((Qe & 240) == 224 ? Qe = (Qe & 15) << 12 | Ke << 6 | Gt : Qe = (Qe & 7) << 18 | Ke << 12 | Gt << 6 | K[ae++] & 63, Qe < 65536) Et += String.fromCharCode(Qe);else {
      var no = Qe - 65536;
      Et += String.fromCharCode(55296 | no >> 10, 56320 | no & 1023);
    }
  }
  return Et;
}
function j(K, ae) {
  // Read a NUL-terminated UTF-8 C string at heap address K, scanning at
  // most `ae` bytes; a null pointer yields the empty string.
  K >>>= 0;
  if (!K) {
    return "";
  }
  return U(ne, K, ae);
}
// Encode string K as UTF-8 into byte array `ae` at offset $e, writing at
// most `at` bytes including the NUL terminator. Surrogate pairs in K are
// combined into a single code point before encoding. Returns the number
// of bytes written, excluding the terminator.
function q(K, ae, $e, at) {
  if ($e >>>= 0, !(at > 0)) return 0;
  for (var $t = $e, Et = $e + at - 1, Qe = 0; Qe < K.length; ++Qe) {
    var Ke = K.charCodeAt(Qe);
    if (Ke >= 55296 && Ke <= 57343) {
      // Combine a surrogate pair into one code point.
      var Gt = K.charCodeAt(++Qe);
      Ke = 65536 + ((Ke & 1023) << 10) | Gt & 1023;
    }
    if (Ke <= 127) {
      if ($e >= Et) break; // leave room for the NUL terminator
      ae[$e++ >>> 0] = Ke;
    } else if (Ke <= 2047) {
      if ($e + 1 >= Et) break;
      ae[$e++ >>> 0] = 192 | Ke >> 6, ae[$e++ >>> 0] = 128 | Ke & 63;
    } else if (Ke <= 65535) {
      if ($e + 2 >= Et) break;
      ae[$e++ >>> 0] = 224 | Ke >> 12, ae[$e++ >>> 0] = 128 | Ke >> 6 & 63, ae[$e++ >>> 0] = 128 | Ke & 63;
    } else {
      if ($e + 3 >= Et) break;
      ae[$e++ >>> 0] = 240 | Ke >> 18, ae[$e++ >>> 0] = 128 | Ke >> 12 & 63, ae[$e++ >>> 0] = 128 | Ke >> 6 & 63, ae[$e++ >>> 0] = 128 | Ke & 63;
    }
  }
  return ae[$e >>> 0] = 0, $e - $t;
}
function Y(K, ae, $e) { | |
return q(K, ne, ae, $e); | |
} | |
var J, re, ne, ee, oe, ie, le, be, _e; | |
function ve(K) { | |
J = K, e.HEAP8 = re = new Int8Array(K), e.HEAP16 = ee = new Int16Array(K), e.HEAP32 = ie = new Int32Array(K), e.HEAPU8 = ne = new Uint8Array(K), e.HEAPU16 = oe = new Uint16Array(K), e.HEAPU32 = le = new Uint32Array(K), e.HEAPF32 = be = new Float32Array(K), e.HEAPF64 = _e = new Float64Array(K); | |
} | |
var Fe = e.INITIAL_MEMORY || 16777216, | |
Pe, | |
st = [], | |
lt = [], | |
Ge = [], | |
mt = false; | |
function it() { | |
return P; | |
} | |
function gt() { | |
if (e.preRun) for (typeof e.preRun == "function" && (e.preRun = [e.preRun]); e.preRun.length;) Lt(e.preRun.shift()); | |
Ga(st); | |
} | |
function xt() { | |
mt = true, Ga(lt); | |
} | |
function Lr() { | |
if (e.postRun) for (typeof e.postRun == "function" && (e.postRun = [e.postRun]); e.postRun.length;) nr(e.postRun.shift()); | |
Ga(Ge); | |
} | |
function Lt(K) { | |
st.unshift(K); | |
} | |
function to(K) { | |
lt.unshift(K); | |
} | |
function nr(K) { | |
Ge.unshift(K); | |
} | |
var _t = 0, | |
sr = null, | |
ar = null; | |
function ro(K) { | |
_t++, e.monitorRunDependencies && e.monitorRunDependencies(_t); | |
} | |
function oo(K) { | |
if (_t--, e.monitorRunDependencies && e.monitorRunDependencies(_t), _t == 0 && (sr !== null && (clearInterval(sr), sr = null), ar)) { | |
var ae = ar; | |
ar = null, ae(); | |
} | |
} | |
function hr(K) { | |
e.onAbort && e.onAbort(K), K = "Aborted(" + K + ")", E(K), M = true, L = 1, K += ". Build with -sASSERTIONS for more info."; | |
var ae = new WebAssembly.RuntimeError(K); | |
throw n(ae), ae; | |
} | |
var Wa = "data:application/octet-stream;base64,"; | |
function Bo(K) { | |
return K.startsWith(Wa); | |
} | |
function Ks(K) { | |
return K.startsWith("file://"); | |
} | |
var Yt; | |
Yt = "tfjs-backend-wasm.wasm", Bo(Yt) || (Yt = f(Yt)); | |
function Ua(K) { | |
try { | |
if (K == Yt && D) return new Uint8Array(D); | |
if (x) return x(K); | |
throw "both async and sync fetching of the wasm failed"; | |
} catch (ae) { | |
hr(ae); | |
} | |
} | |
function sl() { | |
if (!D && (c || l)) { | |
if (typeof fetch == "function" && !Ks(Yt)) return fetch(Yt, { | |
credentials: "same-origin" | |
}).then(function (K) { | |
if (!K.ok) throw "failed to load wasm binary file at '" + Yt + "'"; | |
return K.arrayBuffer(); | |
}).catch(function () { | |
return Ua(Yt); | |
}); | |
if (g) return new Promise(function (K, ae) { | |
g(Yt, function ($e) { | |
K(new Uint8Array($e)); | |
}, ae); | |
}); | |
} | |
return Promise.resolve().then(function () { | |
return Ua(Yt); | |
}); | |
} | |
function al() { | |
var K = { | |
env: il, | |
wasi_snapshot_preview1: il | |
}; | |
function ae(Qe, Ke) { | |
var Gt = Qe.exports; | |
e.asm = Gt, O = e.asm.memory, ve(O.buffer), Pe = e.asm.__indirect_function_table, to(e.asm.__wasm_call_ctors), oo("wasm-instantiate"); | |
} | |
ro("wasm-instantiate"); | |
function $e(Qe) { | |
ae(Qe.instance); | |
} | |
function at(Qe) { | |
return sl().then(function (Ke) { | |
return WebAssembly.instantiate(Ke, K); | |
}).then(function (Ke) { | |
return Ke; | |
}).then(Qe, function (Ke) { | |
E("failed to asynchronously prepare wasm: " + Ke), hr(Ke); | |
}); | |
} | |
function $t() { | |
return !D && typeof WebAssembly.instantiateStreaming == "function" && !Bo(Yt) && !Ks(Yt) && !m && typeof fetch == "function" ? fetch(Yt, { | |
credentials: "same-origin" | |
}).then(function (Qe) { | |
var Ke = WebAssembly.instantiateStreaming(Qe, K); | |
return Ke.then($e, function (Gt) { | |
return E("wasm streaming compile failed: " + Gt), E("falling back to ArrayBuffer instantiation"), at($e); | |
}); | |
}) : at($e); | |
} | |
if (e.instantiateWasm) try { | |
var Et = e.instantiateWasm(K, ae); | |
return Et; | |
} catch (Qe) { | |
E("Module.instantiateWasm callback failed with error: " + Qe), n(Qe); | |
} | |
return $t().catch(n), {}; | |
} | |
var l0, Ti; | |
function Bp(K) { | |
this.name = "ExitStatus", this.message = "Program terminated with exit(" + K + ")", this.status = K; | |
} | |
function Ga(K) { | |
for (; K.length > 0;) K.shift()(e); | |
} | |
function Cy() { | |
hr(""); | |
} | |
function wm() { | |
return 4294901760; | |
} | |
function $u() { | |
return wm(); | |
} | |
function wy(K, ae, $e) { | |
ne.copyWithin(K >>> 0, ae >>> 0, ae + $e >>> 0); | |
} | |
function Sm(K) { | |
try { | |
return O.grow(K - J.byteLength + 65535 >>> 16), ve(O.buffer), 1; | |
} catch (ae) {} | |
} | |
function zp(K) { | |
var ae = ne.length; | |
K = K >>> 0; | |
var $e = wm(); | |
if (K > $e) return false; | |
let at = (Gt, no) => Gt + (no - Gt % no) % no; | |
for (var $t = 1; $t <= 4; $t *= 2) { | |
var Et = ae * (1 + 0.2 / $t); | |
Et = Math.min(Et, K + 100663296); | |
var Qe = Math.min($e, at(Math.max(K, Et), 65536)), | |
Ke = Sm(Qe); | |
if (Ke) return true; | |
} | |
return false; | |
} | |
var gr = { | |
varargs: void 0, | |
get: function () { | |
gr.varargs += 4; | |
var K = ie[gr.varargs - 4 >>> 2]; | |
return K; | |
}, | |
getStr: function (K) { | |
var ae = j(K); | |
return ae; | |
} | |
}; | |
function Im(K) { | |
return 52; | |
} | |
function Sy(K, ae, $e, at, $t) { | |
return 70; | |
} | |
var Iy = [null, [], []]; | |
function m0(K, ae) { | |
var $e = Iy[K]; | |
ae === 0 || ae === 10 ? ((K === 1 ? _ : E)(U($e, 0)), $e.length = 0) : $e.push(ae); | |
} | |
function d0(K, ae, $e, at) { | |
for (var $t = 0, Et = 0; Et < $e; Et++) { | |
var Qe = le[ae >>> 2], | |
Ke = le[ae + 4 >>> 2]; | |
ae += 8; | |
for (var Gt = 0; Gt < Ke; Gt++) m0(K, ne[Qe + Gt >>> 0]); | |
$t += Ke; | |
} | |
return le[at >>> 2] = $t, 0; | |
} | |
function vm(K) { | |
var ae = e["_" + K]; | |
return ae; | |
} | |
function Eu(K, ae) { | |
re.set(K, ae >>> 0); | |
} | |
function vy(K, ae, $e, at, $t) { | |
var Et = { | |
string: br => { | |
var $i = 0; | |
if (br != null && br !== 0) { | |
var Vm = (br.length << 2) + 1; | |
$i = ml(Vm), Y(br, $i, Vm); | |
} | |
return $i; | |
}, | |
array: br => { | |
var $i = ml(br.length); | |
return Eu(br, $i), $i; | |
} | |
}; | |
function Qe(br) { | |
return ae === "string" ? j(br) : ae === "boolean" ? !!br : br; | |
} | |
var Ke = vm(K), | |
Gt = [], | |
no = 0; | |
if (at) for (var Ha = 0; Ha < at.length; Ha++) { | |
var zm = Et[$e[Ha]]; | |
zm ? (no === 0 && (no = Mm()), Gt[Ha] = zm(at[Ha])) : Gt[Ha] = at[Ha]; | |
} | |
var dl = Ke.apply(null, Gt); | |
function qC(br) { | |
return no !== 0 && Lm(no), Qe(br); | |
} | |
return dl = qC(dl), dl; | |
} | |
function ky(K, ae, $e, at) { | |
$e = $e || []; | |
var $t = $e.every(Qe => Qe === "number" || Qe === "boolean"), | |
Et = ae !== "string"; | |
return Et && $t && !at ? vm(K) : function () { | |
return vy(K, ae, $e, arguments, at); | |
}; | |
} | |
var il = { | |
abort: Cy, | |
emscripten_get_heap_max: $u, | |
emscripten_memcpy_big: wy, | |
emscripten_resize_heap: zp, | |
fd_close: Im, | |
fd_seek: Sy, | |
fd_write: d0 | |
}, | |
Ny = al(), | |
km = e.___wasm_call_ctors = function () { | |
return (km = e.___wasm_call_ctors = e.asm.__wasm_call_ctors).apply(null, arguments); | |
}, | |
Nm = e._init = function () { | |
return (Nm = e._init = e.asm.init).apply(null, arguments); | |
}, | |
Ty = e._init_with_threads_count = function () { | |
return (Ty = e._init_with_threads_count = e.asm.init_with_threads_count).apply(null, arguments); | |
}, | |
Tm = e._get_threads_count = function () { | |
return (Tm = e._get_threads_count = e.asm.get_threads_count).apply(null, arguments); | |
}, | |
_y = e._register_tensor = function () { | |
return (_y = e._register_tensor = e.asm.register_tensor).apply(null, arguments); | |
}, | |
Me = e._dispose_data = function () { | |
return (Me = e._dispose_data = e.asm.dispose_data).apply(null, arguments); | |
}, | |
ul = e._dispose = function () { | |
return (ul = e._dispose = e.asm.dispose).apply(null, arguments); | |
}, | |
$y = e._Abs = function () { | |
return ($y = e._Abs = e.asm.Abs).apply(null, arguments); | |
}, | |
_m = e._Acos = function () { | |
return (_m = e._Acos = e.asm.Acos).apply(null, arguments); | |
}, | |
Vp = e._Acosh = function () { | |
return (Vp = e._Acosh = e.asm.Acosh).apply(null, arguments); | |
}, | |
Ey = e._Add = function () { | |
return (Ey = e._Add = e.asm.Add).apply(null, arguments); | |
}, | |
Ry = e._AddN = function () { | |
return (Ry = e._AddN = e.asm.AddN).apply(null, arguments); | |
}, | |
Dy = e._All = function () { | |
return (Dy = e._All = e.asm.All).apply(null, arguments); | |
}, | |
Ay = e._Any = function () { | |
return (Ay = e._Any = e.asm.Any).apply(null, arguments); | |
}, | |
Fy = e._ArgMax = function () { | |
return (Fy = e._ArgMax = e.asm.ArgMax).apply(null, arguments); | |
}, | |
$m = e._ArgMin = function () { | |
return ($m = e._ArgMin = e.asm.ArgMin).apply(null, arguments); | |
}, | |
Em = e._Asin = function () { | |
return (Em = e._Asin = e.asm.Asin).apply(null, arguments); | |
}, | |
Py = e._Asinh = function () { | |
return (Py = e._Asinh = e.asm.Asinh).apply(null, arguments); | |
}, | |
Oy = e._Atan = function () { | |
return (Oy = e._Atan = e.asm.Atan).apply(null, arguments); | |
}, | |
My = e._Atan2 = function () { | |
return (My = e._Atan2 = e.asm.Atan2).apply(null, arguments); | |
}, | |
pl = e._Atanh = function () { | |
return (pl = e._Atanh = e.asm.Atanh).apply(null, arguments); | |
}, | |
Ly = e._AvgPool = function () { | |
return (Ly = e._AvgPool = e.asm.AvgPool).apply(null, arguments); | |
}, | |
By = e._AvgPool3D = function () { | |
return (By = e._AvgPool3D = e.asm.AvgPool3D).apply(null, arguments); | |
}, | |
zy = e._AvgPool3DGrad = function () { | |
return (zy = e._AvgPool3DGrad = e.asm.AvgPool3DGrad).apply(null, arguments); | |
}, | |
Ru = e._AvgPoolGrad = function () { | |
return (Ru = e._AvgPoolGrad = e.asm.AvgPoolGrad).apply(null, arguments); | |
}, | |
Vy = e._BatchMatMul = function () { | |
return (Vy = e._BatchMatMul = e.asm.BatchMatMul).apply(null, arguments); | |
}, | |
Wy = e._Bincount = function () { | |
return (Wy = e._Bincount = e.asm.Bincount).apply(null, arguments); | |
}, | |
Rm = e._BitwiseAnd = function () { | |
return (Rm = e._BitwiseAnd = e.asm.BitwiseAnd).apply(null, arguments); | |
}, | |
Uy = e._Ceil = function () { | |
return (Uy = e._Ceil = e.asm.Ceil).apply(null, arguments); | |
}, | |
cl = e._ClipByValue = function () { | |
return (cl = e._ClipByValue = e.asm.ClipByValue).apply(null, arguments); | |
}, | |
Gy = e._Conv2D = function () { | |
return (Gy = e._Conv2D = e.asm.Conv2D).apply(null, arguments); | |
}, | |
Hy = e._Conv2DBackpropInput = function () { | |
return (Hy = e._Conv2DBackpropInput = e.asm.Conv2DBackpropInput).apply(null, arguments); | |
}, | |
Ky = e._Conv3D = function () { | |
return (Ky = e._Conv3D = e.asm.Conv3D).apply(null, arguments); | |
}, | |
_i = e._Conv3DBackpropFilterV2 = function () { | |
return (_i = e._Conv3DBackpropFilterV2 = e.asm.Conv3DBackpropFilterV2).apply(null, arguments); | |
}, | |
ll = e._Conv3DBackpropInputV2 = function () { | |
return (ll = e._Conv3DBackpropInputV2 = e.asm.Conv3DBackpropInputV2).apply(null, arguments); | |
}, | |
qy = e._Cos = function () { | |
return (qy = e._Cos = e.asm.Cos).apply(null, arguments); | |
}, | |
jy = e._Cosh = function () { | |
return (jy = e._Cosh = e.asm.Cosh).apply(null, arguments); | |
}, | |
Xy = e._CropAndResize = function () { | |
return (Xy = e._CropAndResize = e.asm.CropAndResize).apply(null, arguments); | |
}, | |
Yy = e._Cumprod = function () { | |
return (Yy = e._Cumprod = e.asm.Cumprod).apply(null, arguments); | |
}, | |
Dm = e._Cumsum = function () { | |
return (Dm = e._Cumsum = e.asm.Cumsum).apply(null, arguments); | |
}, | |
Am = e._DenseBincount = function () { | |
return (Am = e._DenseBincount = e.asm.DenseBincount).apply(null, arguments); | |
}, | |
Qy = e._DepthToSpace = function () { | |
return (Qy = e._DepthToSpace = e.asm.DepthToSpace).apply(null, arguments); | |
}, | |
Zy = e._DepthwiseConv2dNative = function () { | |
return (Zy = e._DepthwiseConv2dNative = e.asm.DepthwiseConv2dNative).apply(null, arguments); | |
}, | |
Fm = e._Diag = function () { | |
return (Fm = e._Diag = e.asm.Diag).apply(null, arguments); | |
}, | |
Pm = e._Dilation2D = function () { | |
return (Pm = e._Dilation2D = e.asm.Dilation2D).apply(null, arguments); | |
}, | |
Jy = e._Dilation2DBackpropFilter = function () { | |
return (Jy = e._Dilation2DBackpropFilter = e.asm.Dilation2DBackpropFilter).apply(null, arguments); | |
}, | |
eb = e._Dilation2DBackpropInput = function () { | |
return (eb = e._Dilation2DBackpropInput = e.asm.Dilation2DBackpropInput).apply(null, arguments); | |
}, | |
tb = e._Elu = function () { | |
return (tb = e._Elu = e.asm.Elu).apply(null, arguments); | |
}, | |
rb = e._EluGrad = function () { | |
return (rb = e._EluGrad = e.asm.EluGrad).apply(null, arguments); | |
}, | |
Om = e._Equal = function () { | |
return (Om = e._Equal = e.asm.Equal).apply(null, arguments); | |
}, | |
f0 = e._Erf = function () { | |
return (f0 = e._Erf = e.asm.Erf).apply(null, arguments); | |
}, | |
ob = e._Exp = function () { | |
return (ob = e._Exp = e.asm.Exp).apply(null, arguments); | |
}, | |
nb = e._Expm1 = function () { | |
return (nb = e._Expm1 = e.asm.Expm1).apply(null, arguments); | |
}, | |
sb = e._FlipLeftRight = function () { | |
return (sb = e._FlipLeftRight = e.asm.FlipLeftRight).apply(null, arguments); | |
}, | |
ab = e._Floor = function () { | |
return (ab = e._Floor = e.asm.Floor).apply(null, arguments); | |
}, | |
ib = e._FloorDiv = function () { | |
return (ib = e._FloorDiv = e.asm.FloorDiv).apply(null, arguments); | |
}, | |
ub = e._FusedBatchNorm = function () { | |
return (ub = e._FusedBatchNorm = e.asm.FusedBatchNorm).apply(null, arguments); | |
}, | |
pb = e._FusedConv2D = function () { | |
return (pb = e._FusedConv2D = e.asm.FusedConv2D).apply(null, arguments); | |
}, | |
cb = e._FusedDepthwiseConv2D = function () { | |
return (cb = e._FusedDepthwiseConv2D = e.asm.FusedDepthwiseConv2D).apply(null, arguments); | |
}, | |
lb = e._Gather = function () { | |
return (lb = e._Gather = e.asm.Gather).apply(null, arguments); | |
}, | |
mb = e._GatherNd = function () { | |
return (mb = e._GatherNd = e.asm.GatherNd).apply(null, arguments); | |
}, | |
db = e._Greater = function () { | |
return (db = e._Greater = e.asm.Greater).apply(null, arguments); | |
}, | |
fb = e._GreaterEqual = function () { | |
return (fb = e._GreaterEqual = e.asm.GreaterEqual).apply(null, arguments); | |
}, | |
hb = e._IsFinite = function () { | |
return (hb = e._IsFinite = e.asm.IsFinite).apply(null, arguments); | |
}, | |
gb = e._IsInf = function () { | |
return (gb = e._IsInf = e.asm.IsInf).apply(null, arguments); | |
}, | |
xb = e._IsNan = function () { | |
return (xb = e._IsNan = e.asm.IsNan).apply(null, arguments); | |
}, | |
yb = e._LRN = function () { | |
return (yb = e._LRN = e.asm.LRN).apply(null, arguments); | |
}, | |
bb = e._LRNGrad = function () { | |
return (bb = e._LRNGrad = e.asm.LRNGrad).apply(null, arguments); | |
}, | |
Cb = e._LeakyRelu = function () { | |
return (Cb = e._LeakyRelu = e.asm.LeakyRelu).apply(null, arguments); | |
}, | |
wb = e._Less = function () { | |
return (wb = e._Less = e.asm.Less).apply(null, arguments); | |
}, | |
Sb = e._LessEqual = function () { | |
return (Sb = e._LessEqual = e.asm.LessEqual).apply(null, arguments); | |
}, | |
Ib = e._LinSpace = function () { | |
return (Ib = e._LinSpace = e.asm.LinSpace).apply(null, arguments); | |
}, | |
vb = e._Log = function () { | |
return (vb = e._Log = e.asm.Log).apply(null, arguments); | |
}, | |
kb = e._Log1p = function () { | |
return (kb = e._Log1p = e.asm.Log1p).apply(null, arguments); | |
}, | |
Nb = e._LogicalAnd = function () { | |
return (Nb = e._LogicalAnd = e.asm.LogicalAnd).apply(null, arguments); | |
}, | |
Tb = e._LogicalNot = function () { | |
return (Tb = e._LogicalNot = e.asm.LogicalNot).apply(null, arguments); | |
}, | |
_b = e._LogicalOr = function () { | |
return (_b = e._LogicalOr = e.asm.LogicalOr).apply(null, arguments); | |
}, | |
$b = e._LogicalXor = function () { | |
return ($b = e._LogicalXor = e.asm.LogicalXor).apply(null, arguments); | |
}, | |
Eb = e._Max = function () { | |
return (Eb = e._Max = e.asm.Max).apply(null, arguments); | |
}, | |
Rb = e._MaxPool = function () { | |
return (Rb = e._MaxPool = e.asm.MaxPool).apply(null, arguments); | |
}, | |
Db = e._MaxPool3D = function () { | |
return (Db = e._MaxPool3D = e.asm.MaxPool3D).apply(null, arguments); | |
}, | |
Ab = e._MaxPool3DGrad = function () { | |
return (Ab = e._MaxPool3DGrad = e.asm.MaxPool3DGrad).apply(null, arguments); | |
}, | |
Fb = e._MaxPoolGrad = function () { | |
return (Fb = e._MaxPoolGrad = e.asm.MaxPoolGrad).apply(null, arguments); | |
}, | |
Pb = e._MaxPoolWithArgmax = function () { | |
return (Pb = e._MaxPoolWithArgmax = e.asm.MaxPoolWithArgmax).apply(null, arguments); | |
}, | |
Ob = e._Maximum = function () { | |
return (Ob = e._Maximum = e.asm.Maximum).apply(null, arguments); | |
}, | |
Mb = e._Mean = function () { | |
return (Mb = e._Mean = e.asm.Mean).apply(null, arguments); | |
}, | |
Lb = e._Min = function () { | |
return (Lb = e._Min = e.asm.Min).apply(null, arguments); | |
}, | |
Bb = e._Minimum = function () { | |
return (Bb = e._Minimum = e.asm.Minimum).apply(null, arguments); | |
}, | |
zb = e._MirrorPad = function () { | |
return (zb = e._MirrorPad = e.asm.MirrorPad).apply(null, arguments); | |
}, | |
Vb = e._Mod = function () { | |
return (Vb = e._Mod = e.asm.Mod).apply(null, arguments); | |
}, | |
Wb = e._Multinomial = function () { | |
return (Wb = e._Multinomial = e.asm.Multinomial).apply(null, arguments); | |
}, | |
Ub = e._Multiply = function () { | |
return (Ub = e._Multiply = e.asm.Multiply).apply(null, arguments); | |
}, | |
Gb = e._Neg = function () { | |
return (Gb = e._Neg = e.asm.Neg).apply(null, arguments); | |
}, | |
Hb = e._NonMaxSuppressionV3 = function () { | |
return (Hb = e._NonMaxSuppressionV3 = e.asm.NonMaxSuppressionV3).apply(null, arguments); | |
}, | |
Kb = e._NonMaxSuppressionV4 = function () { | |
return (Kb = e._NonMaxSuppressionV4 = e.asm.NonMaxSuppressionV4).apply(null, arguments); | |
}, | |
qb = e._NonMaxSuppressionV5 = function () { | |
return (qb = e._NonMaxSuppressionV5 = e.asm.NonMaxSuppressionV5).apply(null, arguments); | |
}, | |
jb = e._NotEqual = function () { | |
return (jb = e._NotEqual = e.asm.NotEqual).apply(null, arguments); | |
}, | |
Xb = e._OneHot = function () { | |
return (Xb = e._OneHot = e.asm.OneHot).apply(null, arguments); | |
}, | |
Yb = e._PadV2 = function () { | |
return (Yb = e._PadV2 = e.asm.PadV2).apply(null, arguments); | |
}, | |
Qb = e._Pow = function () { | |
return (Qb = e._Pow = e.asm.Pow).apply(null, arguments); | |
}, | |
Zb = e._Prelu = function () { | |
return (Zb = e._Prelu = e.asm.Prelu).apply(null, arguments); | |
}, | |
Jb = e._Prod = function () { | |
return (Jb = e._Prod = e.asm.Prod).apply(null, arguments); | |
}, | |
eC = e._RealDiv = function () { | |
return (eC = e._RealDiv = e.asm.RealDiv).apply(null, arguments); | |
}, | |
tC = e._Reciprocal = function () { | |
return (tC = e._Reciprocal = e.asm.Reciprocal).apply(null, arguments); | |
}, | |
rC = e._Relu = function () { | |
return (rC = e._Relu = e.asm.Relu).apply(null, arguments); | |
}, | |
oC = e._Relu6 = function () { | |
return (oC = e._Relu6 = e.asm.Relu6).apply(null, arguments); | |
}, | |
nC = e._ResizeBilinear = function () { | |
return (nC = e._ResizeBilinear = e.asm.ResizeBilinear).apply(null, arguments); | |
}, | |
sC = e._ResizeBilinearGrad = function () { | |
return (sC = e._ResizeBilinearGrad = e.asm.ResizeBilinearGrad).apply(null, arguments); | |
}, | |
aC = e._ResizeNearestNeighbor = function () { | |
return (aC = e._ResizeNearestNeighbor = e.asm.ResizeNearestNeighbor).apply(null, arguments); | |
}, | |
iC = e._ResizeNearestNeighborGrad = function () { | |
return (iC = e._ResizeNearestNeighborGrad = e.asm.ResizeNearestNeighborGrad).apply(null, arguments); | |
}, | |
uC = e._Reverse = function () { | |
return (uC = e._Reverse = e.asm.Reverse).apply(null, arguments); | |
}, | |
pC = e._RotateWithOffset = function () { | |
return (pC = e._RotateWithOffset = e.asm.RotateWithOffset).apply(null, arguments); | |
}, | |
cC = e._Round = function () { | |
return (cC = e._Round = e.asm.Round).apply(null, arguments); | |
}, | |
lC = e._Rsqrt = function () { | |
return (lC = e._Rsqrt = e.asm.Rsqrt).apply(null, arguments); | |
}, | |
mC = e._ScatterNd = function () { | |
return (mC = e._ScatterNd = e.asm.ScatterNd).apply(null, arguments); | |
}, | |
dC = e._SearchSorted = function () { | |
return (dC = e._SearchSorted = e.asm.SearchSorted).apply(null, arguments); | |
}, | |
fC = e._SelectV2 = function () { | |
return (fC = e._SelectV2 = e.asm.SelectV2).apply(null, arguments); | |
}, | |
hC = e._Selu = function () { | |
return (hC = e._Selu = e.asm.Selu).apply(null, arguments); | |
}, | |
gC = e._Sigmoid = function () { | |
return (gC = e._Sigmoid = e.asm.Sigmoid).apply(null, arguments); | |
}, | |
xC = e._Sign = function () { | |
return (xC = e._Sign = e.asm.Sign).apply(null, arguments); | |
}, | |
yC = e._Sin = function () { | |
return (yC = e._Sin = e.asm.Sin).apply(null, arguments); | |
}, | |
bC = e._Sinh = function () { | |
return (bC = e._Sinh = e.asm.Sinh).apply(null, arguments); | |
}, | |
CC = e._Softmax = function () { | |
return (CC = e._Softmax = e.asm.Softmax).apply(null, arguments); | |
}, | |
wC = e._Softplus = function () { | |
return (wC = e._Softplus = e.asm.Softplus).apply(null, arguments); | |
}, | |
SC = e._SparseFillEmptyRows = function () { | |
return (SC = e._SparseFillEmptyRows = e.asm.SparseFillEmptyRows).apply(null, arguments); | |
}, | |
IC = e._SparseReshape = function () { | |
return (IC = e._SparseReshape = e.asm.SparseReshape).apply(null, arguments); | |
}, | |
vC = e._SparseSegmentReduction = function () { | |
return (vC = e._SparseSegmentReduction = e.asm.SparseSegmentReduction).apply(null, arguments); | |
}, | |
kC = e._SparseToDense = function () { | |
return (kC = e._SparseToDense = e.asm.SparseToDense).apply(null, arguments); | |
}, | |
NC = e._Sqrt = function () { | |
return (NC = e._Sqrt = e.asm.Sqrt).apply(null, arguments); | |
}, | |
TC = e._Square = function () { | |
return (TC = e._Square = e.asm.Square).apply(null, arguments); | |
}, | |
_C = e._SquaredDifference = function () { | |
return (_C = e._SquaredDifference = e.asm.SquaredDifference).apply(null, arguments); | |
}, | |
$C = e._Step = function () { | |
return ($C = e._Step = e.asm.Step).apply(null, arguments); | |
}, | |
EC = e._StridedSlice = function () { | |
return (EC = e._StridedSlice = e.asm.StridedSlice).apply(null, arguments); | |
}, | |
RC = e._Sub = function () { | |
return (RC = e._Sub = e.asm.Sub).apply(null, arguments); | |
}, | |
DC = e._Sum = function () { | |
return (DC = e._Sum = e.asm.Sum).apply(null, arguments); | |
}, | |
AC = e._Tan = function () { | |
return (AC = e._Tan = e.asm.Tan).apply(null, arguments); | |
}, | |
FC = e._Tanh = function () { | |
return (FC = e._Tanh = e.asm.Tanh).apply(null, arguments); | |
}, | |
PC = e._TensorScatterUpdate = function () { | |
return (PC = e._TensorScatterUpdate = e.asm.TensorScatterUpdate).apply(null, arguments); | |
}, | |
OC = e._Tile = function () { | |
return (OC = e._Tile = e.asm.Tile).apply(null, arguments); | |
}, | |
MC = e._TopK = function () { | |
return (MC = e._TopK = e.asm.TopK).apply(null, arguments); | |
}, | |
LC = e._Transform = function () { | |
return (LC = e._Transform = e.asm.Transform).apply(null, arguments); | |
}, | |
BC = e._Transpose = function () { | |
return (BC = e._Transpose = e.asm.Transpose).apply(null, arguments); | |
}, | |
zC = e.__FusedMatMul = function () { | |
return (zC = e.__FusedMatMul = e.asm._FusedMatMul).apply(null, arguments); | |
}, | |
VC = e._malloc = function () { | |
return (VC = e._malloc = e.asm.malloc).apply(null, arguments); | |
}, | |
WC = e._free = function () { | |
return (WC = e._free = e.asm.free).apply(null, arguments); | |
}, | |
UC = e.___errno_location = function () { | |
return (UC = e.___errno_location = e.asm.__errno_location).apply(null, arguments); | |
}, | |
Mm = e.stackSave = function () { | |
return (Mm = e.stackSave = e.asm.stackSave).apply(null, arguments); | |
}, | |
Lm = e.stackRestore = function () { | |
return (Lm = e.stackRestore = e.asm.stackRestore).apply(null, arguments); | |
}, | |
ml = e.stackAlloc = function () { | |
return (ml = e.stackAlloc = e.asm.stackAlloc).apply(null, arguments); | |
}, | |
GC = e.dynCall_iijjiiii = function () { | |
return (GC = e.dynCall_iijjiiii = e.asm.dynCall_iijjiiii).apply(null, arguments); | |
}, | |
HC = e.dynCall_jiji = function () { | |
return (HC = e.dynCall_jiji = e.asm.dynCall_jiji).apply(null, arguments); | |
}; | |
e.cwrap = ky; | |
var Wp; | |
ar = function K() { | |
Wp || Bm(), Wp || (ar = K); | |
}; | |
function Bm(K) { | |
if (K = K || i, _t > 0 || (gt(), _t > 0)) return; | |
function ae() { | |
Wp || (Wp = true, e.calledRun = true, !M && (xt(), o(e), e.onRuntimeInitialized && e.onRuntimeInitialized(), Lr())); | |
} | |
e.setStatus ? (e.setStatus("Running..."), setTimeout(function () { | |
setTimeout(function () { | |
e.setStatus(""); | |
}, 1), ae(); | |
}, 1)) : ae(); | |
} | |
if (e.preInit) for (typeof e.preInit == "function" && (e.preInit = [e.preInit]); e.preInit.length > 0;) e.preInit.pop()(); | |
Bm(); | |
var Up; | |
s && (Up = { | |
uncaughtException: process.listeners("uncaughtException").filter(function (K) { | |
return !s.uncaughtException.indexOf(K) > -1; | |
}), | |
unhandledRejection: process.listeners("unhandledRejection").filter(function (K) { | |
return !s.unhandledRejection.indexOf(K) > -1; | |
}) | |
}); | |
var Gp; | |
if (typeof t8 != "undefined") Gp = t8;else if (typeof WasmBackendModuleThreadedSimd != "undefined") Gp = WasmBackendModuleThreadedSimd;else throw new Error("Could not find wasm module in post.js"); | |
if (Up) { | |
var KC = Gp._dispose; | |
Gp._dispose = function () { | |
KC(), Up.uncaughtException.forEach(function (K) { | |
process.removeListener("uncaughtException", K); | |
}), Up.unhandledRejection.forEach(function (K) { | |
process.removeListener("unhandledRejection", K); | |
}); | |
}; | |
} | |
return t8.ready; | |
}; | |
})(); | |
typeof Ug == "object" && typeof Gv == "object" ? Gv.exports = Uv : typeof define == "function" && __webpack_require__.amdO ? define([], function () { | |
return Uv; | |
}) : typeof Ug == "object" && (Ug.WasmBackendModule = Uv); | |
}); | |
var zo = class { | |
constructor(t8, e) { | |
this.backend = t8, this.dataMover = e, this.data = /* @__PURE__ */new WeakMap(), this.dataIdsCount = 0; | |
} | |
get(t8) { | |
return this.data.has(t8) || this.dataMover.moveData(this.backend, t8), this.data.get(t8); | |
} | |
set(t8, e) { | |
this.dataIdsCount++, this.data.set(t8, e); | |
} | |
has(t8) { | |
return this.data.has(t8); | |
} | |
delete(t8) { | |
return this.dataIdsCount--, this.data.delete(t8); | |
} | |
numDataIds() { | |
return this.dataIdsCount; | |
} | |
}; | |
var ao = class { | |
refCount(t8) { | |
return zr("refCount"); | |
} | |
incRef(t8) { | |
return zr("incRef"); | |
} | |
timerAvailable() { | |
return true; | |
} | |
time(t8) { | |
return zr("time"); | |
} | |
read(t8) { | |
return zr("read"); | |
} | |
readSync(t8) { | |
return zr("readSync"); | |
} | |
readToGPU(t8, e) { | |
return zr("readToGPU"); | |
} | |
numDataIds() { | |
return zr("numDataIds"); | |
} | |
disposeData(t8, e) { | |
return zr("disposeData"); | |
} | |
write(t8, e, o) { | |
return zr("write"); | |
} | |
move(t8, e, o, n, s) { | |
return zr("move"); | |
} | |
createTensorFromGPUData(t8, e, o) { | |
return zr("createTensorFromGPUData"); | |
} | |
memory() { | |
return zr("memory"); | |
} | |
floatPrecision() { | |
return zr("floatPrecision"); | |
} | |
epsilon() { | |
return this.floatPrecision() === 32 ? 1e-7 : 1e-4; | |
} | |
dispose() { | |
return zr("dispose"); | |
} | |
}; | |
function zr(r) { | |
throw new Error(`'${r}' not yet implemented or not found in the registry. This kernel may not be supported by the tfjs backend you have chosen`); | |
} | |
function I0(r) { | |
let t8 = r.length, | |
e = 0; | |
for (; t8 > 0;) e = Math.random() * t8 | 0, t8--, jm(r, t8, e); | |
} | |
function TG(r, t8) { | |
if (r.length !== t8.length) throw new Error(`Array sizes must match to be shuffled together First array length was ${r.length}Second array length was ${t8.length}`); | |
let e = r.length, | |
o = 0; | |
for (; e > 0;) o = Math.random() * e | 0, e--, jm(r, e, o), jm(t8, e, o); | |
} | |
function qp(r, t8, e) { | |
return Math.max(r, Math.min(t8, e)); | |
} | |
function _G(r) { | |
return r % 2 === 0 ? r : r + 1; | |
} | |
function jm(r, t8, e) { | |
let o = r[t8]; | |
r[t8] = r[e], r[e] = o; | |
} | |
function $G(r) { | |
let t8 = 0; | |
for (let e = 0; e < r.length; e++) t8 += r[e]; | |
return t8; | |
} | |
function EG(r, t8) { | |
let e = Math.random(); | |
return t8 * e + (1 - e) * r; | |
} | |
function RG(r, t8) { | |
let e = 0; | |
for (let o = 0; o < r.length; o++) { | |
let n = Number(r[o]) - Number(t8[o]); | |
e += n * n; | |
} | |
return e; | |
} | |
function $(r, t8) { | |
if (!r) throw new Error(typeof t8 == "string" ? t8 : t8()); | |
} | |
function yt(r, t8, e = "") { | |
$(Cr(r, t8), () => e + ` Shapes ${r} and ${t8} must match`); | |
} | |
function io(r) { | |
$(r != null, () => "The input to the tensor constructor must be a non-null value."); | |
} | |
function He(r) { | |
if (r.length === 0) return 1; | |
let t8 = r[0]; | |
for (let e = 1; e < r.length; e++) t8 *= r[e]; | |
return t8; | |
} | |
function DG(r) { | |
return r.length === 0; | |
} | |
function ZC(r, t8) { | |
if (r === t8) return true; | |
if (r == null || t8 == null || r.length !== t8.length) return false; | |
for (let e = 0; e < r.length; e++) if (r[e] !== null && t8[e] !== null && r[e] !== t8[e]) return false; | |
return true; | |
} | |
function Cr(r, t8) { | |
if (r === t8) return true; | |
if (r == null || t8 == null || r.length !== t8.length) return false; | |
for (let e = 0; e < r.length; e++) if (r[e] !== t8[e]) return false; | |
return true; | |
} | |
function qa(r) { | |
return r % 1 === 0; | |
} | |
function AG(r) { | |
if (Math.tanh != null) return Math.tanh(r); | |
if (r === 1 / 0) return 1; | |
if (r === -1 / 0) return -1; | |
{ | |
let t8 = Math.exp(2 * r); | |
return (t8 - 1) / (t8 + 1); | |
} | |
} | |
function FG(r) { | |
let t8 = Math.ceil(Math.sqrt(r)); | |
return [t8, Math.ceil(r / t8)]; | |
} | |
function PG(r) { | |
let t8 = new Uint32Array(r); | |
for (let e = 0; e < r; ++e) t8[e] = e; | |
return I0(t8), t8; | |
} | |
function Au(r, t8) { | |
return t8 <= r.length ? r : r + " ".repeat(t8 - r.length); | |
} | |
function OG(r, t8 = n => 0, e, o) { | |
return new Promise((n, s) => { | |
let a = 0, | |
i = () => { | |
if (r()) { | |
n(); | |
return; | |
} | |
a++; | |
let p = t8(a); | |
if (e != null && a >= e) { | |
s(); | |
return; | |
} | |
o != null ? o(i, p) : setTimeout(i, p); | |
}; | |
i(); | |
}); | |
} | |
function MG(r, t8) { | |
let e = 1, | |
o = -1; | |
for (let s = 0; s < r.length; ++s) if (r[s] >= 0) e *= r[s];else if (r[s] === -1) { | |
if (o !== -1) throw Error(`Shapes can only have 1 implicit size. Found -1 at dim ${o} and dim ${s}`); | |
o = s; | |
} else if (r[s] < 0) throw Error(`Shapes can not be < 0. Found ${r[s]} at dim ${s}`); | |
if (o === -1) { | |
if (t8 > 0 && t8 !== e) throw Error(`Size(${t8}) must match the product of shape ${r}`); | |
return r; | |
} | |
if (e === 0) throw Error(`Cannot infer the missing size in [${r}] when there are 0 elements`); | |
if (t8 % e !== 0) throw Error(`The implicit shape can't be a fractional number. Got ${t8} / ${e}`); | |
let n = r.slice(); | |
return n[o] = t8 / e, n; | |
} | |
function Ei(r, t8) { | |
let e = t8.length; | |
return r = r == null ? t8.map((o, n) => n) : [].concat(r), $(r.every(o => o >= -e && o < e), () => `All values in axis param must be in range [-${e}, ${e}) but got axis ${r}`), $(r.every(o => qa(o)), () => `All values in axis param must be integers but got axis ${r}`), r.map(o => o < 0 ? e + o : o); | |
} | |
// squeezeShape: removes size-1 dims from shape `r`, optionally only at
// the axes listed in `t8` (normalized/sorted via Ei). Returns the
// squeezed shape plus the indices of the dims that were kept.
function JC(r, t8) {
  let e = [],
    o = [],
    // An explicitly-empty axis array means "squeeze nothing specific",
    // i.e. squeeze every size-1 dim.
    n = t8 != null && Array.isArray(t8) && t8.length === 0,
    s = t8 == null || n ? null : Ei(t8, r).sort(),
    a = 0;
  for (let i = 0; i < r.length; ++i) {
    if (s != null) {
      if (s[a] === i && r[i] !== 1) throw new Error(`Can't squeeze axis ${i} since its dim '${r[i]}' is not 1`);
      // Keep size-1 dims that are before the next requested axis.
      (s[a] == null || s[a] > i) && r[i] === 1 && (e.push(r[i]), o.push(i)), s[a] <= i && a++;
    }
    r[i] !== 1 && (e.push(r[i]), o.push(i));
  }
  return {
    newShape: e,
    keptDims: o
  };
}
// Public alias of Xm: allocates a backing array of length t8 for dtype r.
function ew(r, t8) {
  return Xm(r, t8);
}
// Allocates the backing store for `t8` elements of dtype `r`: typed
// arrays for numeric dtypes, a plain Array for strings. A null/undefined
// dtype defaults to float32.
function Xm(r, t8) {
  if (r == null || r === "float32") {
    return new Float32Array(t8);
  }
  if (r === "int32") {
    return new Int32Array(t8);
  }
  if (r === "bool") {
    return new Uint8Array(t8);
  }
  if (r === "string") {
    return new Array(t8);
  }
  throw new Error(`Unknown data type ${r}`);
}
// Validates numeric data before upload: throws on the first NaN or
// non-finite value. `t8` (the dtype) is only used in the error message.
function tw(r, t8) {
  for (const value of r) {
    if (isNaN(value) || !isFinite(value)) {
      throw Error(`A tensor of type ${t8} being uploaded contains ${value}.`);
    }
  }
}
// True when `r` is one of the dtypes TF.js supports.
function rw(r) {
  switch (r) {
    case "bool":
    case "complex64":
    case "float32":
    case "int32":
    case "string":
      return true;
    default:
      return false;
  }
}
// True when casting from dtype `r` to dtype `t8` can lose information
// (e.g. float32 → int32). Widening casts return false.
function LG(r, t8) {
  if (t8 === "complex64") {
    return false;
  }
  if (t8 === "float32" && r !== "complex64") {
    return false;
  }
  if (t8 === "int32" && r !== "float32" && r !== "complex64") {
    return false;
  }
  if (t8 === "bool" && r === "bool") {
    return false;
  }
  return true;
}
// Bytes occupied by one element of the given dtype.
function jp(r) {
  switch (r) {
    case "float32":
    case "int32":
      return 4;
    case "complex64":
      return 8;
    case "bool":
      return 1;
    default:
      throw new Error(`Unknown dtype ${r}`);
  }
}
// Total byte length of an array of encoded strings (each entry exposing
// a .length); null/undefined input counts as 0 bytes.
function ow(r) {
  if (r == null) {
    return 0;
  }
  return r.reduce((total, bytes) => total + bytes.length, 0);
}
// True for string primitives and boxed String objects alike.
function Vo(r) {
  const isPrimitive = typeof r === "string";
  return isPrimitive || r instanceof String;
}
// True only for the boolean primitives (boxed Booleans do not count).
function v0(r) {
  return r === true || r === false;
}
// True for number primitives (including NaN and Infinity).
function k0(r) {
  const kind = typeof r;
  return kind === "number";
}
// Infers a dtype from a JS value: nested arrays are walked down to the
// first leaf; typed arrays map by type, numbers → "float32", strings →
// "string" (via Vo), booleans → "bool" (via v0); everything else falls
// back to "float32".
function Ri(r) {
  return Array.isArray(r) ? Ri(r[0]) : r instanceof Float32Array ? "float32" : r instanceof Int32Array || r instanceof Uint8Array || r instanceof Uint8ClampedArray ? "int32" : k0(r) ? "float32" : Vo(r) ? "string" : v0(r) ? "bool" : "float32";
}
// Duck-typed function check: truthy value exposing constructor/call/apply.
function qs(r) {
  return Boolean(r && r.constructor && r.call && r.apply);
}
// Smallest divisor of `r` that is >= `t8`; returns `r` itself when no
// smaller divisor qualifies.
function Xp(r, t8) {
  for (let candidate = t8; candidate < r; ++candidate) {
    if (r % candidate === 0) {
      return candidate;
    }
  }
  return r;
}
// Row-major strides for shape `r` (stride of the last dim is implicit
// and omitted); rank < 2 has no strides.
function js(r) {
  const rank = r.length;
  if (rank < 2) {
    return [];
  }
  const strides = new Array(rank - 1);
  strides[rank - 2] = r[rank - 1];
  for (let axis = rank - 3; axis >= 0; --axis) {
    strides[axis] = strides[axis + 1] * r[axis + 1];
  }
  return strides;
}
// Recursively builds a nested JS array from flat data `e`, starting at
// offset `r`, for shape `t8`. The complex flag `o` doubles the innermost
// extent (interleaved real/imag values).
function N0(r, t8, e, o = false) {
  const out = [];
  if (t8.length === 1) {
    const extent = t8[0] * (o ? 2 : 1);
    for (let i = 0; i < extent; i++) {
      out[i] = e[r + i];
    }
    return out;
  }
  const [head, ...rest] = t8;
  const innerSize = rest.reduce((acc, dim) => acc * dim) * (o ? 2 : 1);
  for (let i = 0; i < head; i++) {
    out[i] = N0(r + i * innerSize, rest, e, o);
  }
  return out;
}
function Du(r, t8, e = false) { | |
if (r.length === 0) return t8[0]; | |
let o = r.reduce((n, s) => n * s) * (e ? 2 : 1); | |
if (o === 0) return []; | |
if (o !== t8.length) throw new Error(`[${r}] does not match the input size ${t8.length}${e ? " for a complex tensor" : ""}.`); | |
return N0(0, r, t8, e); | |
} | |
// Normalizes backend output (typed array, ArrayBuffer, or already-decoded
// JS array) into the typed array matching dtype `t8`. Plain arrays pass
// through untouched; bool/string data is widened from int32 words into a
// Uint8Array.
function BG(r, t8) {
  if (Array.isArray(r)) {
    return r;
  }
  switch (t8) {
    case "float32":
      return r instanceof Float32Array ? r : new Float32Array(r);
    case "int32":
      return r instanceof Int32Array ? r : new Int32Array(r);
    case "bool":
    case "string":
      return Uint8Array.from(new Int32Array(r));
    default:
      throw new Error(`Unknown dtype ${t8}`);
  }
}
// makeOnesTypedArray: allocates a zeroed typed array of length r for
// dtype t8 (via Yp) and fills it with ones.
function fl(r, t8) {
  let e = Yp(r, t8);
  for (let o = 0; o < e.length; o++) e[o] = 1;
  return e;
}
// Allocates a zero-filled typed array of length `r` for dtype `t8`.
// Null/undefined dtype defaults to float32; complex64 shares the float32
// representation. Strings are rejected.
function Yp(r, t8) {
  switch (t8) {
    case null:
    case undefined:
    case "float32":
    case "complex64":
      return new Float32Array(r);
    case "int32":
      return new Int32Array(r);
    case "bool":
      return new Uint8Array(r);
    default:
      throw new Error(`Unknown data type ${t8}`);
  }
}
// makeZerosNestedTypedArray: builds a nested array of zeros with shape
// `r` for dtype `t8` by zero-allocating flat storage and nesting it via
// Du.
function zG(r, t8) {
  let e = r.reduce((o, n) => o * n, 1);
  if (t8 == null || t8 === "float32") return Du(r, new Float32Array(e));
  if (t8 === "int32") return Du(r, new Int32Array(e));
  if (t8 === "bool") return Du(r, new Uint8Array(e));
  throw new Error(`Unknown data type ${t8}`);
}
// Asserts (via $, defined elsewhere) that every dimension of shape `r`
// is a non-negative integer.
function wt(r) {
  r.forEach(t8 => {
    $(Number.isInteger(t8) && t8 >= 0, () => `Tensor must have a shape comprised of positive integers but got shape [${r}].`);
  });
}
// locToIndex: converts multi-dimensional coordinates `r` (rank `t8`,
// row-major strides `e`) into a flat index.
function VG(r, t8, e) {
  if (t8 === 0) {
    return 0;
  }
  if (t8 === 1) {
    return r[0];
  }
  let flatIndex = r[r.length - 1];
  for (let axis = 0; axis < r.length - 1; ++axis) {
    flatIndex += e[axis] * r[axis];
  }
  return flatIndex;
}
// indexToLoc: converts flat index `r` back into rank-`t8` coordinates
// using row-major strides `e` (the inverse of VG).
function WG(r, t8, e) {
  if (t8 === 0) {
    return [];
  }
  if (t8 === 1) {
    return [r];
  }
  const coords = new Array(t8);
  let remainder = r;
  for (let axis = 0; axis < coords.length - 1; ++axis) {
    coords[axis] = Math.floor(remainder / e[axis]);
    remainder -= coords[axis] * e[axis];
  }
  coords[coords.length - 1] = remainder;
  return coords;
}
// Thenable check. Note: deliberately returns the short-circuited falsy
// value (not coerced to boolean) to match the original contract.
function Fu(r) {
  const then = r && r.then;
  return then && typeof then == "function";
}
// Query-string key used to override TF.js feature flags from the URL.
var T0 = "tfjsflags";
// Environment: the global flag registry plus the active platform.
// Flags are evaluated lazily through registered evaluation functions and
// can be overridden from the URL (?tfjsflags=name:value,...).
var hl = class {
  constructor(t8) {
    this.global = t8, this.flags = {}, this.flagRegistry = {}, this.urlFlags = {}, this.getQueryParams = GG, this.populateURLFlags();
  }
  // Installs the platform implementation; warns when one is overwritten
  // (silenced under IS_TEST/PROD).
  setPlatform(t8, e) {
    this.platform != null && (A().getBool("IS_TEST") || A().getBool("PROD") || console.warn(`Platform ${this.platformName} has already been set. Overwriting the platform with ${t8}.`)), this.platformName = t8, this.platform = e;
  }
  // Registers a flag's evaluation function and optional set-hook; applies
  // any pending URL override immediately.
  registerFlag(t8, e, o) {
    if (this.flagRegistry[t8] = {
      evaluationFn: e,
      setHook: o
    }, this.urlFlags[t8] != null) {
      let n = this.urlFlags[t8];
      A().getBool("IS_TEST") || A().getBool("PROD") || console.warn(`Setting feature override from URL ${t8}: ${n}.`), this.set(t8, n);
    }
  }
  // Async flag read; caches the evaluated value.
  async getAsync(t8) {
    return t8 in this.flags ? this.flags[t8] : (this.flags[t8] = await this.evaluateFlag(t8), this.flags[t8]);
  }
  // Sync flag read; throws when the evaluation function is async.
  get(t8) {
    if (t8 in this.flags) return this.flags[t8];
    let e = this.evaluateFlag(t8);
    if (Fu(e)) throw new Error(`Flag ${t8} cannot be synchronously evaluated. Please use getAsync() instead.`);
    return this.flags[t8] = e, this.flags[t8];
  }
  // Typed convenience wrappers around get().
  getNumber(t8) {
    return this.get(t8);
  }
  getBool(t8) {
    return this.get(t8);
  }
  getString(t8) {
    return this.get(t8);
  }
  getFlags() {
    return this.flags;
  }
  // Legacy alias for getFlags().
  get features() {
    return this.flags;
  }
  // Sets a flag value and fires its set-hook, if any.
  set(t8, e) {
    if (this.flagRegistry[t8] == null) throw new Error(`Cannot set flag ${t8} as it has not been registered.`);
    this.flags[t8] = e, this.flagRegistry[t8].setHook != null && this.flagRegistry[t8].setHook(e);
  }
  evaluateFlag(t8) {
    if (this.flagRegistry[t8] == null) throw new Error(`Cannot evaluate flag '${t8}': no evaluation function found.`);
    return this.flagRegistry[t8].evaluationFn();
  }
  // Replaces the entire flag cache (used by tests).
  setFlags(t8) {
    this.flags = Object.assign({}, t8);
  }
  reset() {
    this.flags = {}, this.urlFlags = {}, this.populateURLFlags();
  }
  // Parses ?tfjsflags=name:value,... from this.global.location into
  // urlFlags (values coerced via KG). No-op outside a browser-like global.
  populateURLFlags() {
    if (typeof this.global == "undefined" || typeof this.global.location == "undefined" || typeof this.global.location.search == "undefined") return;
    let t8 = this.getQueryParams(this.global.location.search);
    T0 in t8 && t8[T0].split(",").forEach(o => {
      let [n, s] = o.split(":");
      this.urlFlags[n] = KG(n, s);
    });
  }
};
// Parses a URL search string ("?a=1&b=2") into a {key: value} map; each
// pair is decoded and stored by HG below.
function GG(r) {
  let t8 = {};
  return r.replace(/[?&]([^=?&]+)(?:=([^&]*))?/g, (e, ...o) => (HG(t8, o[0], o[1]), o.join("="))), t8;
}
// Decodes one query parameter and stores it in map `r`; a missing value
// becomes the empty string.
function HG(r, t8, e) {
  const key = decodeURIComponent(t8);
  const value = decodeURIComponent(e || "");
  r[key] = value;
}
// Coerces a URL flag string to a boolean or number when possible;
// otherwise returns the raw string. `r` (the flag name) is unused here
// but kept for the caller's signature.
function KG(r, t8) {
  const lowered = t8.toLowerCase();
  if (lowered === "true" || lowered === "false") {
    return lowered === "true";
  }
  if (`${+lowered}` === lowered) {
    return +lowered;
  }
  return t8;
}
// Returns the shared Environment instance (installed by _0 below).
function A() {
  return nw;
}
// Singleton Environment; null until _0() installs one.
var nw = null;
// Installs `r` as the shared Environment returned by A().
function _0(r) {
  nw = r;
}
// Cached global namespace object, resolved lazily by aw() below.
var sw;
// Lazily resolves the JS global object: window, the bundler-provided
// global (__webpack_require__.g), process, or self — the first that is
// defined — and caches it in sw.
function aw() {
  if (sw == null) {
    let r;
    if (typeof window != "undefined") r = window;else if (typeof __webpack_require__.g != "undefined") r = __webpack_require__.g;else if (typeof process != "undefined") r = process;else if (typeof self != "undefined") r = self;else throw new Error("Could not find a global object");
    sw = r;
  }
  return sw;
}
// Returns (creating on first use) the cross-bundle singleton Map stored
// on the global object as _tfGlobals.
function qG() {
  let r = aw();
  return r._tfGlobals == null && (r._tfGlobals = /* @__PURE__ */new Map()), r._tfGlobals;
}
// Cross-bundle singleton helper: returns the value registered under key
// `r` in the _tfGlobals map, initializing it with factory `t8` on first
// use. Keeps multiple bundled copies of TF.js sharing one instance.
function gl(r, t8) {
  let e = qG();
  if (e.has(r)) return e.get(r);
  {
    let o = t8();
    return e.set(r, o), e.get(r);
  }
}
// Kernel-name string constants (minified aliases of the TF.js KernelNames
// enum). Each short var holds the canonical kernel name used as a key in
// the kernel/gradient registries below.
var Xs = "Abs";
var Wo = "Acos";
var Uo = "Acosh";
var uo = "Add";
var Go = "AddN";
var Ho = "All";
var Ko = "Any";
var Ys = "ArgMax";
var Qs = "ArgMin";
var qo = "Asin";
var jo = "Asinh";
var Xo = "Atan";
var Yo = "Atanh";
var Qo = "Atan2";
var Zo = "AvgPool";
var Di = "AvgPoolGrad";
var Zs = "AvgPool3D";
var Ai = "AvgPool3DGrad";
var Jo = "BatchMatMul";
var Js = "BatchToSpaceND";
var en = "Bincount";
var ja = "BitwiseAnd";
var Bce = "BroadcastTo";
var ea = "BroadcastArgs";
var bo = "Cast";
var tn = "Ceil";
var Co = "ClipByValue";
var Fi = "Complex";
var Pi = "ComplexAbs";
var ta = "Concat";
var rn = "Conv2D";
var Oi = "Conv2DBackpropFilter";
var on = "Conv2DBackpropInput";
var nn = "Conv3D";
var Xa = "Conv3DBackpropFilterV2";
var sn = "Conv3DBackpropInputV2";
var an = "Cos";
var un = "Cosh";
var pn = "Cumprod";
var cn = "Cumsum";
var ln = "CropAndResize";
var ra = "DenseBincount";
var mn = "DepthToSpace";
var dn = "DepthwiseConv2dNative";
var Mi = "DepthwiseConv2dNativeBackpropFilter";
var Li = "DepthwiseConv2dNativeBackpropInput";
var oa = "Diag";
var fn = "Dilation2D";
var Bi = "Dilation2DBackpropInput";
var zi = "Dilation2DBackpropFilter";
var Pu = "Draw";
var hn = "RealDiv";
var Vi = "Einsum";
var gn = "Elu";
var Ya = "EluGrad";
var xn = "Erf";
var yn = "Equal";
var bn = "Exp";
var na = "ExpandDims";
var Cn = "Expm1";
var Wi = "FFT";
var sa = "Fill";
var wn = "FlipLeftRight";
var Sn = "Floor";
var In = "FloorDiv";
var vn = "FusedBatchNorm";
var aa = "GatherV2";
var kn = "GatherNd";
var Nn = "Greater";
var Tn = "GreaterEqual";
var wo = "Identity";
var Ui = "IFFT";
var Gi = "Imag";
var _n = "IsFinite";
var $n = "IsInf";
var En = "IsNan";
var Rn = "LeakyRelu";
var Dn = "Less";
var An = "LessEqual";
var Fn = "LinSpace";
var Pn = "Log";
var On = "Log1p";
var Mn = "LogicalAnd";
var Ln = "LogicalNot";
var Bn = "LogicalOr";
var $0 = "LogicalXor";
var zce = "LogSoftmax";
var Vce = "LowerBound";
var zn = "LRN";
var Qa = "LRNGrad";
var Wce = "MatrixBandPart";
var Vn = "Max";
var Wn = "Maximum";
var Un = "MaxPool";
var Hi = "MaxPoolGrad";
var ia = "MaxPool3D";
var Ki = "MaxPool3DGrad";
var ua = "MaxPoolWithArgmax";
var Gn = "Mean";
var Hn = "Min";
var Kn = "Minimum";
var qn = "MirrorPad";
var jn = "Mod";
var Xn = "Multinomial";
var Yn = "Multiply";
var pa = "Neg";
var Qn = "NotEqual";
var Zn = "NonMaxSuppressionV3";
var Za = "NonMaxSuppressionV4";
var Jn = "NonMaxSuppressionV5";
var ca = "OnesLike";
var es = "OneHot";
var la = "Pack";
var ts = "PadV2";
var Uce = "Pool";
var rs = "Pow";
var os = "Prelu";
var ns = "Prod";
var Qp = "RaggedGather";
var Zp = "RaggedRange";
var Jp = "RaggedTensorToTensor";
var ma = "Range";
var qi = "Real";
var ss = "Reciprocal";
var as = "Relu";
var da = "Reshape";
var is = "ResizeNearestNeighbor";
var Ja = "ResizeNearestNeighborGrad";
var us = "ResizeBilinear";
var ei = "ResizeBilinearGrad";
var ps = "Relu6";
var cs = "Reverse";
var ls = "Round";
var ms = "Rsqrt";
var ds = "ScatterNd";
var fs = "TensorScatterUpdate";
var hs = "SearchSorted";
var fa = "Select";
var gs = "Selu";
var ha = "Slice";
var xs = "Sin";
var ys = "Sinh";
var bs = "Sign";
var Cs = "Sigmoid";
var ws = "Softplus";
var Ss = "Sqrt";
var Is = "Sum";
var ga = "SpaceToBatchND";
var xa = "SplitV";
var vs = "Softmax";
var ji = "SparseFillEmptyRows";
var ti = "SparseReshape";
var ya = "SparseSegmentMean";
var ba = "SparseSegmentSum";
var ks = "SparseToDense";
var Ns = "SquaredDifference";
var Xi = "Square";
var Ou = "StaticRegexReplace";
var Ts = "StridedSlice";
var Ca = "StringNGrams";
var Yi = "StringSplit";
var Qi = "StringToHashBucketFast";
var _s = "Sub";
var $s = "Tan";
var Es = "Tanh";
var po = "Tile";
var Rs = "TopK";
var Ds = "Transform";
var co = "Transpose";
var Zi = "Unique";
var wa = "Unpack";
var Ji = "UnsortedSegmentSum";
var Gce = "UpperBound";
var Sa = "ZerosLike";
var So = "Step";
var Mu = "FromPixels";
var As = "RotateWithOffset";
var Io = "_FusedMatMul";
var vo = "FusedConv2D";
var ko = "FusedDepthwiseConv2D";
// console.warn, silenced when the IS_TEST or PROD env flags are set.
function Ia(...r) {
  A().getBool("IS_TEST") || A().getBool("PROD") || console.warn(...r);
}
// console.log, silenced when the IS_TEST or PROD env flags are set.
function jG(...r) {
  A().getBool("IS_TEST") || A().getBool("PROD") || console.log(...r);
}
// Cross-bundle singleton registries: ec maps "backend_kernelName" →
// kernel config; xl maps kernel name → gradient config.
var ec = gl("kernelRegistry", () => /* @__PURE__ */new Map());
var xl = gl("gradRegistry", () => /* @__PURE__ */new Map());
// Looks up the kernel config registered for kernel `r` on backend `t8`.
function tc(r, t8) {
  let e = uw(r, t8);
  return ec.get(e);
}
// Looks up the gradient config registered for kernel name `r`.
function iw(r) {
  return xl.get(r);
}
// Collects every kernel config registered for backend `r` by scanning
// the registry keys, which uw() formats as "backend_kernelName".
// NOTE(review): the manual iterator loop mirrors the upstream source; a
// for...of over ec.entries() would visit the same pairs.
function Ym(r) {
  let t8 = ec.entries(),
    e = [];
  for (;;) {
    let {
      done: o,
      value: n
    } = t8.next();
    if (o) break;
    let [s, a] = n,
      [i] = s.split("_");
    i === r && e.push(a);
  }
  return e;
}
// Registers a kernel config under its (kernelName, backendName) key,
// warning (via Ia) when an existing registration is overwritten.
function ri(r) {
  let {
    kernelName: t8,
    backendName: e
  } = r,
    o = uw(t8, e);
  ec.has(o) && Ia(`The kernel '${t8}' for backend '${e}' is already registered`), ec.set(o, r);
}
// Registers a gradient config; warns about overrides only when the DEBUG
// flag is set.
function Xce(r) {
  let {
    kernelName: t8
  } = r;
  xl.has(t8) && A().getBool("DEBUG") && Ia(`Overriding the gradient for '${t8}'`), xl.set(t8, r);
}
// Removes the kernel registration for (kernel `r`, backend `t8`);
// throws when it was never registered.
function Yce(r, t8) {
  let e = uw(r, t8);
  if (!ec.has(e)) throw new Error(`The kernel '${r}' for backend '${t8}' is not registered`);
  ec.delete(e);
}
// Removes the gradient registration for kernel `r`; throws when absent.
function Qce(r) {
  if (!xl.has(r)) throw new Error(`The gradient '${r}' for backend is not registered`);
  xl.delete(r);
}
// Copies every kernel registered for backend `r` onto backend `t8`
// (used when a new backend reuses another backend's kernels).
function Zce(r, t8) {
  Ym(r).forEach(o => {
    let n = Object.assign({}, o, {
      backendName: t8
    });
    ri(n);
  });
}
// Builds the registry key for a (kernelName `r`, backendName `t8`) pair.
function uw(r, t8) {
  return [t8, r].join("_");
}
// `y` is the util namespace object; qe (a bundler helper, presumably the
// define-getters shim — TODO confirm) installs lazy getters mapping each
// public util name to its minified implementation above/below.
var y = {};
qe(y, {
  arraysEqual: () => Cr,
  arraysEqualWithNull: () => ZC,
  assert: () => $,
  assertNonNegativeIntegerDimensions: () => wt,
  assertNonNull: () => io,
  assertShapesMatch: () => yt,
  bytesFromStringArray: () => ow,
  bytesPerElement: () => jp,
  checkConversionForErrors: () => tw,
  clamp: () => qp,
  computeStrides: () => js,
  convertBackendValuesAndArrayBuffer: () => BG,
  createScalarValue: () => t4,
  createShuffledIndices: () => PG,
  decodeString: () => sc,
  distSquared: () => RG,
  encodeString: () => tu,
  fetch: () => o4,
  fingerPrint64: () => e4,
  flatten: () => Ps,
  getArrayFromDType: () => Xm,
  getTypedArrayFromDType: () => ew,
  hasEncodingLoss: () => LG,
  hexToLong: () => yl,
  indexToLoc: () => WG,
  inferDtype: () => Ri,
  inferFromImplicitShape: () => MG,
  isBoolean: () => v0,
  isFunction: () => qs,
  isInt: () => qa,
  isNumber: () => k0,
  isPromise: () => Fu,
  isScalarShape: () => DG,
  isString: () => Vo,
  isTypedArray: () => Ot,
  isValidDtype: () => rw,
  locToIndex: () => VG,
  makeOnesTypedArray: () => fl,
  makeZerosNestedTypedArray: () => zG,
  makeZerosTypedArray: () => Yp,
  nearestDivisor: () => Xp,
  nearestLargerEven: () => _G,
  now: () => Wu,
  parseAxisParam: () => Ei,
  randUniform: () => EG,
  repeatedTry: () => OG,
  rightPad: () => Au,
  shuffle: () => I0,
  shuffleCombo: () => TG,
  sizeFromShape: () => He,
  sizeToSquarishShape: () => FG,
  squeezeShape: () => JC,
  sum: () => $G,
  swap: () => jm,
  tanh: () => AG,
  toNestedArray: () => Du,
  toTypedArray: () => nc
});
// Structural typed-array check, used when the platform supplies no
// isTypedArray implementation of its own.
function Qm(r) {
  const typedArrayKinds = [Float32Array, Int32Array, Uint8Array, Uint8ClampedArray];
  return typedArrayKinds.some(kind => r instanceof kind);
}
// long.js module interop: Kp/V0 are bundler require-shims; Vu ends up as
// the Long class (unwrapping a default export when present).
var mw = Kp(V0());
var Vu = mw.default || mw;
// Parses a hex string into an unsigned 64-bit long.js Long.
function yl(r) {
  return Vu.fromString(r, true, 16);
}
// 64-bit mixing constants (k0/k1/k2) from the FarmHash/CityHash
// fingerprint algorithm, as unsigned Longs.
var U0 = yl("c3a5c85c97cb3127");
var zu = yl("b492b66fbe98f273");
var wr = yl("9ae16a3b2f90404f");
// shiftMix: x ^ (x >>> 47), a FarmHash bit-mixing step on a Long.
function lw(r) {
  return r.xor(r.shru(47));
}
// Reads `e` bytes from byte-array `r` at offset `t8` as a little-endian
// unsigned Long.
function G0(r, t8, e) {
  let o = r.slice(t8, t8 + e);
  return Vu.fromBytes(Array.from(o), true, true);
}
// Fetches a 64-bit little-endian word at byte offset `t8`.
function St(r, t8) {
  return G0(r, t8, 8);
}
// Fetches a 32-bit little-endian word at byte offset `t8`.
function W0(r, t8) {
  return G0(r, t8, 4);
}
// Rotates the unsigned 64-bit value `r` right by `t8` bits; a zero
// rotation is the identity (avoids the undefined 64-bit shift).
function Qt(r, t8) {
  return t8 === 0 ? r : r.shru(t8).or(r.shl(64 - t8));
}
// hashLen16: combines two 64-bit values using multiply/xor/shift mixing
// with multiplier `e` (defaults to the FarmHash kMul constant).
function eu(r, t8, e = yl("9ddfea08eb382d69")) {
  let o = r.xor(t8).mul(e);
  o = o.xor(o.shru(47));
  let n = t8.xor(o).mul(e);
  return n = n.xor(n.shru(47)), n = n.mul(e), n;
}
// weakHashLen32WithSeeds core: folds four 64-bit data words (r,t8,e,o)
// into the running seed pair (n,s) and returns the updated pair.
function YG(r, t8, e, o, n, s) {
  n = n.add(r), s = Qt(s.add(n).add(o), 21);
  let a = n;
  return n = n.add(t8), n = n.add(e), s = s.add(Qt(n, 44)), [n.add(o), s.add(a)];
}
// weakHashLen32WithSeeds over the 32 bytes of `r` starting at offset t8.
function Jm(r, t8, e, o) {
  return YG(St(r, t8), St(r, t8 + 8), St(r, t8 + 16), St(r, t8 + 24), e, o);
}
// hashLen0to16: fingerprints byte arrays of length 0-16, with separate
// strategies for lengths >=8, >=4, >0, and the empty input (returns k2).
function QG(r, t8 = r.length) {
  if (t8 >= 8) {
    let e = wr.add(t8 * 2),
      o = St(r, 0).add(wr),
      n = St(r, t8 - 8),
      s = Qt(n, 37).mul(e).add(o),
      a = Qt(o, 25).add(n).mul(e);
    return eu(s, a, e);
  }
  if (t8 >= 4) {
    let e = wr.add(t8 * 2),
      o = W0(r, 0);
    return eu(o.shl(3).add(t8), W0(r, t8 - 4), e);
  }
  if (t8 > 0) {
    // 1-3 bytes: mix first, middle and last byte with the length.
    let e = r[0],
      o = r[t8 >> 1],
      n = r[t8 - 1],
      s = e + (o << 8),
      a = t8 + (n << 2);
    return lw(wr.mul(s).xor(U0.mul(a))).mul(wr);
  }
  return wr;
}
// hashLen17to32: fingerprints byte arrays of length 17-32 from four
// overlapping 64-bit words.
function ZG(r, t8 = r.length) {
  let e = wr.add(t8 * 2),
    o = St(r, 0).mul(zu),
    n = St(r, 8),
    s = St(r, t8 - 8).mul(e),
    a = St(r, t8 - 16).mul(wr);
  return eu(Qt(o.add(n), 43).add(Qt(s, 30)).add(a), o.add(Qt(n.add(wr), 18)).add(s), e);
}
// hashLen33to64: fingerprints byte arrays of length 33-64 from eight
// overlapping 64-bit words, chaining two hashLen16-style rounds.
function JG(r, t8 = r.length) {
  let e = wr.add(t8 * 2),
    o = St(r, 0).mul(wr),
    n = St(r, 8),
    s = St(r, t8 - 8).mul(e),
    a = St(r, t8 - 16).mul(wr),
    i = Qt(o.add(n), 43).add(Qt(s, 30)).add(a),
    p = eu(i, o.add(Qt(n.add(wr), 18)).add(s), e),
    u = St(r, 16).mul(e),
    c = St(r, 24),
    l = i.add(St(r, t8 - 32)).mul(e),
    m = p.add(St(r, t8 - 24)).mul(e);
  return eu(Qt(u.add(c), 43).add(Qt(l, 30)).add(m), u.add(Qt(c.add(o), 18)).add(l), e);
}
// fingerPrint64: FarmHash Fingerprint64 over byte array `r` (length t8).
// Lengths <= 64 dispatch to the specialized small-input hashes; longer
// inputs are processed in 64-byte blocks with a final tail round over
// the last 64 bytes. All arithmetic is unsigned 64-bit via long.js.
function e4(r, t8 = r.length) {
  let e = Vu.fromNumber(81, true);
  if (t8 <= 32) return t8 <= 16 ? QG(r, t8) : ZG(r, t8);
  if (t8 <= 64) return JG(r, t8);
  let o = e,
    n = e.mul(zu).add(113),
    s = lw(n.mul(wr).add(113)).mul(wr),
    a = [Vu.UZERO, Vu.UZERO],
    i = [Vu.UZERO, Vu.UZERO];
  o = o.mul(wr).add(St(r, 0));
  // u: offset of the last full 64-byte block; c: start of the tail round.
  let p = 0,
    u = (t8 - 1 >> 6) * 64,
    c = u + (t8 - 1 & 63) - 63;
  do o = Qt(o.add(n).add(a[0]).add(St(r, p + 8)), 37).mul(zu), n = Qt(n.add(a[1]).add(St(r, p + 48)), 42).mul(zu), o = o.xor(i[1]), n = n.add(a[0]).add(St(r, p + 40)), s = Qt(s.add(i[0]), 33).mul(zu), a = Jm(r, p, a[1].mul(zu), o.add(i[0])), i = Jm(r, p + 32, s.add(i[1]), n.add(St(r, p + 16))), [s, o] = [o, s], p += 64; while (p !== u);
  let l = zu.add(s.and(255).shl(1));
  return p = c, i[0] = i[0].add(t8 - 1 & 63), a[0] = a[0].add(i[0]), i[0] = i[0].add(a[0]), o = Qt(o.add(n).add(a[0]).add(St(r, p + 8)), 37).mul(l), n = Qt(n.add(a[1]).add(St(r, p + 48)), 42).mul(l), o = o.xor(i[1].mul(9)), n = n.add(a[0].mul(9).add(St(r, p + 40))), s = Qt(s.add(i[0]), 33).mul(l), a = Jm(r, p, a[1].mul(l), o.add(i[0])), i = Jm(r, p + 32, s.add(i[1]), n.add(St(r, p + 16))), [s, o] = [o, s], eu(eu(a[0], i[0], l).add(lw(n).mul(U0)).add(s), eu(a[1], i[1], l).add(o), l);
}
// createScalarValue: encodes a scalar for upload — strings become UTF-8
// bytes (tu), everything else a 1-element typed array of dtype t8 (nc).
function t4(r, t8) {
  return t8 === "string" ? tu(r) : nc([r], t8);
}
// True when the typed array `r` already has the exact representation for
// dtype `t8`, so it can be uploaded without conversion.
function r4(r, t8) {
  switch (t8) {
    case "float32":
      return r instanceof Float32Array;
    case "int32":
      return r instanceof Int32Array;
    case "bool":
      return r instanceof Uint8Array;
    default:
      return false;
  }
}
// toTypedArray: converts JS data `r` (possibly nested; flattened via Ps)
// into the typed array for dtype `t8`. Under the DEBUG flag the values
// are first validated (tw). bool data rounds each value and stores 0/1.
function nc(r, t8) {
  if (t8 === "string") throw new Error("Cannot convert a string[] to a TypedArray");
  if (Array.isArray(r) && (r = Ps(r)), A().getBool("DEBUG") && tw(r, t8), r4(r, t8)) return r;
  if (t8 == null || t8 === "float32" || t8 === "complex64") return new Float32Array(r);
  if (t8 === "int32") return new Int32Array(r);
  if (t8 === "bool") {
    let e = new Uint8Array(r.length);
    for (let o = 0; o < e.length; ++o) Math.round(r[o]) !== 0 && (e[o] = 1);
    return e;
  } else throw new Error(`Unknown data type ${t8}`);
}
// Current time in ms from the active platform (e.g. performance.now).
function Wu() {
  return A().platform.now();
}
// Platform-appropriate fetch (browser fetch or a node polyfill).
function o4(r, t8) {
  return A().platform.fetch(r, t8);
}
// Encodes string `r` to bytes via the platform; encoding defaults to
// utf-8 (also when an explicit falsy encoding is passed).
function tu(r, t8 = "utf-8") {
  return t8 = t8 || "utf-8", A().platform.encode(r, t8);
}
// Decodes bytes `r` into a string via the platform; encoding defaults to
// utf-8 (also when an explicit falsy encoding is passed).
function sc(r, t8 = "utf-8") {
  return t8 = t8 || "utf-8", A().platform.decode(r, t8);
}
// Typed-array check, deferring to the platform's implementation when it
// provides one (needed for cross-realm typed arrays), else Qm.
function Ot(r) {
  return A().platform.isTypedArray != null ? A().platform.isTypedArray(r) : Qm(r);
}
// flatten: recursively flattens nested arrays/typed arrays into `t8`.
// Primitives, promises (Fu) and null are leaves; when `e` is true typed
// arrays are treated as leaves too. Plain objects are walked by numeric
// key from 0 up to their largest integer-keyed property.
function Ps(r, t8 = [], e = false) {
  if (t8 == null && (t8 = []), typeof r == "boolean" || typeof r == "number" || typeof r == "string" || Fu(r) || r == null || Ot(r) && e) t8.push(r);else if (Array.isArray(r) || Ot(r)) for (let o = 0; o < r.length; ++o) Ps(r[o], t8, e);else {
    let o = -1;
    for (let n of Object.keys(r)) /^([1-9]+[0-9]*|0)$/.test(n) && (o = Math.max(o, Number(n)));
    for (let n = 0; n <= o; n++) Ps(r[n], t8, e);
  }
  return t8;
}
// Profiler: times kernel executions through the backend timer (falling
// back to dataSync-based wall time) and, under the
// CHECK_COMPUTATION_FOR_ERRORS flag, scans outputs for NaN/Infinity.
var ed = class {
  constructor(t8, e) {
    this.backendTimer = t8, this.logger = e, e == null && (this.logger = new dw());
  }
  // Runs kernel fn `o` (name t8, inputs e); returns its outputs plus
  // promises for the elapsed time and extra backend profile info.
  profileKernel(t8, e, o) {
    let n,
      s = () => {
        n = o();
      },
      a,
      i = Wu();
    if (this.backendTimer.timerAvailable()) a = this.backendTimer.time(s);else {
      // No backend timer: force completion via dataSync and measure wall
      // time around it.
      s();
      for (let u of n) u.dataSync();
      a = Promise.resolve({
        kernelMs: Wu() - i
      });
    }
    if (A().getBool("CHECK_COMPUTATION_FOR_ERRORS")) for (let u = 0; u < n.length; u++) {
      let c = n[u];
      c.data().then(l => {
        n4(l, c.dtype, t8);
      });
    }
    return {
      kernelName: t8,
      outputs: n,
      inputs: e,
      timeMs: a.then(u => u.kernelMs),
      extraInfo: a.then(u => u.getExtraProfileInfo != null ? u.getExtraProfileInfo() : "")
    };
  }
  // Logs one profile entry once its data/time/info promises resolve.
  logKernelProfile(t8) {
    let {
      kernelName: e,
      outputs: o,
      timeMs: n,
      inputs: s,
      extraInfo: a
    } = t8;
    o.forEach(i => {
      Promise.all([i.data(), n, a]).then(p => {
        this.logger.logKernelProfile(e, i, p[0], p[1], s, p[2]);
      });
    });
  }
};
// Scans float32 kernel results `r` for NaN/Infinity; warns with the
// kernel name `e` and returns true on the first bad value. Non-float32
// dtypes are never flagged.
function n4(r, t8, e) {
  if (t8 !== "float32") {
    return false;
  }
  for (const value of r) {
    if (isNaN(value) || !isFinite(value)) {
      console.warn(`Found ${value} in the result of '${e}'`);
      return true;
    }
  }
  return false;
}
// Default console logger for kernel profiles: prints padded kernel name,
// time (or timing error), output rank/shape/size and input summaries.
var dw = class {
  logKernelProfile(t8, e, o, n, s, a) {
    // n is either elapsed ms or an object carrying a timing error.
    let i = typeof n == "number" ? Au(`${n}ms`, 9) : n.error,
      p = Au(t8, 25),
      u = e.rank,
      c = e.size,
      l = Au(e.shape.toString(), 14),
      m = "";
    for (let d in s) {
      let f = s[d];
      if (f != null) {
        // Inputs may be raw data without a shape; fall back to the
        // output's shape for display.
        let h = f.shape || e.shape,
          g = h.length;
        m += `${d}: ${g}D ${g > 0 ? h : ""} `;
      }
    }
    console.log(`%c${p} %c${i} %c${u}D ${l} %c${c} %c${m} %c${a}`, "font-weight:bold", "color:red", "color:blue", "color: orange", "color: green", "color: steelblue");
  }
};
// getFilteredNodesXToY: prunes tape `r` down to nodes that lie on a path
// from any input tensor in `t8` to the output tensor `e`. Pass 1 marks
// nodes reachable forward from the inputs; pass 2 marks nodes reachable
// backward from the output; pass 3 keeps nodes marked by both, with each
// kept node's inputs restricted to tensors reachable from `t8`.
function H0(r, t8, e) {
  let o = {},
    n = {};
  for (let p = 0; p < t8.length; p++) o[t8[p].id] = true;
  for (let p = 0; p < r.length; p++) {
    let u = r[p],
      c = u.inputs;
    for (let l in c) {
      let m = c[l],
        d = false;
      // NOTE(review): this inner loop repeats the same o[m.id] test
      // t8.length times without using f; it mirrors the upstream source
      // and is left untouched.
      for (let f = 0; f < t8.length; f++) if (o[m.id]) {
        u.outputs.forEach(h => o[h.id] = true), d = true, n[u.id] = true;
        break;
      }
      if (d) break;
    }
  }
  let s = {};
  s[e.id] = true;
  let a = {};
  // Walk the tape backwards, marking every node whose output feeds `e`.
  for (let p = r.length - 1; p >= 0; p--) {
    let u = r[p],
      c = u.inputs;
    for (let l = 0; l < u.outputs.length; l++) if (s[u.outputs[l].id]) {
      for (let m in c) s[c[m].id] = true, a[u.id] = true;
      break;
    }
  }
  let i = [];
  for (let p = 0; p < r.length; p++) {
    let u = r[p];
    if (n[u.id] && a[u.id]) {
      // Keep only the inputs that are reachable from the x tensors.
      let c = {};
      for (let m in u.inputs) {
        let d = u.inputs[m];
        o[d.id] && (c[m] = d);
      }
      let l = Object.assign({}, u);
      l.inputs = c, l.outputs = u.outputs, i.push(l);
    }
  }
  return i;
}
// backpropagateGradients: walks the filtered tape `t8` in reverse,
// accumulating gradients into the id → tensor map `r`. `e` runs each
// gradient fn (typically inside tidy); `o` adds two gradient tensors,
// after which the old accumulator is disposed.
function K0(r, t8, e, o) {
  for (let n = t8.length - 1; n >= 0; n--) {
    let s = t8[n],
      a = [];
    // Collect the output gradients (null where none has accumulated).
    if (s.outputs.forEach(p => {
      let u = r[p.id];
      u != null ? a.push(u) : a.push(null);
    }), s.gradient == null) throw new Error(`Cannot compute gradient: gradient function not found for ${s.kernelName}.`);
    let i = s.gradient(a);
    for (let p in s.inputs) {
      if (!(p in i)) throw new Error(`Cannot backprop through input ${p}. Available gradients found: ${Object.keys(i)}.`);
      let u = e(() => i[p]());
      if (u.dtype !== "float32") throw new Error(`Error in gradient for op ${s.kernelName}. The gradient of input ${p} must have 'float32' dtype, but has '${u.dtype}'`);
      let c = s.inputs[p];
      if (!Cr(u.shape, c.shape)) throw new Error(`Error in gradient for op ${s.kernelName}. The gradient of input '${p}' has shape '${u.shape}', which does not match the shape of the input '${c.shape}'`);
      if (r[c.id] == null) r[c.id] = u;else {
        let l = r[c.id];
        r[c.id] = o(l, u), l.dispose();
      }
    }
  }
}
// Tensor-printing limits: q0 — max items per dim before summarizing with
// "..."; bl — items shown at each edge when summarizing; fw — decimal
// places for floats.
var q0 = 20;
var bl = 3;
var fw = 7;
// tensorToString: renders flat data `r` with shape t8/dtype `e` as a
// printable string; `o` (verbose) prepends dtype/rank/shape metadata.
// The backtick literals below intentionally contain raw newlines.
function j0(r, t8, e, o) {
  let n = js(t8),
    s = s4(r, t8, e, n),
    a = t8.length,
    i = td(r, t8, e, n, s),
    p = ["Tensor"];
  return o && (p.push(` dtype: ${e}`), p.push(` rank: ${a}`), p.push(` shape: [${t8}]`), p.push(" values:")), p.push(i.map(u => " " + u).join(`
`)), p.join(`
`);
}
// computeMaxSizePerColumn: for rank > 1 tensors, finds the widest
// rendered value in each column of the innermost dimension so columns
// can be right-aligned when printing.
function s4(r, t8, e, o) {
  let n = He(t8),
    s = o[o.length - 1],
    a = new Array(s).fill(0),
    i = t8.length,
    // complex64 data is paired into [re, im] tuples before measuring.
    p = e === "complex64" ? wl(r) : r;
  if (i > 1) for (let u = 0; u < n / s; u++) {
    let c = u * s;
    for (let l = 0; l < s; l++) a[l] = Math.max(a[l], Cl(p[c + l], 0, e).length);
  }
  return a;
}
function Cl(r, t8, e) { | |
let o; | |
return Array.isArray(r) ? o = `${parseFloat(r[0].toFixed(fw))} + ${parseFloat(r[1].toFixed(fw))}j` : Vo(r) ? o = `'${r}'` : e === "bool" ? o = X0(r) : o = parseFloat(r.toFixed(fw)).toString(), Au(o, t8); | |
} | |
// Renders a numeric bool-tensor value (0/1) as "false"/"true".
function X0(r) {
  if (r === 0) {
    return "false";
  }
  return "true";
}
// subTensorToString: recursively renders flat data `r` of shape `t8`
// (dtype `e`, strides `o`, column widths `n`) into bracketed lines.
// Dimensions longer than q0 are summarized as the first/last bl items
// with "...". `s` marks the last sibling (controls trailing newlines).
// The backtick literals near the bottom intentionally contain raw
// newlines used as separators between higher-rank slices.
function td(r, t8, e, o, n, s = true) {
  let a = e === "complex64" ? 2 : 1,
    i = t8[0],
    p = t8.length;
  // Rank 0: a single value.
  if (p === 0) {
    if (e === "complex64") {
      let h = wl(r);
      return [Cl(h[0], 0, e)];
    }
    return e === "bool" ? [X0(r[0])] : [r[0].toString()];
  }
  // Rank 1: one bracketed row, possibly summarized.
  if (p === 1) {
    if (i > q0) {
      let g = bl * a,
        x = Array.from(r.slice(0, g)),
        b = Array.from(r.slice((i - bl) * a, i * a));
      return e === "complex64" && (x = wl(x), b = wl(b)), ["[" + x.map((C, S) => Cl(C, n[S], e)).join(", ") + ", ..., " + b.map((C, S) => Cl(C, n[i - bl + S], e)).join(", ") + "]"];
    }
    return ["[" + (e === "complex64" ? wl(r) : Array.from(r)).map((g, x) => Cl(g, n[x], e)).join(", ") + "]"];
  }
  // Rank >= 2: recurse over the first dimension's slices.
  let u = t8.slice(1),
    c = o.slice(1),
    l = o[0] * a,
    m = [];
  if (i > q0) {
    for (let h = 0; h < bl; h++) {
      let g = h * l,
        x = g + l;
      m.push(...td(r.slice(g, x), u, e, c, n, false));
    }
    m.push("...");
    for (let h = i - bl; h < i; h++) {
      let g = h * l,
        x = g + l;
      m.push(...td(r.slice(g, x), u, e, c, n, h === i - 1));
    }
  } else for (let h = 0; h < i; h++) {
    let g = h * l,
      x = g + l;
    m.push(...td(r.slice(g, x), u, e, c, n, h === i - 1));
  }
  let d = p === 2 ? "," : "";
  m[0] = "[" + (i > 0 ? m[0] + d : "");
  for (let h = 1; h < m.length - 1; h++) m[h] = " " + m[h] + d;
  let f = `,
`;
  for (let h = 2; h < p; h++) f += `
`;
  return m[m.length - 1] = " " + m[m.length - 1] + "]" + (s ? "" : f), m;
}
// Pairs a flat interleaved complex64 buffer into [real, imag] tuples.
function wl(r) {
  const tuples = [];
  for (let i = 0; i < r.length; i += 2) {
    tuples.push([r[i], r[i + 1]]);
  }
  return tuples;
}
// TensorBuffer: a mutable CPU-side buffer with shape/dtype/row-major
// strides; supports get/set by coordinates and conversion to a Tensor.
// complex64 buffers are rejected (real/imag must be built separately).
var tt = class {
  constructor(t8, e, o) {
    if (this.dtype = e, this.shape = t8.slice(), this.size = He(t8), o != null) {
      let n = o.length;
      $(n === this.size, () => `Length of values '${n}' does not match the size inferred by the shape '${this.size}'.`);
    }
    if (e === "complex64") throw new Error("complex64 dtype TensorBuffers are not supported. Please create a TensorBuffer for the real and imaginary parts separately and call tf.complex(real, imag).");
    // Values default to a fresh zeroed backing array (Xm).
    this.values = o || Xm(e, this.size), this.strides = js(t8);
  }
  // Writes value t8 at coordinates e (scalar buffers accept no coords).
  set(t8, ...e) {
    e.length === 0 && (e = [0]), $(e.length === this.rank, () => `The number of provided coordinates (${e.length}) must match the rank (${this.rank})`);
    let o = this.locToIndex(e);
    this.values[o] = t8;
  }
  // Reads the value at the given coordinates; throws when out of range.
  get(...t8) {
    t8.length === 0 && (t8 = [0]);
    let e = 0;
    for (let n of t8) {
      if (n < 0 || n >= this.shape[e]) {
        let s = `Requested out of range element at ${t8}. Buffer shape=${this.shape}`;
        throw new Error(s);
      }
      e++;
    }
    let o = t8[t8.length - 1];
    for (let n = 0; n < t8.length - 1; ++n) o += this.strides[n] * t8[n];
    return this.values[o];
  }
  // Coordinates → flat index via row-major strides.
  locToIndex(t8) {
    if (this.rank === 0) return 0;
    if (this.rank === 1) return t8[0];
    let e = t8[t8.length - 1];
    for (let o = 0; o < t8.length - 1; ++o) e += this.strides[o] * t8[o];
    return e;
  }
  // Flat index → coordinates (inverse of locToIndex).
  indexToLoc(t8) {
    if (this.rank === 0) return [];
    if (this.rank === 1) return [t8];
    let e = new Array(this.shape.length);
    for (let o = 0; o < e.length - 1; ++o) e[o] = Math.floor(t8 / this.strides[o]), t8 -= e[o] * this.strides[o];
    return e[e.length - 1] = t8, e;
  }
  get rank() {
    return this.shape.length;
  }
  // Uploads the buffer to the current engine (Os) as an immutable Tensor.
  toTensor() {
    return Os().makeTensor(this.values, this.shape, this.dtype);
  }
};
// Late-bound hooks to break circular deps: Os returns the engine
// (tensor tracker), ac the ops handler, a4 a third hook installed via
// Z0 below. All are set during engine initialization.
var Os = null;
var ac = null;
var a4 = null;
// Installs the tensor-tracker (engine) accessor used by Os().
function Y0(r) {
  Os = r;
}
// Installs the ops handler (buffer/print/clone/cast) used via ac.
function Q0(r) {
  ac = r;
}
// Installs the a4 hook (consumer not visible in this chunk).
function Z0(r) {
  a4 = r;
}
// Tensor: immutable handle to typed data of a given shape/dtype. Data
// lives in the engine (Os); ops (buffer/print/clone/cast) are delegated
// to the late-bound ac handler to avoid circular imports.
var ut = class {
  constructor(t8, e, o, n) {
    this.kept = false, this.isDisposedInternal = false, this.shape = t8.slice(), this.dtype = e || "float32", this.size = He(t8), this.strides = js(t8), this.dataId = o, this.id = n, this.rankType = this.rank < 5 ? this.rank.toString() : "higher";
  }
  get rank() {
    return this.shape.length;
  }
  // Async copy of the data as a TensorBuffer.
  async buffer() {
    let t8 = await this.data();
    return ac.buffer(this.shape, this.dtype, t8);
  }
  // Sync variant of buffer(); may block on the backend.
  bufferSync() {
    return ac.buffer(this.shape, this.dtype, this.dataSync());
  }
  // Async copy of the data as a nested JS array.
  async array() {
    let t8 = await this.data();
    return Du(this.shape, t8, this.dtype === "complex64");
  }
  arraySync() {
    return Du(this.shape, this.dataSync(), this.dtype === "complex64");
  }
  // Async read of the flat values; string tensors are utf-8 decoded.
  async data() {
    this.throwIfDisposed();
    let t8 = Os().read(this.dataId);
    if (this.dtype === "string") {
      let e = await t8;
      try {
        return e.map(o => sc(o));
      } catch (o) {
        throw new Error("Failed to decode the string bytes into utf-8. To get the original bytes, call tensor.bytes().");
      }
    }
    return t8;
  }
  // Keeps the data on the GPU and returns a GPU-resident handle.
  dataToGPU(t8) {
    return this.throwIfDisposed(), Os().readToGPU(this.dataId, t8);
  }
  // Sync read of the flat values; may block on the backend.
  dataSync() {
    this.throwIfDisposed();
    let t8 = Os().readSync(this.dataId);
    if (this.dtype === "string") try {
      return t8.map(e => sc(e));
    } catch (e) {
      throw new Error("Failed to decode the string bytes into utf-8. To get the original bytes, call tensor.bytes().");
    }
    return t8;
  }
  // Raw bytes: string tensors return the undecoded byte arrays.
  async bytes() {
    this.throwIfDisposed();
    let t8 = await Os().read(this.dataId);
    return this.dtype === "string" ? t8 : new Uint8Array(t8.buffer);
  }
  // Releases the tensor's memory (and its keras mask, when attached).
  dispose() {
    this.isDisposed || (this.kerasMask && this.kerasMask.dispose(), Os().disposeTensor(this), this.isDisposedInternal = true);
  }
  get isDisposed() {
    return this.isDisposedInternal;
  }
  throwIfDisposed() {
    if (this.isDisposed) throw new Error("Tensor is disposed.");
  }
  print(t8 = false) {
    return ac.print(this, t8);
  }
  clone() {
    return this.throwIfDisposed(), ac.clone(this);
  }
  // Human-readable rendering (j0); `t8` enables verbose metadata.
  toString(t8 = false) {
    let e = this.dataSync();
    return j0(e, this.shape, this.dtype, t8);
  }
  cast(t8) {
    return this.throwIfDisposed(), ac.cast(this, t8);
  }
  // Wraps this tensor in a trainable Variable via the engine.
  variable(t8 = true, e, o) {
    return this.throwIfDisposed(), Os().makeVariable(this, t8, e, o);
  }
};
// Duck-typed instanceof: any object exposing data/dataSync/throwIfDisposed
// counts as a Tensor, so instances from another bundled copy still match.
Object.defineProperty(ut, Symbol.hasInstance, {
  value: r => !!r && r.data != null && r.dataSync != null && r.throwIfDisposed != null
});
// Registers the Tensor class under a global key via `gl` (presumably a
// global-singleton helper — confirm against full file).
function i4() {
  return gl("Tensor", () => ut);
}
i4();
// `oi`: Variable — a named, optionally trainable Tensor whose value can be
// reassigned in place via assign().
var oi = class extends ut {
  // t8 = initial tensor, e = trainable, o = name, n = tensor id.
  constructor(t8, e, o, n) {
    super(t8.shape, t8.dtype, t8.dataId, n), this.trainable = e, this.name = o;
  }
  // Replaces the backing data; dtype and shape must match the current value.
  assign(t8) {
    if (t8.dtype !== this.dtype) throw new Error(`dtype of the new value (${t8.dtype}) and previous value (${this.dtype}) must match`);
    if (!Cr(t8.shape, this.shape)) throw new Error(`shape of the new value (${t8.shape}) and previous value (${this.shape}) must match`);
    Os().disposeTensor(this), this.dataId = t8.dataId, Os().incRef(this, null);
  }
  dispose() {
    Os().disposeVariable(this), this.isDisposedInternal = true;
  }
};
// Duck-typed instanceof: a Tensor with an assign() method is a Variable.
Object.defineProperty(oi, Symbol.hasInstance, {
  value: r => r instanceof ut && r.assign != null && r.assign instanceof Function
});
// `ek`: lazily-exported tensor utility namespace (qe wires up getter exports).
var ek = {};
qe(ek, {
  assertTypesMatch: () => Cw,
  getTensorsInContainer: () => Sl,
  isTensorInList: () => p4,
  makeTypesMatch: () => Oe
});
// Rank enum (R0..R6).
var hw;
(function (r) {
  r.R0 = "R0", r.R1 = "R1", r.R2 = "R2", r.R3 = "R3", r.R4 = "R4", r.R5 = "R5", r.R6 = "R6";
})(hw || (hw = {}));
// Upcast tables: result dtype when combining with int32 (gw), bool (xw),
// float32 (yw) and complex64 (bw) respectively.
var gw;
(function (r) {
  r.float32 = "float32", r.int32 = "int32", r.bool = "int32", r.complex64 = "complex64";
})(gw || (gw = {}));
var xw;
(function (r) {
  r.float32 = "float32", r.int32 = "int32", r.bool = "bool", r.complex64 = "complex64";
})(xw || (xw = {}));
var yw;
(function (r) {
  r.float32 = "float32", r.int32 = "float32", r.bool = "float32", r.complex64 = "complex64";
})(yw || (yw = {}));
var bw;
(function (r) {
  r.float32 = "complex64", r.int32 = "complex64", r.bool = "complex64", r.complex64 = "complex64";
})(bw || (bw = {}));
// First-dtype -> upcast table lookup used by dt().
var u4 = {
  float32: yw,
  int32: gw,
  bool: xw,
  complex64: bw
};
// Upcast: smallest dtype that can represent both r and t8. Strings only
// combine with strings; everything else goes through the tables above.
function dt(r, t8) {
  if (r === "string" || t8 === "string") {
    if (r === "string" && t8 === "string") return "string";
    throw new Error(`Can not upcast ${r} with ${t8}`);
  }
  return u4[r][t8];
}
// Upcasts a dtype against int32 (e.g. bool -> int32, float32 stays float32).
function ni(r) {
  return dt(r, "int32");
}
// True when `r` looks like a WebGL-texture-backed data descriptor
// (an object carrying a `texture` property holding a WebGLTexture).
function rd(r) {
  if (r == null || typeof r !== "object") return false;
  if (!("texture" in r)) return false;
  return r.texture instanceof WebGLTexture;
}
// True when `r` looks like a WebGPU-buffer-backed data descriptor
// (an object carrying a `buffer` property holding a GPUBuffer).
// Always false in environments without the WebGPU global.
function od(r) {
  if (typeof GPUBuffer === "undefined") return false;
  if (r == null || typeof r !== "object") return false;
  return "buffer" in r && r.buffer instanceof GPUBuffer;
}
// Casts both tensors to their common upcast dtype so they can be combined;
// returns them unchanged when the dtypes already match.
function Oe(r, t8) {
  if (r.dtype === t8.dtype) return [r, t8];
  let e = dt(r.dtype, t8.dtype);
  return [r.cast(e), t8.cast(e)];
}
// Asserts (via the `$` assert util) that two tensors share the same dtype.
function Cw(r, t8) {
  $(r.dtype === t8.dtype, () => `The dtypes of the first(${r.dtype}) and second(${t8.dtype}) input must match`);
}
// Membership test by tensor id: is tensor `r` present in list `t8`?
function p4(r, t8) {
  for (const candidate of t8) {
    if (candidate.id === r.id) return true;
  }
  return false;
}
// Collects every Tensor reachable inside an arbitrary container (object/array
// nesting); the Set guards against cycles during the recursive walk in J0.
function Sl(r) {
  let t8 = [];
  return J0(r, t8, /* @__PURE__ */new Set()), t8;
}
// Recursive helper for Sl: pushes Tensors into t8, descending into arrays and
// plain objects while tracking visited values in `e` to avoid infinite loops.
function J0(r, t8, e) {
  if (r == null) return;
  if (r instanceof ut) {
    t8.push(r);
    return;
  }
  if (!c4(r)) return;
  // Walk every enumerable property of the container.
  let o = r;
  for (let n in o) {
    let s = o[n];
    e.has(s) || (e.add(s), J0(s, t8, e));
  }
}
// Can `r` contain nested tensors? Arrays and anything object-typed qualify.
// Note: `typeof null === "object"`, so null also returns true here (the
// caller, J0, has already filtered nulls out before asking).
function c4(r) {
  if (Array.isArray(r)) return true;
  return typeof r === "object";
}
// Discriminates kernel-style invocation configs (which carry a `kernelName`)
// from custom forward-function configs (which do not).
function ww(r) {
  const { kernelName } = r;
  return kernelName !== undefined && kernelName !== null;
}
// `nd`: mutable engine state — tensor/byte counters, gradient tape depth,
// scope stack, per-dataId info, and the active profiling record.
var nd = class {
  constructor() {
    this.registeredVariables = {}, this.nextTapeNodeId = 0, this.numBytes = 0, this.numTensors = 0, this.numStringTensors = 0, this.numDataBuffers = 0, this.gradientDepth = 0, this.kernelDepth = 0, this.scopeStack = [], this.numDataMovesStack = [], this.nextScopeId = 0, this.tensorInfo = /* @__PURE__ */new WeakMap(), this.profiling = false, this.activeProfile = {
      newBytes: 0,
      newTensors: 0,
      peakBytes: 0,
      kernels: [],
      result: null,
      // Deduplicated names of all kernels recorded during profiling.
      get kernelNames() {
        return Array.from(new Set(this.kernels.map(t8 => t8.name)));
      }
    };
  }
  // Disposes every registered variable held by this state.
  dispose() {
    for (let t8 in this.registeredVariables) this.registeredVariables[t8].dispose();
  }
};
// `ru`: the TF.js Engine. Owns backend registration/selection, tensor
// bookkeeping (ref counts, bytes), kernel execution, the gradient tape,
// memory scopes (tidy), and profiling. Accessed globally as `T` (see Sw()).
var ru = class {
  constructor(t8) {
    this.ENV = t8, this.registry = {}, this.registryFactory = {}, this.pendingBackendInitId = 0, this.state = new nd();
  }
  // Resolves once some backend (highest priority first) initializes.
  async ready() {
    if (this.pendingBackendInit != null) return this.pendingBackendInit.then(() => {});
    if (this.backendInstance != null) return;
    let t8 = this.getSortedBackends();
    for (let e = 0; e < t8.length; e++) {
      let o = t8[e];
      if (await this.initializeBackend(o).success) {
        await this.setBackend(o);
        return;
      }
    }
    throw new Error("Could not initialize any backends, all backend initializations failed.");
  }
  // Lazily initializes the best available backend on first access; throws if
  // the chosen backend needs async init (caller must await tf.ready()).
  get backend() {
    if (this.pendingBackendInit != null) throw new Error(`Backend '${this.backendName}' has not yet been initialized. Make sure to await tf.ready() or await tf.setBackend() before calling other methods`);
    if (this.backendInstance == null) {
      let {
        name: t8,
        asyncInit: e
      } = this.initializeBackendsAndReturnBest();
      if (e) throw new Error(`The highest priority backend '${t8}' has not yet been initialized. Make sure to await tf.ready() or await tf.setBackend() before calling other methods`);
      this.setBackend(t8);
    }
    return this.backendInstance;
  }
  backendNames() {
    return Object.keys(this.registryFactory);
  }
  // Returns an initialized backend instance, or null if it needs async init
  // or is unknown.
  findBackend(t8) {
    if (!(t8 in this.registry)) if (t8 in this.registryFactory) {
      let {
        asyncInit: e
      } = this.initializeBackend(t8);
      if (e) return null;
    } else return null;
    return this.registry[t8];
  }
  findBackendFactory(t8) {
    return t8 in this.registryFactory ? this.registryFactory[t8].factory : null;
  }
  // Registers a backend factory; re-registration is a warning, not an error.
  registerBackend(t8, e, o = 1) {
    return t8 in this.registryFactory ? (Ia(`${t8} backend was already registered. Reusing existing backend factory.`), false) : (this.registryFactory[t8] = {
      factory: e,
      priority: o
    }, true);
  }
  // Switches the active backend, initializing it (possibly async) on demand.
  async setBackend(t8) {
    if (this.registryFactory[t8] == null) throw new Error(`Backend name '${t8}' not found in registry`);
    if (this.backendName = t8, this.registry[t8] == null) {
      this.backendInstance = null;
      let {
        success: e,
        asyncInit: o
      } = this.initializeBackend(t8);
      if (!(o ? await e : e)) return false;
    }
    return this.backendInstance = this.registry[t8], this.setupRegisteredKernels(), this.profiler = new ed(this.backendInstance), true;
  }
  // Runs each registered kernel's setupFunc against the new backend.
  setupRegisteredKernels() {
    Ym(this.backendName).forEach(e => {
      e.setupFunc != null && e.setupFunc(this.backendInstance);
    });
  }
  disposeRegisteredKernels(t8) {
    Ym(t8).forEach(o => {
      o.disposeFunc != null && o.disposeFunc(this.registry[t8]);
    });
  }
  // Invokes the backend factory. Sync factories are stored immediately;
  // promise-returning factories are raced against pendingBackendInitId so a
  // newer init invalidates an older in-flight one.
  initializeBackend(t8) {
    let e = this.registryFactory[t8];
    if (e == null) throw new Error(`Cannot initialize backend ${t8}, no registration found.`);
    try {
      let o = e.factory();
      if (o && !(o instanceof ao) && typeof o.then == "function") {
        let n = ++this.pendingBackendInitId,
          s = o.then(a => n < this.pendingBackendInitId ? false : (this.registry[t8] = a, this.pendingBackendInit = null, true)).catch(a => (n < this.pendingBackendInitId || (this.pendingBackendInit = null, Ia(`Initialization of backend ${t8} failed`), Ia(a.stack || a.message)), false));
        return this.pendingBackendInit = s, {
          success: s,
          asyncInit: true
        };
      } else return this.registry[t8] = o, {
        success: true,
        asyncInit: false
      };
    } catch (o) {
      return Ia(`Initialization of backend ${t8} failed`), Ia(o.stack || o.message), {
        success: false,
        asyncInit: false
      };
    }
  }
  // Unregisters a backend, disposing its kernels/instance and clearing the
  // active-backend fields if it was current.
  removeBackend(t8) {
    if (!(t8 in this.registryFactory)) throw new Error(`${t8} backend not found in registry`);
    this.backendName === t8 && this.pendingBackendInit != null && this.pendingBackendInitId++, t8 in this.registry && (this.disposeRegisteredKernels(t8), this.registry[t8].dispose(), delete this.registry[t8]), delete this.registryFactory[t8], this.backendName === t8 && (this.pendingBackendInit = null, this.backendName = null, this.backendInstance = null);
  }
  // Backend names ordered by descending registration priority.
  getSortedBackends() {
    if (Object.keys(this.registryFactory).length === 0) throw new Error("No backend found in registry.");
    return Object.keys(this.registryFactory).sort((t8, e) => this.registryFactory[e].priority - this.registryFactory[t8].priority);
  }
  initializeBackendsAndReturnBest() {
    let t8 = this.getSortedBackends();
    for (let e = 0; e < t8.length; e++) {
      let o = t8[e],
        {
          success: n,
          asyncInit: s
        } = this.initializeBackend(o);
      if (s || n) return {
        name: o,
        asyncInit: s
      };
    }
    throw new Error("Could not initialize any backends, all backend initializations failed.");
  }
  // Migrates a dataId's values from its current backend to backend t8,
  // preserving the ref count; counted for leak checking in tests.
  moveData(t8, e) {
    let o = this.state.tensorInfo.get(e),
      n = o.backend,
      s = this.readSync(e),
      a = n.refCount(e);
    n.disposeData(e, true), o.backend = t8, t8.move(e, s, o.shape, o.dtype, a), this.shouldCheckForMemLeaks() && this.state.numDataMovesStack[this.state.numDataMovesStack.length - 1]++;
  }
  // tf.tidy(nameOrFn, fn?): runs fn inside a scope; tensors created inside
  // and not returned/kept are disposed when the scope ends.
  tidy(t8, e) {
    let o = null;
    if (e == null) {
      if (typeof t8 != "function") throw new Error("Please provide a function to tidy()");
      e = t8;
    } else {
      if (typeof t8 != "string" && !(t8 instanceof String)) throw new Error("When calling with two arguments, the first argument to tidy() must be a string");
      if (typeof e != "function") throw new Error("When calling with two arguments, the 2nd argument to tidy() must be a function");
      o = t8;
    }
    let n;
    return this.scopedRun(() => this.startScope(o), () => this.endScope(n), () => (n = e(), n instanceof Promise && console.error("Cannot return a Promise inside of tidy."), n));
  }
  // Guarantees `end` runs even when `f` throws (try/finally equivalent).
  scopedRun(t8, e, o) {
    t8();
    try {
      let n = o();
      return e(), n;
    } catch (n) {
      throw e(), n;
    }
  }
  nextTensorId() {
    return ru.nextTensorId++;
  }
  nextVariableId() {
    return ru.nextVariableId++;
  }
  // Clones via the Identity kernel and records a cast-based gradient so the
  // clone participates in backprop.
  clone(t8) {
    let e = T.runKernel(wo, {
        x: t8
      }),
      o = {
        x: t8
      },
      n = a => ({
        x: () => {
          let i = "float32",
            p = {
              x: a
            },
            u = {
              dtype: i
            };
          return T.runKernel(bo, p, u);
        }
      }),
      s = [];
    return this.addTapeNode(this.state.activeScope.name, o, [e], n, s, {}), e;
  }
  // Public kernel entry point; `this.backendName == null && this.backend`
  // intentionally touches the getter to force lazy backend init.
  runKernel(t8, e, o) {
    if (this.backendName == null && this.backend, !(tc(t8, this.backendName) != null)) throw new Error(`Kernel '${t8}' not registered for backend '${this.backendName}'`);
    return this.runKernelFunc({
      kernelName: t8,
      inputs: e,
      attrs: o
    });
  }
  shouldCheckForMemLeaks() {
    return this.ENV.getBool("IS_TEST");
  }
  // Compares backend dataId counts before/after a kernel, accounting for
  // outputs (complex64 = 3 ids) and explicit data moves.
  checkKernelForMemLeak(t8, e, o) {
    let n = this.backend.numDataIds(),
      s = 0;
    o.forEach(p => {
      s += p.dtype === "complex64" ? 3 : 1;
    });
    let a = this.state.numDataMovesStack[this.state.numDataMovesStack.length - 1],
      i = n - e - s - a;
    if (i > 0) throw new Error(`Backend '${this.backendName}' has an internal memory leak (${i} data ids) after running '${t8}'`);
  }
  // Core execution path for both registered kernels and custom forward
  // functions: runs the kernel, saves inputs/outputs for the tape when
  // gradients are being recorded, and records profiling stats.
  runKernelFunc(t8) {
    let e,
      o = [],
      n = this.isTapeOn(),
      s = this.state.numBytes,
      a = this.state.numTensors;
    this.shouldCheckForMemLeaks() && this.state.numDataMovesStack.push(0);
    let i;
    this.backendName == null && this.backend;
    let p,
      u = ww(t8) ? t8.kernelName : this.state.activeScope != null ? this.state.activeScope.name : "";
    if (ww(t8)) {
      let {
        kernelName: f,
        inputs: h,
        attrs: g
      } = t8;
      this.backendName == null && this.backend;
      let x = tc(f, this.backendName);
      $(x != null, () => `Cannot find registered kernel '${f}' for backend '${this.backendName}'`), i = () => {
        let b = this.backend.numDataIds();
        p = x.kernelFunc({
          inputs: h,
          attrs: g,
          backend: this.backend
        });
        let C = Array.isArray(p) ? p : [p];
        this.shouldCheckForMemLeaks() && this.checkKernelForMemLeak(f, b, C);
        let S = C.map(k => k.rank != null ? k : this.makeTensorFromTensorInfo(k));
        if (n) {
          let k = this.getTensorsForGradient(f, h, S);
          o = this.saveTensorsForBackwardMode(k);
        }
        return S;
      };
    } else {
      let {
          forwardFunc: f
        } = t8,
        h = g => {
          n && (o = g.map(x => this.keep(this.clone(x))));
        };
      i = () => {
        let g = this.backend.numDataIds();
        p = this.tidy(() => f(this.backend, h));
        let x = Array.isArray(p) ? p : [p];
        return this.shouldCheckForMemLeaks() && this.checkKernelForMemLeak(u, g, x), x;
      };
    }
    let {
        inputs: c,
        attrs: l
      } = t8,
      m = ww(t8) ? null : t8.backwardsFunc,
      d;
    return this.scopedRun(() => this.state.kernelDepth++, () => this.state.kernelDepth--, () => {
      !this.ENV.getBool("DEBUG") && !this.state.profiling ? e = i() : (d = this.profiler.profileKernel(u, c, () => i()), this.ENV.getBool("DEBUG") && this.profiler.logKernelProfile(d), e = d.outputs);
    }), n && this.addTapeNode(u, c, e, m, o, l), this.state.profiling && this.state.activeProfile.kernels.push({
      name: u,
      bytesAdded: this.state.numBytes - s,
      totalBytesSnapshot: this.state.numBytes,
      tensorsAdded: this.state.numTensors - a,
      totalTensorsSnapshot: this.state.numTensors,
      inputShapes: Object.keys(c).map(f => c[f] != null ? c[f].shape : null),
      outputShapes: e.map(f => f.shape),
      kernelTimeMs: d.timeMs,
      extraInfo: d.extraInfo
    }), Array.isArray(p) ? e : e[0];
  }
  // Clones+keeps tensors so the tape's saved values survive scope disposal.
  saveTensorsForBackwardMode(t8) {
    return t8.map(o => this.keep(this.clone(o)));
  }
  // Selects which inputs/outputs the kernel's gradient config asks to save.
  getTensorsForGradient(t8, e, o) {
    let n = iw(t8);
    if (n != null) {
      let s = n.inputsToSave || [],
        a = n.outputsToSave || [],
        i;
      n.saveAllInputs ? ($(Array.isArray(e), () => "saveAllInputs is true, expected inputs to be an array."), i = Object.keys(e).map(u => e[u])) : i = s.map(u => e[u]);
      let p = o.filter((u, c) => a[c]);
      return i.concat(p);
    }
    return [];
  }
  // Writes raw values into a backend and wraps them in a tracked Tensor;
  // string tensors get their byte size accounted separately.
  makeTensor(t8, e, o, n) {
    if (t8 == null) throw new Error("Values passed to engine.makeTensor() are null");
    o = o || "float32", n = n || this.backend;
    let s = t8;
    o === "string" && Vo(t8[0]) && (s = t8.map(p => tu(p)));
    let a = n.write(s, e, o),
      i = new ut(e, o, a, this.nextTensorId());
    if (this.trackTensor(i, n), o === "string") {
      let p = this.state.tensorInfo.get(a),
        u = ow(s);
      this.state.numBytes += u - p.bytes, p.bytes = u;
    }
    return i;
  }
  makeTensorFromDataId(t8, e, o, n) {
    o = o || "float32";
    let s = {
      dataId: t8,
      shape: e,
      dtype: o
    };
    return this.makeTensorFromTensorInfo(s, n);
  }
  // Wraps an existing backend TensorInfo (dataId/shape/dtype) as a Tensor.
  makeTensorFromTensorInfo(t8, e) {
    let {
        dataId: o,
        shape: n,
        dtype: s
      } = t8,
      a = new ut(n, s, o, this.nextTensorId());
    return this.trackTensor(a, e), a;
  }
  // Creates and registers a named Variable (casting first if dtype differs).
  makeVariable(t8, e = true, o, n) {
    o = o || this.nextVariableId().toString(), n != null && n !== t8.dtype && (t8 = t8.cast(n));
    let s = new oi(t8, e, o, this.nextTensorId());
    if (this.state.registeredVariables[s.name] != null) throw new Error(`Variable with name ${s.name} was already registered`);
    return this.state.registeredVariables[s.name] = s, this.incRef(s, this.backend), s;
  }
  // Updates tensor/byte counters and the per-dataId info map; variables are
  // not auto-disposed by scopes, so they are excluded from track().
  trackTensor(t8, e) {
    this.state.numTensors++, t8.dtype === "string" && this.state.numStringTensors++;
    let o = 0;
    t8.dtype !== "complex64" && t8.dtype !== "string" && (o = t8.size * jp(t8.dtype)), this.state.numBytes += o, this.state.tensorInfo.has(t8.dataId) || (this.state.numDataBuffers++, this.state.tensorInfo.set(t8.dataId, {
      backend: e || this.backend,
      dtype: t8.dtype,
      shape: t8.shape,
      bytes: o
    })), t8 instanceof oi || this.track(t8);
  }
  incRef(t8, e) {
    this.trackTensor(t8, e), this.backend.incRef(t8.dataId);
  }
  removeDataId(t8, e) {
    this.state.tensorInfo.has(t8) && this.state.tensorInfo.get(t8).backend === e && (this.state.tensorInfo.delete(t8), this.state.numDataBuffers--);
  }
  // Decrements counters and frees backend data once its ref count hits zero.
  disposeTensor(t8) {
    if (!this.state.tensorInfo.has(t8.dataId)) return;
    let e = this.state.tensorInfo.get(t8.dataId);
    if (this.state.numTensors--, t8.dtype === "string" && (this.state.numStringTensors--, this.state.numBytes -= e.bytes), t8.dtype !== "complex64" && t8.dtype !== "string") {
      let o = t8.size * jp(t8.dtype);
      this.state.numBytes -= o;
    }
    e.backend.disposeData(t8.dataId) && this.removeDataId(t8.dataId, e.backend);
  }
  disposeVariables() {
    for (let t8 in this.state.registeredVariables) {
      let e = this.state.registeredVariables[t8];
      this.disposeVariable(e);
    }
  }
  disposeVariable(t8) {
    this.disposeTensor(t8), this.state.registeredVariables[t8.name] != null && delete this.state.registeredVariables[t8.name];
  }
  // Backend memory info augmented with engine counters; string tensor byte
  // counts are flagged as approximate.
  memory() {
    let t8 = this.backend.memory();
    return t8.numTensors = this.state.numTensors, t8.numDataBuffers = this.state.numDataBuffers, t8.numBytes = this.state.numBytes, this.state.numStringTensors > 0 && (t8.unreliable = true, t8.reasons == null && (t8.reasons = []), t8.reasons.push("Memory usage by string tensors is approximate (2 bytes per character)")), t8;
  }
  // Profiles a user function: records per-kernel stats and awaits async
  // kernel timings before returning the profile.
  async profile(t8) {
    this.state.profiling = true;
    let e = this.state.numBytes,
      o = this.state.numTensors;
    this.state.activeProfile.kernels = [], this.state.activeProfile.result = await t8(), this.state.profiling = false, this.state.activeProfile.peakBytes = Math.max(...this.state.activeProfile.kernels.map(n => n.totalBytesSnapshot)), this.state.activeProfile.newBytes = this.state.numBytes - e, this.state.activeProfile.newTensors = this.state.numTensors - o;
    for (let n of this.state.activeProfile.kernels) n.kernelTimeMs = await n.kernelTimeMs, n.extraInfo = await n.extraInfo;
    return this.state.activeProfile;
  }
  // Gradient tape records only at the top kernel level inside tf.grad scopes.
  isTapeOn() {
    return this.state.gradientDepth > 0 && this.state.kernelDepth === 0;
  }
  // Appends a node to the tape; missing upstream dy slots are filled with
  // zero tensors before invoking the gradient function.
  addTapeNode(t8, e, o, n, s, a) {
    let i = {
        id: this.state.nextTapeNodeId++,
        kernelName: t8,
        inputs: e,
        outputs: o,
        saved: s
      },
      p = iw(t8);
    p != null && (n = p.gradFunc), n != null && (i.gradient = u => (u = u.map((c, l) => {
      if (c == null) {
        let m = o[l],
          d = Yp(m.size, m.dtype);
        return this.makeTensor(d, m.shape, m.dtype);
      }
      return c;
    }), n(u.length > 1 ? u : u[0], s, a))), this.state.activeTape.push(i);
  }
  // Marks a tensor as exempt from automatic scope disposal.
  keep(t8) {
    return t8.kept = true, t8;
  }
  startTape() {
    this.state.gradientDepth === 0 && (this.state.activeTape = []), this.state.gradientDepth++;
  }
  endTape() {
    this.state.gradientDepth--;
  }
  // Pushes a new tidy scope (optionally named) onto the scope stack.
  startScope(t8) {
    let e = {
      track: [],
      name: "unnamed scope",
      id: this.state.nextScopeId++
    };
    t8 && (e.name = t8), this.state.scopeStack.push(e), this.state.activeScope = e;
  }
  // Pops the scope: disposes tracked tensors not kept and not part of the
  // returned result; surviving results are re-tracked by the parent scope.
  endScope(t8) {
    let e = Sl(t8),
      o = new Set(e.map(s => s.id));
    for (let s = 0; s < this.state.activeScope.track.length; s++) {
      let a = this.state.activeScope.track[s];
      !a.kept && !o.has(a.id) && a.dispose();
    }
    let n = this.state.scopeStack.pop();
    this.state.activeScope = this.state.scopeStack.length === 0 ? null : this.state.scopeStack[this.state.scopeStack.length - 1], e.forEach(s => {
      !s.kept && s.scopeId === n.id && this.track(s);
    });
  }
  // Computes dy/dx for y = f() w.r.t. each x, by running f under the tape and
  // backpropagating (H0 filters the tape, K0 walks it backwards).
  gradients(t8, e, o, n = false) {
    if ($(e.length > 0, () => "gradients() received an empty list of xs."), o != null && o.dtype !== "float32") throw new Error(`dy must have 'float32' dtype, but has '${o.dtype}'`);
    let s = this.scopedRun(() => this.startTape(), () => this.endTape(), () => this.tidy("forward", t8));
    $(s instanceof ut, () => "The result y returned by f() must be a tensor.");
    let a = H0(this.state.activeTape, e, s);
    if (!n && a.length === 0 && e.length > 0) throw new Error("Cannot compute gradient of y=f(x) with respect to x. Make sure that the f you passed encloses all operations that lead from x to y.");
    return this.tidy("backward", () => {
      let i = {};
      i[s.id] = o == null ? l4(s.shape) : o, K0(i, a, u => this.tidy(u), m4);
      let p = e.map(u => i[u.id]);
      return this.state.gradientDepth === 0 && (this.state.activeTape.forEach(u => {
        for (let c of u.saved) c.dispose();
      }), this.state.activeTape = null), {
        value: s,
        grads: p
      };
    });
  }
  // tf.customGrad: wraps f (which returns {value, gradFunc}) as an op with a
  // user-supplied gradient, validated for arity and tensor-ness.
  customGrad(t8) {
    return $(qs(t8), () => "The f passed in customGrad(f) must be a function."), (...e) => {
      $(e.every(i => i instanceof ut), () => "The args passed in customGrad(f)(x1, x2,...) must all be tensors");
      let o,
        n = {};
      e.forEach((i, p) => {
        n[p] = i;
      });
      let s = (i, p) => (o = t8(...e, p), $(o.value instanceof ut, () => "The function f passed in customGrad(f) must return an object where `obj.value` is a tensor"), $(qs(o.gradFunc), () => "The function f passed in customGrad(f) must return an object where `obj.gradFunc` is a function."), o.value),
        a = (i, p) => {
          let u = o.gradFunc(i, p),
            c = Array.isArray(u) ? u : [u];
          $(c.length === e.length, () => "The function f passed in customGrad(f) must return an object where `obj.gradFunc` is a function that returns the same number of tensors as inputs passed to f(...)."), $(c.every(m => m instanceof ut), () => "The function f passed in customGrad(f) must return an object where `obj.gradFunc` is a function that returns a list of only tensors.");
          let l = {};
          return c.forEach((m, d) => {
            l[d] = () => m;
          }), l;
        };
      return this.runKernelFunc({
        forwardFunc: s,
        backwardsFunc: a,
        inputs: n
      });
    };
  }
  readSync(t8) {
    return this.state.tensorInfo.get(t8).backend.readSync(t8);
  }
  read(t8) {
    return this.state.tensorInfo.get(t8).backend.read(t8);
  }
  readToGPU(t8, e) {
    return this.state.tensorInfo.get(t8).backend.readToGPU(t8, e);
  }
  // Times a backend operation, adding wall-clock time.
  async time(t8) {
    let e = Wu(),
      o = await this.backend.time(t8);
    return o.wallMs = Wu() - e, o;
  }
  // Registers a tensor with the active scope so endScope() can dispose it.
  track(t8) {
    return this.state.activeScope != null && (t8.scopeId = this.state.activeScope.id, this.state.activeScope.track.push(t8)), t8;
  }
  get registeredVariables() {
    return this.state.registeredVariables;
  }
  // Full engine reset: invalidates pending inits, disposes all state and
  // registered backends, and clears the environment.
  reset() {
    this.pendingBackendInitId++, this.state.dispose(), this.ENV.reset(), this.state = new nd();
    for (let t8 in this.registry) this.disposeRegisteredKernels(t8), this.registry[t8].dispose(), delete this.registry[t8];
    this.backendName = null, this.backendInstance = null, this.pendingBackendInit = null;
  }
};
// Class-level monotonic id counters shared by all engine instances.
ru.nextTensorId = 0;
ru.nextVariableId = 0;
// Builds a ones tensor of the given shape (used as the default dy seed in
// gradients(); fl presumably fills with ones — confirm against full file).
function l4(r) {
  let t8 = fl(He(r), "float32");
  return T.makeTensor(t8, r, "float32");
}
// Returns the process-wide engine singleton, creating it on the global object
// as `_tfengine` so multiple bundled copies of the library share one engine.
function Sw() {
  let r = aw();
  if (r._tfengine == null) {
    let t8 = new hl(r);
    r._tfengine = new ru(t8);
  }
  return _0(r._tfengine.ENV), Y0(() => r._tfengine), r._tfengine;
}
// The global engine instance used throughout this module.
var T = Sw();
// Elementwise add of two tensors via the registered kernel `uo`; passed to
// the backprop walker in gradients() to accumulate dy contributions.
function m4(r, t8) {
  let e = {
    a: r,
    b: t8
  };
  return T.runKernel(uo, e);
}
// `ou`: device-detection utility namespace (lazy getter exports via qe).
var ou = {};
qe(ou, {
  isBrowser: () => vw,
  isMobile: () => h4,
  mockIsMobile: () => f4
});
// Whether a usable `navigator` global exists in this environment.
function d4() {
  if (typeof navigator === "undefined") return false;
  return navigator != null;
}
// Test hook: when Iw is set via f4 (mockIsMobile), h4 returns it directly
// instead of sniffing the user agent.
var Iw;
function f4(r) {
  Iw = r;
}
// isMobile: honors the mock override first, then checks React Native, then
// falls back to user-agent regex sniffing (or userAgentData.mobile when no
// UA string is available). Returns false when no navigator-like object exists.
function h4(r) {
  // Mocked value set via f4(), used in tests.
  if (Iw !== void 0) return Iw;
  if (r || d4()) {
    if (r || (r = navigator), r.product === "ReactNative") return true;
    let t8 = r.userAgent || r.vendor || (typeof window != "undefined" ? window.opera : "");
    if (!t8) {
      // No UA string: fall back to the structured Client Hints API.
      let e = r;
      return e.userAgentData && e.userAgentData.mobile;
    }
    // Classic UA sniffing: full-UA pattern, else a prefix table on the first
    // four characters.
    return /(android|bb\d+|meego).+mobile|avantgo|bada\/|blackberry|blazer|compal|elaine|fennec|hiptop|iemobile|ip(hone|od)|iris|kindle|lge |maemo|midp|mmp|mobile.+firefox|netfront|opera m(ob|in)i|palm( os)?|phone|p(ixi|re)\/|plucker|pocket|psp|series(4|6)0|symbian|treo|up\.(browser|link)|vodafone|wap|windows ce|xda|xiino/i.test(t8) || /1207|6310|6590|3gso|4thp|50[1-6]i|770s|802s|a wa|abac|ac(er|oo|s\-)|ai(ko|rn)|al(av|ca|co)|amoi|an(ex|ny|yw)|aptu|ar(ch|go)|as(te|us)|attw|au(di|\-m|r |s )|avan|be(ck|ll|nq)|bi(lb|rd)|bl(ac|az)|br(e|v)w|bumb|bw\-(n|u)|c55\/|capi|ccwa|cdm\-|cell|chtm|cldc|cmd\-|co(mp|nd)|craw|da(it|ll|ng)|dbte|dc\-s|devi|dica|dmob|do(c|p)o|ds(12|\-d)|el(49|ai)|em(l2|ul)|er(ic|k0)|esl8|ez([4-7]0|os|wa|ze)|fetc|fly(\-|_)|g1 u|g560|gene|gf\-5|g\-mo|go(\.w|od)|gr(ad|un)|haie|hcit|hd\-(m|p|t)|hei\-|hi(pt|ta)|hp( i|ip)|hs\-c|ht(c(\-| |_|a|g|p|s|t)|tp)|hu(aw|tc)|i\-(20|go|ma)|i230|iac( |\-|\/)|ibro|idea|ig01|ikom|im1k|inno|ipaq|iris|ja(t|v)a|jbro|jemu|jigs|kddi|keji|kgt( |\/)|klon|kpt |kwc\-|kyo(c|k)|le(no|xi)|lg( g|\/(k|l|u)|50|54|\-[a-w])|libw|lynx|m1\-w|m3ga|m50\/|ma(te|ui|xo)|mc(01|21|ca)|m\-cr|me(rc|ri)|mi(o8|oa|ts)|mmef|mo(01|02|bi|de|do|t(\-| |o|v)|zz)|mt(50|p1|v )|mwbp|mywa|n10[0-2]|n20[2-3]|n30(0|2)|n50(0|2|5)|n7(0(0|1)|10)|ne((c|m)\-|on|tf|wf|wg|wt)|nok(6|i)|nzph|o2im|op(ti|wv)|oran|owg1|p800|pan(a|d|t)|pdxg|pg(13|\-([1-8]|c))|phil|pire|pl(ay|uc)|pn\-2|po(ck|rt|se)|prox|psio|pt\-g|qa\-a|qc(07|12|21|32|60|\-[2-7]|i\-)|qtek|r380|r600|raks|rim9|ro(ve|zo)|s55\/|sa(ge|ma|mm|ms|ny|va)|sc(01|h\-|oo|p\-)|sdk\/|se(c(\-|0|1)|47|mc|nd|ri)|sgh\-|shar|sie(\-|m)|sk\-0|sl(45|id)|sm(al|ar|b3|it|t5)|so(ft|ny)|sp(01|h\-|v\-|v )|sy(01|mb)|t2(18|50)|t6(00|10|18)|ta(gt|lk)|tcl\-|tdg\-|tel(i|m)|tim\-|t\-mo|to(pl|sh)|ts(70|m\-|m3|m5)|tx\-9|up(\.b|g1|si)|utst|v400|v750|veri|vi(rg|te)|vk(40|5[0-3]|\-v)|vm40|voda|vulc|vx(52|53|60|61|70|80|81|83|85|98)|w3c(\-| )|webc|whit|wi(g |nc|nw)|wmlb|wonu|x700|yas\-|your|zeto|zte\-/i.test(t8.substr(0, 4));
  }
  return false;
}
// isBrowser: true on a browser main thread (window with a document) or
// inside a web worker (WorkerGlobalScope present).
function vw() {
  const onMainThread = typeof window !== "undefined" && window.document != null;
  return onMainThread || typeof WorkerGlobalScope !== "undefined";
}
// Default environment flag registrations (DEBUG, platform detection, test
// mode, canvas behavior, etc.); A() returns the shared environment.
var $r = A();
$r.registerFlag("DEBUG", () => false, r => {
  r && console.warn("Debugging mode is ON. The output of every math call will be downloaded to CPU and checked for NaNs. This significantly impacts performance.");
});
$r.registerFlag("IS_BROWSER", () => vw());
$r.registerFlag("IS_NODE", () => typeof process != "undefined" && typeof process.versions != "undefined" && typeof process.versions.node != "undefined");
$r.registerFlag("IS_CHROME", () => typeof navigator != "undefined" && navigator != null && navigator.userAgent != null && /Chrome/.test(navigator.userAgent) && /Google Inc/.test(navigator.vendor));
$r.registerFlag("IS_SAFARI", () => typeof navigator != "undefined" && navigator != null && navigator.userAgent != null && /Safari/.test(navigator.userAgent) && /Apple/.test(navigator.vendor));
$r.registerFlag("PROD", () => false);
// Shape-consistency checks for nested arrays piggyback on DEBUG.
$r.registerFlag("TENSORLIKE_CHECK_SHAPE_CONSISTENCY", () => $r.getBool("DEBUG"));
$r.registerFlag("DEPRECATION_WARNINGS_ENABLED", () => true);
$r.registerFlag("IS_TEST", () => false);
$r.registerFlag("CHECK_COMPUTATION_FOR_ERRORS", () => $r.getBool("DEBUG"));
$r.registerFlag("WRAP_TO_IMAGEBITMAP", () => false);
$r.registerFlag("CANVAS2D_WILL_READ_FREQUENTLY_FOR_GPU", () => false);
$r.registerFlag("USE_SETTIMEOUTCUSTOM", () => false);
// Infers the shape of a TensorLike: TypedArrays are rank-1, WebGL-texture
// descriptors map to [height, width * channels], GPU buffers to element count,
// nested arrays are walked along the first element of each level.
function ir(r, t8) {
  let e = r;
  if (Ot(r)) return t8 === "string" ? [] : [r.length];
  if (rd(r)) {
    let n = r.channels || "RGBA";
    return [r.height, r.width * n.length];
  } else if (od(r)) return [r.buffer.size / (t8 == null ? 4 : jp(t8))];
  if (!Array.isArray(r)) return [];
  let o = [];
  // Descend the first element of each nesting level to read dimension sizes.
  for (; Array.isArray(e) || Ot(e) && t8 !== "string";) o.push(e.length), e = e[0];
  // Optionally verify every branch matches the inferred shape (DEBUG only).
  return Array.isArray(r) && A().getBool("TENSORLIKE_CHECK_SHAPE_CONSISTENCY") && rk(r, o, []), o;
}
// Recursively asserts that a nested array matches the inferred shape t8
// exactly; e accumulates the index path for error messages.
function rk(r, t8, e) {
  if (e = e || [], !Array.isArray(r) && !Ot(r)) {
    $(t8.length === 0, () => `Element arr[${e.join("][")}] is a primitive, but should be an array/TypedArray of ${t8[0]} elements`);
    return;
  }
  $(t8.length > 0, () => `Element arr[${e.join("][")}] should be a primitive, but is an array of ${r.length} elements`), $(r.length === t8[0], () => `Element arr[${e.join("][")}] should have ${t8[0]} elements, but has ${r.length} elements`);
  let o = t8.slice(1);
  for (let n = 0; n < r.length; ++n) rk(r[n], o, e.concat(n));
}
// Validates that an actual dtype `t8` satisfies the expected dtype `r` for
// argument `e` of op `o`. "string_or_numeric" accepts any dtype; "numeric"
// accepts everything except "string"; otherwise dtypes must match exactly.
// Throws an Error on mismatch or when the expected dtype is null.
function tk(r, t8, e, o) {
  if (r === "string_or_numeric") return;
  if (r == null) throw new Error("Expected dtype cannot be null.");
  const mismatch = r === "numeric" ? t8 === "string" : r !== t8;
  if (mismatch) {
    throw new Error(`Argument '${e}' passed to '${o}' must be ${r} tensor, but got ${t8} tensor`);
  }
}
// convertToTensor: passes Tensors through (after dtype validation) and
// converts TensorLikes (numbers, booleans, strings, nested arrays,
// TypedArrays) into a new Tensor. t8 = arg name, e = op name, o = expected
// dtype ("numeric" by default).
function v(r, t8, e, o = "numeric") {
  if (r instanceof ut) return tk(o, r.dtype, t8, e), r;
  let n = Ri(r);
  // Coerce the inferred dtype to the concrete expected one when compatible.
  if (n !== "string" && ["bool", "int32", "float32"].indexOf(o) >= 0 && (n = o), tk(o, n, t8, e), r == null || !Ot(r) && !Array.isArray(r) && typeof r != "number" && typeof r != "boolean" && typeof r != "string") {
    let p = r == null ? "null" : r.constructor.name;
    throw new Error(`Argument '${t8}' passed to '${e}' must be a Tensor or TensorLike, but got '${p}'`);
  }
  let s = ir(r, n);
  !Ot(r) && !Array.isArray(r) && (r = [r]);
  let i = n !== "string" ? nc(r, n) : Ps(r, [], true);
  return T.makeTensor(i, s, n);
}
// Array form of v(): converts each element of a Tensor[]/TensorLike[] input,
// labeling each with its index for error messages.
function si(r, t8, e, o = "numeric") {
  if (!Array.isArray(r)) throw new Error(`Argument ${t8} passed to ${e} must be a \`Tensor[]\` or \`TensorLike[]\``);
  return r.map((s, a) => v(s, `${t8}[${a}]`, e, o));
}
// Suffix appended to generated op names (e.g. "complex" -> "complex__op").
var kw = "__op";
// op(): wraps a single { opName_: fn } entry so every call runs inside its
// own engine scope (auto-disposing intermediates) and carries a useful name.
function N(r) {
  let t8 = Object.keys(r);
  if (t8.length !== 1) throw new Error(`Please provide an object with a single key (operation name) mapping to a function. Got an object with ${t8.length} keys.`);
  let e = t8[0],
    o = r[e];
  // Strip the trailing underscore convention, then add the __op suffix.
  e.endsWith("_") && (e = e.substring(0, e.length - 1)), e = e + kw;
  let n = (...s) => {
    T.startScope(e);
    try {
      let a = o(...s);
      return Fu(a) && console.error("Cannot return a Promise inside of tidy."), T.endScope(a), a;
    } catch (a) {
      // Close the scope (disposing intermediates) before rethrowing.
      throw T.endScope(null), a;
    }
  };
  return Object.defineProperty(n, "name", {
    value: e,
    configurable: true
  }), n;
}
// tf.complex(real, imag): builds a complex64 tensor from matching-shape real
// and imaginary parts via the Complex kernel `Fi`.
function g4(r, t8) {
  let e = v(r, "real", "complex"),
    o = v(t8, "imag", "complex");
  yt(e.shape, o.shape, `real and imag shapes, ${e.shape} and ${o.shape}, must match in call to tf.complex().`);
  let n = {
    real: e,
    imag: o
  };
  return T.runKernel(Fi, n);
}
// Scoped op wrapper for g4 (the public `complex` op).
var Er = N({
  complex_: g4
});
// Core tensor factory: builds a Tensor from raw values `r`, an optional
// user-provided shape `t8`, the inferred shape `e`, and dtype `o`. Handles
// GPU-resident inputs (od/rd checks — presumably WebGL/WebGPU data handles)
// and validates that a provided shape is consistent with the inferred one.
function Sr(r, t8, e, o) {
  if (o == null) o = Ri(r);else if (o === "complex64") throw new Error("Cannot construct a complex64 tensor directly. Please use tf.complex(real, imag).");
  // GPU data path: create the tensor directly from the GPU resource.
  if (od(r) || rd(r)) {
    if (o !== "float32" && o !== "int32") throw new Error(`Creating tensor from GPU data only supports 'float32'|'int32' dtype, while the dtype is ${o}.`);
    return T.backend.createTensorFromGPUData(r, t8 || e, o);
  }
  if (!Ot(r) && !Array.isArray(r) && typeof r != "number" && typeof r != "boolean" && typeof r != "string") throw new Error("values passed to tensor(values) must be a number/boolean/string or an array of numbers/booleans/strings, or a TypedArray");
  if (t8 != null) {
    wt(t8);
    // Element counts must agree between provided and inferred shapes.
    let n = He(t8),
      s = He(e);
    $(n === s, () => `Based on the provided shape, [${t8}], the tensor should have ${n} values but has ${s}`);
    // Each axis must match, except that the final inferred axis may be a
    // regrouping of the provided shape's tail.
    for (let a = 0; a < e.length; ++a) {
      let i = e[a],
        p = a === e.length - 1 ? i !== He(t8.slice(a)) : true;
      $(e[a] === t8[a] || !p, () => `Error creating a new Tensor. Inferred shape (${e}) does not match the provided shape (${t8}). `);
    }
  }
  // Wrap bare scalars, flatten values, and materialize via the engine.
  return !Ot(r) && !Array.isArray(r) && (r = [r]), t8 = t8 || e, r = o !== "string" ? nc(r, o) : Ps(r, [], true), T.makeTensor(r, t8, o);
}
// Creates a tensor from values `r` with optional shape `t8` and dtype `e`,
// first inferring the actual shape of the supplied values.
function ur(r, t8, e) {
  const inferredShape = ir(r, e);
  return Sr(r, t8, inferredShape, e);
}
// Bytes per element for each dtype; used when slicing raw weight buffers in
// the weight decoder (`ad`) below.
var Il = {
  float32: 4,
  float16: 2,
  int32: 4,
  uint16: 2,
  uint8: 1,
  bool: 1,
  complex64: 8 // two float32s: real + imaginary
};
// Composite view over a list of ArrayBuffers ("shards") that supports byte
// addressing and slicing across shard boundaries without concatenating them
// up front.
var jt = class {
  // Convenience: concatenate a list of buffers into one ArrayBuffer.
  static join(t8) {
    return new jt(t8).slice();
  }
  constructor(t8) {
    // Accepts a single buffer, a list of buffers, or TypedArrays (unwrapped
    // to their backing .buffer). Returns early on null/empty input, leaving
    // shards empty and byteLength undefined.
    if (this.shards = [], this.previousShardIndex = 0, t8 == null || (t8 instanceof Array || (t8 = [t8]), t8 = t8.map(o => Ot(o) ? o.buffer : o), t8.length === 0)) return;
    // If every shard except possibly the last has this size, shard lookup is
    // O(1); otherwise bufferUniformSize is cleared and binary search is used.
    this.bufferUniformSize = t8[0].byteLength;
    let e = 0;
    for (let o = 0; o < t8.length; o++) {
      let n = t8[o];
      o !== t8.length - 1 && n.byteLength !== this.bufferUniformSize && (this.bufferUniformSize = void 0);
      let s = e + n.byteLength;
      this.shards.push({
        buffer: n,
        start: e,
        end: s
      }), e = s;
    }
    // NOTE(review): the shards.length === 0 guard is unreachable here (empty
    // input returned early above), and if it ever fired the next expression
    // would still index shards[-1]. Kept as-is.
    this.shards.length === 0 && (this.byteLength = 0), this.byteLength = this.shards[this.shards.length - 1].end;
  }
  // Returns a copy of bytes [t8, e) as one contiguous ArrayBuffer, clamping
  // the range to the composite's bounds.
  slice(t8 = 0, e = this.byteLength) {
    if (this.shards.length === 0) return new ArrayBuffer(0);
    if (t8 = isNaN(Number(t8)) ? 0 : t8, e = isNaN(Number(e)) ? 0 : e, t8 = Math.max(0, t8), e = Math.min(this.byteLength, e), e <= t8) return new ArrayBuffer(0);
    let o = this.findShardForByte(t8);
    if (o === -1) throw new Error(`Could not find start shard for byte ${t8}`);
    let n = e - t8,
      s = new ArrayBuffer(n),
      a = new Uint8Array(s),
      i = 0;
    // Copy shard by shard until the requested end offset is covered.
    for (let p = o; p < this.shards.length; p++) {
      let u = this.shards[p],
        l = t8 + i - u.start,
        m = i,
        f = Math.min(e, u.end) - u.start,
        h = new Uint8Array(u.buffer, l, f - l);
      if (a.set(h, m), i += h.length, e < u.end) break;
    }
    return s;
  }
  // Maps a global byte offset to the index of the containing shard, or -1
  // when out of range. Caches the previous hit since access is typically
  // sequential.
  findShardForByte(t8) {
    if (this.shards.length === 0 || t8 < 0 || t8 >= this.byteLength) return -1;
    // Uniform shard size: direct O(1) computation.
    if (this.bufferUniformSize != null) return this.previousShardIndex = Math.floor(t8 / this.bufferUniformSize), this.previousShardIndex;
    function e(n) {
      return t8 < n.start ? -1 : t8 >= n.end ? 1 : 0;
    }
    if (e(this.shards[this.previousShardIndex]) === 0) return this.previousShardIndex;
    let o = x4(this.shards, e);
    return o === -1 ? -1 : (this.previousShardIndex = o, this.previousShardIndex);
  }
};
// Binary search over `r`, an array of contiguous sorted ranges, using the
// three-way comparator `t8` (-1: target before element, 0: inside element,
// 1: after element). Returns the matching index, or -1 when the search range
// collapses. NOTE(review): callers must pass in-range targets — targets
// outside the covered span are guarded against upstream (findShardForByte).
function x4(r, t8) {
  let lo = 0;
  let hi = r.length;
  while (lo <= hi) {
    const mid = lo + Math.floor((hi - lo) / 2);
    const side = t8(r[mid]);
    if (side === 0) {
      return mid;
    }
    if (side < 0) {
      hi = mid;
    } else {
      lo = mid + 1;
    }
  }
  return -1;
}
// Byte width of the uint32 length prefix written before each encoded string.
var sd = 4;
// Encodes named tensors (array of {name, tensor} or a name->tensor map) into
// one flat byte buffer plus a list of weight specs. Strings are serialized
// as [uint32 byte-length][raw bytes] pairs (host endianness — little-endian
// on all mainstream JS platforms). t8, when given, is recorded as the
// `group` of every spec.
async function nk(r, t8) {
  let e = [],
    o = [],
    n = Array.isArray(r) ? r.map(a => a.name) : Object.keys(r);
  for (let a = 0; a < n.length; ++a) {
    let i = n[a],
      p = Array.isArray(r) ? r[a].tensor : r[i];
    if (p.dtype !== "float32" && p.dtype !== "int32" && p.dtype !== "bool" && p.dtype !== "string" && p.dtype !== "complex64") throw new Error(`Unsupported dtype in weight '${i}': ${p.dtype}`);
    let u = {
      name: i,
      shape: p.shape,
      dtype: p.dtype
    };
    if (p.dtype === "string") {
      // Strings: read raw bytes, then pack each string with its 4-byte
      // length prefix.
      let c = new Promise(async l => {
        let m = await p.bytes(),
          d = m.reduce((g, x) => g + x.length, 0) + sd * m.length,
          f = new Uint8Array(d),
          h = 0;
        for (let g = 0; g < m.length; g++) {
          let x = m[g],
            b = new Uint8Array(new Uint32Array([x.length]).buffer);
          f.set(b, h), h += sd, f.set(x, h), h += x.length;
        }
        l(f);
      });
      o.push(c);
    } else o.push(p.data());
    t8 != null && (u.group = t8), e.push(u);
  }
  // Wait for all tensor reads, then concatenate into one contiguous buffer.
  let s = await Promise.all(o);
  return {
    data: y4(s),
    specs: e
  };
}
// Decodes a flat weight buffer (`r`: ArrayBuffer | ArrayBuffer[]) into a map
// of name -> Tensor according to weight specs `t8`. Handles uint8/uint16
// affine quantization, float16 table decoding, length-prefixed strings, and
// complex64 (interleaved real/imag float32 pairs).
function ad(r, t8) {
  let e = new jt(r),
    o = {},
    n,   // lazily-created float16 decoder (see S4)
    s = 0; // read cursor, in bytes
  for (let a of t8) {
    let i = a.name,
      p = a.dtype,
      u = a.shape,
      c = He(u), // element count
      l;
    if ("quantization" in a) {
      let m = a.quantization;
      if (m.dtype === "uint8" || m.dtype === "uint16") {
        if (!("min" in m && "scale" in m)) throw new Error(`Weight ${a.name} with quantization ${m.dtype} doesn't have corresponding metadata min and scale.`);
      } else if (m.dtype === "float16") {
        if (p !== "float32") throw new Error(`Weight ${a.name} is quantized with ${m.dtype} which only supports weights of type float32 not ${p}.`);
      } else throw new Error(`Weight ${a.name} has unknown quantization dtype ${m.dtype}. Supported quantization dtypes are: 'uint8', 'uint16', and 'float16'.`);
      let d = Il[m.dtype],
        f = e.slice(s, s + c * d),
        h = m.dtype === "uint8" ? new Uint8Array(f) : new Uint16Array(f);
      if (p === "float32") {
        if (m.dtype === "uint8" || m.dtype === "uint16") {
          // Affine dequantization: value = q * scale + min.
          l = new Float32Array(h.length);
          for (let g = 0; g < h.length; g++) {
            let x = h[g];
            l[g] = x * m.scale + m.min;
          }
        } else if (m.dtype === "float16") n === void 0 && (n = S4()), l = n(h);else throw new Error(`Unsupported quantization type ${m.dtype} for weight type float32.`);
      } else if (p === "int32") {
        if (m.dtype !== "uint8" && m.dtype !== "uint16") throw new Error(`Unsupported quantization type ${m.dtype} for weight type int32.`);
        // Dequantize and round back to integers.
        l = new Int32Array(h.length);
        for (let g = 0; g < h.length; g++) {
          let x = h[g];
          l[g] = Math.round(x * m.scale + m.min);
        }
      } else throw new Error(`Unsupported dtype in weight '${i}': ${p}`);
      s += c * d;
    } else if (p === "string") {
      // Strings are stored as [uint32 byte length][raw bytes] pairs.
      let m = He(a.shape);
      l = [];
      for (let d = 0; d < m; d++) {
        let f = new Uint32Array(e.slice(s, s + sd))[0];
        s += sd;
        let h = new Uint8Array(e.slice(s, s + f));
        l.push(h), s += f;
      }
    } else {
      let m = Il[p],
        d = e.slice(s, s + c * m);
      if (p === "float32") l = new Float32Array(d);else if (p === "int32") l = new Int32Array(d);else if (p === "bool") l = new Uint8Array(d);else if (p === "complex64") {
        // complex64: deinterleave into separate real/imag tensors, then
        // recombine with tf.complex (Er); temporaries are disposed.
        l = new Float32Array(d);
        let f = new Float32Array(l.length / 2),
          h = new Float32Array(l.length / 2);
        for (let b = 0; b < f.length; b++) f[b] = l[b * 2], h[b] = l[b * 2 + 1];
        let g = ur(f, u, "float32"),
          x = ur(h, u, "float32");
        o[i] = Er(g, x), g.dispose(), x.dispose();
      } else throw new Error(`Unsupported dtype in weight '${i}': ${p}`);
      s += c * m;
    }
    // complex64 was already materialized above.
    p !== "complex64" && (o[i] = ur(l, u, p));
  }
  return o;
}
// Concatenates a list of TypedArrays (Float32Array | Int32Array | Uint8Array)
// into a single ArrayBuffer. Views that do not cover their whole backing
// buffer are re-materialized first, so copying via `new Uint8Array(s.buffer)`
// below is safe.
function y4(r) {
  if (r === null) {
    throw new Error(`Invalid input value: ${JSON.stringify(r)}`);
  }
  let totalBytes = 0;
  const normalized = [];
  for (const arr of r) {
    totalBytes += arr.byteLength;
    // Re-materialize partial views so the backing buffer holds exactly this
    // array's data.
    const coversWholeBuffer = arr.byteLength === arr.buffer.byteLength;
    normalized.push(coversWholeBuffer ? arr : new arr.constructor(arr));
    const supported = arr instanceof Float32Array || arr instanceof Int32Array || arr instanceof Uint8Array;
    if (!supported) {
      throw new Error(`Unsupported TypedArray subtype: ${arr.constructor.name}`);
    }
  }
  const out = new Uint8Array(totalBytes);
  let offset = 0;
  for (const arr of normalized) {
    out.set(new Uint8Array(arr.buffer), offset);
    offset += arr.byteLength;
  }
  return out.buffer;
}
// True when Node's Buffer should be used for byte/base64 work: Buffer exists
// and at least one browser primitive (Blob/atob/btoa) is missing.
var Nw = typeof Buffer != "undefined" && (typeof Blob == "undefined" || typeof atob == "undefined" || typeof btoa == "undefined");
// Returns the UTF-8 encoded byte length of string `r`.
function ok(r) {
  return Nw ? Buffer.byteLength(r, "utf8") : new Blob([r]).size;
}
// Encodes an ArrayBuffer as a base64 string: Buffer on Node-like platforms
// (see Nw), otherwise a binary string fed to btoa.
function sk(r) {
  if (Nw) return Buffer.from(r).toString("base64");
  const bytes = new Uint8Array(r);
  let binary = "";
  for (const byte of bytes) {
    binary += String.fromCharCode(byte);
  }
  return btoa(binary);
}
// Decodes a base64 string into an ArrayBuffer (inverse of sk).
function ak(r) {
  if (Nw) {
    const buf = Buffer.from(r, "base64");
    // Slice so the returned ArrayBuffer contains exactly the decoded bytes.
    return buf.buffer.slice(buf.byteOffset, buf.byteOffset + buf.byteLength);
  }
  const binary = atob(r);
  const bytes = new Uint8Array(binary.length);
  for (let i = 0; i < binary.length; ++i) {
    bytes.set([binary.charCodeAt(i)], i);
  }
  return bytes.buffer;
}
// Joins a list of ArrayBuffers/TypedArrays into one contiguous ArrayBuffer
// via the composite-buffer helper (jt).
function ik(r) {
  return jt.join(r);
}
// Returns the last path segment ("basename") of a slash-separated model URL,
// after trimming surrounding whitespace and any trailing slashes.
function Tw(r) {
  const SEP = "/";
  let path = r.trim();
  while (path.endsWith(SEP)) {
    path = path.slice(0, path.length - 1);
  }
  const segments = path.split(SEP);
  return segments[segments.length - 1];
}
// Builds a model-JSON object from model artifacts `r` plus a weights
// manifest `t8`, copying the optional fields only when they are present
// (non-nullish) on the artifacts.
function id(r, t8) {
  const json = {
    modelTopology: r.modelTopology,
    format: r.format,
    generatedBy: r.generatedBy,
    convertedBy: r.convertedBy,
    weightsManifest: t8
  };
  if (r.signature != null) json.signature = r.signature;
  if (r.userDefinedMetadata != null) json.userDefinedMetadata = r.userDefinedMetadata;
  if (r.modelInitializer != null) json.modelInitializer = r.modelInitializer;
  if (r.initializerSignature != null) json.initializerSignature = r.initializerSignature;
  if (r.trainingConfig != null) json.trainingConfig = r.trainingConfig;
  return json;
}
// Inverse of `id`: converts a parsed model-JSON object `r` into model
// artifacts, attaching weight specs `t8` and weight data `e` when the JSON
// declares a weightsManifest (both are then required).
function _w(r, t8, e) {
  const artifacts = {
    modelTopology: r.modelTopology,
    format: r.format,
    generatedBy: r.generatedBy,
    convertedBy: r.convertedBy
  };
  if (r.trainingConfig != null) artifacts.trainingConfig = r.trainingConfig;
  if (r.weightsManifest != null) {
    if (!t8) throw new Error("modelJSON has weightsManifest but weightSpecs is null");
    if (!e) throw new Error("modelJSON has weightsManifest but weightData is null");
    artifacts.weightSpecs = t8;
    artifacts.weightData = e;
  }
  if (r.signature != null) artifacts.signature = r.signature;
  if (r.userDefinedMetadata != null) artifacts.userDefinedMetadata = r.userDefinedMetadata;
  if (r.modelInitializer != null) artifacts.modelInitializer = r.modelInitializer;
  if (r.initializerSignature != null) artifacts.initializerSignature = r.initializerSignature;
  return artifacts;
}
// Converts model JSON `r` to model artifacts, using the async callback `t8`
// (invoked with the weightsManifest, resolving to [weightSpecs, weightData])
// to fetch weights when the JSON declares them.
async function ic(r, t8) {
  let e, o;
  return r.weightsManifest != null && ([e, o] = await t8(r.weightsManifest)), _w(r, e, o);
}
// Computes ModelArtifactsInfo (save date, topology type, and byte sizes of
// topology/specs/weights) for a JSON-topology model; binary topologies are
// rejected.
function va(r) {
  if (r.modelTopology instanceof ArrayBuffer) throw new Error("Expected JSON model topology, received ArrayBuffer.");
  return {
    dateSaved: /* @__PURE__ */new Date(),
    modelTopologyType: "JSON",
    modelTopologyBytes: r.modelTopology == null ? 0 : ok(JSON.stringify(r.modelTopology)),
    weightSpecsBytes: r.weightSpecs == null ? 0 : ok(JSON.stringify(r.weightSpecs)),
    weightDataBytes: r.weightData == null ? 0 : new jt(r.weightData).byteLength
  };
}
// Flattens a weights manifest (array of groups, each with a `weights` list)
// into a single flat array of weight specs.
function ud(r) {
  const flattened = [];
  r.forEach(group => flattened.push(...group.weights));
  return flattened;
}
// Builds the 2048-entry mantissa lookup table for table-based float16 ->
// float32 decoding. Entries 1..1023 renormalize subnormal halves; entries
// 1024..2047 map normalized mantissas directly.
function b4() {
  // Renormalize a subnormal half mantissa into float32 bit layout.
  const normalizeSubnormal = (m) => {
    let mantissa = m << 13;
    let exponent = 0;
    while ((mantissa & 8388608) === 0) { // shift until the implicit bit appears
      exponent -= 8388608;
      mantissa <<= 1;
    }
    mantissa &= -8388609; // clear the implicit leading bit
    exponent += 947912704;
    return mantissa | exponent;
  };
  const table = new Uint32Array(2048);
  table[0] = 0;
  for (let i = 1; i < 1024; i++) {
    table[i] = normalizeSubnormal(i);
  }
  for (let i = 1024; i < 2048; i++) {
    table[i] = 939524096 + ((i - 1024) << 13);
  }
  return table;
}
// Builds the 64-entry exponent lookup table for float16 -> float32 decoding
// (index = half exponent bits, +32 for negative sign), special-casing the
// zero (0, 32) and infinity/NaN (31, 63) buckets.
function C4() {
  const table = new Uint32Array(64);
  for (let i = 1; i < 31; i++) {
    table[i] = i << 23;
  }
  for (let i = 33; i < 63; i++) {
    table[i] = 2147483648 + ((i - 32) << 23);
  }
  table[0] = 0;
  table[31] = 1199570944;
  table[32] = 2147483648;
  table[63] = 3347054592;
  return table;
}
// Builds the 64-entry offset table for float16 decoding: every exponent
// bucket points 1024 entries into the mantissa table, except the two
// zero/subnormal buckets (0 and 32) which use offset 0.
function w4() {
  const offsets = new Uint32Array(64).fill(1024);
  offsets[0] = 0;
  offsets[32] = 0;
  return offsets;
}
// Returns a float16 -> float32 decoder backed by the three lookup tables
// above (mantissa b4, exponent C4, offset w4). For each uint16 half `i`:
//   float32 bits = mantissa[offset[i >> 10] + (i & 0x3ff)] + exponent[i >> 10]
function S4() {
  let r = b4(),
    t8 = C4(),
    e = w4();
  // o: Uint16Array of raw half-precision values; returns a Float32Array of
  // the same length.
  return o => {
    let n = new ArrayBuffer(4 * o.length),
      s = new Uint32Array(n);
    for (let a = 0; a < o.length; a++) {
      let i = o[a],
        p = r[e[i >> 10] + (i & 1023)] + t8[i >> 10];
      s[a] = p;
    }
    // Reinterpret the assembled uint32 bit patterns as float32s.
    return new Float32Array(n);
  };
}
// Registry that routes model URLs to matching IO handlers. Routers are
// predicate functions that return a handler for a URL they recognize, or
// null otherwise.
var ft = class {
  constructor() {
    this.saveRouters = [], this.loadRouters = [];
  }
  // Lazily-created process-wide singleton.
  static getInstance() {
    return ft.instance == null && (ft.instance = new ft()), ft.instance;
  }
  static registerSaveRouter(t8) {
    ft.getInstance().saveRouters.push(t8);
  }
  static registerLoadRouter(t8) {
    ft.getInstance().loadRouters.push(t8);
  }
  // All save handlers whose router matched the URL.
  static getSaveHandlers(t8) {
    return ft.getHandlers(t8, "save");
  }
  // e: optional load options forwarded to each router.
  static getLoadHandlers(t8, e) {
    return ft.getHandlers(t8, "load", e);
  }
  static getHandlers(t8, e, o) {
    let n = [];
    return (e === "load" ? ft.getInstance().loadRouters : ft.getInstance().saveRouters).forEach(a => {
      let i = a(t8, o);
      i !== null && n.push(i);
    }), n;
  }
};
// Public aliases for the IO router registry: register save/load routers and
// look up matching handlers for a URL.
var uk = r => ft.registerSaveRouter(r);
var pk = r => ft.registerLoadRouter(r);
var ck = r => ft.getSaveHandlers(r);
var lk = (r, t8) => ft.getLoadHandlers(r, t8);
// IndexedDB database name/version and the two object stores used for model
// persistence: full artifacts (Uu) and lightweight info records (nu).
var $w = "tensorflowjs";
var Ew = 1;
var Uu = "models_store";
var nu = "model_info_store";
// Returns the environment's IndexedDB factory, checking legacy vendor
// prefixes; throws outside the browser or when IndexedDB is unavailable.
// NOTE(review): "environmentis" typo is in the runtime string — kept as-is.
function mk() {
  if (!A().getBool("IS_BROWSER")) throw new Error("Failed to obtain IndexedDB factory because the current environmentis not a web browser.");
  let r = typeof window == "undefined" ? self : window,
    t8 = r.indexedDB || r.mozIndexedDB || r.webkitIndexedDB || r.msIndexedDB || r.shimIndexedDB;
  if (t8 == null) throw new Error("The current browser does not appear to support IndexedDB.");
  return t8;
}
// onupgradeneeded handler: creates both object stores, keyed by modelPath.
function Rw(r) {
  let t8 = r.result;
  t8.createObjectStore(Uu, {
    keyPath: "modelPath"
  }), t8.createObjectStore(nu, {
    keyPath: "modelPath"
  });
}
// IO handler that saves/loads model artifacts to/from IndexedDB under a
// fixed model path. Both save() and load() funnel through databaseAction().
var ka = class {
  constructor(t8) {
    if (this.indexedDB = mk(), t8 == null || !t8) throw new Error("For IndexedDB, modelPath must not be null, undefined or empty.");
    this.modelPath = t8;
  }
  async save(t8) {
    // NOTE(review): the error text says "BrowserLocalStorage" although this
    // is the IndexedDB handler; it is a runtime string and is kept as-is.
    if (t8.modelTopology instanceof ArrayBuffer) throw new Error("BrowserLocalStorage.save() does not support saving model topology in binary formats yet.");
    return this.databaseAction(this.modelPath, t8);
  }
  async load() {
    return this.databaseAction(this.modelPath);
  }
  // With artifacts `e` present this is a save (info store written first,
  // then the models store; the info entry is rolled back if the artifacts
  // write fails). Without `e` it is a load. Resolves with the artifacts
  // (load) or {modelArtifactsInfo} (save).
  databaseAction(t8, e) {
    return new Promise((o, n) => {
      let s = this.indexedDB.open($w, Ew);
      s.onupgradeneeded = () => Rw(s), s.onsuccess = () => {
        let a = s.result;
        if (e == null) {
          // Load path: read the artifacts record for this model path.
          let i = a.transaction(Uu, "readonly"),
            u = i.objectStore(Uu).get(this.modelPath);
          u.onsuccess = () => {
            if (u.result == null) return a.close(), n(new Error(`Cannot find model with path '${this.modelPath}' in IndexedDB.`));
            o(u.result.modelArtifacts);
          }, u.onerror = c => (a.close(), n(u.error)), i.oncomplete = () => a.close();
        } else {
          // Save path: normalize weight data to one buffer, write info then
          // artifacts, deleting the info record if the artifacts put fails.
          e.weightData = jt.join(e.weightData);
          let i = va(e),
            p = a.transaction(nu, "readwrite"),
            u = p.objectStore(nu),
            c;
          try {
            c = u.put({
              modelPath: this.modelPath,
              modelArtifactsInfo: i
            });
          } catch (m) {
            return n(m);
          }
          let l;
          c.onsuccess = () => {
            l = a.transaction(Uu, "readwrite");
            let m = l.objectStore(Uu),
              d;
            try {
              d = m.put({
                modelPath: this.modelPath,
                modelArtifacts: e,
                modelArtifactsInfo: i
              });
            } catch (f) {
              return n(f);
            }
            d.onsuccess = () => o({
              modelArtifactsInfo: i
            }), d.onerror = f => {
              // Roll back the info record so the stores stay consistent.
              u = p.objectStore(nu);
              let h = u.delete(this.modelPath);
              h.onsuccess = () => (a.close(), n(d.error)), h.onerror = g => (a.close(), n(d.error));
            };
          }, c.onerror = m => (a.close(), n(c.error)), p.oncomplete = () => {
            l == null ? a.close() : l.oncomplete = () => a.close();
          };
        }
      }, s.onerror = a => n(s.error);
    });
  }
};
ka.URL_SCHEME = "indexeddb://";
// Router: handles "indexeddb://" URLs in the browser, for both save and load.
var dk = r => A().getBool("IS_BROWSER") && !Array.isArray(r) && r.startsWith(ka.URL_SCHEME) ? I4(r.slice(ka.URL_SCHEME.length)) : null;
ft.registerSaveRouter(dk);
ft.registerLoadRouter(dk);
// Factory for the IndexedDB IO handler.
function I4(r) {
  return new ka(r);
}
// Strips the "indexeddb://" scheme prefix, if present.
function v4(r) {
  return r.startsWith(ka.URL_SCHEME) ? r.slice(ka.URL_SCHEME.length) : r;
}
// Model store manager for IndexedDB: lists and removes stored models, using
// the same database and object stores as the `ka` handler.
var pd = class {
  constructor() {
    this.indexedDB = mk();
  }
  // Resolves with a map of modelPath -> ModelArtifactsInfo from the info
  // store.
  async listModels() {
    return new Promise((t8, e) => {
      let o = this.indexedDB.open($w, Ew);
      o.onupgradeneeded = () => Rw(o), o.onsuccess = () => {
        let n = o.result,
          s = n.transaction(nu, "readonly"),
          i = s.objectStore(nu).getAll();
        i.onsuccess = () => {
          let p = {};
          for (let u of i.result) p[u.modelPath] = u.modelArtifactsInfo;
          t8(p);
        }, i.onerror = p => (n.close(), e(i.error)), s.oncomplete = () => n.close();
      }, o.onerror = n => e(o.error);
    });
  }
  // Deletes both the info record and the artifacts record for the model;
  // resolves with the removed model's ModelArtifactsInfo.
  async removeModel(t8) {
    return t8 = v4(t8), new Promise((e, o) => {
      let n = this.indexedDB.open($w, Ew);
      n.onupgradeneeded = () => Rw(n), n.onsuccess = () => {
        let s = n.result,
          a = s.transaction(nu, "readwrite"),
          i = a.objectStore(nu),
          p = i.get(t8),
          u;
        p.onsuccess = () => {
          if (p.result == null) return s.close(), o(new Error(`Cannot find model with path '${t8}' in IndexedDB.`));
          {
            // Delete the info record first, then the artifacts record (the
            // artifacts delete runs even if the info delete errored).
            let c = i.delete(t8),
              l = () => {
                u = s.transaction(Uu, "readwrite");
                let d = u.objectStore(Uu).delete(t8);
                d.onsuccess = () => e(p.result.modelArtifactsInfo), d.onerror = f => o(p.error);
              };
            c.onsuccess = l, c.onerror = m => (l(), s.close(), o(p.error));
          }
        }, p.onerror = c => (s.close(), o(p.error)), a.oncomplete = () => {
          u == null ? s.close() : u.oncomplete = () => s.close();
        };
      }, n.onerror = s => o(n.error);
    });
  }
};
// Path separator and key components for localStorage model persistence:
// keys have the form "tensorflowjs_models/<modelPath>/<suffix>".
var ai = "/";
var uc = "tensorflowjs_models";
var fk = "info";
var k4 = "model_topology";
var N4 = "weight_specs";
var T4 = "weight_data";
var _4 = "model_metadata";
// Builds the full set of localStorage keys for one model path.
function hk(r) {
  return {
    info: [uc, r, fk].join(ai),
    topology: [uc, r, k4].join(ai),
    weightSpecs: [uc, r, N4].join(ai),
    weightData: [uc, r, T4].join(ai),
    modelMetadata: [uc, r, _4].join(ai)
  };
}
// Removes every localStorage entry belonging to one model (takes the key
// set produced by hk).
function gk(r) {
  for (let t8 of Object.values(r)) window.localStorage.removeItem(t8);
}
// Extracts the model path from a localStorage key of the form
// "tensorflowjs_models/<path>/<suffix>"; the path itself may contain "/".
function $4(r) {
  let t8 = r.split(ai);
  if (t8.length < 3) throw new Error(`Invalid key format: ${r}`);
  return t8.slice(1, t8.length - 1).join(ai);
}
// Strips the "localstorage://" scheme prefix, if present.
function E4(r) {
  return r.startsWith(Na.URL_SCHEME) ? r.slice(Na.URL_SCHEME.length) : r;
}
// IO handler that saves/loads model artifacts to/from window.localStorage
// under a fixed model path. Topology/specs/metadata are stored as JSON;
// weight data is stored base64-encoded.
var Na = class {
  constructor(t8) {
    if (!A().getBool("IS_BROWSER") || typeof window == "undefined" || typeof window.localStorage == "undefined") throw new Error("The current environment does not support local storage.");
    if (this.LS = window.localStorage, t8 == null || !t8) throw new Error("For local storage, modelPath must not be null, undefined or empty.");
    this.modelPath = t8, this.keys = hk(this.modelPath);
  }
  async save(t8) {
    if (t8.modelTopology instanceof ArrayBuffer) throw new Error("BrowserLocalStorage.save() does not support saving model topology in binary formats yet.");
    {
      let e = JSON.stringify(t8.modelTopology),
        o = JSON.stringify(t8.weightSpecs),
        n = va(t8),
        s = jt.join(t8.weightData);
      try {
        this.LS.setItem(this.keys.info, JSON.stringify(n)), this.LS.setItem(this.keys.topology, e), this.LS.setItem(this.keys.weightSpecs, o), this.LS.setItem(this.keys.weightData, sk(s));
        // Optional fields are stored as undefined (dropped by JSON) when
        // absent.
        let a = {
          format: t8.format,
          generatedBy: t8.generatedBy,
          convertedBy: t8.convertedBy,
          signature: t8.signature != null ? t8.signature : void 0,
          userDefinedMetadata: t8.userDefinedMetadata != null ? t8.userDefinedMetadata : void 0,
          modelInitializer: t8.modelInitializer != null ? t8.modelInitializer : void 0,
          initializerSignature: t8.initializerSignature != null ? t8.initializerSignature : void 0,
          trainingConfig: t8.trainingConfig != null ? t8.trainingConfig : void 0
        };
        return this.LS.setItem(this.keys.modelMetadata, JSON.stringify(a)), {
          modelArtifactsInfo: n
        };
      } catch (a) {
        // Roll back any partially-written keys (quota errors are the usual
        // cause) before reporting the failure.
        throw gk(this.keys), new Error(`Failed to save model '${this.modelPath}' to local storage: size quota being exceeded is a possible cause of this failure: modelTopologyBytes=${n.modelTopologyBytes}, weightSpecsBytes=${n.weightSpecsBytes}, weightDataBytes=${n.weightDataBytes}.`);
      }
    }
  }
  async load() {
    let t8 = JSON.parse(this.LS.getItem(this.keys.info));
    if (t8 == null) throw new Error(`In local storage, there is no model with name '${this.modelPath}'`);
    if (t8.modelTopologyType !== "JSON") throw new Error("BrowserLocalStorage does not support loading non-JSON model topology yet.");
    let e = {},
      o = JSON.parse(this.LS.getItem(this.keys.topology));
    if (o == null) throw new Error(`In local storage, the topology of model '${this.modelPath}' is missing.`);
    e.modelTopology = o;
    let n = JSON.parse(this.LS.getItem(this.keys.weightSpecs));
    if (n == null) throw new Error(`In local storage, the weight specs of model '${this.modelPath}' are missing.`);
    e.weightSpecs = n;
    // Metadata is optional; copy only fields that were stored.
    let s = this.LS.getItem(this.keys.modelMetadata);
    if (s != null) {
      let i = JSON.parse(s);
      e.format = i.format, e.generatedBy = i.generatedBy, e.convertedBy = i.convertedBy, i.signature != null && (e.signature = i.signature), i.userDefinedMetadata != null && (e.userDefinedMetadata = i.userDefinedMetadata), i.modelInitializer != null && (e.modelInitializer = i.modelInitializer), i.initializerSignature != null && (e.initializerSignature = i.initializerSignature), i.trainingConfig != null && (e.trainingConfig = i.trainingConfig);
    }
    let a = this.LS.getItem(this.keys.weightData);
    if (a == null) throw new Error(`In local storage, the binary weight values of model '${this.modelPath}' are missing.`);
    // Weight data round-trips through base64 (see sk/ak).
    return e.weightData = ak(a), e;
  }
};
Na.URL_SCHEME = "localstorage://";
// Router: handles "localstorage://" URLs in the browser, for both save and
// load.
var xk = r => A().getBool("IS_BROWSER") && !Array.isArray(r) && r.startsWith(Na.URL_SCHEME) ? R4(r.slice(Na.URL_SCHEME.length)) : null;
ft.registerSaveRouter(xk);
ft.registerLoadRouter(xk);
// Factory for the localStorage IO handler.
function R4(r) {
  return new Na(r);
}
// Model store manager for window.localStorage: lists and removes models
// stored under the "tensorflowjs_models/<path>/<suffix>" key scheme.
var cd = class {
  constructor() {
    $(A().getBool("IS_BROWSER"), () => "Current environment is not a web browser"), $(typeof window == "undefined" || typeof window.localStorage != "undefined", () => "Current browser does not appear to support localStorage"), this.LS = window.localStorage;
  }
  // Scans all localStorage keys for ".../info" entries and parses them into
  // a path -> ModelArtifactsInfo map.
  async listModels() {
    let t8 = {},
      e = uc + ai,
      o = ai + fk;
    for (let n = 0; n < this.LS.length; ++n) {
      let s = this.LS.key(n);
      if (s.startsWith(e) && s.endsWith(o)) {
        let a = $4(s);
        t8[a] = JSON.parse(this.LS.getItem(s));
      }
    }
    return t8;
  }
  // Deletes every key belonging to the model and returns its info record.
  async removeModel(t8) {
    t8 = E4(t8);
    let e = hk(t8);
    if (this.LS.getItem(e.info) == null) throw new Error(`Cannot find model at path '${t8}'`);
    let o = JSON.parse(this.LS.getItem(e.info));
    return gk(e), o;
  }
};
// Scheme separator used in model URLs of the form "scheme://path".
var pc = "://";
// Registry mapping URL schemes (e.g. "localstorage", "indexeddb") to their
// model store manager implementations.
var Zt = class {
  constructor() {
    this.managers = {};
  }
  // Lazily-created process-wide singleton.
  static getInstance() {
    return Zt.instance == null && (Zt.instance = new Zt()), Zt.instance;
  }
  // Registers a manager for a scheme (a trailing "://" is stripped first);
  // duplicate registration for the same scheme is an error.
  static registerManager(t8, e) {
    $(t8 != null, () => "scheme must not be undefined or null."), t8.endsWith(pc) && (t8 = t8.slice(0, t8.indexOf(pc))), $(t8.length > 0, () => "scheme must not be an empty string.");
    let o = Zt.getInstance();
    $(o.managers[t8] == null, () => `A model store manager is already registered for scheme '${t8}'.`), o.managers[t8] = e;
  }
  static getManager(t8) {
    let e = Zt.getInstance().managers[t8];
    if (e == null) throw new Error(`Cannot find model manager for scheme '${t8}'`);
    return e;
  }
  static getSchemes() {
    return Object.keys(Zt.getInstance().managers);
  }
};
// Splits a "scheme://path" model URL into its scheme and path components;
// throws when the scheme separator is absent.
function ld(r) {
  if (r.indexOf(pc) === -1) {
    throw new Error(`The url string provided does not contain a scheme. Supported schemes are: ${Zt.getSchemes().join(",")}`);
  }
  const [scheme, path] = r.split(pc);
  return { scheme, path };
}
// Copies (or, when `e` is true, moves) a model from URL `r` to URL `t8`.
// Resolves exactly one load handler for the source and one save handler for
// the destination, loads, saves, and — when moving — removes the source.
// Returns the saved model's ModelArtifactsInfo.
async function yk(r, t8, e = false) {
  $(r !== t8, () => `Old path and new path are the same: '${r}'`);
  let o = ft.getLoadHandlers(r);
  $(o.length > 0, () => `Copying failed because no load handler is found for source URL ${r}.`), $(o.length < 2, () => `Copying failed because more than one (${o.length}) load handlers for source URL ${r}.`);
  let n = o[0],
    s = ft.getSaveHandlers(t8);
  $(s.length > 0, () => `Copying failed because no save handler is found for destination URL ${t8}.`), $(s.length < 2, () => `Copying failed because more than one (${o.length}) save handlers for destination URL ${t8}.`);
  let a = s[0],
    i = ld(r).scheme,
    p = ld(r).path,
    // BUG FIX: `u` ("same medium") must compare the source scheme with the
    // DESTINATION scheme. Previously this read `i === ld(r).scheme`, which
    // compares the source scheme with itself and is always true — so a
    // cross-medium move deleted the source before saving and the
    // delete-after-copy branch below was dead code.
    u = i === ld(t8).scheme,
    c = await n.load();
  // Moving within the same medium: remove the source before saving so the
  // save does not collide with the existing entry.
  e && u && (await Zt.getManager(i).removeModel(p));
  let l = await a.save(c);
  // Moving across media: remove the source only after a successful save.
  return e && !u && (await Zt.getManager(i).removeModel(p)), l.modelArtifactsInfo;
}
// Lists all saved models across every registered scheme, keyed by the full
// "scheme://path" URL.
async function bk() {
  let r = Zt.getSchemes(),
    t8 = {};
  for (let e of r) {
    let o = await Zt.getManager(e).listModels();
    for (let n in o) {
      let s = e + pc + n;
      t8[s] = o[n];
    }
  }
  return t8;
}
// Removes the model stored at the given "scheme://path" URL.
async function Ck(r) {
  let t8 = ld(r);
  return Zt.getManager(t8.scheme).removeModel(t8.path);
}
// Copies a model between URLs (the source is kept).
async function wk(r, t8) {
  return yk(r, t8, false);
}
// Moves a model between URLs (the source is removed).
async function Sk(r, t8) {
  return yk(r, t8, true);
}
// Browser platform implementation: fetch/now/encode/decode plus a
// postMessage-based setTimeout alternative that avoids browser throttling of
// timers in backgrounded tabs.
var Dw = class {
  constructor() {
    this.messageName = "setTimeoutCustom", this.functionRefs = [], this.handledMessageCount = 0, this.hasEventListener = false;
  }
  fetch(t8, e) {
    return fetch(t8, e);
  }
  now() {
    return performance.now();
  }
  // UTF-8 only; the TextEncoder is created lazily and reused.
  encode(t8, e) {
    if (e !== "utf-8" && e !== "utf8") throw new Error(`Browser's encoder only supports utf-8, but got ${e}`);
    return this.textEncoder == null && (this.textEncoder = new TextEncoder()), this.textEncoder.encode(t8);
  }
  decode(t8, e) {
    return new TextDecoder(e).decode(t8);
  }
  // Schedules `t8` after `e` ms. Outside a browser window (or with the
  // feature flag off) this is plain setTimeout; otherwise the callback is
  // queued and fired via window.postMessage.
  setTimeoutCustom(t8, e) {
    if (typeof window == "undefined" || !A().getBool("USE_SETTIMEOUTCUSTOM")) {
      setTimeout(t8, e);
      return;
    }
    // NOTE(review): the index is read when the timer fires, not when the
    // callback is queued — concurrent schedules appear to rely on the
    // batch-reset bookkeeping below. Kept as-is.
    this.functionRefs.push(t8), setTimeout(() => {
      window.postMessage({
        name: this.messageName,
        index: this.functionRefs.length - 1
      }, "*");
    }, e), this.hasEventListener || (this.hasEventListener = true, window.addEventListener("message", o => {
      if (o.source === window && o.data.name === this.messageName) {
        o.stopPropagation();
        let n = this.functionRefs[o.data.index];
        // Once every queued callback has run, reset the queue.
        n(), this.handledMessageCount++, this.handledMessageCount === this.functionRefs.length && (this.functionRefs = [], this.handledMessageCount = 0);
      }
    }, true));
  }
  isTypedArray(t8) {
    return Qm(t8);
  }
};
// In browsers: install the browser platform and register the localStorage
// and IndexedDB model-store managers, silently skipping environments where
// either storage backend is unavailable.
if (A().get("IS_BROWSER")) {
  A().setPlatform("browser", new Dw());
  try {
    Zt.registerManager(Na.URL_SCHEME, new cd());
  } catch (r) {}
  try {
    Zt.registerManager(ka.URL_SCHEME, new pd());
  } catch (r) {}
}
// Indirection over the dynamic fetch import (Ik) so it can be stubbed.
var D4 = {
  importFetch: () => Ik()
};
// Cached fetch implementation for Node, loaded on first use.
var Aw;
// Node.js platform implementation: fetch, high-resolution time, and UTF-8
// text encode/decode via the `util` module (vk — defined elsewhere).
var Fw = class {
  constructor() {
    this.util = vk(), this.textEncoder = new this.util.TextEncoder();
  }
  // Prefers a global fetch when present; otherwise lazily imports one.
  fetch(t8, e) {
    return A().global.fetch != null ? A().global.fetch(t8, e) : (Aw == null && (Aw = D4.importFetch()), Aw(t8, e));
  }
  // Milliseconds with sub-millisecond precision from process.hrtime().
  now() {
    let t8 = process.hrtime();
    return t8[0] * 1e3 + t8[1] / 1e6;
  }
  encode(t8, e) {
    if (e !== "utf-8" && e !== "utf8") throw new Error(`Node built-in encoder only supports utf-8, but got ${e}`);
    return this.textEncoder.encode(t8);
  }
  decode(t8, e) {
    return t8.length === 0 ? "" : new this.util.TextDecoder(e).decode(t8);
  }
  isTypedArray(t8) {
    return this.util.types.isFloat32Array(t8) || this.util.types.isInt32Array(t8) || this.util.types.isUint8Array(t8) || this.util.types.isUint8ClampedArray(t8);
  }
};
// Register the Node platform when running in Node (and not a browser).
A().get("IS_NODE") && !A().get("IS_BROWSER") && A().setPlatform("node", new Fw());
// tf.buffer: creates a TensorBuffer with shape `r`, dtype `t8` (defaults to
// "float32"), and optional backing values `e`. `wt` validates the shape;
// `tt` is the TensorBuffer class (defined elsewhere).
function me(r, t8 = "float32", e) {
  return t8 = t8 || "float32", wt(r), new tt(r, t8, e);
}
// tf.cast: casts tensor `r` to dtype `t8`. Unknown dtypes and
// string <-> numeric casts are rejected.
function A4(r, t8) {
  let e = v(r, "x", "cast");
  if (!rw(t8)) throw new Error(`Failed to cast to unknown dtype ${t8}`);
  if (t8 === "string" && e.dtype !== "string" || t8 !== "string" && e.dtype === "string") throw new Error("Only strings can be casted to strings");
  let o = {
      x: e
    },
    n = {
      dtype: t8
    };
  return T.runKernel(bo, o, n);
}
var We = N({
  cast_: A4
});
// tf.clone: returns a new tensor with the same values as `r` (accepts string
// as well as numeric dtypes).
function F4(r) {
  let e = {
    x: v(r, "x", "clone", "string_or_numeric")
  };
  return T.runKernel(wo, e);
}
var Ur = N({
  clone_: F4
});
// tf.print: logs a tensor's string rendering to the console.
// t8: verbose flag forwarded to Tensor.toString (defaults to false).
function md(r, t8 = false) {
  const rendered = r.toString(t8);
  console.log(rendered);
}
// Bootstrap: run Sw(), then expose the core ops needed by Tensor methods
// (buffer/cast/clone/print) via Q0. NOTE(review): Sw and Q0 are defined
// elsewhere in the bundle; their exact roles cannot be confirmed from here.
Sw();
var P4 = {
  buffer: me,
  cast: We,
  clone: Ur,
  print: md
};
Q0(P4);
// Thin public wrappers over the environment (A) and the engine singleton (T).
// Enables production mode (disables some runtime checks via the PROD flag).
function lde() {
  A().set("PROD", true);
}
// Enables debug mode.
function mde() {
  A().set("DEBUG", true);
}
// Disables deprecation warnings globally.
function dde() {
  A().set("DEPRECATION_WARNINGS_ENABLED", false), console.warn("TensorFlow.js deprecation warnings have been disabled.");
}
// Emits a deprecation warning unless warnings are disabled.
function Pw(r) {
  A().getBool("DEPRECATION_WARNINGS_ENABLED") && console.warn(r + " You can disable deprecation warnings with tf.disableDeprecationWarnings().");
}
Z0(Pw);
// Disposes all registered variables.
function fde() {
  T.disposeVariables();
}
// Returns the engine singleton.
function pr() {
  return T;
}
// Returns current memory usage info.
function hde() {
  return T.memory();
}
// Profiles the given function's kernel executions.
function gde(r) {
  return T.profile(r);
}
// tf.tidy: runs `r`/`t8`, disposing intermediate tensors afterwards.
function De(r, t8) {
  return T.tidy(r, t8);
}
// Disposes every tensor found in the (possibly nested) container `r`.
function Mt(r) {
  Sl(r).forEach(e => e.dispose());
}
// Keeps a tensor alive across the enclosing tidy scope.
function Rr(r) {
  return T.keep(r);
}
// Times the execution of `r`.
function xde(r) {
  return T.time(r);
}
// Sets the active backend by name.
function yde(r) {
  return T.setBackend(r);
}
// Resolves when the backend is initialized.
function bde() {
  return T.ready();
}
// Returns the active backend's name.
function Cde() {
  return T.backendName;
}
// Removes a registered backend.
function wde(r) {
  T.removeBackend(r);
}
// Finds an initialized backend by name.
function Sde(r) {
  return T.findBackend(r);
}
// Finds a registered backend factory by name.
function Ide(r) {
  return T.findBackendFactory(r);
}
// Registers a backend factory with priority `e`.
function su(r, t8, e = 1) {
  return T.registerBackend(r, t8, e);
}
// Returns the active backend instance.
function vde() {
  return T.backend;
}
// Registers a platform implementation with the environment.
function kde(r, t8) {
  A().setPlatform(r, t8);
}
// tf.add: element-wise a + b. `Oe` reconciles the two operands' dtypes
// (defined elsewhere in the bundle).
function O4(r, t8) {
  let e = v(r, "a", "add"),
    o = v(t8, "b", "add");
  [e, o] = Oe(e, o);
  let n = {
    a: e,
    b: o
  };
  return T.runKernel(uo, n);
}
var Ce = N({
  add_: O4
});
// tf.floorDiv: element-wise floor(a / b) after dtype reconciliation.
function M4(r, t8) {
  let e = v(r, "a", "floorDiv"),
    o = v(t8, "b", "floorDiv");
  [e, o] = Oe(e, o);
  let n = {
    a: e,
    b: o
  };
  return T.runKernel(In, n);
}
var dd = N({
  floorDiv_: M4
});
// tf.div: element-wise a / b. When both operands are int32 the call is
// routed to floorDiv so the result stays integral.
function L4(r, t8) {
  let e = v(r, "a", "div"),
    o = v(t8, "b", "div");
  if ([e, o] = Oe(e, o), e.dtype === "int32" && o.dtype === "int32") return dd(e, o);
  let n = {
      a: e,
      b: o
    },
    s = {};
  return T.runKernel(hn, n, s);
}
var je = N({
  div_: L4
});
// tf.mul: element-wise a * b after dtype reconciliation.
function B4(r, t8) {
  let e = v(r, "a", "mul"),
    o = v(t8, "b", "mul");
  [e, o] = Oe(e, o);
  let n = {
    a: e,
    b: o
  };
  return T.runKernel(Yn, n);
}
var se = N({
  mul_: B4
});
// tf.abs: element-wise absolute value. complex64 inputs dispatch to the
// dedicated complex-abs kernel (magnitude).
function z4(r) {
  let t8 = v(r, "x", "abs");
  if (t8.dtype === "complex64") {
    let e = {
      x: t8
    };
    return T.runKernel(Pi, e);
  } else {
    let e = {
      x: t8
    };
    return T.runKernel(Xs, e);
  }
}
var Jt = N({
  abs_: z4
});
// Element-wise arccosine via the Acos kernel.
function V4(r) {
  const inputs = { x: v(r, "x", "acos") };
  return T.runKernel(Wo, inputs);
}
var kk = N({
  acos_: V4
});
// Element-wise inverse hyperbolic cosine via the Acosh kernel.
function W4(r) {
  const inputs = { x: v(r, "x", "acosh") };
  return T.runKernel(Uo, inputs);
}
var Nk = N({
  acosh_: W4
});
// addN: element-wise sum of a list of tensors, which must all share one dtype
// and one shape.
function U4(r) {
  $(Array.isArray(r), () => "The argument passed to tf.addN() must be a list of tensors");
  $(r.length >= 1, () => `Must pass at least one tensor to tf.addN(), but got ${r.length}`);
  const tensors = r.map((n, s) => v(n, `tensors${s}`, "addN"));
  const first = tensors[0];
  for (const t of tensors) {
    if (t.dtype !== first.dtype) throw new Error("All tensors passed to tf.addN() must have the same dtype");
  }
  for (const t of tensors) {
    if (!Cr(t.shape, first.shape)) throw new Error("All tensors passed to tf.addN() must have the same shape");
  }
  return T.runKernel(Go, tensors);
}
var Tk = N({
  addN_: U4
});
// Logical-AND reduction of a bool tensor over the given axes.
function G4(r, t8 = null, e = false) {
  const inputs = { x: v(r, "x", "all", "bool") };
  const attrs = { axis: t8, keepDims: e };
  return T.runKernel(Ho, inputs, attrs);
}
var _k = N({
  all_: G4
});
// Logical-OR reduction of a bool tensor over the given axes.
function H4(r, t8 = null, e = false) {
  const inputs = { x: v(r, "x", "any", "bool") };
  const attrs = { axis: t8, keepDims: e };
  return T.runKernel(Ko, inputs, attrs);
}
var $k = N({
  any_: H4
});
// Index of the maximum value along `axis` (default 0).
function K4(r, t8 = 0) {
  const inputs = { x: v(r, "x", "argMax") };
  return T.runKernel(Ys, inputs, { axis: t8 });
}
var Ek = N({
  argMax_: K4
});
// Index of the minimum value along `axis` (default 0).
function q4(r, t8 = 0) {
  const inputs = { x: v(r, "x", "argMin") };
  return T.runKernel(Qs, inputs, { axis: t8 });
}
var Rk = N({
  argMin_: q4
});
// Element-wise arcsine via the Asin kernel.
function j4(r) {
  const inputs = { x: v(r, "x", "asin") };
  return T.runKernel(qo, inputs);
}
var Dk = N({
  asin_: j4
});
// Element-wise inverse hyperbolic sine via the Asinh kernel.
function X4(r) {
  const inputs = { x: v(r, "x", "asinh") };
  return T.runKernel(jo, inputs);
}
var Ak = N({
  asinh_: X4
});
// Element-wise arctangent via the Atan kernel.
function Y4(r) {
  const inputs = { x: v(r, "x", "atan") };
  return T.runKernel(Xo, inputs);
}
var Fk = N({
  atan_: Y4
});
// Broadcasting element-wise atan2(a, b) via the Atan2 kernel.
function Q4(r, t8) {
  let a = v(r, "a", "atan2");
  let b = v(t8, "b", "atan2");
  [a, b] = Oe(a, b);
  return T.runKernel(Qo, { a, b });
}
var Pk = N({
  atan2_: Q4
});
// Element-wise inverse hyperbolic tangent via the Atanh kernel.
function Z4(r) {
  const inputs = { x: v(r, "x", "atanh") };
  return T.runKernel(Yo, inputs);
}
var Ok = N({
  atanh_: Z4
});
// Builds pooling shape info: extends the 2D filter size with the input's
// channel count and delegates to the shared conv2d-info computation.
function J4(r, t8, e, o, n = "NHWC", s) {
  const channels = r[3];
  const filterShape = [...t8, channels];
  return Hu(r, filterShape, e, s, o, null, null, Lk(n));
}
// Depthwise-style conv info: the filter's in/out channel dims both equal the
// input's channel count for the given layout.
function Mw(r, t8, e, o, n, s, a = "channelsLast") {
  const [fh, fw] = vl(t8);
  let filterShape;
  if (a === "channelsLast") {
    filterShape = [fh, fw, r[3], r[3]];
  } else if (a === "channelsFirst") {
    filterShape = [fh, fw, r[1], r[1]];
  } else {
    throw new Error(`Unknown dataFormat ${a}`);
  }
  return Hu(r, filterShape, e, o, n, s, false, a);
}
// 3D pooling info: expands the filter size to a 5D filter shape and maps the
// NDHWC/NCDHW format name onto the internal channel layout before delegating.
function eH(r, t8, e, o, n, s, a = "NDHWC") {
  const [fd3, fh, fw] = Ow(t8);
  let filterShape;
  let layout;
  if (a === "NDHWC") {
    layout = "channelsLast";
    filterShape = [fd3, fh, fw, r[4], r[4]];
  } else if (a === "NCDHW") {
    layout = "channelsFirst";
    filterShape = [fd3, fh, fw, r[1], r[1]];
  } else {
    throw new Error(`Unknown dataFormat ${a}`);
  }
  return Mk(r, filterShape, e, o, n, false, layout, s);
}
// Core 2D convolution shape arithmetic. Given input shape `r`, filter shape
// `t8`, strides `e`, dilations `o`, pad `n` and rounding mode `s`, computes
// batch/spatial/channel sizes, effective (dilated) filter extents, explicit
// pad amounts and the output shape. `a` is the depthwise flag (out-channels
// multiplied by in-channels); `i` selects the channel layout.
function Hu(r, t8, e, o, n, s, a = false, i = "channelsLast") {
  let [p, u, c, l] = [-1, -1, -1, -1];
  // Unpack [batch, height, width, channels] according to the layout.
  if (i === "channelsLast") [p, u, c, l] = r;else if (i === "channelsFirst") [p, l, u, c] = r;else throw new Error(`Unknown dataFormat ${i}`);
  // Filter shape is [h, w, inChannels, outChannels]; index 2 is skipped.
  let [m, d,, f] = t8,
    [h, g] = vl(e),
    [x, b] = vl(o),
    C = cc(m, x), // effective filter height under dilation
    S = cc(d, b), // effective filter width under dilation
    {
      padInfo: k,
      outHeight: _,
      outWidth: E
    } = oH(n, u, c, h, g, C, S, s, i),
    R = a ? f * l : f, // depthwise conv multiplies out-channels by in-channels
    D;
  return i === "channelsFirst" ? D = [p, R, _, E] : i === "channelsLast" && (D = [p, _, E, R]), {
    batchSize: p,
    dataFormat: i,
    inHeight: u,
    inWidth: c,
    inChannels: l,
    outHeight: _,
    outWidth: E,
    outChannels: R,
    padInfo: k,
    strideHeight: h,
    strideWidth: g,
    filterHeight: m,
    filterWidth: d,
    effectiveFilterHeight: C,
    effectiveFilterWidth: S,
    dilationHeight: x,
    dilationWidth: b,
    inShape: r,
    outShape: D,
    filterShape: t8
  };
}
// Core 3D convolution shape arithmetic (3D analogue of Hu): unpacks the input
// per layout, derives effective (dilated) filter extents, explicit pad info
// and the output shape. `s` is the depthwise flag; `i` the rounding mode.
function Mk(r, t8, e, o, n, s = false, a = "channelsLast", i) {
  let [p, u, c, l, m] = [-1, -1, -1, -1, -1];
  // Unpack [batch, depth, height, width, channels] according to the layout.
  if (a === "channelsLast") [p, u, c, l, m] = r;else if (a === "channelsFirst") [p, m, u, c, l] = r;else throw new Error(`Unknown dataFormat ${a}`);
  // Filter shape is [d, h, w, inChannels, outChannels]; index 3 is skipped.
  let [d, f, h,, g] = t8,
    [x, b, C] = Ow(e),
    [S, k, _] = Ow(o),
    E = cc(d, S), // effective filter depth under dilation
    R = cc(f, k), // effective filter height under dilation
    D = cc(h, _), // effective filter width under dilation
    {
      padInfo: P,
      outDepth: O,
      outHeight: M,
      outWidth: L
    } = nH(n, u, c, l, x, b, C, E, R, D, i),
    B = s ? g * m : g, // depthwise conv multiplies out-channels by in-channels
    z;
  return a === "channelsFirst" ? z = [p, B, O, M, L] : a === "channelsLast" && (z = [p, O, M, L, B]), {
    batchSize: p,
    dataFormat: a,
    inDepth: u,
    inHeight: c,
    inWidth: l,
    inChannels: m,
    outDepth: O,
    outHeight: M,
    outWidth: L,
    outChannels: B,
    padInfo: P,
    strideDepth: x,
    strideHeight: b,
    strideWidth: C,
    filterDepth: d,
    filterHeight: f,
    filterWidth: h,
    effectiveFilterDepth: E,
    effectiveFilterHeight: R,
    effectiveFilterWidth: D,
    dilationDepth: S,
    dilationHeight: k,
    dilationWidth: _,
    inShape: r,
    outShape: z,
    filterShape: t8
  };
}
// 2D output-shape helper for a square filter: standard conv arithmetic
// (size - filter + 2 * pad) / stride + 1 on both spatial dims, with the
// requested rounding mode; pad defaults when null.
function tH(r, t8, e, o, n) {
  if (o == null) {
    o = Lw(r, t8, e);
  }
  const outHeight = kl((r[0] - t8 + 2 * o) / e + 1, n);
  const outWidth = kl((r[1] - t8 + 2 * o) / e + 1, n);
  return [outHeight, outWidth];
}
// 3D output-shape helper: returns [d, h, w, channels]; a spatial dim is left
// at 0 whenever the padded input is smaller than the filter along that axis.
function rH(r, t8, e, o, n, s) {
  if (n == null) {
    n = Lw(r, t8[0], o[0]);
  }
  const outShape = [0, 0, 0, e];
  for (let axis = 0; axis < 3; axis++) {
    if (r[axis] + 2 * n >= t8[axis]) {
      outShape[axis] = kl((r[axis] - t8[axis] + 2 * n) / o[axis] + 1, s);
    }
  }
  return outShape;
}
// Default pad amount for a square filter: half the gap needed so that the
// (dilated) filter centers over the input at the given stride.
function Lw(r, t8, e, o = 1) {
  const effectiveField = cc(t8, o);
  return Math.floor((r[0] * (e - 1) - e + effectiveField) / 2);
}
// Normalizes a size/stride/dilation parameter to a triple: a number fans out
// to all three slots, a pair gets a trailing 1, a triple passes through.
function vl(r) {
  if (typeof r == "number") {
    return [r, r, r];
  }
  if (r.length === 2) {
    return [r[0], r[1], 1];
  }
  return r;
}
// Normalizes a 3D tuple parameter: a scalar expands to [n, n, n], an array
// passes through unchanged.
function Ow(r) {
  if (typeof r == "number") {
    return [r, r, r];
  }
  return r;
}
// Effective receptive-field size of a filter of size r under dilation t8:
// r + (r - 1) * (t8 - 1); a dilation <= 1 leaves the size unchanged.
function cc(r, t8) {
  if (t8 <= 1) {
    return r;
  }
  return r + (r - 1) * (t8 - 1);
}
// Resolves a 2D padding spec `r` into explicit {top,bottom,left,right} pad
// amounts plus the resulting output height/width. Accepts a number (uniform
// pad), "same", "valid", or an explicit per-dimension pad array; anything
// else throws. (t8=inHeight, e=inWidth, o/n=strides, s/a=effective filter
// h/w, i=roundingMode, p=channel layout.)
function oH(r, t8, e, o, n, s, a, i, p) {
  let u, c, l;
  if (typeof r == "number") {
    // Uniform numeric padding on all four sides.
    u = {
      top: r,
      bottom: r,
      left: r,
      right: r,
      type: r === 0 ? "VALID" : "NUMBER"
    };
    let d = tH([t8, e], s, o, r, i);
    c = d[0], l = d[1];
  } else if (r === "same") {
    // SAME: output spans ceil(in/stride); total pad is split with the extra
    // pixel (if odd) on the bottom/right.
    c = Math.ceil(t8 / o), l = Math.ceil(e / n);
    let m = Math.max(0, (c - 1) * o + s - t8),
      d = Math.max(0, (l - 1) * n + a - e),
      f = Math.floor(m / 2),
      h = m - f,
      g = Math.floor(d / 2),
      x = d - g;
    u = {
      top: f,
      bottom: h,
      left: g,
      right: x,
      type: "SAME"
    };
  } else if (r === "valid") u = {
    top: 0,
    bottom: 0,
    left: 0,
    right: 0,
    type: "VALID"
  }, c = Math.ceil((t8 - s + 1) / o), l = Math.ceil((e - a + 1) / n);else if (typeof r == "object") {
    // Explicit pad array; which indices hold h/w pads depends on the layout.
    let m = p === "channelsLast" ? r[1][0] : r[2][0],
      d = p === "channelsLast" ? r[1][1] : r[2][1],
      f = p === "channelsLast" ? r[2][0] : r[3][0],
      h = p === "channelsLast" ? r[2][1] : r[3][1];
    u = {
      top: m,
      bottom: d,
      left: f,
      right: h,
      type: m === 0 && d === 0 && f === 0 && h === 0 ? "VALID" : "EXPLICIT"
    }, c = kl((t8 - s + m + d) / o + 1, i), l = kl((e - a + f + h) / n + 1, i);
  } else throw Error(`Unknown padding parameter: ${r}`);
  return {
    padInfo: u,
    outHeight: c,
    outWidth: l
  };
}
// 3D analogue of oH: resolves padding `r` (a number, "same", or "valid" —
// "valid" is treated as 0) into explicit six-sided pad amounts plus output
// depth/height/width. Explicit pad arrays are not supported here.
function nH(r, t8, e, o, n, s, a, i, p, u, c) {
  let l, m, d, f;
  if (r === "valid" && (r = 0), typeof r == "number") {
    // Uniform numeric padding on all six sides.
    l = {
      top: r,
      bottom: r,
      left: r,
      right: r,
      front: r,
      back: r,
      type: r === 0 ? "VALID" : "NUMBER"
    };
    let g = rH([t8, e, o, 1], [i, p, u], 1, [n, s, a], r, c);
    m = g[0], d = g[1], f = g[2];
  } else if (r === "same") {
    // SAME: output spans ceil(in/stride); each total pad is split with the
    // extra pixel on the far side.
    m = Math.ceil(t8 / n), d = Math.ceil(e / s), f = Math.ceil(o / a);
    let h = (m - 1) * n + i - t8,
      g = (d - 1) * s + p - e,
      x = (f - 1) * a + u - o,
      b = Math.floor(h / 2),
      C = h - b,
      S = Math.floor(g / 2),
      k = g - S,
      _ = Math.floor(x / 2),
      E = x - _;
    l = {
      top: S,
      bottom: k,
      left: _,
      right: E,
      front: b,
      back: C,
      type: "SAME"
    };
  } else throw Error(`Unknown padding parameter: ${r}`);
  return {
    padInfo: l,
    outDepth: m,
    outHeight: d,
    outWidth: f
  };
}
// Applies the requested rounding mode to a dimension size; a missing/falsy
// mode truncates toward zero, unknown modes throw.
function kl(r, t8) {
  if (!t8) {
    return Math.trunc(r);
  }
  if (t8 === "round") {
    return Math.round(r);
  }
  if (t8 === "ceil") {
    return Math.ceil(r);
  }
  if (t8 === "floor") {
    return Math.floor(r);
  }
  throw new Error(`Unknown roundingMode ${t8}`);
}
// True when every component of the (scalar-or-tuple) parameter equals 1.
function Gu(r) {
  const [a, b, c] = vl(r);
  return a === 1 && b === 1 && c === 1;
}
// Conv ops require that at least one of strides/dilations is all-ones.
function xr(r, t8) {
  if (Gu(r)) {
    return true;
  }
  return Gu(t8);
}
// True when all components of a stride/dilation parameter are positive.
function Ta(r) {
  const parts = vl(r);
  return parts.every(part => part > 0);
}
// Maps the conv2d dataFormat string to the internal channel-layout name.
function Lk(r) {
  switch (r) {
    case "NHWC":
      return "channelsLast";
    case "NCHW":
      return "channelsFirst";
    default:
      throw new Error(`Unknown dataFormat ${r}`);
  }
}
// Validates that pad `t8` is integer-valued whenever a dimRoundingMode `e`
// is supplied for op `r`: numbers must be integers, explicit nested-array
// pads must contain only integers, strings ("same"/"valid") are rejected.
function Bt(r, t8, e) {
  if (e != null) {
    if (typeof t8 == "string") throw Error(`Error in ${r}: pad must be an integer when using dimRoundingMode ${e} but got pad ${t8}.`);
    if (typeof t8 == "number") $(qa(t8), () => `Error in ${r}: pad must be an integer when using dimRoundingMode ${e} but got pad ${t8}.`);else if (typeof t8 == "object") t8.forEach(o => {
      o.forEach(n => {
        $(qa(n), () => `Error in ${r}: pad must be an integer when using dimRoundingMode ${e} but got pad ${n}.`);
      });
    });else throw Error(`Error in ${r}: Unknown padding parameter: ${t8}`);
  }
}
// Reshapes a tensor (any dtype, including string) to the requested shape.
function sH(r, t8) {
  const inputs = { x: v(r, "x", "reshape", "string_or_numeric") };
  const attrs = { shape: t8 };
  return T.runKernel(da, inputs, attrs);
}
var W = N({
  reshape_: sH
});
// avgPool: 2D average pooling over a float32 input. A rank-3 input is
// temporarily expanded to rank 4 (batch of 1); the result is cast back to
// the input dtype and squeezed back to rank 3 if needed.
function aH(r, t8, e, o, n) {
  let s = v(r, "x", "avgPool", "float32"),
    a = 1; // pooling uses a fixed dilation of 1
  $(xr(e, a), () => `Error in avgPool: Either strides or dilations must be 1. Got strides ${e} and dilations '${a}'`);
  let i = s,
    p = false;
  // Expand rank-3 input, then validate rank and the pad/rounding combination.
  s.rank === 3 && (p = true, i = W(s, [1, s.shape[0], s.shape[1], s.shape[2]])), $(i.rank === 4, () => `Error in avgPool: x must be rank 4 but got rank ${i.rank}.`), Bt("avgPool", o, n);
  let u = {
      x: i
    },
    c = {
      filterSize: t8,
      strides: e,
      pad: o,
      dimRoundingMode: n
    },
    l = T.runKernel(Zo, u, c);
  return l = We(l, s.dtype), p ? W(l, [l.shape[1], l.shape[2], l.shape[3]]) : l;
}
var fd = N({
  avgPool_: aH
});
// avgPool3d: 3D average pooling over a float32 NDHWC input. A rank-4 input
// is expanded to rank 5 (batch of 1) and restored afterwards; strides must
// be positive and only NDHWC is supported.
function iH(r, t8, e, o, n, s = "NDHWC") {
  let a = v(r, "x", "avgPool3d", "float32"),
    i = a,
    p = false;
  a.rank === 4 && (p = true, i = W(a, [1, a.shape[0], a.shape[1], a.shape[2], a.shape[3]])), $(i.rank === 5, () => `Error in avgPool3d: x must be rank 5 but got rank ${i.rank}.`), $(s === "NDHWC", () => `Error in avgPool3d: Only NDHWC is currently supported, but got dataFormat of ${s}`), $(typeof e == "number" && e > 0 || Array.isArray(e) && e[0] > 0 && e[1] > 0 && e[2] > 0, () => `Error in avgPool3d: Stride must be > 0, but got '${e}'`), Bt("avgPool3d", o, n);
  let u = {
      x: i
    },
    c = {
      filterSize: t8,
      strides: e,
      pad: o,
      dimRoundingMode: n,
      dataFormat: s
    },
    l = T.runKernel(Zs, u, c);
  return l = We(l, i.dtype), p ? W(l, [l.shape[1], l.shape[2], l.shape[3], l.shape[4]]) : l;
}
var Bk = N({
  avgPool3d_: iH
});
// concat: joins a list of tensors along `axis`. complex64 tensors may only
// be concatenated with other complex64 tensors; a single input is cloned.
function uH(r, t8 = 0) {
  $(r.length >= 1, () => "Pass at least one tensor to concat");
  let e = si(r, "tensors", "concat", "string_or_numeric");
  if (e[0].dtype === "complex64" && e.forEach(s => {
    if (s.dtype !== "complex64") throw new Error(`Cannot concatenate complex64 tensors with a tensor
with dtype ${s.dtype}. `);
  }), e.length === 1) return Ur(e[0]);
  let o = e,
    n = {
      axis: t8
    };
  return T.runKernel(ta, o, n);
}
var bt = N({
  concat_: uH
});
// Matrix product a x b, optionally transposing either operand first.
function pH(r, t8, e = false, o = false) {
  let a = v(r, "a", "matMul");
  let b = v(t8, "b", "matMul");
  [a, b] = Oe(a, b);
  const attrs = { transposeA: e, transposeB: o };
  return T.runKernel(Jo, { a, b }, attrs);
}
var Ze = N({
  matMul_: pH
});
// Element-wise sigmoid of a float32 tensor via the Sigmoid kernel.
function cH(r) {
  const inputs = { x: v(r, "x", "sigmoid", "float32") };
  return T.runKernel(Cs, inputs);
}
var $a = N({
  sigmoid_: cH
});
// Extracts a slice starting at `begin` with the given `size`; scalars
// cannot be sliced.
function lH(r, t8, e) {
  const x = v(r, "x", "slice", "string_or_numeric");
  if (x.rank === 0) throw new Error("Slicing scalar is not possible");
  const attrs = { begin: t8, size: e };
  return T.runKernel(ha, { x }, attrs);
}
var Xe = N({
  slice_: lH
});
// Element-wise hyperbolic tangent of a float32 tensor via the Tanh kernel.
function mH(r) {
  const inputs = { x: v(r, "x", "tanh", "float32") };
  return T.runKernel(Es, inputs);
}
var Nl = N({
  tanh_: mH
});
// basicLSTMCell: one step of a standard LSTM. Concatenates [data, h], runs
// the combined kernel + bias, splits the columns into the four gates
// (input, new-candidate, forget, output), and returns [newC, newH].
function dH(r, t8, e, o, n, s) {
  let a = v(r, "forgetBias", "basicLSTMCell"),
    i = v(t8, "lstmKernel", "basicLSTMCell"),
    p = v(e, "lstmBias", "basicLSTMCell"),
    u = v(o, "data", "basicLSTMCell"),
    c = v(n, "c", "basicLSTMCell"),
    l = v(s, "h", "basicLSTMCell"),
    m = bt([u, l], 1),
    d = Ze(m, i),
    f = Ce(d, p),
    h = f.shape[0],
    g = f.shape[1] / 4, // each gate occupies a quarter of the columns
    x = [h, g],
    b = Xe(f, [0, 0], x),
    C = Xe(f, [0, g], x),
    S = Xe(f, [0, g * 2], x),
    k = Xe(f, [0, g * 3], x),
    // newC = sigmoid(i) * tanh(j) + c * sigmoid(forgetBias + f)
    _ = Ce(se($a(b), Nl(C)), se(c, $a(Ce(a, S)))),
    // newH = tanh(newC) * sigmoid(o)
    E = se(Nl(_), $a(k));
  return [_, E];
}
var zk = N({
  basicLSTMCell_: dH
});
// batchToSpaceND: moves batch entries into spatial blocks and then crops.
// The batch size must be divisible by the product of blockShape, and
// crops.length must equal blockShape.length.
function fH(r, t8, e) {
  let o = v(r, "x", "batchToSpaceND"),
    n = t8.reduce((i, p) => i * p);
  $(o.rank >= 1 + t8.length, () => `input rank is ${o.rank} but should be > than blockShape.length ${t8.length}`), $(e.length === t8.length, () => `crops.length is ${e.length} but should be equal to blockShape.length ${t8.length}`), $(o.shape[0] % n === 0, () => `input tensor batch is ${o.shape[0]} but is not divisible by the product of the elements of blockShape ${t8.join(" * ")} === ${n}`);
  let s = {
      x: o
    },
    a = {
      blockShape: t8,
      crops: e
    };
  return T.runKernel(Js, s, a);
}
var hd = N({
  batchToSpaceND_: fH
});
// Promotes a tensor of rank <= 3 to an equivalent rank-4 view for the
// batch-norm kernel; rank-4+ tensors pass through unchanged.
function Vk(r) {
  if (r.rank === 0 || r.rank === 1) {
    return W(r, [1, 1, 1, r.size]);
  }
  if (r.rank === 2) {
    return W(r, [1, 1, r.shape[0], r.shape[1]]);
  }
  if (r.rank === 3) {
    return W(r, [1, r.shape[0], r.shape[1], r.shape[2]]);
  }
  return r;
}
// batchNorm: normalizes x with the given mean/variance and optional
// scale/offset tensors. x is viewed as rank 4 for the kernel and reshaped
// back afterwards; mean, variance, offset and scale must share a rank.
function hH(r, t8, e, o, n, s) {
  s == null && (s = 1e-3); // default variance epsilon
  let a = v(r, "x", "batchNorm"),
    i = v(t8, "mean", "batchNorm"),
    p = v(e, "variance", "batchNorm"),
    u;
  n != null && (u = v(n, "scale", "batchNorm"));
  let c;
  o != null && (c = v(o, "offset", "batchNorm")), $(i.rank === p.rank, () => "Batch normalization gradient requires mean and variance to have equal ranks."), $(c == null || i.rank === c.rank, () => "Batch normalization gradient requires mean and offset to have equal ranks."), $(u == null || i.rank === u.rank, () => "Batch normalization gradient requires mean and scale to have equal ranks.");
  let m = {
      x: Vk(a),
      scale: u,
      offset: c,
      mean: i,
      variance: p
    },
    d = {
      varianceEpsilon: s
    },
    f = T.runKernel(vn, m, d);
  return W(f, a.shape);
}
var au = N({
  batchNorm_: hH
});
// batchNorm2d: rank-checked convenience wrapper over batchNorm for rank-2
// inputs (mean/variance/scale/offset must be rank 2 or rank 1).
function gH(r, t8, e, o, n, s) {
  let a = v(r, "x", "batchNorm"),
    i = v(t8, "mean", "batchNorm"),
    p = v(e, "variance", "batchNorm"),
    u;
  n != null && (u = v(n, "scale", "batchNorm"));
  let c;
  return o != null && (c = v(o, "offset", "batchNorm")), $(a.rank === 2, () => `Error in batchNorm2D: x must be rank 2 but got rank ${a.rank}.`), $(i.rank === 2 || i.rank === 1, () => `Error in batchNorm2D: mean must be rank 2 or rank 1 but got rank ${i.rank}.`), $(p.rank === 2 || p.rank === 1, () => `Error in batchNorm2D: variance must be rank 2 or rank 1 but got rank ${p.rank}.`), u != null && $(u.rank === 2 || u.rank === 1, () => `Error in batchNorm2D: scale must be rank 2 or rank 1 but got rank ${u.rank}.`), c != null && $(c.rank === 2 || c.rank === 1, () => `Error in batchNorm2D: offset must be rank 2 or rank 1 but got rank ${c.rank}.`), au(a, i, p, c, u, s);
}
var Wk = N({
  batchNorm2d_: gH
});
// batchNorm3d: rank-checked convenience wrapper over batchNorm for rank-3
// inputs (mean/variance/scale/offset must be rank 3 or rank 1).
function xH(r, t8, e, o, n, s) {
  let a = v(r, "x", "batchNorm"),
    i = v(t8, "mean", "batchNorm"),
    p = v(e, "variance", "batchNorm"),
    u;
  n != null && (u = v(n, "scale", "batchNorm"));
  let c;
  return o != null && (c = v(o, "offset", "batchNorm")), $(a.rank === 3, () => `Error in batchNorm3D: x must be rank 3 but got rank ${a.rank}.`), $(i.rank === 3 || i.rank === 1, () => `Error in batchNorm3D: mean must be rank 3 or rank 1 but got rank ${i.rank}.`), $(p.rank === 3 || p.rank === 1, () => `Error in batchNorm3D: variance must be rank 3 or rank 1 but got rank ${p.rank}.`), u != null && $(u.rank === 3 || u.rank === 1, () => `Error in batchNorm3D: scale must be rank 3 or rank 1 but got rank ${u.rank}.`), c != null && $(c.rank === 3 || c.rank === 1, () => `Error in batchNorm3D: offset must be rank 3 or rank 1 but got rank ${c.rank}.`), au(a, i, p, c, u, s);
}
var Uk = N({
  batchNorm3d_: xH
});
// batchNorm4d: rank-checked convenience wrapper over batchNorm for rank-4
// inputs (mean/variance/scale/offset must be rank 4 or rank 1).
function yH(r, t8, e, o, n, s) {
  let a = v(r, "x", "batchNorm"),
    i = v(t8, "mean", "batchNorm"),
    p = v(e, "variance", "batchNorm"),
    u;
  n != null && (u = v(n, "scale", "batchNorm"));
  let c;
  return o != null && (c = v(o, "offset", "batchNorm")), $(a.rank === 4, () => `Error in batchNorm4D: x must be rank 4 but got rank ${a.rank}.`), $(i.rank === 4 || i.rank === 1, () => `Error in batchNorm4D: mean must be rank 4 or rank 1 but got rank ${i.rank}.`), $(p.rank === 4 || p.rank === 1, () => `Error in batchNorm4D: variance must be rank 4 or rank 1 but got rank ${p.rank}.`), u != null && $(u.rank === 4 || u.rank === 1, () => `Error in batchNorm4D: scale must be rank 4 or rank 1 but got rank ${u.rank}.`), c != null && $(c.rank === 4 || c.rank === 1, () => `Error in batchNorm4D: offset must be rank 4 or rank 1 but got rank ${c.rank}.`), au(a, i, p, c, u, s);
}
var Gk = N({
  batchNorm4d_: yH
});
// bincount: counts occurrences (optionally weighted) of int32 values in x
// into `size` bins; weights must be empty or have the same size as x.
function bH(r, t8, e) {
  let o = v(r, "x", "bincount"),
    n = v(t8, "weights", "bincount");
  $(o.dtype === "int32", () => `Error in bincount: input dtype must be int32, but got ${o.dtype}`), $(e >= 0, () => `size must be non-negative, but got ${e}.`), $(n.size === o.size || n.size === 0, () => `Error in bincount: weights must have the same size as input or0-length, but got input shape: ${o.shape}, weights shape: ${n.shape}.`);
  let s = {
      x: o,
      weights: n
    },
    a = {
      size: e
    };
  return T.runKernel(en, s, a);
}
var gd = N({
  bincount_: bH
});
// Element-wise bitwise AND; both tensors must be int32 and share one shape.
function CH(r, t8) {
  const x = v(r, "x", "bitwiseAnd");
  const y = v(t8, "y", "bitwiseAnd");
  if (!Cr(x.shape, y.shape)) {
    throw new Error(`BitwiseAnd: Tensors must have the same shape. x: ${x.shape}, y: ${y.shape}`);
  }
  if (x.dtype !== "int32" || y.dtype !== "int32") {
    throw new Error(`BitwiseAnd: Only supports 'int32' values in tensor, found type of x: ${x.dtype} and type of y: ${y.dtype}`);
  }
  return T.runKernel(ja, { a: x, b: y });
}
var Hk = N({
  bitwiseAnd_: CH
});
// Computes the broadcast shape of two shape vectors; both inputs must be
// rank-1 int32 tensors.
function wH(r, t8) {
  const s0 = v(r, "s0", "broadcastArgs", "int32");
  const s1 = v(t8, "s1", "broadcastArgs", "int32");
  if (s0.rank !== 1) {
    throw new Error(`broadcastArgs(): first input must be a vector (rank=1). Has rank ${s0.rank}`);
  }
  if (s1.rank !== 1) {
    throw new Error(`broadcastArgs(): second input must be a vector (rank=1). Has rank ${s1.rank}`);
  }
  return T.runKernel(ea, { s0, s1 });
}
var Kk = N({
  broadcastArgs_: wH
});
// broadcastTo: broadcasts a tensor to a compatible (possibly higher-rank)
// target shape by left-padding the input shape with 1s and tiling size-1
// dims. Returns a clone when no tiling is actually needed.
function SH(r, t8) {
  let e = v(r, "broadcastTo", "x"),
    o = e.shape;
  if (wt(t8), t8.length < e.rank) throw new Error(`broadcastTo(): shape.length=${t8.length} < input.rank=${e.rank}.`);
  if (t8.length > e.rank) {
    let u = e.shape.slice();
    for (; u.length < t8.length;) u.unshift(1);
    e = W(e, u);
  }
  let n = e.shape,
    s = Array.from(t8);
  // s[u] becomes the per-axis tile count; a non-1 mismatch is an error.
  for (let u = t8.length - 1; u >= 0; u--) if (n[u] === t8[u]) s[u] = 1;else if (e.shape[u] !== 1) throw new Error(`broadcastTo(): [${o}] cannot be broadcast to [${t8}].`);
  if (s.map((u, c) => u > 1 ? c : -1).filter(u => u >= 0).length === 0) return Ur(e);
  let i = {
      x: e
    },
    p = {
      reps: s
    };
  return T.runKernel(po, i, p);
}
var iu = N({
  broadcastTo_: SH
});
// Element-wise ceiling of a float32 tensor via the Ceil kernel.
function IH(r) {
  const inputs = { x: v(r, "x", "ceil", "float32") };
  return T.runKernel(tn, inputs);
}
var qk = N({
  ceil_: IH
});
// Creates a tensor of the given shape filled with a scalar value; the dtype
// falls back to the value's inferred dtype when not supplied.
function Ea(r, t8, e) {
  wt(r);
  const attrs = {
    shape: r,
    value: t8,
    dtype: e || Ri(t8)
  };
  return T.runKernel(sa, {}, attrs);
}
// Clamps every element into [min, max]; a degenerate range (min === max)
// short-circuits to a constant fill of that value.
function vH(r, t8, e) {
  const x = v(r, "x", "clipByValue");
  $(t8 <= e, () => `Error in clip: min (${t8}) must be less than or equal to max (${e}).`);
  if (t8 === e) {
    return Ea(x.shape, t8, x.dtype);
  }
  const attrs = { clipValueMin: t8, clipValueMax: e };
  return T.runKernel(Co, { x }, attrs);
}
var jk = N({
  clipByValue_: vH
});
// Rank-specific concat conveniences; all delegate to the generic concat (bt).

// concat1d: joins rank-1 tensors along axis 0.
function kH(r) {
  return bt(r, 0);
}
var Xk = N({
  concat1d_: kH
});
// concat2d: joins rank-2 tensors along the given axis.
function NH(r, t8) {
  return bt(r, t8);
}
var Yk = N({
  concat2d_: NH
});
// concat3d: joins rank-3 tensors along the given axis.
function TH(r, t8) {
  return bt(r, t8);
}
var Qk = N({
  concat3d_: TH
});
// concat4d: joins rank-4 tensors along the given axis.
function _H(r, t8) {
  return bt(r, t8);
}
var Zk = N({
  concat4d_: _H
});
// conv2d: 2D convolution over a float32 input. Rank-3 inputs get a batch dim
// of 1 (removed again from the result); validates filter rank, channel
// agreement and strides/dilations before dispatching the Conv2D kernel.
function $H(r, t8, e, o, n = "NHWC", s = [1, 1], a) {
  let i = v(r, "x", "conv2d", "float32"),
    p = v(t8, "filter", "conv2d", "float32"),
    u = i,
    c = false;
  i.rank === 3 && (c = true, u = W(i, [1, i.shape[0], i.shape[1], i.shape[2]])), $(u.rank === 4, () => `Error in conv2d: input must be rank 4, but got rank ${u.rank}.`), $(p.rank === 4, () => `Error in conv2d: filter must be rank 4, but got rank ${p.rank}.`), Bt("conv2d", o, a);
  let l = n === "NHWC" ? u.shape[3] : u.shape[1];
  $(l === p.shape[2], () => `Error in conv2d: depth of input (${l}) must match input depth for filter ${p.shape[2]}.`), $(xr(e, s), () => `Error in conv2D: Either strides or dilations must be 1. Got strides ${e} and dilations '${s}'`), $(Ta(s), () => "Error in conv2D: Dilated rates should be larger than 0."), $(Ta(e), () => "Error in conv2D: Strides should be larger than 0.");
  let m = {
      x: u,
      filter: p
    },
    d = {
      strides: e,
      pad: o,
      dataFormat: n,
      dilations: s,
      dimRoundingMode: a
    },
    f = T.runKernel(rn, m, d);
  return c ? W(f, [f.shape[1], f.shape[2], f.shape[3]]) : f;
}
var uu = N({
  conv2d_: $H
});
// conv1d: 1D convolution implemented by inserting a height-1 dimension and
// delegating to conv2d. Only the NWC data format is supported; rank-2 inputs
// get a batch dim of 1 (removed again from the result).
function EH(r, t8, e, o, n = "NWC", s = 1, a) {
  let i = v(r, "x", "conv1d"),
    p = v(t8, "filter", "conv1d"),
    u = i,
    c = false;
  i.rank === 2 && (c = true, u = W(i, [1, i.shape[0], i.shape[1]])), $(u.rank === 3, () => `Error in conv1d: input must be rank 3, but got rank ${u.rank}.`), $(p.rank === 3, () => `Error in conv1d: filter must be rank 3, but got rank ${p.rank}.`), Bt("conv1d", o, a), $(u.shape[2] === p.shape[1], () => `Error in conv1d: depth of input (${u.shape[2]}) must match input depth for filter ${p.shape[1]}.`), $(xr(e, s), () => `Error in conv1D: Either stride or dilation must be 1. Got stride ${e} and dilation '${s}'`), $(Ta(s), () => "Error in conv1D: Dilated rates should be larger than 0."), $(Ta(e), () => "Error in conv1D: Stride should be larger than 0."), $(n === "NWC", () => `Error in conv1d: got dataFormat of ${n} but only NWC is currently supported.`);
  let l = W(p, [1, p.shape[0], p.shape[1], p.shape[2]]),
    m = W(u, [u.shape[0], 1, u.shape[1], u.shape[2]]),
    g = uu(m, l, [1, e], o, "NHWC", [1, s], a);
  return c ? W(g, [g.shape[2], g.shape[3]]) : W(g, [g.shape[0], g.shape[2], g.shape[3]]);
}
var Jk = N({
  conv1d_: EH
});
// conv2DBackpropInput: gradient of conv2d with respect to its input (also
// the workhorse behind conv2dTranspose). Rank-3 dy gets a batch dim of 1;
// validates ranks and channel agreement between dy, filter and inShape.
function RH(r, t8, e, o, n, s = "NHWC", a) {
  $(r.length === t8.rank, () => `Length of inShape (${r.length}) and rank of dy (${t8.rank}) must match`);
  let i = r,
    p = t8,
    u = false;
  t8.rank === 3 && (u = true, p = W(t8, [1, t8.shape[0], t8.shape[1], t8.shape[2]]), i = [1, r[0], r[1], r[2]]), $(i.length === 4, () => `Error in conv2dDerInput: inShape must be length 4, but got length ${i.length}.`), $(p.rank === 4, () => `Error in conv2dDerInput: dy must be rank 4, but got rank ${p.rank}`), $(e.rank === 4, () => `Error in conv2dDerInput: filter must be rank 4, but got rank ${e.rank}`);
  let c = s === "NHWC" ? i[3] : i[1],
    l = s === "NHWC" ? p.shape[3] : p.shape[1];
  $(c === e.shape[2], () => `Error in conv2dDerInput: depth of input (${c}) must match input depth for filter ${e.shape[2]}.`), $(l === e.shape[3], () => `Error in conv2dDerInput: depth of output (${l}) must match output depth for filter ${e.shape[3]}.`), Bt("conv2dDerInput", n, a);
  let m = {
      dy: p,
      filter: e
    },
    d = {
      strides: o,
      pad: n,
      dataFormat: s,
      dimRoundingMode: a,
      inputShape: i
    },
    f = T.runKernel(on, m, d);
  return u ? W(f, [f.shape[1], f.shape[2], f.shape[3]]) : f;
}
var xd = N({
  conv2DBackpropInput_: RH
});
// 2D transposed convolution, implemented via the conv2d input-gradient
// kernel with a fixed NHWC layout.
function DH(r, t8, e, o, n, s) {
  const x = v(r, "x", "conv2dTranspose");
  const filter = v(t8, "filter", "conv2dTranspose");
  return xd(e, x, filter, o, n, "NHWC", s);
}
var e2 = N({
  conv2dTranspose_: DH
});
// conv3d: 3D convolution over an NDHWC input. Rank-4 inputs get a batch dim
// of 1 (removed again from the result); validates ranks, channel agreement
// and strides/dilations before dispatching the Conv3D kernel.
function AH(r, t8, e, o, n = "NDHWC", s = [1, 1, 1]) {
  let a = v(r, "x", "conv3d"),
    i = v(t8, "filter", "conv3d"),
    p = a,
    u = false;
  a.rank === 4 && (u = true, p = W(a, [1, a.shape[0], a.shape[1], a.shape[2], a.shape[3]])), $(p.rank === 5, () => `Error in conv3d: input must be rank 5, but got rank ${p.rank}.`), $(i.rank === 5, () => `Error in conv3d: filter must be rank 5, but got rank ${i.rank}.`), $(p.shape[4] === i.shape[3], () => `Error in conv3d: depth of input (${p.shape[4]}) must match input depth for filter ${i.shape[3]}.`), $(xr(e, s), () => `Error in conv3D: Either strides or dilations must be 1. Got strides ${e} and dilations '${s}'`), $(n === "NDHWC", () => `Error in conv3d: got dataFormat of ${n} but only NDHWC is currently supported.`), $(Ta(s), () => "Error in conv3D: Dilated rates should be larger than 0."), $(Ta(e), () => "Error in conv3D: Strides should be larger than 0.");
  let c = {
      x: p,
      filter: i
    },
    l = {
      strides: e,
      pad: o,
      dataFormat: n,
      dilations: s
    },
    m = T.runKernel(nn, c, l);
  return u ? W(m, [m.shape[1], m.shape[2], m.shape[3], m.shape[4]]) : m;
}
var t2 = N({
  conv3d_: AH
});
// conv3DBackpropInput: gradient of conv3d with respect to its input (also
// the workhorse behind conv3dTranspose). Rank-4 dy gets a batch dim of 1;
// validates ranks and channel agreement between dy, filter and inShape.
function FH(r, t8, e, o, n) {
  $(r.length === t8.rank, () => `Length of inShape (${r.length}) and rank of dy (${t8.rank}) must match`);
  let s = r,
    a = t8,
    i = false;
  t8.rank === 4 && (i = true, a = W(t8, [1, t8.shape[0], t8.shape[1], t8.shape[2], t8.shape[3]]), s = [1, r[0], r[1], r[2], r[3]]);
  let p = s[4],
    u = a.shape[4];
  $(s.length === 5, () => `Error in conv3dDerInput: inShape must be length 5, but got length ${s.length}.`), $(a.rank === 5, () => `Error in conv3dDerInput: dy must be rank 5, but got rank ${a.rank}`), $(e.rank === 5, () => `Error in conv3dDerInput: filter must be rank 5, but got rank ${e.rank}`), $(p === e.shape[3], () => `Error in conv3dDerInput: depth of input (${p}) must match input depth for filter ${e.shape[3]}.`), $(u === e.shape[4], () => `Error in conv3dDerInput: depth of output (${u}) must match output depth for filter ${e.shape[4]}.`);
  let c = {
      dy: a,
      filter: e
    },
    l = {
      pad: n,
      strides: o,
      inputShape: s
    },
    m = T.runKernel(sn, c, l);
  return i ? W(m, [m.shape[1], m.shape[2], m.shape[3], m.shape[4]]) : m;
}
var r2 = N({
  conv3DBackpropInput_: FH
});
// 3D transposed convolution, implemented via the conv3d input-gradient
// kernel.
function PH(r, t8, e, o, n) {
  const x = v(r, "x", "conv3dTranspose");
  const filter = v(t8, "filter", "conv3dTranspose");
  return r2(e, x, filter, o, n);
}
var o2 = N({
  conv3dTranspose_: PH
});
// Element-wise cosine of a float32 tensor via the Cos kernel.
function OH(r) {
  const inputs = { x: v(r, "x", "cos", "float32") };
  return T.runKernel(an, inputs);
}
var n2 = N({
  cos_: OH
});
// Element-wise hyperbolic cosine of a float32 tensor via the Cosh kernel.
function MH(r) {
  const inputs = { x: v(r, "x", "cosh", "float32") };
  return T.runKernel(un, inputs);
}
var s2 = N({
  cosh_: MH
});
// Cumulative product along an axis, optionally exclusive and/or reversed.
function LH(r, t8 = 0, e = false, o = false) {
  const inputs = { x: v(r, "x", "cumprod") };
  const attrs = { axis: t8, exclusive: e, reverse: o };
  return T.runKernel(pn, inputs, attrs);
}
var a2 = N({
  cumprod_: LH
});
// Cumulative sum along an axis, optionally exclusive and/or reversed.
function BH(r, t8 = 0, e = false, o = false) {
  const inputs = { x: v(r, "x", "cumsum") };
  const attrs = { axis: t8, exclusive: e, reverse: o };
  return T.runKernel(cn, inputs, attrs);
}
var i2 = N({
  cumsum_: BH
});
// denseBincount: like bincount but accepts rank-1 or rank-2 int32 inputs and
// supports a binary-output mode; weights must be empty or match x's shape.
function zH(r, t8, e, o = false) {
  let n = v(r, "x", "denseBincount"),
    s = v(t8, "weights", "denseBincount");
  $(n.dtype === "int32", () => `Error in denseBincount: input dtype must be int32, but got ${n.dtype}`), $(n.rank <= 2, () => `Error in denseBincount: input must be at most rank 2, but got rank ${n.rank}.`), $(e >= 0, () => `size must be non-negative, but got ${e}.`), $(s.size === n.size || s.size === 0, () => `Error in denseBincount: weights must have the same shape as x or 0-length, but got x shape: ${n.shape}, weights shape: ${s.shape}.`);
  let a = {
      x: n,
      weights: s
    },
    i = {
      size: e,
      binaryOutput: o
    };
  return T.runKernel(ra, a, i);
}
var u2 = N({
  denseBincount_: zH
});
// depthToSpace: rearranges channel data into spatial blocks of size
// blockSize x blockSize. The channel count must be divisible by
// blockSize^2; blockSize must exceed 1.
function VH(r, t8, e = "NHWC") {
  let o = v(r, "x", "depthToSpace", "float32"),
    n = e === "NHWC" ? o.shape[1] : o.shape[2],
    s = e === "NHWC" ? o.shape[2] : o.shape[3],
    a = e === "NHWC" ? o.shape[3] : o.shape[1];
  $(t8 > 1, () => `blockSize should be > 1 for depthToSpace, but was: ${t8}`), $(n * t8 >= 0, () => `Negative dimension size caused by overflow when multiplying
${n} and ${t8} for depthToSpace with input shape
${o.shape}`), $(s * t8 >= 0, () => `Negative dimension size caused by overflow when multiplying
${s} and ${t8} for depthToSpace with input shape
${o.shape}`), $(a % (t8 * t8) === 0, () => `Dimension size must be evenly divisible by ${t8 * t8} but is ${a} for depthToSpace with input shape ${o.shape}`);
  let i = {
      x: o
    },
    p = {
      blockSize: t8,
      dataFormat: e
    };
  return T.runKernel(mn, i, p);
}
var p2 = N({
  depthToSpace_: VH
});
// depthwiseConv2d: depthwise 2D convolution over a float32 input. Rank-3
// inputs get a batch dim of 1 (removed again from the result); the filter's
// inChannels dim must match the input's channel count.
function WH(r, t8, e, o, n = "NHWC", s = [1, 1], a) {
  let i = v(r, "x", "depthwiseConv2d", "float32"),
    p = v(t8, "filter", "depthwiseConv2d", "float32"),
    u = i,
    c = false;
  i.rank === 3 && (c = true, u = W(i, [1, i.shape[0], i.shape[1], i.shape[2]])), $(u.rank === 4, () => `Error in depthwiseConv2d: input must be rank 4, but got rank ${u.rank}.`), $(p.rank === 4, () => `Error in depthwiseConv2d: filter must be rank 4, but got rank ${p.rank}.`);
  let l = n === "NHWC" ? u.shape[3] : u.shape[1];
  $(l === p.shape[2], () => `Error in depthwiseConv2d: number of input channels (${l}) must match the inChannels dimension in filter ${p.shape[2]}.`), Bt("depthwiseConv2d", o, a);
  let m = {
      x: u,
      filter: p
    },
    d = {
      strides: e,
      pad: o,
      dataFormat: n,
      dilations: s,
      dimRoundingMode: a
    },
    f = T.runKernel(dn, m, d);
  return c ? W(f, [f.shape[1], f.shape[2], f.shape[3]]) : f;
}
var lc = N({
  depthwiseConv2d_: WH
});
// Builds a diagonal tensor from the input via the Diag kernel.
function UH(r) {
  const inputs = { x: v(r, "x", "diag") };
  return T.runKernel(oa, inputs);
}
var c2 = N({
  diag_: UH
});
// dilation2d: grayscale morphological dilation. Input may be rank 3 (a batch
// dim of 1 is added and removed) or rank 4; filter must be rank 3 and match
// the input's depth; only NHWC is supported.
function GH(r, t8, e, o, n = [1, 1], s = "NHWC") {
  let a = v(r, "x", "dilation2d"),
    i = v(t8, "filter", "dilation2d");
  $(a.rank === 3 || a.rank === 4, () => `Error in dilation2d: input must be rank 3 or 4, but got rank ${a.rank}.`), $(i.rank === 3, () => `Error in dilation2d: filter must be rank 3, but got rank ${i.rank}.`), $(s === "NHWC", () => `Error in dilation2d: Only NHWC is currently supported, but got dataFormat of ${s}`);
  let p = a,
    u = false;
  a.rank === 3 && (p = W(a, [1, a.shape[0], a.shape[1], a.shape[2]]), u = true), $(p.shape[3] === i.shape[2], () => `Error in dilation2d: input and filter must have the same depth: ${p.shape[3]} vs ${i.shape[2]}`);
  let c = {
      x: p,
      filter: i
    },
    l = {
      strides: e,
      pad: o,
      dilations: n
    },
    m = T.runKernel(fn, c, l);
  return u ? W(m, [m.shape[1], m.shape[2], m.shape[3]]) : m;
}
var l2 = N({
  dilation2d_: GH
});
var Ir = {}; | |
qe(Ir, { | |
assertAndGetBroadcastShape: () => rt, | |
getBroadcastDims: () => m2, | |
getReductionAxes: () => yd | |
}); | |
// Returns the axes of input shape `r` that are broadcast when producing
// output shape `t8`: positions (aligned from the right) where the input
// extent is 1 but the output extent is larger. Result is in ascending order.
function m2(r, t8) {
  const broadcastDims = [];
  for (let offset = 0; offset < r.length; offset++) {
    const inAxis = r.length - 1 - offset;
    const inSize = r[inAxis] || 1;
    const outSize = t8[t8.length - 1 - offset] || 1;
    if (outSize > 1 && inSize === 1) {
      broadcastDims.unshift(inAxis);
    }
  }
  return broadcastDims;
}
// Returns the axes of output shape `t8` along which a value of input shape
// `r` was broadcast (missing from `r`, or size-1 expanded). These are the
// axes a gradient must be summed over. Result is in ascending order.
function yd(r, t8) {
  const axes = [];
  for (let offset = 0; offset < t8.length; offset++) {
    const inDim = r[r.length - offset - 1];
    const outAxis = t8.length - offset - 1;
    const outDim = t8[outAxis];
    const wasBroadcast = inDim == null || (inDim === 1 && outDim > 1);
    if (wasBroadcast) {
      axes.unshift(outAxis);
    }
  }
  return axes;
}
// Broadcasts the shapes `r` and `t8` together using NumPy rules, aligned
// from the right. Throws when a pair of dimensions is incompatible
// (neither equal nor 1). Returns the broadcast result shape.
function rt(r, t8) {
  const rank = Math.max(r.length, t8.length);
  const result = new Array(rank);
  for (let offset = 0; offset < rank; offset++) {
    const rawA = r[r.length - offset - 1];
    const rawB = t8[t8.length - offset - 1];
    const dimA = rawA == null ? 1 : rawA;
    const dimB = rawB == null ? 1 : rawB;
    const target = rank - offset - 1;
    if (dimA === 1) {
      result[target] = dimB;
    } else if (dimB === 1 || dimA === dimB) {
      result[target] = dimA;
    } else {
      let i = `Operands could not be broadcast together with shapes ${r} and ${t8}.`;
      throw Error(i);
    }
  }
  return result;
}
// equal op: element-wise a == b with broadcasting; Oe upcasts the operands
// to a common dtype and rt validates broadcast compatibility.
function HH(r, t8) {
  let e = v(r, "a", "equal", "string_or_numeric"),
    o = v(t8, "b", "equal", "string_or_numeric");
  [e, o] = Oe(e, o), rt(e.shape, o.shape);
  let n = {
    a: e,
    b: o
  };
  return T.runKernel(yn, n);
}
var bd = N({
  equal_: HH
});
// where op: element-wise select between t (a) and e (b) by a bool condition.
// All three operands are broadcast (iu) to the common shape first.
function KH(r, t8, e) {
  let o = v(t8, "a", "where"),
    n = v(e, "b", "where"),
    s = v(r, "condition", "where", "bool"),
    a = rt(rt(s.shape, o.shape), n.shape),
    i = iu(s, a),
    p = iu(o, a),
    u = iu(n, a),
    c = {
      condition: i,
      t: p,
      e: u
    };
  return T.runKernel(fa, c);
}
var lo = N({
  where_: KH
});
// zerosLike op: tensor of zeros with the same shape/dtype as x.
function qH(r) {
  let e = {
    x: v(r, "x", "zerosLike")
  };
  return T.runKernel(Sa, e);
}
var Ht = N({
  zerosLike_: qH
});
// divNoNan op: a / b, but 0 wherever b is 0 — built from div (je),
// zerosLike (Ht), equal (bd) and where (lo).
function jH(r, t8) {
  let e = v(r, "a", "div"),
    o = v(t8, "b", "div");
  [e, o] = Oe(e, o);
  let n = je(e, o),
    s = Ht(n),
    a = bd(o, s);
  return lo(a, s, n);
}
var d2 = N({
  divNoNan_: jH
});
// dot op: product of two rank-1/rank-2 tensors, dispatching by rank pair
// to matMul (Ze) with the appropriate reshapes (W).
function XH(r, t8) {
  let e = v(r, "t1", "dot"),
    o = v(t8, "t2", "dot");
  $((e.rank === 1 || e.rank === 2) && (o.rank === 1 || o.rank === 2), () => `Error in dot: inputs must all be rank 1 or 2, but got ranks ${e.rank} and ${o.rank}.`);
  // Inner dimensions must agree: t1's columns vs t2's rows.
  let n = e.rank === 1 ? e.size : e.shape[1],
    s = o.rank === 1 ? o.size : o.shape[0];
  if ($(n === s, () => `Error in dot: inner dimensions of inputs must match, but got ${n} and ${s}.`), e.rank === 1 && o.rank === 1) {
    // vector . vector -> scalar
    let a = W(e, [1, -1]),
      i = W(o, [-1, 1]),
      p = Ze(a, i);
    return W(p, []);
  } else if (e.rank === 1 && o.rank === 2) {
    // vector . matrix -> vector
    let a = W(e, [1, -1]),
      i = W(o, [o.shape[0], o.shape[1]]),
      p = Ze(a, i);
    return W(p, [p.size]);
  } else if (e.rank === 2 && o.rank === 1) {
    // matrix . vector -> vector
    let a = W(o, [-1, 1]),
      i = Ze(e, a);
    return W(i, [i.size]);
  } else {
    // matrix . matrix -> matrix
    let a = W(o, [o.shape[0], o.shape[1]]);
    return Ze(e, a);
  }
}
var f2 = N({
  dot_: XH
});
// einsum op: r is the equation string, t8 the operand tensors.
function YH(r, ...t8) {
  let e = t8.map((n, s) => v(n, `tensors${s}`, "einsum")),
    o = {
      equation: r
    };
  return T.runKernel(Vi, e, o);
}
var pu = N({
  einsum_: YH
});
// elu op: exponential linear unit activation.
function QH(r) {
  let e = {
    x: v(r, "x", "elu", "float32")
  };
  return T.runKernel(gn, e);
}
var Cd = N({
  elu_: QH
});
// ensureShape op: asserts x's shape is compatible with t8 (ZC compares,
// tolerating nulls), then returns the input unchanged.
function ZH(r, t8) {
  let e = v(r, "x", "ensureShape", "string_or_numeric");
  if (!ZC(e.shape, t8)) throw new Error(`EnsureShape: Shape of tensor ${e.shape} is not compatible with expected shape ${t8}`);
  return r;
}
var h2 = N({
  ensureShape_: ZH
});
// erf op: Gauss error function; int32 inputs are upcast to float32 first.
function JH(r) {
  let t8 = v(r, "x", "erf");
  $(t8.dtype === "int32" || t8.dtype === "float32", () => "Input dtype must be `int32` or `float32`."), t8.dtype === "int32" && (t8 = We(t8, "float32"));
  let e = {
    x: t8
  };
  return T.runKernel(xn, e);
}
var g2 = N({
  erf_: JH
});
// True when the axis list `r` is exactly the trailing (inner-most) axes of
// a rank-`t8` tensor, listed in ascending order. An empty list qualifies.
function Bw(r, t8) {
  return r.every((_, idx) => r[r.length - idx - 1] === t8 - 1 - idx);
}
// Interleaves `r` (entries for axes NOT listed in `e`) with `t8` (entries
// for the axes listed in `e`) back into a single array of combined length,
// preserving original axis order.
function x2(r, t8, e) {
  const combined = [];
  const total = r.length + t8.length;
  let fromKept = 0;
  let fromReduced = 0;
  for (let axis = 0; axis < total; axis++) {
    if (e.indexOf(axis) === -1) {
      combined.push(r[fromKept]);
      fromKept += 1;
    } else {
      combined.push(t8[fromReduced]);
      fromReduced += 1;
    }
  }
  return combined;
}
// Splits shape `r` into [keptDims, reducedDims] given the reduction axes
// `t8`: entries not indexed by `t8`, then the entries `t8` selects.
function eK(r, t8) {
  const kept = r.filter((_, axis) => t8.indexOf(axis) === -1);
  const reduced = t8.map(axis => r[axis]);
  return [kept, reduced];
}
// Re-inserts size-1 entries at the reduced axes `t8` into the reduced shape
// `r`, producing the keepDims form of the shape (delegates to x2).
function ii(r, t8) {
  const ones = t8.map(() => 1);
  return x2(r, ones, t8);
}
// Asserts (via $) that the axes `t8` are the inner-most axes of a rank-`e`
// tensor; `r` is the op name used in the error message.
function tK(r, t8, e) {
  $(Bw(t8, e), () => `${r} supports only inner-most axes for now. Got axes ${t8} and rank-${e} input.`);
}
// Permutation that moves the axes in `r` to the inner-most positions of a
// rank-`t8` tensor (non-listed axes first, then `r` in its given order),
// or null when `r` is already inner-most.
function rK(r, t8) {
  if (Bw(r, t8)) {
    return null;
  }
  const perm = [];
  for (let axis = 0; axis < t8; ++axis) {
    if (r.indexOf(axis) === -1) {
      perm.push(axis);
    }
  }
  for (const axis of r) {
    perm.push(axis);
  }
  return perm;
}
// Inverse of the permutation `r`: sorting (source, target) pairs by target
// recovers, for each output position, the index that maps there.
function oK(r) {
  const indexed = r.map((target, source) => [source, target]);
  indexed.sort((left, right) => left[1] - right[1]);
  return indexed.map(pair => pair[0]);
}
// The last `r` axes of a rank-`t8` tensor, in ascending order.
function nK(r, t8) {
  return Array.from({ length: r }, (_, i) => t8 - r + i);
}
// max reduction op over reductionIndices t8; keepDims e.
function aK(r, t8 = null, e = false) {
  let n = {
      x: v(r, "x", "max")
    },
    s = {
      reductionIndices: t8,
      keepDims: e
    };
  return T.runKernel(Vn, n, s);
}
var Ra = N({
  max_: aK
});
// min reduction op over axis t8; keepDims e.
function iK(r, t8 = null, e = false) {
  let n = {
      x: v(r, "x", "min")
    },
    s = {
      axis: t8,
      keepDims: e
    };
  return T.runKernel(Hn, n, s);
}
var Tl = N({
  min_: iK
});
// pow op: element-wise base^exp with broadcasting (Oe upcasts dtypes).
function uK(r, t8) {
  let e = v(r, "base", "pow"),
    o = v(t8, "exp", "pow");
  [e, o] = Oe(e, o);
  let n = {
    a: e,
    b: o
  };
  return T.runKernel(rs, n);
}
var ui = N({
  pow_: uK
});
// scalar factory: wraps a primitive (number|boolean|string|Uint8Array) as a
// rank-0 tensor via Sr; rejects arrays/typed arrays except the encoded-string
// Uint8Array case.
function ke(r, t8) {
  if ((Ot(r) && t8 !== "string" || Array.isArray(r)) && t8 !== "complex64") throw new Error("Error creating a new Scalar: value must be a primitive (number|boolean|string)");
  if (t8 === "string" && Ot(r) && !(r instanceof Uint8Array)) throw new Error("When making a scalar from encoded string, the value must be `Uint8Array`.");
  return Sr(r, [], [], t8);
}
// sqrt op.
function pK(r) {
  let e = {
    x: v(r, "x", "sqrt", "float32")
  };
  return T.runKernel(Ss, e);
}
var Dr = N({
  sqrt_: pK
});
// square op; note the kernel is addressed by its literal name here.
function cK(r) {
  let t8 = v(r, "x", "square"),
    e = {};
  return T.runKernel("Square", {
    x: t8
  }, e);
}
var er = N({
  square_: cK
});
// sum reduction op over axis t8 (keepDims e); bool inputs are upcast to
// int32 before summing.
function lK(r, t8 = null, e = false) {
  let o = v(r, "x", "sum");
  o.dtype === "bool" && (o = We(o, "int32"));
  let n = {
      x: o
    },
    s = {
      axis: t8,
      keepDims: e
    };
  return T.runKernel(Is, n, s);
}
var ot = N({
  sum_: lK
});
// norm op: vector/matrix norm of order t8 over axes e; o = keepDims.
// Delegates to y2 and re-expands reduced axes when keepDims is set.
function mK(r, t8 = "euclidean", e = null, o = false) {
  r = v(r, "x", "norm");
  let n = y2(r, t8, e),
    s = n.shape;
  if (o) {
    let a = Ei(e, r.shape);
    s = ii(n.shape, a);
  }
  return W(n, s);
}
// Core norm dispatch: r = tensor, t8 = ord, e = axis (null, number, or
// 1/2-element array). Scalars take abs; rank>1 tensors with no axis are
// flattened and re-dispatched.
function y2(r, t8, e = null) {
  if (r.rank === 0) return Jt(r);
  if (r.rank !== 1 && e === null) return y2(W(r, [-1]), t8, e);
  if (r.rank === 1 || typeof e == "number" || Array.isArray(e) && e.length === 1) {
    // Vector norms: 1 (sum |x|), inf (max |x|), -inf (min |x|), 2 (sqrt sum x^2).
    if (t8 === 1) return ot(Jt(r), e);
    if (t8 === 1 / 0) return Ra(Jt(r), e);
    if (t8 === -1 / 0) return Tl(Jt(r), e);
    if (t8 === "euclidean" || t8 === 2) return Dr(ot(ui(Jt(r), ke(2, "int32")), e));
    throw new Error(`Error in norm: invalid ord value: ${t8}`);
  }
  if (Array.isArray(e) && e.length === 2) {
    // Matrix norms over the two given axes: 1, inf, -inf, Frobenius.
    if (t8 === 1) return Ra(ot(Jt(r), e[0]), e[1] - 1);
    if (t8 === 1 / 0) return Ra(ot(Jt(r), e[1]), e[0]);
    if (t8 === -1 / 0) return Tl(ot(Jt(r), e[1]), e[0]);
    if (t8 === "fro" || t8 === "euclidean") return Dr(ot(er(r), e));
    throw new Error(`Error in norm: invalid ord value: ${t8}`);
  }
  throw new Error(`Error in norm: invalid axis: ${e}`);
}
var Ku = N({
  norm_: mK
});
// euclideanNorm op: norm with ord fixed to "euclidean".
function dK(r, t8 = null, e = false) {
  return Ku(r, "euclidean", t8, e);
}
var b2 = N({
  euclideanNorm_: dK
});
// exp op.
function fK(r) {
  let e = {
    x: v(r, "x", "exp")
  };
  return T.runKernel(bn, e);
}
var $o = N({
  exp_: fK
});
// expandDims op: inserts a size-1 dimension at axis t8 (must be <= rank).
function hK(r, t8 = 0) {
  let e = v(r, "x", "expandDims", "string_or_numeric");
  $(t8 <= e.rank, () => "Axis must be <= rank of the tensor");
  let o = {
      input: e
    },
    n = {
      dim: t8
    };
  return T.runKernel(na, o, n);
}
var Ms = N({
  expandDims_: hK
});
// expm1 op: e^x - 1.
function gK(r) {
  let e = {
    x: v(r, "x", "expm1")
  };
  return T.runKernel(Cn, e);
}
var C2 = N({
  expm1_: gK
});
// tile op: repeats tensor `r` `t8[i]` times along each axis i; `reps` must
// supply one repetition count per input axis.
function xK(r, t8) {
  let e = v(r, "x", "tile", "string_or_numeric");
  // Fix: the original assertion message said "Error in transpose:" — a
  // copy-paste defect; this is the tile op.
  $(e.rank === t8.length, () => `Error in tile: rank of input ${e.rank} must match length of reps ${t8}.`);
  let o = {
      x: e
    },
    n = {
      reps: t8
    };
  return T.runKernel(po, o, n);
}
var cu = N({
  tile_: xK
});
// eye op: identity-like matrix of shape [r, t8] (t8 defaults to r), with
// dtype o, optionally tiled by a batchShape e of up to three dimensions.
function yK(r, t8, e, o = "float32") {
  t8 == null && (t8 = r);
  let n = me([r, t8], o),
    s = r <= t8 ? r : t8;
  // Set ones along the main diagonal of the buffer, then materialize.
  for (let i = 0; i < s; ++i) n.set(1, i, i);
  let a = W(n.toTensor(), [r, t8]);
  if (e == null) return a;
  if (e.length === 1) return cu(Ms(a, 0), [e[0], 1, 1]);
  if (e.length === 2) return cu(Ms(Ms(a, 0), 0), [e[0], e[1], 1, 1]);
  if (e.length === 3) return cu(Ms(Ms(Ms(a, 0), 0), 0), [e[0], e[1], e[2], 1, 1]);
  // Fix: the branches above clearly handle 3D batchShapes, but the original
  // message claimed only 1D and 2D were supported.
  throw new Error(`eye() currently supports only 1D, 2D and 3D batchShapes, but received ${e.length}D.`);
}
var wd = N({
  eye_: yK
});
// floor op.
function bK(r) {
  let e = {
    x: v(r, "x", "floor", "float32")
  };
  return T.runKernel(Sn, e);
}
var Sd = N({
  floor_: bK
});
// gather op: gathers slices of x at int32 indices along axis e, with
// batchDims o.
function CK(r, t8, e = 0, o = 0) {
  let n = v(r, "x", "gather"),
    s = v(t8, "indices", "gather", "int32"),
    a = {
      x: n,
      indices: s
    },
    i = {
      axis: e,
      batchDims: o
    };
  return T.runKernel(aa, a, i);
}
var Id = N({
  gather_: CK
});
// greater op: element-wise a > b with broadcasting.
function wK(r, t8) {
  let e = v(r, "a", "greater", "string_or_numeric"),
    o = v(t8, "b", "greater", "string_or_numeric");
  [e, o] = Oe(e, o), rt(e.shape, o.shape);
  let n = {
    a: e,
    b: o
  };
  return T.runKernel(Nn, n);
}
var qu = N({
  greater_: wK
});
// greaterEqual op: element-wise a >= b with broadcasting.
function SK(r, t8) {
  let e = v(r, "a", "greaterEqual", "string_or_numeric"),
    o = v(t8, "b", "greaterEqual", "string_or_numeric");
  [e, o] = Oe(e, o), rt(e.shape, o.shape);
  let n = {
    a: e,
    b: o
  };
  return T.runKernel(Tn, n);
}
var vd = N({
  greaterEqual_: SK
});
// imag op: imaginary part of a complex tensor.
function IK(r) {
  let e = {
    input: v(r, "input", "imag")
  };
  return T.runKernel(Gi, e);
}
var lu = N({
  imag_: IK
});
// isFinite op.
function vK(r) {
  let e = {
    x: v(r, "x", "isFinite")
  };
  return T.runKernel(_n, e);
}
var w2 = N({
  isFinite_: vK
});
// isInf op.
function kK(r) {
  let e = {
    x: v(r, "x", "isInf")
  };
  return T.runKernel($n, e);
}
var S2 = N({
  isInf_: kK
});
// isNaN op.
function NK(r) {
  let e = {
    x: v(r, "x", "isNaN")
  };
  return T.runKernel(En, e);
}
var I2 = N({
  isNaN_: NK
});
// leakyRelu op with negative-slope alpha t8 (default 0.2).
function TK(r, t8 = 0.2) {
  let o = {
      x: v(r, "x", "leakyRelu")
    },
    n = {
      alpha: t8
    };
  return T.runKernel(Rn, o, n);
}
var kd = N({
  leakyRelu_: TK
});
// less op: element-wise a < b with broadcasting.
function _K(r, t8) {
  let e = v(r, "a", "less", "string_or_numeric"),
    o = v(t8, "b", "less", "string_or_numeric");
  [e, o] = Oe(e, o), rt(e.shape, o.shape);
  let n = {
    a: e,
    b: o
  };
  return T.runKernel(Dn, n);
}
var _l = N({
  less_: _K
});
// lessEqual op: element-wise a <= b with broadcasting.
function $K(r, t8) {
  let e = v(r, "a", "lessEqual", "string_or_numeric"),
    o = v(t8, "b", "lessEqual", "string_or_numeric");
  [e, o] = Oe(e, o), rt(e.shape, o.shape);
  let n = {
    a: e,
    b: o
  };
  return T.runKernel(An, n);
}
var mc = N({
  lessEqual_: $K
});
// linspace op: e evenly spaced values from r to t8 (e must be positive).
function v2(r, t8, e) {
  if (e <= 0) throw new Error("The number of values should be positive.");
  let o = {
    start: r,
    stop: t8,
    num: e
  };
  return T.runKernel(Fn, {}, o);
}
// localResponseNormalization op over a rank-3/4 input; t8 = depthRadius
// (must be an integer), e = bias, o = alpha, n = beta.
function EK(r, t8 = 5, e = 1, o = 1, n = 0.5) {
  let s = v(r, "x", "localResponseNormalization");
  $(s.rank === 4 || s.rank === 3, () => `Error in localResponseNormalization: x must be rank 3 or 4 but got
rank ${s.rank}.`), $(qa(t8), () => `Error in localResponseNormalization: depthRadius must be an integer but got depthRadius ${t8}.`);
  let a = s,
    i = false;
  // Promote rank-3 input to a single-element batch; unwrapped on return.
  s.rank === 3 && (i = true, a = W(s, [1, s.shape[0], s.shape[1], s.shape[2]]));
  let p = {
      x: a
    },
    u = {
      depthRadius: t8,
      bias: e,
      alpha: o,
      beta: n
    },
    c = T.runKernel(zn, p, u);
  return i ? W(c, [c.shape[1], c.shape[2], c.shape[3]]) : c;
}
var k2 = N({
  localResponseNormalization_: EK
});
// log op (natural logarithm).
function RK(r) {
  let e = {
    x: v(r, "x", "log", "float32")
  };
  return T.runKernel(Pn, e);
}
var pi = N({
  log_: RK
});
// log1p op: ln(1 + x).
function DK(r) {
  let e = {
    x: v(r, "x", "log1p")
  };
  return T.runKernel(On, e);
}
var Nd = N({
  log1p_: DK
});
// grad(f): returns g(x, dy?) computing df/dx via the engine's gradient
// machinery (T.gradients); Td rejects disconnected (null) gradients.
function AK(r) {
  return $(qs(r), () => "The f passed in grad(f) must be a function"), (t8, e) => {
    let o = v(t8, "x", "tf.grad", "string_or_numeric"),
      n = e != null ? v(e, "dy", "tf.grad") : null;
    return T.tidy(() => {
      let {
        value: s,
        grads: a
      } = T.gradients(() => r(o), [o], n);
      // dy, when supplied, must match the shape of f(x).
      return n != null && yt(s.shape, n.shape, "The shape of dy passed in grad(f)(x, dy) must match the shape returned by f(x)"), Td(a), a[0];
    });
  };
}
// grads(f): multi-input version of grad; args must be an array.
function FK(r) {
  return $(qs(r), () => "The f passed in grads(f) must be a function"), (t8, e) => {
    $(Array.isArray(t8), () => "The args passed in grads(f)(args) must be an array of `Tensor`s or `TensorLike`s");
    let o = si(t8, "args", "tf.grads", "string_or_numeric"),
      n = e != null ? v(e, "dy", "tf.grads") : null;
    return T.tidy(() => {
      let {
        value: s,
        grads: a
      } = T.gradients(() => r(...o), o, n);
      return n != null && yt(s.shape, n.shape, "The shape of dy passed in grads(f)([x1,...], dy) must match the shape returned by f([x1,...])"), Td(a), a;
    });
  };
}
// valueAndGrad(f): like grad but also returns f(x)'s value.
function PK(r) {
  return $(qs(r), () => "The f passed in valueAndGrad(f) must be a function"), (t8, e) => {
    $(t8 instanceof ut, () => "The x passed in valueAndGrad(f)(x) must be a tensor"), $(e == null || e instanceof ut, () => "The dy passed in valueAndGrad(f)(x, dy) must be a tensor");
    let {
      grads: o,
      value: n
    } = T.gradients(() => r(t8), [t8], e);
    return Td(o), {
      grad: o[0],
      value: n
    };
  };
}
// valueAndGrads(f): multi-input version of valueAndGrad.
function OK(r) {
  return $(qs(r), () => "The f passed in valueAndGrads(f) must be a function"), (t8, e) => {
    $(Array.isArray(t8) && t8.every(n => n instanceof ut), () => "The args passed in valueAndGrads(f)(args) must be array of tensors"), $(e == null || e instanceof ut, () => "The dy passed in valueAndGrads(f)(args, dy) must be a tensor");
    let o = T.gradients(() => r(...t8), t8, e);
    return e != null && yt(o.value.shape, e.shape, "The shape of dy passed in valueAndGrads(f)([x1,...], dy) must match the shape returned by f([x1,...])"), Td(o.grads), o;
  };
}
// variableGrads(f, varList?): gradients of a scalar loss w.r.t. trainable
// variables (all registered variables when varList is omitted). Non-trainable
// listed variables come back with a null gradient.
function zw(r, t8) {
  $(qs(r), () => "The f passed in variableGrads(f) must be a function"), $(t8 == null || Array.isArray(t8) && t8.every(u => u instanceof oi), () => "The varList passed in variableGrads(f, varList) must be an array of variables");
  let e = t8 != null;
  if (!e) {
    t8 = [];
    for (let u in T.registeredVariables) t8.push(T.registeredVariables[u]);
  }
  let o = e ? t8.filter(u => !u.trainable) : null,
    n = t8.length;
  t8 = t8.filter(u => u.trainable), $(t8.length > 0, () => `variableGrads() expects at least one of the input variables to be trainable, but none of the ${n} variables is trainable.`);
  let s = true,
    {
      value: a,
      grads: i
    } = T.gradients(r, t8, null, s);
  $(i.some(u => u != null), () => "Cannot find a connection between any variable and the result of the loss function y=f(x). Please make sure the operations that use variables are inside the function f passed to minimize()."), $(a.rank === 0, () => `The f passed in variableGrads(f) must return a scalar, but it returned a rank-${a.rank} tensor`);
  let p = {};
  return t8.forEach((u, c) => {
    i[c] != null && (p[u.name] = i[c]);
  }), o != null && o.forEach(u => p[u.name] = null), {
    value: a,
    grads: p
  };
}
// customGrad: registers a function with a user-supplied gradient.
function vr(r) {
  return T.customGrad(r);
}
// Fails fast when any entry of the gradient list is null/undefined, i.e.
// some input is disconnected from the output of f.
function Td(r) {
  const disconnected = r.filter(e => e == null);
  if (disconnected.length > 0) throw new Error(`Cannot compute gradient of y=f(x) with respect to x. Make sure that
the f you passed encloses all operations that lead from x to y.`);
}
// neg op.
function MK(r) {
  let e = {
    x: v(r, "x", "neg")
  };
  return T.runKernel(pa, e);
}
var cr = N({
  neg_: MK
});
// softplus op: ln(1 + e^x).
function LK(r) {
  let e = {
    x: v(r, "x", "softplus")
  };
  return T.runKernel(ws, e);
}
var _d = N({
  softplus_: LK
});
// logSigmoid op: computed as -softplus(-x) via a custom gradient (vr);
// the gradient multiplies dy by sigmoid(-x) ($a).
function BK(r) {
  let t8 = v(r, "x", "logSigmoid");
  return vr(o => ({
    value: cr(_d(cr(o))),
    gradFunc: a => se(a, $a(cr(o)))
  }))(t8);
}
var N2 = N({
  logSigmoid_: BK
});
// sub op: element-wise a - b with broadcasting.
function zK(r, t8) {
  let e = v(r, "a", "sub"),
    o = v(t8, "b", "sub");
  [e, o] = Oe(e, o);
  let n = {
    a: e,
    b: o
  };
  return T.runKernel(_s, n);
}
var Te = N({
  sub_: zK
});
// logSoftmax op along axis t8 (default: last); only the last axis is
// supported. Uses the max-shift trick (x - max - log(sum(exp(...)))) and a
// custom gradient saving the forward output.
function VK(r, t8 = -1) {
  let e = v(r, "logits", "logSoftmax");
  if (t8 === -1 && (t8 = e.rank - 1), t8 !== e.rank - 1) throw Error(`Log Softmax along a non-last dimension is not yet supported. Logits was rank ${e.rank} and axis was ${t8}`);
  return vr((n, s) => {
    let i = Ra(n, t8, true),
      p = Te(n, i),
      u = Te(We(p, "float32"), pi(ot($o(p), t8, true)));
    return s([u]), {
      value: u,
      gradFunc: (l, m) => {
        let [d] = m,
          f = true,
          h = $o(d);
        // dx = dy - sum(dy, axis, keepDims) * softmax(x)
        return Te(l, se(ot(l, t8, f), h));
      }
    };
  })(e);
}
var T2 = N({
  logSoftmax_: VK
});
// logSumExp op over axes t8 (keepDims e), using the numerically stable
// max-shift form: max + log(sum(exp(x - max))).
function WK(r, t8 = null, e = false) {
  let o = v(r, "x", "logSumExp"),
    n = Ei(t8, o.shape),
    s = Ra(o, n, true),
    a = Te(o, s),
    i = $o(a),
    p = ot(i, n),
    u = pi(p),
    c = Ce(W(s, u.shape), u);
  if (e) {
    let l = ii(c.shape, n);
    return W(c, l);
  }
  return c;
}
var $d = N({
  logSumExp_: WK
});
// logicalAnd op: element-wise a && b on bool tensors, with broadcasting.
function UK(r, t8) {
  let e = v(r, "a", "logicalAnd", "bool"),
    o = v(t8, "b", "logicalAnd", "bool");
  rt(e.shape, o.shape);
  let n = {
    a: e,
    b: o
  };
  return T.runKernel(Mn, n);
}
var ju = N({
  logicalAnd_: UK
});
// logicalNot op.
function GK(r) {
  let e = {
    x: v(r, "x", "logicalNot", "bool")
  };
  return T.runKernel(Ln, e);
}
var Ed = N({
  logicalNot_: GK
});
// logicalOr op.
function HK(r, t8) {
  let e = v(r, "a", "logicalOr", "bool"),
    o = v(t8, "b", "logicalOr", "bool");
  rt(e.shape, o.shape);
  let n = {
    a: e,
    b: o
  };
  return T.runKernel(Bn, n);
}
var Rd = N({
  logicalOr_: HK
});
// logicalXor op, composed as (a || b) && !(a && b).
function KK(r, t8) {
  let e = v(r, "a", "logicalXor", "bool"),
    o = v(t8, "b", "logicalXor", "bool");
  return rt(e.shape, o.shape), ju(Rd(r, t8), Ed(ju(r, t8)));
}
var _2 = N({
  logicalXor_: KK
});
// 2^31: size limit for the int32 output indices of searchSorted.
var Dd = 2147483648;
// searchSorted op: for each value, the insertion index into the (row-wise
// sorted) sortedSequence; e selects the "left"/"right" insertion side.
function qK(r, t8, e = "left") {
  let o = v(r, "sortedSequence", "searchSorted"),
    n = v(t8, "values", "searchSorted"),
    s = o.shape[o.shape.length - 1],
    a = n.shape[n.shape.length - 1],
    i = W(o, [-1, s]),
    p = W(n, [-1, a]);
  if (i.rank < 2) throw new Error("Sorted input argument must be at least 2-dimensional");
  if (i.shape[0] !== p.shape[0]) throw new Error("Leading dimension of 'sortedSequence' and 'values' must match.");
  if (He(p.shape) >= Dd) throw new Error(`values tensor size must less than ${Dd}`);
  if (i.shape[1] >= Dd) throw new Error(`trailing dim_size must less than ${Dd} for int32 output type, was ${i.shape[1]}`);
  let u = {
      sortedSequence: i,
      values: p
    },
    c = {
      side: e
    };
  return T.runKernel(hs, u, c);
}
var $l = N({
  searchSorted_: qK
});
// lowerBound: searchSorted with the "left" side fixed.
function $2(r, t8) {
  return $l(r, t8, "left");
}
// maxPool op: 2D max pooling; t8 = filterSize, e = strides, o = pad,
// n = dimRoundingMode. Dilations are fixed to 1 here.
function jK(r, t8, e, o, n) {
  let s = v(r, "x", "maxPool"),
    a = 1,
    i = s,
    p = false;
  // Promote rank-3 input to a single-element batch; unwrapped on return.
  s.rank === 3 && (p = true, i = W(s, [1, s.shape[0], s.shape[1], s.shape[2]])), $(i.rank === 4, () => `Error in maxPool: input must be rank 4 but got rank ${i.rank}.`), $(xr(e, a), () => `Error in maxPool: Either strides or dilations must be 1. Got strides ${e} and dilations '${a}'`), Bt("maxPool", o, n);
  let u = {
      x: i
    },
    c = {
      filterSize: t8,
      strides: e,
      pad: o,
      dimRoundingMode: n
    },
    l = T.runKernel(Un, u, c);
  return p ? W(l, [l.shape[1], l.shape[2], l.shape[3]]) : l;
}
var Ad = N({
  maxPool_: jK
});
// maxPool3d op over a rank-4/5 input; only NDHWC data format is accepted.
function XK(r, t8 = [1, 1, 1], e, o, n, s = "NDHWC") {
  let a = v(r, "x", "maxPool3d"),
    i = a,
    p = false;
  a.rank === 4 && (p = true, i = W(a, [1, a.shape[0], a.shape[1], a.shape[2], a.shape[3]])), $(i.rank === 5, () => `Error in maxPool3d: x must be rank 5 but got rank ${i.rank}.`), $(s === "NDHWC", () => `Error in maxPool3d: Only NDHWC is currently supported, but got dataFormat of ${s}`), Bt("maxPool3d", o, n);
  let u = {
      x: i
    },
    c = {
      filterSize: t8,
      strides: e,
      pad: o,
      dimRoundingMode: n,
      dataFormat: s
    },
    l = T.runKernel(ia, u, c);
  return p ? W(l, [l.shape[1], l.shape[2], l.shape[3], l.shape[4]]) : l;
}
var E2 = N({
  maxPool3d_: XK
});
// maxPoolWithArgmax op: returns both the pooled result and the argmax
// indices (kernel returns a two-element array).
function YK(r, t8, e, o, n = false) {
  let a = {
      x: v(r, "x", "maxPoolWithArgmax")
    },
    i = {
      filterSize: t8,
      strides: e,
      pad: o,
      includeBatchInIndex: n
    },
    p = T.runKernel(ua, a, i);
  return {
    result: p[0],
    indexes: p[1]
  };
}
var R2 = N({
  maxPoolWithArgmax_: YK
});
// maximum op: element-wise max(a, b) with broadcasting; bool operands are
// upcast to int32.
function QK(r, t8) {
  let e = v(r, "a", "maximum"),
    o = v(t8, "b", "maximum");
  [e, o] = Oe(e, o), e.dtype === "bool" && (e = We(e, "int32"), o = We(o, "int32")), rt(e.shape, o.shape);
  let n = {
    a: e,
    b: o
  };
  return T.runKernel(Wn, n);
}
var Fd = N({
  maximum_: QK
});
// mean reduction op over axis t8; keepDims e.
function ZK(r, t8 = null, e = false) {
  let n = {
      x: v(r, "x", "mean")
    },
    s = {
      axis: t8,
      keepDims: e
    };
  return T.runKernel(Gn, n, s);
}
var Xu = N({
  mean_: ZK
});
// zeros factory: tensor of zeros with shape r and dtype t8; complex64 is
// built from two float32 halves.
function Gr(r, t8 = "float32") {
  if (wt(r), t8 === "complex64") {
    let o = Gr(r, "float32"),
      n = Gr(r, "float32");
    return Er(o, n);
  }
  let e = Yp(He(r), t8);
  return T.makeTensor(e, r, t8);
}
// ones factory: like zeros but filled with ones; the imaginary part of a
// complex64 result is zeros.
function Da(r, t8 = "float32") {
  if (wt(r), t8 === "complex64") {
    let o = Da(r, "float32"),
      n = Gr(r, "float32");
    return Er(o, n);
  }
  let e = fl(He(r), t8);
  return T.makeTensor(e, r, t8);
}
// meshgrid op: coordinate matrices from 1D coordinate vectors, with "xy"
// (default) or "ij" indexing, built via matMul (Ze) against ones.
function D2(r, t8, {
  indexing: e = "xy"
} = {}) {
  if (e !== "xy" && e !== "ij") throw new TypeError(`${e} is not a valid third argument to meshgrid`);
  if (r === void 0) return [];
  let o = v(r, "x", "meshgrid", r instanceof ut ? r.dtype : "float32");
  if (t8 === void 0) return [o];
  let n = v(t8, "y", "meshgrid", t8 instanceof ut ? t8.dtype : "float32"),
    s = He(o.shape),
    a = He(n.shape);
  return e === "xy" ? (o = W(o, [1, -1]), n = W(n, [-1, 1]), [Ze(Da([a, 1], o.dtype), o), Ze(n, Da([1, s], n.dtype))]) : (o = W(o, [-1, 1]), n = W(n, [1, -1]), [Ze(o, Da([1, a], o.dtype)), Ze(Da([s, 1], n.dtype), n)]);
}
// minimum op: element-wise min(a, b) with broadcasting; bool operands are
// upcast to int32.
function JK(r, t8) {
  let e = v(r, "a", "minimum"),
    o = v(t8, "b", "minimum");
  [e, o] = Oe(e, o), e.dtype === "bool" && (e = We(e, "int32"), o = We(o, "int32")), rt(e.shape, o.shape);
  let n = {
    a: e,
    b: o
  };
  return T.runKernel(Kn, n);
}
var Yu = N({
  minimum_: JK
});
// mirrorPad op: pads with mirrored values; e is "reflect" or "symmetric".
// Reflect mode excludes the border element, hence the offset n below.
function eq(r, t8, e) {
  $(e === "reflect" || e === "symmetric", () => `Invalid mode. Mode must be either reflect or symmetric. Got ${e}.`);
  let o = v(r, "x", "mirrorPad");
  if (o.rank === 0) throw new Error("mirrorPad(scalar) is not defined. Pass non-scalar to mirrorPad");
  $(t8.length === o.rank, () => `Padding doesn't match input. Must be ${o.rank}. Got ${t8.length}.`);
  let n = e === "reflect" ? 1 : 0;
  for (let i = 0; i < o.rank; i++) $(t8[i].length === 2, () => "Invalid number of paddings. Must be length of 2 each."), $(t8[i][0] >= 0 && t8[i][0] <= o.shape[i] - n && t8[i][1] >= 0 && t8[i][1] <= o.shape[i] - n, () => `Padding in dimension ${i} cannot be greater than or equal to ${o.shape[i] - n} or less than 0 for input of shape ${o.shape}`);
  let s = {
      paddings: t8,
      mode: e
    },
    a = {
      x: o
    };
  return T.runKernel(qn, a, s);
}
var A2 = N({
  mirrorPad_: eq
});
// mod op: element-wise a mod b with broadcasting.
function tq(r, t8) {
  let e = v(r, "a", "mod"),
    o = v(t8, "b", "mod");
  [e, o] = Oe(e, o);
  let n = {
    a: e,
    b: o
  };
  return T.runKernel(jn, n);
}
var F2 = N({
  mod_: tq
});
// moments op: mean and variance over axes t8 (keepDims e), with variance
// computed as mean of squared deviations in float32.
function rq(r, t8 = null, e = false) {
  r = v(r, "x", "moments");
  let o = Ei(t8, r.shape),
    n = Xu(r, o, e),
    s = n.shape;
  e || (s = ii(n.shape, o));
  let a = er(Te(We(r, "float32"), W(n, s))),
    i = Xu(a, o, e);
  return {
    mean: n,
    variance: i
  };
}
var P2 = N({
  moments_: rq
});
// multiRNNCell op: threads data through a stack of RNN cells r, each cell
// consuming (input, c, h) and returning [newC, newH]; returns the per-cell
// new c and h states as two arrays.
function oq(r, t8, e, o) {
  let n = v(t8, "data", "multiRNNCell"),
    s = si(e, "c", "multiRNNCell"),
    a = si(o, "h", "multiRNNCell"),
    i = n,
    p = [];
  for (let l = 0; l < r.length; l++) {
    let m = r[l](i, s[l], a[l]);
    p.push(m[0]), p.push(m[1]), i = m[1];
  }
  let u = [],
    c = [];
  for (let l = 0; l < p.length; l += 2) u.push(p[l]), c.push(p[l + 1]);
  return [u, c];
}
var O2 = N({
  multiRNNCell_: oq
});
// multinomial op: draws t8 samples from (log-)probabilities r; e = seed
// (random when omitted), o = whether the distribution is already normalized.
function nq(r, t8, e, o = false) {
  let n = v(r, "logits", "multinomial"),
    s = n.size,
    a = n.rank;
  if (s < 2) throw new Error(`Error in multinomial: you need at least 2 outcomes, but got ${s}.`);
  if (a > 2) throw new Error(`Rank of probabilities must be 1 or 2, but is ${a}`);
  e = e || Math.random();
  let p = {
      // Rank-1 logits are promoted to a single-row batch; unwrapped on return.
      logits: a === 1 ? W(n, [1, -1]) : n
    },
    u = {
      numSamples: t8,
      seed: e,
      normalized: o
    },
    c = T.runKernel(Xn, p, u);
  return a === 1 ? W(c, [c.size]) : c;
}
var M2 = N({
  multinomial_: nq
});
// notEqual op: element-wise a != b with broadcasting.
function sq(r, t8) {
  let e = v(r, "a", "notEqual", "string_or_numeric"),
    o = v(t8, "b", "notEqual", "string_or_numeric");
  [e, o] = Oe(e, o), rt(e.shape, o.shape);
  let n = {
    a: e,
    b: o
  };
  return T.runKernel(Qn, n);
}
var Pd = N({
  notEqual_: sq
});
// oneHot op: depth t8 (must be >= 2), with configurable on/off values and
// output dtype.
function aq(r, t8, e = 1, o = 0, n = "int32") {
  if (t8 < 2) throw new Error(`Error in oneHot: depth must be >=2, but it is ${t8}`);
  let a = {
      indices: v(r, "indices", "oneHot", "int32")
    },
    i = {
      dtype: n,
      depth: t8,
      onValue: e,
      offValue: o
    };
  return T.runKernel(es, a, i);
}
var El = N({
  oneHot_: aq
});
// onesLike op.
function iq(r) {
  let e = {
    x: v(r, "x", "onesLike")
  };
  return T.runKernel(ca, e);
}
var L2 = N({
  onesLike_: iq
});
// outerProduct op: rank-1 x rank-1 -> rank-2, via matMul (Ze) of column
// and row reshapes.
function uq(r, t8) {
  let e = v(r, "v1", "outerProduct"),
    o = v(t8, "v2", "outerProduct");
  $(e.rank === 1 && o.rank === 1, () => `Error in outerProduct: inputs must be rank 1, but got ranks ${e.rank} and ${o.rank}.`);
  let n = W(e, [-1, 1]),
    s = W(o, [1, -1]);
  return Ze(n, s);
}
var B2 = N({
  outerProduct_: uq
});
// pad op: pads with a constant value e (default 0); t8 is a list of
// [before, after] pairs, one per axis.
function pq(r, t8, e = 0) {
  let o = v(r, "x", "pad");
  if (o.rank === 0) throw new Error("pad(scalar) is not defined. Pass non-scalar to pad");
  let n = {
      paddings: t8,
      constantValue: e
    },
    s = {
      x: o
    };
  return T.runKernel(ts, s, n);
}
var Aa = N({
  pad_: pq
});
// pad1d: single [before, after] pair.
function cq(r, t8, e = 0) {
  return $(t8.length === 2, () => "Invalid number of paddings. Must be length of 2."), Aa(r, [t8], e);
}
var z2 = N({
  pad1d_: cq
});
// pad2d: two [before, after] pairs.
function lq(r, t8, e = 0) {
  return $(t8.length === 2 && t8[0].length === 2 && t8[1].length === 2, () => "Invalid number of paddings. Must be length of 2 each."), Aa(r, t8, e);
}
var V2 = N({
  pad2d_: lq
});
// pad3d: three [before, after] pairs.
function mq(r, t8, e = 0) {
  return $(t8.length === 3 && t8[0].length === 2 && t8[1].length === 2 && t8[2].length === 2, () => "Invalid number of paddings. Must be length of 2 each."), Aa(r, t8, e);
}
var W2 = N({
  pad3d_: mq
});
// pad4d: four [before, after] pairs.
function dq(r, t8, e = 0) {
  return $(t8.length === 4 && t8[0].length === 2 && t8[1].length === 2 && t8[2].length === 2 && t8[3].length === 2, () => "Invalid number of paddings. Must be length of 2 each."), Aa(r, t8, e);
}
var U2 = N({
  pad4d_: dq
});
// spaceToBatchND op: t8 = blockShape, e = paddings. Each padded spatial
// dimension must be divisible by its block size (asserted below).
function fq(r, t8, e) {
  let o = v(r, "x", "spaceToBatchND");
  $(o.rank >= 1 + t8.length, () => `input rank ${o.rank} should be > than [blockShape] ${t8.length}`), $(e.length === t8.length, () => `paddings.shape[0] ${e.length} must be equal to [blockShape] ${t8.length}`), $(o.shape.reduce((a, i, p) => p > 0 && p <= t8.length ? a && (i + e[p - 1][0] + e[p - 1][1]) % t8[p - 1] === 0 : a, true), () => `input spatial dimensions ${o.shape.slice(1)} with paddings ${e.toString()} must be divisible by blockShapes ${t8.toString()}`);
  let n = {
      x: o
    },
    s = {
      blockShape: t8,
      paddings: e
    };
  return T.runKernel(ga, n, s);
}
var Od = N({
  spaceToBatchND_: fq
});
// pool op implementation: avg/max pooling (e selects which) with dilation
// support, implemented via the spaceToBatch/batchToSpace rewrite when the
// dilation is not [1, 1]. t8 = windowShape, o = pad, n = dilations,
// s = strides, a = dimRoundingMode.
function hq(r, t8, e, o, n, s, a) {
  n == null && (n = [1, 1]), s == null && (s = 1), o === 0 && (o = "valid");
  let i = v(r, "x", "maxPool"),
    p = i,
    u = false;
  // Promote rank-3 input to a single-element batch; unwrapped on return.
  i.rank === 3 && (u = true, p = W(i, [1, i.shape[0], i.shape[1], i.shape[2]])), $(xr(s, n), () => `Error in pool: Either strides or dilations must be 1. Got strides ${s} and dilations '${n}'`);
  let c = Mw(p.shape, t8, s, n, o),
    l = [c.dilationHeight, c.dilationWidth],
    m;
  // "same" padding needs explicit base paddings for the space-to-batch form.
  o === "same" ? m = xq([c.filterHeight, c.filterWidth], l) : m = [[0, 0], [0, 0]];
  let d = l[0] === 1 && l[1] === 1,
    [f, h] = gq([c.inHeight, c.inWidth], l, m),
    g = d ? o : "valid",
    x = d ? p : Od(p, l, f),
    C = (e === "avg" ? () => fd(x, t8, s, g, a) : () => Ad(x, t8, s, g, a))(),
    S = d ? C : hd(C, l, h);
  return u ? W(S, [S.shape[1], S.shape[2], S.shape[3]]) : S;
}
// From input spatial sizes `r`, block shape `t8` and base paddings `e`
// ([before, after] pairs), computes [paddings, crops] so every padded
// extent becomes divisible by its block size; the extra amount is appended
// after and cropped back out later.
function gq(r, t8, e) {
  const basePadBefore = e.map(pair => pair[0]);
  const basePadAfter = e.map(pair => pair[1]);
  const sizes = r.concat(basePadBefore, basePadAfter);
  const extra = t8.map((block, i) => (block - sizes[i] % block) % block);
  const padAfter = basePadAfter.map((amount, i) => amount + extra[i]);
  const paddings = t8.map((block, i) => [basePadBefore[i], padAfter[i]]);
  const crops = t8.map((block, i) => [0, extra[i]]);
  return [paddings, crops];
}
function xq(r, t8) { | |
let o = r.map((a, i) => a + (a - 1) * (t8[i] - 1)).map(a => a - 1), | |
n = o.map(a => Math.floor(a / 2)), | |
s = o.map((a, i) => a - n[i]); | |
return o.map((a, i) => [n[i], s[i]]); | |
} | |
var G2 = N({ | |
pool_: hq | |
}); | |
function yq(r, t8) { | |
let e = v(r, "x", "prelu"), | |
o = v(t8, "alpha", "prelu"), | |
n = { | |
x: e, | |
alpha: o | |
}; | |
return T.runKernel(os, n); | |
} | |
var Md = N({ | |
prelu_: yq | |
}); | |
function bq(r, t8 = null, e = false) { | |
let o = v(r, "x", "prod"); | |
o.dtype === "bool" && (o = We(o, "int32")); | |
let n = { | |
x: o | |
}, | |
s = { | |
axis: t8, | |
keepDims: e | |
}; | |
return T.runKernel(ns, n, s); | |
} | |
var H2 = N({ | |
prod_: bq | |
}); | |
function Cq(r, t8, e, o) { | |
let n = r.map((c, l) => v(c, `tensors${l}`, "raggedGather", "int32")), | |
s = v(t8, "paramsDenseValues", "raggedGather"), | |
a = v(e, "indices", "raggedGather", "int32"), | |
i = { | |
paramsNestedSplits: n, | |
paramsDenseValues: s, | |
indices: a | |
}, | |
p = { | |
outputRaggedRank: o | |
}, | |
u = T.runKernel(Qp, i, p); | |
return { | |
outputNestedSplits: u.slice(0, u.length - 1), | |
outputDenseValues: u[u.length - 1] | |
}; | |
} | |
var K2 = N({ | |
raggedGather_: Cq | |
}); | |
function wq(r, t8, e) { | |
let o = v(r, "starts", "raggedRange"), | |
n = v(t8, "limits", "raggedRange", o.dtype), | |
s = v(e, "deltas", "raggedRange", o.dtype), | |
a = { | |
starts: o, | |
limits: n, | |
deltas: s | |
}, | |
i = T.runKernel(Zp, a); | |
return { | |
rtNestedSplits: i[0], | |
rtDenseValues: i[1] | |
}; | |
} | |
var q2 = N({ | |
raggedRange_: wq | |
}); | |
function Sq(r, t8, e, o, n) { | |
let s = v(r, "shape", "raggedTensorToTensor", "int32"), | |
a = v(t8, "values", "raggedTensorToTensor"), | |
i = v(e, "defaultValue", "raggedTensorToTensor", a.dtype), | |
p = o.map((l, m) => v(l, `tensors${m}`, "raggedTensorToTensor", "int32")), | |
u = { | |
shape: s, | |
values: a, | |
defaultValue: i, | |
rowPartitionTensors: p | |
}, | |
c = { | |
rowPartitionTypes: n | |
}; | |
return T.runKernel(Jp, u, c); | |
} | |
var j2 = N({ | |
raggedTensorToTensor_: Sq | |
}); | |
function Iq(r, t8, e) { | |
wt(r); | |
let o = He(r), | |
n = null; | |
if (e == null || e === "float32") n = new Float32Array(o);else if (e === "int32") n = new Int32Array(o);else if (e === "bool") n = new Uint8Array(o);else throw new Error(`Unknown data type ${e}`); | |
for (let s = 0; s < o; s++) n[s] = t8(); | |
return T.makeTensor(n, r, e); | |
} | |
var X2 = N({ | |
rand_: Iq | |
}); | |
var Wd = Kp(qw()); | |
var h1 = {}; | |
qe(h1, { | |
TEST_EPSILON_FLOAT16: () => m1, | |
createVideoElement: () => Oq, | |
encodeStrings: () => f1, | |
expectArrayBuffersEqual: () => Pq, | |
expectArraysClose: () => Rq, | |
expectArraysEqual: () => Aq, | |
expectNumbersClose: () => d1, | |
expectPromiseToFail: () => Dq, | |
expectValuesInRange: () => Fq, | |
play: () => Mq, | |
testEpsilon: () => Bd | |
}); | |
var Eq = 1e-3; | |
var m1 = 0.1; | |
function Rq(r, t8, e) { | |
return e == null && (e = Bd()), jw(r, t8, (o, n) => Xw(o, n, e)); | |
} | |
function Bd() { | |
return T.backend.floatPrecision() === 32 ? Eq : m1; | |
} | |
function jw(r, t8, e) { | |
let o = true; | |
if ((Ot(r) || Ot(t8)) && (o = false), Ot(r) && Ot(t8) && (o = true), o) { | |
let a = r.constructor.name, | |
i = t8.constructor.name; | |
if (a !== i) throw new Error(`Arrays are of different type. Actual: ${a}. Expected: ${i}`); | |
} | |
if (Array.isArray(r) && Array.isArray(t8)) { | |
let a = ir(r), | |
i = ir(t8); | |
if (!Cr(a, i)) throw new Error(`Arrays have different shapes. Actual: [${a}]. Expected: [${i}]`); | |
} | |
let n = Ot(r) ? r : Ps(r), | |
s = Ot(t8) ? t8 : Ps(t8); | |
if (n.length !== s.length) throw new Error(`Arrays have different lengths actual: ${n.length} vs expected: ${s.length}. | |
Actual: ${n}. | |
Expected: ${s}.`); | |
for (let a = 0; a < s.length; ++a) { | |
let i = n[a], | |
p = s[a]; | |
if (!e(i, p)) throw new Error(`Arrays differ: actual[${a}] = ${i}, expected[${a}] = ${p}. | |
Actual: ${n}. | |
Expected: ${s}.`); | |
} | |
typeof expect != "undefined" && expect().nothing(); | |
} | |
function Dq(r, t8) { | |
r().then(() => t8.fail(), () => t8()), typeof expect != "undefined" && expect().nothing(); | |
} | |
function Aq(r, t8) { | |
let e = typeof t8 == "string" || typeof t8 == "number" || typeof t8 == "boolean" ? [t8] : t8; | |
return Vo(r) || Vo(r[0]) || Vo(t8) || Vo(t8[0]) ? jw(r, e, (o, n) => o == n) : jw(r, t8, (o, n) => Xw(o, n, 0)); | |
} | |
function d1(r, t8, e) { | |
if (e == null && (e = Bd()), !Xw(r, t8, e)) throw new Error(`Numbers differ: actual === ${r}, expected === ${t8}`); | |
typeof expect != "undefined" && expect().nothing(); | |
} | |
function Xw(r, t8, e) { | |
return !isFinite(r) && !isFinite(t8) ? true : !(isNaN(r) || isNaN(t8) || Math.abs(r - t8) > e); | |
} | |
function Fq(r, t8, e) { | |
for (let o = 0; o < r.length; o++) if (r[o] < t8 || r[o] > e) throw new Error(`Value out of range:${r[o]} low: ${t8}, high: ${e}`); | |
} | |
function Pq(r, t8) { | |
let e = new Float32Array(r), | |
o = new Float32Array(t8); | |
if (e.length !== o.length) throw new Error(`Expected ArrayBuffer to be of length ${o.length}, but it was ${e.length}`); | |
for (let n = 0; n < o.length; n++) if (e[n] !== o[n]) throw new Error(`Expected ArrayBuffer value at ${n} to be ${o[n]} but got ${e[n]} instead`); | |
} | |
function f1(r) { | |
for (let t8 = 0; t8 < r.length; t8++) { | |
let e = r[t8]; | |
Array.isArray(e) ? f1(e) : r[t8] = tu(e); | |
} | |
return r; | |
} | |
function Oq(r) { | |
let t8 = document.createElement("video"); | |
return "playsInline" in t8 && (t8.playsInline = true), t8.muted = true, t8.loop = true, t8.style.position = "fixed", t8.style.left = "0px", t8.style.top = "0px", t8.preload = "auto", t8.appendChild(r), new Promise(e => { | |
t8.addEventListener("loadeddata", o => e(t8)), t8.load(); | |
}); | |
} | |
async function Mq(r) { | |
await r.play(), "requestVideoFrameCallback" in r && (await new Promise(t8 => { | |
r.requestVideoFrameCallback(t8); | |
})); | |
} | |
var Zu = class { | |
constructor(t8, e, o, n, s) { | |
this.mean = t8, this.stdDev = e, this.dtype = o, this.nextVal = NaN, this.truncated = n, this.truncated && (this.upper = this.mean + this.stdDev * 2, this.lower = this.mean - this.stdDev * 2); | |
let a = s || Math.random(); | |
this.random = Wd.alea(a.toString()); | |
} | |
nextValue() { | |
if (!isNaN(this.nextVal)) { | |
let n = this.nextVal; | |
return this.nextVal = NaN, n; | |
} | |
let t8, | |
e, | |
o = false; | |
for (; !o;) { | |
let n, s, a; | |
do n = 2 * this.random() - 1, s = 2 * this.random() - 1, a = n * n + s * s; while (a >= 1 || a === 0); | |
let i = Math.sqrt(-2 * Math.log(a) / a); | |
t8 = this.mean + this.stdDev * n * i, e = this.mean + this.stdDev * s * i, (!this.truncated || this.isValidTruncated(t8)) && (o = true); | |
} | |
return (!this.truncated || this.isValidTruncated(e)) && (this.nextVal = this.convertValue(e)), this.convertValue(t8); | |
} | |
convertValue(t8) { | |
return this.dtype == null || this.dtype === "float32" ? t8 : Math.round(t8); | |
} | |
isValidTruncated(t8) { | |
return t8 <= this.upper && t8 >= this.lower; | |
} | |
}; | |
var zd = class { | |
constructor(t8, e, o, n) { | |
this.alpha = t8, this.beta = 1 / e, this.dtype = o; | |
let s = n || Math.random(); | |
this.randu = Wd.alea(s.toString()), this.randn = new Zu(0, 1, o, false, this.randu()), t8 < 1 ? this.d = t8 + 2 / 3 : this.d = t8 - 1 / 3, this.c = 1 / Math.sqrt(9 * this.d); | |
} | |
nextValue() { | |
let t8, e, o, n, s, a; | |
for (;;) { | |
do n = this.randn.nextValue(), a = 1 + this.c * n; while (a <= 0); | |
if (a *= a * a, t8 = n * n, e = 1 - 0.331 * t8 * t8, o = 0.5 * t8 + this.d * (1 - a + Math.log(a)), s = this.randu(), s < e || Math.log(s) < o) break; | |
} | |
return a = 1 / this.beta * this.d * a, this.alpha < 1 && (a *= Math.pow(this.randu(), 1 / this.alpha)), this.convertValue(a); | |
} | |
convertValue(t8) { | |
return this.dtype === "float32" ? t8 : Math.round(t8); | |
} | |
}; | |
var Vd = class { | |
constructor(t8 = 0, e = 1, o, n) { | |
if (this.canReturnFloat = () => this.dtype == null || this.dtype === "float32", this.min = t8, this.range = e - t8, this.dtype = o, n == null && (n = Math.random()), typeof n == "number" && (n = n.toString()), !this.canReturnFloat() && this.range <= 1) throw new Error(`The difference between ${t8} - ${e} <= 1 and dtype is not float`); | |
this.random = Wd.alea(n); | |
} | |
convertValue(t8) { | |
return this.canReturnFloat() ? t8 : Math.round(t8); | |
} | |
nextValue() { | |
return this.convertValue(this.min + this.range * this.random()); | |
} | |
}; | |
function Lq(r, t8, e = 1, o = "float32", n) { | |
if (wt(r), e == null && (e = 1), o == null && (o = "float32"), o !== "float32" && o !== "int32") throw new Error(`Unsupported data type ${o}`); | |
let s = new zd(t8, e, o, n), | |
a = me(r, o); | |
for (let i = 0; i < a.values.length; i++) a.values[i] = s.nextValue(); | |
return a.toTensor(); | |
} | |
var g1 = N({ | |
randomGamma_: Lq | |
}); | |
function Bq(r, t8 = 0, e = 1, o, n) { | |
if (wt(r), o != null && o === "bool") throw new Error(`Unsupported data type ${o}`); | |
let s = new Zu(t8, e, o, false, n), | |
a = me(r, o); | |
for (let i = 0; i < a.values.length; i++) a.values[i] = s.nextValue(); | |
return a.toTensor(); | |
} | |
var Ud = N({ | |
randomNormal_: Bq | |
}); | |
function zq(r, t8, e) { | |
if (t8 != null && t8 === "bool") throw new Error(`Unsupported data type ${t8}`); | |
return Ud(r, 0, 1, t8, e); | |
} | |
var x1 = N({ | |
randomStandardNormal_: zq | |
}); | |
function Vq(r, t8 = 0, e = 1, o = "float32", n) { | |
wt(r); | |
let s = me(r, o), | |
a = new Vd(t8, e, null, n); | |
for (let i = 0; i < s.values.length; i++) s.values[i] = a.nextValue(); | |
return s.toTensor(); | |
} | |
var dc = N({ | |
randomUniform_: Vq | |
}); | |
function Wq(r, t8, e, o) { | |
return dc(r, t8, e, "int32", o); | |
} | |
var y1 = N({ | |
randomUniformInt_: Wq | |
}); | |
function mu(r, t8, e = 1, o = "float32") { | |
if (e === 0) throw new Error("Cannot have a step of zero"); | |
let n = { | |
start: r, | |
stop: t8, | |
step: e, | |
dtype: o | |
}; | |
return T.runKernel(ma, {}, n); | |
} | |
function Uq(r) { | |
let e = { | |
input: v(r, "input", "real") | |
}; | |
return T.runKernel(qi, e); | |
} | |
var ci = N({ | |
real_: Uq | |
}); | |
function Gq(r) { | |
let e = { | |
x: v(r, "x", "reciprocal") | |
}; | |
return T.runKernel(ss, e); | |
} | |
var b1 = N({ | |
reciprocal_: Gq | |
}); | |
function Hq(r) { | |
let e = { | |
x: v(r, "x", "relu") | |
}; | |
return T.runKernel(as, e); | |
} | |
var du = N({ | |
relu_: Hq | |
}); | |
function Kq(r) { | |
let e = { | |
x: v(r, "x", "relu6") | |
}; | |
return T.runKernel(ps, e); | |
} | |
var Gd = N({ | |
relu6_: Kq | |
}); | |
function qq(r, t8) { | |
let o = { | |
x: v(r, "x", "reverse") | |
}, | |
n = { | |
dims: t8 | |
}; | |
return T.runKernel(cs, o, n); | |
} | |
var mo = N({ | |
reverse_: qq | |
}); | |
function jq(r) { | |
let t8 = v(r, "x", "reverse"); | |
return $(t8.rank === 1, () => `Error in reverse1D: x must be rank 1 but got rank ${t8.rank}.`), mo(t8, 0); | |
} | |
var C1 = N({ | |
reverse1d_: jq | |
}); | |
function Xq(r, t8) { | |
let e = v(r, "x", "reverse"); | |
return $(e.rank === 2, () => `Error in reverse2D: x must be rank 2 but got rank ${e.rank}.`), mo(e, t8); | |
} | |
var w1 = N({ | |
reverse2d_: Xq | |
}); | |
function Yq(r, t8) { | |
let e = v(r, "x", "reverse"); | |
return $(e.rank === 3, () => `Error in reverse3D: x must be rank 3 but got rank ${e.rank}.`), mo(e, t8); | |
} | |
var S1 = N({ | |
reverse3d_: Yq | |
}); | |
function Qq(r, t8) { | |
let e = v(r, "x", "reverse"); | |
return $(e.rank === 4, () => `Error in reverse4D: x must be rank 4 but got rank ${e.rank}.`), mo(e, t8); | |
} | |
var I1 = N({ | |
reverse4d_: Qq | |
}); | |
function Zq(r) { | |
let e = { | |
x: v(r, "x", "round") | |
}; | |
return T.runKernel(ls, e); | |
} | |
var Hd = N({ | |
round_: Zq | |
}); | |
function Jq(r) { | |
let e = { | |
x: v(r, "x", "rsqrt", "float32") | |
}; | |
return T.runKernel(ms, e); | |
} | |
var v1 = N({ | |
rsqrt_: Jq | |
}); | |
function e6(r) { | |
let e = { | |
x: v(r, "x", "selu") | |
}; | |
return T.runKernel(gs, e); | |
} | |
var k1 = N({ | |
selu_: e6 | |
}); | |
function t6(r, t8, e, o, n, s = [1, 1], a = "NHWC") { | |
let i = v(r, "x", "separableConv2d"), | |
p = v(t8, "depthwiseFilter", "separableConv2d"), | |
u = v(e, "pointwiseFilter", "separableConv2d"), | |
c = i, | |
l = false; | |
if (i.rank === 3 && (l = true, c = W(i, [1, i.shape[0], i.shape[1], i.shape[2]])), a === "NCHW") throw new Error("separableConv2d currently does not support dataFormat NCHW; only NHWC is supported"); | |
$(c.rank === 4, () => `Error in separableConv2d: input must be rank 4, but got rank ${c.rank}.`), $(p.rank === 4, () => `Error in separableConv2d: depthwise filter must be rank 4, but got rank ${p.rank}.`), $(u.rank === 4, () => `Error in separableConv2d: pointwise filter must be rank 4, but got rank ${p.rank}.`), $(u.shape[0] === 1, () => `Error in separableConv2d: the first dimension of pointwise filter must be 1, but got ${u.shape[0]}.`), $(u.shape[1] === 1, () => `Error in separableConv2d: the second dimension of pointwise filter must be 1, but got ${u.shape[1]}.`); | |
let m = p.shape[2], | |
d = p.shape[3]; | |
$(u.shape[2] === m * d, () => `Error in separableConv2d: the third dimension of pointwise filter must be ${m * d}, but got ${u.shape[2]}.`); | |
let f = lc(c, p, o, n, a, s), | |
g = uu(f, u, 1, "valid", a); | |
return l ? W(g, [g.shape[1], g.shape[2], g.shape[3]]) : g; | |
} | |
var N1 = N({ | |
separableConv2d_: t6 | |
}); | |
async function r6(r, t8) { | |
let e = v(r, "x", "setdiff1d"), | |
o = v(t8, "y", "setdiff1d"); | |
$(e.dtype === o.dtype, () => `x and y should have the same dtype, but got x (${e.dtype}) and y (${o.dtype}).`), $(e.rank === 1, () => `x should be 1D tensor, but got x (${e.shape}).`), $(o.rank === 1, () => `y should be 1D tensor, but got y (${o.shape}).`); | |
let n = await e.data(), | |
s = await o.data(), | |
a = new Set(s), | |
i = 0; | |
for (let c = 0; c < n.length; c++) a.has(n[c]) || i++; | |
let p = new tt([i], e.dtype), | |
u = new tt([i], "int32"); | |
for (let c = 0, l = 0; c < n.length; c++) a.has(n[c]) || (p.values[l] = n[c], u.values[l] = c, l++); | |
return [p.toTensor(), u.toTensor()]; | |
} | |
var T1 = r6; | |
function o6(r) { | |
let e = { | |
x: v(r, "x", "sign") | |
}; | |
return T.runKernel(bs, e); | |
} | |
var _1 = N({ | |
sign_: o6 | |
}); | |
function n6(r) { | |
let e = { | |
x: v(r, "x", "sin", "float32") | |
}; | |
return T.runKernel(xs, e); | |
} | |
var $1 = N({ | |
sin_: n6 | |
}); | |
function s6(r) { | |
let e = { | |
x: v(r, "x", "sinh") | |
}; | |
return T.runKernel(ys, e); | |
} | |
var E1 = N({ | |
sinh_: s6 | |
}); | |
function a6(r, t8, e) { | |
let o = v(r, "x", "slice1d"); | |
return $(o.rank === 1, () => `slice1d expects a rank-1 tensor, but got a rank-${o.rank} tensor`), Xe(o, [t8], [e]); | |
} | |
var R1 = N({ | |
slice1d_: a6 | |
}); | |
function i6(r, t8, e) { | |
let o = v(r, "x", "slice2d"); | |
return $(o.rank === 2, () => `slice2d expects a rank-2 tensor, but got a rank-${o.rank} tensor`), Xe(o, t8, e); | |
} | |
var D1 = N({ | |
slice2d_: i6 | |
}); | |
function u6(r, t8, e) { | |
let o = v(r, "x", "slice3d"); | |
return $(o.rank === 3, () => `slice3d expects a rank-3 tensor, but got a rank-${o.rank} tensor`), Xe(o, t8, e); | |
} | |
var A1 = N({ | |
slice3d_: u6 | |
}); | |
function p6(r, t8, e) { | |
let o = v(r, "x", "slice4d"); | |
return $(o.rank === 4, () => `slice4d expects a rank-4 tensor, but got a rank-${o.rank} tensor`), Xe(o, t8, e); | |
} | |
var F1 = N({ | |
slice4d_: p6 | |
}); | |
function c6(r, t8 = -1) { | |
let e = v(r, "logits", "softmax", "float32"); | |
if (t8 === -1 && (t8 = e.rank - 1), t8 !== e.rank - 1) throw Error(`Softmax along a non-last dimension is not yet supported. Logits was rank ${e.rank} and dim was ${t8}`); | |
let o = { | |
logits: e | |
}, | |
n = { | |
dim: t8 | |
}; | |
return T.runKernel(vs, o, n); | |
} | |
var P1 = N({ | |
softmax_: c6 | |
}); | |
function l6(r) { | |
$(r.dtype === "complex64", () => `The dtype for tf.spectral.fft() must be complex64 but got ${r.dtype}.`); | |
let t8 = { | |
input: r | |
}; | |
return T.runKernel(Wi, t8); | |
} | |
var fc = N({ | |
fft_: l6 | |
}); | |
function m6(r) { | |
$(r.dtype === "complex64", () => `The dtype for tf.spectral.ifft() must be complex64 but got ${r.dtype}.`); | |
let t8 = { | |
input: r | |
}; | |
return T.runKernel(Ui, t8); | |
} | |
var Ju = N({ | |
ifft_: m6 | |
}); | |
function d6(r) { | |
let t8 = r.shape[r.shape.length - 1], | |
e = r.size / t8, | |
o; | |
if (t8 <= 2) { | |
let n = W(r, [e, t8]); | |
o = Ju(n); | |
} else { | |
let n = [e, 2 * (t8 - 1)], | |
s = W(ci(r), [e, t8]), | |
a = W(lu(r), [e, t8]), | |
i = mo(Xe(s, [0, 1], [e, t8 - 2]), 1), | |
p = se(mo(Xe(a, [0, 1], [e, t8 - 2]), 1), ke(-1)), | |
u = bt([s, i], 1), | |
c = bt([a, p], 1), | |
l = W(Er(u, c), [n[0], n[1]]); | |
o = Ju(l); | |
} | |
if (o = ci(o), r.rank === 3 && r.shape[0] !== 0) { | |
let n = o, | |
s = r.shape[0]; | |
o = W(o, [s, o.shape[0] / s, o.shape[1]]), n.dispose(); | |
} | |
return o; | |
} | |
var Kd = N({ | |
irfft_: d6 | |
}); | |
function f6(r, t8, e = 0) { | |
let n = { | |
x: v(r, "x", "split") | |
}, | |
s = { | |
numOrSizeSplits: t8, | |
axis: e | |
}; | |
return T.runKernel(xa, n, s); | |
} | |
var li = N({ | |
split_: f6 | |
}); | |
function h6(r, t8) { | |
$(r.dtype === "float32", () => `The dtype for rfft() must be real value but got ${r.dtype}`); | |
let e = r.shape[r.shape.length - 1], | |
o = r.size / e, | |
n; | |
if (t8 != null && t8 < e) { | |
let f = r.shape.map(g => 0), | |
h = r.shape.map(g => g); | |
h[r.shape.length - 1] = t8, n = Xe(r, f, h), e = t8; | |
} else if (t8 != null && t8 > e) { | |
let f = r.shape.map(h => h); | |
f[r.shape.length - 1] = t8 - e, n = bt([r, Gr(f)], r.shape.length - 1), e = t8; | |
} else n = r; | |
let s = Ht(n), | |
a = W(Er(n, s), [o, e]), | |
i = fc(a), | |
p = Math.floor(e / 2) + 1, | |
u = ci(i), | |
c = lu(i), | |
l = li(u, [p, e - p], u.shape.length - 1), | |
m = li(c, [p, e - p], c.shape.length - 1), | |
d = n.shape.slice(); | |
return d[n.shape.length - 1] = p, W(Er(l[0], m[0]), d); | |
} | |
var hc = N({ | |
rfft_: h6 | |
}); | |
function g6(r, t8) { | |
let e = v(r, "a", "squaredDifference"), | |
o = v(t8, "b", "squaredDifference"); | |
[e, o] = Oe(e, o), rt(e.shape, o.shape); | |
let n = { | |
a: e, | |
b: o | |
}, | |
s = {}; | |
return T.runKernel(Ns, n, s); | |
} | |
var qd = N({ | |
squaredDifference_: g6 | |
}); | |
function x6(r, t8) { | |
let e = v(r, "x", "squeeze", "string_or_numeric"); | |
return W(e, JC(e.shape, t8).newShape); | |
} | |
var gc = N({ | |
squeeze_: x6 | |
}); | |
function y6(r, t8 = 0) { | |
let e = si(r, "tensors", "stack", "string_or_numeric"); | |
$(e.length >= 1, () => "Pass at least one tensor to tf.stack"), e.length > 0 && $(t8 <= e[0].rank, () => "Axis must be <= rank of the tensor"); | |
let o = e, | |
n = { | |
axis: t8 | |
}; | |
return T.runKernel(la, o, n); | |
} | |
var kr = N({ | |
stack_: y6 | |
}); | |
function b6(r, t8 = 0) { | |
let o = { | |
x: v(r, "x", "step") | |
}, | |
n = { | |
alpha: t8 | |
}; | |
return T.runKernel(So, o, n); | |
} | |
var jd = N({ | |
step_: b6 | |
}); | |
function C6(r, t8, e, o, n = 0, s = 0, a = 0, i = 0, p = 0) { | |
let c = { | |
x: v(r, "x", "stridedSlice", "string_or_numeric") | |
}, | |
l = { | |
begin: t8, | |
end: e, | |
strides: o, | |
beginMask: n, | |
endMask: s, | |
ellipsisMask: a, | |
newAxisMask: i, | |
shrinkAxisMask: p | |
}; | |
return T.runKernel(Ts, c, l); | |
} | |
var O1 = N({ | |
stridedSlice_: C6 | |
}); | |
function w6(r) { | |
let e = { | |
x: v(r, "x", "tan", "float32") | |
}; | |
return T.runKernel($s, e); | |
} | |
var M1 = N({ | |
tan_: w6 | |
}); | |
function tr(r, t8) { | |
io(r); | |
let e = ir(r, t8); | |
if (e.length !== 1) throw new Error("tensor1d() requires values to be a flat/TypedArray"); | |
return Sr(r, null, e, t8); | |
} | |
function fu(r, t8, e) { | |
if (io(r), t8 != null && t8.length !== 2) throw new Error("tensor2d() requires shape to have two numbers"); | |
let o = ir(r, e); | |
if (o.length !== 2 && o.length !== 1) throw new Error("tensor2d() requires values to be number[][] or flat/TypedArray"); | |
if (o.length === 1 && t8 == null) throw new Error("tensor2d() requires shape to be provided when `values` are a flat/TypedArray"); | |
return Sr(r, t8, o, e); | |
} | |
function Xd(r, t8, e) { | |
if (io(r), t8 != null && t8.length !== 3) throw new Error("tensor3d() requires shape to have three numbers"); | |
let o = ir(r, e); | |
if (o.length !== 3 && o.length !== 1) throw new Error("tensor3d() requires values to be number[][][] or flat/TypedArray"); | |
if (o.length === 1 && t8 == null) throw new Error("tensor3d() requires shape to be provided when `values` are a flat array"); | |
return Sr(r, t8, o, e); | |
} | |
function L1(r, t8, e) { | |
if (io(r), t8 != null && t8.length !== 4) throw new Error("tensor4d() requires shape to have four numbers"); | |
let o = ir(r, e); | |
if (o.length !== 4 && o.length !== 1) throw new Error("tensor4d() requires values to be number[][][][] or flat/TypedArray"); | |
if (o.length === 1 && t8 == null) throw new Error("tensor4d() requires shape to be provided when `values` are a flat array"); | |
return Sr(r, t8, o, e); | |
} | |
function B1(r, t8, e) { | |
if (io(r), t8 != null && t8.length !== 5) throw new Error("tensor5d() requires shape to have five numbers"); | |
let o = ir(r, e); | |
if (o.length !== 5 && o.length !== 1) throw new Error("tensor5d() requires values to be number[][][][][] or flat/TypedArray"); | |
if (o.length === 1 && t8 == null) throw new Error("tensor5d() requires shape to be provided when `values` are a flat array"); | |
return Sr(r, t8, o, e); | |
} | |
function z1(r, t8, e) { | |
if (io(r), t8 != null && t8.length !== 6) throw new Error("tensor6d() requires shape to have six numbers"); | |
let o = ir(r, e); | |
if (o.length !== 6 && o.length !== 1) throw new Error("tensor6d() requires values to be number[][][][][][] or flat/TypedArray"); | |
if (o.length === 1 && t8 == null) throw new Error("tensor6d() requires shape to be provided when `values` are a flat array"); | |
return t8 = t8 || o, Sr(r, t8, o, e); | |
} | |
var hu = {}; | |
qe(hu, { | |
calculateShapes: () => V1, | |
validateInput: () => xc, | |
validateUpdateShape: () => Yw | |
}); | |
function Yw(r, t8, e) { | |
let o = t8.rank > 1 ? t8.shape[t8.rank - 1] : 1, | |
n = t8.rank > 1 ? t8.rank - 1 : 1, | |
s = `Must have updates.shape = indices.shape[:batchDim] + shape[sliceDim:], got updates.shape: ${e.shape}, indices.shape: ${t8.shape}, shape: ${r}, sliceDim: ${o}, and batchDim: ${n}.`; | |
if (e.rank < n) throw new Error(s + ` update.rank < ${n}. `); | |
if (r.length < o + (e.rank - n)) throw new Error(s + ` Output shape length < ${o + (e.rank - n)}`); | |
if (e.rank !== n + r.length - o) throw new Error(s + ` update.rank != ${n + r.length - o}`); | |
for (let a = 0; a < n; ++a) if (e.shape[a] !== t8.shape[a]) throw new Error(s + ` updates.shape[${a}] (${e.shape[a]}) != indices.shape[${a}] (${t8.shape[a]}).`); | |
for (let a = 0; a < e.rank - n; ++a) if (e.shape[a + n] !== r[a + o]) throw new Error(s + ` updates.shape[${a + n}] (${e.shape[a + n]}) != shape[${a + n}] (${r[a + n]})`); | |
} | |
function xc(r, t8, e) { | |
if (t8.rank < 1) throw new Error(`tf.scatterND() expects the indices to be rank 1 or higher, but the rank was ${t8.rank}.`); | |
if (r.rank < 1) throw new Error(`tf.scatterND() expects the updates to be rank 1 or higher, but the rank was ${r.rank}.`); | |
if (t8.dtype !== "int32") throw new Error(`The dtype of 'indices' should be int32, but got dtype: ${t8.dtype}`); | |
if (e.length < 1) throw new Error(`Output rank must be greater or equal to 1, but got shape: ${e}`); | |
if (e.length === 0) { | |
if (t8.size === 0) throw new Error(`Indices specified for empty output. indices shape: ${t8.shape}`); | |
if (r.size === 0) throw new Error(`Updates specified for empty output. updates shape: ${r.shape}`); | |
} | |
Yw(e, t8, r); | |
} | |
function V1(r, t8, e) { | |
let o = t8.shape.length, | |
n = o > 1 ? t8.shape[o - 1] : 1, | |
s = e.length, | |
a = 1; | |
for (let l = n; l < s; ++l) a *= e[l]; | |
let i = n < 1 ? 1 : n, | |
p = He(t8.shape) / i, | |
u = [...js(e.slice(0, n)), 1], | |
c = He(e); | |
return { | |
sliceRank: n, | |
numUpdates: p, | |
sliceSize: a, | |
strides: u, | |
outputSize: c | |
}; | |
} | |
function S6(r, t8, e) { | |
let o = v(r, "tensor", "tensorScatterupdate"), | |
n = v(t8, "indices", "tensorScatterupdate", "int32"), | |
s = v(e, "updates", "tensorScatterupdate"); | |
if (xc(s, n, o.shape), o.dtype !== s.dtype) throw new Error(`tensor and updates must have the same dtype, instead they are ${o.dtype} and ${s.dtype}.`); | |
let a = { | |
tensor: o, | |
indices: n, | |
updates: s | |
}, | |
i = {}; | |
return T.runKernel(fs, a, i); | |
} | |
var W1 = N({ | |
tensorScatterUpdate_: S6 | |
}); | |
function I6(r, t8 = 1, e = true) { | |
let o = v(r, "x", "topk"); | |
if (o.rank === 0) throw new Error("topk() expects the input to be of rank 1 or higher"); | |
let n = o.shape[o.shape.length - 1]; | |
if (t8 < 0) throw new Error(`'k' passed to topk() must be >= 0 but got ${t8}`); | |
if (t8 > n) throw new Error(`'k' passed to topk() must be <= the last dimension (${n}) but got ${t8}`); | |
let s = { | |
x: o | |
}, | |
a = { | |
k: t8, | |
sorted: e | |
}, | |
[i, p] = T.runKernel(Rs, s, a); | |
return { | |
values: i, | |
indices: p | |
}; | |
} | |
var U1 = N({ | |
topk_: I6 | |
}); | |
function v6(r, t8 = 0, e = 1, o, n) { | |
if (wt(r), o != null && o === "bool") throw new Error("Unsupported data type $ { dtype }"); | |
let s = new Zu(t8, e, o, true, n), | |
a = me(r, o); | |
for (let i = 0; i < a.values.length; i++) a.values[i] = s.nextValue(); | |
return a.toTensor(); | |
} | |
var G1 = N({ | |
truncatedNormal_: v6 | |
}); | |
function k6(r, t8 = 0) { | |
let e = v(r, "x", "unique", "string_or_numeric"); | |
$(e.rank > 0, () => "The input tensor must be at least 1D"); | |
let o = { | |
x: e | |
}, | |
n = { | |
axis: t8 | |
}, | |
[s, a] = T.runKernel(Zi, o, n); | |
return { | |
values: s, | |
indices: a | |
}; | |
} | |
var H1 = N({ | |
unique_: k6 | |
}); | |
function N6(r, t8, e) { | |
let o = v(r, "x", "unsortedSegmentSum"), | |
n = v(t8, "segmentIds", "unsortedSegmentSum", "int32"); | |
$(qa(e), () => "numSegments must be of dtype int"); | |
let s = { | |
x: o, | |
segmentIds: n | |
}, | |
a = { | |
numSegments: e | |
}; | |
return T.runKernel(Ji, s, a); | |
} | |
var K1 = N({ | |
unsortedSegmentSum_: N6 | |
}); | |
function T6(r, t8 = 0) { | |
let e = v(r, "x", "unstack", "string_or_numeric"); | |
$(t8 >= -e.shape.length && t8 < e.shape.length, () => `Axis = ${t8} is not in [-${e.shape.length}, ${e.shape.length})`); | |
let o = { | |
value: e | |
}, | |
n = { | |
axis: t8 | |
}; | |
return T.runKernel(wa, o, n); | |
} | |
var fo = N({ | |
unstack_: T6 | |
}); | |
function q1(r, t8) { | |
return $l(r, t8, "right"); | |
} | |
function j1(r, t8 = true, e, o) { | |
return T.makeVariable(r, t8, e, o); | |
} | |
function Yd(r, t8) { | |
let e = []; | |
for (let s = 0; s < t8.length; s++) t8[s] && e.push(s); | |
let o = me(r, "int32"), | |
n = me([e.length, r.length], "int32"); | |
for (let s = 0; s < e.length; s++) { | |
let a = o.indexToLoc(e[s]), | |
i = s * r.length; | |
n.values.set(a, i); | |
} | |
return n.toTensor(); | |
} | |
async function _6(r) { | |
let t8 = v(r, "condition", "whereAsync", "bool"), | |
e = await t8.data(), | |
o = Yd(t8.shape, e); | |
return r !== t8 && t8.dispose(), o; | |
} | |
var Qd = _6; | |
async function $6(r, t8, e) { | |
let o = v(r, "tensor", "boolMask"), | |
n = v(t8, "mask", "boolMask", "bool"), | |
s = e == null ? 0 : e, | |
a = n.rank, | |
i = o.shape; | |
$(a > 0, () => "mask cannot be scalar"), yt(i.slice(s, s + a), n.shape, "mask's shape must match the first K dimensions of tensor's shape,"); | |
let p = 1; | |
for (let h = s; h < s + a; h++) p *= i[h]; | |
let u = i.slice(0, s).concat([p], i.slice(s + a)), | |
c = W(o, u), | |
l = W(n, [-1]), | |
m = await Qd(l), | |
d = gc(m, [1]), | |
f = Id(c, d, s); | |
return r !== o && o.dispose(), t8 !== n && n.dispose(), d.dispose(), c.dispose(), l.dispose(), m.dispose(), f; | |
} | |
var E6 = $6; | |
function R6(r, t8, e) { | |
let o = v(r, "x", "transpose"); | |
if (t8 == null && (t8 = o.shape.map((a, i) => i).reverse()), $(o.rank === t8.length, () => `Error in transpose: rank of input ${o.rank} must match length of perm ${t8}.`), t8.forEach(a => { | |
$(a >= 0 && a < o.rank, () => `All entries in 'perm' must be between 0 and ${o.rank - 1} but got ${t8}`); | |
}), o.rank <= 1) return o.clone(); | |
let n = { | |
x: o | |
}, | |
s = { | |
perm: t8 | |
}; | |
return o.dtype === "complex64" ? De(() => { | |
let a = ci(o), | |
i = lu(o); | |
return a = T.runKernel(co, { | |
x: a | |
}, s), i = T.runKernel(co, { | |
x: i | |
}, s), e && (i = cr(i)), Er(a, i); | |
}) : T.runKernel(co, n, s); | |
} | |
var yc = N({ | |
transpose_: R6 | |
}); | |
function D6(r, t8, e, o, n = true) { | |
let s = v(r, "v", "movingAverage"), | |
a = v(t8, "x", "movingAverage"), | |
i = v(e, "decay", "movingAverage"); | |
Cw(s, a), $(Cr(s.shape, a.shape), () => "Shape mismatch in v and x"); | |
let p = ke(1), | |
u = Te(p, i), | |
c = se(Te(a, s), u); | |
if (n) { | |
$(o != null, () => "When using zeroDebias: true, step is required."); | |
let l = v(o, "step", "movingAverage"); | |
c = je(c, Te(p, ui(i, l))); | |
} | |
return Ce(s, c); | |
} | |
var A6 = N({ | |
movingAverage_: D6 | |
}); | |
function F6(r, t8, e) { | |
wt(e); | |
let o = v(r, "indices", "scatterND", "int32"), | |
n = v(t8, "updates", "scatterND"); | |
xc(n, o, e); | |
let s = { | |
indices: o, | |
updates: n | |
}, | |
a = { | |
shape: e | |
}; | |
return T.runKernel(ds, s, a); | |
} | |
var P6 = N({ | |
scatterND_: F6 | |
}); | |
function X1(r, t8, e, o) { | |
if (r.dtype !== "int32") throw new Error(`tf.sparseToDense() expects the indices to be int32 type, but the dtype was ${r.dtype}.`); | |
if (r.rank > 2) throw new Error(`sparseIndices should be a scalar, vector, or matrix, but got shape ${r.shape}.`); | |
let n = r.rank > 0 ? r.shape[0] : 1, | |
s = r.rank > 1 ? r.shape[1] : 1; | |
if (e.length !== s) throw new Error(`outputShape has incorrect number of elements:, ${e.length}, should be: ${s}.`); | |
let a = t8.size; | |
if (!(t8.rank === 0 || t8.rank === 1 && a === n)) throw new Error(`sparseValues has incorrect shape ${t8.shape}, should be [] or [${n}]`); | |
if (t8.dtype !== o.dtype) throw new Error("sparseValues.dtype must match defaultValues.dtype"); | |
} | |
function M6(r, t8, e, o = 0) { | |
wt(e); | |
let n = v(r, "sparseIndices", "sparseToDense", "int32"), | |
s = v(t8, "sparseValues", "sparseToDense", "string_or_numeric"), | |
a = v(o, "defaultValue", "sparseToDense", s.dtype); | |
X1(n, s, e, a); | |
let i = { | |
sparseIndices: n, | |
sparseValues: s, | |
defaultValue: a | |
}, | |
p = { | |
outputShape: e | |
}; | |
return T.runKernel(ks, i, p); | |
} | |
var L6 = N({ | |
sparseToDense_: M6 | |
}); | |
function B6(r, t8) { | |
let e = v(t8, "indices", "gatherND", "int32"), | |
n = { | |
params: v(r, "x", "gatherND", "string_or_numeric"), | |
indices: e | |
}; | |
return T.runKernel(kn, n); | |
} | |
var z6 = N({ | |
gatherND_: B6 | |
}); | |
function Y1(r, t8) { | |
if (t8 == null) return r.shape.slice(); | |
if (Cr(r.shape, t8)) return t8; | |
if (r.shape.length === t8.length) { | |
let e = []; | |
for (let o = 0; o < r.shape.length; o++) t8[o] == null && r.shape[o] != null ? e.push(r.shape[o]) : e.push(t8[o]); | |
return e; | |
} | |
return t8; | |
} | |
// dropout implementation: zeroes elements of `r` with probability `t8` (rate)
// and scales the survivors by 1/(1 - rate), so the expected value is preserved.
// e = optional noiseShape, o = optional seed (forwarded to the random op `dc`,
// presumably randomUniform — TODO confirm).
function V6(r, t8, e, o) {
  let n = v(r, "x", "dropout");
  // rate === 0 is the identity; clone only if the caller passed a tensor
  // (`ut` is presumably the Tensor class — TODO confirm).
  if ($(n.dtype === "float32", () => `x has to be a floating point tensor since it's going to be scaled, but got a ${n.dtype} tensor instead.`), $(t8 >= 0 && t8 < 1, () => `rate must be a float in the range [0, 1), but got ${t8}.`), t8 === 0) return r instanceof ut ? n.clone() : n;
  // s = resolved noise shape, a = keep probability,
  // i = scaled binary mask (floor of uniform + keep-prob, divided by keep-prob).
  let s = Y1(n, e),
    a = 1 - t8,
    i = je(Sd(Ce(dc(s, 0, 1, "float32", o), a)), a);
  return se(n, i);
}
// Exported dropout op.
var W6 = N({
  dropout_: V6
});
// Smallest power of two that is >= r (used to pick a default FFT length for
// stft below). Keeps the original log/pow formulation exactly so floating
// point results are unchanged.
function Qw(r) {
  const log2OfR = Math.log(r) / Math.log(2);
  const exponent = Math.ceil(log2OfR);
  return Math.floor(Math.pow(2, exponent));
}
// Generalized cosine window of length r: value[i] = t8 - e * cos(phase).
// Hamming (0.54, 0.46) and Hann (0.5, 0.5) windows are built from this.
// Returns a float32 tensor via `tr` (presumably tensor1d — TODO confirm).
function Rl(r, t8, e) {
  // For even lengths the denominator gains one sample (o = 1 - r % 2).
  const evenLengthCorrection = 1 - r % 2;
  const values = new Float32Array(r);
  for (let idx = 0; idx < r; ++idx) {
    const phase = 2 * Math.PI * idx / (r + evenLengthCorrection - 1);
    values[idx] = t8 - e * Math.cos(phase);
  }
  return tr(values, "float32");
}
// inTopK implementation (CPU-side, async): for each row of `r` (predictions),
// returns a bool tensor marking whether the target class index in `t8` is
// among the top-`e` scoring entries of that row.
async function U6(r, t8, e = 1) {
  let o = v(r, "predictions", "inTopK"),
    n = v(t8, "targets", "inTopK");
  $(o.rank > 1, () => `inTopK() expects the predictions to be of rank 2 or higher, but got ${o.rank}`), $(o.rank - 1 === n.rank, () => `predictions rank should be 1 larger than targets rank, but got predictions rank ${o.rank} and targets rank ${n.rank}`), yt(o.shape.slice(0, o.shape.length - 1), n.shape, "predictions's shape should be align with the targets' shape, except the last dimension.");
  let s = o.shape[o.shape.length - 1];
  $(e > 0 && e <= s, () => `'k' passed to inTopK() must be > 0 && <= the predictions last dimension (${s}), but got ${e}`);
  // Download both tensors and compute on the CPU.
  let a = await o.data(),
    i = await n.data(),
    // p = number of rows (flattened batch), u = classes per row,
    // c = output buffer (`ew` presumably allocates a typed array — TODO confirm).
    [p, u] = [a.length / s, s],
    c = ew("bool", p);
  for (let l = 0; l < p; l++) {
    let m = l * u,
      d = a.subarray(m, m + u),
      f = [];
    // Sort this row's (value, index) pairs by value, descending.
    for (let h = 0; h < d.length; h++) f.push({
      value: d[h],
      index: h
    });
    f.sort((h, g) => g.value - h.value), c[l] = 0;
    // Mark 1 if the target index appears in the first e sorted entries.
    for (let h = 0; h < e; h++) if (f[h].index === i[l]) {
      c[l] = 1;
      break;
    }
  }
  // Dispose intermediates created by v(); keep caller-owned tensors.
  return r !== o && o.dispose(), t8 !== n && n.dispose(), ur(c, n.shape, "bool");
}
// Exported inTopK (async functions are exported directly, not via N()).
var G6 = U6;
// `fused` ops namespace object; `qe` presumably installs lazy getters
// (bundler __export helper — TODO confirm) so the fused ops defined below
// are reachable as Zw.conv2d / Zw.depthwiseConv2d / Zw.matMul.
var Zw = {};
qe(Zw, {
  conv2d: () => Z1,
  depthwiseConv2d: () => tN,
  matMul: () => rN
});
// conv2DBackpropFilter implementation: gradient of conv2d w.r.t. the filter.
// r = input x (rank 3 or 4), t8 = upstream gradient dy, e = filterShape,
// o = strides, n = pad, s = dataFormat, a = dimRoundingMode.
// Runs the `Oi` kernel.
function H6(r, t8, e, o, n, s = "NHWC", a) {
  // Promote rank-3 inputs to a single-image batch.
  let i = r;
  r.rank === 3 && (i = W(r, [1, r.shape[0], r.shape[1], r.shape[2]]));
  let p = t8;
  p.rank === 3 && (p = W(t8, [1, t8.shape[0], t8.shape[1], t8.shape[2]])), $(i.rank === 4, () => `Error in conv2dDerFilter: input must be rank 4, but got shape ${i.shape}.`), $(p.rank === 4, () => `Error in conv2dDerFilter: dy must be rank 4, but got shape ${p.shape}.`), $(e.length === 4, () => `Error in conv2dDerFilter: filterShape must be length 4, but got ${e}.`);
  // Channel dimension depends on the data format (last for NHWC, second for NCHW).
  let u = s === "NHWC" ? i.shape[3] : i.shape[1],
    c = s === "NHWC" ? p.shape[3] : p.shape[1];
  $(u === e[2], () => `Error in conv2dDerFilter: depth of input ${u}) must match input depth in filter (${e[2]}.`), $(c === e[3], () => `Error in conv2dDerFilter: depth of dy (${c}) must match output depth for filter (${e[3]}).`), Bt("conv2dDerFilter", n, a);
  let l = {
      x: i,
      dy: p
    },
    m = {
      strides: o,
      pad: n,
      dataFormat: s,
      dimRoundingMode: a,
      filterShape: e
    };
  return T.runKernel(Oi, l, m);
}
// Exported conv2DBackpropFilter op.
var Q1 = N({
  conv2DBackpropFilter_: H6
});
// Backprop through a fused activation: r = upstream gradient, t8 = the
// activation's saved output, e = activation name. Only "linear" (identity)
// and "relu" (mask by step of the saved output, via `se`/`jd`) are
// differentiable on the fused path.
function ep(r, t8, e) {
  switch (e) {
    case null:
    case undefined:
    case "linear":
      return r;
    case "relu":
      return se(r, jd(t8));
    default:
      throw new Error(`Cannot compute gradient for fused activation ${e}.`);
  }
}
// Bias gradient for fused ops: reduce the upstream gradient `t8` over the
// axes that were broadcast relative to the bias `r`, then reshape back to the
// bias shape. `yd` presumably computes the broadcast reduction axes, `ot` is
// sum, `W` is reshape — TODO confirm.
function tp(r, t8) {
  const reduceAxes = yd(r.shape, t8.shape);
  let grad = t8;
  if (reduceAxes.length > 0) {
    grad = ot(grad, reduceAxes);
  }
  return W(grad, r.shape);
}
// Apply a (non-fused) activation by name on the fallback path of the fused
// ops: r = pre-activation tensor, t8 = activation name, e = prelu weights,
// o = leakyrelu alpha.
function rp(r, t8, e, o) {
  switch (t8) {
    case "linear":
      return r;
    case "relu":
      return du(r);
    case "elu":
      return Cd(r);
    case "relu6":
      return Gd(r);
    case "prelu":
      return Md(r, e);
    case "leakyrelu":
      return kd(r, o);
    case "sigmoid":
      return $a(r);
    default:
      throw new Error(`Unknown fused activation ${t8}.`);
  }
}
// "Should skip the fused/gradient path?": true when no gradient tape is
// active (depth r not > 0) or the activation is plain "linear".
var op = (r, t8) => t8 === "linear" || !(r > 0);
// fused conv2d: conv2d + optional bias add + activation in one kernel (`vo`),
// with a custom gradient (`vr` is presumably customGrad — TODO confirm).
// Falls back to separate ops when no gradient is recorded and the activation
// is not linear (see `op` above). Interface mirrors tf.fused.conv2d.
function K6({
  x: r,
  filter: t8,
  strides: e,
  pad: o,
  dataFormat: n = "NHWC",
  dilations: s = [1, 1],
  dimRoundingMode: a,
  bias: i,
  activation: p = "linear",
  preluActivationWeights: u,
  leakyreluAlpha: c
}) {
  // Fallback path: unfused conv (`uu`) + bias add + activation.
  if (p = p || "linear", op(T.state.gradientDepth, p) === false) {
    $(n === "NHWC", () => `Error in fused conv2d: got dataFormat of ${n} but only NHWC is currently supported for the case of gradient depth is 0 and the activation is not linear.`);
    let _ = uu(r, t8, e, o, n, s, a);
    return i != null && (_ = Ce(_, i)), rp(_, p, u, c);
  }
  let l = v(r, "x", "conv2d", "float32"),
    m = v(t8, "filter", "conv2d", "float32"),
    d = l,
    f = false;
  // Promote rank-3 input to a batch of one; `f` remembers to squeeze later.
  l.rank === 3 && (f = true, d = W(l, [1, l.shape[0], l.shape[1], l.shape[2]])), $(d.rank === 4, () => `Error in fused conv2d: input must be rank 4, but got rank ${d.rank}.`), $(m.rank === 4, () => `Error in fused conv2d: filter must be rank 4, but got rank ${m.rank}.`), Bt("fused conv2d", o, a);
  let h = n === "NHWC" ? d.shape[3] : d.shape[1];
  $(m.shape[2] === h, () => `Error in conv2d: depth of input (${h}) must match input depth for filter ${m.shape[2]}.`), $(xr(e, s), () => `Error in conv2D: Either strides or dilations must be 1. Got strides ${e} and dilations '${s}'`);
  // g = conv layout info (`Hu` presumably computeConv2DInfo — TODO confirm).
  let g = Hu(d.shape, m.shape, e, s, o, a),
    x;
  // Validate/convert the optional bias against the conv output shape.
  i != null && (x = v(i, "bias", "fused conv2d"), [x] = Oe(x, l), n === "NHWC" ? rt(g.outShape, x.shape) : ($(x.shape.length <= 1, () => `Error in fused conv2d: only supports scalar or 1-D Tensor bias for NCHW format but got the bias of rank-${x.shape.length}.`), $(x.shape.length === 0 || x.shape[0] === g.outChannels || x.shape[0] === 1, () => `Error in fused conv2d: bias shape (${x.shape}) is not compatible with the number of output channels (${g.outChannels})`)));
  let b;
  // Validate optional PReLU weights (scalar, 1-D per-channel, or 3-D).
  if (u != null) {
    let _ = u.shape;
    if ($(_.length <= 1 || _.length === 3, () => `Error in fused conv2d: only supports scalar, 1-D Tensor or 3-D Tensor PReLU activation weights but got a tensor of rank-${_.length}.`), _.length === 1) $(_[0] === 1 || _[0] === g.outChannels, () => `Error in fused conv2d: PReLU activation weights (${_}) is not compatible with the number of output channels (${g.outChannels}).`);else if (_.length === 3) try {
      rt(_, g.outShape);
    } catch (E) {
      let R = `Error in fused conv2d: PReLU activation weights (${_}) is not compatible with the output shape of the conv2d (${g.outShape}).`;
      throw Error(R);
    }
    b = v(u, "prelu weights", "fused conv2d");
  }
  // Gradient function: E holds the tensors saved on the tape
  // ([filter, x, preActivation(, bias)] — order set by the save() calls below).
  let C = (_, E) => {
    $(n === "NHWC", () => `Error in gradient of fused conv2D: got dataFormat of ${n} but only NHWC is currently supported.`);
    let [R, D, P, O] = E,
      M = ep(_, P, p);
    $(Gu(s), () => `Error in gradient of fused conv2D: dilation rates greater than 1 are not yet supported in gradients. Got dilations '${s}'`);
    let L = xd(D.shape, M, R, e, o),
      B = Q1(D, M, R.shape, e, o),
      z = [L, B];
    if (O != null) {
      let U = tp(O, M);
      z.push(U);
    }
    return z;
  },
    S = {
      x: d,
      filter: m,
      bias: x,
      preluActivationWeights: b
    },
    k = {
      strides: e,
      pad: o,
      dataFormat: n,
      dilations: s,
      dimRoundingMode: a,
      activation: p,
      leakyreluAlpha: c
    };
  // Two customGrad variants: without bias (2 inputs) and with bias (3 inputs).
  return i == null ? vr((E, R, D) => {
    let P = T.runKernel(vo, S, k);
    return D([R, E, P]), f && (P = W(P, [P.shape[1], P.shape[2], P.shape[3]])), {
      value: P,
      gradFunc: C
    };
  })(d, m) : vr((E, R, D, P) => {
    let O = T.runKernel(vo, S, k);
    return P([R, E, O, D]), f && (O = W(O, [O.shape[1], O.shape[2], O.shape[3]])), {
      value: O,
      gradFunc: C
    };
  })(d, m, x);
}
// Exported fused conv2d op.
var Z1 = N({
  fusedConv2d_: K6
});
// depthwiseConv2dNativeBackpropFilter: gradient of depthwise conv2d w.r.t.
// the filter. r = input x, t8 = dy, e = filterShape, o = strides, n = pad,
// s = dilations, a = dimRoundingMode. Runs the `Mi` kernel.
function q6(r, t8, e, o, n, s = [1, 1], a) {
  // Promote rank-3 tensors to single-image batches.
  let i = r;
  r.rank === 3 && (i = W(r, [1, r.shape[0], r.shape[1], r.shape[2]]));
  let p = t8;
  p.rank === 3 && (p = W(t8, [1, t8.shape[0], t8.shape[1], t8.shape[2]]));
  let u = {
      x: i,
      dy: p
    },
    c = {
      strides: o,
      pad: n,
      dimRoundingMode: a,
      dilations: s,
      filterShape: e
    };
  return T.runKernel(Mi, u, c);
}
// Exported depthwiseConv2dNativeBackpropFilter op.
var J1 = N({
  depthwiseConv2dNativeBackpropFilter_: q6
});
// depthwiseConv2dNativeBackpropInput: gradient of depthwise conv2d w.r.t.
// the input. r = inputShape, t8 = dy, e = filter, o = strides, n = pad,
// s = dilations, a = dimRoundingMode. Runs the `Li` kernel.
function j6(r, t8, e, o, n, s = [1, 1], a) {
  let i = t8,
    p = false;
  // Promote rank-3 dy to a batch of one; squeeze the result back at the end.
  t8.rank === 3 && (p = true, i = W(t8, [1, t8.shape[0], t8.shape[1], t8.shape[2]]));
  let u = {
      dy: i,
      filter: e
    },
    c = {
      strides: o,
      pad: n,
      dimRoundingMode: a,
      dilations: s,
      inputShape: r
    },
    l = T.runKernel(Li, u, c);
  return p ? W(l, [l.shape[1], l.shape[2], l.shape[3]]) : l;
}
// Exported depthwiseConv2dNativeBackpropInput op.
var eN = N({
  depthwiseConv2dNativeBackpropInput_: j6
});
// fused depthwiseConv2d: depthwise conv + optional bias + activation in one
// kernel (`ko`), with a custom gradient. Mirrors the structure of fused
// conv2d (K6 above); falls back to unfused ops (`lc`) when no gradient is
// being recorded and the activation is not linear.
function X6({
  x: r,
  filter: t8,
  strides: e,
  pad: o,
  dataFormat: n = "NHWC",
  dilations: s = [1, 1],
  dimRoundingMode: a,
  bias: i,
  activation: p = "linear",
  preluActivationWeights: u,
  leakyreluAlpha: c
}) {
  // Fallback path: unfused depthwise conv + bias add + activation.
  if (op(T.state.gradientDepth, p) === false) {
    let k = lc(r, t8, e, o, n, s, a);
    return i != null && (k = Ce(k, i)), rp(k, p, u, c);
  }
  let l = v(r, "x", "depthwiseConv2d", "float32"),
    m = v(t8, "filter", "depthwiseConv2d", "float32"),
    d = l,
    f = false;
  // Promote rank-3 input to a batch of one; `f` remembers to squeeze later.
  l.rank === 3 && (f = true, d = W(l, [1, l.shape[0], l.shape[1], l.shape[2]])), $(d.rank === 4, () => `Error in fused depthwiseConv2d: input must be rank 4, but got rank ${d.rank}.`), $(m.rank === 4, () => `Error in fused depthwiseConv2d: filter must be rank 4, but got rank ${m.rank}.`), $(d.shape[3] === m.shape[2], () => `Error in fused depthwiseConv2d: number of input channels (${d.shape[3]}) must match the inChannels dimension in filter ${m.shape[2]}.`), s == null && (s = [1, 1]), $(xr(e, s), () => `Error in fused depthwiseConv2d: Either strides or dilations must be 1. Got strides ${e} and dilations '${s}'`), Bt("fused depthwiseConv2d", o, a);
  let h = Hu(d.shape, m.shape, e, s, o, a, true),
    g;
  i != null && (g = v(i, "bias", "fused conv2d"), [g] = Oe(g, l), rt(h.outShape, g.shape));
  let x;
  u != null && (x = v(u, "prelu weights", "fused depthwiseConv2d"));
  // Gradient function over the saved tensors [filter, x, preActivation(, bias)].
  let b = (k, _) => {
    $(Gu(s), () => `Error in gradient of fused depthwiseConv2d: dilation rates greater than 1 are not yet supported. Got dilations '${s}'`);
    let [E, R, D, P] = _,
      O = ep(k, D, p),
      M = eN(R.shape, O, E, e, o, s, a),
      L = J1(R, O, E.shape, e, o, s, a);
    if (P != null) {
      // NOTE(review): bias gradient uses the outer `g` rather than the saved
      // tape tensor `P` — appears intentional (same bias tensor); confirm.
      let B = tp(g, O);
      return [M, L, B];
    }
    return [M, L];
  },
    C = {
      x: d,
      filter: m,
      bias: g,
      preluActivationWeights: x
    },
    S = {
      strides: e,
      pad: o,
      dataFormat: n,
      dilations: s,
      dimRoundingMode: a,
      activation: p,
      leakyreluAlpha: c
    };
  // customGrad variants without / with bias.
  return i == null ? vr((_, E, R) => {
    let D = T.runKernel(ko, C, S);
    return R([E, _, D]), f && (D = W(D, [D.shape[1], D.shape[2], D.shape[3]])), {
      value: D,
      gradFunc: b
    };
  })(d, m) : vr((_, E, R, D) => {
    let P = T.runKernel(ko, C, S);
    return D([E, _, P, R]), f && (P = W(P, [P.shape[1], P.shape[2], P.shape[3]])), {
      value: P,
      gradFunc: b
    };
  })(d, m, g);
}
// Exported fused depthwiseConv2d op.
var tN = N({
  fusedDepthwiseConv2d_: X6
});
// fused matMul: batched matmul + optional bias + activation in one kernel
// (`Io`), with a custom gradient. Falls back to unfused `Ze` (matMul) +
// bias add + activation when no gradient is being recorded and the
// activation is not linear.
function Y6({
  a: r,
  b: t8,
  transposeA: e = false,
  transposeB: o = false,
  bias: n,
  activation: s = "linear",
  preluActivationWeights: a,
  leakyreluAlpha: i = 0.2
}) {
  // Fallback (unfused) path.
  if (op(T.state.gradientDepth, s) === false) {
    let O = Ze(r, t8, e, o);
    return n != null && (O = Ce(O, n)), rp(O, s, a, i);
  }
  let p = v(r, "a", "fused matMul"),
    u = v(t8, "b", "fused matMul");
  [p, u] = Oe(p, u);
  // Inner / outer dimensions under the requested transpositions.
  let c = e ? p.shape[p.rank - 2] : p.shape[p.rank - 1],
    l = o ? u.shape[u.rank - 1] : u.shape[u.rank - 2],
    m = e ? p.shape[p.rank - 1] : p.shape[p.rank - 2],
    d = o ? u.shape[u.rank - 2] : u.shape[u.rank - 1],
    f = p.shape.slice(0, -2),
    h = u.shape.slice(0, -2),
    g = He(f),
    x = He(h);
  $(c === l, () => `Error in fused matMul: inner shapes (${c}) and (${l}) of Tensors with shapes ${p.shape} and ${u.shape} and transposeA=${e} and transposeB=${o} must match.`);
  // C = broadcast output shape; S/k = inputs collapsed to rank-3 batches.
  let C = rt(p.shape.slice(0, -2), u.shape.slice(0, -2)).concat([m, d]),
    S = e ? W(p, [g, c, m]) : W(p, [g, m, c]),
    k = o ? W(u, [x, d, l]) : W(u, [x, l, d]),
    _;
  n != null && (_ = v(n, "bias", "fused matMul"), [_] = Oe(_, p), rt(C, _.shape));
  let E;
  a != null && (E = v(a, "prelu weights", "fused matMul"));
  // Gradient over saved tensors [a, b, preActivation(, bias)]; the four
  // transpose combinations each get their own dA/dB formulation.
  let R = (O, M) => {
    let [L, B, z, U] = M,
      j = ep(W(O, z.shape), z, s),
      q,
      Y;
    if (!e && !o ? (q = Ze(j, B, false, true), Y = Ze(L, j, true, false)) : !e && o ? (q = Ze(j, B, false, false), Y = Ze(j, L, true, false)) : e && !o ? (q = Ze(B, j, false, true), Y = Ze(L, j, false, false)) : (q = Ze(B, j, true, true), Y = Ze(j, L, true, true)), n != null) {
      let J = tp(U, j);
      return [q, Y, J];
    } else return [q, Y];
  },
    D = {
      a: S,
      b: k,
      bias: _,
      preluActivationWeights: E
    },
    P = {
      transposeA: e,
      transposeB: o,
      activation: s,
      leakyreluAlpha: i
    };
  // customGrad variants without / with bias; result reshaped to broadcast shape C.
  return n == null ? vr((M, L, B) => {
    let z = T.runKernel(Io, D, P);
    return B([M, L, z]), {
      value: W(z, C),
      gradFunc: R
    };
  })(S, k) : vr((M, L, B, z) => {
    let U = T.runKernel(Io, D, P);
    return z([M, L, U, B]), {
      value: W(U, C),
      gradFunc: R
    };
  })(S, k, _);
}
// Exported fused matMul op.
var rN = N({
  fusedMatMul_: Y6
});
// Hamming window of length r: 0.54 - 0.46 * cos(phase), via the generalized
// cosine window helper Rl above.
function Q6(r) {
  return Rl(r, 0.54, 0.46);
}
// Exported hammingWindow op.
var oN = N({
  hammingWindow_: Q6
});
// Hann window of length r: 0.5 - 0.5 * cos(phase), via Rl above.
function Z6(r) {
  return Rl(r, 0.5, 0.5);
}
// Exported hannWindow op.
var Zd = N({
  hannWindow_: Z6
});
// signal.frame: slices the 1-D signal `r` into frames of length `t8` with
// hop `e`. When o (padEnd) is true, a final partial frame is padded with the
// value `n`. Returns a [numFrames, frameLength] tensor.
function J6(r, t8, e, o = false, n = 0) {
  let s = 0,
    a = [];
  // Full frames.
  for (; s + t8 <= r.size;) a.push(Xe(r, s, t8)), s += e;
  // Optional padded tail frames (`Ea` presumably fills a tensor with n).
  if (o) for (; s < r.size;) {
    let i = s + t8 - r.size,
      p = bt([Xe(r, s, t8 - i), Ea([i], n)]);
    a.push(p), s += e;
  }
  // Empty result keeps a [0, frameLength] shape.
  return a.length === 0 ? fu([], [0, t8]) : W(bt(a), [a.length, t8]);
}
// Exported signal.frame op.
var Jd = N({
  frame_: J6
});
// signal.stft: short-time Fourier transform of signal `r` with frame length
// t8, hop e, fft length o (default: next power of two of t8, see Qw), and
// window function n (default Hann). `hc` presumably computes the rfft.
function ej(r, t8, e, o, n = Zd) {
  o == null && (o = Qw(t8));
  let s = Jd(r, t8, e),
    a = se(s, n(t8));
  return hc(a, o);
}
// Exported stft op.
var nN = N({
  stft_: ej
});
// image.cropAndResize: crops boxes `t8` (normalized [y1,x1,y2,x2]) from the
// batch of images `r` (indexed per-box by `e` = boxInd) and resizes each crop
// to `o` = cropSize, with method n ("bilinear"|"nearest") and extrapolation
// value s. Runs the `ln` kernel.
function tj(r, t8, e, o, n = "bilinear", s = 0) {
  let a = v(r, "image", "cropAndResize"),
    i = v(t8, "boxes", "cropAndResize", "float32"),
    p = v(e, "boxInd", "cropAndResize", "int32"),
    u = i.shape[0];
  $(a.rank === 4, () => `Error in cropAndResize: image must be rank 4,but got rank ${a.rank}.`), $(i.rank === 2 && i.shape[1] === 4, () => `Error in cropAndResize: boxes must be have size [${u},4] but had shape ${i.shape}.`), $(p.rank === 1 && p.shape[0] === u, () => `Error in cropAndResize: boxInd must be have size [${u}] but had shape ${i.shape}.`), $(o.length === 2, () => `Error in cropAndResize: cropSize must be of length 2, but got length ${o.length}.`), $(o[0] >= 1 && o[1] >= 1, () => `cropSize must be atleast [1,1], but was ${o}`), $(n === "bilinear" || n === "nearest", () => `method must be bilinear or nearest, but was ${n}`);
  let c = {
      image: a,
      boxes: i,
      boxInd: p
    },
    l = {
      method: n,
      extrapolationValue: s,
      cropSize: o
    };
  return T.runKernel(ln, c, l);
}
// Exported cropAndResize op.
var sN = N({
  cropAndResize_: tj
});
// image.flipLeftRight: horizontally mirrors a rank-4 batch of images via the
// `wn` kernel.
function rj(r) {
  let t8 = v(r, "image", "flipLeftRight", "float32");
  $(t8.rank === 4, () => `Error in flipLeftRight: image must be rank 4,but got rank ${t8.rank}.`);
  let e = {
    image: t8
  };
  return T.runKernel(wn, e, {});
}
// Exported flipLeftRight op.
var aN = N({
  flipLeftRight_: rj
});
// image.grayscaleToRGB: tiles the trailing single channel to 3 channels
// (`cu` is presumably tile — TODO confirm).
function oj(r) {
  let t8 = v(r, "image", "grayscaleToRGB"),
    e = t8.rank - 1,
    o = t8.shape[e];
  $(t8.rank >= 2, () => `Error in grayscaleToRGB: images must be at least rank 2, but got rank ${t8.rank}.`), $(o === 1, () => `Error in grayscaleToRGB: last dimension of a grayscale image should be size 1, but got size ${o}.`);
  // Tile reps: 1 everywhere except 3 on the channel axis.
  let n = new Array(t8.rank);
  return n.fill(1, 0, e), n[e] = 3, cu(t8, n);
}
// Exported grayscaleToRGB op.
var iN = N({
  grayscaleToRGB_: oj
});
// image.rgbToGrayscale: contracts the trailing RGB channel with the standard
// luma weights [0.2989, 0.587, 0.114] via einsum (`pu`), one einsum equation
// per supported rank (2..6), then restores a trailing channel of size 1 and
// the original dtype.
function nj(r) {
  let t8 = v(r, "image", "RGBToGrayscale"),
    e = t8.rank - 1,
    o = t8.shape[e];
  $(t8.rank >= 2, () => `Error in RGBToGrayscale: images must be at least rank 2, but got rank ${t8.rank}.`), $(o === 3, () => `Error in RGBToGrayscale: last dimension of an RGB image should be size 3, but got size ${o}.`);
  let n = t8.dtype,
    s = We(t8, "float32"),
    a = tr([0.2989, 0.587, 0.114]),
    i;
  switch (t8.rank) {
    case 2:
      i = pu("ij,j->i", s, a);
      break;
    case 3:
      i = pu("ijk,k->ij", s, a);
      break;
    case 4:
      i = pu("ijkl,l->ijk", s, a);
      break;
    case 5:
      i = pu("ijklm,m->ijkl", s, a);
      break;
    case 6:
      i = pu("ijklmn,n->ijklm", s, a);
      break;
    default:
      throw new Error("Not a valid tensor rank.");
  }
  // Re-add the channel axis (`Ms` presumably expandDims) and cast back.
  return i = Ms(i, -1), We(i, n);
}
// Exported rgbToGrayscale op.
var uN = N({
  rgbToGrayscale_: nj
});
// image.rotateWithOffset: rotates a rank-4 image batch by t8 radians around
// center `o` (normalized), filling uncovered pixels with `e`. Runs the `As`
// kernel.
function sj(r, t8, e = 0, o = 0.5) {
  let n = v(r, "image", "rotateWithOffset", "float32");
  $(n.rank === 4, () => `Error in rotateWithOffset: image must be rank 4,but got rank ${n.rank}.`);
  let s = {
      image: n
    },
    a = {
      radians: t8,
      fillValue: e,
      center: o
    };
  return T.runKernel(As, s, a);
}
// Exported rotateWithOffset op.
var pN = N({
  rotateWithOffset_: sj
});
// Shared sanity check for all nonMaxSuppression variants below.
// r = boxes [numBoxes, 4], t8 = scores [numBoxes], e = maxOutputSize,
// o = iouThreshold (default 0.5), n = scoreThreshold (default -Infinity),
// s = softNmsSigma (default 0). Returns the normalized parameter bundle.
function Eo(r, t8, e, o, n, s) {
  o == null && (o = 0.5), n == null && (n = Number.NEGATIVE_INFINITY), s == null && (s = 0);
  let a = r.shape[0];
  // maxOutputSize can never exceed the number of candidate boxes.
  return e = Math.min(e, a), $(0 <= o && o <= 1, () => `iouThreshold must be in [0, 1], but was '${o}'`), $(r.rank === 2, () => `boxes must be a 2D tensor, but was of rank '${r.rank}'`), $(r.shape[1] === 4, () => `boxes must have 4 columns, but 2nd dimension was ${r.shape[1]}`), $(t8.rank === 1, () => "scores must be a 1D tensor"), $(t8.shape[0] === a, () => `scores has incompatible shape with boxes. Expected ${a}, but was ${t8.shape[0]}`), $(0 <= s && s <= 1, () => `softNmsSigma must be in [0, 1], but was '${s}'`), {
    maxOutputSize: e,
    iouThreshold: o,
    scoreThreshold: n,
    softNmsSigma: s
  };
}
// image.nonMaxSuppression (sync, kernel-backed): selects box indices from
// `r` scored by `t8`, suppressing overlaps above the IoU threshold. Runs the
// `Zn` kernel.
function aj(r, t8, e, o = 0.5, n = Number.NEGATIVE_INFINITY) {
  let s = v(r, "boxes", "nonMaxSuppression", "float32"),
    a = v(t8, "scores", "nonMaxSuppression", "float32"),
    i = Eo(s, a, e, o, n);
  e = i.maxOutputSize, o = i.iouThreshold, n = i.scoreThreshold;
  let p = {
    maxOutputSize: e,
    iouThreshold: o,
    scoreThreshold: n
  };
  return T.runKernel(Zn, {
    boxes: s,
    scores: a
  }, p);
}
// Exported nonMaxSuppression op.
var cN = N({
  nonMaxSuppression_: aj
});
// Insert `t8` into the sorted array `r` at the position reported by the
// binary search `ij` (negative results encode the insertion point as
// -(index + 1)), keeping `r` sorted under comparator `e`.
function lN(r, t8, e) {
  const searchResult = ij(r, t8, e);
  const insertionIndex = searchResult < 0 ? -(searchResult + 1) : searchResult;
  r.splice(insertionIndex, 0, t8);
}
// Binary search in sorted array `r` for `t8`; falls back to the default
// ascending comparator `uj` when no comparator is supplied.
function ij(r, t8, e) {
  const comparator = e || uj;
  return pj(r, t8, comparator);
}
// Default ascending comparator: 1 / -1 / 0 for greater / less / equal.
function uj(r, t8) {
  if (r > t8) {
    return 1;
  }
  if (r < t8) {
    return -1;
  }
  return 0;
}
// Binary search of `t8` in sorted array `r` using comparator `e`.
// Returns the index of a match, or -(insertionPoint + 1) when absent
// (java.util.Arrays.binarySearch convention).
function pj(r, t8, e) {
  let lo = 0;
  let hi = r.length;
  let mid = 0;
  let found = false;
  while (lo < hi) {
    // Overflow-safe midpoint.
    mid = lo + (hi - lo >>> 1);
    const cmp = e(t8, r[mid]);
    if (cmp > 0) {
      lo = mid + 1;
    } else {
      hi = mid;
      // Exact match when the comparator returned 0.
      found = !cmp;
    }
  }
  return found ? lo : -lo - 1;
}
// Plain NMS (CPU impl): softNmsSigma fixed to 0; returns {selectedIndices}.
function ef(r, t8, e, o, n) {
  return Jw(r, t8, e, o, n, 0);
}
// Padded NMS (CPU impl): s = padToMaxOutputSize flag; also returns
// validOutputs (trailing Jw flags: pad = s, returnValidOutputs = true).
function tf(r, t8, e, o, n, s) {
  return Jw(r, t8, e, o, n, 0, false, s, true);
}
// Soft-NMS (CPU impl): s = softNmsSigma; also returns selectedScores
// (Jw returnScores flag = true).
function rf(r, t8, e, o, n, s) {
  return Jw(r, t8, e, o, n, s, true);
}
// Core CPU non-max-suppression (shared by ef/tf/rf above).
// r = flat boxes array (4 floats per box), t8 = scores, e = maxOutputSize,
// o = iouThreshold, n = scoreThreshold, s = softNmsSigma,
// a = return selectedScores, i = pad output to maxOutputSize,
// p = return validOutputs count.
function Jw(r, t8, e, o, n, s, a = false, i = false, p = false) {
  // Candidate list: boxes whose score clears the threshold.
  let u = [];
  for (let g = 0; g < t8.length; g++) t8[g] > n && u.push({
    score: t8[g],
    boxIndex: g,
    suppressBeginIndex: 0
  });
  // Sorted ascending by score (mN), so pop() yields the best candidate.
  u.sort(mN);
  // Soft-NMS gaussian scale (0 disables score decay).
  let c = s > 0 ? -0.5 / s : 0,
    l = [],
    m = [];
  for (; l.length < e && u.length > 0;) {
    let g = u.pop(),
      {
        score: x,
        boxIndex: b,
        suppressBeginIndex: C
      } = g;
    if (x < n) break;
    // Re-check overlap only against boxes selected since this candidate was
    // last scored (suppressBeginIndex), decaying its score per overlap.
    let S = false;
    for (let k = l.length - 1; k >= C; --k) {
      let _ = cj(r, b, l[k]);
      if (_ >= o) {
        S = true;
        break;
      }
      if (g.score = g.score * lj(o, c, _), g.score <= n) break;
    }
    // If the score was decayed, re-insert the candidate in sorted order
    // instead of selecting it now.
    g.suppressBeginIndex = l.length, S || (g.score === x ? (l.push(b), m.push(g.score)) : g.score > n && lN(u, g, mN));
  }
  // Optional zero-padding up to maxOutputSize.
  let d = l.length,
    f = e - d;
  i && f > 0 && (l.push(...new Array(f).fill(0)), m.push(...new Array(f).fill(0)));
  let h = {
    selectedIndices: l
  };
  return a && (h.selectedScores = m), p && (h.validOutputs = d), h;
}
// Intersection-over-union of boxes t8 and e in the flat coordinate array `r`
// (4 floats per box). Corner order within a box is normalized with min/max,
// so [y1,x1,y2,x2] and [y2,x2,y1,x1] are handled alike. Degenerate boxes
// (zero/negative area) yield 0.
function cj(r, t8, e) {
  const boxA = r.subarray(t8 * 4, t8 * 4 + 4);
  const boxB = r.subarray(e * 4, e * 4 + 4);
  const aMin0 = Math.min(boxA[0], boxA[2]);
  const aMin1 = Math.min(boxA[1], boxA[3]);
  const aMax0 = Math.max(boxA[0], boxA[2]);
  const aMax1 = Math.max(boxA[1], boxA[3]);
  const bMin0 = Math.min(boxB[0], boxB[2]);
  const bMin1 = Math.min(boxB[1], boxB[3]);
  const bMax0 = Math.max(boxB[0], boxB[2]);
  const bMax1 = Math.max(boxB[1], boxB[3]);
  const areaA = (aMax0 - aMin0) * (aMax1 - aMin1);
  const areaB = (bMax0 - bMin0) * (bMax1 - bMin1);
  if (areaA <= 0 || areaB <= 0) {
    return 0;
  }
  const overlapMin0 = Math.max(aMin0, bMin0);
  const overlapMin1 = Math.max(aMin1, bMin1);
  const overlapMax0 = Math.min(aMax0, bMax0);
  const overlapMax1 = Math.min(aMax1, bMax1);
  const intersection = Math.max(overlapMax0 - overlapMin0, 0) * Math.max(overlapMax1 - overlapMin1, 0);
  return intersection / (areaA + areaB - intersection);
}
// Soft-NMS suppression weight: exp(scale * iou^2) while the IoU `e` is at or
// below the threshold `r`, else 0 (hard suppression). `t8` is the
// precomputed -0.5/sigma scale (0 when soft-NMS is disabled).
function lj(r, t8, e) {
  const gaussianWeight = Math.exp(t8 * e * e);
  return e <= r ? gaussianWeight : 0;
}
// Candidate comparator for the NMS queue: ascending by score, with equal
// scores ordered by descending boxIndex (so pop() prefers the lower index).
// Keeps the original `||`/`&&` form so edge cases (e.g. NaN scores) behave
// identically.
function mN(r, t8) {
  const scoreDiff = r.score - t8.score;
  if (scoreDiff) {
    return scoreDiff;
  }
  return r.score === t8.score && t8.boxIndex - r.boxIndex;
}
// image.nonMaxSuppressionAsync: downloads boxes/scores to the CPU and runs
// the plain NMS impl (ef); returns an int32 tensor of selected indices.
async function mj(r, t8, e, o = 0.5, n = Number.NEGATIVE_INFINITY) {
  let s = v(r, "boxes", "nonMaxSuppressionAsync"),
    a = v(t8, "scores", "nonMaxSuppressionAsync"),
    i = Eo(s, a, e, o, n);
  e = i.maxOutputSize, o = i.iouThreshold, n = i.scoreThreshold;
  let p = await Promise.all([s.data(), a.data()]),
    u = p[0],
    c = p[1],
    {
      selectedIndices: l
    } = ef(u, c, e, o, n);
  // Dispose tensors created by v(); keep caller-owned inputs.
  return s !== r && s.dispose(), a !== t8 && a.dispose(), tr(l, "int32");
}
// Exported nonMaxSuppressionAsync (async: exported directly, not via N()).
var dN = mj;
// image.nonMaxSuppressionWithScore: soft-NMS variant returning both the
// selected indices and their (possibly decayed) scores. Runs the `Jn` kernel.
function dj(r, t8, e, o = 0.5, n = Number.NEGATIVE_INFINITY, s = 0) {
  let a = v(r, "boxes", "nonMaxSuppression"),
    i = v(t8, "scores", "nonMaxSuppression"),
    p = Eo(a, i, e, o, n, s);
  e = p.maxOutputSize, o = p.iouThreshold, n = p.scoreThreshold, s = p.softNmsSigma;
  let u = {
      boxes: a,
      scores: i
    },
    c = {
      maxOutputSize: e,
      iouThreshold: o,
      scoreThreshold: n,
      softNmsSigma: s
    },
    // Kernel returns a pair: [selectedIndices, selectedScores].
    l = T.runKernel(Jn, u, c);
  return {
    selectedIndices: l[0],
    selectedScores: l[1]
  };
}
// Exported nonMaxSuppressionWithScore op.
var fN = N({
  nonMaxSuppressionWithScore_: dj
});
// image.nonMaxSuppressionWithScoreAsync: CPU soft-NMS (rf) over downloaded
// boxes/scores; returns {selectedIndices, selectedScores} as tensors.
async function fj(r, t8, e, o = 0.5, n = Number.NEGATIVE_INFINITY, s = 0) {
  let a = v(r, "boxes", "nonMaxSuppressionAsync"),
    i = v(t8, "scores", "nonMaxSuppressionAsync"),
    p = Eo(a, i, e, o, n, s);
  e = p.maxOutputSize, o = p.iouThreshold, n = p.scoreThreshold, s = p.softNmsSigma;
  let u = await Promise.all([a.data(), i.data()]),
    c = u[0],
    l = u[1],
    {
      selectedIndices: m,
      selectedScores: d
    } = rf(c, l, e, o, n, s);
  // Dispose tensors created by v(); keep caller-owned inputs.
  return a !== r && a.dispose(), i !== t8 && i.dispose(), {
    selectedIndices: tr(m, "int32"),
    selectedScores: tr(d)
  };
}
// Exported nonMaxSuppressionWithScoreAsync.
var hN = fj;
// image.nonMaxSuppressionPadded: NMS whose output can be zero-padded to
// maxOutputSize (s = padToMaxOutputSize); also returns the count of valid
// entries. Runs the `Za` kernel. softNmsSigma is forced to null here.
function hj(r, t8, e, o = 0.5, n = Number.NEGATIVE_INFINITY, s = false) {
  let a = v(r, "boxes", "nonMaxSuppression"),
    i = v(t8, "scores", "nonMaxSuppression"),
    p = Eo(a, i, e, o, n, null),
    u = p.maxOutputSize,
    c = p.iouThreshold,
    l = p.scoreThreshold,
    m = {
      boxes: a,
      scores: i
    },
    d = {
      maxOutputSize: u,
      iouThreshold: c,
      scoreThreshold: l,
      padToMaxOutputSize: s
    },
    // Kernel returns a pair: [selectedIndices, validOutputs].
    f = T.runKernel(Za, m, d);
  return {
    selectedIndices: f[0],
    validOutputs: f[1]
  };
}
// Exported nonMaxSuppressionPadded op.
var gN = N({
  nonMaxSuppressionPadded_: hj
});
// image.nonMaxSuppressionPaddedAsync: CPU padded NMS (tf) over downloaded
// boxes/scores; returns {selectedIndices, validOutputs} as tensors.
async function gj(r, t8, e, o = 0.5, n = Number.NEGATIVE_INFINITY, s = false) {
  let a = v(r, "boxes", "nonMaxSuppressionAsync"),
    i = v(t8, "scores", "nonMaxSuppressionAsync"),
    p = Eo(a, i, e, o, n, null),
    u = p.maxOutputSize,
    c = p.iouThreshold,
    l = p.scoreThreshold,
    [m, d] = await Promise.all([a.data(), i.data()]),
    {
      selectedIndices: f,
      validOutputs: h
    } = tf(m, d, u, c, l, s);
  // Dispose tensors created by v(); keep caller-owned inputs.
  return a !== r && a.dispose(), i !== t8 && i.dispose(), {
    selectedIndices: tr(f, "int32"),
    validOutputs: ke(h, "int32")
  };
}
// Exported nonMaxSuppressionPaddedAsync.
var xN = gj;
// image.resizeBilinear: resizes a rank-3/4 image tensor to size t8
// ([height, width]) with optional alignCorners (e) / halfPixelCenters (o).
// Runs the `us` kernel.
function xj(r, t8, e = false, o = false) {
  let n = v(r, "images", "resizeBilinear");
  $(n.rank === 3 || n.rank === 4, () => `Error in resizeBilinear: x must be rank 3 or 4, but got rank ${n.rank}.`), $(t8.length === 2, () => `Error in resizeBilinear: new shape must 2D, but got shape ${t8}.`), $(o === false || e === false, () => "Error in resizeBilinear: If halfPixelCenters is true, alignCorners must be false.");
  let s = n,
    a = false;
  // Promote rank-3 input to a batch of one; squeeze back at the end.
  n.rank === 3 && (a = true, s = W(n, [1, n.shape[0], n.shape[1], n.shape[2]]));
  // NOTE(review): empty destructuring of t8 — a down-levelling artifact from
  // an unused `const [h, w] = size`; it only asserts t8 is iterable.
  let [] = t8,
    i = {
      images: s
    },
    p = {
      alignCorners: e,
      halfPixelCenters: o,
      size: t8
    },
    u = T.runKernel(us, i, p);
  return a ? W(u, [u.shape[1], u.shape[2], u.shape[3]]) : u;
}
// Exported resizeBilinear op.
var yN = N({
  resizeBilinear_: xj
});
// image.resizeNearestNeighbor: like resizeBilinear above but with
// nearest-neighbor sampling; additionally restricted to float32/int32 input.
// Runs the `is` kernel.
function yj(r, t8, e = false, o = false) {
  let n = v(r, "images", "resizeNearestNeighbor");
  $(n.rank === 3 || n.rank === 4, () => `Error in resizeNearestNeighbor: x must be rank 3 or 4, but got rank ${n.rank}.`), $(t8.length === 2, () => `Error in resizeNearestNeighbor: new shape must 2D, but got shape ${t8}.`), $(n.dtype === "float32" || n.dtype === "int32", () => "`images` must have `int32` or `float32` as dtype"), $(o === false || e === false, () => "Error in resizeNearestNeighbor: If halfPixelCenters is true, alignCorners must be false.");
  let s = n,
    a = false;
  // Promote rank-3 input to a batch of one; squeeze back at the end.
  n.rank === 3 && (a = true, s = W(n, [1, n.shape[0], n.shape[1], n.shape[2]]));
  // NOTE(review): empty destructuring of t8 — down-levelling artifact (see
  // resizeBilinear above); only asserts t8 is iterable.
  let [] = t8,
    i = {
      images: s
    },
    p = {
      alignCorners: e,
      halfPixelCenters: o,
      size: t8
    },
    u = T.runKernel(is, i, p);
  return a ? W(u, [u.shape[1], u.shape[2], u.shape[3]]) : u;
}
// Exported resizeNearestNeighbor op.
var bN = N({
  resizeNearestNeighbor_: yj
});
// image.threshold: binarizes a rank-3 image. RGB input is first reduced to
// luma with weights (0.2989, 0.587, 0.114); "otsu" method derives the
// threshold from a 256-bin histogram (Cj below), otherwise `o` * 255 is
// used. `e` inverts the comparison. Returns an int32 image of 0/255.
function bj(r, t8 = "binary", e = false, o = 0.5) {
  let n = v(r, "image", "threshold"),
    s = 0.2989,
    a = 0.587,
    i = 0.114,
    p = n.shape[0] * n.shape[1],
    u = se(tr([o]), 255),
    c,
    l,
    m,
    d;
  if ($(n.rank === 3, () => `Error in threshold: image must be rank 3,but got rank ${n.rank}.`), $(n.shape[2] === 3 || n.shape[2] === 1, () => `Error in threshold: image color channel must be equal to 3 or 1but got ${n.shape[2]}.`), $(n.dtype === "int32" || n.dtype === "float32", () => `Error in dtype: image dtype must be int32 or float32,but got dtype ${n.dtype}.`), $(t8 === "otsu" || t8 === "binary", () => `Method must be binary or otsu, but was ${t8}`), n.shape[2] === 3) {
    // Split channels and combine into grayscale (`li` presumably split).
    [c, l, m] = li(n, [1, 1, 1], -1);
    let g = se(c, s),
      x = se(l, a),
      b = se(m, i);
    d = Ce(Ce(g, x), b);
  } else d = r;
  if (t8 === "otsu") {
    // 256-bin histogram of the rounded grayscale image (`gd` presumably
    // bincount — TODO confirm), then Otsu threshold search.
    let g = gd(We(Hd(d), "int32"), ur([]), 256);
    u = Cj(g, p);
  }
  // e=true inverts: keep pixels below the threshold instead of above.
  let f = e ? mc(d, u) : qu(d, u);
  return We(se(f, 255), "int32");
}
// Otsu's method: scans all split points of the 256-bin histogram `r`
// (t8 = total pixel count) and returns the bin index (as a tensor) that
// maximizes the between-class variance w0*w1*(mu0-mu1)^2.
function Cj(r, t8) {
  let e = tr([-1]),
    o = tr([0]),
    n = tr([0]),
    s,
    a,
    i,
    p,
    u,
    c;
  for (let l = 0; l < r.size - 1; l++) {
    // s/a = histogram halves below/above the split; u/c = class weights.
    s = Xe(r, 0, l + 1), a = Xe(r, l + 1), u = je(ot(s), t8), c = je(ot(a), t8);
    // i = mean of the lower class (bin-index weighted).
    let m = ot(se(s, mu(0, s.size)));
    i = je(m, ot(s));
    // p = mean of the upper class (indices offset by the split position).
    let d = Ea(a.shape, s.size),
      f = Ce(mu(0, a.size), d),
      h = se(a, f);
    p = je(ot(h), ot(a));
    // Between-class variance: (u*c) * (i-p) * (i-p).
    let g = Te(i, p),
      x = Te(i, p),
      b = se(u, c);
    n = se(se(b, g), x);
    // Keep the best split seen so far.
    let C = qu(n, o);
    o = lo(C, n, o), e = lo(C, tr([l]), e);
  }
  return e;
}
// Exported threshold op.
var CN = N({
  threshold_: bj
});
// image.transform: applies projective transforms `t8` (batch x 8 or 1 x 8)
// to a rank-4 image batch, with interpolation e ("nearest" default),
// fillMode o, fillValue n, and optional output shape s. Runs the `Ds` kernel.
function wj(r, t8, e = "nearest", o = "constant", n = 0, s) {
  let a = v(r, "image", "transform", "float32"),
    i = v(t8, "transforms", "transform", "float32");
  $(a.rank === 4, () => `Error in transform: image must be rank 4,but got rank ${a.rank}.`), $(i.rank === 2 && (i.shape[0] === a.shape[0] || i.shape[0] === 1) && i.shape[1] === 8, () => "Error in transform: Input transform should be batch x 8 or 1 x 8"), $(s == null || s.length === 2, () => `Error in transform: outputShape must be [height, width] or null, but got ${s}.`);
  let p = {
      image: a,
      transforms: i
    },
    u = {
      interpolation: e,
      fillMode: o,
      fillValue: n,
      outputShape: s
    };
  return T.runKernel(Ds, p, u);
}
// Exported transform op.
var wN = N({
  transform_: wj
});
// linalg.bandPart: zeroes everything outside a band of t8 sub-diagonals and
// e super-diagonals in the trailing 2-D slices of `r`. Negative or >= dim
// band sizes keep the whole triangle; t8/e may also be int32 scalar tensors.
function Sj(r, t8, e) {
  let o = v(r, "a", "bandPart");
  $(o.rank >= 2, () => `bandPart(): Rank must be at least 2, got ${o.rank}.`);
  let n = o.shape,
    [s, a] = o.shape.slice(-2),
    i,
    p;
  // Normalize numLower/numUpper: numbers are validated and clamped
  // (negative means "whole triangle"); tensors are clamped via select/min.
  typeof t8 == "number" ? ($(t8 % 1 === 0, () => `bandPart(): numLower must be an integer, got ${t8}.`), $(t8 <= s, () => `bandPart(): numLower (${t8}) must not be greater than the number of rows (${s}).`), i = v(t8 < 0 ? s : t8, "numLower", "bandPart")) : ($(t8.dtype === "int32", () => "bandPart(): numLower's dtype must be an int32."), i = lo(_l(t8, 0), s, Yu(t8, s))), typeof e == "number" ? ($(e % 1 === 0, () => `bandPart(): numUpper must be an integer, got ${e}.`), $(e <= a, () => `bandPart(): numUpper (${e}) must not be greater than the number of columns (${a}).`), p = v(e < 0 ? a : e, "numUpper", "bandPart")) : ($(e.dtype === "int32", () => "bandPart(): numUpper's dtype must be an int32."), p = lo(_l(e, 0), a, Yu(e, a)));
  // Build the in-band mask from (rowIndex - colIndex) and apply it to every
  // trailing 2-D slice, then restore the original shape.
  let u = W(mu(0, s, 1, "int32"), [-1, 1]),
    c = mu(0, a, 1, "int32"),
    l = Te(u, c),
    m = ju(mc(l, i), vd(l, cr(p))),
    d = Gr([s, a], o.dtype);
  return W(kr(fo(W(o, [-1, s, a])).map(f => lo(m, f, d))), n);
}
// Exported bandPart op.
var SN = N({
  bandPart_: Sj
});
// linalg.gramSchmidt: orthogonalizes a list of 1-D vectors (or the rows of a
// 2-D tensor) via classical Gram-Schmidt, normalizing each result to unit
// euclidean norm. Returns the same container kind as the input.
function Ij(r) {
  let t8;
  if (Array.isArray(r)) {
    t8 = false, $(r != null && r.length > 0, () => "Gram-Schmidt process: input must not be null, undefined, or empty");
    let n = r[0].shape[0];
    for (let s = 1; s < r.length; ++s) $(r[s].shape[0] === n, () => `Gram-Schmidt: Non-unique lengths found in the input vectors: (${r[s].shape[0]} vs. ${n})`);
  } else t8 = true, r = li(r, r.shape[0], 0).map(n => gc(n, [0]));
  $(r.length <= r[0].shape[0], () => `Gram-Schmidt: Number of vectors (${r.length}) exceeds number of dimensions (${r[0].shape[0]}).`);
  let e = [],
    o = r;
  // Subtract the projection onto every previously-orthogonalized vector,
  // then normalize. Each step runs inside tidy() to free intermediates.
  for (let n = 0; n < r.length; ++n) e.push(T.tidy(() => {
    let s = o[n];
    if (n > 0) for (let a = 0; a < n; ++a) {
      let i = se(ot(se(e[a], s)), e[a]);
      s = Te(s, i);
    }
    return je(s, Ku(s, "euclidean"));
  }));
  // 2-D input is re-stacked into a single tensor.
  return t8 ? kr(e, 0) : e;
}
// Exported gramSchmidt op.
var IN = N({
  gramSchmidt_: Ij
});
// linalg.qr: QR decomposition. Rank-2 input goes straight to the 2-D routine
// (vN below); higher ranks are unstacked into a batch of matrices,
// decomposed individually, and restacked. t8 = fullMatrices flag.
function vj(r, t8 = false) {
  if ($(r.rank >= 2, () => `qr() requires input tensor to have a rank >= 2, but got rank ${r.rank}`), r.rank === 2) return vN(r, t8);
  {
    // Collapse all leading dims into one batch dim, decompose each slice.
    let e = r.shape.slice(0, r.shape.length - 2).reduce((p, u) => p * u),
      o = fo(W(r, [e, r.shape[r.shape.length - 2], r.shape[r.shape.length - 1]]), 0),
      n = [],
      s = [];
    o.forEach(p => {
      let [u, c] = vN(p, t8);
      n.push(u), s.push(c);
    });
    let a = W(kr(n, 0), r.shape),
      i = W(kr(s, 0), r.shape);
    return [a, i];
  }
}
// 2-D QR via Householder reflections: returns [Q, R] with Q orthogonal and
// R upper-triangular. t8 = fullMatrices; when false and rows > cols, Q and R
// are trimmed to the economy sizes. Each iteration rebuilds one column's
// reflector and updates R (s) and Q (n) in place, disposing the previous
// iterates (`Mt` presumably dispose — TODO confirm).
function vN(r, t8 = false) {
  return T.tidy(() => {
    $(r.shape.length === 2, () => `qr2d() requires a 2D Tensor, but got a ${r.shape.length}D Tensor.`);
    let e = r.shape[0],
      o = r.shape[1],
      // n = Q (starts as identity, `wd` presumably eye), s = R (copy of r).
      n = wd(e),
      s = Ur(r),
      a = fu([[1]], [1, 1]),
      i = Ur(a),
      p = e >= o ? o : e;
    for (let u = 0; u < p; ++u) {
      // Keep references to the previous iterates so they can be disposed
      // after the tidy below replaces them.
      let c = s,
        l = i,
        m = n;
      [i, s, n] = T.tidy(() => {
        // Householder vector for column u of the trailing submatrix:
        // d = column tail, f = its norm, g = sign correction, x = alpha.
        let d = Xe(s, [u, u], [e - u, 1]),
          f = Ku(d),
          h = Xe(s, [u, u], [1, 1]),
          g = lo(qu(h, 0), fu([[-1]]), fu([[1]])),
          x = Te(h, se(g, f)),
          b = je(d, x);
        b.shape[0] === 1 ? i = Ur(a) : i = bt([a, Xe(b, [1, 0], [b.shape[0] - 1, b.shape[1]])], 0);
        // C = reflector coefficient tau; apply I - tau*v*v' to R then to Q.
        let C = cr(je(Ze(g, x), f)),
          S = Xe(s, [u, 0], [e - u, o]),
          k = se(C, i),
          _ = yc(i);
        if (u === 0) s = Te(S, Ze(k, Ze(_, S)));else {
          let D = Te(S, Ze(k, Ze(_, S)));
          s = bt([Xe(s, [0, 0], [u, o]), D], 0);
        }
        let E = yc(k),
          R = Xe(n, [0, u], [e, n.shape[1] - u]);
        if (u === 0) n = Te(R, Ze(Ze(R, i), E));else {
          let D = Te(R, Ze(Ze(R, i), E));
          n = bt([Xe(n, [0, 0], [e, u]), D], 1);
        }
        return [i, s, n];
      }), Mt([c, l, m]);
    }
    // Economy-size trim when not fullMatrices and the matrix is tall.
    return !t8 && e > o && (n = Xe(n, [0, 0], [e, o]), s = Xe(s, [0, 0], [o, o])), [n, s];
  });
}
// Exported qr op.
var kN = N({
  qr_: vj
});
var Rt; | |
(function (r) { | |
r[r.NONE = 0] = "NONE", r[r.MEAN = 1] = "MEAN", r[r.SUM = 2] = "SUM", r[r.SUM_BY_NONZERO_WEIGHTS = 3] = "SUM_BY_NONZERO_WEIGHTS"; | |
})(Rt || (Rt = {})); | |
function kj(r, t8, e = Rt.SUM_BY_NONZERO_WEIGHTS) { | |
let o = v(r, "losses", "computeWeightedLoss"), | |
n = null; | |
t8 != null && (n = v(t8, "weights", "computeWeightedLoss")); | |
let s = n == null ? o : se(o, n); | |
if (e === Rt.NONE) return s; | |
if (e === Rt.SUM) return ot(s); | |
if (e === Rt.MEAN) { | |
if (n == null) return Xu(s); | |
{ | |
let a = o.size / n.size, | |
i = je(ot(s), ot(n)); | |
return a > 1 ? je(i, ke(a)) : i; | |
} | |
} | |
if (e === Rt.SUM_BY_NONZERO_WEIGHTS) { | |
if (n == null) return je(ot(s), ke(o.size)); | |
{ | |
let a = se(n, Da(o.shape)), | |
i = We(ot(Pd(a, ke(0))), "float32"); | |
return je(ot(s), i); | |
} | |
} | |
throw Error(`Unknown reduction: ${e}`); | |
} | |
var lr = N({ | |
computeWeightedLoss_: kj | |
}); | |
function Nj(r, t8, e, o = Rt.SUM_BY_NONZERO_WEIGHTS) { | |
let n = v(r, "labels", "absoluteDifference"), | |
s = v(t8, "predictions", "absoluteDifference"), | |
a = null; | |
e != null && (a = v(e, "weights", "absoluteDifference")), yt(n.shape, s.shape, "Error in absoluteDifference: "); | |
let i = Jt(Te(n, s)); | |
return lr(i, a, o); | |
} | |
var NN = N({ | |
absoluteDifference_: Nj | |
}); | |
function Tj(r, t8, e, o, n = Rt.SUM_BY_NONZERO_WEIGHTS) { | |
let s = v(r, "labels", "cosineDistance"), | |
a = v(t8, "predictions", "cosineDistance"), | |
i = null; | |
o != null && (i = v(o, "weights", "cosineDistance")), yt(s.shape, a.shape, "Error in cosineDistance: "); | |
let p = ke(1), | |
u = Te(p, ot(se(s, a), e, true)); | |
return lr(u, i, n); | |
} | |
var TN = N({ | |
cosineDistance_: Tj | |
}); | |
function _j(r, t8, e, o = Rt.SUM_BY_NONZERO_WEIGHTS) { | |
let n = v(r, "labels", "hingeLoss"), | |
s = v(t8, "predictions", "hingeLoss"), | |
a = null; | |
e != null && (a = v(e, "weights", "hingeLoss")), yt(n.shape, s.shape, "Error in hingeLoss: "); | |
let i = ke(1); | |
n = Te(se(ke(2), n), i); | |
let p = du(Te(i, se(n, s))); | |
return lr(p, a, o); | |
} | |
var _N = N({ | |
hingeLoss_: _j | |
}); | |
function $j(r, t8, e, o = 1, n = Rt.SUM_BY_NONZERO_WEIGHTS) { | |
let s = v(r, "labels", "huberLoss"), | |
a = v(t8, "predictions", "huberLoss"), | |
i = null; | |
e != null && (i = v(e, "weights", "huberLoss")), yt(s.shape, a.shape, "Error in huberLoss: "); | |
let p = ke(o), | |
u = Jt(Te(a, s)), | |
c = Yu(u, p), | |
l = Te(u, c), | |
m = Ce(se(ke(0.5), er(c)), se(p, l)); | |
return lr(m, i, n); | |
} | |
var $N = N({ | |
huberLoss_: $j | |
}); | |
function Ej(r, t8, e, o = 1e-7, n = Rt.SUM_BY_NONZERO_WEIGHTS) { | |
let s = v(r, "labels", "logLoss"), | |
a = v(t8, "predictions", "logLoss"), | |
i = null; | |
e != null && (i = v(e, "weights", "logLoss")), yt(s.shape, a.shape, "Error in logLoss: "); | |
let p = ke(1), | |
u = ke(o), | |
c = cr(se(s, pi(Ce(a, u)))), | |
l = se(Te(p, s), pi(Ce(Te(p, a), u))), | |
m = Te(c, l); | |
return lr(m, i, n); | |
} | |
var EN = N({ | |
logLoss_: Ej | |
}); | |
function Rj(r, t8, e, o = Rt.SUM_BY_NONZERO_WEIGHTS) { | |
let n = v(r, "labels", "meanSquaredError"), | |
s = v(t8, "predictions", "meanSquaredError"), | |
a = null; | |
e != null && (a = v(e, "weights", "meanSquaredError")), yt(n.shape, s.shape, "Error in meanSquaredError: "); | |
let i = qd(n, s); | |
return lr(i, a, o); | |
} | |
var RN = N({ | |
meanSquaredError_: Rj | |
}); | |
function Dj(r, t8) { | |
let e = v(r, "labels", "sigmoidCrossEntropyWithLogits"), | |
o = v(t8, "logits", "sigmoidCrossEntropyWithLogits"); | |
yt(e.shape, o.shape, "Error in sigmoidCrossEntropyWithLogits: "); | |
let n = du(o), | |
s = se(o, e), | |
a = Nd($o(cr(Jt(o)))); | |
return Ce(Te(n, s), a); | |
} | |
function Aj(r, t8, e, o = 0, n = Rt.SUM_BY_NONZERO_WEIGHTS) { | |
let s = v(r, "multiClassLabels", "sigmoidCrossEntropy"), | |
a = v(t8, "logits", "sigmoidCrossEntropy"), | |
i = null; | |
if (e != null && (i = v(e, "weights", "sigmoidCrossEntropy")), yt(s.shape, a.shape, "Error in sigmoidCrossEntropy: "), o > 0) { | |
let u = ke(o), | |
c = ke(1), | |
l = ke(0.5); | |
s = Ce(se(s, Te(c, u)), se(l, u)); | |
} | |
let p = Dj(s, a); | |
return lr(p, i, n); | |
} | |
var DN = N({ | |
sigmoidCrossEntropy_: Aj | |
}); | |
function Fj(r, t8, e = -1) { | |
if (e === -1 && (e = t8.rank - 1), e !== t8.rank - 1) throw Error(`Softmax cross entropy along a non-last dimension is not yet supported. Labels / logits was rank ${t8.rank} and dim was ${e}`); | |
return vr((n, s, a) => { | |
let p = $d(s, [e], true), | |
u = Te(We(s, "float32"), p); | |
a([n, u]); | |
let c = cr(se(u, n)); | |
return { | |
value: ot(c, [e]), | |
gradFunc: (d, f) => { | |
let [h, g] = f, | |
x = ii(d.shape, [e]); | |
return [se(W(d, x), Te(We(h, "float32"), $o(g))), se(W(d, x), Te($o(g), We(h, "float32")))]; | |
} | |
}; | |
})(r, t8); | |
} | |
function Pj(r, t8, e, o = 0, n = Rt.SUM_BY_NONZERO_WEIGHTS) { | |
let s = v(r, "onehotLabels", "softmaxCrossEntropy"), | |
a = v(t8, "logits", "softmaxCrossEntropy"), | |
i = null; | |
if (e != null && (i = v(e, "weights", "softmaxCrossEntropy")), yt(s.shape, a.shape, "Error in softmaxCrossEntropy: "), o > 0) { | |
let u = ke(o), | |
c = ke(1), | |
l = ke(s.shape[1]); | |
s = Ce(se(s, Te(c, u)), je(u, l)); | |
} | |
let p = Fj(s, a); | |
return lr(p, i, n); | |
} | |
var AN = N({ | |
softmaxCrossEntropy_: Pj | |
}); | |
function Oj(r, t8, e, o) { | |
let n = v(r, "indices", "sparseFillEmptyRows", "int32"), | |
s = v(t8, "values", "sparseFillEmptyRows"), | |
a = v(e, "denseShape", "sparseFillEmptyRows", "int32"), | |
i = v(o, "defaultValue", "sparseFillEmptyRows", s.dtype); | |
if (n.rank !== 2) throw new Error(`Indices should be Tensor2D but received shape | |
${n.shape}`); | |
if (s.rank !== 1) throw new Error(`Values should be Tensor1D but received shape ${s.shape}`); | |
if (a.rank !== 1) throw new Error(`Dense shape should be Tensor1D but received shape ${a.shape}`); | |
if (i.rank !== 0) throw new Error(`Default value should be a scalar but received shape ${i.shape}`); | |
let p = { | |
indices: n, | |
values: s, | |
denseShape: a, | |
defaultValue: i | |
}, | |
u = T.runKernel(ji, p); | |
return { | |
outputIndices: u[0], | |
outputValues: u[1], | |
emptyRowIndicator: u[2], | |
reverseIndexMap: u[3] | |
}; | |
} | |
var FN = N({ | |
sparseFillEmptyRows_: Oj | |
}); | |
function Mj(r, t8, e) { | |
let o = v(r, "inputIndices", "sparseReshape", "int32"), | |
n = v(t8, "inputShape", "sparseReshape", "int32"), | |
s = v(e, "newShape", "sparseReshape", "int32"); | |
if (o.rank !== 2) throw new Error(`Input indices should be Tensor2D but received shape | |
${o.shape}`); | |
if (n.rank !== 1) throw new Error(`Input shape should be Tensor1D but received shape ${n.shape}`); | |
if (s.rank !== 1) throw new Error(`New shape should be Tensor1D but received shape ${s.shape}`); | |
let a = { | |
inputIndices: o, | |
inputShape: n, | |
newShape: s | |
}, | |
i = T.runKernel(ti, a); | |
return { | |
outputIndices: i[0], | |
outputShape: i[1] | |
}; | |
} | |
var PN = N({ | |
sparseReshape_: Mj | |
}); | |
function Lj(r, t8, e) { | |
let o = v(r, "data", "sparseSegmentMean"), | |
n = v(t8, "indices", "sparseSegmentMean", "int32"), | |
s = v(e, "segmentIds", "sparseSegmentMean", "int32"); | |
if (o.rank < 1) throw new Error("Data should be at least 1 dimensional but received scalar"); | |
if (n.rank !== 1) throw new Error(`Indices should be Tensor1D but received shape | |
${n.shape}`); | |
if (s.rank !== 1) throw new Error(`Segment ids should be Tensor1D but received shape | |
${s.shape}`); | |
let a = { | |
data: o, | |
indices: n, | |
segmentIds: s | |
}; | |
return T.runKernel(ya, a); | |
} | |
var ON = N({ | |
sparseSegmentMean_: Lj | |
}); | |
function Bj(r, t8, e) { | |
let o = v(r, "data", "sparseSegmentSum"), | |
n = v(t8, "indices", "sparseSegmentSum", "int32"), | |
s = v(e, "segmentIds", "sparseSegmentSum", "int32"); | |
if (o.rank < 1) throw new Error("Data should be at least 1 dimensional but received scalar"); | |
if (n.rank !== 1) throw new Error(`Indices should be Tensor1D but received shape | |
${n.shape}`); | |
if (s.rank !== 1) throw new Error(`Segment ids should be Tensor1D but received shape | |
${s.shape}`); | |
let a = { | |
data: o, | |
indices: n, | |
segmentIds: s | |
}; | |
return T.runKernel(ba, a); | |
} | |
var MN = N({ | |
sparseSegmentSum_: Bj | |
}); | |
function zj(r, t8, e, o, n, s, a, i) { | |
let p = v(r, "data", "stringNGrams", "string"); | |
if (p.dtype !== "string") throw new Error("Data must be of datatype string"); | |
if (p.shape.length !== 1) throw new Error(`Data must be a vector, saw: ${p.shape}`); | |
let u = v(t8, "dataSplits", "stringNGrams"); | |
if (u.dtype !== "int32") throw new Error("Data splits must be of datatype int32"); | |
let c = { | |
separator: e, | |
nGramWidths: o, | |
leftPad: n, | |
rightPad: s, | |
padWidth: a, | |
preserveShortSequences: i | |
}, | |
l = { | |
data: p, | |
dataSplits: u | |
}, | |
m = T.runKernel(Ca, l, c); | |
return { | |
nGrams: m[0], | |
nGramsSplits: m[1] | |
}; | |
} | |
var LN = N({ | |
stringNGrams_: zj | |
}); | |
function Vj(r, t8, e = true) { | |
let o = v(r, "input", "stringSplit", "string"), | |
n = v(t8, "delimiter", "stringSplit", "string"); | |
if (o.rank !== 1) throw new Error(`Input should be Tensor1D but received shape ${o.shape}`); | |
if (n.rank !== 0) throw new Error(`Delimiter should be a scalar but received shape ${n.shape}`); | |
let s = { | |
skipEmpty: e | |
}, | |
a = { | |
input: o, | |
delimiter: n | |
}, | |
i = T.runKernel(Yi, a, s); | |
return { | |
indices: i[0], | |
values: i[1], | |
shape: i[2] | |
}; | |
} | |
var BN = N({ | |
stringSplit_: Vj | |
}); | |
function Wj(r, t8) { | |
let e = v(r, "input", "stringToHashBucketFast", "string"), | |
o = { | |
numBuckets: t8 | |
}; | |
if (t8 <= 0) throw new Error("Number of buckets must be at least 1"); | |
let n = { | |
input: e | |
}; | |
return T.runKernel(Qi, n, o); | |
} | |
var zN = N({ | |
stringToHashBucketFast_: Wj | |
}); | |
function Uj(r, t8, e, o = true) { | |
let n = v(r, "input", "staticRegexReplace", "string"), | |
s = { | |
pattern: t8, | |
rewrite: e, | |
replaceGlobal: o | |
}; | |
return T.runKernel(Ou, { | |
x: n | |
}, s); | |
} | |
var VN = N({ | |
staticRegexReplace_: Uj | |
}); | |
var Gj = { | |
fft: fc, | |
ifft: Ju, | |
rfft: hc, | |
irfft: Kd | |
}; | |
var Hj = { | |
hammingWindow: oN, | |
hannWindow: Zd, | |
frame: Jd, | |
stft: nN | |
}; | |
var Kj = { | |
flipLeftRight: aN, | |
grayscaleToRGB: iN, | |
resizeNearestNeighbor: bN, | |
resizeBilinear: yN, | |
rgbToGrayscale: uN, | |
rotateWithOffset: pN, | |
cropAndResize: sN, | |
nonMaxSuppression: cN, | |
nonMaxSuppressionAsync: dN, | |
nonMaxSuppressionWithScore: fN, | |
nonMaxSuppressionWithScoreAsync: hN, | |
nonMaxSuppressionPadded: gN, | |
nonMaxSuppressionPaddedAsync: xN, | |
threshold: CN, | |
transform: wN | |
}; | |
var qj = { | |
bandPart: SN, | |
gramSchmidt: IN, | |
qr: kN | |
}; | |
var jj = { | |
absoluteDifference: NN, | |
computeWeightedLoss: lr, | |
cosineDistance: TN, | |
hingeLoss: _N, | |
huberLoss: $N, | |
logLoss: EN, | |
meanSquaredError: RN, | |
sigmoidCrossEntropy: DN, | |
softmaxCrossEntropy: AN | |
}; | |
var Xj = { | |
sparseFillEmptyRows: FN, | |
sparseReshape: PN, | |
sparseSegmentMean: ON, | |
sparseSegmentSum: MN | |
}; | |
var Yj = { | |
stringNGrams: LN, | |
stringSplit: BN, | |
stringToHashBucketFast: zN, | |
staticRegexReplace: VN | |
}; | |
var WN = {}; | |
qe(WN, { | |
Serializable: () => Dl, | |
SerializationMap: () => Fa, | |
getRegisteredName: () => Zj, | |
registerClass: () => tS | |
}); | |
var Qj = /* @__PURE__ */new Map(); | |
var eS = /* @__PURE__ */new Map(); | |
var Dl = class { | |
getClassName() { | |
return this.constructor.className; | |
} | |
static fromConfig(t8, e) { | |
return new t8(e); | |
} | |
}; | |
var Fa = class { | |
constructor() { | |
this.classNameMap = {}; | |
} | |
static getMap() { | |
return Fa.instance == null && (Fa.instance = new Fa()), Fa.instance; | |
} | |
static register(t8) { | |
Fa.getMap().classNameMap[t8.className] = [t8, t8.fromConfig]; | |
} | |
}; | |
function tS(r, t8, e) { | |
$(r.className != null, () => "Class being registered does not have the static className property defined."), $(typeof r.className == "string", () => "className is required to be a string, but got type " + typeof r.className), $(r.className.length > 0, () => "Class being registered has an empty-string as its className, which is disallowed."), typeof t8 == "undefined" && (t8 = "Custom"), typeof e == "undefined" && (e = r.className); | |
let o = e, | |
n = t8 + ">" + o; | |
return Fa.register(r), Qj.set(n, r), eS.set(r, n), r; | |
} | |
function Zj(r) { | |
return eS.has(r) ? eS.get(r) : r.className; | |
} | |
var Nr = class extends Dl { | |
minimize(t8, e = false, o) { | |
let { | |
value: n, | |
grads: s | |
} = this.computeGradients(t8, o); | |
if (o != null) { | |
let a = o.map(i => ({ | |
name: i.name, | |
tensor: s[i.name] | |
})); | |
this.applyGradients(a); | |
} else this.applyGradients(s); | |
return Mt(s), e ? n : (n.dispose(), null); | |
} | |
get iterations() { | |
return this.iterations_ == null && (this.iterations_ = 0), this.iterations_; | |
} | |
incrementIterations() { | |
this.iterations_ = this.iterations + 1; | |
} | |
computeGradients(t8, e) { | |
return zw(t8, e); | |
} | |
dispose() { | |
this.iterations_ != null && Mt(this.iterations_); | |
} | |
async saveIterations() { | |
return this.iterations_ == null && (this.iterations_ = 0), { | |
name: "iter", | |
tensor: ke(this.iterations_, "int32") | |
}; | |
} | |
async getWeights() { | |
throw new Error("getWeights() is not implemented for this optimizer yet."); | |
} | |
async setWeights(t8) { | |
throw new Error(`setWeights() is not implemented for this optimizer class ${this.getClassName()}`); | |
} | |
async extractIterations(t8) { | |
return this.iterations_ = (await t8[0].tensor.data())[0], t8.slice(1); | |
} | |
}; | |
Object.defineProperty(Nr, Symbol.hasInstance, { | |
value: r => r.minimize != null && r.computeGradients != null && r.applyGradients != null | |
}); | |
var np = class extends Nr { | |
static get className() { | |
return "Adadelta"; | |
} | |
constructor(t8, e, o = null) { | |
super(), this.learningRate = t8, this.rho = e, this.epsilon = o, this.accumulatedGrads = [], this.accumulatedUpdates = [], o == null && (this.epsilon = T.backend.epsilon()); | |
} | |
applyGradients(t8) { | |
(Array.isArray(t8) ? t8.map(o => o.name) : Object.keys(t8)).forEach((o, n) => { | |
let s = T.registeredVariables[o], | |
a = false; | |
this.accumulatedGrads[n] == null && (this.accumulatedGrads[n] = { | |
originalName: `${o}/accum_grad`, | |
variable: De(() => Ht(s).variable(a)) | |
}), this.accumulatedUpdates[n] == null && (this.accumulatedUpdates[n] = { | |
originalName: `${o}/accum_var`, | |
variable: De(() => Ht(s).variable(a)) | |
}); | |
let i = Array.isArray(t8) ? t8[n].tensor : t8[o]; | |
if (i == null) return; | |
let p = this.accumulatedGrads[n].variable, | |
u = this.accumulatedUpdates[n].variable; | |
De(() => { | |
let c = Ce(se(p, this.rho), se(er(i), 1 - this.rho)), | |
l = se(je(Dr(Ce(u, this.epsilon)), Dr(Ce(p, this.epsilon))), i), | |
m = Ce(se(u, this.rho), se(er(l), 1 - this.rho)); | |
p.assign(c), u.assign(m); | |
let d = Ce(se(l, -this.learningRate), s); | |
s.assign(d); | |
}); | |
}), this.incrementIterations(); | |
} | |
dispose() { | |
this.accumulatedUpdates != null && (Mt(this.accumulatedGrads.map(t8 => t8.variable)), Mt(this.accumulatedUpdates.map(t8 => t8.variable))); | |
} | |
async getWeights() { | |
let t8 = [...this.accumulatedGrads, ...this.accumulatedUpdates]; | |
return [await this.saveIterations()].concat(t8.map(e => ({ | |
name: e.originalName, | |
tensor: e.variable | |
}))); | |
} | |
async setWeights(t8) { | |
t8 = await this.extractIterations(t8); | |
let e = t8.length / 2, | |
o = false; | |
this.accumulatedGrads = t8.slice(0, e).map(n => ({ | |
originalName: n.name, | |
variable: n.tensor.variable(o) | |
})), this.accumulatedUpdates = t8.slice(e, e * 2).map(n => ({ | |
originalName: n.name, | |
variable: n.tensor.variable(o) | |
})); | |
} | |
getConfig() { | |
return { | |
learningRate: this.learningRate, | |
rho: this.rho, | |
epsilon: this.epsilon | |
}; | |
} | |
static fromConfig(t8, e) { | |
return new t8(e.learningRate, e.rho, e.epsilon); | |
} | |
}; | |
var sp = class extends Nr { | |
static get className() { | |
return "Adagrad"; | |
} | |
constructor(t8, e = 0.1) { | |
super(), this.learningRate = t8, this.initialAccumulatorValue = e, this.accumulatedGrads = []; | |
} | |
applyGradients(t8) { | |
(Array.isArray(t8) ? t8.map(o => o.name) : Object.keys(t8)).forEach((o, n) => { | |
let s = T.registeredVariables[o]; | |
this.accumulatedGrads[n] == null && (this.accumulatedGrads[n] = { | |
originalName: `${o}/accumulator`, | |
variable: De(() => Ea(s.shape, this.initialAccumulatorValue).variable(false)) | |
}); | |
let a = Array.isArray(t8) ? t8[n].tensor : t8[o]; | |
if (a == null) return; | |
let i = this.accumulatedGrads[n].variable; | |
De(() => { | |
let p = Ce(i, er(a)); | |
i.assign(p); | |
let u = Ce(se(je(a, Dr(Ce(p, T.backend.epsilon()))), -this.learningRate), s); | |
s.assign(u); | |
}); | |
}), this.incrementIterations(); | |
} | |
dispose() { | |
this.accumulatedGrads != null && Mt(this.accumulatedGrads.map(t8 => t8.variable)); | |
} | |
async getWeights() { | |
return [await this.saveIterations()].concat(this.accumulatedGrads.map(t8 => ({ | |
name: t8.originalName, | |
tensor: t8.variable | |
}))); | |
} | |
async setWeights(t8) { | |
t8 = await this.extractIterations(t8); | |
let e = false; | |
this.accumulatedGrads = t8.map(o => ({ | |
originalName: o.name, | |
variable: o.tensor.variable(e) | |
})); | |
} | |
getConfig() { | |
return { | |
learningRate: this.learningRate, | |
initialAccumulatorValue: this.initialAccumulatorValue | |
}; | |
} | |
static fromConfig(t8, e) { | |
return new t8(e.learningRate, e.initialAccumulatorValue); | |
} | |
}; | |
var ap = class extends Nr { | |
static get className() { | |
return "Adam"; | |
} | |
constructor(t8, e, o, n = null) { | |
super(), this.learningRate = t8, this.beta1 = e, this.beta2 = o, this.epsilon = n, this.accumulatedFirstMoment = [], this.accumulatedSecondMoment = [], De(() => { | |
this.accBeta1 = ke(e).variable(), this.accBeta2 = ke(o).variable(); | |
}), n == null && (this.epsilon = T.backend.epsilon()); | |
} | |
applyGradients(t8) { | |
let e = Array.isArray(t8) ? t8.map(o => o.name) : Object.keys(t8); | |
De(() => { | |
let o = Te(1, this.accBeta1), | |
n = Te(1, this.accBeta2); | |
e.forEach((s, a) => { | |
let i = T.registeredVariables[s], | |
p = false; | |
this.accumulatedFirstMoment[a] == null && (this.accumulatedFirstMoment[a] = { | |
originalName: `${s}/m`, | |
variable: De(() => Ht(i).variable(p)) | |
}), this.accumulatedSecondMoment[a] == null && (this.accumulatedSecondMoment[a] = { | |
originalName: `${s}/v`, | |
variable: De(() => Ht(i).variable(p)) | |
}); | |
let u = Array.isArray(t8) ? t8[a].tensor : t8[s]; | |
if (u == null) return; | |
let c = this.accumulatedFirstMoment[a].variable, | |
l = this.accumulatedSecondMoment[a].variable, | |
m = Ce(se(c, this.beta1), se(u, 1 - this.beta1)), | |
d = Ce(se(l, this.beta2), se(er(u), 1 - this.beta2)), | |
f = je(m, o), | |
h = je(d, n); | |
c.assign(m), l.assign(d); | |
let g = Ce(se(je(f, Ce(Dr(h), this.epsilon)), -this.learningRate), i); | |
i.assign(g); | |
}), this.accBeta1.assign(se(this.accBeta1, this.beta1)), this.accBeta2.assign(se(this.accBeta2, this.beta2)); | |
}), this.incrementIterations(); | |
} | |
dispose() { | |
this.accBeta1.dispose(), this.accBeta2.dispose(), this.accumulatedFirstMoment != null && Mt(this.accumulatedFirstMoment.map(t8 => t8.variable)), this.accumulatedSecondMoment != null && Mt(this.accumulatedSecondMoment.map(t8 => t8.variable)); | |
} | |
async getWeights() { | |
let t8 = [...this.accumulatedFirstMoment, ...this.accumulatedSecondMoment]; | |
return [await this.saveIterations()].concat(t8.map(e => ({ | |
name: e.originalName, | |
tensor: e.variable | |
}))); | |
} | |
async setWeights(t8) { | |
t8 = await this.extractIterations(t8), De(() => { | |
this.accBeta1.assign(ui(this.beta1, this.iterations_ + 1)), this.accBeta2.assign(ui(this.beta2, this.iterations_ + 1)); | |
}); | |
let e = t8.length / 2, | |
o = false; | |
this.accumulatedFirstMoment = t8.slice(0, e).map(n => ({ | |
originalName: n.name, | |
variable: n.tensor.variable(o) | |
})), this.accumulatedSecondMoment = t8.slice(e, e * 2).map(n => ({ | |
originalName: n.name, | |
variable: n.tensor.variable(o) | |
})); | |
} | |
getConfig() { | |
return { | |
learningRate: this.learningRate, | |
beta1: this.beta1, | |
beta2: this.beta2, | |
epsilon: this.epsilon | |
}; | |
} | |
static fromConfig(t8, e) { | |
return new t8(e.learningRate, e.beta1, e.beta2, e.epsilon); | |
} | |
}; | |
var ip = class extends Nr { | |
static get className() { | |
return "Adamax"; | |
} | |
constructor(t8, e, o, n = null, s = 0) { | |
super(), this.learningRate = t8, this.beta1 = e, this.beta2 = o, this.epsilon = n, this.decay = s, this.accumulatedFirstMoment = [], this.accumulatedWeightedInfNorm = [], De(() => { | |
this.iteration = ke(0).variable(), this.accBeta1 = ke(e).variable(); | |
}), n == null && (this.epsilon = T.backend.epsilon()); | |
} | |
applyGradients(t8) { | |
let e = Array.isArray(t8) ? t8.map(o => o.name) : Object.keys(t8); | |
De(() => { | |
let o = Te(1, this.accBeta1), | |
n = je(-this.learningRate, Ce(se(this.iteration, this.decay), 1)); | |
e.forEach((s, a) => { | |
let i = T.registeredVariables[s], | |
p = false; | |
this.accumulatedFirstMoment[a] == null && (this.accumulatedFirstMoment[a] = { | |
originalName: `${s}/m`, | |
variable: Ht(i).variable(p) | |
}), this.accumulatedWeightedInfNorm[a] == null && (this.accumulatedWeightedInfNorm[a] = { | |
originalName: `${s}/v`, | |
variable: Ht(i).variable(p) | |
}); | |
let u = Array.isArray(t8) ? t8[a].tensor : t8[s]; | |
if (u == null) return; | |
let c = this.accumulatedFirstMoment[a].variable, | |
l = this.accumulatedWeightedInfNorm[a].variable, | |
m = Ce(se(c, this.beta1), se(u, 1 - this.beta1)), | |
d = se(l, this.beta2), | |
f = Jt(u), | |
h = Fd(d, f); | |
c.assign(m), l.assign(h); | |
let g = Ce(se(je(n, o), je(m, Ce(h, this.epsilon))), i); | |
i.assign(g); | |
}), this.iteration.assign(Ce(this.iteration, 1)), this.accBeta1.assign(se(this.accBeta1, this.beta1)); | |
}), this.incrementIterations(); | |
} | |
dispose() { | |
this.accBeta1.dispose(), this.iteration.dispose(), this.accumulatedFirstMoment != null && Mt(this.accumulatedFirstMoment.map(t8 => t8.variable)), this.accumulatedWeightedInfNorm != null && Mt(this.accumulatedWeightedInfNorm.map(t8 => t8.variable)); | |
} | |
async getWeights() { | |
throw new Error("getWeights() is not implemented for Adamax yet."); | |
} | |
async setWeights(t8) { | |
throw new Error("setWeights() is not implemented for Adamax yet."); | |
} | |
getConfig() { | |
return { | |
learningRate: this.learningRate, | |
beta1: this.beta1, | |
beta2: this.beta2, | |
epsilon: this.epsilon, | |
decay: this.decay | |
}; | |
} | |
static fromConfig(t8, e) { | |
return new t8(e.learningRate, e.beta1, e.beta2, e.epsilon, e.decay); | |
} | |
}; | |
var mi = class extends Nr { | |
static get className() { | |
return "SGD"; | |
} | |
constructor(t8) { | |
super(), this.learningRate = t8, this.setLearningRate(t8); | |
} | |
applyGradients(t8) { | |
(Array.isArray(t8) ? t8.map(o => o.name) : Object.keys(t8)).forEach((o, n) => { | |
let s = Array.isArray(t8) ? t8[n].tensor : t8[o]; | |
if (s == null) return; | |
let a = T.registeredVariables[o]; | |
De(() => { | |
let i = Ce(se(this.c, s), a); | |
a.assign(i); | |
}); | |
}), this.incrementIterations(); | |
} | |
setLearningRate(t8) { | |
this.learningRate = t8, this.c != null && this.c.dispose(), this.c = Rr(ke(-t8)); | |
} | |
dispose() { | |
this.c.dispose(); | |
} | |
async getWeights() { | |
return [await this.saveIterations()]; | |
} | |
async setWeights(t8) { | |
if (t8 = await this.extractIterations(t8), t8.length !== 0) throw new Error("SGD optimizer does not have settable weights."); | |
} | |
getConfig() { | |
return { | |
learningRate: this.learningRate | |
}; | |
} | |
static fromConfig(t8, e) { | |
return new t8(e.learningRate); | |
} | |
}; | |
var up = class extends mi { | |
static get className() { | |
return "Momentum"; | |
} | |
constructor(t8, e, o = false) { | |
super(t8), this.learningRate = t8, this.momentum = e, this.useNesterov = o, this.accumulations = [], this.m = ke(this.momentum); | |
} | |
applyGradients(t8) { | |
(Array.isArray(t8) ? t8.map(o => o.name) : Object.keys(t8)).forEach((o, n) => { | |
let s = T.registeredVariables[o]; | |
this.accumulations[n] == null && (this.accumulations[n] = { | |
originalName: `${o}/momentum`, | |
variable: De(() => Ht(s).variable(false)) | |
}); | |
let a = this.accumulations[n].variable, | |
i = Array.isArray(t8) ? t8[n].tensor : t8[o]; | |
i != null && De(() => { | |
let p, | |
u = Ce(se(this.m, a), i); | |
this.useNesterov ? p = Ce(se(this.c, Ce(i, se(u, this.m))), s) : p = Ce(se(this.c, u), s), a.assign(u), s.assign(p); | |
}); | |
}), this.incrementIterations(); | |
} | |
dispose() { | |
this.m.dispose(), this.accumulations != null && Mt(this.accumulations.map(t8 => t8.variable)); | |
} | |
setMomentum(t8) { | |
this.momentum = t8; | |
} | |
async getWeights() { | |
return [await this.saveIterations()].concat(this.accumulations.map(t8 => ({ | |
name: t8.originalName, | |
tensor: t8.variable | |
}))); | |
} | |
async setWeights(t8) { | |
t8 = await this.extractIterations(t8); | |
let e = false; | |
this.accumulations = t8.map(o => ({ | |
originalName: o.name, | |
variable: o.tensor.variable(e) | |
})); | |
} | |
getConfig() { | |
return { | |
learningRate: this.learningRate, | |
momentum: this.momentum, | |
useNesterov: this.useNesterov | |
}; | |
} | |
static fromConfig(t8, e) { | |
return new t8(e.learningRate, e.momentum, e.useNesterov); | |
} | |
}; | |
var pp = class extends Nr { | |
static get className() { | |
return "RMSProp"; | |
} | |
constructor(t8, e = 0.9, o = 0, n = null, s = false) { | |
if (super(), this.learningRate = t8, this.decay = e, this.momentum = o, this.epsilon = n, this.accumulatedMeanSquares = [], this.accumulatedMoments = [], this.accumulatedMeanGrads = [], this.centered = s, n == null && (this.epsilon = T.backend.epsilon()), t8 == null) throw new Error("learningRate for RMSPropOptimizer must be defined."); | |
} | |
applyGradients(t8) { | |
(Array.isArray(t8) ? t8.map(o => o.name) : Object.keys(t8)).forEach((o, n) => { | |
let s = T.registeredVariables[o], | |
a = false; | |
this.accumulatedMeanSquares[n] == null && (this.accumulatedMeanSquares[n] = { | |
originalName: `${o}/rms`, | |
variable: De(() => Ht(s).variable(a)) | |
}), this.accumulatedMoments[n] == null && (this.accumulatedMoments[n] = { | |
originalName: `${o}/momentum`, | |
variable: De(() => Ht(s).variable(a)) | |
}), this.accumulatedMeanGrads[n] == null && this.centered && (this.accumulatedMeanGrads[n] = { | |
originalName: `${o}/mg`, | |
variable: De(() => Ht(s).variable(a)) | |
}); | |
let i = Array.isArray(t8) ? t8[n].tensor : t8[o]; | |
if (i == null) return; | |
let p = this.accumulatedMeanSquares[n].variable, | |
u = this.accumulatedMoments[n].variable; | |
De(() => { | |
let c = Ce(se(p, this.decay), se(er(i), 1 - this.decay)); | |
if (this.centered) { | |
let l = this.accumulatedMeanGrads[n].variable, | |
m = Ce(se(l, this.decay), se(i, 1 - this.decay)), | |
d = je(se(i, this.learningRate), Dr(Te(c, Ce(er(m), this.epsilon)))), | |
f = Ce(se(u, this.momentum), d); | |
p.assign(c), l.assign(m), u.assign(f); | |
let h = Te(s, f); | |
s.assign(h); | |
} else { | |
let l = Ce(se(p, this.decay), se(er(i), 1 - this.decay)), | |
m = Ce(se(u, this.momentum), je(se(i, this.learningRate), Dr(Ce(l, this.epsilon)))); | |
p.assign(l), u.assign(m); | |
let d = Te(s, m); | |
s.assign(d); | |
} | |
}); | |
}), this.incrementIterations(); | |
} | |
dispose() { | |
this.accumulatedMeanSquares != null && Mt(this.accumulatedMeanSquares.map(t8 => t8.variable)), this.accumulatedMeanGrads != null && this.centered && Mt(this.accumulatedMeanGrads.map(t8 => t8.variable)), this.accumulatedMoments != null && Mt(this.accumulatedMoments.map(t8 => t8.variable)); | |
} | |
async getWeights() { | |
let t8 = [...this.accumulatedMeanSquares, ...this.accumulatedMoments]; | |
return this.centered && t8.push(...this.accumulatedMeanGrads), [await this.saveIterations()].concat(t8.map(e => ({ | |
name: e.originalName, | |
tensor: e.variable | |
}))); | |
} | |
async setWeights(t8) { | |
t8 = await this.extractIterations(t8); | |
let e = this.centered ? t8.length / 3 : t8.length / 2, | |
o = false; | |
this.accumulatedMeanSquares = t8.slice(0, e).map(n => ({ | |
originalName: n.name, | |
variable: n.tensor.variable(o) | |
})), this.accumulatedMoments = t8.slice(e, e * 2).map(n => ({ | |
originalName: n.name, | |
variable: n.tensor.variable(o) | |
})), this.centered && (this.accumulatedMeanGrads = t8.slice(e * 2, e * 3).map(n => ({ | |
originalName: n.name, | |
variable: n.tensor.variable(o) | |
}))); | |
} | |
getConfig() { | |
return { | |
learningRate: this.learningRate, | |
decay: this.decay, | |
momentum: this.momentum, | |
epsilon: this.epsilon, | |
centered: this.centered | |
}; | |
} | |
static fromConfig(t8, e) { | |
return new t8(e.learningRate, e.decay, e.momentum, e.epsilon, e.centered); | |
} | |
}; | |
var Jj = [np, sp, ap, ip, up, pp, mi]; | |
function UN() { | |
for (let r of Jj) tS(r); | |
} | |
var fi = {}; | |
qe(fi, { | |
CompositeArrayBuffer: () => jt, | |
browserFiles: () => HN, | |
browserHTTPRequest: () => jN, | |
concatenateArrayBuffers: () => ik, | |
copyModel: () => wk, | |
decodeWeights: () => ad, | |
encodeWeights: () => nk, | |
fromMemory: () => XN, | |
fromMemorySync: () => iS, | |
getLoadHandlers: () => lk, | |
getModelArtifactsForJSON: () => ic, | |
getModelArtifactsForJSONSync: () => _w, | |
getModelArtifactsInfoForJSON: () => va, | |
getSaveHandlers: () => ck, | |
getWeightSpecs: () => ud, | |
http: () => nf, | |
isHTTPScheme: () => of, | |
listModels: () => bk, | |
loadWeights: () => KN, | |
moveModel: () => Sk, | |
registerLoadRouter: () => pk, | |
registerSaveRouter: () => uk, | |
removeModel: () => Ck, | |
weightsLoaderFactory: () => sS, | |
withSaveHandler: () => YN, | |
withSaveHandlerSync: () => QN | |
}); | |
var eX = "model"; | |
var tX = ".json"; | |
var rX = ".weights.bin"; | |
function GN(r) { | |
return new Promise(t8 => setTimeout(t8)).then(r); | |
} | |
var di = class { | |
constructor(t8) { | |
if (!A().getBool("IS_BROWSER")) throw new Error("browserDownloads() cannot proceed because the current environment is not a browser."); | |
t8.startsWith(di.URL_SCHEME) && (t8 = t8.slice(di.URL_SCHEME.length)), (t8 == null || t8.length === 0) && (t8 = eX), this.modelJsonFileName = t8 + tX, this.weightDataFileName = t8 + rX; | |
} | |
async save(t8) { | |
if (typeof document == "undefined") throw new Error("Browser downloads are not supported in this environment since `document` is not present"); | |
let e = jt.join(t8.weightData), | |
o = window.URL.createObjectURL(new Blob([e], { | |
type: "application/octet-stream" | |
})); | |
if (t8.modelTopology instanceof ArrayBuffer) throw new Error("BrowserDownloads.save() does not support saving model topology in binary formats yet."); | |
{ | |
let n = [{ | |
paths: ["./" + this.weightDataFileName], | |
weights: t8.weightSpecs | |
}], | |
s = id(t8, n), | |
a = window.URL.createObjectURL(new Blob([JSON.stringify(s)], { | |
type: "application/json" | |
})), | |
i = this.modelJsonAnchor == null ? document.createElement("a") : this.modelJsonAnchor; | |
if (i.download = this.modelJsonFileName, i.href = a, await GN(() => i.dispatchEvent(new MouseEvent("click"))), t8.weightData != null) { | |
let p = this.weightDataAnchor == null ? document.createElement("a") : this.weightDataAnchor; | |
p.download = this.weightDataFileName, p.href = o, await GN(() => p.dispatchEvent(new MouseEvent("click"))); | |
} | |
return { | |
modelArtifactsInfo: va(t8) | |
}; | |
} | |
} | |
}; | |
di.URL_SCHEME = "downloads://"; | |
var rS = class { | |
constructor(t8) { | |
if (t8 == null || t8.length < 1) throw new Error(`When calling browserFiles, at least 1 file is required, but received ${t8}`); | |
this.jsonFile = t8[0], this.weightsFiles = t8.slice(1); | |
} | |
async load() { | |
return new Promise((t8, e) => { | |
let o = new FileReader(); | |
o.onload = n => { | |
let s = JSON.parse(n.target.result), | |
a = s.modelTopology; | |
if (a == null) { | |
e(new Error(`modelTopology field is missing from file ${this.jsonFile.name}`)); | |
return; | |
} | |
if (s.weightsManifest == null) { | |
e(new Error(`weightManifest field is missing from file ${this.jsonFile.name}`)); | |
return; | |
} | |
if (this.weightsFiles.length === 0) { | |
t8({ | |
modelTopology: a | |
}); | |
return; | |
} | |
let p = ic(s, u => this.loadWeights(u)); | |
t8(p); | |
}, o.onerror = n => e(`Failed to read model topology and weights manifest JSON from file '${this.jsonFile.name}'. BrowserFiles supports loading Keras-style tf.Model artifacts only.`), o.readAsText(this.jsonFile); | |
}); | |
} | |
loadWeights(t8) { | |
let e = [], | |
o = []; | |
for (let a of t8) e.push(...a.weights), o.push(...a.paths); | |
let n = this.checkManifestAndWeightFiles(t8), | |
s = o.map(a => this.loadWeightsFile(a, n[a])); | |
return Promise.all(s).then(a => [e, a]); | |
} | |
loadWeightsFile(t8, e) { | |
return new Promise((o, n) => { | |
let s = new FileReader(); | |
s.onload = a => { | |
let i = a.target.result; | |
o(i); | |
}, s.onerror = a => n(`Failed to weights data from file of path '${t8}'.`), s.readAsArrayBuffer(e); | |
}); | |
} | |
checkManifestAndWeightFiles(t8) { | |
let e = [], | |
o = this.weightsFiles.map(s => Tw(s.name)), | |
n = {}; | |
for (let s of t8) s.paths.forEach(a => { | |
let i = Tw(a); | |
if (e.indexOf(i) !== -1) throw new Error(`Duplicate file basename found in weights manifest: '${i}'`); | |
if (e.push(i), o.indexOf(i) === -1) throw new Error(`Weight file with basename '${i}' is not provided.`); | |
n[a] = this.weightsFiles[o.indexOf(i)]; | |
}); | |
if (e.length !== this.weightsFiles.length) throw new Error(`Mismatch in the number of files in weights manifest (${e.length}) and the number of weight files provided (${this.weightsFiles.length}).`); | |
return n; | |
} | |
}; | |
var oX = r => A().getBool("IS_BROWSER") && !Array.isArray(r) && r.startsWith(di.URL_SCHEME) ? nX(r.slice(di.URL_SCHEME.length)) : null; | |
ft.registerSaveRouter(oX); | |
function nX(r = "model") { | |
return new di(r); | |
} | |
function HN(r) { | |
return new rS(r); | |
} | |
function oS(r, t8, e, o) { | |
a(r), e = e == null ? 0 : e, o = o == null ? 1 : o, i(e, o); | |
let n = 0, | |
s = p => (p.then(u => { | |
let c = e + ++n / r.length * (o - e); | |
return t8(c), u; | |
}), p); | |
function a(p) { | |
$(p != null && Array.isArray(p) && p.length > 0, () => "promises must be a none empty array"); | |
} | |
function i(p, u) { | |
$(p >= 0 && p <= 1, () => `Progress fraction must be in range [0, 1], but got startFraction ${p}`), $(u >= 0 && u <= 1, () => `Progress fraction must be in range [0, 1], but got endFraction ${u}`), $(u >= p, () => `startFraction must be no more than endFraction, but got startFraction ${p} and endFraction ${u}`); | |
} | |
return Promise.all(r.map(s)); | |
} | |
async function nS(r, t8) { | |
t8 == null && (t8 = {}); | |
let e = t8.fetchFunc == null ? A().platform.fetch : t8.fetchFunc, | |
o = r.map(l => e(l, t8.requestInit, { | |
isBinary: true | |
})), | |
n = 0, | |
s = 0.5, | |
i = (t8.onProgress == null ? await Promise.all(o) : await oS(o, t8.onProgress, n, s)).map(l => l.arrayBuffer()), | |
p = 0.5, | |
u = 1; | |
return t8.onProgress == null ? await Promise.all(i) : await oS(i, t8.onProgress, p, u); | |
} | |
async function KN(r, t8 = "", e, o) { | |
return sS(a => nS(a, { | |
requestInit: o | |
}))(r, t8, e); | |
} | |
function sS(r) { | |
return async (t8, e = "", o) => { | |
let n = t8.map(() => false), | |
s = {}, | |
a = o != null ? o.map(() => false) : [], | |
i = []; | |
if (t8.forEach((d, f) => { | |
let h = 0; | |
d.weights.forEach(g => { | |
let x = "quantization" in g ? g.quantization.dtype : g.dtype, | |
b = Il[x] * He(g.shape), | |
C = () => { | |
n[f] = true, s[f] == null && (s[f] = []), s[f].push({ | |
manifestEntry: g, | |
groupOffset: h, | |
sizeBytes: b | |
}); | |
}; | |
o != null ? o.forEach((S, k) => { | |
S === g.name && (C(), a[k] = true); | |
}) : C(), i.push(g.name), h += b; | |
}); | |
}), !a.every(d => d)) { | |
let d = o.filter((f, h) => !a[h]); | |
throw new Error(`Could not find weights in manifest with names: ${d.join(", ")}. | |
Manifest JSON has weights with names: ${i.join(", ")}.`); | |
} | |
let p = n.reduce((d, f, h) => (f && d.push(h), d), []), | |
u = []; | |
p.forEach(d => { | |
t8[d].paths.forEach(f => { | |
let h = e + (e.endsWith("/") ? "" : "/") + f; | |
u.push(h); | |
}); | |
}); | |
let c = await r(u), | |
l = {}, | |
m = 0; | |
return p.forEach(d => { | |
let f = t8[d].paths.length, | |
h = new jt(c.slice(m, m + f)); | |
s[d].forEach(x => { | |
let b = h.slice(x.groupOffset, x.groupOffset + x.sizeBytes), | |
C = ad(b, [x.manifestEntry]); | |
for (let S in C) l[S] = C[S]; | |
}), m += f; | |
}), l; | |
}; | |
} | |
var sX = "application/octet-stream"; | |
var aX = "application/json"; | |
var Al = class { | |
constructor(t8, e) { | |
if (this.DEFAULT_METHOD = "POST", e == null && (e = {}), this.weightPathPrefix = e.weightPathPrefix, this.onProgress = e.onProgress, this.weightUrlConverter = e.weightUrlConverter, e.fetchFunc != null ? ($(typeof e.fetchFunc == "function", () => "Must pass a function that matches the signature of `fetch` (see https://developer.mozilla.org/en-US/docs/Web/API/Fetch_API)"), this.fetch = e.fetchFunc) : this.fetch = A().platform.fetch, $(t8 != null && t8.length > 0, () => "URL path for http must not be null, undefined or empty."), Array.isArray(t8) && $(t8.length === 2, () => `URL paths for http must have a length of 2, (actual length is ${t8.length}).`), this.path = t8, e.requestInit != null && e.requestInit.body != null) throw new Error("requestInit is expected to have no pre-existing body, but has one."); | |
this.requestInit = e.requestInit || {}; | |
} | |
async save(t8) { | |
if (t8.modelTopology instanceof ArrayBuffer) throw new Error("BrowserHTTPRequest.save() does not support saving model topology in binary formats yet."); | |
let e = Object.assign({ | |
method: this.DEFAULT_METHOD | |
}, this.requestInit); | |
e.body = new FormData(); | |
let o = [{ | |
paths: ["./model.weights.bin"], | |
weights: t8.weightSpecs | |
}], | |
n = id(t8, o); | |
if (e.body.append("model.json", new Blob([JSON.stringify(n)], { | |
type: aX | |
}), "model.json"), t8.weightData != null) { | |
let a = jt.join(t8.weightData); | |
e.body.append("model.weights.bin", new Blob([a], { | |
type: sX | |
}), "model.weights.bin"); | |
} | |
let s = await this.fetch(this.path, e); | |
if (s.ok) return { | |
modelArtifactsInfo: va(t8), | |
responses: [s] | |
}; | |
throw new Error(`BrowserHTTPRequest.save() failed due to HTTP response status ${s.status}.`); | |
} | |
async load() { | |
let t8 = await this.fetch(this.path, this.requestInit); | |
if (!t8.ok) throw new Error(`Request to ${this.path} failed with status code ${t8.status}. Please verify this URL points to the model JSON of the model to load.`); | |
let e; | |
try { | |
e = await t8.json(); | |
} catch (s) { | |
let a = `Failed to parse model JSON of response from ${this.path}.`; | |
throw this.path.endsWith(".pb") ? a += " Your path contains a .pb file extension. Support for .pb models have been removed in TensorFlow.js 1.0 in favor of .json models. You can re-convert your Python TensorFlow model using the TensorFlow.js 1.0 conversion scripts or you can convert your.pb models with the 'pb2json'NPM script in the tensorflow/tfjs-converter repository." : a += " Please make sure the server is serving valid JSON for this request.", new Error(a); | |
} | |
let o = e.modelTopology, | |
n = e.weightsManifest; | |
if (o == null && n == null) throw new Error(`The JSON from HTTP path ${this.path} contains neither model topology or manifest for weights.`); | |
return ic(e, s => this.loadWeights(s)); | |
} | |
async loadWeights(t8) { | |
let e = Array.isArray(this.path) ? this.path[1] : this.path, | |
[o, n] = iX(e), | |
s = this.weightPathPrefix || o, | |
a = ud(t8), | |
i = [], | |
p = []; | |
for (let c of t8) for (let l of c.paths) this.weightUrlConverter != null ? p.push(this.weightUrlConverter(l)) : i.push(s + l + n); | |
this.weightUrlConverter && i.push(...(await Promise.all(p))); | |
let u = await nS(i, { | |
requestInit: this.requestInit, | |
fetchFunc: this.fetch, | |
onProgress: this.onProgress | |
}); | |
return [a, u]; | |
} | |
}; | |
Al.URL_SCHEME_REGEX = /^https?:\/\//; | |
function iX(r) { | |
let t8 = r.lastIndexOf("/"), | |
e = r.lastIndexOf("?"), | |
o = r.substring(0, t8), | |
n = e > t8 ? r.substring(e) : ""; | |
return [o + "/", n]; | |
} | |
function of(r) { | |
return r.match(Al.URL_SCHEME_REGEX) != null; | |
} | |
var qN = (r, t8) => { | |
if (typeof fetch == "undefined" && (t8 == null || t8.fetchFunc == null)) return null; | |
{ | |
let e = true; | |
if (Array.isArray(r) ? e = r.every(o => of(o)) : e = of(r), e) return nf(r, t8); | |
} | |
return null; | |
}; | |
ft.registerSaveRouter(qN); | |
ft.registerLoadRouter(qN); | |
function nf(r, t8) { | |
return new Al(r, t8); | |
} | |
function jN(r, t8) { | |
return nf(r, t8); | |
} | |
var Fl = class { | |
constructor(t8) { | |
this.modelArtifacts = t8; | |
} | |
load() { | |
return this.modelArtifacts; | |
} | |
}; | |
var sf = class { | |
constructor(t8) { | |
this.saveHandler = t8; | |
} | |
save(t8) { | |
return this.saveHandler(t8); | |
} | |
}; | |
var aS = class { | |
constructor(t8) { | |
t8.load && (this.load = () => Promise.resolve(t8.load())), t8.save && (this.save = e => Promise.resolve(t8.save(e))); | |
} | |
}; | |
function XN(r, t8, e, o) { | |
let n = arguments; | |
return new aS(iS(...n)); | |
} | |
function iS(r, t8, e, o) { | |
return arguments.length === 1 ? r.modelTopology != null || r.weightSpecs != null ? new Fl(r) : (console.warn("Please call tf.io.fromMemory() with only one argument. The argument should be of type ModelArtifacts. The multi-argument signature of tf.io.fromMemory() has been deprecated and will be removed in a future release."), new Fl({ | |
modelTopology: r | |
})) : (console.warn("Please call tf.io.fromMemory() with only one argument. The argument should be of type ModelArtifacts. The multi-argument signature of tf.io.fromMemory() has been deprecated and will be removed in a future release."), new Fl({ | |
modelTopology: r, | |
weightSpecs: t8, | |
weightData: e, | |
trainingConfig: o | |
})); | |
} | |
function YN(r) { | |
return new sf(r); | |
} | |
function QN(r) { | |
return new sf(r); | |
} | |
var JN = {}; | |
qe(JN, { | |
confusionMatrix: () => ZN | |
}); | |
function uX(r, t8, e) { | |
let o = v(r, "labels", "confusionMatrix"), | |
n = v(t8, "predictions", "confusionMatrix"); | |
$(e == null || e > 0 && Number.isInteger(e), () => `If provided, numClasses must be a positive integer, but got ${e}`), $(o.rank === 1, () => `Expected the rank of labels to be 1, but got ${o.rank}`), $(n.rank === 1, () => `Expected the rank of predictions to be 1, but got ${n.rank}`), $(o.shape[0] === n.shape[0], () => `Mismatch in the number of examples: ${o.shape[0]} vs. ${n.shape[0]}. Labels and predictions should have the same number of elements.`), $(e > 0 && Number.isInteger(e), () => `numClasses is required to be a positive integer, but got ${e}`); | |
let s = El(We(o, "int32"), e), | |
a = El(We(n, "int32"), e), | |
i = yc(s), | |
p = Ze(i, a); | |
return We(p, "int32"); | |
} | |
var ZN = N({ | |
confusionMatrix_: uX | |
}); | |
var oT = {}; | |
qe(oT, { | |
draw: () => gX, | |
fromPixels: () => xX, | |
fromPixelsAsync: () => dX, | |
toPixels: () => hX | |
}); | |
var cp; | |
var eT = false; | |
function tT(r, t8 = 3) { | |
if (t8 > 4) throw new Error("Cannot construct Tensor with more than 4 channels from pixels."); | |
if (r == null) throw new Error("pixels passed to tf.browser.fromPixels() can not be null"); | |
let e = false, | |
o = false, | |
n = false, | |
s = false, | |
a = false, | |
i = false; | |
if (r.data instanceof Uint8Array) e = true;else if (typeof ImageData != "undefined" && r instanceof ImageData) o = true;else if (typeof HTMLVideoElement != "undefined" && r instanceof HTMLVideoElement) n = true;else if (typeof HTMLImageElement != "undefined" && r instanceof HTMLImageElement) s = true;else if (r.getContext != null) a = true;else if (typeof ImageBitmap != "undefined" && r instanceof ImageBitmap) i = true;else throw new Error(`pixels passed to tf.browser.fromPixels() must be either an HTMLVideoElement, HTMLImageElement, HTMLCanvasElement, ImageData in browser, or OffscreenCanvas, ImageData in webworker or {data: Uint32Array, width: number, height: number}, but was ${r.constructor.name}`); | |
if (tc(Mu, T.backendName) != null) { | |
let f = { | |
pixels: r | |
}, | |
h = { | |
numChannels: t8 | |
}; | |
return T.runKernel(Mu, f, h); | |
} | |
let [u, c] = n ? [r.videoWidth, r.videoHeight] : [r.width, r.height], | |
l; | |
if (a) l = r.getContext("2d").getImageData(0, 0, u, c).data;else if (o || e) l = r.data;else if (s || n || i) { | |
if (cp == null) if (typeof document == "undefined") { | |
if (typeof OffscreenCanvas != "undefined" && typeof OffscreenCanvasRenderingContext2D != "undefined") cp = new OffscreenCanvas(1, 1).getContext("2d");else throw new Error("Cannot parse input in current context. Reason: OffscreenCanvas Context2D rendering is not supported."); | |
} else cp = document.createElement("canvas").getContext("2d", { | |
willReadFrequently: true | |
}); | |
cp.canvas.width = u, cp.canvas.height = c, cp.drawImage(r, 0, 0, u, c), l = cp.getImageData(0, 0, u, c).data; | |
} | |
let m; | |
if (t8 === 4) m = new Int32Array(l);else { | |
let f = u * c; | |
m = new Int32Array(f * t8); | |
for (let h = 0; h < f; h++) for (let g = 0; g < t8; ++g) m[h * t8 + g] = l[h * 4 + g]; | |
} | |
return Xd(m, [c, u, t8], "int32"); | |
} | |
function pX(r) { | |
return r != null && r.data instanceof Uint8Array; | |
} | |
function cX() { | |
return typeof window != "undefined" && typeof ImageBitmap != "undefined" && window.hasOwnProperty("createImageBitmap"); | |
} | |
function lX(r) { | |
return r != null && r.width !== 0 && r.height !== 0; | |
} | |
function mX(r) { | |
return cX() && !(r instanceof ImageBitmap) && lX(r) && !pX(r); | |
} | |
async function dX(r, t8 = 3) { | |
let e = null; | |
if (A().getBool("WRAP_TO_IMAGEBITMAP") && mX(r)) { | |
let o; | |
try { | |
o = await createImageBitmap(r, { | |
premultiplyAlpha: "none" | |
}); | |
} catch (n) { | |
o = null; | |
} | |
o != null && o.width === r.width && o.height === r.height ? e = o : e = r; | |
} else e = r; | |
return tT(e, t8); | |
} | |
function rT(r) { | |
if (r.rank !== 2 && r.rank !== 3) throw new Error(`toPixels only supports rank 2 or 3 tensors, got rank ${r.rank}.`); | |
let t8 = r.rank === 2 ? 1 : r.shape[2]; | |
if (t8 > 4 || t8 === 2) throw new Error(`toPixels only supports depth of size 1, 3 or 4 but got ${t8}`); | |
if (r.dtype !== "float32" && r.dtype !== "int32") throw new Error(`Unsupported type for toPixels: ${r.dtype}. Please use float32 or int32 tensors.`); | |
} | |
function fX(r) { | |
let t8 = (r == null ? void 0 : r.alpha) || 1; | |
if (t8 > 1 || t8 < 0) throw new Error(`Alpha value ${t8} is suppoed to be in range [0 - 1].`); | |
} | |
async function hX(r, t8) { | |
let e = v(r, "img", "toPixels"); | |
if (!(r instanceof ut)) { | |
let u = e; | |
e = We(u, "int32"), u.dispose(); | |
} | |
rT(e); | |
let [o, n] = e.shape.slice(0, 2), | |
s = e.rank === 2 ? 1 : e.shape[2], | |
a = await e.data(), | |
i = e.dtype === "float32" ? 255 : 1, | |
p = new Uint8ClampedArray(n * o * 4); | |
for (let u = 0; u < o * n; ++u) { | |
let c = [0, 0, 0, 255]; | |
for (let m = 0; m < s; m++) { | |
let d = a[u * s + m]; | |
if (e.dtype === "float32") { | |
if (d < 0 || d > 1) throw new Error(`Tensor values for a float32 Tensor must be in the range [0 - 1] but encountered ${d}.`); | |
} else if (e.dtype === "int32" && (d < 0 || d > 255)) throw new Error(`Tensor values for a int32 Tensor must be in the range [0 - 255] but encountered ${d}.`); | |
s === 1 ? (c[0] = d * i, c[1] = d * i, c[2] = d * i) : c[m] = d * i; | |
} | |
let l = u * 4; | |
p[l + 0] = Math.round(c[0]), p[l + 1] = Math.round(c[1]), p[l + 2] = Math.round(c[2]), p[l + 3] = Math.round(c[3]); | |
} | |
if (t8 != null) { | |
eT || tc(Pu, T.backendName) != null && (console.warn("tf.browser.toPixels is not efficient to draw tensor on canvas. Please try tf.browser.draw instead."), eT = true), t8.width = n, t8.height = o; | |
let u = t8.getContext("2d"), | |
c = new ImageData(p, n, o); | |
u.putImageData(c, 0, 0); | |
} | |
return e !== r && e.dispose(), p; | |
} | |
function gX(r, t8, e) { | |
let o = v(r, "img", "draw"); | |
if (!(r instanceof ut)) { | |
let a = o; | |
o = We(a, "int32"), a.dispose(); | |
} | |
rT(o), fX(e == null ? void 0 : e.imageOptions); | |
let n = { | |
image: o | |
}, | |
s = { | |
canvas: t8, | |
options: e | |
}; | |
T.runKernel(Pu, n, s); | |
} | |
var xX = N({ | |
fromPixels_: tT | |
}); | |
var af = {}; | |
qe(af, { | |
prepareAndValidate: () => nT | |
}); | |
function nT(r, t8) { | |
let e = r.shape.length, | |
o = t8.shape.length; | |
if (e < 1) throw new Error(`tf.gatherND() expects the input to be rank 1 or higher, but the rank was ${e}.`); | |
if (o < 1) throw new Error(`tf.gatherND() expects the indices to be rank 1 or higher, but the rank was ${o}.`); | |
if (t8.dtype !== "int32") throw new Error(`tf.gatherND() expects the indices to be int32 type, but the dtype was ${t8.dtype}.`); | |
if (t8.shape[o - 1] > e) throw new Error(`index innermost dimension length must be <= tensor rank; saw: ${t8.shape[o - 1]} vs. ${e}`); | |
if (He(r.shape) === 0) throw new Error(`Requested more than 0 entries, but input is empty. Input shape: ${r.shape}.`); | |
let n = t8.shape, | |
s = n[n.length - 1], | |
a = 1; | |
for (let l = 0; l < n.length - 1; ++l) a *= n[l]; | |
let i = r.shape, | |
p = n.slice(); | |
p.pop(); | |
let u = 1; | |
for (let l = s; l < e; ++l) u *= i[l], p.push(i[l]); | |
let c = [...js(r.shape).map(l => l / u), 1].slice(0, s); | |
return [p, a, u, c]; | |
} | |
var ct = {}; | |
qe(ct, { | |
assertParamsValid: () => bX, | |
computeFlatOffset: () => vX, | |
computeOutShape: () => wX, | |
getNormalizedAxes: () => SX, | |
isSliceContinous: () => IX, | |
maskToAxes: () => CX, | |
parseSliceParams: () => kX, | |
sliceInfo: () => NX, | |
startForAxis: () => mT, | |
startIndicesWithElidedDims: () => pT, | |
stopForAxis: () => dT, | |
stopIndicesWithElidedDims: () => cT, | |
stridesForAxis: () => lT, | |
stridesWithElidedDims: () => aT | |
}); | |
var uS = -2; | |
var yX = -1; | |
function bX(r, t8, e) { | |
let o = r.shape.length; | |
$(o === t8.length, () => `Error in slice${o}D: Length of begin ${t8} must match the rank of the array (${o}).`), $(o === e.length, () => `Error in slice${o}D: Length of size ${e} must match the rank of the array (${o}).`); | |
for (let n = 0; n < o; ++n) $(t8[n] + e[n] <= r.shape[n], () => `Error in slice${o}D: begin[${n}] + size[${n}] (${t8[n] + e[n]}) would overflow input.shape[${n}] (${r.shape[n]})`); | |
} | |
function CX(r) { | |
let t8 = [], | |
e = 0; | |
for (; r > 0;) r & 1 && t8.push(e), r /= 2, e++; | |
return t8; | |
} | |
function wX(r, t8, e) { | |
let o = []; | |
for (let n = 0; n < r.length; n++) o[n] = Math.ceil((t8[n] - r[n]) / e[n]); | |
return o; | |
} | |
function aT(r, t8, e, o) { | |
let n = [...r]; | |
for (let s = n.length; s < o.length; s++) n.push(1); | |
for (let s = 0; s < e; s++) s === 0 ? n[t8] = 1 : (n.splice(t8, 0, 1), n.pop()); | |
return n; | |
} | |
function iT(r, t8, e) { | |
return e <= r ? e : e - (t8 - 1); | |
} | |
function uT(r, t8) { | |
let e = []; | |
for (let o = 0; o < r; o++) e.push(t8 + o); | |
return e; | |
} | |
function SX(r, t8, e, o, n, s, a, i, p) { | |
let u = r.length, | |
c = new Array(u), | |
l = new Array(u), | |
m = new Array(u); | |
if (t8.length && e > 0) { | |
let d = t8[0], | |
f = e + 1; | |
c = pT(a, d, f, o, r), l = cT(i, d, f, n, r), m = aT(s, d, f, r); | |
} else for (let d = 0; d < u; d++) c[d] = mT(a, o, s, r, d, p), l[d] = dT(i, n, s, r, d, p), m[d] = lT(s, d, p); | |
return { | |
begin: c, | |
end: l, | |
strides: m | |
}; | |
} | |
function pT(r, t8, e, o, n) { | |
let s = [...n], | |
a = uT(e, t8); | |
for (let i = 0; i < s.length; i++) if (a.indexOf(i) > -1) s[i] = 0;else { | |
let p = iT(t8, e, i), | |
u = o[p]; | |
r & 1 << p && (u = 0), s[i] = u; | |
} | |
return s; | |
} | |
function cT(r, t8, e, o, n) { | |
let s = [...n], | |
a = uT(e, t8); | |
for (let i = 0; i < s.length; i++) if (a.indexOf(i) > -1) s[i] = Number.MAX_SAFE_INTEGER;else { | |
let p = iT(t8, e, i), | |
u = o[p]; | |
r & 1 << p && (u = Number.MAX_SAFE_INTEGER), s[i] = u; | |
} | |
for (let i = 0; i < s.length; i++) { | |
let p = n[i]; | |
s[i] < 0 && (s[i] += p), s[i] = qp(0, s[i], n[i]); | |
} | |
return s; | |
} | |
function lT(r, t8, e) { | |
let o = r[t8]; | |
return (e & 1 << t8 || o == null) && (o = 1), o; | |
} | |
function mT(r, t8, e, o, n, s) { | |
let a = t8[n], | |
i = e[n] || 1; | |
(r & 1 << n || s & 1 << n || a == null) && (i > 0 ? a = Number.MIN_SAFE_INTEGER : a = Number.MAX_SAFE_INTEGER); | |
let p = o[n]; | |
return a < 0 && (a += p), a = qp(0, a, p - 1), a; | |
} | |
function dT(r, t8, e, o, n, s) { | |
let a = t8[n], | |
i = e[n] || 1; | |
(r & 1 << n || s & 1 << n || a == null) && (i > 0 ? a = Number.MAX_SAFE_INTEGER : a = Number.MIN_SAFE_INTEGER); | |
let p = o[n]; | |
return a < 0 && (a += p), i > 0 ? a = qp(0, a, p) : a = qp(-1, a, p - 1), a; | |
} | |
function IX(r, t8, e) { | |
let o = e.length; | |
for (let n = 0; n < e.length; n++) if (e[n] > 1) { | |
o = n; | |
break; | |
} | |
for (let n = o + 1; n < e.length; n++) if (t8[n] > 0 || e[n] !== r[n]) return false; | |
return true; | |
} | |
function vX(r, t8) { | |
let e = r.length > 0 ? r[r.length - 1] : 1; | |
for (let o = 0; o < r.length - 1; o++) e += r[o] * t8[o]; | |
return e; | |
} | |
function kX(r, t8, e) { | |
let o, | |
n = r.shape.length; | |
typeof t8 == "number" ? o = [t8, ...new Array(n - 1).fill(0)] : t8.length < n ? o = t8.concat(new Array(n - t8.length).fill(0)) : o = t8.slice(), o.forEach(a => { | |
$(a !== -1, () => "slice() does not support negative begin indexing."); | |
}); | |
let s; | |
return e == null ? s = new Array(n).fill(-1) : typeof e == "number" ? s = [e, ...new Array(n - 1).fill(-1)] : e.length < n ? s = e.concat(new Array(n - e.length).fill(-1)) : s = e, s = s.map((a, i) => a >= 0 ? a : ($(a === -1, () => `Negative size values should be exactly -1 but got ${a} for the slice() size at index ${i}.`), r.shape[i] - o[i])), [o, s]; | |
} | |
function NX(r, t8, e, o, n, s, a, i, p) { | |
let u; | |
if (o == null ? (u = new Array(t8.length), u.fill(1)) : u = o, a != null && a & a - 1) throw new Error("Multiple ellipses in slice is not allowed."); | |
let c = false, | |
l = { | |
dims: u.length, | |
numAddAxisAfterEllipsis: 0, | |
begin: t8.slice(), | |
end: e.slice(), | |
strides: u.slice(), | |
beginMask: n, | |
endMask: s, | |
ellipsisMask: a, | |
newAxisMask: i, | |
shrinkAxisMask: p | |
}; | |
for (let C = 0; C < l.dims; C++) c && 1 << C & i && l.numAddAxisAfterEllipsis++, 1 << C & a && (c = true); | |
c || (l.ellipsisMask |= 1 << l.dims, l.dims++); | |
let m = { | |
dims: r.length, | |
beginMask: 0, | |
endMask: 0, | |
beginValid: false, | |
endValid: false | |
}; | |
TX(l, m); | |
let d = true, | |
f = true, | |
h = true, | |
g = [], | |
x = []; | |
for (let C = 0; C < r.length; ++C) { | |
if (m.strides[C] === 0) throw Error(`strides[${C}] must be non-zero`); | |
let S = !!(m.shrinkAxisMask & 1 << C), | |
k = r[C]; | |
if (k === -1) { | |
g.push(S ? 1 : -1); | |
continue; | |
} | |
let _ = [m.beginMask & 1 << C, m.endMask & 1 << C], | |
E = [m.strides[C] > 0 ? 0 : -1, m.strides[C] > 0 ? k : k - 1]; | |
if (S && m.strides[C] <= 0) throw Error("only stride 1 allowed on non-range indexing."); | |
h = h && m.strides[C] === 1; | |
let R = !!(m.beginMask & 1 << C && m.endMask & 1 << C); | |
if (m.beginValid && m.endValid) { | |
if (S) { | |
let M = m.begin[C] < 0 ? k + m.begin[C] : m.begin[C]; | |
if (m.begin[C] = M, m.end[C] = m.begin[C] + 1, M < 0 || M >= k) throw Error(`slice index ${m.begin[C]} of dimension ${C} out of bounds.`); | |
} else m.begin[C] = sT(m.begin[C], 0, m.strides[C], k, _, E), m.end[C] = sT(m.end[C], 1, m.strides[C], k, _, E); | |
let O = m.strides[C] === 1 && m.begin[C] === 0 && m.end[C] === k; | |
d = d && O, f = f && (C === 0 && m.strides[C] === 1 || O); | |
} else d = d && m.strides[C] === 1 && R, f = f && (C === 0 && m.strides[C] === 1 || R); | |
let D, | |
P = false; | |
if (m.beginValid && m.endValid ? (D = m.end[C] - m.begin[C], P = true) : S ? (D = 1, P = true) : R && k >= 0 && (m.strides[C] < 0 ? D = -k : D = k, P = true), P) { | |
let O; | |
D === 0 || D < 0 != m.strides[C] < 0 ? O = 0 : O = Math.trunc(D / m.strides[C]) + (D % m.strides[C] !== 0 ? 1 : 0), g.push(O); | |
} else g.push(-1); | |
} | |
for (let C = 0; C < m.finalShapeGatherIndices.length; ++C) { | |
let S = m.finalShapeGatherIndices[C]; | |
S >= 0 ? x.push(g[S]) : S === uS && x.push(1); | |
} | |
return { | |
finalShapeSparse: x.filter((C, S) => m.finalShapeGatherIndices[S] !== uS), | |
finalShape: x, | |
isIdentity: d, | |
sliceDim0: f, | |
isSimpleSlice: h, | |
begin: m.begin, | |
end: m.end, | |
strides: m.strides | |
}; | |
} | |
function TX(r, t8) { | |
t8.beginMask = 0, t8.endMask = 0, t8.shrinkAxisMask = 0; | |
let e = 0; | |
t8.beginValid = r.begin != null, t8.endValid = r.end != null, t8.begin = new Array(t8.dims), t8.end = new Array(t8.dims), t8.strides = new Array(t8.dims), t8.finalShapeGatherIndices = [], t8.finalShapeGatherIndicesSparse = [], t8.inputShapeGatherIndicesSparse = new Array(t8.dims); | |
for (let o = 0; o < r.dims; o++) if (1 << o & r.ellipsisMask) { | |
let n = Math.min(t8.dims - (r.dims - o) + 1 + r.numAddAxisAfterEllipsis, t8.dims); | |
for (; e < n; e++) t8.begin[e] = 0, t8.end[e] = 0, t8.strides[e] = 1, t8.beginMask |= 1 << e, t8.endMask |= 1 << e, t8.finalShapeGatherIndices.push(e), t8.finalShapeGatherIndicesSparse.push(-1), t8.inputShapeGatherIndicesSparse[e] = o; | |
} else if (1 << o & r.newAxisMask) t8.finalShapeGatherIndices.push(uS), t8.finalShapeGatherIndicesSparse.push(-1);else { | |
if (e === t8.begin.length) throw Error(`Index out of range using input dim ${e}; input has only ${t8.dims} dims, ${t8.begin.length}.`); | |
r.begin != null && (t8.begin[e] = r.begin[o]), r.end != null && (t8.end[e] = r.end[o]), t8.strides[e] = r.strides[o], r.beginMask & 1 << o && (t8.beginMask |= 1 << e), r.endMask & 1 << o && (t8.endMask |= 1 << e), r.shrinkAxisMask & 1 << o ? (t8.finalShapeGatherIndices.push(yX), t8.finalShapeGatherIndicesSparse.push(-1), t8.shrinkAxisMask |= 1 << e) : (t8.finalShapeGatherIndices.push(e), t8.finalShapeGatherIndicesSparse.push(o)), t8.inputShapeGatherIndicesSparse[e] = o, e++; | |
} | |
} | |
function sT(r, t8, e, o, n, s) { | |
if (n[t8]) return e > 0 ? s[t8] : s[t8 + 1 & 1]; | |
{ | |
let a = r < 0 ? o + r : r; | |
return a < s[0] ? s[0] : a > s[1] ? s[1] : a; | |
} | |
} | |
var _X = "4.11.0"; | |
var Pl = class { | |
static sgd(t8) { | |
return new mi(t8); | |
} | |
static momentum(t8, e, o = false) { | |
return new up(t8, e, o); | |
} | |
static rmsprop(t8, e = 0.9, o = 0, n = null, s = false) { | |
return new pp(t8, e, o, n, s); | |
} | |
static adam(t8 = 1e-3, e = 0.9, o = 0.999, n = null) { | |
return new ap(t8, e, o, n); | |
} | |
static adadelta(t8 = 1e-3, e = 0.95, o = null) { | |
return new np(t8, e, o); | |
} | |
static adamax(t8 = 2e-3, e = 0.9, o = 0.999, n = null, s = 0) { | |
return new ip(t8, e, o, n, s); | |
} | |
static adagrad(t8, e = 0.1) { | |
return new sp(t8, e); | |
} | |
}; | |
var TGe = Pl; | |
var $X = (() => typeof requestAnimationFrame != "undefined" ? requestAnimationFrame : typeof setImmediate != "undefined" ? setImmediate : r => r())(); | |
function pS() { | |
return new Promise(r => $X(() => r())); | |
} | |
var w = {}; | |
qe(w, { | |
ERF_A1: () => KX, | |
ERF_A2: () => qX, | |
ERF_A3: () => jX, | |
ERF_A4: () => XX, | |
ERF_A5: () => YX, | |
ERF_P: () => HX, | |
PARALLELIZE_THRESHOLD: () => uf, | |
RowPartitionType: () => Pa, | |
SELU_SCALE: () => GX, | |
SELU_SCALEALPHA: () => UX, | |
applyActivation: () => rp, | |
assertAndGetBroadcastShape: () => rt, | |
assertAxesAreInnerMostDims: () => tK, | |
assertParamsConsistent: () => EX, | |
assignToTypedArray: () => r5, | |
axesAreInnerMostDims: () => Bw, | |
calculateShapes: () => V1, | |
checkEinsumDimSizes: () => u5, | |
checkPadOnDimRoundingMode: () => Bt, | |
combineLocations: () => x2, | |
combineRaggedTensorToTensorShapes: () => DX, | |
complexWithEvenIndex: () => JX, | |
complexWithOddIndex: () => e5, | |
computeConv2DInfo: () => Hu, | |
computeConv3DInfo: () => Mk, | |
computeDefaultPad: () => Lw, | |
computeDilation2DInfo: () => J4, | |
computeOptimalWindowSize: () => OX, | |
computeOutAndReduceShapes: () => eK, | |
computeOutShape: () => RX, | |
computePool2DInfo: () => Mw, | |
computePool3DInfo: () => eH, | |
convertConv2DDataFormat: () => Lk, | |
decodeEinsumEquation: () => a5, | |
eitherStridesOrDilationsAreOne: () => xr, | |
expandShapeToKeepDim: () => ii, | |
exponent: () => n5, | |
exponents: () => o5, | |
fromStringArrayToUint8: () => $5, | |
fromUint8ToStringArray: () => _5, | |
getAxesPermutation: () => rK, | |
getBroadcastDims: () => m2, | |
getComplexWithIndex: () => t5, | |
getEinsumComputePath: () => p5, | |
getEinsumPermutation: () => i5, | |
getFusedBiasGradient: () => tp, | |
getFusedDyActivation: () => ep, | |
getImageCenter: () => MX, | |
getInnerMostAxes: () => nK, | |
getPermuted: () => BX, | |
getRaggedRank: () => FX, | |
getReductionAxes: () => yd, | |
getReshaped: () => LX, | |
getReshapedPermuted: () => zX, | |
getRowPartitionTypesHelper: () => AX, | |
getSliceBeginCoords: () => VX, | |
getSliceSize: () => WX, | |
getSparseFillEmptyRowsIndicesDenseShapeMismatch: () => d5, | |
getSparseFillEmptyRowsNegativeIndexErrorMessage: () => f5, | |
getSparseFillEmptyRowsOutOfRangeIndexErrorMessage: () => h5, | |
getSparseReshapeEmptyTensorZeroOutputDimErrorMessage: () => y5, | |
getSparseReshapeInputOutputMismatchErrorMessage: () => C5, | |
getSparseReshapeInputOutputMultipleErrorMessage: () => b5, | |
getSparseReshapeMultipleNegativeOneOutputDimErrorMessage: () => g5, | |
getSparseReshapeNegativeOutputDimErrorMessage: () => x5, | |
getSparseSegmentReductionIndicesOutOfRangeErrorMessage: () => v5, | |
getSparseSegmentReductionNegativeSegmentIdsErrorMessage: () => w5, | |
getSparseSegmentReductionNonIncreasingSegmentIdsErrorMessage: () => S5, | |
getSparseSegmentReductionSegmentIdOutOfRangeErrorMessage: () => I5, | |
getUndoAxesPermutation: () => oK, | |
isIdentityPermutation: () => c5, | |
log: () => jG, | |
mergeRealAndImagArrays: () => QX, | |
prepareAndValidate: () => nT, | |
prepareSplitSize: () => m5, | |
segment_util: () => lS, | |
shouldFuse: () => op, | |
slice_util: () => ct, | |
splitRealAndImagArrays: () => ZX, | |
stridesOrDilationsArePositive: () => Ta, | |
tupleValuesAreOne: () => Gu, | |
upcastType: () => dt, | |
validateDefaultValueShape: () => PX, | |
validateInput: () => xc, | |
validateUpdateShape: () => Yw, | |
warn: () => Ia | |
}); | |
// Validates concat inputs: every shape in `r` must have the same rank and
// must agree with the first shape on every axis except the concat axis `t8`.
// `$` is the bundle's assert helper (tfjs util.assert); it throws with the
// generated message when the condition is false.
function EX(r, t8) {
  let e = r[0].length;
  r.forEach((n, s) => {
    $(n.length === e, () => `Error in concat${e}D: rank of tensors[${s}] must be the same as the rank of the rest (${e})`);
  }), $(t8 >= 0 && t8 < e, () => `Error in concat${e}D: axis must be between 0 and ${e - 1}.`);
  let o = r[0];
  r.forEach((n, s) => {
    // NOTE(review): the message interpolates ${s} (the tensor index) as the
    // "non-concatenated axis" even though the axis variable is `a`; this
    // mirrors the bundled upstream code and is left unchanged.
    for (let a = 0; a < e; a++) $(a === t8 || n[a] === o[a], () => `Error in concat${e}D: Shape of tensors[${s}] (${n}) does not match the shape of the rest (${o}) along the non-concatenated axis ${s}.`);
  });
}
// Computes the output shape of a concat: copy the first input shape, then
// accumulate every remaining input's extent along the concat axis `t8`.
function RX(r, t8) {
  const outShape = r[0].slice();
  for (const shape of r.slice(1)) {
    outShape[t8] += shape[t8];
  }
  return outShape;
}
// RowPartitionType enum for ragged-tensor ops (mirrors TensorFlow's
// row-partition kinds). Uses the TypeScript enum reverse-mapping pattern:
// Pa.FIRST_DIM_SIZE === 0 and Pa[0] === "FIRST_DIM_SIZE", etc.
var Pa;
(function (r) {
  r[r.FIRST_DIM_SIZE = 0] = "FIRST_DIM_SIZE", r[r.VALUE_ROWIDS = 1] = "VALUE_ROWIDS", r[r.ROW_LENGTHS = 2] = "ROW_LENGTHS", r[r.ROW_SPLITS = 3] = "ROW_SPLITS", r[r.ROW_LIMITS = 4] = "ROW_LIMITS", r[r.ROW_STARTS = 5] = "ROW_STARTS";
})(Pa || (Pa = {}));
function DX(r, t8, e) { | |
let o = new Array(); | |
if (e == null && t8 == null) return o; | |
if (t8 == null) for (; o.length < r + e.length;) o.push(-1);else o = t8.slice(); | |
if (e == null) return o; | |
if (r + e.length !== o.length) throw new Error(`rt input.shape and shape=${t8} are incompatible: rt input.rank = ${r + e.length}, but shape.rank = ${o.length}`); | |
for (let n = 1; n < e.length; ++n) { | |
let s = e[n], | |
a = o[o.length - e.length + n], | |
i = o[a]; | |
if (s >= 0) if (i >= 0) { | |
if (i !== s) throw new Error(`rt input.shape and shape=${t8} are incompatible: rt input.shape[${n + r}] = ${s} but shape[${n + r}] = ${i}`); | |
} else o[a] = s; | |
} | |
return o; | |
} | |
// Maps row-partition type name strings to Pa enum values, stopping at the
// first name that is not a known partition type (prefix semantics).
function AX(r) {
  const lookup = {
    FIRST_DIM_SIZE: Pa.FIRST_DIM_SIZE,
    VALUE_ROWIDS: Pa.VALUE_ROWIDS,
    ROW_LENGTHS: Pa.ROW_LENGTHS,
    ROW_SPLITS: Pa.ROW_SPLITS,
    ROW_LIMITS: Pa.ROW_LIMITS,
    ROW_STARTS: Pa.ROW_STARTS
  };
  const result = [];
  for (const name of r) {
    if (!(name in lookup)) {
      break;
    }
    result.push(lookup[name]);
  }
  return result;
}
// Ragged rank = number of row-partition entries; a leading FIRST_DIM_SIZE
// entry is metadata rather than a partition, so it is not counted.
function FX(r) {
  if (r.length === 0) {
    return 0;
  }
  return r[0] === Pa.FIRST_DIM_SIZE ? r.length - 1 : r.length;
}
// Validates that a ragged op's defaultValue shape (`r`) is broadcast-
// compatible with the flat-values shape (`t8`). Either may be null (no
// check). Throws on rank or dimension incompatibility.
function PX(r, t8) {
  if (r == null || t8 == null) {
    return;
  }
  const defaultRank = r.length;
  const valuesRank = t8.length;
  if (defaultRank >= valuesRank) {
    throw new Error(`defaultValue.shape=${r} and ragged tensor flatValues.shape=${t8}, are incompatible: defaultValue.rank = ${defaultRank} must be less than ragged tensor input flatValues.rank = ${valuesRank})`);
  }
  const checked = Math.min(defaultRank, valuesRank - 1);
  for (let dim = 0; dim < checked; ++dim) {
    const defaultDim = r[dim];
    const valueDim = t8[dim + 1];
    // A default dim of 1 broadcasts; otherwise sizes must match exactly.
    if (defaultDim >= 0 && valueDim >= 0 && defaultDim !== 1 && defaultDim !== valueDim) {
      throw new Error(`defaultValue.shape=${r}, and ragged tensor input flatValues.shape=${t8} are incompatible: defaultValue.shape[${dim - r.length}] = ${defaultDim} but ragged tensor input.flatValues.shape[${dim - r.length}] = ${valueDim}`);
    }
  }
}
// Threshold below which a reduction runs in a single pass (parallelization
// threshold analogue).
var uf = 30;
// Picks a reduction "window" size for `r` elements: the size itself when
// small, otherwise the nearest divisor of `r` close to sqrt(r) (`Xp` is the
// bundle's nearest-divisor helper).
function OX(r) {
  return r <= uf ? r : Xp(r, Math.floor(Math.sqrt(r)));
}
// Converts a relative rotation center `r` (scalar or [x, y] fractions) into
// absolute pixel coordinates for an image of height `t8` and width `e`.
function MX(r, t8, e) {
  const isScalar = typeof r === "number";
  const centerX = e * (isScalar ? r : r[0]);
  const centerY = t8 * (isScalar ? r : r[1]);
  return [centerX, centerY];
}
// Builds the intermediate reshape used by batchToSpace (o === true) or
// spaceToBatch (o === false): `r` is the input shape, `t8` the block shape,
// `e` the product of the block shape.
function LX(r, t8, e, o = true) {
  let reshaped = [];
  if (o) {
    reshaped = reshaped.concat(t8.slice(0));
    reshaped.push(r[0] / e);
    reshaped = reshaped.concat(r.slice(1));
  } else {
    reshaped = reshaped.concat(r[0]);
    const blockRank = t8.length;
    for (let i = 0; i < blockRank; ++i) {
      reshaped = reshaped.concat([r[i + 1] / t8[i], t8[i]]);
    }
    reshaped = reshaped.concat(r.slice(blockRank + 1));
  }
  return reshaped;
}
// Builds the transpose permutation of rank `r` used between the reshapes of
// batchToSpace (e === true) / spaceToBatch (e === false), with `t8` block
// dimensions interleaved.
function BX(r, t8, e = true) {
  const perm = [];
  if (e) {
    perm.push(t8);
    for (let axis = t8 + 1; axis < r; ++axis) {
      if (axis <= 2 * t8) {
        perm.push(axis);
        perm.push(axis - (t8 + 1));
      } else {
        perm.push(axis);
      }
    }
    return perm;
  }
  const evens = [];
  const odds = [];
  for (let axis = 1; axis < r; ++axis) {
    if (axis >= t8 * 2 + 1 || axis % 2 === 1) {
      odds.push(axis);
    } else {
      evens.push(axis);
    }
  }
  perm.push(...evens);
  perm.push(0);
  perm.push(...odds);
  return perm;
}
// Builds the final reshape after the permutation in batchToSpace
// (o === true) / spaceToBatch (o === false): merges (or splits) the batch
// dim by factor `e` and scales spatial dims by the block shape `t8`.
function zX(r, t8, e, o = true) {
  const result = [];
  result.push(o ? r[0] / e : r[0] * e);
  for (let axis = 1; axis < r.length; ++axis) {
    if (axis <= t8.length) {
      result.push(o ? t8[axis - 1] * r[axis] : r[axis] / t8[axis - 1]);
    } else {
      result.push(r[axis]);
    }
  }
  return result;
}
// Begin coordinates for the crop slice in batchToSpace: 0 for the batch dim,
// then the leading crop amount of each of the `t8` spatial dims.
function VX(r, t8) {
  const begin = [0];
  for (let dim = 0; dim < t8; ++dim) {
    begin.push(r[dim][0]);
  }
  return begin;
}
// Slice sizes for the crop in batchToSpace: keep the batch dim, then shrink
// each of the `e` spatial dims of shape `r` by its crops `t8[i]` on both
// ends.
function WX(r, t8, e) {
  const size = r.slice(0, 1);
  for (let dim = 0; dim < e; ++dim) {
    const cropped = r[dim + 1] - t8[dim][0] - t8[dim][1];
    size.push(cropped);
  }
  return size;
}
// Presumably the SELU and erf constants from tfjs backend_util:
// UX ~ SELU scale*alpha, GX ~ SELU scale — TODO confirm against tfjs source.
var UX = 1.7580993408473768;
var GX = 1.0507009873554805;
// HX..YX match the Abramowitz & Stegun erf approximation 7.1.26
// coefficients (p, a1..a5).
var HX = 0.3275911;
var KX = 0.254829592;
var qX = -0.284496736;
var jX = 1.421413741;
var XX = -1.453152027;
var YX = 1.061405429;
// Interleaves separate real and imag arrays into a single Float32Array laid
// out as [re0, im0, re1, im1, ...]. Throws when the lengths differ.
function QX(r, t8) {
  if (r.length !== t8.length) {
    throw new Error(`Cannot merge real and imag arrays of different lengths. real:${r.length}, imag: ${t8.length}.`);
  }
  const merged = new Float32Array(r.length * 2);
  for (let slot = 0; slot < r.length; slot++) {
    merged[slot * 2] = r[slot];
    merged[slot * 2 + 1] = t8[slot];
  }
  return merged;
}
// Splits an interleaved complex array [re0, im0, re1, im1, ...] back into
// separate real and imag Float32Arrays.
function ZX(r) {
  const count = r.length / 2;
  const real = new Float32Array(count);
  const imag = new Float32Array(count);
  for (let slot = 0; slot < count; slot++) {
    real[slot] = r[slot * 2];
    imag[slot] = r[slot * 2 + 1];
  }
  return {
    real,
    imag
  };
}
// Extracts the complex values at even complex indices from an interleaved
// array (every group of 4 floats holds two complex numbers; take the first).
function JX(r) {
  const count = Math.ceil(r.length / 4);
  const real = new Float32Array(count);
  const imag = new Float32Array(count);
  for (let i = 0; i < r.length; i += 4) {
    const slot = Math.floor(i / 4);
    real[slot] = r[i];
    imag[slot] = r[i + 1];
  }
  return {
    real,
    imag
  };
}
// Extracts the complex values at odd complex indices from an interleaved
// array (every group of 4 floats holds two complex numbers; take the
// second).
function e5(r) {
  const count = Math.floor(r.length / 4);
  const real = new Float32Array(count);
  const imag = new Float32Array(count);
  for (let i = 2; i < r.length; i += 4) {
    const slot = Math.floor(i / 4);
    real[slot] = r[i];
    imag[slot] = r[i + 1];
  }
  return {
    real,
    imag
  };
}
// Reads the complex number at index `t8` from an interleaved
// [re, im, re, im, ...] array.
function t5(r, t8) {
  const base = t8 * 2;
  return {
    real: r[base],
    imag: r[base + 1]
  };
}
// Writes the complex number (real `t8`, imag `e`) at index `o` into the
// interleaved array `r` in place.
function r5(r, t8, e, o) {
  const base = o * 2;
  r[base] = t8;
  r[base + 1] = e;
}
// Computes the FFT twiddle-factor table for size `r`: exponents
// e^(±2πi·n/r) for n in [0, ceil(r/2)); `t8` selects inverse (+) vs
// forward (−) transform.
function o5(r, t8) {
  const real = new Float32Array(r / 2);
  const imag = new Float32Array(r / 2);
  const halfCount = Math.ceil(r / 2);
  for (let n = 0; n < halfCount; n++) {
    const angle = (t8 ? 2 : -2) * Math.PI * (n / r);
    real[n] = Math.cos(angle);
    imag[n] = Math.sin(angle);
  }
  return {
    real,
    imag
  };
}
// Computes a single FFT twiddle factor e^(±2πi·r/t8); `e` selects inverse
// (+) vs forward (−) transform.
function n5(r, t8, e) {
  const angle = (e ? 2 : -2) * Math.PI * (r / t8);
  const real = Math.cos(angle);
  const imag = Math.sin(angle);
  return {
    real,
    imag
  };
}
// Einsum equation lexing constants: the arrow separator, its global regex
// (used to count arrows), the comma between input subscripts, and the
// (unsupported) ellipsis token.
var cS = "->";
var s5 = /->/g;
var fT = ",";
var hT = "...";
// Parses an einsum equation `r` for `t8` input tensors into:
// - allDims: distinct axis labels, output labels first,
// - summedDims: label ids summed away (present in inputs, not in output),
// - idDims: per-input mapping from axis position to label id.
// Validates: exactly one "->", no ellipsis, input count matches `t8`,
// at most 2 inputs, and no duplicate axes within one input.
function a5(r, t8) {
  r = r.replace(/\s/g, "");
  // Count arrows by how much removing them (global regex s5) shrinks the
  // string.
  let e = (r.length - r.replace(s5, "").length) / cS.length;
  if (e < 1) throw new Error("Equations without an arrow are not supported.");
  if (e > 1) throw new Error(`Equation must contain exactly one arrow ("${cS}").`);
  let [o, n] = r.split(cS);
  $(o.indexOf(hT) === -1, () => `The ellipsis notation ("${hT}") is not supported yet.`);
  let s = o.split(fT),
    a = s.length;
  if (t8 !== a) throw new Error(`Expected ${a} input tensors, received ${t8}`);
  if (a > 2) throw new Error("Support for more than 2 input tensors is not implemented yet.");
  // Collect output labels first so their ids align with output axis order.
  let i = [];
  for (let m = 0; m < n.length; ++m) {
    let d = n[m];
    if (!s.some(f => f.indexOf(d) !== -1)) throw new Error(`Output subscripts contain the label ${d} not present in the input subscripts.`);
    i.indexOf(d) === -1 && i.push(d);
  }
  // Then any input-only (summed) labels, skipping the comma separator.
  for (let m = 0; m < o.length; ++m) {
    let d = o[m];
    i.indexOf(d) === -1 && d !== fT && i.push(d);
  }
  let p = new Array(s.length);
  for (let m = 0; m < a; ++m) {
    if (new Set(s[m].split("")).size !== s[m].length) throw new Error(`Found duplicate axes in input component ${s[m]}. Support for duplicate axes in input is not implemented yet.`);
    p[m] = [];
    for (let d = 0; d < s[m].length; ++d) p[m].push(i.indexOf(s[m][d]));
  }
  // Label ids at or beyond the output label count are the summed dims.
  let u = i.length,
    c = n.length,
    l = [];
  for (let m = c; m < u; ++m) l.push(m);
  return {
    allDims: i,
    summedDims: l,
    idDims: p
  };
}
// Given `r` total dims and `t8` (the dims present on one einsum input),
// computes the permutation that orders that input's dims by label id, plus
// the label ids missing from the input (to be expandDims'ed).
function i5(r, t8) {
  let perm = new Array(r).fill(-1);
  for (let pos = 0; pos < t8.length; ++pos) {
    perm[t8[pos]] = pos;
  }
  const expand = [];
  for (let dim = 0; dim < r; ++dim) {
    if (perm[dim] === -1) {
      expand.push(dim);
    }
  }
  perm = perm.filter(dim => dim !== -1);
  return {
    permutationIndices: perm,
    expandDims: expand
  };
}
// Checks that every einsum label maps to a consistent dimension size across
// all inputs. `r` = number of distinct labels, `t8[i]` maps each axis of
// input i to a label id, `e` = the input tensors. Asserts (via `$`, the
// bundle's assert helper) on any size mismatch.
function u5(r, t8, e) {
  let o = new Array(r);
  for (let n = 0; n < e.length; ++n) {
    let s = e[n].shape;
    for (let a = 0; a < t8[n].length; ++a) o[t8[n][a]] === void 0 ? o[t8[n][a]] = s[a] : $(o[t8[n][a]] === s[a], () => `Expected dimension ${o[t8[n][a]]} at axis ${a} of input shaped ${JSON.stringify(s)}, but got dimension ${s[a]}`);
  }
}
// Computes the einsum contraction path: for each summed dim in `r` (in
// order), the step lists the inputs (by index into `t8`, the per-input
// label-id lists) newly consumed at that step. Mutates `r` by appending -1
// when no dims are summed (the "multiply everything" step).
function p5(r, t8) {
  const path = r;
  if (r.length === 0) {
    path.push(-1);
  }
  const stepCount = r.length + 1;
  const steps = [];
  for (let step = 0; step < stepCount; ++step) {
    steps.push([]);
  }
  const consumed = [];
  for (let step = 0; step < path.length; ++step) {
    const dim = path[step];
    for (const termIndex of l5(t8, dim)) {
      if (consumed.indexOf(termIndex) === -1) {
        steps[step].push(termIndex);
        consumed.push(termIndex);
      }
    }
  }
  return {
    path,
    steps
  };
}
// True when `r` is the identity permutation [0, 1, 2, ...].
function c5(r) {
  return r.every((value, index) => value === index);
}
// Indices of the terms in `r` that involve label id `t8` (a scalar term or
// t8 === -1 always matches).
function l5(r, t8) {
  const matches = [];
  for (let term = 0; term < r.length; ++term) {
    if (r[term].length === 0 || r[term].indexOf(t8) !== -1 || t8 === -1) {
      matches.push(term);
    }
  }
  return matches;
}
// Normalizes split sizes for tf.split along axis `e` of tensor `r`:
// a numeric `t8` must evenly divide the axis size; an array may contain
// one -1, which is inferred from the remaining sizes.
// NOTE: mutates `t8` in place when inferring the -1 entry.
function m5(r, t8, e = 0) {
  let o = [];
  if (typeof t8 == "number") $(r.shape[e] % t8 === 0, () => "Number of splits must evenly divide the axis."), o = new Array(t8).fill(r.shape[e] / t8);else {
    let n = t8.reduce((a, i) => (i === -1 && (a += 1), a), 0);
    $(n <= 1, () => "There should be only one negative value in split array.");
    let s = t8.indexOf(-1);
    if (s !== -1) {
      // NOTE(review): this reduce has no initial value, so if t8[0] === -1 it
      // seeds the accumulator with -1; mirrors the bundled upstream code —
      // confirm against tfjs split_util before changing.
      let a = t8.reduce((i, p) => p > 0 ? i + p : i);
      t8[s] = r.shape[e] - a;
    }
    $(r.shape[e] === t8.reduce((a, i) => a + i), () => "The sum of sizes must match the size of the axis dimension."), o = t8;
  }
  return o;
}
// Error message: SparseFillEmptyRows received denseShape[0] === 0 but a
// non-empty indices tensor (`r` = indices.shape[0]). The message spans two
// lines by design (the template literal contains a newline).
function d5(r) {
  return `Received SparseTensor with denseShape[0] = 0 but
indices.shape[0] = ${r}`;
}
// Error message: SparseFillEmptyRows found a negative row index `t8` at
// indices row `r`.
function f5(r, t8) {
  const message = `indices(${r}, 0) is invalid: ${t8} < 0`;
  return message;
}
// Error message: SparseFillEmptyRows found row index `t8` at indices row `r`
// that is out of range for dense size `e`.
function h5(r, t8, e) {
  const message = `indices(${r}, 0) is invalid: ${t8} >= ${e}`;
  return message;
}
// Error message: SparseReshape requested shape has two -1 dims (`r`, `t8`).
function g5(r, t8) {
  const message = `only one output dimension may be -1, not both ${r} and ${t8}`;
  return message;
}
// Error message: SparseReshape output dim `r` has negative size `t8`.
function x5(r, t8) {
  const message = `size ${r} must be non-negative, not ${t8}`;
  return message;
}
// Error message: SparseReshape cannot infer a -1 output dim when the input
// tensor is empty.
function y5() {
  return "reshape cannot infer the missing input size for an empty tensor unless all specified input sizes are non-zero";
}
// Error message: SparseReshape where the requested shape's element count
// (`He` = size-from-shape helper) does not evenly divide the input's element
// count. The template literal intentionally spans two lines.
function b5(r, t8) {
  let e = He(r),
    o = He(t8);
  return `Input to reshape is a SparseTensor with ${e}
dense values, but the requested shape requires a multiple of ${o}. inputShape=${r} outputShape= ${t8}`;
}
// Error message: SparseReshape where input and output element counts differ.
function C5(r, t8) {
  let e = He(r),
    o = He(t8);
  return `Input to reshape is a tensor with ${e} dense values, but the requested shape has ${o}. inputShape=${r} outputShape=${t8}`;
}
// Error message: sparse segment reduction received a negative segment id.
function w5() {
  return "segment ids must be >= 0";
}
// Error message: sparse segment reduction requires sorted segment ids.
function S5() {
  return "segment ids are not increasing";
}
// Error message: sparse segment reduction saw segment id `r` outside
// [0, `t8`).
function I5(r, t8) {
  const message = `Segment id ${r} out of range [0, ${t8}), possibly because segmentIds input is not sorted.`;
  return message;
}
// Error message: sparse segment reduction index `t8` at position `r` is
// outside [0, `e`).
function v5(r, t8, e) {
  const message = `Bad: indices[${r}] == ${t8} out of range [0, ${e})`;
  return message;
}
// segment_util namespace object; `qe` is the bundle's export helper that
// installs lazy getters for the listed members.
var lS = {};
qe(lS, {
  collectGatherOpShapeInfo: () => T5,
  computeOutShape: () => N5,
  segOpComputeOptimalWindowSize: () => k5
});
// Picks a window size for segment ops over `r` elements: the size itself
// when <= uf, otherwise start at the nearest divisor of `r` to sqrt(r)
// (`Xp`) and step to larger divisors until exceeding `t8` or reaching `r`.
function k5(r, t8) {
  let e = false,
    o;
  for (r <= uf ? (o = r, e = true) : o = Xp(r, Math.floor(Math.sqrt(r))); !e;) o > t8 || o === r ? e = true : o = Xp(r, o + 1);
  return o;
}
// Output shape of a segment reduction: copy `r` but replace the extent of
// axis `t8` with the number of segments `e`.
function N5(r, t8, e) {
  const outShape = [];
  const rank = r.length;
  for (let axis = 0; axis < rank; axis++) {
    outShape.push(axis === t8 ? e : r[axis]);
  }
  return outShape;
}
// Shape bookkeeping for gather: validates batchDims `o` against the ranks of
// x (`r`) and indices (`t8`), then derives batch/outer/slice sizes and the
// output shape for gathering along axis `e`.
function T5(r, t8, e, o) {
  let n = t8.shape.length,
    s = r.shape.length;
  if (o !== 0 && (o < -n || o > n)) throw new Error(`Expect batchDims in the range of [-${n}, ${n}], but got ${o}`);
  if (o < 0 && (o += n), o > s) throw new Error(`batchDims (${o}) must be less than rank(x) (
${s}).`);
  if (e < o) throw new Error(`batchDims (${o}) must be less than or equal to axis (${e}).`);
  // Leading batch dims of x and indices must match elementwise.
  for (let l = 0; l < o; ++l) if (r.shape[l] !== t8.shape[l]) throw new Error(`x.shape[${l}]: ${r.shape[l]} should be equal to indices.shape[${l}]: ${t8.shape[l]}.`);
  let a = r.shape[e],
    i = [],
    p = 1,
    u = 1,
    c = 1;
  // Output shape = batch dims, then x's outer dims (before the axis), then
  // the indices' non-batch dims, then x's inner dims (after the axis).
  for (let l = 0; l < o; ++l) i.push(r.shape[l]), p *= r.shape[l];
  for (let l = o; l < e; l++) i.push(r.shape[l]), u *= r.shape[l];
  for (let l = o; l < n; l++) i.push(t8.shape[l]);
  for (let l = e + 1; l < s; l++) i.push(r.shape[l]), c *= r.shape[l];
  return {
    batchSize: p,
    sliceSize: c,
    outerSize: u,
    dimSize: a,
    outputShape: i
  };
}
// Decodes an array of UTF-8 byte buffers into strings (`sc` is the bundle's
// decode-string helper), wrapping any decode failure in a descriptive error.
function _5(r) {
  try {
    return r.map(t8 => sc(t8));
  } catch (t8) {
    throw new Error(`Failed to decode encoded string bytes into utf-8, error: ${t8}`);
  }
}
// Encodes an array of strings into UTF-8 byte buffers (`tu` is the bundle's
// encode-string helper).
function $5(r) {
  return r.map(t8 => tu(t8));
}
// Shared CPU kernel implementations re-exported for backends
// (nonMaxSuppression V3/V4/V5 and whereImpl); `qe` installs lazy getters.
var Wt = {};
qe(Wt, {
  nonMaxSuppressionV3Impl: () => ef,
  nonMaxSuppressionV4Impl: () => tf,
  nonMaxSuppressionV5Impl: () => rf,
  whereImpl: () => Yd
});
UN();
// Register the KEEP_INTERMEDIATE_TENSORS debug flag (default false) on the
// global environment object returned by `A()`; warns about its performance
// impact when enabled.
var E5 = A();
E5.registerFlag("KEEP_INTERMEDIATE_TENSORS", () => false, r => {
  r && console.warn("Keep intermediate tensors is ON. This will print the values of all intermediate tensors during model inference. Not all models support this mode. For details, check e2e/benchmarks/ model_config.js. This significantly impacts performance.");
});
// DataType enum mirroring TensorFlow's types.proto DT_* values (including
// the *_REF variants offset by 100), with TS enum reverse mappings.
var ho;
(function (r) {
  r[r.DT_INVALID = 0] = "DT_INVALID", r[r.DT_FLOAT = 1] = "DT_FLOAT", r[r.DT_DOUBLE = 2] = "DT_DOUBLE", r[r.DT_INT32 = 3] = "DT_INT32", r[r.DT_UINT8 = 4] = "DT_UINT8", r[r.DT_INT16 = 5] = "DT_INT16", r[r.DT_INT8 = 6] = "DT_INT8", r[r.DT_STRING = 7] = "DT_STRING", r[r.DT_COMPLEX64 = 8] = "DT_COMPLEX64", r[r.DT_INT64 = 9] = "DT_INT64", r[r.DT_BOOL = 10] = "DT_BOOL", r[r.DT_QINT8 = 11] = "DT_QINT8", r[r.DT_QUINT8 = 12] = "DT_QUINT8", r[r.DT_QINT32 = 13] = "DT_QINT32", r[r.DT_BFLOAT16 = 14] = "DT_BFLOAT16", r[r.DT_QINT16 = 15] = "DT_QINT16", r[r.DT_QUINT16 = 16] = "DT_QUINT16", r[r.DT_UINT16 = 17] = "DT_UINT16", r[r.DT_COMPLEX128 = 18] = "DT_COMPLEX128", r[r.DT_HALF = 19] = "DT_HALF", r[r.DT_RESOURCE = 20] = "DT_RESOURCE", r[r.DT_VARIANT = 21] = "DT_VARIANT", r[r.DT_UINT32 = 22] = "DT_UINT32", r[r.DT_UINT64 = 23] = "DT_UINT64", r[r.DT_FLOAT_REF = 101] = "DT_FLOAT_REF", r[r.DT_DOUBLE_REF = 102] = "DT_DOUBLE_REF", r[r.DT_INT32_REF = 103] = "DT_INT32_REF", r[r.DT_UINT8_REF = 104] = "DT_UINT8_REF", r[r.DT_INT16_REF = 105] = "DT_INT16_REF", r[r.DT_INT8_REF = 106] = "DT_INT8_REF", r[r.DT_STRING_REF = 107] = "DT_STRING_REF", r[r.DT_COMPLEX64_REF = 108] = "DT_COMPLEX64_REF", r[r.DT_INT64_REF = 109] = "DT_INT64_REF", r[r.DT_BOOL_REF = 110] = "DT_BOOL_REF", r[r.DT_QINT8_REF = 111] = "DT_QINT8_REF", r[r.DT_QUINT8_REF = 112] = "DT_QUINT8_REF", r[r.DT_QINT32_REF = 113] = "DT_QINT32_REF", r[r.DT_BFLOAT16_REF = 114] = "DT_BFLOAT16_REF", r[r.DT_QINT16_REF = 115] = "DT_QINT16_REF", r[r.DT_QUINT16_REF = 116] = "DT_QUINT16_REF", r[r.DT_UINT16_REF = 117] = "DT_UINT16_REF", r[r.DT_COMPLEX128_REF = 118] = "DT_COMPLEX128_REF", r[r.DT_HALF_REF = 119] = "DT_HALF_REF", r[r.DT_RESOURCE_REF = 120] = "DT_RESOURCE_REF", r[r.DT_VARIANT_REF = 121] = "DT_VARIANT_REF", r[r.DT_UINT32_REF = 122] = "DT_UINT32_REF", r[r.DT_UINT64_REF = 123] = "DT_UINT64_REF";
})(ho || (ho = {}));
// Namespace mirroring TensorFlow's saver.proto: CheckpointFormatVersion enum
// (LEGACY = 0, V1 = 1, V2 = 2) with TS enum reverse mappings.
var gT;
(function (r) {
  let t8;
  (function (e) {
    e[e.LEGACY = 0] = "LEGACY", e[e.V1 = 1] = "V1", e[e.V2 = 2] = "V2";
  })(t8 = r.CheckpointFormatVersion || (r.CheckpointFormatVersion = {}));
})(gT || (gT = {}));
// Registry of user-defined (custom) graph-model ops, keyed by TF op name.
var dS = {};
// Registers a custom executor for op name `r`; later lookups via pf() return
// the created mapper entry (category "custom", no declared inputs/attrs).
function D5(r, t8) {
  let e = {
    tfOpName: r,
    category: "custom",
    inputs: [],
    attrs: [],
    customExecutor: t8
  };
  dS[r] = e;
}
// Looks up a previously registered custom op (undefined when absent).
function pf(r) {
  return dS[r];
}
// Removes a custom op registration.
function A5(r) {
  delete dS[r];
}
// Resolves op-mapper parameter `r` for graph node `t8`: input params are read
// from the node's input tensors (by start/end index range), attribute params
// from the node's parsed attrs. `e` is the tensor map, `o` the execution
// context, `n` an optional resource manager (for hash-table handles).
function I(r, t8, e, o, n) {
  let s = t8.inputParams[r];
  if (s && s.inputIndexStart !== void 0) {
    let i = s.inputIndexStart,
      // end === 0 means "through the last input"; undefined means exactly one.
      p = s.inputIndexEnd === 0 ? void 0 : s.inputIndexEnd === void 0 ? i + 1 : s.inputIndexEnd,
      u = i < 0 ? t8.inputNames.length + i : i;
    if (s.type === "tensor") return zt(t8.inputNames[u], e, o, n);
    if (s.type === "tensors") {
      // Drop NoOp control inputs before resolving the tensor list.
      let m = t8.inputs.slice(i, p);
      return t8.inputNames.slice(i, p).filter((f, h) => {
        var g;
        return ((g = m[h]) === null || g === void 0 ? void 0 : g.op) !== "NoOp";
      }).map(f => zt(f, e, o, n));
    }
    // Scalar / array params are materialized by synchronously reading the
    // tensor's data.
    let c = zt(t8.inputNames[u], e, o, n),
      l = c.dataSync();
    return s.type === "number" ? l[0] : y.toNestedArray(c.shape, l);
  }
  let a = t8.attrParams[r];
  return a && a.value;
}
// Looks up a tensor by node name `r`: checks the resource manager's
// hash-table handles first, then scans the execution context's id stack for
// the first frame holding the node's outputs and returns the indexed output.
function zt(r, t8, e, o) {
  let [n, s] = Tr(r, e);
  if (o != null) {
    let i = o.getHashTableHandleByName(n);
    if (i != null) return i;
  }
  let a = e.currentContextIds.find(i => !!t8[cf(n, i)]);
  return a !== void 0 ? t8[cf(n, a)][s] : void 0;
}
// Returns the output tensor list for node `r` in the *current* context frame
// only (no fallback through the context-id stack).
function fS(r, t8, e) {
  return t8[cf(r, e.currentContextId)];
}
// Parses node name `r` and returns [context-scoped name, output index,
// output name] suitable for tensor-map keys.
function Ls(r, t8) {
  let [e, o, n] = Tr(r, t8);
  return [cf(e, t8 && t8.currentContextId), o, n];
}
// Scopes node name `r` with context id `t8` ("name-ctx"); a falsy context id
// leaves the name unchanged.
function cf(r, t8) {
  if (t8) {
    return `${r}-${t8}`;
  }
  return r;
}
// Parses a node name "name[:outputName][:index]" into
// [nodeName, outputIndex, outputName]; memoizes through
// `t8.parseNodeNameCache` (a Map) when the context provides one.
function Tr(r, t8) {
  if (r === "") {
    return ["", 0, void 0];
  }
  const cacheEnabled = t8 != null && t8.parseNodeNameCache != null;
  if (cacheEnabled) {
    const cached = t8.parseNodeNameCache.get(r);
    if (cached != null) {
      return cached;
    }
  }
  const parts = r.split(":");
  let parsed;
  if (parts.length === 1) {
    parsed = [r, 0, void 0];
  } else {
    const nodeName = parts[0];
    const outputName = parts.length === 3 ? parts[1] : void 0;
    const outputIndex = Number(parts[parts.length - 1]);
    parsed = [nodeName, outputIndex, outputName];
  }
  if (cacheEnabled) {
    t8.parseNodeNameCache.set(r, parsed);
  }
  return parsed;
}
// Resolves a conv op's "pad" parameter via I(); when it is "explicit",
// reshapes the flat 8-value explicitPaddings list into
// [[top,bottom],[left,right],...] pairs for 4 dimensions.
function Ol(r, t8, e) {
  let o = I("pad", r, t8, e);
  if (o === "explicit") {
    o = I("explicitPaddings", r, t8, e);
    let n = [[0, 0], [0, 0], [0, 0], [0, 0]];
    for (let s = 0; s < 4; s++) n[s][0] = o[s * 2], n[s][1] = o[s * 2 + 1];
    return n;
  }
  return o;
}
// Returns `r` unchanged when it is already marked kept, otherwise clones it
// via `Ur` so executor disposal cannot free a tensor the caller still owns.
function Bs(r) {
  return r.kept ? r : Ur(r);
}
// Arithmetic op-mapper namespace; the json getter returns the op table
// defined below (`qe` installs lazy getters).
var hS = {};
qe(hS, {
  json: () => F5
});
var F5 = [{ | |
tfOpName: "Add", | |
category: "arithmetic", | |
inputs: [{ | |
start: 0, | |
name: "a", | |
type: "tensor" | |
}, { | |
start: 1, | |
name: "b", | |
type: "tensor" | |
}], | |
attrs: [{ | |
tfName: "T", | |
name: "dtype", | |
type: "dtype", | |
notSupported: true | |
}] | |
}, { | |
tfOpName: "AddV2", | |
category: "arithmetic", | |
inputs: [{ | |
start: 0, | |
name: "a", | |
type: "tensor" | |
}, { | |
start: 1, | |
name: "b", | |
type: "tensor" | |
}], | |
attrs: [{ | |
tfName: "T", | |
name: "dtype", | |
type: "dtype", | |
notSupported: true | |
}] | |
}, { | |
tfOpName: "AddN", | |
category: "arithmetic", | |
inputs: [{ | |
start: 0, | |
end: 0, | |
name: "tensors", | |
type: "tensors" | |
}] | |
}, { | |
tfOpName: "BiasAdd", | |
category: "arithmetic", | |
inputs: [{ | |
start: 0, | |
name: "a", | |
type: "tensor" | |
}, { | |
start: 1, | |
name: "b", | |
type: "tensor" | |
}], | |
attrs: [{ | |
tfName: "T", | |
name: "dtype", | |
type: "dtype", | |
notSupported: true | |
}, { | |
tfName: "data_format", | |
name: "dataFormat", | |
type: "string", | |
notSupported: true | |
}] | |
}, { | |
tfOpName: "Sub", | |
category: "arithmetic", | |
inputs: [{ | |
start: 0, | |
name: "a", | |
type: "tensor" | |
}, { | |
start: 1, | |
name: "b", | |
type: "tensor" | |
}], | |
attrs: [{ | |
tfName: "T", | |
name: "dtype", | |
type: "dtype", | |
notSupported: true | |
}] | |
}, { | |
tfOpName: "RealDiv", | |
category: "arithmetic", | |
inputs: [{ | |
start: 0, | |
name: "a", | |
type: "tensor" | |
}, { | |
start: 1, | |
name: "b", | |
type: "tensor" | |
}], | |
attrs: [{ | |
tfName: "T", | |
name: "dtype", | |
type: "dtype", | |
notSupported: true | |
}] | |
}, { | |
tfOpName: "Div", | |
category: "arithmetic", | |
inputs: [{ | |
start: 0, | |
name: "a", | |
type: "tensor" | |
}, { | |
start: 1, | |
name: "b", | |
type: "tensor" | |
}], | |
attrs: [{ | |
tfName: "T", | |
name: "dtype", | |
type: "dtype", | |
notSupported: true | |
}] | |
}, { | |
tfOpName: "DivNoNan", | |
category: "arithmetic", | |
inputs: [{ | |
start: 0, | |
name: "a", | |
type: "tensor" | |
}, { | |
start: 1, | |
name: "b", | |
type: "tensor" | |
}], | |
attrs: [{ | |
tfName: "T", | |
name: "dtype", | |
type: "dtype", | |
notSupported: true | |
}] | |
}, { | |
tfOpName: "FloorDiv", | |
category: "arithmetic", | |
inputs: [{ | |
start: 0, | |
name: "a", | |
type: "tensor" | |
}, { | |
start: 1, | |
name: "b", | |
type: "tensor" | |
}], | |
attrs: [{ | |
tfName: "T", | |
name: "dtype", | |
type: "dtype", | |
notSupported: true | |
}] | |
}, { | |
tfOpName: "Mul", | |
category: "arithmetic", | |
inputs: [{ | |
start: 0, | |
name: "a", | |
type: "tensor" | |
}, { | |
start: 1, | |
name: "b", | |
type: "tensor" | |
}], | |
attrs: [{ | |
tfName: "T", | |
name: "dtype", | |
type: "dtype", | |
notSupported: true | |
}] | |
}, { | |
tfOpName: "Maximum", | |
category: "arithmetic", | |
inputs: [{ | |
start: 0, | |
name: "a", | |
type: "tensor" | |
}, { | |
start: 1, | |
name: "b", | |
type: "tensor" | |
}], | |
attrs: [{ | |
tfName: "T", | |
name: "dtype", | |
type: "dtype", | |
notSupported: true | |
}] | |
}, { | |
tfOpName: "Minimum", | |
category: "arithmetic", | |
inputs: [{ | |
start: 0, | |
name: "a", | |
type: "tensor" | |
}, { | |
start: 1, | |
name: "b", | |
type: "tensor" | |
}], | |
attrs: [{ | |
tfName: "T", | |
name: "dtype", | |
type: "dtype", | |
notSupported: true | |
}] | |
}, { | |
tfOpName: "Pow", | |
category: "arithmetic", | |
inputs: [{ | |
start: 0, | |
name: "a", | |
type: "tensor" | |
}, { | |
start: 1, | |
name: "b", | |
type: "tensor" | |
}], | |
attrs: [{ | |
tfName: "T", | |
name: "dtype", | |
type: "dtype", | |
notSupported: true | |
}] | |
}, { | |
tfOpName: "SquaredDifference", | |
category: "arithmetic", | |
inputs: [{ | |
start: 0, | |
name: "a", | |
type: "tensor" | |
}, { | |
start: 1, | |
name: "b", | |
type: "tensor" | |
}], | |
attrs: [{ | |
tfName: "T", | |
name: "dtype", | |
type: "dtype", | |
notSupported: true | |
}] | |
}, { | |
tfOpName: "Mod", | |
category: "arithmetic", | |
inputs: [{ | |
start: 0, | |
name: "a", | |
type: "tensor" | |
}, { | |
start: 1, | |
name: "b", | |
type: "tensor" | |
}], | |
attrs: [{ | |
tfName: "T", | |
name: "dtype", | |
type: "dtype", | |
notSupported: true | |
}] | |
}, { | |
tfOpName: "FloorMod", | |
category: "arithmetic", | |
inputs: [{ | |
start: 0, | |
name: "a", | |
type: "tensor" | |
}, { | |
start: 1, | |
name: "b", | |
type: "tensor" | |
}], | |
attrs: [{ | |
tfName: "T", | |
name: "dtype", | |
type: "dtype", | |
notSupported: true | |
}] | |
}]; | |
// basic_math op-mapper namespace; the json getter returns the op table
// defined below (`qe` installs lazy getters).
var gS = {};
qe(gS, {
  json: () => P5
});
var P5 = [{ | |
tfOpName: "Abs", | |
category: "basic_math", | |
inputs: [{ | |
start: 0, | |
name: "x", | |
type: "tensor" | |
}], | |
attrs: [{ | |
tfName: "T", | |
name: "dtype", | |
type: "dtype", | |
notSupported: true | |
}] | |
}, { | |
tfOpName: "Acos", | |
category: "basic_math", | |
inputs: [{ | |
start: 0, | |
name: "x", | |
type: "tensor" | |
}], | |
attrs: [{ | |
tfName: "T", | |
name: "dtype", | |
type: "dtype", | |
notSupported: true | |
}] | |
}, { | |
tfOpName: "Asin", | |
category: "basic_math", | |
inputs: [{ | |
start: 0, | |
name: "x", | |
type: "tensor" | |
}], | |
attrs: [{ | |
tfName: "T", | |
name: "dtype", | |
type: "dtype", | |
notSupported: true | |
}] | |
}, { | |
tfOpName: "Atan", | |
category: "basic_math", | |
inputs: [{ | |
start: 0, | |
name: "x", | |
type: "tensor" | |
}], | |
attrs: [{ | |
tfName: "T", | |
name: "dtype", | |
type: "dtype", | |
notSupported: true | |
}] | |
}, { | |
tfOpName: "Atan2", | |
category: "basic_math", | |
inputs: [{ | |
start: 0, | |
name: "x", | |
type: "tensor" | |
}, { | |
start: 1, | |
name: "y", | |
type: "tensor" | |
}], | |
attrs: [{ | |
tfName: "T", | |
name: "dtype", | |
type: "dtype", | |
notSupported: true | |
}] | |
}, { | |
tfOpName: "Ceil", | |
category: "basic_math", | |
inputs: [{ | |
start: 0, | |
name: "x", | |
type: "tensor" | |
}], | |
attrs: [{ | |
tfName: "T", | |
name: "dtype", | |
type: "dtype", | |
notSupported: true | |
}] | |
}, { | |
tfOpName: "ClipByValue", | |
category: "basic_math", | |
inputs: [{ | |
start: 0, | |
name: "x", | |
type: "tensor" | |
}, { | |
start: 1, | |
name: "clipValueMin", | |
type: "number" | |
}, { | |
start: 2, | |
name: "clipValueMax", | |
type: "number" | |
}], | |
attrs: [{ | |
tfName: "T", | |
name: "dtype", | |
type: "dtype", | |
notSupported: true | |
}] | |
}, { | |
tfOpName: "Complex", | |
category: "basic_math", | |
inputs: [{ | |
start: 0, | |
name: "real", | |
type: "tensor" | |
}, { | |
start: 1, | |
name: "imag", | |
type: "tensor" | |
}], | |
attrs: [{ | |
tfName: "T", | |
name: "dtype", | |
type: "dtype", | |
notSupported: true | |
}] | |
}, { | |
tfOpName: "ComplexAbs", | |
category: "basic_math", | |
inputs: [{ | |
start: 0, | |
name: "x", | |
type: "tensor" | |
}], | |
attrs: [{ | |
tfName: "T", | |
name: "dtype", | |
type: "dtype", | |
notSupported: true | |
}] | |
}, { | |
tfOpName: "Cos", | |
category: "basic_math", | |
inputs: [{ | |
start: 0, | |
name: "x", | |
type: "tensor" | |
}], | |
attrs: [{ | |
tfName: "T", | |
name: "dtype", | |
type: "dtype", | |
notSupported: true | |
}] | |
}, { | |
tfOpName: "Cosh", | |
category: "basic_math", | |
inputs: [{ | |
start: 0, | |
name: "x", | |
type: "tensor" | |
}], | |
attrs: [{ | |
tfName: "T", | |
name: "dtype", | |
type: "dtype", | |
notSupported: true | |
}] | |
}, { | |
tfOpName: "Elu", | |
category: "basic_math", | |
inputs: [{ | |
start: 0, | |
name: "x", | |
type: "tensor" | |
}], | |
attrs: [{ | |
tfName: "T", | |
name: "dtype", | |
type: "dtype", | |
notSupported: true | |
}] | |
}, { | |
tfOpName: "Exp", | |
category: "basic_math", | |
inputs: [{ | |
start: 0, | |
name: "x", | |
type: "tensor" | |
}], | |
attrs: [{ | |
tfName: "T", | |
name: "dtype", | |
type: "dtype", | |
notSupported: true | |
}] | |
}, { | |
tfOpName: "Floor", | |
category: "basic_math", | |
inputs: [{ | |
start: 0, | |
name: "x", | |
type: "tensor" | |
}], | |
attrs: [{ | |
tfName: "T", | |
name: "dtype", | |
type: "dtype", | |
notSupported: true | |
}] | |
}, { | |
tfOpName: "Log", | |
category: "basic_math", | |
inputs: [{ | |
start: 0, | |
name: "x", | |
type: "tensor" | |
}], | |
attrs: [{ | |
tfName: "T", | |
name: "dtype", | |
type: "dtype", | |
notSupported: true | |
}] | |
}, { | |
tfOpName: "Imag", | |
category: "basic_math", | |
inputs: [{ | |
start: 0, | |
name: "x", | |
type: "tensor" | |
}], | |
attrs: [{ | |
tfName: "T", | |
name: "dtype", | |
type: "dtype", | |
notSupported: true | |
}, { | |
tfName: "Tout", | |
name: "outputType", | |
type: "dtype", | |
notSupported: true | |
}] | |
}, { | |
tfOpName: "Neg", | |
category: "basic_math", | |
inputs: [{ | |
start: 0, | |
name: "x", | |
type: "tensor" | |
}], | |
attrs: [{ | |
tfName: "T", | |
name: "dtype", | |
type: "dtype", | |
notSupported: true | |
}] | |
}, { | |
tfOpName: "Real", | |
category: "basic_math", | |
inputs: [{ | |
start: 0, | |
name: "x", | |
type: "tensor" | |
}], | |
attrs: [{ | |
tfName: "T", | |
name: "dtype", | |
type: "dtype", | |
notSupported: true | |
}, { | |
tfName: "Tout", | |
name: "outputType", | |
type: "dtype", | |
notSupported: true | |
}] | |
}, { | |
tfOpName: "Prelu", | |
category: "basic_math", | |
inputs: [{ | |
start: 0, | |
name: "x", | |
type: "tensor" | |
}, { | |
start: 1, | |
name: "alpha", | |
type: "tensor" | |
}], | |
attrs: [{ | |
tfName: "T", | |
name: "dtype", | |
type: "dtype", | |
notSupported: true | |
}] | |
}, { | |
tfOpName: "Relu", | |
category: "basic_math", | |
inputs: [{ | |
start: 0, | |
name: "x", | |
type: "tensor" | |
}], | |
attrs: [{ | |
tfName: "T", | |
name: "dtype", | |
type: "dtype", | |
notSupported: true | |
}] | |
}, { | |
tfOpName: "Relu6", | |
category: "basic_math", | |
inputs: [{ | |
start: 0, | |
name: "x", | |
type: "tensor" | |
}], | |
attrs: [{ | |
tfName: "T", | |
name: "dtype", | |
type: "dtype", | |
notSupported: true | |
}] | |
}, { | |
tfOpName: "Selu", | |
category: "basic_math", | |
inputs: [{ | |
start: 0, | |
name: "x", | |
type: "tensor" | |
}], | |
attrs: [{ | |
tfName: "T", | |
name: "dtype", | |
type: "dtype", | |
notSupported: true | |
}] | |
}, { | |
tfOpName: "Sigmoid", | |
category: "basic_math", | |
inputs: [{ | |
start: 0, | |
name: "x", | |
type: "tensor" | |
}], | |
attrs: [{ | |
tfName: "T", | |
name: "dtype", | |
type: "dtype", | |
notSupported: true | |
}] | |
}, { | |
tfOpName: "Sin", | |
category: "basic_math", | |
inputs: [{ | |
start: 0, | |
name: "x", | |
type: "tensor" | |
}], | |
attrs: [{ | |
tfName: "T", | |
name: "dtype", | |
type: "dtype", | |
notSupported: true | |
}] | |
}, { | |
tfOpName: "Sinh", | |
category: "basic_math", | |
inputs: [{ | |
start: 0, | |
name: "x", | |
type: "tensor" | |
}], | |
attrs: [{ | |
tfName: "T", | |
name: "dtype", | |
type: "dtype", | |
notSupported: true | |
}] | |
}, { | |
tfOpName: "Sqrt", | |
category: "basic_math", | |
inputs: [{ | |
start: 0, | |
name: "x", | |
type: "tensor" | |
}], | |
attrs: [{ | |
tfName: "T", | |
name: "dtype", | |
type: "dtype", | |
notSupported: true | |
}] | |
}, { | |
tfOpName: "Rsqrt", | |
category: "basic_math", | |
inputs: [{ | |
start: 0, | |
name: "x", | |
type: "tensor" | |
}], | |
attrs: [{ | |
tfName: "T", | |
name: "dtype", | |
type: "dtype", | |
notSupported: true | |
}] | |
}, { | |
tfOpName: "Square", | |
category: "basic_math", | |
inputs: [{ | |
start: 0, | |
name: "x", | |
type: "tensor" | |
}], | |
attrs: [{ | |
tfName: "T", | |
name: "dtype", | |
type: "dtype", | |
notSupported: true | |
}] | |
}, { | |
tfOpName: "Tan", | |
category: "basic_math", | |
inputs: [{ | |
start: 0, | |
name: "x", | |
type: "tensor" | |
}], | |
attrs: [{ | |
tfName: "T", | |
name: "dtype", | |
type: "dtype", | |
notSupported: true | |
}] | |
}, { | |
tfOpName: "Tanh", | |
category: "basic_math", | |
inputs: [{ | |
start: 0, | |
name: "x", | |
type: "tensor" | |
}], | |
attrs: [{ | |
tfName: "T", | |
name: "dtype", | |
type: "dtype", | |
notSupported: true | |
}] | |
}, { | |
tfOpName: "Sign", | |
category: "basic_math", | |
inputs: [{ | |
start: 0, | |
name: "x", | |
type: "tensor" | |
}], | |
attrs: [{ | |
tfName: "T", | |
name: "dtype", | |
type: "dtype", | |
notSupported: true | |
}] | |
}, { | |
tfOpName: "Round", | |
category: "basic_math", | |
inputs: [{ | |
start: 0, | |
name: "x", | |
type: "tensor" | |
}], | |
attrs: [{ | |
tfName: "T", | |
name: "dtype", | |
type: "dtype", | |
notSupported: true | |
}] | |
}, { | |
tfOpName: "Expm1", | |
category: "basic_math", | |
inputs: [{ | |
start: 0, | |
name: "x", | |
type: "tensor" | |
}], | |
attrs: [{ | |
tfName: "T", | |
name: "dtype", | |
type: "dtype", | |
notSupported: true | |
}] | |
}, { | |
tfOpName: "Log1p", | |
category: "basic_math", | |
inputs: [{ | |
start: 0, | |
name: "x", | |
type: "tensor" | |
}], | |
attrs: [{ | |
tfName: "T", | |
name: "dtype", | |
type: "dtype", | |
notSupported: true | |
}] | |
}, { | |
tfOpName: "Reciprocal", | |
category: "basic_math", | |
inputs: [{ | |
start: 0, | |
name: "x", | |
type: "tensor" | |
}], | |
attrs: [{ | |
tfName: "T", | |
name: "dtype", | |
type: "dtype", | |
notSupported: true | |
}] | |
}, { | |
tfOpName: "Softplus", | |
category: "basic_math", | |
inputs: [{ | |
start: 0, | |
name: "x", | |
type: "tensor" | |
}], | |
attrs: [{ | |
tfName: "T", | |
name: "dtype", | |
type: "dtype", | |
notSupported: true | |
}] | |
}, { | |
tfOpName: "Asinh", | |
category: "basic_math", | |
inputs: [{ | |
start: 0, | |
name: "x", | |
type: "tensor" | |
}], | |
attrs: [{ | |
tfName: "T", | |
name: "dtype", | |
type: "dtype", | |
notSupported: true | |
}] | |
}, { | |
tfOpName: "Acosh", | |
category: "basic_math", | |
inputs: [{ | |
start: 0, | |
name: "x", | |
type: "tensor" | |
}], | |
attrs: [{ | |
tfName: "T", | |
name: "dtype", | |
type: "dtype", | |
notSupported: true | |
}] | |
}, { | |
tfOpName: "Atanh", | |
category: "basic_math", | |
inputs: [{ | |
start: 0, | |
name: "x", | |
type: "tensor" | |
}], | |
attrs: [{ | |
tfName: "T", | |
name: "dtype", | |
type: "dtype", | |
notSupported: true | |
}] | |
}, { | |
tfOpName: "Erf", | |
category: "basic_math", | |
inputs: [{ | |
start: 0, | |
name: "x", | |
type: "tensor" | |
}], | |
attrs: [{ | |
tfName: "T", | |
name: "dtype", | |
type: "dtype", | |
notSupported: true | |
}] | |
}, { | |
tfOpName: "LeakyRelu", | |
category: "basic_math", | |
inputs: [{ | |
start: 0, | |
name: "x", | |
type: "tensor" | |
}], | |
attrs: [{ | |
tfName: "alpha", | |
name: "alpha", | |
type: "number", | |
defaultValue: 0.2 | |
}, { | |
tfName: "T", | |
name: "dtype", | |
type: "dtype", | |
notSupported: true | |
}] | |
}, { | |
tfOpName: "IsNan", | |
category: "basic_math", | |
inputs: [{ | |
start: 0, | |
name: "x", | |
type: "tensor" | |
}], | |
attrs: [{ | |
tfName: "T", | |
name: "dtype", | |
type: "dtype", | |
notSupported: true | |
}] | |
}, { | |
tfOpName: "IsFinite", | |
category: "basic_math", | |
inputs: [{ | |
start: 0, | |
name: "x", | |
type: "tensor" | |
}], | |
attrs: [{ | |
tfName: "T", | |
name: "dtype", | |
type: "dtype", | |
notSupported: true | |
}] | |
}, { | |
tfOpName: "IsInf", | |
category: "basic_math", | |
inputs: [{ | |
start: 0, | |
name: "x", | |
type: "tensor" | |
}], | |
attrs: [{ | |
tfName: "T", | |
name: "dtype", | |
type: "dtype", | |
notSupported: true | |
}] | |
}]; | |
// Op-mapper signature table for TF "control" ops (Switch/Merge/Enter/Exit,
// If/While, TensorArray*V3 and TensorList*). Each entry maps a TF GraphDef op
// name to its positional inputs and attribute conversions; `notSupported`
// marks attrs the runtime ignores. `qe` (presumably the bundler's lazy-export
// helper — defined earlier in this bundle) publishes O5 as xS.json.
var xS = {};
qe(xS, { json: () => O5 });
var O5 = [
  {
    tfOpName: "EmptyTensorList",
    category: "control",
    inputs: [
      { start: 0, name: "elementShape", type: "shape" },
      { start: 1, name: "maxNumElements", type: "number" },
    ],
    attrs: [{ tfName: "element_dtype", name: "elementDType", type: "dtype" }],
  },
  {
    tfOpName: "LoopCond",
    category: "control",
    inputs: [{ start: 0, name: "pred", type: "tensor" }],
  },
  {
    tfOpName: "Switch",
    category: "control",
    inputs: [
      { start: 0, name: "data", type: "tensor" },
      { start: 1, name: "pred", type: "tensor" },
    ],
  },
  {
    tfOpName: "Merge",
    category: "control",
    // start === end === 0 means "all remaining inputs" (variadic).
    inputs: [{ start: 0, end: 0, name: "tensors", type: "tensors" }],
  },
  {
    tfOpName: "Enter",
    category: "control",
    inputs: [{ start: 0, name: "tensor", type: "tensor" }],
    attrs: [
      { tfName: "T", name: "dtype", type: "dtype", notSupported: true },
      { tfName: "frame_name", name: "frameName", type: "string" },
      { tfName: "is_constant", name: "isConstant", type: "bool" },
    ],
  },
  {
    tfOpName: "Exit",
    category: "control",
    inputs: [{ start: 0, name: "tensor", type: "tensor" }],
    attrs: [{ tfName: "T", name: "dtype", type: "dtype", notSupported: true }],
  },
  {
    tfOpName: "NextIteration",
    category: "control",
    inputs: [{ start: 0, name: "tensor", type: "tensor" }],
    attrs: [{ tfName: "T", name: "dtype", type: "dtype", notSupported: true }],
  },
  {
    tfOpName: "TensorArrayV3",
    category: "control",
    inputs: [{ start: 0, name: "size", type: "number" }],
    attrs: [
      { tfName: "dtype", name: "dtype", type: "dtype" },
      { tfName: "element_shape", name: "elementShape", type: "shape" },
      { tfName: "dynamic_size", name: "dynamicSize", type: "bool" },
      { tfName: "clear_after_read", name: "clearAfterRead", type: "bool" },
      { tfName: "identical_element_shapes", name: "identicalElementShapes", type: "bool" },
      { tfName: "tensor_array_name", name: "name", type: "string" },
    ],
  },
  {
    tfOpName: "TensorArrayWriteV3",
    category: "control",
    inputs: [
      { start: 0, name: "tensorArrayId", type: "tensor" },
      { start: 1, name: "index", type: "number" },
      { start: 2, name: "tensor", type: "tensor" },
      { start: 3, name: "flowIn", type: "number" },
    ],
    attrs: [{ tfName: "T", name: "dtype", type: "dtype", notSupported: true }],
  },
  {
    tfOpName: "TensorArrayReadV3",
    category: "control",
    inputs: [
      { start: 0, name: "tensorArrayId", type: "tensor" },
      { start: 1, name: "index", type: "number" },
      { start: 2, name: "flowIn", type: "number" },
    ],
    attrs: [{ tfName: "dtype", name: "dtype", type: "dtype", notSupported: true }],
  },
  {
    tfOpName: "TensorArrayGatherV3",
    category: "control",
    inputs: [
      { start: 0, name: "tensorArrayId", type: "tensor" },
      { start: 1, name: "indices", type: "number[]" },
      { start: 2, name: "flowIn", type: "number" },
    ],
    attrs: [
      { tfName: "dtype", name: "dtype", type: "dtype" },
      { tfName: "element_shape", name: "elementShape", type: "shape" },
    ],
  },
  {
    tfOpName: "TensorArrayScatterV3",
    category: "control",
    inputs: [
      { start: 0, name: "tensorArrayId", type: "tensor" },
      { start: 1, name: "indices", type: "number[]" },
      { start: 2, name: "tensor", type: "tensor" },
      { start: 3, name: "flowIn", type: "number" },
    ],
    attrs: [{ tfName: "T", name: "dtype", type: "dtype" }],
  },
  {
    tfOpName: "TensorArrayConcatV3",
    category: "control",
    inputs: [
      { start: 0, name: "tensorArrayId", type: "tensor" },
      { start: 1, name: "flowIn", type: "number" },
    ],
    attrs: [
      { tfName: "dtype", name: "dtype", type: "dtype" },
      { tfName: "element_shape_except0", name: "elementShapeExcept0", type: "shape", notSupported: true },
    ],
  },
  {
    tfOpName: "TensorArraySplitV3",
    category: "control",
    inputs: [
      { start: 0, name: "tensorArrayId", type: "tensor" },
      { start: 1, name: "tensor", type: "tensor" },
      { start: 2, name: "lengths", type: "number[]" },
      { start: 3, name: "flowIn", type: "number" },
    ],
    attrs: [{ tfName: "T", name: "dtype", type: "dtype" }],
  },
  {
    tfOpName: "TensorArraySizeV3",
    category: "control",
    inputs: [
      { start: 0, name: "tensorArrayId", type: "tensor" },
      { start: 1, name: "flowIn", type: "number" },
    ],
  },
  {
    tfOpName: "TensorArrayCloseV3",
    category: "control",
    inputs: [{ start: 0, name: "tensorArrayId", type: "tensor" }],
  },
  {
    tfOpName: "StatelessIf",
    category: "control",
    inputs: [
      { start: 0, name: "cond", type: "tensor" },
      { start: 1, end: 0, name: "args", type: "tensors" },
    ],
    attrs: [
      { tfName: "then_branch", name: "thenBranch", type: "func" },
      { tfName: "else_branch", name: "elseBranch", type: "func" },
    ],
  },
  {
    tfOpName: "If",
    category: "control",
    inputs: [
      { start: 0, name: "cond", type: "tensor" },
      { start: 1, end: 0, name: "args", type: "tensors" },
    ],
    attrs: [
      { tfName: "then_branch", name: "thenBranch", type: "func" },
      { tfName: "else_branch", name: "elseBranch", type: "func" },
    ],
  },
  {
    tfOpName: "StatelessWhile",
    category: "control",
    inputs: [{ start: 0, end: 0, name: "args", type: "tensors" }],
    attrs: [
      { tfName: "cond", name: "cond", type: "func" },
      { tfName: "body", name: "body", type: "func" },
    ],
  },
  {
    tfOpName: "While",
    category: "control",
    inputs: [{ start: 0, end: 0, name: "args", type: "tensors" }],
    attrs: [
      { tfName: "cond", name: "cond", type: "func" },
      { tfName: "body", name: "body", type: "func" },
    ],
  },
  {
    tfOpName: "TensorListScatter",
    category: "control",
    inputs: [
      { start: 0, name: "tensor", type: "tensor" },
      { start: 1, name: "indices", type: "number[]" },
      { start: 2, name: "elementShape", type: "shape" },
    ],
    attrs: [{ tfName: "element_dtype", name: "elementDType", type: "dtype" }],
  },
  {
    tfOpName: "TensorListScatterV2",
    category: "control",
    inputs: [
      { start: 0, name: "tensor", type: "tensor" },
      { start: 1, name: "indices", type: "number[]" },
      { start: 2, name: "elementShape", type: "shape" },
      { start: 3, name: "numElements", type: "number" },
    ],
    attrs: [{ tfName: "element_dtype", name: "elementDType", type: "dtype" }],
  },
  {
    tfOpName: "TensorListGather",
    category: "control",
    inputs: [
      { start: 0, name: "tensorListId", type: "tensor" },
      { start: 1, name: "indices", type: "number[]" },
      { start: 2, name: "elementShape", type: "shape" },
    ],
    attrs: [{ tfName: "element_dtype", name: "elementDType", type: "dtype" }],
  },
  {
    tfOpName: "TensorListGetItem",
    category: "control",
    inputs: [
      { start: 0, name: "tensorListId", type: "tensor" },
      { start: 1, name: "index", type: "number" },
      { start: 2, name: "elementShape", type: "shape" },
    ],
    attrs: [{ tfName: "element_dtype", name: "elementDType", type: "dtype" }],
  },
  {
    tfOpName: "TensorListSetItem",
    category: "control",
    inputs: [
      { start: 0, name: "tensorListId", type: "tensor" },
      { start: 1, name: "index", type: "number" },
      { start: 2, name: "tensor", type: "tensor" },
    ],
    attrs: [{ tfName: "element_dtype", name: "elementDType", type: "dtype" }],
  },
  {
    tfOpName: "TensorListReserve",
    category: "control",
    inputs: [
      { start: 0, name: "elementShape", type: "shape" },
      { start: 1, name: "numElements", type: "number" },
    ],
    attrs: [{ tfName: "element_dtype", name: "elementDType", type: "dtype" }],
  },
  {
    tfOpName: "TensorListFromTensor",
    category: "control",
    inputs: [
      { start: 0, name: "tensor", type: "tensor" },
      { start: 1, name: "elementShape", type: "shape" },
    ],
    attrs: [{ tfName: "element_dtype", name: "elementDType", type: "dtype" }],
  },
  {
    tfOpName: "TensorListStack",
    category: "control",
    inputs: [
      { start: 0, name: "tensorListId", type: "tensor" },
      { start: 1, name: "elementShape", type: "shape" },
    ],
    attrs: [
      { tfName: "element_dtype", name: "elementDType", type: "dtype" },
      // NOTE(review): type "dtype" for num_elements looks odd ("number" would
      // be expected) but is kept verbatim from the bundled table — confirm
      // against upstream before changing.
      { tfName: "num_elements", name: "numElements", type: "dtype" },
    ],
  },
  {
    tfOpName: "TensorListSplit",
    category: "control",
    inputs: [
      { start: 0, name: "tensor", type: "tensor" },
      { start: 1, name: "elementShape", type: "shape" },
      { start: 2, name: "lengths", type: "number[]" },
    ],
    attrs: [{ tfName: "element_dtype", name: "elementDType", type: "dtype" }],
  },
  {
    tfOpName: "TensorListConcat",
    category: "control",
    inputs: [{ start: 0, name: "tensorListId", type: "tensor" }],
    attrs: [
      { tfName: "element_shape", name: "elementShape", type: "shape" },
      { tfName: "element_dtype", name: "elementDType", type: "dtype" },
    ],
  },
  {
    tfOpName: "TensorListConcatV2",
    category: "control",
    inputs: [{ start: 0, name: "tensorListId", type: "tensor" }],
    attrs: [
      { tfName: "element_shape", name: "elementShape", type: "shape" },
      { tfName: "element_dtype", name: "elementDType", type: "dtype" },
    ],
  },
  {
    tfOpName: "TensorListPopBack",
    category: "control",
    inputs: [
      { start: 0, name: "tensorListId", type: "tensor" },
      { start: 1, name: "elementShape", type: "shape" },
    ],
    attrs: [{ tfName: "element_dtype", name: "elementDType", type: "dtype" }],
  },
  {
    tfOpName: "TensorListPushBack",
    category: "control",
    inputs: [
      { start: 0, name: "tensorListId", type: "tensor" },
      { start: 1, name: "tensor", type: "tensor" },
    ],
    attrs: [{ tfName: "element_dtype", name: "elementDType", type: "dtype" }],
  },
  {
    tfOpName: "TensorListLength",
    category: "control",
    inputs: [{ start: 0, name: "tensorListId", type: "tensor" }],
  },
  {
    tfOpName: "TensorListResize",
    category: "control",
    inputs: [
      { start: 0, name: "tensorListId", type: "tensor" },
      { start: 1, name: "size", type: "number" },
    ],
  },
];
// Op-mapper signature table for TF "convolution" ops (pooling, Conv1D/2D/3D,
// depthwise and fused variants, Dilation2D). Attr entries translate GraphDef
// attribute names (tfName) to the runtime's names; `defaultValue` applies when
// the attr is absent and `notSupported` attrs are ignored. Published lazily as
// yS.json via the qe export helper defined earlier in this bundle.
var yS = {};
qe(yS, { json: () => M5 });
var M5 = [
  {
    tfOpName: "AvgPool",
    category: "convolution",
    inputs: [{ start: 0, name: "x", type: "tensor" }],
    attrs: [
      { tfName: "strides", name: "strides", type: "number[]" },
      { tfName: "padding", name: "pad", type: "string" },
      { tfName: "data_format", name: "dataFormat", type: "string", notSupported: true },
      { tfName: "ksize", name: "kernelSize", type: "number[]" },
      { tfName: "T", name: "dtype", type: "dtype", notSupported: true },
    ],
  },
  {
    tfOpName: "MaxPool",
    category: "convolution",
    inputs: [{ start: 0, name: "x", type: "tensor" }],
    attrs: [
      { tfName: "strides", name: "strides", type: "number[]" },
      { tfName: "padding", name: "pad", type: "string" },
      { tfName: "data_format", name: "dataFormat", type: "string", notSupported: true },
      { tfName: "ksize", name: "kernelSize", type: "number[]" },
      { tfName: "explicit_paddings", name: "explicitPaddings", type: "number[]", defaultValue: [], notSupported: true },
      { tfName: "T", name: "dtype", type: "dtype", notSupported: true },
    ],
  },
  {
    tfOpName: "MaxPoolWithArgmax",
    category: "convolution",
    inputs: [{ start: 0, name: "x", type: "tensor" }],
    attrs: [
      { tfName: "strides", name: "strides", type: "number[]" },
      { tfName: "padding", name: "pad", type: "string" },
      { tfName: "ksize", name: "kernelSize", type: "number[]" },
      { tfName: "include_batch_in_index", name: "includeBatchInIndex", type: "bool" },
      { tfName: "T", name: "dtype", type: "dtype", notSupported: true },
    ],
  },
  {
    tfOpName: "AvgPool3D",
    category: "convolution",
    inputs: [{ start: 0, name: "x", type: "tensor" }],
    attrs: [
      { tfName: "strides", name: "strides", type: "number[]" },
      { tfName: "padding", name: "pad", type: "string" },
      { tfName: "data_format", name: "dataFormat", type: "string", notSupported: true },
      { tfName: "ksize", name: "kernelSize", type: "number[]" },
      { tfName: "T", name: "dtype", type: "dtype", notSupported: true },
    ],
  },
  {
    tfOpName: "MaxPool3D",
    category: "convolution",
    inputs: [{ start: 0, name: "x", type: "tensor" }],
    attrs: [
      { tfName: "strides", name: "strides", type: "number[]" },
      { tfName: "padding", name: "pad", type: "string" },
      { tfName: "data_format", name: "dataFormat", type: "string", notSupported: true },
      { tfName: "ksize", name: "kernelSize", type: "number[]" },
      { tfName: "T", name: "dtype", type: "dtype", notSupported: true },
    ],
  },
  {
    tfOpName: "Conv1D",
    category: "convolution",
    inputs: [
      { start: 0, name: "x", type: "tensor" },
      { start: 1, name: "filter", type: "tensor" },
    ],
    attrs: [
      { tfName: "stride", name: "stride", type: "number" },
      { tfName: "padding", name: "pad", type: "string" },
      { tfName: "data_format", name: "dataFormat", type: "string", defaultValue: "NWC" },
      { tfName: "T", name: "dtype", type: "dtype", notSupported: true },
      { tfName: "dilation", name: "dilation", type: "number", defaultValue: 1 },
    ],
  },
  {
    tfOpName: "Conv2D",
    category: "convolution",
    inputs: [
      { start: 0, name: "x", type: "tensor" },
      { start: 1, name: "filter", type: "tensor" },
    ],
    attrs: [
      { tfName: "T", name: "dtype", type: "dtype", notSupported: true },
      { tfName: "strides", name: "strides", type: "number[]" },
      { tfName: "padding", name: "pad", type: "string" },
      { tfName: "useCudnnOnGpu", name: "useCudnnOnGpu", type: "bool" },
      { tfName: "data_format", name: "dataFormat", type: "string", defaultValue: "NHWC" },
      { tfName: "explicit_paddings", name: "explicitPaddings", type: "number[]", defaultValue: [] },
      { tfName: "dilations", name: "dilations", type: "number[]" },
    ],
  },
  {
    tfOpName: "_FusedConv2D",
    category: "convolution",
    inputs: [
      { start: 0, name: "x", type: "tensor" },
      { start: 1, name: "filter", type: "tensor" },
      // start 2, end 0: all remaining inputs (bias/scale args for fused ops).
      { start: 2, end: 0, name: "args", type: "tensors" },
    ],
    attrs: [
      { tfName: "num_args", name: "numArgs", type: "number" },
      { tfName: "T", name: "dtype", type: "dtype", notSupported: true },
      { tfName: "strides", name: "strides", type: "number[]" },
      { tfName: "padding", name: "pad", type: "string" },
      { tfName: "explicit_paddings", name: "explicitPaddings", type: "number[]", defaultValue: [] },
      { tfName: "use_cudnn_on_gpu", name: "useCudnnOnGpu", type: "bool", defaultValue: true },
      { tfName: "data_format", name: "dataFormat", type: "string", defaultValue: "NHWC" },
      { tfName: "dilations", name: "dilations", type: "number[]", defaultValue: [1, 1, 1, 1] },
      { tfName: "fused_ops", name: "fusedOps", type: "string[]", defaultValue: [] },
      { tfName: "epsilon", name: "epsilon", type: "number", defaultValue: 1e-4 },
      { tfName: "leakyrelu_alpha", name: "leakyreluAlpha", type: "number", defaultValue: 0.2 },
    ],
  },
  {
    tfOpName: "Conv2DBackpropInput",
    category: "convolution",
    // Note the deliberate input reordering: GraphDef passes
    // (outputShape, filter, dy) but the mapper exposes (x, filter, outputShape).
    inputs: [
      { start: 2, name: "x", type: "tensor" },
      { start: 1, name: "filter", type: "tensor" },
      { start: 0, name: "outputShape", type: "number[]" },
    ],
    attrs: [
      { tfName: "strides", name: "strides", type: "number[]" },
      { tfName: "padding", name: "pad", type: "string" },
      { tfName: "data_format", name: "dataFormat", type: "string", notSupported: true },
      { tfName: "explicit_paddings", name: "explicitPaddings", type: "number[]", defaultValue: [] },
      { tfName: "dilations", name: "dilations", type: "number[]", notSupported: true },
    ],
  },
  {
    tfOpName: "DepthwiseConv2d",
    category: "convolution",
    inputs: [
      { start: 0, name: "input", type: "tensor" },
      { start: 1, name: "filter", type: "tensor" },
    ],
    attrs: [
      { tfName: "strides", name: "strides", type: "number[]" },
      { tfName: "padding", name: "pad", type: "string" },
      { tfName: "data_format", name: "dataFormat", type: "string", defaultValue: "NHWC" },
      { tfName: "explicit_paddings", name: "explicitPaddings", type: "number[]", defaultValue: [] },
      { tfName: "dilations", name: "dilations", type: "number[]" },
    ],
  },
  {
    tfOpName: "DepthwiseConv2dNative",
    category: "convolution",
    inputs: [
      { start: 0, name: "input", type: "tensor" },
      { start: 1, name: "filter", type: "tensor" },
    ],
    attrs: [
      { tfName: "strides", name: "strides", type: "number[]" },
      { tfName: "padding", name: "pad", type: "string" },
      { tfName: "data_format", name: "dataFormat", type: "string", defaultValue: "NHWC" },
      { tfName: "explicit_paddings", name: "explicitPaddings", type: "number[]", defaultValue: [] },
      { tfName: "dilations", name: "dilations", type: "number[]" },
    ],
  },
  {
    tfOpName: "FusedDepthwiseConv2dNative",
    category: "convolution",
    inputs: [
      { start: 0, name: "x", type: "tensor" },
      { start: 1, name: "filter", type: "tensor" },
      { start: 2, end: 0, name: "args", type: "tensors" },
    ],
    attrs: [
      { tfName: "num_args", name: "numArgs", type: "number" },
      { tfName: "T", name: "dtype", type: "dtype", notSupported: true },
      { tfName: "strides", name: "strides", type: "number[]" },
      { tfName: "padding", name: "pad", type: "string" },
      { tfName: "data_format", name: "dataFormat", type: "string", defaultValue: "NHWC" },
      { tfName: "dilations", name: "dilations", type: "number[]", defaultValue: [1, 1, 1, 1] },
      { tfName: "fused_ops", name: "fusedOps", type: "string[]", defaultValue: [] },
      { tfName: "explicit_paddings", name: "explicitPaddings", type: "number[]", defaultValue: [] },
    ],
  },
  {
    tfOpName: "Conv3D",
    category: "convolution",
    inputs: [
      { start: 0, name: "x", type: "tensor" },
      { start: 1, name: "filter", type: "tensor" },
    ],
    attrs: [
      { tfName: "strides", name: "strides", type: "number[]" },
      { tfName: "padding", name: "pad", type: "string" },
      { tfName: "data_format", name: "dataFormat", type: "string", defaultValue: "NHWC" },
      { tfName: "dilations", name: "dilations", type: "number[]" },
    ],
  },
  {
    tfOpName: "Dilation2D",
    category: "convolution",
    inputs: [
      { start: 0, name: "x", type: "tensor" },
      { start: 1, name: "filter", type: "tensor" },
    ],
    attrs: [
      { tfName: "strides", name: "strides", type: "number[]" },
      { tfName: "rates", name: "dilations", type: "number[]" },
      { tfName: "padding", name: "pad", type: "string" },
    ],
  },
];
// Op-mapper signature table for TF "creation" ops (Fill, OneHot, random
// generators, Range, Zeros/Ones and *Like variants, Multinomial). Published
// lazily as bS.json via the qe export helper defined earlier in this bundle.
var bS = {};
qe(bS, { json: () => L5 });
var L5 = [
  {
    tfOpName: "Fill",
    category: "creation",
    inputs: [
      { start: 0, name: "shape", type: "number[]" },
      { start: 1, name: "value", type: "number" },
    ],
    attrs: [{ tfName: "T", name: "dtype", type: "dtype" }],
  },
  {
    tfOpName: "LinSpace",
    category: "creation",
    inputs: [
      { start: 0, name: "start", type: "number" },
      { start: 1, name: "stop", type: "number" },
      { start: 2, name: "num", type: "number" },
    ],
    attrs: [{ tfName: "T", name: "dtype", type: "dtype", notSupported: true }],
  },
  {
    tfOpName: "OneHot",
    category: "creation",
    inputs: [
      { start: 0, name: "indices", type: "tensor" },
      { start: 1, name: "depth", type: "number" },
      { start: 2, name: "onValue", type: "number", defaultValue: 1 },
      { start: 3, name: "offValue", type: "number", defaultValue: 0 },
    ],
    attrs: [
      { tfName: "axis", name: "axis", type: "number", notSupported: true },
      { tfName: "T", name: "dtype", type: "dtype" },
    ],
  },
  {
    tfOpName: "Ones",
    category: "creation",
    inputs: [{ start: 0, name: "shape", type: "number[]" }],
    attrs: [{ tfName: "T", name: "dtype", type: "dtype" }],
  },
  {
    tfOpName: "OnesLike",
    category: "creation",
    inputs: [{ start: 0, name: "x", type: "tensor" }],
    attrs: [{ tfName: "dtype", name: "dtype", type: "dtype" }],
  },
  {
    tfOpName: "RandomStandardNormal",
    category: "creation",
    inputs: [{ start: 0, name: "shape", type: "number[]" }],
    attrs: [
      { tfName: "seed", name: "seed", type: "number", defaultValue: 0 },
      { tfName: "seed2", name: "seed2", type: "number", defaultValue: 0, notSupported: true },
      { tfName: "dtype", name: "dtype", type: "dtype" },
      { tfName: "T", name: "T", type: "number", notSupported: true },
    ],
  },
  {
    tfOpName: "RandomUniform",
    category: "creation",
    inputs: [{ start: 0, name: "shape", type: "number[]" }],
    attrs: [
      { tfName: "minval", name: "minval", type: "number", defaultValue: 0 },
      { tfName: "maxval", name: "maxval", type: "number", defaultValue: 1 },
      { tfName: "dtype", name: "dtype", type: "dtype" },
      { tfName: "seed", name: "seed", type: "number", defaultValue: 0 },
      { tfName: "seed2", name: "seed2", type: "number", defaultValue: 0, notSupported: true },
      { tfName: "T", name: "T", type: "number", notSupported: true },
    ],
  },
  {
    tfOpName: "RandomUniformInt",
    category: "creation",
    inputs: [{ start: 0, name: "shape", type: "number[]" }],
    attrs: [
      { tfName: "minval", name: "minval", type: "number" },
      { tfName: "maxval", name: "maxval", type: "number" },
      { tfName: "seed", name: "seed", type: "number", defaultValue: 0 },
      { tfName: "seed2", name: "seed2", type: "number", defaultValue: 0, notSupported: true },
    ],
  },
  {
    tfOpName: "Range",
    category: "creation",
    inputs: [
      { start: 0, name: "start", type: "number" },
      { start: 1, name: "stop", type: "number" },
      { start: 2, name: "step", type: "number", defaultValue: 0 },
    ],
    attrs: [{ tfName: "Tidx", name: "dtype", type: "dtype" }],
  },
  {
    tfOpName: "TruncatedNormal",
    category: "creation",
    inputs: [{ start: 0, name: "shape", type: "number[]" }],
    attrs: [
      { tfName: "means", name: "mean", type: "number", defaultValue: 0 },
      { tfName: "stddev", name: "stdDev", type: "number", defaultValue: 1 },
      { tfName: "seed", name: "seed", type: "number" },
      { tfName: "seed2", name: "seed2", type: "number", defaultValue: 0, notSupported: true },
      { tfName: "dtype", name: "dtype", type: "dtype" },
      { tfName: "T", name: "T", type: "number", notSupported: true },
    ],
  },
  {
    tfOpName: "Zeros",
    category: "creation",
    inputs: [{ start: 0, name: "shape", type: "number[]" }],
    attrs: [{ tfName: "T", name: "dtype", type: "dtype" }],
  },
  {
    tfOpName: "ZerosLike",
    category: "creation",
    inputs: [{ start: 0, name: "x", type: "tensor" }],
    attrs: [{ tfName: "T", name: "dtype", type: "dtype" }],
  },
  {
    tfOpName: "Multinomial",
    category: "creation",
    inputs: [
      { start: 0, name: "logits", type: "tensor" },
      { start: 1, name: "numSamples", type: "number" },
    ],
    attrs: [
      { tfName: "seed", name: "seed", type: "number" },
      { tfName: "seed2", name: "seed2", type: "number" },
      { tfName: "T", name: "dtype", type: "dtype" },
      { tfName: "output_dtype", name: "output_dtype", type: "dtype" },
    ],
  },
];
// Op-mapper signature table for TF "dynamic" ops (NonMaxSuppression variants,
// Where, ListDiff) — ops whose output shape depends on runtime values.
// Published lazily as CS.json via the qe export helper defined earlier in
// this bundle.
var CS = {};
qe(CS, { json: () => B5 });
var B5 = [
  {
    tfOpName: "NonMaxSuppressionV2",
    category: "dynamic",
    inputs: [
      { start: 0, name: "boxes", type: "tensor" },
      { start: 1, name: "scores", type: "tensor" },
      { start: 2, name: "maxOutputSize", type: "number" },
      { start: 3, name: "iouThreshold", type: "number" },
    ],
  },
  {
    tfOpName: "NonMaxSuppressionV3",
    category: "dynamic",
    inputs: [
      { start: 0, name: "boxes", type: "tensor" },
      { start: 1, name: "scores", type: "tensor" },
      { start: 2, name: "maxOutputSize", type: "number" },
      { start: 3, name: "iouThreshold", type: "number" },
      { start: 4, name: "scoreThreshold", type: "number" },
    ],
  },
  {
    tfOpName: "NonMaxSuppressionV4",
    category: "dynamic",
    inputs: [
      { start: 0, name: "boxes", type: "tensor" },
      { start: 1, name: "scores", type: "tensor" },
      { start: 2, name: "maxOutputSize", type: "number" },
      { start: 3, name: "iouThreshold", type: "number" },
      { start: 4, name: "scoreThreshold", type: "number" },
    ],
    attrs: [
      { tfName: "T", name: "dtype", type: "dtype", notSupported: true },
      { tfName: "T_threshold", name: "threshold", type: "dtype", notSupported: true },
      { tfName: "pad_to_max_output_size", name: "padToMaxOutputSize", type: "bool" },
    ],
  },
  {
    tfOpName: "NonMaxSuppressionV5",
    category: "dynamic",
    inputs: [
      { start: 0, name: "boxes", type: "tensor" },
      { start: 1, name: "scores", type: "tensor" },
      { start: 2, name: "maxOutputSize", type: "number" },
      { start: 3, name: "iouThreshold", type: "number" },
      { start: 4, name: "scoreThreshold", type: "number" },
      { start: 5, name: "softNmsSigma", type: "number" },
    ],
  },
  {
    tfOpName: "Where",
    category: "dynamic",
    inputs: [{ start: 0, name: "condition", type: "tensor" }],
    attrs: [{ tfName: "T", name: "dtype", type: "dtype", notSupported: true }],
  },
  {
    tfOpName: "ListDiff",
    category: "dynamic",
    inputs: [
      { start: 0, name: "x", type: "tensor" },
      { start: 1, name: "y", type: "tensor" },
    ],
    attrs: [{ tfName: "T", name: "dtype", type: "dtype", notSupported: true }],
  },
];
// Op-mapper signature table for TF "evaluation" ops (LowerBound/UpperBound,
// TopKV2, Unique/UniqueV2). Published lazily as wS.json via the qe export
// helper defined earlier in this bundle.
var wS = {};
qe(wS, { json: () => z5 });
var z5 = [
  {
    tfOpName: "LowerBound",
    category: "evaluation",
    inputs: [
      { start: 0, name: "sortedSequence", type: "tensor" },
      { start: 1, name: "values", type: "tensor" },
    ],
  },
  {
    tfOpName: "TopKV2",
    category: "evaluation",
    inputs: [
      { start: 0, name: "x", type: "tensor" },
      { start: 1, name: "k", type: "number" },
    ],
    attrs: [{ tfName: "sorted", name: "sorted", type: "bool" }],
  },
  {
    tfOpName: "UpperBound",
    category: "evaluation",
    inputs: [
      { start: 0, name: "sortedSequence", type: "tensor" },
      { start: 1, name: "values", type: "tensor" },
    ],
  },
  {
    tfOpName: "Unique",
    category: "evaluation",
    inputs: [{ start: 0, name: "x", type: "tensor" }],
  },
  {
    tfOpName: "UniqueV2",
    category: "evaluation",
    inputs: [
      { start: 0, name: "x", type: "tensor" },
      { start: 1, name: "axis", type: "number" },
    ],
  },
];
// Op-mapper signature table for TF "graph" ops (Placeholder/Const/Identity,
// shape introspection, Print, NoOp, StopGradient, FakeQuant). Published
// lazily as SS.json via the qe export helper defined earlier in this bundle.
var SS = {};
qe(SS, { json: () => V5 });
var V5 = [
  {
    tfOpName: "PlaceholderWithDefault",
    category: "graph",
    inputs: [{ start: 0, name: "default", type: "tensor" }],
    attrs: [
      { tfName: "shape", name: "shape", type: "shape" },
      { tfName: "dtype", name: "dtype", type: "dtype" },
    ],
  },
  {
    tfOpName: "Placeholder",
    category: "graph",
    attrs: [
      { tfName: "shape", name: "shape", type: "shape" },
      { tfName: "dtype", name: "dtype", type: "dtype" },
    ],
  },
  { tfOpName: "Const", category: "graph" },
  {
    tfOpName: "Identity",
    category: "graph",
    inputs: [{ start: 0, name: "x", type: "tensor" }],
  },
  {
    tfOpName: "IdentityN",
    category: "graph",
    inputs: [{ start: 0, end: 0, name: "x", type: "tensors" }],
  },
  {
    tfOpName: "Snapshot",
    category: "graph",
    inputs: [{ start: 0, name: "x", type: "tensor" }],
  },
  {
    tfOpName: "Rank",
    category: "graph",
    inputs: [{ start: 0, name: "x", type: "tensor" }],
  },
  {
    tfOpName: "Size",
    category: "graph",
    inputs: [{ start: 0, name: "x", type: "tensor" }],
  },
  {
    tfOpName: "Shape",
    category: "graph",
    inputs: [{ start: 0, name: "x", type: "tensor" }],
  },
  {
    tfOpName: "ShapeN",
    category: "graph",
    inputs: [{ start: 0, end: 0, name: "x", type: "tensors" }],
  },
  {
    tfOpName: "Print",
    category: "graph",
    inputs: [
      { start: 0, name: "x", type: "tensor" },
      { start: 1, name: "data", type: "tensors" },
    ],
    attrs: [
      { tfName: "message", name: "message", type: "string" },
      { tfName: "first_n", name: "firstN", type: "number", notSupported: true },
      { tfName: "summarize", name: "summarize", type: "number", defaultValue: 3 },
    ],
  },
  { tfOpName: "NoOp", category: "graph", inputs: [] },
  {
    tfOpName: "StopGradient",
    category: "graph",
    inputs: [{ start: 0, name: "x", type: "tensor" }],
  },
  {
    tfOpName: "FakeQuantWithMinMaxVars",
    category: "graph",
    inputs: [{ start: 0, name: "x", type: "tensor" }],
    attrs: [
      { tfName: "min", name: "min", type: "number" },
      { tfName: "max", name: "max", type: "number" },
    ],
  },
];
// Bundler-generated export shim: qe (presumably the bundler's __export
// helper — TODO confirm) installs a lazy getter so IS.json resolves to the
// hash-table op-mapper list W5 on first access.
var IS = {};
qe(IS, {
  json: () => W5
});
// Op-mapper registry for the TensorFlow "hash_table" op category.
// Each entry maps a TF GraphDef op name to the node-input slots it consumes
// and the renamed/typed attrs the TF.js executor expects. The V1/V2 pairs
// are structurally identical, so they are built from small factories that
// return fresh objects on every call (no shared references).
var W5 = (() => {
  // shared_name / use_node_name_sharing / key_dtype / value_dtype attrs
  const tableAttrs = () => [
    { tfName: "shared_name", name: "sharedName", type: "string" },
    { tfName: "use_node_name_sharing", name: "useNodeNameSharing", type: "bool" },
    { tfName: "key_dtype", name: "keyDType", type: "dtype" },
    { tfName: "value_dtype", name: "valueDType", type: "dtype" }
  ];
  // Tin/Tout dtype attrs (recorded but not supported by the executor).
  const inOutAttrs = () => [
    { tfName: "Tin", name: "tIn", type: "dtype", notSupported: true },
    { tfName: "Tout", name: "tOut", type: "dtype", notSupported: true }
  ];
  // tableHandle at slot 0 plus two more tensor inputs at slots 1 and 2.
  const handleAnd = (second, third) => [
    { start: 0, name: "tableHandle", type: "tensor" },
    { start: 1, name: second, type: "tensor" },
    { start: 2, name: third, type: "tensor" }
  ];
  const handleOnly = () => [{ start: 0, name: "tableHandle", type: "tensor" }];
  return [
    { tfOpName: "HashTable", category: "hash_table", inputs: [], attrs: tableAttrs() },
    { tfOpName: "HashTableV2", category: "hash_table", inputs: [], attrs: tableAttrs() },
    { tfOpName: "LookupTableImport", category: "hash_table", inputs: handleAnd("keys", "values"), attrs: inOutAttrs() },
    { tfOpName: "LookupTableImportV2", category: "hash_table", inputs: handleAnd("keys", "values"), attrs: inOutAttrs() },
    { tfOpName: "LookupTableFind", category: "hash_table", inputs: handleAnd("keys", "defaultValue"), attrs: inOutAttrs() },
    { tfOpName: "LookupTableFindV2", category: "hash_table", inputs: handleAnd("keys", "defaultValue"), attrs: inOutAttrs() },
    { tfOpName: "LookupTableSize", category: "hash_table", inputs: handleOnly() },
    { tfOpName: "LookupTableSizeV2", category: "hash_table", inputs: handleOnly() },
    { tfOpName: "InitializeTable", category: "hash_table", inputs: handleAnd("keys", "values") },
    { tfOpName: "InitializeTableV2", category: "hash_table", inputs: handleAnd("keys", "values") }
  ];
})();
// Bundler-generated export shim: qe (presumably the bundler's __export
// helper — TODO confirm) installs a lazy getter so vS.json resolves to the
// image op-mapper list U5 on first access.
var vS = {};
qe(vS, {
  json: () => U5
});
// Op-mapper registry for the TensorFlow "image" op category: TF GraphDef op
// name -> input slots consumed and attr renames/types for the TF.js executor.
var U5 = [
  { tfOpName: "ResizeBilinear", category: "image",
    inputs: [
      { start: 0, name: "images", type: "tensor" },
      { start: 1, name: "size", type: "number[]" }
    ],
    attrs: [
      { tfName: "align_corners", name: "alignCorners", type: "bool" },
      { tfName: "half_pixel_centers", name: "halfPixelCenters", type: "bool" },
      { tfName: "T", name: "dtype", type: "dtype", notSupported: true }
    ] },
  { tfOpName: "ResizeNearestNeighbor", category: "image",
    inputs: [
      { start: 0, name: "images", type: "tensor" },
      { start: 1, name: "size", type: "number[]" }
    ],
    attrs: [
      { tfName: "align_corners", name: "alignCorners", type: "bool" },
      { tfName: "half_pixel_centers", name: "halfPixelCenters", type: "bool" },
      { tfName: "T", name: "dtype", type: "dtype", notSupported: true }
    ] },
  { tfOpName: "CropAndResize", category: "image",
    inputs: [
      { start: 0, name: "image", type: "tensor" },
      { start: 1, name: "boxes", type: "tensor" },
      { start: 2, name: "boxInd", type: "tensor" },
      { start: 3, name: "cropSize", type: "number[]" }
    ],
    attrs: [
      { tfName: "method", name: "method", type: "string" },
      { tfName: "extrapolation_value", name: "extrapolationValue", type: "number" }
    ] },
  { tfOpName: "ImageProjectiveTransformV3", category: "image",
    inputs: [
      { start: 0, name: "images", type: "tensor" },
      { start: 1, name: "transforms", type: "tensor" },
      { start: 2, name: "outputShape", type: "number[]" },
      { start: 3, name: "fillValue", type: "number" }
    ],
    attrs: [
      { tfName: "interpolation", name: "interpolation", type: "string" },
      { tfName: "fill_mode", name: "fillMode", type: "string" }
    ] }
];
// Bundler-generated export shim: qe (presumably the bundler's __export
// helper — TODO confirm) installs a lazy getter so kS.json resolves to the
// logical op-mapper list G5 on first access.
var kS = {};
qe(kS, {
  json: () => G5
});
// Op-mapper registry for the TensorFlow "logical" op category.
// The binary comparison/logic ops all share the same shape (a/b tensor
// inputs plus an unsupported "T" dtype attr), as do Select/SelectV2, so
// those entries are produced by small factories returning fresh objects.
var G5 = (() => {
  const dtypeAttr = () => [{ tfName: "T", name: "dtype", type: "dtype", notSupported: true }];
  // a (slot 0) and b (slot 1) tensor operands.
  const binary = (tfOpName) => ({
    tfOpName,
    category: "logical",
    inputs: [
      { start: 0, name: "a", type: "tensor" },
      { start: 1, name: "b", type: "tensor" }
    ],
    attrs: dtypeAttr()
  });
  // condition (slot 0), then a/b branches (slots 1 and 2).
  const select = (tfOpName) => ({
    tfOpName,
    category: "logical",
    inputs: [
      { start: 0, name: "condition", type: "tensor" },
      { start: 1, name: "a", type: "tensor" },
      { start: 2, name: "b", type: "tensor" }
    ],
    attrs: dtypeAttr()
  });
  return [
    binary("Equal"),
    binary("NotEqual"),
    binary("Greater"),
    binary("GreaterEqual"),
    binary("Less"),
    binary("LessEqual"),
    binary("LogicalAnd"),
    // Unary: single operand "a".
    { tfOpName: "LogicalNot", category: "logical",
      inputs: [{ start: 0, name: "a", type: "tensor" }],
      attrs: dtypeAttr() },
    binary("LogicalOr"),
    select("Select"),
    select("SelectV2"),
    // BitwiseAnd names its operands x/y and declares no attrs.
    { tfOpName: "BitwiseAnd", category: "logical",
      inputs: [
        { start: 0, name: "x", type: "tensor" },
        { start: 1, name: "y", type: "tensor" }
      ] }
  ];
})();
// Bundler-generated export shim: qe (presumably the bundler's __export
// helper — TODO confirm) installs a lazy getter so NS.json resolves to the
// matrices op-mapper list H5 on first access.
var NS = {};
qe(NS, {
  json: () => H5
});
// Op-mapper registry for the TensorFlow "matrices" op category: TF GraphDef
// op name -> node-input slots consumed and attr renames/types for the TF.js
// executor.
//
// Fix: MatrixBandPart's third input ("numUpper") must come from node input
// index 2. The original mapped it to index 1 — the same slot as "numLower" —
// so numUpper would have read the num_lower tensor. TF's MatrixBandPart op
// takes (input, num_lower, num_upper) as inputs 0, 1, 2.
var H5 = [
  { tfOpName: "_FusedMatMul", category: "matrices",
    inputs: [
      { start: 0, name: "a", type: "tensor" },
      { start: 1, name: "b", type: "tensor" },
      // Variadic fused-op extras (slot 2 onward); end: 0 appears to mean
      // "through the last input" in this mapper format (same pattern as
      // IdentityN/ShapeN) — confirm against the executor.
      { start: 2, end: 0, name: "args", type: "tensors" }
    ],
    attrs: [
      { tfName: "num_args", name: "numArgs", type: "number" },
      { tfName: "fused_ops", name: "fusedOps", type: "string[]", defaultValue: [] },
      { tfName: "epsilon", name: "epsilon", type: "number", defaultValue: 1e-4 },
      { tfName: "transpose_a", name: "transposeA", type: "bool", defaultValue: false },
      { tfName: "transpose_b", name: "transposeB", type: "bool", defaultValue: false },
      { tfName: "leakyrelu_alpha", name: "leakyreluAlpha", type: "number", defaultValue: 0.2 },
      { tfName: "T", name: "dtype", type: "dtype", notSupported: true }
    ] },
  { tfOpName: "MatMul", category: "matrices",
    inputs: [
      { start: 0, name: "a", type: "tensor" },
      { start: 1, name: "b", type: "tensor" }
    ],
    attrs: [
      { tfName: "transpose_a", name: "transposeA", type: "bool", defaultValue: false },
      { tfName: "transpose_b", name: "transposeB", type: "bool", defaultValue: false },
      { tfName: "T", name: "dtype", type: "dtype", notSupported: true }
    ] },
  // BatchMatMul/V2 expose TF's adj_x/adj_y under the transposeA/B names.
  { tfOpName: "BatchMatMul", category: "matrices",
    inputs: [
      { start: 0, name: "a", type: "tensor" },
      { start: 1, name: "b", type: "tensor" }
    ],
    attrs: [
      { tfName: "adj_x", name: "transposeA", type: "bool", defaultValue: false },
      { tfName: "adj_y", name: "transposeB", type: "bool", defaultValue: false },
      { tfName: "T", name: "dtype", type: "dtype", notSupported: true }
    ] },
  { tfOpName: "BatchMatMulV2", category: "matrices",
    inputs: [
      { start: 0, name: "a", type: "tensor" },
      { start: 1, name: "b", type: "tensor" }
    ],
    attrs: [
      { tfName: "adj_x", name: "transposeA", type: "bool", defaultValue: false },
      { tfName: "adj_y", name: "transposeB", type: "bool", defaultValue: false },
      { tfName: "T", name: "dtype", type: "dtype", notSupported: true }
    ] },
  { tfOpName: "Transpose", category: "matrices",
    inputs: [
      { start: 0, name: "x", type: "tensor" },
      { start: 1, name: "perm", type: "number[]" }
    ],
    attrs: [{ tfName: "T", name: "dtype", type: "dtype", notSupported: true }] },
  { tfOpName: "Einsum", category: "matrices",
    inputs: [{ start: 0, end: 0, name: "tensors", type: "tensors" }],
    attrs: [
      { tfName: "equation", name: "equation", type: "string" },
      { tfName: "N", name: "n", type: "number", defaultValue: 2 },
      { tfName: "T", name: "dtype", type: "dtype" }
    ] },
  { tfOpName: "MatrixBandPart", category: "matrices",
    inputs: [
      { start: 0, name: "a", type: "tensor" },
      { start: 1, name: "numLower", type: "tensor" },
      // FIX: was start: 1 (duplicating numLower's slot); num_upper is
      // node input index 2 in TF's MatrixBandPart signature.
      { start: 2, name: "numUpper", type: "tensor" }
    ] }
];
// Bundler-generated export shim: qe (presumably the bundler's __export
// helper — TODO confirm) installs a lazy getter so TS.json resolves to the
// normalization op-mapper list K5 on first access.
var TS = {};
qe(TS, {
  json: () => K5
});
// Op-mapper registry for the TensorFlow "normalization" op category.
// FusedBatchNorm/V2/V3 share the same inputs and attrs, so they come from a
// factory that returns a fresh entry per call.
var K5 = (() => {
  const fusedBatchNorm = (tfOpName) => ({
    tfOpName,
    category: "normalization",
    inputs: [
      { start: 0, name: "x", type: "tensor" },
      { start: 1, name: "scale", type: "tensor" },
      { start: 2, name: "offset", type: "tensor" },
      { start: 3, name: "mean", type: "tensor" },
      { start: 4, name: "variance", type: "tensor" }
    ],
    attrs: [
      { tfName: "epsilon", name: "epsilon", type: "number", defaultValue: 1e-3 },
      { tfName: "data_format", name: "dataFormat", type: "string", notSupported: true }
    ]
  });
  return [
    { tfOpName: "EuclideanNorm", category: "normalization",
      inputs: [
        { start: 0, name: "x", type: "tensor" },
        { start: 1, name: "axis", type: "number[]" }
      ],
      attrs: [{ tfName: "keep_dims", name: "keepDims", type: "bool", defaultValue: false }] },
    fusedBatchNorm("FusedBatchNorm"),
    fusedBatchNorm("FusedBatchNormV2"),
    fusedBatchNorm("FusedBatchNormV3"),
    { tfOpName: "LRN", category: "normalization",
      inputs: [{ start: 0, name: "x", type: "tensor" }],
      attrs: [
        { tfName: "depth_radius", name: "radius", type: "number", defaultValue: 5 },
        { tfName: "bias", name: "bias", type: "number", defaultValue: 1 },
        { tfName: "alpha", name: "alpha", type: "number", defaultValue: 1 },
        { tfName: "beta", name: "beta", type: "number", defaultValue: 0.5 }
      ] },
    { tfOpName: "Softmax", category: "normalization",
      inputs: [{ start: 0, name: "x", type: "tensor" }] },
    { tfOpName: "LogSoftmax", category: "normalization",
      inputs: [{ start: 0, name: "x", type: "tensor" }] }
  ];
})();
// Bundler-generated export shim: qe (presumably the bundler's __export
// helper — TODO confirm) installs a lazy getter so _S.json resolves to the
// reduction op-mapper list q5 on first access.
var _S = {};
qe(_S, {
  json: () => q5
});
// Op-mapper registry for the TensorFlow "reduction" op category.
// Most entries fall into three repeated shapes (axis-list reductions with
// keep_dims, argmin/argmax with a scalar axis, cumulative ops), so those are
// generated by factories that return fresh objects each call.
var q5 = (() => {
  // x plus an axis list, with a keep_dims attr.
  const axisReduce = (tfOpName) => ({
    tfOpName,
    category: "reduction",
    inputs: [
      { start: 0, name: "x", type: "tensor" },
      { start: 1, name: "axis", type: "number[]" }
    ],
    attrs: [{ tfName: "keep_dims", name: "keepDims", type: "bool" }]
  });
  // x plus a scalar axis; no attrs.
  const argReduce = (tfOpName) => ({
    tfOpName,
    category: "reduction",
    inputs: [
      { start: 0, name: "x", type: "tensor" },
      { start: 1, name: "axis", type: "number" }
    ]
  });
  // Cumulative scan ops with exclusive/reverse flags.
  const cumulative = (tfOpName) => ({
    tfOpName,
    category: "reduction",
    inputs: [
      { start: 0, name: "x", type: "tensor" },
      { start: 1, name: "axis", type: "number" }
    ],
    attrs: [
      { tfName: "exclusive", name: "exclusive", type: "bool" },
      { tfName: "reverse", name: "reverse", type: "bool" }
    ]
  });
  const bincountInputs = () => [
    { start: 0, name: "x", type: "tensor" },
    { start: 1, name: "size", type: "number" },
    { start: 2, name: "weights", type: "tensor" }
  ];
  return [
    { tfOpName: "Bincount", category: "reduction", inputs: bincountInputs() },
    { tfOpName: "DenseBincount", category: "reduction", inputs: bincountInputs(),
      attrs: [{ tfName: "binary_output", name: "binaryOutput", type: "bool" }] },
    axisReduce("Max"),
    axisReduce("Mean"),
    axisReduce("Min"),
    axisReduce("Sum"),
    axisReduce("All"),
    axisReduce("Any"),
    argReduce("ArgMax"),
    argReduce("ArgMin"),
    // Prod additionally records the (unsupported) "T" dtype attr.
    { tfOpName: "Prod", category: "reduction",
      inputs: [
        { start: 0, name: "x", type: "tensor" },
        { start: 1, name: "axis", type: "number[]" }
      ],
      attrs: [
        { tfName: "keep_dims", name: "keepDims", type: "bool" },
        { tfName: "T", name: "dtype", type: "dtype", notSupported: true }
      ] },
    cumulative("Cumprod"),
    cumulative("Cumsum")
  ];
})();
// Bundler-generated export shim: qe (presumably the bundler's __export
// helper — TODO confirm) installs a lazy getter so $S.json resolves to the
// slice/join op-mapper list j5 on first access.
var $S = {};
qe($S, {
  json: () => j5
});
// Op-mapper registry for the TensorFlow "slice_join" op category.
// Note the input-index conventions recorded here: negative start values
// count from the end (ConcatV2's axis is the last input), and Concat's axis
// comes first with the tensor list starting at slot 1.
var j5 = [
  { tfOpName: "ConcatV2", category: "slice_join",
    inputs: [
      { start: 0, end: -1, name: "tensors", type: "tensors" },
      { start: -1, name: "axis", type: "number" }
    ],
    attrs: [{ tfName: "N", name: "n", type: "number", defaultValue: 2 }] },
  { tfOpName: "Concat", category: "slice_join",
    inputs: [
      { start: 1, end: 0, name: "tensors", type: "tensors" },
      { start: 0, name: "axis", type: "number" }
    ],
    attrs: [{ tfName: "N", name: "n", type: "number", defaultValue: 2 }] },
  { tfOpName: "GatherV2", category: "slice_join",
    inputs: [
      { start: 0, name: "x", type: "tensor" },
      { start: 1, name: "indices", type: "tensor" },
      { start: 2, name: "axis", type: "number", defaultValue: 0 }
    ],
    attrs: [{ tfName: "batch_dims", name: "batchDims", type: "number", defaultValue: 0 }] },
  { tfOpName: "Gather", category: "slice_join",
    inputs: [
      { start: 0, name: "x", type: "tensor" },
      { start: 1, name: "indices", type: "tensor" }
    ],
    attrs: [{ tfName: "validate_indices", name: "validateIndices", type: "bool", notSupported: true }] },
  { tfOpName: "Reverse", category: "slice_join",
    inputs: [
      { start: 0, name: "x", type: "tensor" },
      { start: 1, name: "dims", type: "bool[]" }
    ] },
  { tfOpName: "ReverseV2", category: "slice_join",
    inputs: [
      { start: 0, name: "x", type: "tensor" },
      { start: 1, name: "axis", type: "number[]" }
    ] },
  { tfOpName: "Slice", category: "slice_join",
    inputs: [
      { start: 0, name: "x", type: "tensor" },
      { start: 1, name: "begin", type: "number[]" },
      { start: 2, name: "size", type: "number[]" }
    ] },
  { tfOpName: "StridedSlice", category: "slice_join",
    inputs: [
      { start: 0, name: "x", type: "tensor" },
      { start: 1, name: "begin", type: "number[]" },
      { start: 2, name: "end", type: "number[]" },
      { start: 3, name: "strides", type: "number[]" }
    ],
    attrs: [
      { tfName: "begin_mask", name: "beginMask", type: "number", defaultValue: 0 },
      { tfName: "end_mask", name: "endMask", type: "number", defaultValue: 0 },
      { tfName: "new_axis_mask", name: "newAxisMask", type: "number", defaultValue: 0 },
      { tfName: "ellipsis_mask", name: "ellipsisMask", type: "number", defaultValue: 0 },
      { tfName: "shrink_axis_mask", name: "shrinkAxisMask", type: "number", defaultValue: 0 }
    ] },
  { tfOpName: "Pack", category: "slice_join",
    inputs: [{ start: 0, end: 0, name: "tensors", type: "tensors" }],
    attrs: [{ tfName: "axis", name: "axis", type: "number", defaultValue: 0 }] },
  { tfOpName: "Unpack", category: "slice_join",
    inputs: [{ start: 0, name: "tensor", type: "tensor" }],
    attrs: [
      { tfName: "axis", name: "axis", type: "number", defaultValue: 0 },
      { tfName: "num", name: "num", type: "number", defaultValue: 0, notSupported: true }
    ] },
  { tfOpName: "Tile", category: "slice_join",
    inputs: [
      { start: 0, name: "x", type: "tensor" },
      { start: 1, name: "reps", type: "number[]" }
    ] },
  // Split takes the axis FIRST (slot 0) and the tensor second (slot 1).
  { tfOpName: "Split", category: "slice_join",
    inputs: [
      { start: 0, name: "axis", type: "number", defaultValue: 0 },
      { start: 1, name: "x", type: "tensor" }
    ],
    attrs: [{ tfName: "num_split", name: "numOrSizeSplits", type: "number", defaultValue: 1 }] },
  { tfOpName: "SplitV", category: "slice_join",
    inputs: [
      { start: 0, name: "x", type: "tensor" },
      { start: 1, name: "numOrSizeSplits", type: "number[]" },
      { start: 2, name: "axis", type: "number", defaultValue: 0 }
    ] },
  { tfOpName: "ScatterNd", category: "slice_join",
    inputs: [
      { start: 0, name: "indices", type: "tensor" },
      { start: 1, name: "values", type: "tensor" },
      { start: 2, name: "shape", type: "number[]" }
    ] },
  { tfOpName: "GatherNd", category: "slice_join",
    inputs: [
      { start: 0, name: "x", type: "tensor" },
      { start: 1, name: "indices", type: "tensor" }
    ] },
  { tfOpName: "SparseToDense", category: "slice_join",
    inputs: [
      { start: 0, name: "sparseIndices", type: "tensor" },
      { start: 1, name: "outputShape", type: "number[]" },
      { start: 2, name: "sparseValues", type: "tensor" },
      { start: 3, name: "defaultValue", type: "tensor" }
    ],
    attrs: [{ tfName: "validate_indices", name: "validateIndices", type: "bool", defaultValue: false, notSupported: true }] },
  { tfOpName: "TensorScatterUpdate", category: "slice_join",
    inputs: [
      { start: 0, name: "tensor", type: "tensor" },
      { start: 1, name: "indices", type: "tensor" },
      { start: 2, name: "values", type: "tensor" }
    ] }
];
// Bundler-generated export shim: qe (presumably the bundler's __export
// helper — TODO confirm) installs a lazy getter so ES.json resolves to the
// sparse op-mapper list X5 on first access.
var ES = {};
qe(ES, {
  json: () => X5
});
// Op-mapper registry for the TensorFlow "sparse" op category.
var X5 = [
  { tfOpName: "SparseFillEmptyRows", category: "sparse",
    inputs: [
      { start: 0, name: "indices", type: "tensor" },
      { start: 1, name: "values", type: "tensor" },
      { start: 2, name: "denseShape", type: "tensor" },
      { start: 3, name: "defaultValue", type: "tensor" }
    ] },
  { tfOpName: "SparseReshape", category: "sparse",
    inputs: [
      { start: 0, name: "inputIndices", type: "tensor" },
      { start: 1, name: "inputShape", type: "tensor" },
      { start: 2, name: "newShape", type: "tensor" }
    ],
    attrs: [{ tfName: "T", name: "dtype", type: "dtype", notSupported: true }] },
  { tfOpName: "SparseSegmentMean", category: "sparse",
    inputs: [
      { start: 0, name: "data", type: "tensor" },
      { start: 1, name: "indices", type: "tensor" },
      { start: 2, name: "segmentIds", type: "tensor" }
    ] },
  { tfOpName: "SparseSegmentSum", category: "sparse",
    inputs: [
      { start: 0, name: "data", type: "tensor" },
      { start: 1, name: "indices", type: "tensor" },
      { start: 2, name: "segmentIds", type: "tensor" }
    ] }
];
// Bundler-generated export shim: qe (presumably the bundler's __export
// helper — TODO confirm) installs a lazy getter so RS.json resolves to the
// spectral op-mapper list Y5 on first access.
var RS = {};
qe(RS, {
  json: () => Y5
});
// Op-mapper registry for the TensorFlow "spectral" (FFT family) op category.
// RFFT/IRFFT carry an fft_length input at slot 1 that the executor records
// but does not support.
var Y5 = [
  { tfOpName: "FFT", category: "spectral",
    inputs: [{ start: 0, name: "x", type: "tensor" }] },
  { tfOpName: "IFFT", category: "spectral",
    inputs: [{ start: 0, name: "x", type: "tensor" }] },
  { tfOpName: "RFFT", category: "spectral",
    inputs: [
      { start: 0, name: "x", type: "tensor" },
      { start: 1, name: "fft_length", type: "number", notSupported: true }
    ] },
  { tfOpName: "IRFFT", category: "spectral",
    inputs: [
      { start: 0, name: "x", type: "tensor" },
      { start: 1, name: "fft_length", type: "number", notSupported: true }
    ] }
];
// Bundler-generated export shim: qe (presumably the bundler's __export
// helper — TODO confirm) installs a lazy getter so DS.json resolves to the
// string op-mapper list Q5 on first access.
var DS = {};
qe(DS, {
  json: () => Q5
});
// Op-mapper registry for the TensorFlow "string" op category. StringNGrams
// and StringSplit additionally declare named output lists.
var Q5 = [
  { tfOpName: "StaticRegexReplace", category: "string",
    inputs: [{ start: 0, name: "input", type: "tensor" }],
    attrs: [
      { tfName: "pattern", name: "pattern", type: "string" },
      { tfName: "rewrite", name: "rewrite", type: "string" },
      { tfName: "replace_global", name: "replaceGlobal", type: "bool" }
    ] },
  { tfOpName: "StringNGrams", category: "string",
    inputs: [
      { start: 0, name: "data", type: "tensor" },
      { start: 1, name: "dataSplits", type: "tensor" }
    ],
    attrs: [
      { tfName: "separator", name: "separator", type: "string" },
      { tfName: "ngram_widths", name: "nGramWidths", type: "number[]" },
      { tfName: "left_pad", name: "leftPad", type: "string" },
      { tfName: "right_pad", name: "rightPad", type: "string" },
      { tfName: "pad_width", name: "padWidth", type: "number" },
      { tfName: "preserve_short_sequences", name: "preserveShortSequences", type: "bool" }
    ],
    outputs: ["ngrams", "ngrams_splits"] },
  { tfOpName: "StringSplit", category: "string",
    inputs: [
      { start: 0, name: "input", type: "tensor" },
      { start: 1, name: "delimiter", type: "tensor" }
    ],
    attrs: [{ tfName: "skip_empty", name: "skipEmpty", type: "bool" }],
    outputs: ["indices", "values", "shape"] },
  { tfOpName: "StringToHashBucketFast", category: "string",
    inputs: [{ start: 0, name: "input", type: "tensor" }],
    attrs: [{ tfName: "num_buckets", name: "numBuckets", type: "number" }] }
];
// Bundler-generated export shim: qe (presumably the bundler's __export
// helper — TODO confirm) installs a lazy getter so AS.json resolves to the
// transformation op-mapper list Z5 on first access.
var AS = {};
qe(AS, {
  json: () => Z5
});
// Op-mapper registry for the TensorFlow "transformation" op category.
// Note: Cast records its source dtype (SrcT) under "sdtype" as unsupported
// and its destination dtype (DstT) as the entry's "dtype".
var Z5 = [
  { tfOpName: "Cast", category: "transformation",
    inputs: [{ start: 0, name: "x", type: "tensor" }],
    attrs: [
      { tfName: "SrcT", name: "sdtype", type: "dtype", notSupported: true },
      { tfName: "DstT", name: "dtype", type: "dtype" }
    ] },
  { tfOpName: "ExpandDims", category: "transformation",
    inputs: [
      { start: 0, name: "x", type: "tensor" },
      { start: 1, name: "axis", type: "number" }
    ] },
  { tfOpName: "MirrorPad", category: "transformation",
    inputs: [
      { start: 0, name: "x", type: "tensor" },
      { start: 1, name: "padding", type: "number[]" }
    ],
    attrs: [{ tfName: "mode", name: "mode", type: "string" }] },
  { tfOpName: "Pad", category: "transformation",
    inputs: [
      { start: 0, name: "x", type: "tensor" },
      { start: 1, name: "padding", type: "number[]" }
    ],
    attrs: [{ tfName: "constant_value", name: "constantValue", type: "number", defaultValue: 0 }] },
  // PadV2 passes the pad value as a third input rather than an attr.
  { tfOpName: "PadV2", category: "transformation",
    inputs: [
      { start: 0, name: "x", type: "tensor" },
      { start: 1, name: "padding", type: "number[]" },
      { start: 2, name: "constantValue", type: "number", defaultValue: 0 }
    ] },
  { tfOpName: "Reshape", category: "transformation",
    inputs: [
      { start: 0, name: "x", type: "tensor" },
      { start: 1, name: "shape", type: "number[]" }
    ] },
  { tfOpName: "EnsureShape", category: "transformation",
    inputs: [
      { start: 0, name: "x", type: "tensor" },
      { start: 1, name: "shape", type: "number[]" }
    ] },
  // Squeeze accepts the legacy "squeeze_dims" attr name as a fallback.
  { tfOpName: "Squeeze", category: "transformation",
    inputs: [{ start: 0, name: "x", type: "tensor" }],
    attrs: [{ tfName: "axis", tfDeprecatedName: "squeeze_dims", name: "axis", type: "number[]" }] },
  { tfOpName: "SpaceToBatchND", category: "transformation",
    inputs: [
      { start: 0, name: "x", type: "tensor" },
      { start: 1, name: "blockShape", type: "number[]" },
      { start: 2, name: "paddings", type: "number[]" }
    ] },
  { tfOpName: "BatchToSpaceND", category: "transformation",
    inputs: [
      { start: 0, name: "x", type: "tensor" },
      { start: 1, name: "blockShape", type: "number[]" },
      { start: 2, name: "crops", type: "number[]" }
    ] },
  { tfOpName: "DepthToSpace", category: "transformation",
    inputs: [{ start: 0, name: "x", type: "tensor" }],
    attrs: [
      { tfName: "block_size", name: "blockSize", type: "number" },
      { tfName: "data_format", name: "dataFormat", type: "string" }
    ] },
  { tfOpName: "BroadcastTo", category: "transformation",
    inputs: [
      { start: 0, name: "x", type: "tensor" },
      { start: 1, name: "shape", type: "number[]" }
    ],
    attrs: [] },
  { tfOpName: "BroadcastArgs", category: "transformation",
    inputs: [
      { start: 0, name: "s0", type: "tensor" },
      { start: 1, name: "s1", type: "tensor" }
    ],
    attrs: [] }
];
// OperationMapper (singleton): converts a TensorFlow GraphDef (protobuf
// JSON) into the internal graph representation used by the executor —
// nodes with resolved input/child links, parsed typed attributes,
// signature bindings and library (sub-graph) functions.
var Ml = class {
  // Lazily-created singleton accessor.
  static get Instance() {
    return this._instance || (this._instance = new this());
  }
  constructor() {
    // Flatten every op-mapper json list (the hS..AS arrays registered
    // earlier in the bundle: arithmetic, basic_math, control, ...) into a
    // single tfOpName -> mapper lookup table.
    let t8 = [hS, gS, xS, yS, bS, CS, wS, SS, IS, vS, kS, NS, TS, _S, $S, ES, RS, DS, AS],
      e = [].concat(...t8.map(o => o.json));
    this.opMappers = e.reduce((o, n) => (o[n.tfOpName] = n, o), {});
  }
  // Maps a GraphDef `t8` and optional signature `e` to
  // {nodes, inputs, outputs, weights, placeholders, signature, functions}.
  transformGraph(t8, e = {}) {
    let o = t8.node,
      n = [], // placeholder nodes
      s = [], // const (weight) nodes
      a = [], // nodes with no inputs (candidate init nodes)
      i = o.reduce((h, g) => (h[g.name] = this.mapNode(g), g.op.startsWith("Placeholder") ? n.push(h[g.name]) : g.op === "Const" ? s.push(h[g.name]) : (g.input == null || g.input.length === 0) && a.push(h[g.name]), h), {}),
      p = [], // resolved graph inputs
      u = [], // resolved graph outputs
      c = {}, // signature input name -> key
      l = {}; // signature output name -> key
    e != null && (c = this.mapSignatureEntries(e.inputs), l = this.mapSignatureEntries(e.outputs));
    let m = Object.keys(i);
    // Wire up input/child links. When a producer declares named outputs,
    // rewrite the consumer's input name to canonical "name:outputIndex".
    m.forEach(h => {
      let g = i[h];
      g.inputNames.forEach((x, b) => {
        let [C,, S] = Ls(x),
          k = i[C];
        if (k.outputs != null) {
          let _ = k.outputs.indexOf(S);
          if (_ !== -1) {
            let E = `${C}:${_}`;
            g.inputNames[b] = E;
          }
        }
        g.inputs.push(k), k.children.push(g);
      });
    }), Object.keys(l).length === 0 ? m.forEach(h => {
      // No signature outputs: every leaf node (no children) is an output.
      let g = i[h];
      g.children.length === 0 && u.push(g);
    }) : Object.keys(l).forEach(h => {
      let [g] = Ls(h),
        x = i[g];
      x != null && (x.signatureKey = l[h], u.push(x));
    }), Object.keys(c).length > 0 ? Object.keys(c).forEach(h => {
      let [g] = Ls(h),
        x = i[g];
      x && (x.signatureKey = c[h], p.push(x));
    }) : p = n; // no signature inputs: fall back to the placeholders
    let d = {};
    t8.library != null && t8.library.function != null && (d = t8.library.function.reduce((h, g) => (h[g.signature.name] = this.mapFunction(g), h), {}));
    let f = {
      nodes: i,
      inputs: p,
      outputs: u,
      weights: s,
      placeholders: n,
      signature: e,
      functions: d
    };
    return a.length > 0 && (f.initNodes = a), f;
  }
  // Inverts a signature entry map: {key: {name}} -> {name: key}.
  mapSignatureEntries(t8) {
    return Object.keys(t8 || {}).reduce((e, o) => (e[t8[o].name] = o, e), {});
  }
  // Maps one NodeDef into the internal node shape, parsing its declared
  // input params and typed attributes through the registered op mapper.
  mapNode(t8) {
    let e = pf(t8.op) || this.opMappers[t8.op] || {};
    t8.attr == null && (t8.attr = {});
    let o = {
      name: t8.name,
      op: t8.op,
      category: e.category,
      // A leading "^" marks a control-dependency input; strip it.
      inputNames: (t8.input || []).map(n => n.startsWith("^") ? n.slice(1) : n),
      inputs: [],
      children: [],
      inputParams: {},
      attrParams: {},
      rawAttrs: t8.attr,
      outputs: e.outputs
    };
    return e.inputs != null && (o.inputParams = e.inputs.reduce((n, s) => (n[s.name] = {
      type: s.type,
      inputIndexStart: s.start,
      inputIndexEnd: s.end
    }, n), {})), e.attrs != null && (o.attrParams = e.attrs.reduce((n, s) => {
      let a = s.type,
        i;
      // Each attr is decoded per its declared type; if the primary tf
      // attr name is absent, the deprecated name is tried as a fallback.
      switch (s.type) {
        case "string":
          i = lf(t8.attr, s.tfName, s.defaultValue), i === void 0 && s.tfDeprecatedName && (i = lf(t8.attr, s.tfDeprecatedName, s.defaultValue));
          break;
        case "string[]":
          i = yf(t8.attr, s.tfName, s.defaultValue), i === void 0 && s.tfDeprecatedName && (i = yf(t8.attr, s.tfDeprecatedName, s.defaultValue));
          break;
        case "number":
          i = df(t8.attr, s.tfName, s.defaultValue || 0), i === void 0 && s.tfDeprecatedName && (i = df(t8.attr, s.tfDeprecatedName, s.defaultValue));
          break;
        case "number[]":
          i = xf(t8.attr, s.tfName, s.defaultValue), i === void 0 && s.tfDeprecatedName && (i = xf(t8.attr, s.tfDeprecatedName, s.defaultValue));
          break;
        case "bool":
          i = mf(t8.attr, s.tfName, s.defaultValue), i === void 0 && s.tfDeprecatedName && (i = mf(t8.attr, s.tfDeprecatedName, s.defaultValue));
          break;
        case "bool[]":
          i = Cf(t8.attr, s.tfName, s.defaultValue), i === void 0 && s.tfDeprecatedName && (i = Cf(t8.attr, s.tfDeprecatedName, s.defaultValue));
          break;
        case "shape":
          i = gf(t8.attr, s.tfName, s.defaultValue), i === void 0 && s.tfDeprecatedName && (i = gf(t8.attr, s.tfDeprecatedName, s.defaultValue));
          break;
        case "shape[]":
          i = bf(t8.attr, s.tfName, s.defaultValue), i === void 0 && s.tfDeprecatedName && (i = bf(t8.attr, s.tfDeprecatedName, s.defaultValue));
          break;
        case "dtype":
          i = ff(t8.attr, s.tfName, s.defaultValue), i === void 0 && s.tfDeprecatedName && (i = ff(t8.attr, s.tfDeprecatedName, s.defaultValue));
          break;
        case "dtype[]":
          i = hf(t8.attr, s.tfName, s.defaultValue), i === void 0 && s.tfDeprecatedName && (i = hf(t8.attr, s.tfDeprecatedName, s.defaultValue));
          break;
        case "func":
          i = xT(t8.attr, s.tfName, s.defaultValue), i === void 0 && s.tfDeprecatedName && (i = xT(t8.attr, s.tfDeprecatedName, s.defaultValue));
          break;
        case "tensor":
        case "tensors":
          break;
        default:
          throw new Error(`Unsupported param type: ${s.type} for op: ${t8.op}`);
      }
      return n[s.name] = {
        value: i,
        type: a
      }, n;
    }, {})), o;
  }
  // Maps a FunctionDef from the graph library into an executable
  // sub-graph with its own nodes/inputs/outputs/weights and signature.
  mapFunction(t8) {
    let e = t8.nodeDef,
      o = [],
      n = [],
      s = {};
    e != null && (s = e.reduce((l, m) => (l[m.name] = this.mapNode(m), m.op === "Const" && n.push(l[m.name]), l), {}));
    let a = [],
      i = [];
    // Each signature input becomes a synthetic Placeholder node.
    t8.signature.inputArg.forEach(l => {
      let [m] = Ls(l.name),
        d = {
          name: m,
          op: "Placeholder",
          inputs: [],
          inputNames: [],
          category: "graph",
          inputParams: {},
          attrParams: {
            dtype: {
              value: FS(l.type),
              type: "dtype"
            }
          },
          children: []
        };
      d.signatureKey = l.name, a.push(d), s[m] = d;
    }), Object.keys(s).forEach(l => {
      // Same input-name canonicalization / linking as transformGraph.
      let m = s[l];
      m.inputNames.forEach((d, f) => {
        let [h,, g] = Ls(d),
          x = s[h];
        if (x.outputs != null) {
          let b = x.outputs.indexOf(g);
          if (b !== -1) {
            let C = `${h}:${b}`;
            m.inputNames[f] = C;
          }
        }
        m.inputs.push(x), x.children.push(m);
      });
    });
    // Resolve declared outputs through the function's return map.
    let u = t8.ret;
    t8.signature.outputArg.forEach(l => {
      let [m, d] = Ls(u[l.name]),
        f = s[m];
      f != null && (f.defaultOutput = d, i.push(f));
    });
    let c = this.mapArgsToSignature(t8);
    return {
      nodes: s,
      inputs: a,
      outputs: i,
      weights: n,
      placeholders: o,
      signature: c
    };
  }
  // Builds a signature object (methodName + tensor infos) from a
  // FunctionDef's input/output args.
  mapArgsToSignature(t8) {
    return {
      methodName: t8.signature.name,
      inputs: t8.signature.inputArg.reduce((e, o) => (e[o.name] = this.mapArgToTensorInfo(o), e), {}),
      outputs: t8.signature.outputArg.reduce((e, o) => (e[o.name] = this.mapArgToTensorInfo(o, t8.ret), e), {})
    };
  }
  // Maps one arg to {name, dtype}; `e` (the ret map) optionally rewrites
  // the name to the tensor it is bound to.
  mapArgToTensorInfo(t8, e) {
    let o = t8.name;
    return e != null && (o = e[o]), {
      name: o,
      dtype: t8.type
    };
  }
};
// Decodes a base64 string using whatever the environment provides: the
// global atob() (browsers) or Node's Buffer. Throws when neither exists.
function J5(r) {
  let t8 = A().global;
  if (typeof t8.atob != "undefined") return t8.atob(r);
  // Buffer.from replaces the deprecated (and security-flagged)
  // `new Buffer(...)` constructor; behavior is identical for strings.
  if (typeof Buffer != "undefined") return Buffer.from(r, "base64").toString();
  throw new Error("Unable to decode base64 in this environment. Missing built-in atob() or Buffer()");
}
// Decodes `r` — either an array of char codes or a base64 string — into
// a string; the result is lowercased unless `t8` (keep-case) is truthy.
function yT(r, t8) {
  let decoded;
  if (Array.isArray(r)) {
    decoded = String.fromCharCode.apply(null, r);
  } else {
    decoded = J5(r);
  }
  if (t8) {
    return decoded;
  }
  return decoded.toLowerCase();
}
// Reads the string attr `t8` from attr map `r`, decoding its byte field
// `s` via yT; returns default `e` when absent. `o` keeps original case.
function lf(r, t8, e, o = false) {
  const attr = r[t8];
  if (attr == null) {
    return e;
  }
  return yT(attr.s, o);
}
// Reads the boolean attr `t8` from attr map `r` (its `b` field);
// returns default `e` when the attr is absent/falsy.
function mf(r, t8, e) {
  const attr = r[t8];
  if (attr) {
    return attr.b;
  }
  return e;
}
// Reads a numeric attr `t8`: prefers the int field `i`, then the float
// field `f`, then default `e`; string-encoded (int64) values are parsed
// base-10.
function df(r, t8, e) {
  const attr = r[t8] || {};
  let value;
  if (attr.i != null) {
    value = attr.i;
  } else if (attr.f != null) {
    value = attr.f;
  } else {
    value = e;
  }
  if (typeof value == "number") {
    return value;
  }
  return parseInt(value, 10);
}
// Maps a TensorFlow DataType enum value (or its string name, resolved
// through the `ho` enum) to the corresponding tfjs dtype string;
// returns null for unsupported types.
function FS(r) {
  if (typeof r == "string") {
    r = ho[r];
  }
  if (r === ho.DT_FLOAT || r === ho.DT_HALF || r === ho.DT_DOUBLE) {
    return "float32";
  }
  if (r === ho.DT_INT32 || r === ho.DT_INT64 || r === ho.DT_INT8 || r === ho.DT_UINT8) {
    return "int32";
  }
  if (r === ho.DT_BOOL) {
    return "bool";
  }
  if (r === ho.DT_STRING) {
    return "string";
  }
  return null;
}
// Reads a function-valued attr `t8`: returns the referenced function's
// name, or default `e` when absent.
function xT(r, t8, e) {
  const attr = r[t8];
  if (attr && attr.func) {
    return attr.func.name;
  }
  return e;
}
// Reads a dtype attr `t8`, translating its TF DataType via FS;
// returns default `e` when absent.
function ff(r, t8, e) {
  const attr = r[t8];
  if (attr && attr.type) {
    return FS(attr.type);
  }
  return e;
}
// Reads a dtype[] attr `t8`, translating each TF DataType via FS;
// returns default `e` when absent.
function hf(r, t8, e) {
  const attr = r[t8];
  if (attr && attr.list && attr.list.type) {
    return attr.list.type.map(n => FS(n));
  }
  return e;
}
// Converts a TensorShapeProto into a number[]; string-encoded (int64)
// dims are parsed base-10. Returns undefined for unknown rank and []
// when no dims are present.
function bT(r) {
  if (r.unknownRank) {
    return undefined;
  }
  if (r.dim == null) {
    return [];
  }
  return r.dim.map(d => {
    if (typeof d.size == "number") {
      return d.size;
    }
    return parseInt(d.size, 10);
  });
}
// Reads a shape attr `t8`, converting the TensorShapeProto via bT;
// returns default `e` when absent.
function gf(r, t8, e) {
  const attr = r[t8];
  if (attr && attr.shape) {
    return bT(attr.shape);
  }
  return e;
}
// Reads a number[] attr `t8`: prefers the float list `f` when non-empty,
// falls back to the int list `i` (or []); string entries are parsed
// base-10. Returns default `e` when the attr is absent.
function xf(r, t8, e) {
  const attr = r[t8];
  if (!attr) {
    return e;
  }
  let values = attr.list.i;
  if (attr.list.f && attr.list.f.length) {
    values = attr.list.f;
  }
  if (!values) {
    values = [];
  }
  return values.map(v => typeof v == "number" ? v : parseInt(v, 10));
}
// Reads a string[] attr `t8`, decoding each byte entry via yT;
// returns default `e` when absent. `o` keeps original case.
function yf(r, t8, e, o = false) {
  const attr = r[t8];
  if (attr && attr.list && attr.list.s) {
    return attr.list.s.map(entry => yT(entry, o));
  }
  return e;
}
// Reads a shape[] attr `t8`, converting each TensorShapeProto via bT;
// returns default `e` when absent.
function bf(r, t8, e) {
  const attr = r[t8];
  if (attr && attr.list && attr.list.shape) {
    return attr.list.shape.map(s => bT(s));
  }
  return e;
}
// Reads a bool[] attr `t8` (the list's `b` field);
// returns default `e` when absent.
function Cf(r, t8, e) {
  const attr = r[t8];
  if (attr && attr.list && attr.list.b) {
    return attr.list.b;
  }
  return e;
}
// NodeValueImpl: eagerly resolves a node's inputs and raw attributes for
// custom-op execution. NOTE: getAttr's branch ORDER is significant — it
// probes the AttrValue fields (tensor, i/f, s, b, shape, type, list.*)
// in a fixed precedence and must stay in this sequence.
var wf = class {
  constructor(t8, e, o) {
    // node, tensorMap, context; inputs/attrs are materialized up front.
    this.node = t8, this.tensorMap = e, this.context = o, this.inputs = [], this.attrs = {}, this.inputs = t8.inputNames.map(n => this.getInput(n)), t8.rawAttrs != null && (this.attrs = Object.keys(t8.rawAttrs).reduce((n, s) => (n[s] = this.getAttr(s), n), {}));
  }
  // Resolves one named input tensor from the executor's tensor map.
  getInput(t8) {
    return zt(t8, this.tensorMap, this.context);
  }
  // Decodes raw attr `t8` by probing which AttrValue field is populated;
  // `e` is the default returned when nothing matches.
  getAttr(t8, e) {
    let o = this.node.rawAttrs[t8];
    if (o.tensor != null) return zt(t8, this.tensorMap, this.context);
    if (o.i != null || o.f != null) return df(this.node.rawAttrs, t8, e);
    if (o.s != null) return lf(this.node.rawAttrs, t8, e);
    if (o.b != null) return mf(this.node.rawAttrs, t8, e);
    if (o.shape != null) return gf(this.node.rawAttrs, t8, e);
    if (o.type != null) return ff(this.node.rawAttrs, t8, e);
    if (o.list != null) {
      if (o.list.i != null || o.list.f != null) return xf(this.node.rawAttrs, t8, e);
      if (o.list.s != null) return yf(this.node.rawAttrs, t8, e);
      if (o.list.shape != null) return bf(this.node.rawAttrs, t8, e);
      if (o.list.b != null) return Cf(this.node.rawAttrs, t8, e);
      if (o.list.type != null) return hf(this.node.rawAttrs, t8, e);
    }
    return e;
  }
};
// `Je` is the lazily-populated tfjs-core ops namespace handed to the
// graph-op executors below as their default op registry; qe() installs
// getters that map each public op name onto its minified implementation.
var Je = {};
qe(Je, {
  OP_SCOPE_SUFFIX: () => kw,
  abs: () => Jt,
  acos: () => kk,
  acosh: () => Nk,
  add: () => Ce,
  addN: () => Tk,
  all: () => _k,
  any: () => $k,
  argMax: () => Ek,
  argMin: () => Rk,
  asin: () => Dk,
  asinh: () => Ak,
  atan: () => Fk,
  atan2: () => Pk,
  atanh: () => Ok,
  avgPool: () => fd,
  avgPool3d: () => Bk,
  basicLSTMCell: () => zk,
  batchNorm: () => au,
  batchNorm2d: () => Wk,
  batchNorm3d: () => Uk,
  batchNorm4d: () => Gk,
  batchToSpaceND: () => hd,
  bincount: () => gd,
  bitwiseAnd: () => Hk,
  booleanMaskAsync: () => E6,
  broadcastArgs: () => Kk,
  broadcastTo: () => iu,
  buffer: () => me,
  cast: () => We,
  ceil: () => qk,
  clipByValue: () => jk,
  clone: () => Ur,
  complex: () => Er,
  concat: () => bt,
  concat1d: () => Xk,
  concat2d: () => Yk,
  concat3d: () => Qk,
  concat4d: () => Zk,
  conv1d: () => Jk,
  conv2d: () => uu,
  conv2dTranspose: () => e2,
  conv3d: () => t2,
  conv3dTranspose: () => o2,
  cos: () => n2,
  cosh: () => s2,
  cosineWindow: () => Rl,
  cumprod: () => a2,
  cumsum: () => i2,
  denseBincount: () => u2,
  depthToSpace: () => p2,
  depthwiseConv2d: () => lc,
  diag: () => c2,
  dilation2d: () => l2,
  div: () => je,
  divNoNan: () => d2,
  dot: () => f2,
  dropout: () => W6,
  einsum: () => pu,
  elu: () => Cd,
  enclosingPowerOfTwo: () => Qw,
  ensureShape: () => h2,
  equal: () => bd,
  erf: () => g2,
  euclideanNorm: () => b2,
  exp: () => $o,
  expandDims: () => Ms,
  expm1: () => C2,
  eye: () => wd,
  fft: () => fc,
  fill: () => Ea,
  floor: () => Sd,
  floorDiv: () => dd,
  fused: () => Zw,
  gather: () => Id,
  gatherND: () => z6,
  greater: () => qu,
  greaterEqual: () => vd,
  ifft: () => Ju,
  imag: () => lu,
  image: () => Kj,
  inTopKAsync: () => G6,
  irfft: () => Kd,
  isFinite: () => w2,
  isInf: () => S2,
  isNaN: () => I2,
  leakyRelu: () => kd,
  less: () => _l,
  lessEqual: () => mc,
  linalg: () => qj,
  linspace: () => v2,
  localResponseNormalization: () => k2,
  log: () => pi,
  log1p: () => Nd,
  logSigmoid: () => N2,
  logSoftmax: () => T2,
  logSumExp: () => $d,
  logicalAnd: () => ju,
  logicalNot: () => Ed,
  logicalOr: () => Rd,
  logicalXor: () => _2,
  losses: () => jj,
  lowerBound: () => $2,
  matMul: () => Ze,
  max: () => Ra,
  maxPool: () => Ad,
  maxPool3d: () => E2,
  maxPoolWithArgmax: () => R2,
  maximum: () => Fd,
  mean: () => Xu,
  meshgrid: () => D2,
  min: () => Tl,
  minimum: () => Yu,
  mirrorPad: () => A2,
  mod: () => F2,
  moments: () => P2,
  movingAverage: () => A6,
  mul: () => se,
  multiRNNCell: () => O2,
  multinomial: () => M2,
  neg: () => cr,
  norm: () => Ku,
  notEqual: () => Pd,
  oneHot: () => El,
  ones: () => Da,
  onesLike: () => L2,
  op: () => N,
  outerProduct: () => B2,
  pad: () => Aa,
  pad1d: () => z2,
  pad2d: () => V2,
  pad3d: () => W2,
  pad4d: () => U2,
  pool: () => G2,
  pow: () => ui,
  prelu: () => Md,
  print: () => md,
  prod: () => H2,
  raggedGather: () => K2,
  raggedRange: () => q2,
  raggedTensorToTensor: () => j2,
  rand: () => X2,
  randomGamma: () => g1,
  randomNormal: () => Ud,
  randomStandardNormal: () => x1,
  randomUniform: () => dc,
  randomUniformInt: () => y1,
  range: () => mu,
  real: () => ci,
  reciprocal: () => b1,
  relu: () => du,
  relu6: () => Gd,
  reshape: () => W,
  reverse: () => mo,
  reverse1d: () => C1,
  reverse2d: () => w1,
  reverse3d: () => S1,
  reverse4d: () => I1,
  rfft: () => hc,
  round: () => Hd,
  rsqrt: () => v1,
  scalar: () => ke,
  scatterND: () => P6,
  searchSorted: () => $l,
  selu: () => k1,
  separableConv2d: () => N1,
  setdiff1dAsync: () => T1,
  sigmoid: () => $a,
  sign: () => _1,
  signal: () => Hj,
  sin: () => $1,
  sinh: () => E1,
  slice: () => Xe,
  slice1d: () => R1,
  slice2d: () => D1,
  slice3d: () => A1,
  slice4d: () => F1,
  softmax: () => P1,
  softplus: () => _d,
  spaceToBatchND: () => Od,
  sparse: () => Xj,
  sparseToDense: () => L6,
  spectral: () => Gj,
  split: () => li,
  sqrt: () => Dr,
  square: () => er,
  squaredDifference: () => qd,
  squeeze: () => gc,
  stack: () => kr,
  step: () => jd,
  stridedSlice: () => O1,
  string: () => Yj,
  sub: () => Te,
  sum: () => ot,
  tan: () => M1,
  tanh: () => Nl,
  tensor: () => ur,
  tensor1d: () => tr,
  tensor2d: () => fu,
  tensor3d: () => Xd,
  tensor4d: () => L1,
  tensor5d: () => B1,
  tensor6d: () => z1,
  tensorScatterUpdate: () => W1,
  tile: () => cu,
  topk: () => U1,
  transpose: () => yc,
  truncatedNormal: () => G1,
  unique: () => H1,
  unsortedSegmentSum: () => K1,
  unstack: () => fo,
  upperBound: () => q1,
  variable: () => j1,
  where: () => lo,
  whereAsync: () => Qd,
  zeros: () => Gr,
  zerosLike: () => Ht
});
// Executes one arithmetic op node `r` against the ops namespace `o`
// (defaults to the tfjs ops registry `Je`), resolving named inputs via
// the tensor map `t8` and execution context `e`. Returns the result in a
// one-element array; unknown op types raise a TypeError, exactly as the
// original switch did.
var CT = (r, t8, e, o = Je) => {
  // AddN is the lone variadic case.
  if (r.op === "AddN") {
    return [o.addN(I("tensors", r, t8, e))];
  }
  // Every other op is a plain binary: method(a, b).
  const binaryMethods = {
    BiasAdd: "add",
    AddV2: "add",
    Add: "add",
    FloorMod: "mod",
    Mod: "mod",
    Mul: "mul",
    RealDiv: "div",
    Div: "div",
    DivNoNan: "divNoNan",
    FloorDiv: "floorDiv",
    Sub: "sub",
    Minimum: "minimum",
    Maximum: "maximum",
    Pow: "pow",
    SquaredDifference: "squaredDifference"
  };
  if (!Object.prototype.hasOwnProperty.call(binaryMethods, r.op)) {
    throw TypeError(`Node type ${r.op} is not implemented`);
  }
  const method = binaryMethods[r.op];
  return [o[method](I("a", r, t8, e), I("b", r, t8, e))];
};
// Executes one basic-math (mostly unary) op node `r` against the ops
// namespace `o` (defaults to the tfjs ops registry `Je`). Inputs are
// resolved by name via I(), or positionally via zt() for the few ops
// whose mapper has no named input. Returns the result in a one-element
// array; unknown ops raise a TypeError, matching the original switch.
var wT = (r, t8, e, o = Je) => {
  const own = Object.prototype.hasOwnProperty;
  // Plain `method(x)` ops, keyed by TF op name.
  const unaryOps = {
    Abs: "abs", ComplexAbs: "abs", Acos: "acos", Acosh: "acosh",
    Asin: "asin", Asinh: "asinh", Atan: "atan", Atanh: "atanh",
    Ceil: "ceil", Cos: "cos", Cosh: "cosh", Elu: "elu", Erf: "erf",
    Exp: "exp", Expm1: "expm1", Floor: "floor", Log: "log",
    Log1p: "log1p", Imag: "imag", Neg: "neg", Reciprocal: "reciprocal",
    Real: "real", Relu: "relu", Round: "round", Selu: "selu",
    Sigmoid: "sigmoid", Sin: "sin", Sign: "sign", Sinh: "sinh",
    Softplus: "softplus", Sqrt: "sqrt", Square: "square",
    Tanh: "tanh", Tan: "tan", Relu6: "relu6"
  };
  // Ops whose single input is fetched positionally from inputNames[0].
  const tensorInputOps = { Rsqrt: "rsqrt", IsNan: "isNaN", IsInf: "isInf", IsFinite: "isFinite" };
  // Two-input ops: [method, firstParamName, secondParamName].
  const pairedOps = {
    Atan2: ["atan2", "x", "y"],
    Complex: ["complex", "real", "imag"],
    LeakyRelu: ["leakyRelu", "x", "alpha"],
    Prelu: ["prelu", "x", "alpha"]
  };
  if (r.op === "ClipByValue") {
    return [o.clipByValue(I("x", r, t8, e), I("clipValueMin", r, t8, e), I("clipValueMax", r, t8, e))];
  }
  if (own.call(unaryOps, r.op)) {
    return [o[unaryOps[r.op]](I("x", r, t8, e))];
  }
  if (own.call(tensorInputOps, r.op)) {
    return [o[tensorInputOps[r.op]](zt(r.inputNames[0], t8, e))];
  }
  if (own.call(pairedOps, r.op)) {
    const [method, first, second] = pairedOps[r.op];
    return [o[method](I(first, r, t8, e), I(second, r, t8, e))];
  }
  throw TypeError(`Node type ${r.op} is not implemented`);
};
// Asserts that shapes `r` and `t8` are compatible, treating any negative
// dim as a wildcard; scalar markers (plain numbers) skip the check
// entirely. `e` prefixes the failure message.
function Hr(r, t8, e = "") {
  if (typeof r == "number" || typeof t8 == "number") {
    return;
  }
  const message = () => e + ` Shapes ${r} and ${t8} must match`;
  y.assert(r.length === t8.length, message);
  for (let idx = 0; idx < r.length; idx++) {
    const dimA = r[idx];
    const dimB = t8[idx];
    y.assert(dimA < 0 || dimB < 0 || dimA === dimB, message);
  }
}
// True when `r` is a fully-defined shape: an array containing no
// negative (unknown) dimensions. A scalar marker (number) never is.
function ST(r) {
  if (typeof r == "number") {
    return false;
  }
  return !r.some(dim => dim < 0);
}
// Infers a fully-defined element shape for a tensor list: merges the
// list's declared shape `r` with the requested shape `e`, then — if dims
// are still unknown — with each element's actual shape from `t8`.
// Throws when no elements exist to resolve the unknowns, or when the
// merged shape still contains unknown dims.
function bc(r, t8, e) {
  let merged = Sf(r, e);
  const hasUnknownDims = !ST(merged);
  if (hasUnknownDims && t8.length === 0) {
    throw new Error(`Tried to calculate elements of an empty list with non-fully-defined elementShape: ${merged}`);
  }
  if (hasUnknownDims) {
    for (const element of t8) {
      merged = Sf(element.shape, merged);
    }
  }
  if (!ST(merged)) {
    throw new Error(`Non-fully-defined elementShape: ${merged}`);
  }
  return merged;
}
// Merges two shapes, where a plain number stands for "unknown shape" and
// a negative dim for "unknown dim": ranks and known dims must agree, and
// each merged dim takes the known value from whichever side has one.
function Sf(r, t8) {
  if (typeof r == "number") {
    return t8;
  }
  if (typeof t8 == "number") {
    return r;
  }
  if (r.length !== t8.length) {
    throw new Error(`Incompatible ranks during merge: ${r} vs. ${t8}`);
  }
  return r.map((dimA, idx) => {
    const dimB = t8[idx];
    if (dimA >= 0 && dimB >= 0 && dimA !== dimB) {
      throw new Error(`Incompatible shape during merge: ${r} vs. ${t8}`);
    }
    return dimA >= 0 ? dimA : dimB;
  });
}
// TensorArray: runtime backing for TF's TensorArrayV3 op family — an
// indexed, single-dtype collection of tensors with optional dynamic
// resizing and clear-after-read semantics. `idTensor` is a kept scalar
// whose id identifies this array inside the executor's resource maps.
var If = class {
  constructor(t8, e, o, n, s, a, i) {
    // name, dtype, maxSize, elementShape, identicalElementShapes,
    // dynamicSize, clearAfterRead — taken verbatim from the op attrs.
    this.name = t8, this.dtype = e, this.maxSize = o, this.elementShape = n, this.identicalElementShapes = s, this.dynamicSize = a, this.clearAfterRead = i, this.tensors = [], this.closed_ = false, this.idTensor = ke(0), Rr(this.idTensor);
  }
  // Unique id of this array (the id of its keep-alive scalar).
  get id() {
    return this.idTensor.id;
  }
  get closed() {
    return this.closed_;
  }
  // Disposes every stored tensor whose id is not in the spare-set `t8`,
  // then empties the array and marks it closed.
  clearAndClose(t8) {
    this.tensors.forEach(e => {
      (t8 == null || !t8.has(e.tensor.id)) && e.tensor.dispose();
    }), this.tensors = [], this.closed_ = true, this.idTensor.dispose();
  }
  size() {
    return this.tensors.length;
  }
  // Reads the tensor at index `t8`; when clearAfterRead is set, a slot
  // can only be read once.
  read(t8) {
    if (this.closed_) throw new Error(`TensorArray ${this.name} has already been closed.`);
    if (t8 < 0 || t8 >= this.size()) throw new Error(`Tried to read from index ${t8}, but array size is: ${this.size()}`);
    let e = this.tensors[t8];
    if (e.cleared) throw new Error(`TensorArray ${this.name}: Could not read index ${t8} twice because it was cleared after a previous read (perhaps try setting clear_after_read = false?).`);
    return this.clearAfterRead && (e.cleared = true), e.read = true, e.tensor;
  }
  readMany(t8) {
    return t8.map(e => this.read(e));
  }
  // Writes tensor `e` at index `t8`, enforcing dtype/shape agreement and
  // the write-once (and not-after-read) invariants. The first write may
  // fix an initially-empty elementShape.
  write(t8, e) {
    if (this.closed_) throw new Error(`TensorArray ${this.name} has already been closed.`);
    if (t8 < 0 || !this.dynamicSize && t8 >= this.maxSize) throw new Error(`Tried to write to index ${t8}, but array is not resizeable and size is: ${this.maxSize}`);
    let o = this.tensors[t8] || {};
    if (e.dtype !== this.dtype) throw new Error(`TensorArray ${this.name}: Could not write to TensorArray index ${t8},
      because the value dtype is ${e.dtype}, but TensorArray dtype is ${this.dtype}.`);
    if (this.size() === 0 && (this.elementShape == null || this.elementShape.length === 0) && (this.elementShape = e.shape), Hr(this.elementShape, e.shape, `TensorArray ${this.name}: Could not write to TensorArray index ${t8}.`), o.read) throw new Error(`TensorArray ${this.name}: Could not write to TensorArray index ${t8}, because it has already been read.`);
    if (o.written) throw new Error(`TensorArray ${this.name}: Could not write to TensorArray index ${t8}, because it has already been written.`);
    o.tensor = e, Rr(e), o.written = true, this.tensors[t8] = o;
  }
  writeMany(t8, e) {
    if (t8.length !== e.length) throw new Error(`TensorArray ${this.name}: could not write multiple tensors,because the index size: ${t8.length} is not the same as tensors size: ${e.length}.`);
    t8.forEach((o, n) => this.write(o, e[n]));
  }
  // Stacks the selected indices (default: all) into one tensor along a
  // new axis 0.
  gather(t8, e) {
    if (e && e !== this.dtype) throw new Error(`TensorArray dtype is ${this.dtype} but gather requested dtype ${e}`);
    if (t8) t8 = t8.slice(0, this.size());else {
      t8 = [];
      for (let n = 0; n < this.size(); n++) t8.push(n);
    }
    if (t8.length === 0) return ur([], [0].concat(this.elementShape));
    let o = this.readMany(t8);
    return Hr(this.elementShape, o[0].shape, "TensorArray shape mismatch: "), kr(o, 0);
  }
  // Concatenates all stored tensors along axis 0.
  concat(t8) {
    if (t8 && t8 !== this.dtype) throw new Error(`TensorArray dtype is ${this.dtype} but concat requested dtype ${t8}`);
    if (this.size() === 0) return ur([], [0].concat(this.elementShape));
    let e = [];
    for (let n = 0; n < this.size(); n++) e.push(n);
    let o = this.readMany(e);
    return Hr(this.elementShape, o[0].shape, `TensorArray shape mismatch: tensor array shape (${this.elementShape}) vs first tensor shape (${o[0].shape})`), bt(o, 0);
  }
  // Unstacks tensor `e` along axis 0 and writes each slice at the
  // corresponding index in `t8`.
  scatter(t8, e) {
    if (e.dtype !== this.dtype) throw new Error(`TensorArray dtype is ${this.dtype} but tensor has dtype ${e.dtype}`);
    if (t8.length !== e.shape[0]) throw new Error(`Expected len(indices) == tensor.shape[0], but saw: ${t8.length} vs. ${e.shape[0]}`);
    let o = Math.max(...t8);
    if (!this.dynamicSize && o >= this.maxSize) throw new Error(`Max index must be < array size (${o} vs. ${this.maxSize})`);
    this.writeMany(t8, fo(e, 0));
  }
  // Splits tensor `e` along axis 0 into chunks of the given lengths `t8`
  // and writes them at consecutive indices.
  split(t8, e) {
    if (e.dtype !== this.dtype) throw new Error(`TensorArray dtype is ${this.dtype} but tensor has dtype ${e.dtype}`);
    let o = 0,
      n = t8.map(p => (o += p, o));
    if (o !== e.shape[0]) throw new Error(`Expected sum of lengths to be equal to
      tensor.shape[0], but sum of lengths is
      ${o}, and tensor's shape is: ${e.shape}`);
    if (!this.dynamicSize && t8.length !== this.maxSize) throw new Error(`TensorArray's size is not equal to the size of lengths (${this.maxSize} vs. ${t8.length}), and the TensorArray is not marked as dynamically resizeable`);
    let s = o === 0 ? 0 : e.size / o,
      a = [];
    De(() => {
      e = W(e, [1, o, s]);
      for (let p = 0; p < t8.length; ++p) {
        let c = [0, p === 0 ? 0 : n[p - 1], 0],
          l = [1, t8[p], s];
        a[p] = W(Xe(e, c, l), this.elementShape);
      }
      return a;
    });
    let i = [];
    for (let p = 0; p < t8.length; p++) i[p] = p;
    this.writeMany(i, a);
  }
};
// TensorList: runtime backing for TF's TensorList* ops — an ordered,
// single-dtype list of tensors sharing one (possibly partially unknown)
// element shape, with an optional max size. `idTensor` is a kept scalar
// whose id identifies this list inside the executor's resource maps.
var hi = class {
  get id() {
    return this.idTensor.id;
  }
  constructor(t8, e, o, n = -1) {
    // tensors, elementShape, elementDtype, maxNumElements (-1 = unbounded).
    this.tensors = t8, this.elementShape = e, this.elementDtype = o, t8 != null && t8.forEach(s => {
      if (o !== s.dtype) throw new Error(`Invalid data types; op elements ${o}, but list elements ${s.dtype}`);
      Hr(e, s.shape, "TensorList shape mismatch: "), Rr(s);
    }), this.idTensor = ke(0), this.maxNumElements = n, Rr(this.idTensor);
  }
  // Shallow copy: shares the underlying tensors (note: maxNumElements is
  // intentionally not carried over — the copy is unbounded).
  copy() {
    return new hi([...this.tensors], this.elementShape, this.elementDtype);
  }
  // Disposes every stored tensor whose id is not in the spare-set `t8`.
  clearAndClose(t8) {
    this.tensors.forEach(e => {
      (t8 == null || !t8.has(e.id)) && e.dispose();
    }), this.tensors.length = 0, this.idTensor.dispose();
  }
  size() {
    return this.tensors.length;
  }
  // Stacks all elements into one tensor along a new axis 0, after
  // validating dtype, optional expected count `o`, and shape `t8`.
  stack(t8, e, o = -1) {
    if (e !== this.elementDtype) throw new Error(`Invalid data types; op elements ${e}, but list elements ${this.elementDtype}`);
    if (o !== -1 && this.tensors.length !== o) throw new Error(`Operation expected a list with ${o} elements but got a list with ${this.tensors.length} elements.`);
    Hr(t8, this.elementShape, "TensorList shape mismatch: ");
    let n = bc(this.elementShape, this.tensors, t8);
    return De(() => {
      let s = this.tensors.map(a => W(a, n));
      return kr(s, 0);
    });
  }
  // Removes and returns the last element, reshaped to the resolved
  // element shape.
  popBack(t8, e) {
    if (e !== this.elementDtype) throw new Error(`Invalid data types; op elements ${e}, but list elements ${this.elementDtype}`);
    if (this.size() === 0) throw new Error("Trying to pop from an empty list.");
    let o = bc(this.elementShape, this.tensors, t8),
      n = this.tensors.pop();
    return n.kept = false, Hr(n.shape, t8, "TensorList shape mismatch: "), W(n, o);
  }
  // Appends tensor `t8`, enforcing dtype/shape and the max-size bound.
  pushBack(t8) {
    if (t8.dtype !== this.elementDtype) throw new Error(`Invalid data types; op elements ${t8.dtype}, but list elements ${this.elementDtype}`);
    if (Hr(t8.shape, this.elementShape, "TensorList shape mismatch: "), this.maxNumElements === this.size()) throw new Error("Trying to push element into a full list.");
    Rr(t8), this.tensors.push(t8);
  }
  // Returns a NEW list of length `t8`, sharing the first min(len, t8)
  // tensors with this one.
  resize(t8) {
    if (t8 < 0) throw new Error(`TensorListResize expects size to be non-negative. Got: ${t8}`);
    if (this.maxNumElements !== -1 && t8 > this.maxNumElements) throw new Error(`TensorListResize input size ${t8} is greater maxNumElement ${this.maxNumElements}.`);
    let e = new hi([], this.elementShape, this.elementDtype, this.maxNumElements);
    e.tensors.length = t8;
    for (let o = 0; o < Math.min(this.tensors.length, t8); ++o) e.tensors[o] = this.tensors[o];
    return e;
  }
  // Returns the element at index `t8`, reshaped to the resolved element
  // shape; validates dtype `o` and expected shape `e`.
  getItem(t8, e, o) {
    if (o !== this.elementDtype) throw new Error(`Invalid data types; op elements ${o}, but list elements ${this.elementDtype}`);
    if (t8 < 0 || t8 > this.tensors.length) throw new Error(`Trying to access element ${t8} in a list with ${this.tensors.length} elements.`);
    if (this.tensors[t8] == null) throw new Error(`element at index ${t8} is null.`);
    Hr(this.tensors[t8].shape, e, "TensorList shape mismatch: ");
    let n = bc(this.elementShape, this.tensors, e);
    return W(this.tensors[t8], n);
  }
  // Replaces the element at index `t8` with tensor `e`; the displaced
  // tensor (if any) is released from the keep set.
  setItem(t8, e) {
    if (e.dtype !== this.elementDtype) throw new Error(`Invalid data types; op elements ${e.dtype}, but list elements ${this.elementDtype}`);
    if (t8 < 0 || this.maxNumElements !== -1 && t8 >= this.maxNumElements) throw new Error(`Trying to set element ${t8} in a list with max ${this.maxNumElements} elements.`);
    Hr(this.elementShape, e.shape, "TensorList shape mismatch: "), Rr(e), this.tensors[t8] != null && (this.tensors[t8].kept = false), this.tensors[t8] = e;
  }
  // Stacks the elements at the given indices into one tensor along a new
  // axis 0.
  gather(t8, e, o) {
    if (e !== this.elementDtype) throw new Error(`Invalid data types; op elements ${e}, but list elements ${this.elementDtype}`);
    Hr(this.elementShape, o, "TensorList shape mismatch: "), t8 = t8.slice(0, this.size());
    let n = bc(this.elementShape, this.tensors, o);
    return t8.length === 0 ? ur([], [0].concat(n)) : De(() => {
      let s = t8.map(a => W(this.tensors[a], n));
      return kr(s, 0);
    });
  }
  // Concatenates all elements along axis 0.
  concat(t8, e) {
    if (t8 && t8 !== this.elementDtype) throw new Error(`TensorList dtype is ${this.elementDtype} but concat requested dtype ${t8}`);
    Hr(this.elementShape, e, "TensorList shape mismatch: ");
    let o = bc(this.elementShape, this.tensors, e);
    return this.size() === 0 ? ur([], [0].concat(o)) : De(() => {
      let n = this.tensors.map(s => W(s, o));
      return bt(n, 0);
    });
  }
};
// Builds a TensorList by unstacking tensor `r` along axis 0 (TF's
// TensorListFromTensor): `t8` is the expected element shape, `e` the
// expected dtype. Throws for scalars or on a dtype mismatch.
function IT(r, t8, e) {
  const dtype = r.dtype;
  if (r.shape.length < 1) {
    throw new Error(`Tensor must be at least a vector, but saw shape: ${r.shape}`);
  }
  if (r.dtype !== e) {
    throw new Error(`Invalid data types; op elements ${r.dtype}, but list elements ${e}`);
  }
  const elementShape = r.shape.slice(1);
  Hr(elementShape, t8, "TensorList shape mismatch: ");
  const elements = fo(r);
  return new hi(elements, t8, dtype);
}
// Creates an empty TensorList (TF's TensorListReserve): element shape
// `r`, dtype `t8`, max size `o`. The numElements arg `e` is accepted for
// signature parity with the op but is not used here.
function vT(r, t8, e, o) {
  const noTensors = [];
  return new hi(noTensors, r, t8, o);
}
// Builds a TensorList by scattering the rows of tensor `r` to the given
// indices `t8` (TF's TensorListScatter): `e` is the element shape, `o`
// the optional max size (-1/null = unbounded).
function kT(r, t8, e, o) {
  if (t8.length !== r.shape[0]) {
    throw new Error(`Expected len(indices) == tensor.shape[0], but saw: ${t8.length} vs. ${r.shape[0]}`);
  }
  const maxIndex = Math.max(...t8);
  if (o != null && o !== -1 && maxIndex >= o) {
    throw new Error(`Max index must be < array size (${maxIndex} vs. ${o})`);
  }
  const list = new hi([], e, r.dtype, o);
  const rows = fo(r, 0);
  for (let p = 0; p < t8.length; p++) {
    list.setItem(t8[p], rows[p]);
  }
  return list;
}
// Builds a TensorList by splitting tensor `r` along axis 0 into chunks
// of the given lengths `t8`, each reshaped to element shape `e` (TF's
// TensorListSplit). NOTE: the ordering here is deliberate — the chunk
// reshapes run inside De() (tidy) and the reshaped input is disposed
// before the chunks are installed in the list.
function NT(r, t8, e) {
  let o = 0,
    n = t8.map(c => (o += c, o)); // running prefix sums of the lengths
  if (o !== r.shape[0]) throw new Error(`Expected sum of lengths to be equal to
    tensor.shape[0], but sum of lengths is
    ${o}, and tensor's shape is: ${r.shape}`);
  let s = r.shape.slice(1),
    a = Sf(s, e), // merge actual element shape with the requested one
    i = o === 0 ? 0 : r.size / o, // elements per row
    p = De(() => {
      let c = [];
      r = W(r, [1, o, i]);
      for (let l = 0; l < t8.length; ++l) {
        let d = [0, l === 0 ? 0 : n[l - 1], 0],
          f = [1, t8[l], i];
        c[l] = W(Xe(r, d, f), a); // slice chunk l, reshape to element shape
      }
      return r.dispose(), c;
    }),
    u = new hi([], e, r.dtype, t8.length);
  for (let c = 0; c < p.length; c++) u.setItem(c, p[c]);
  return u;
}
// Executor for control-flow, TensorArray and TensorList ops. `r` is the
// graph node, `t8` the tensor-name map, `e` the execution context (holds
// frames, function sub-graphs, tensor arrays/lists). Async because If/While
// execute sub-graphs. Returns the node's output tensor array; clones (`Bs`)
// pass-through tensors so callers own their copies.
var TT = async (r, t8, e) => {
  switch (r.op) {
    case "If":
    case "StatelessIf":
      {
        let o = I("thenBranch", r, t8, e),
          n = I("elseBranch", r, t8, e),
          s = I("cond", r, t8, e),
          a = I("args", r, t8, e);
        // Evaluate cond, then run exactly one branch sub-graph with the args.
        return (await s.data())[0] ? e.functionMap[o].executeFunctionAsync(a, e.tensorArrayMap, e.tensorListMap) : e.functionMap[n].executeFunctionAsync(a, e.tensorArrayMap, e.tensorListMap);
      }
    case "While":
    case "StatelessWhile":
      {
        let o = I("body", r, t8, e),
          n = I("cond", r, t8, e),
          s = I("args", r, t8, e),
          a = await e.functionMap[n].executeFunctionAsync(s, e.tensorArrayMap, e.tensorListMap),
          i = s.map(c => c.id), // ids of loop inputs — never disposed here
          p = await a[0].data();
        // Dispose intermediates produced by the cond evaluation.
        a.forEach(c => {
          !c.kept && i.indexOf(c.id) === -1 && c.dispose();
        });
        let u = s;
        // Iterate: run body, dispose the previous iteration's tensors that
        // are neither kept, loop inputs, nor carried into the next iteration.
        for (; p[0];) {
          let c = u;
          u = await e.functionMap[o].executeFunctionAsync(u, e.tensorArrayMap, e.tensorListMap);
          let l = u.map(d => d.id);
          c.forEach(d => {
            !d.kept && i.indexOf(d.id) === -1 && l.indexOf(d.id) === -1 && d.dispose();
          });
          let m = await e.functionMap[n].executeFunctionAsync(u, e.tensorArrayMap, e.tensorListMap);
          p = await m[0].data(), m.forEach(d => {
            !d.kept && i.indexOf(d.id) === -1 && l.indexOf(d.id) === -1 && d.dispose();
          });
        }
        return u;
      }
    case "LoopCond":
      {
        let o = I("pred", r, t8, e);
        return [Bs(o)];
      }
    case "Switch":
      {
        // Route `data` to output 0 (false) or 1 (true); other output is undefined.
        let o = I("pred", r, t8, e),
          n = I("data", r, t8, e);
        return n.kept || (n = Bs(n)), (await o.data())[0] ? [void 0, n] : [n, void 0];
      }
    case "Merge":
      {
        // Forward the first input that has a resolved value, if any.
        let o = r.inputNames.find(n => zt(n, t8, e) !== void 0);
        if (o) {
          let n = zt(o, t8, e);
          return [Bs(n)];
        }
        return;
      }
    case "Enter":
      {
        let o = I("frameName", r, t8, e),
          n = I("tensor", r, t8, e);
        return e.enterFrame(o), [Bs(n)];
      }
    case "Exit":
      {
        let o = I("tensor", r, t8, e);
        return e.exitFrame(), [Bs(o)];
      }
    case "NextIteration":
      {
        let o = I("tensor", r, t8, e);
        return e.nextIteration(), [Bs(o)];
      }
    // --- TensorArray ops: identified by the array's idTensor handle ---
    case "TensorArrayV3":
      {
        let o = I("size", r, t8, e),
          n = I("dtype", r, t8, e),
          s = I("elementShape", r, t8, e),
          a = I("dynamicSize", r, t8, e),
          i = I("clearAfterRead", r, t8, e),
          p = I("identicalElementShapes", r, t8, e),
          u = I("name", r, t8, e),
          c = new If(u, n, o, s, p, a, i);
        return e.addTensorArray(c), [c.idTensor, ke(1)];
      }
    case "TensorArrayWriteV3":
      {
        let o = I("tensorArrayId", r, t8, e),
          n = I("index", r, t8, e),
          s = I("tensor", r, t8, e),
          a = e.getTensorArray(o.id);
        return a.write(n, s), [a.idTensor];
      }
    case "TensorArrayReadV3":
      {
        let o = I("tensorArrayId", r, t8, e),
          n = I("index", r, t8, e);
        return [e.getTensorArray(o.id).read(n)];
      }
    case "TensorArrayGatherV3":
      {
        let o = I("tensorArrayId", r, t8, e),
          n = I("indices", r, t8, e),
          s = I("dtype", r, t8, e);
        return [e.getTensorArray(o.id).gather(n, s)];
      }
    case "TensorArrayScatterV3":
      {
        let o = I("tensorArrayId", r, t8, e),
          n = I("indices", r, t8, e),
          s = I("tensor", r, t8, e),
          a = e.getTensorArray(o.id);
        return a.scatter(n, s), [a.idTensor];
      }
    case "TensorArrayConcatV3":
      {
        let o = I("tensorArrayId", r, t8, e),
          n = e.getTensorArray(o.id),
          s = I("dtype", r, t8, e);
        return [n.concat(s)];
      }
    case "TensorArraySplitV3":
      {
        let o = I("tensorArrayId", r, t8, e),
          n = I("tensor", r, t8, e),
          s = I("lengths", r, t8, e),
          a = e.getTensorArray(o.id);
        return a.split(s, n), [a.idTensor];
      }
    case "TensorArraySizeV3":
      {
        let o = I("tensorArrayId", r, t8, e),
          n = e.getTensorArray(o.id);
        return [ke(n.size(), "int32")];
      }
    case "TensorArrayCloseV3":
      {
        let o = I("tensorArrayId", r, t8, e),
          n = e.getTensorArray(o.id);
        return n.clearAndClose(), [n.idTensor];
      }
    // --- TensorList ops: identified by the list's idTensor handle ---
    case "TensorListSetItem":
      {
        let o = I("tensorListId", r, t8, e),
          n = I("index", r, t8, e),
          s = I("tensor", r, t8, e),
          a = e.getTensorList(o.id);
        return a.setItem(n, s), [a.idTensor];
      }
    case "TensorListGetItem":
      {
        let o = I("tensorListId", r, t8, e),
          n = I("index", r, t8, e),
          s = I("elementShape", r, t8, e),
          a = I("elementDType", r, t8, e);
        return [e.getTensorList(o.id).getItem(n, s, a)];
      }
    case "TensorListScatterV2":
    case "TensorListScatter":
      {
        let o = I("indices", r, t8, e),
          n = I("tensor", r, t8, e),
          s = I("elementShape", r, t8, e),
          a = I("numElements", r, t8, e),
          i = kT(n, o, s, a);
        return e.addTensorList(i), [i.idTensor];
      }
    case "TensorListReserve":
    case "EmptyTensorList":
      {
        let o = I("elementShape", r, t8, e),
          n = I("elementDType", r, t8, e),
          s;
        // Reserve uses "numElements" with unbounded max; Empty uses "maxNumElements".
        r.op === "TensorListReserve" ? s = "numElements" : s = "maxNumElements";
        let a = I(s, r, t8, e),
          i = r.op === "TensorListReserve" ? -1 : a,
          p = vT(o, n, a, i);
        return e.addTensorList(p), [p.idTensor];
      }
    case "TensorListGather":
      {
        let o = I("tensorListId", r, t8, e),
          n = I("indices", r, t8, e),
          s = I("elementShape", r, t8, e),
          a = I("elementDType", r, t8, e);
        return [e.getTensorList(o.id).gather(n, a, s)];
      }
    case "TensorListStack":
      {
        let o = I("tensorListId", r, t8, e),
          n = I("elementShape", r, t8, e),
          s = I("elementDType", r, t8, e),
          a = I("numElements", r, t8, e);
        return [e.getTensorList(o.id).stack(n, s, a)];
      }
    case "TensorListFromTensor":
      {
        let o = I("tensor", r, t8, e),
          n = I("elementShape", r, t8, e),
          s = I("elementDType", r, t8, e),
          a = IT(o, n, s);
        return e.addTensorList(a), [a.idTensor];
      }
    case "TensorListConcat":
    case "TensorListConcatV2":
      {
        let o = I("tensorListId", r, t8, e),
          n = e.getTensorList(o.id),
          s = I("dtype", r, t8, e),
          a = I("elementShape", r, t8, e);
        return [n.concat(s, a)];
      }
    case "TensorListPushBack":
      {
        let o = I("tensorListId", r, t8, e),
          n = I("tensor", r, t8, e),
          s = e.getTensorList(o.id);
        return s.pushBack(n), [s.idTensor];
      }
    case "TensorListPopBack":
      {
        let o = I("tensorListId", r, t8, e),
          n = I("elementShape", r, t8, e),
          s = I("elementDType", r, t8, e);
        return [e.getTensorList(o.id).popBack(n, s)];
      }
    case "TensorListSplit":
      {
        let o = I("tensor", r, t8, e),
          n = I("elementShape", r, t8, e),
          s = I("lengths", r, t8, e),
          a = NT(o, s, n);
        return e.addTensorList(a), [a.idTensor];
      }
    case "TensorListLength":
      {
        let o = I("tensorListId", r, t8, e),
          n = e.getTensorList(o.id);
        return [ke(n.size(), "int32")];
      }
    case "TensorListResize":
      {
        let o = I("tensorListId", r, t8, e),
          n = I("size", r, t8, e),
          a = e.getTensorList(o.id).resize(n);
        return e.addTensorList(a), [a.idTensor];
      }
    default:
      throw TypeError(`Node type ${r.op} is not implemented`);
  }
};
// Decode the attributes shared by the _FusedConv2D and
// FusedDepthwiseConv2dNative executors into one options object.
// Validates the extra-argument count against the fused-op list and
// rejects fused batch-norm, which the runtime does not support.
function _T(r, t8, e) {
  const [extraOp, activationFunc] = I("fusedOps", r, t8, e);
  const hasBias = extraOp === "biasadd";
  const usesPrelu = activationFunc === "prelu";
  const numArgs = I("numArgs", r, t8, e);
  if (hasBias) {
    if (usesPrelu && numArgs !== 2) throw new Error("FusedConv2d and DepthwiseConv2d with BiasAdd and Prelu must have two extra arguments: bias and alpha.");
    if (!usesPrelu && hasBias && numArgs !== 1) throw new Error("FusedConv2d and DepthwiseConv2d with BiasAdd must have one extra argument: bias.");
  }
  if (extraOp === "fusedbatchnorm") throw new Error("FusedConv2d and DepthwiseConv2d with FusedBatchNorm is not supported");
  const stride = I("strides", r, t8, e);
  const pad = Ol(r, t8, e);
  const dataFormat = I("dataFormat", r, t8, e).toUpperCase();
  const dilations = I("dilations", r, t8, e);
  let [biasArg, preluArg] = I("args", r, t8, e);
  if (!hasBias) {
    // Without BiasAdd the single extra arg is the PReLU weights tensor.
    preluArg = biasArg;
    biasArg = void 0;
  }
  const leakyreluAlpha = I("leakyreluAlpha", r, t8, e);
  return {
    stride,
    pad,
    dataFormat,
    dilations,
    biasArg,
    preluArg,
    activationFunc,
    leakyreluAlpha
  };
}
// Executor for convolution and pooling ops. `r` is the graph node, `t8` the
// tensor map, `e` the execution context, `o` the ops namespace (defaults to
// the bundled tfjs ops, `Je`). Graph attrs store strides/dilations/kernel
// sizes in NHWC 4-tuples; kernels take the spatial [h, w] pair, hence the
// [x[1], x[2]] slicing throughout.
var $T = (r, t8, e, o = Je) => {
  switch (r.op) {
    case "Conv1D":
      {
        let n = I("stride", r, t8, e),
          s = I("pad", r, t8, e),
          a = I("dataFormat", r, t8, e).toUpperCase(),
          i = I("dilation", r, t8, e);
        return [o.conv1d(I("x", r, t8, e), I("filter", r, t8, e), n, s, a, i)];
      }
    case "Conv2D":
      {
        let n = I("strides", r, t8, e),
          s = Ol(r, t8, e),
          a = I("dataFormat", r, t8, e).toUpperCase(),
          i = I("dilations", r, t8, e);
        return [o.conv2d(I("x", r, t8, e), I("filter", r, t8, e), [n[1], n[2]], s, a, [i[1], i[2]])];
      }
    case "_FusedConv2D":
      {
        // Fused conv + bias/activation; attrs decoded by the shared _T helper.
        let {
          stride: n,
          pad: s,
          dataFormat: a,
          dilations: i,
          biasArg: p,
          preluArg: u,
          activationFunc: c,
          leakyreluAlpha: l
        } = _T(r, t8, e);
        return [o.fused.conv2d({
          x: I("x", r, t8, e),
          filter: I("filter", r, t8, e),
          strides: [n[1], n[2]],
          pad: s,
          dataFormat: a,
          dilations: [i[1], i[2]],
          bias: p,
          activation: c,
          preluActivationWeights: u,
          leakyreluAlpha: l
        })];
      }
    case "FusedDepthwiseConv2dNative":
      {
        let {
          stride: n,
          pad: s,
          dataFormat: a,
          dilations: i,
          biasArg: p,
          preluArg: u,
          activationFunc: c,
          leakyreluAlpha: l
        } = _T(r, t8, e);
        return [o.fused.depthwiseConv2d({
          x: I("x", r, t8, e),
          filter: I("filter", r, t8, e),
          strides: [n[1], n[2]],
          pad: s,
          dataFormat: a,
          dilations: [i[1], i[2]],
          bias: p,
          activation: c,
          preluActivationWeights: u,
          leakyreluAlpha: l
        })];
      }
    case "Conv2DBackpropInput":
    case "Conv2dTranspose":
      {
        let n = I("outputShape", r, t8, e),
          s = I("strides", r, t8, e),
          a = Ol(r, t8, e);
        return [o.conv2dTranspose(I("x", r, t8, e), I("filter", r, t8, e), n, [s[1], s[2]], a)];
      }
    case "DepthwiseConv2dNative":
    case "DepthwiseConv2d":
      {
        let n = I("strides", r, t8, e),
          s = Ol(r, t8, e),
          a = I("dilations", r, t8, e),
          i = I("dataFormat", r, t8, e).toUpperCase();
        return [o.depthwiseConv2d(I("input", r, t8, e), I("filter", r, t8, e), [n[1], n[2]], s, i, [a[1], a[2]])];
      }
    case "Conv3D":
      {
        // 3-D conv: strides/dilations are 5-tuples; take the [d, h, w] triple.
        let n = I("strides", r, t8, e),
          s = I("pad", r, t8, e),
          a = I("dataFormat", r, t8, e).toUpperCase(),
          i = I("dilations", r, t8, e);
        return [o.conv3d(I("x", r, t8, e), I("filter", r, t8, e), [n[1], n[2], n[3]], s, a, [i[1], i[2], i[3]])];
      }
    case "AvgPool":
      {
        let n = I("strides", r, t8, e),
          s = I("pad", r, t8, e),
          a = I("kernelSize", r, t8, e);
        return [o.avgPool(I("x", r, t8, e), [a[1], a[2]], [n[1], n[2]], s)];
      }
    case "MaxPool":
      {
        let n = I("strides", r, t8, e),
          s = I("pad", r, t8, e),
          a = I("kernelSize", r, t8, e);
        return [o.maxPool(I("x", r, t8, e), [a[1], a[2]], [n[1], n[2]], s)];
      }
    case "MaxPoolWithArgmax":
      {
        // Two outputs: pooled values and the argmax index tensor.
        let n = I("strides", r, t8, e),
          s = I("pad", r, t8, e),
          a = I("kernelSize", r, t8, e),
          i = I("includeBatchInIndex", r, t8, e),
          {
            result: p,
            indexes: u
          } = o.maxPoolWithArgmax(I("x", r, t8, e), [a[1], a[2]], [n[1], n[2]], s, i);
        return [p, u];
      }
    case "AvgPool3D":
      {
        let n = I("strides", r, t8, e),
          s = I("pad", r, t8, e),
          a = I("kernelSize", r, t8, e);
        return [o.avgPool3d(I("x", r, t8, e), [a[1], a[2], a[3]], [n[1], n[2], n[3]], s)];
      }
    case "MaxPool3D":
      {
        let n = I("strides", r, t8, e),
          s = I("pad", r, t8, e),
          a = I("kernelSize", r, t8, e);
        return [o.maxPool3d(I("x", r, t8, e), [a[1], a[2], a[3]], [n[1], n[2], n[3]], s)];
      }
    case "Dilation2D":
      {
        let n = I("strides", r, t8, e),
          s = I("pad", r, t8, e),
          a = I("dilations", r, t8, e),
          i = n[1],
          p = n[2],
          u = a[1],
          c = a[2];
        return [o.dilation2d(I("x", r, t8, e), I("filter", r, t8, e), [i, p], s, [u, c], "NHWC")];
      }
    default:
      throw TypeError(`Node type ${r.op} is not implemented`);
  }
};
// Executor for tensor-creation ops (Fill, Range, random generators, ones/
// zeros, OneHot, ...). Resolves node attrs via `I` and calls the matching
// factory on ops namespace `o` (defaults to the bundled tfjs ops, `Je`).
var ET = (r, t8, e, o = Je) => {
  switch (r.op) {
    case "Fill":
      {
        let n = I("shape", r, t8, e),
          s = I("dtype", r, t8, e),
          a = I("value", r, t8, e);
        return [o.fill(n, a, s)];
      }
    case "LinSpace":
      {
        let n = I("start", r, t8, e),
          s = I("stop", r, t8, e),
          a = I("num", r, t8, e);
        return [o.linspace(n, s, a)];
      }
    case "Multinomial":
      {
        let n = I("logits", r, t8, e),
          s = I("numSamples", r, t8, e),
          a = I("seed", r, t8, e);
        return [o.multinomial(n, s, a)];
      }
    case "OneHot":
      {
        let n = I("indices", r, t8, e),
          s = I("depth", r, t8, e),
          a = I("onValue", r, t8, e),
          i = I("offValue", r, t8, e),
          p = I("dtype", r, t8, e);
        return [o.oneHot(n, s, a, i, p)];
      }
    case "Ones":
      return [o.ones(I("shape", r, t8, e), I("dtype", r, t8, e))];
    case "OnesLike":
      return [o.onesLike(I("x", r, t8, e))];
    case "RandomStandardNormal":
      return [o.randomStandardNormal(I("shape", r, t8, e), I("dtype", r, t8, e), I("seed", r, t8, e))];
    case "RandomUniform":
      return [o.randomUniform(I("shape", r, t8, e), I("minval", r, t8, e), I("maxval", r, t8, e), I("dtype", r, t8, e))];
    case "RandomUniformInt":
      return [o.randomUniformInt(I("shape", r, t8, e), I("minval", r, t8, e), I("maxval", r, t8, e), I("seed", r, t8, e))];
    case "Range":
      {
        let n = I("start", r, t8, e),
          s = I("stop", r, t8, e),
          a = I("step", r, t8, e);
        return [o.range(n, s, a, I("dtype", r, t8, e))];
      }
    case "TruncatedNormal":
      {
        let n = I("shape", r, t8, e),
          s = I("mean", r, t8, e),
          a = I("stdDev", r, t8, e),
          i = I("seed", r, t8, e);
        return [o.truncatedNormal(n, s, a, I("dtype", r, t8, e), i)];
      }
    case "Zeros":
      return [o.zeros(I("shape", r, t8, e), I("dtype", r, t8, e))];
    case "ZerosLike":
      return [o.zerosLike(I("x", r, t8, e))];
    default:
      throw TypeError(`Node type ${r.op} is not implemented`);
  }
};
// Collect the inputs common to all NonMaxSuppression variants for node `r`
// into a single named-field object (resolved through the param getter `I`).
function PS(r, t8, e) {
  const boxes = I("boxes", r, t8, e);
  const scores = I("scores", r, t8, e);
  const maxOutputSize = I("maxOutputSize", r, t8, e);
  const iouThreshold = I("iouThreshold", r, t8, e);
  const scoreThreshold = I("scoreThreshold", r, t8, e);
  const softNmsSigma = I("softNmsSigma", r, t8, e);
  return {
    boxes,
    scores,
    maxOutputSize,
    iouThreshold,
    scoreThreshold,
    softNmsSigma
  };
}
// Executor for ops with data-dependent output shapes (NonMaxSuppression
// variants, Where, ListDiff) — async because these kernels must read tensor
// values. `n` is the ops namespace (defaults to the bundled tfjs ops, `Je`);
// the `o` parameter is unused here but kept for executor-signature parity.
var RT = async (r, t8, e, o, n = Je) => {
  switch (r.op) {
    case "NonMaxSuppressionV5":
      {
        // Soft-NMS variant: also returns the adjusted scores.
        let {
          boxes: s,
          scores: a,
          maxOutputSize: i,
          iouThreshold: p,
          scoreThreshold: u,
          softNmsSigma: c
        } = PS(r, t8, e),
          l = await n.image.nonMaxSuppressionWithScoreAsync(s, a, i, p, u, c);
        return [l.selectedIndices, l.selectedScores];
      }
    case "NonMaxSuppressionV4":
      {
        // Padded variant: also returns the count of valid outputs.
        let {
          boxes: s,
          scores: a,
          maxOutputSize: i,
          iouThreshold: p,
          scoreThreshold: u
        } = PS(r, t8, e),
          c = I("padToMaxOutputSize", r, t8, e),
          l = await n.image.nonMaxSuppressionPaddedAsync(s, a, i, p, u, c);
        return [l.selectedIndices, l.validOutputs];
      }
    case "NonMaxSuppressionV3":
    case "NonMaxSuppressionV2":
      {
        let {
          boxes: s,
          scores: a,
          maxOutputSize: i,
          iouThreshold: p,
          scoreThreshold: u
        } = PS(r, t8, e);
        return [await n.image.nonMaxSuppressionAsync(s, a, i, p, u)];
      }
    case "Where":
      {
        // Cast condition to bool and dispose the temporary after use.
        let s = n.cast(I("condition", r, t8, e), "bool"),
          a = [await n.whereAsync(s)];
        return s.dispose(), a;
      }
    case "ListDiff":
      return n.setdiff1dAsync(I("x", r, t8, e), I("y", r, t8, e));
    default:
      throw TypeError(`Node type ${r.op} is not implemented`);
  }
};
// Executor for evaluation/search ops (LowerBound, TopKV2, UpperBound,
// Unique variants). Multi-output kernels are unpacked into flat arrays.
// `o` is the ops namespace (defaults to the bundled tfjs ops, `Je`).
var DT = (r, t8, e, o = Je) => {
  switch (r.op) {
    case "LowerBound":
      {
        let n = I("sortedSequence", r, t8, e),
          s = I("values", r, t8, e);
        return [o.lowerBound(n, s)];
      }
    case "TopKV2":
      {
        let n = I("x", r, t8, e),
          s = I("k", r, t8, e),
          a = I("sorted", r, t8, e),
          i = o.topk(n, s, a);
        return [i.values, i.indices];
      }
    case "UpperBound":
      {
        let n = I("sortedSequence", r, t8, e),
          s = I("values", r, t8, e);
        return [o.upperBound(n, s)];
      }
    case "Unique":
      {
        let n = I("x", r, t8, e),
          s = o.unique(n);
        return [s.values, s.indices];
      }
    case "UniqueV2":
      {
        let n = I("x", r, t8, e),
          s = I("axis", r, t8, e),
          a = o.unique(n, s);
        return [a.values, a.indices];
      }
    default:
      throw TypeError(`Node type ${r.op} is not implemented`);
  }
};
// Executor for basic graph ops (Const, Placeholder, Identity, Shape, Print,
// ...). NOTE(review): several cases declare `let` bindings without braces,
// so those bindings share the whole switch's scope — preserved as-is from
// the generated bundle; do not reorder cases.
var AT = (r, t8, e, o = Je) => {
  switch (r.op) {
    case "Const":
      return t8[r.name];
    case "PlaceholderWithDefault":
      // Use the fed value when present, otherwise the node's default input.
      let n = I("default", r, t8, e);
      return [zt(r.name, t8, e) || n];
    case "Placeholder":
      return [zt(r.name, t8, e)];
    case "Identity":
    case "StopGradient":
    case "FakeQuantWithMinMaxVars":
      {
        let c = I("x", r, t8, e);
        return [Bs(c)]; // pass through a clone so the caller owns the output
      }
    case "IdentityN":
      return I("x", r, t8, e).map(c => Bs(c));
    case "Snapshot":
      let s = I("x", r, t8, e);
      return [Bs(s)];
    case "Shape":
      return [o.tensor1d(I("x", r, t8, e).shape, "int32")];
    case "ShapeN":
      return I("x", r, t8, e).map(c => o.tensor1d(c.shape));
    case "Size":
      return [o.scalar(I("x", r, t8, e).size, "int32")];
    case "Rank":
      return [o.scalar(I("x", r, t8, e).rank, "int32")];
    case "NoOp":
      return [o.scalar(1)];
    case "Print":
      // Debug op: logs message and a truncated view of each data tensor,
      // then forwards `x` unchanged.
      let a = I("x", r, t8, e),
        i = I("data", r, t8, e),
        p = I("message", r, t8, e),
        u = I("summarize", r, t8, e);
      console.warn("The graph has a tf.print() operation,usually used for debugging, which slows down performance."), console.log(p);
      for (let c = 0; c < i.length; c++) console.log(Array.prototype.slice.call(i[c].dataSync()).slice(0, u));
      return [a];
    default:
      throw TypeError(`Node type ${r.op} is not implemented`);
  }
};
// Runtime HashTable backing the HashTable/LookupTable graph ops: maps
// primitive keys (of `keyDType`) to tensors (of `valueDType`). Identified
// externally by `handle` (a kept scalar tensor) via its `id`.
var vf = class {
  // The table's identity, taken from its handle tensor.
  get id() {
    return this.handle.id;
  }
  constructor(t8, e) {
    // `ke(0)` creates the handle scalar; `Rr` keeps it alive across tidies.
    this.keyDType = t8, this.valueDType = e, this.handle = ke(0), this.tensorMap = /* @__PURE__ */new Map(), Rr(this.handle);
  }
  // Dispose every stored value tensor and the handle itself.
  clearAndClose() {
    this.tensorMap.forEach(t8 => t8.dispose()), this.tensorMap.clear(), this.handle.dispose();
  }
  // Number of entries (plain JS number).
  size() {
    return this.tensorMap.size;
  }
  // Number of entries as an int32 scalar tensor.
  tensorSize() {
    return ke(this.size(), "int32");
  }
  // Replace the table contents with key tensor `t8` / value tensor `e`.
  // Previous values are disposed; new values are kept. Returns the handle.
  async import(t8, e) {
    this.checkKeyAndValueTensor(t8, e);
    let o = await t8.data();
    return this.tensorMap.forEach(n => n.dispose()), this.tensorMap.clear(), De(() => {
      let n = fo(e), // unstack values into one tensor per key
        s = o.length,
        a = n.length;
      y.assert(s === a, () => `The number of elements doesn't match, keys has ${s} elements, the values has ${a} elements.`);
      for (let i = 0; i < s; i++) {
        let p = o[i],
          u = n[i];
        Rr(u), this.tensorMap.set(p, u);
      }
      return this.handle;
    });
  }
  // Look up each key in tensor `t8`, substituting `e` (the default value
  // tensor) for missing keys; returns the stacked results.
  async find(t8, e) {
    this.checkKeyAndValueTensor(t8, e);
    let o = await t8.data();
    return De(() => {
      let n = [];
      for (let s = 0; s < o.length; s++) {
        let a = o[s],
          i = this.findWithDefault(a, e);
        n.push(i);
      }
      return kr(n);
    });
  }
  // Single-key lookup returning `e` when the key is absent.
  findWithDefault(t8, e) {
    let o = this.tensorMap.get(t8);
    return o != null ? o : e;
  }
  // Validate that the key/value tensors match the table's declared dtypes.
  checkKeyAndValueTensor(t8, e) {
    if (t8.dtype !== this.keyDType) throw new Error(`Expect key dtype ${this.keyDType}, but got ${t8.dtype}`);
    if (e.dtype !== this.valueDType) throw new Error(`Expect value dtype ${this.valueDType}, but got ${e.dtype}`);
  }
};
// Executor for hash-table ops. `o` here is the resource/context manager that
// registers tables by node name and resolves them by handle id. Async
// because import/find read tensor data.
var FT = async (r, t8, e, o) => {
  switch (r.op) {
    case "HashTable":
    case "HashTableV2":
      {
        // Reuse an existing table for this node name, else create one.
        let n = o.getHashTableHandleByName(r.name);
        if (n != null) return [n];
        {
          let s = I("keyDType", r, t8, e),
            a = I("valueDType", r, t8, e),
            i = new vf(s, a);
          return o.addHashTable(r.name, i), [i.handle];
        }
      }
    case "InitializeTable":
    case "InitializeTableV2":
    case "LookupTableImport":
    case "LookupTableImportV2":
      {
        let n = I("tableHandle", r, t8, e, o),
          s = I("keys", r, t8, e),
          a = I("values", r, t8, e);
        return [await o.getHashTableById(n.id).import(s, a)];
      }
    case "LookupTableFind":
    case "LookupTableFindV2":
      {
        let n = I("tableHandle", r, t8, e, o),
          s = I("keys", r, t8, e),
          a = I("defaultValue", r, t8, e);
        return [await o.getHashTableById(n.id).find(s, a)];
      }
    case "LookupTableSize":
    case "LookupTableSizeV2":
      {
        let n = I("tableHandle", r, t8, e, o);
        return [o.getHashTableById(n.id).tensorSize()];
      }
    default:
      throw TypeError(`Node type ${r.op} is not implemented`);
  }
};
// Executor for image ops (resize, crop-and-resize, projective transform).
// `o` is the ops namespace (defaults to the bundled tfjs ops, `Je`).
var PT = (r, t8, e, o = Je) => {
  switch (r.op) {
    case "ResizeBilinear":
      {
        let n = I("images", r, t8, e),
          s = I("size", r, t8, e),
          a = I("alignCorners", r, t8, e),
          i = I("halfPixelCenters", r, t8, e);
        return [o.image.resizeBilinear(n, [s[0], s[1]], a, i)];
      }
    case "ResizeNearestNeighbor":
      {
        let n = I("images", r, t8, e),
          s = I("size", r, t8, e),
          a = I("alignCorners", r, t8, e),
          i = I("halfPixelCenters", r, t8, e);
        return [o.image.resizeNearestNeighbor(n, [s[0], s[1]], a, i)];
      }
    case "CropAndResize":
      {
        let n = I("image", r, t8, e),
          s = I("boxes", r, t8, e),
          a = I("boxInd", r, t8, e),
          i = I("cropSize", r, t8, e),
          p = I("method", r, t8, e),
          u = I("extrapolationValue", r, t8, e);
        return [o.image.cropAndResize(n, s, a, i, p, u)];
      }
    case "ImageProjectiveTransformV3":
      {
        // Note: tfjs transform() expects lower-case interpolation/fill-mode
        // strings, while the graph attrs are upper-case.
        let n = I("images", r, t8, e),
          s = I("transforms", r, t8, e),
          a = I("outputShape", r, t8, e),
          i = I("fillValue", r, t8, e),
          p = I("interpolation", r, t8, e),
          u = I("fillMode", r, t8, e);
        return [o.image.transform(n, s, p.toLowerCase(), u.toLowerCase(), i, a)];
      }
    default:
      throw TypeError(`Node type ${r.op} is not implemented`);
  }
};
// Executor for comparison / logical ops. Resolves each named input of node
// `r` through the param getter `I` and forwards it to the matching kernel
// on ops namespace `o` (defaults to the bundled tfjs ops, `Je`).
var OT = (r, t8, e, o = Je) => {
  const arg = (name) => I(name, r, t8, e);
  switch (r.op) {
    case "Equal":
      return [o.equal(arg("a"), arg("b"))];
    case "NotEqual":
      return [o.notEqual(arg("a"), arg("b"))];
    case "Greater":
      return [o.greater(arg("a"), arg("b"))];
    case "GreaterEqual":
      return [o.greaterEqual(arg("a"), arg("b"))];
    case "Less":
      return [o.less(arg("a"), arg("b"))];
    case "LessEqual":
      return [o.lessEqual(arg("a"), arg("b"))];
    case "LogicalAnd":
      return [o.logicalAnd(arg("a"), arg("b"))];
    case "LogicalNot":
      return [o.logicalNot(arg("a"))];
    case "LogicalOr":
      return [o.logicalOr(arg("a"), arg("b"))];
    case "Select":
    case "SelectV2":
      return [o.where(arg("condition"), arg("a"), arg("b"))];
    case "BitwiseAnd":
      return [o.bitwiseAnd(arg("a"), arg("b"))];
    default:
      throw TypeError(`Node type ${r.op} is not implemented`);
  }
};
// Executor for matrix ops (MatMul variants, Einsum, Transpose, fused
// MatMul, band-part). NOTE(review): the _FusedMatMul case declares `let`
// bindings without braces (switch-wide scope) — preserved as generated.
var MT = (r, t8, e, o = Je) => {
  switch (r.op) {
    case "BatchMatMul":
    case "BatchMatMulV2":
    case "MatMul":
      return [o.matMul(I("a", r, t8, e), I("b", r, t8, e), I("transposeA", r, t8, e), I("transposeB", r, t8, e))];
    case "Einsum":
      return [o.einsum(I("equation", r, t8, e), ...I("tensors", r, t8, e))];
    case "Transpose":
      return [o.transpose(I("x", r, t8, e), I("perm", r, t8, e))];
    case "_FusedMatMul":
      // Fused matmul + bias/activation; validates the extra-arg count.
      let [n, s] = I("fusedOps", r, t8, e),
        a = n === "biasadd",
        i = s === "prelu",
        p = I("numArgs", r, t8, e),
        u = I("leakyreluAlpha", r, t8, e);
      if (a) {
        if (i && p !== 2) throw new Error("Fused MatMul with BiasAdd and Prelu must have two extra arguments: bias and alpha.");
        if (!i && p !== 1) throw new Error("Fused MatMul with BiasAdd must have one extra argument: bias.");
      }
      let [c, l] = I("args", r, t8, e);
      return [o.fused.matMul({
        a: I("a", r, t8, e),
        b: I("b", r, t8, e),
        transposeA: I("transposeA", r, t8, e),
        transposeB: I("transposeB", r, t8, e),
        bias: c,
        activation: s,
        preluActivationWeights: l,
        leakyreluAlpha: u
      })];
    case "MatrixBandPart":
      return [o.linalg.bandPart(I("a", r, t8, e), I("numLower", r, t8, e), I("numUpper", r, t8, e))];
    default:
      throw TypeError(`Node type ${r.op} is not implemented`);
  }
};
// Executor for normalization ops. All three FusedBatchNorm graph variants
// map onto the same tfjs batchNorm kernel (the original emitted V3 as a
// separate, identical case; the labels are merged here — same behavior).
var LT = (r, t8, e, o = Je) => {
  const arg = (name) => I(name, r, t8, e);
  switch (r.op) {
    case "EuclideanNorm":
      return [o.euclideanNorm(arg("x"), arg("axis"), arg("keepDims"))];
    case "FusedBatchNorm":
    case "FusedBatchNormV2":
    case "FusedBatchNormV3":
      return [o.batchNorm(arg("x"), arg("mean"), arg("variance"), arg("offset"), arg("scale"), arg("epsilon"))];
    case "LRN":
      return [o.localResponseNormalization(arg("x"), arg("radius"), arg("bias"), arg("alpha"), arg("beta"))];
    case "Softmax":
      return [o.softmax(arg("x"))];
    case "LogSoftmax":
      return [o.logSoftmax(arg("x"))];
    default:
      throw TypeError(`Node type ${r.op} is not implemented`);
  }
};
// Executor for ragged-tensor ops. Multi-output kernels return
// {splits, values} objects that are flattened into an output array.
var BT = (r, t8, e, o = Je) => {
  switch (r.op) {
    case "RaggedGather":
      {
        let {
          outputNestedSplits: n,
          outputDenseValues: s
        } = o.raggedGather(I("paramsNestedSplits", r, t8, e), I("paramsDenseValues", r, t8, e), I("indices", r, t8, e), I("outputRaggedRank", r, t8, e));
        // Outputs are the nested splits followed by the dense values.
        return n.concat(s);
      }
    case "RaggedRange":
      {
        let {
          rtNestedSplits: n,
          rtDenseValues: s
        } = o.raggedRange(I("starts", r, t8, e), I("limits", r, t8, e), I("splits", r, t8, e));
        return [n, s];
      }
    case "RaggedTensorToTensor":
      return [o.raggedTensorToTensor(I("shape", r, t8, e), I("values", r, t8, e), I("defaultValue", r, t8, e), I("rowPartitionTensors", r, t8, e), I("rowPartitionTypes", r, t8, e))];
    default:
      throw TypeError(`Node type ${r.op} is not implemented`);
  }
};
// Executor for reduction ops (Max/Mean/Min/Sum/All/Any/ArgMax/ArgMin/Prod,
// cumulative ops, bincount). NOTE(review): the Bincount case declares `let`
// bindings without braces (switch-wide scope) — preserved as generated.
var zT = (r, t8, e, o = Je) => {
  switch (r.op) {
    case "Max":
      {
        let i = I("axis", r, t8, e),
          p = I("keepDims", r, t8, e);
        return [o.max(I("x", r, t8, e), i, p)];
      }
    case "Mean":
      {
        let i = I("axis", r, t8, e),
          p = I("keepDims", r, t8, e);
        return [o.mean(I("x", r, t8, e), i, p)];
      }
    case "Min":
      {
        let i = I("axis", r, t8, e),
          p = I("keepDims", r, t8, e);
        return [o.min(I("x", r, t8, e), i, p)];
      }
    case "Sum":
      {
        let i = I("axis", r, t8, e),
          p = I("keepDims", r, t8, e);
        return [o.sum(I("x", r, t8, e), i, p)];
      }
    case "All":
      {
        let i = I("axis", r, t8, e),
          p = I("keepDims", r, t8, e);
        return [o.all(I("x", r, t8, e), i, p)];
      }
    case "Any":
      {
        let i = I("axis", r, t8, e),
          p = I("keepDims", r, t8, e);
        return [o.any(I("x", r, t8, e), i, p)];
      }
    case "ArgMax":
      {
        let i = I("axis", r, t8, e);
        return [o.argMax(I("x", r, t8, e), i)];
      }
    case "ArgMin":
      {
        let i = I("axis", r, t8, e);
        return [o.argMin(I("x", r, t8, e), i)];
      }
    case "Prod":
      {
        let i = I("axis", r, t8, e),
          p = I("keepDims", r, t8, e);
        return [o.prod(I("x", r, t8, e), i, p)];
      }
    case "Cumprod":
      {
        let i = I("axis", r, t8, e),
          p = I("exclusive", r, t8, e),
          u = I("reverse", r, t8, e);
        return [o.cumprod(I("x", r, t8, e), i, p, u)];
      }
    case "Cumsum":
      {
        let i = I("axis", r, t8, e),
          p = I("exclusive", r, t8, e),
          u = I("reverse", r, t8, e);
        return [o.cumsum(I("x", r, t8, e), i, p, u)];
      }
    case "Bincount":
      let n = I("x", r, t8, e),
        s = I("weights", r, t8, e),
        a = I("size", r, t8, e);
      return [o.bincount(n, s, a)];
    case "DenseBincount":
      {
        let i = I("x", r, t8, e),
          p = I("weights", r, t8, e),
          u = I("size", r, t8, e),
          c = I("binaryOutput", r, t8, e);
        return [o.denseBincount(i, p, u, c)];
      }
    default:
      throw TypeError(`Node type ${r.op} is not implemented`);
  }
};
// Executor for slice-and-join ops (Concat, Gather, Slice, StridedSlice,
// Pack/Unpack, Tile, Split, scatter/gather-ND, SparseToDense, ...).
var VT = (r, t8, e, o = Je) => {
  switch (r.op) {
    case "ConcatV2":
    case "Concat":
      {
        let n = I("n", r, t8, e),
          s = I("axis", r, t8, e),
          a = I("tensors", r, t8, e);
        // Only the first `n` tensors participate in the concat.
        return a = a.slice(0, n), [o.concat(a, s)];
      }
    case "Gather":
      {
        let n = I("x", r, t8, e),
          s = I("indices", r, t8, e);
        return [o.gather(n, o.cast(s, "int32"), 0)];
      }
    case "GatherV2":
      {
        let n = I("axis", r, t8, e),
          s = I("batchDims", r, t8, e),
          a = I("x", r, t8, e),
          i = I("indices", r, t8, e);
        return [o.gather(a, o.cast(i, "int32"), n, s)];
      }
    case "Reverse":
      {
        // Boolean dims mask -> list of axis indices to reverse.
        let n = I("dims", r, t8, e),
          s = [];
        for (let i = 0; i < n.length; i++) n[i] && s.push(i);
        let a = I("x", r, t8, e);
        return [o.reverse(a, s)];
      }
    case "ReverseV2":
      {
        let n = I("axis", r, t8, e),
          s = I("x", r, t8, e);
        return [o.reverse(s, n)];
      }
    case "Slice":
      {
        let n = I("begin", r, t8, e),
          s = I("size", r, t8, e);
        return [o.slice(I("x", r, t8, e), n, s)];
      }
    case "StridedSlice":
      {
        let n = I("begin", r, t8, e),
          s = I("end", r, t8, e),
          a = I("strides", r, t8, e),
          i = I("beginMask", r, t8, e),
          p = I("endMask", r, t8, e),
          u = I("ellipsisMask", r, t8, e),
          c = I("newAxisMask", r, t8, e),
          l = I("shrinkAxisMask", r, t8, e),
          m = I("x", r, t8, e);
        return [o.stridedSlice(m, n, s, a, i, p, u, c, l)];
      }
    case "Pack":
      return De(() => {
        // Stack tensors along `axis`; tensors whose shape differs from the
        // first only by squeezable dims are reshaped to match first.
        let n = I("axis", r, t8, e),
          s = I("tensors", r, t8, e),
          a = s[0].shape,
          i = o.squeeze(s[0]).shape,
          p = s.map(u => {
            let c = y.arraysEqual(u.shape, a);
            if (!c && !y.arraysEqual(o.squeeze(u).shape, i)) throw new Error("the input tensors shape does not match");
            return c ? u : o.reshape(u, a);
          });
        return [o.stack(p, n)];
      });
    case "Unpack":
      {
        let n = I("axis", r, t8, e),
          s = I("tensor", r, t8, e);
        return o.unstack(s, n);
      }
    case "Tile":
      {
        let n = I("reps", r, t8, e);
        return [o.tile(I("x", r, t8, e), n)];
      }
    case "Split":
    case "SplitV":
      {
        let n = I("axis", r, t8, e),
          s = I("numOrSizeSplits", r, t8, e),
          a = I("x", r, t8, e);
        return o.split(a, s, n);
      }
    case "ScatterNd":
      {
        let n = I("indices", r, t8, e),
          s = I("values", r, t8, e),
          a = I("shape", r, t8, e);
        return [o.scatterND(n, s, a)];
      }
    case "GatherNd":
      {
        let n = I("x", r, t8, e),
          s = I("indices", r, t8, e);
        return [o.gatherND(n, s)];
      }
    case "SparseToDense":
      {
        // Default value is cast to the sparse values' dtype when they differ.
        let n = I("sparseIndices", r, t8, e),
          s = I("outputShape", r, t8, e),
          a = I("sparseValues", r, t8, e),
          i = I("defaultValue", r, t8, e);
        return [o.sparseToDense(n, a, s, a.dtype === i.dtype ? i : o.cast(i, a.dtype))];
      }
    case "TensorScatterUpdate":
      {
        let n = I("indices", r, t8, e),
          s = I("values", r, t8, e),
          a = I("tensor", r, t8, e);
        return [o.tensorScatterUpdate(a, n, s)];
      }
    default:
      throw TypeError(`Node type ${r.op} is not implemented`);
  }
};
// Executor for sparse-tensor ops. Multi-output kernels are unpacked into
// flat arrays in the documented TensorFlow output order.
var WT = (r, t8, e, o = Je) => {
  switch (r.op) {
    case "SparseFillEmptyRows":
      {
        let {
          outputIndices: n,
          outputValues: s,
          emptyRowIndicator: a,
          reverseIndexMap: i
        } = o.sparse.sparseFillEmptyRows(I("indices", r, t8, e), I("values", r, t8, e), I("denseShape", r, t8, e), I("defaultValue", r, t8, e));
        return [n, s, a, i];
      }
    case "SparseReshape":
      {
        let {
          outputIndices: n,
          outputShape: s
        } = o.sparse.sparseReshape(I("inputIndices", r, t8, e), I("inputShape", r, t8, e), I("newShape", r, t8, e));
        return [n, s];
      }
    case "SparseSegmentMean":
      return [o.sparse.sparseSegmentMean(I("data", r, t8, e), I("indices", r, t8, e), I("segmentIds", r, t8, e))];
    case "SparseSegmentSum":
      return [o.sparse.sparseSegmentSum(I("data", r, t8, e), I("indices", r, t8, e), I("segmentIds", r, t8, e))];
    default:
      throw TypeError(`Node type ${r.op} is not implemented`);
  }
};
// Executor for "spectral"-category graph ops. r: graph node, t8: tensor-name ->
// tensor map, e: execution context, o: ops backend (defaults to the bundle-level
// `Je`). Every spectral op takes a single input "x" and maps 1:1 onto a backend
// method, so dispatch is table-driven. Returns a one-element output array.
var UT = (r, t8, e, o = Je) => {
  const spectralKernels = {
    FFT: "fft",
    IFFT: "ifft",
    RFFT: "rfft",
    IRFFT: "irfft"
  };
  const kernelName = spectralKernels[r.op];
  // Unknown op: fail before resolving any inputs, as the switch version did.
  if (kernelName == null) throw TypeError(`Node type ${r.op} is not implemented`);
  return [o[kernelName](I("x", r, t8, e))];
};
// Executor for "string"-category graph ops. r: graph node, t8: tensor-name ->
// tensor map, e: execution context, o: ops backend (defaults to the bundle-level
// `Je`). `I(...)` resolves a node input/attribute by name. Returns the node's
// outputs as a tensor array.
var GT = (r, t8, e, o = Je) => {
  switch (r.op) {
    case "StaticRegexReplace":
      return [o.string.staticRegexReplace(I("input", r, t8, e), I("pattern", r, t8, e), I("rewrite", r, t8, e), I("replaceGlobal", r, t8, e))];
    case "StringNGrams":
      {
        // Two named outputs: the n-grams and their row splits.
        let {
          nGrams: n,
          nGramsSplits: s
        } = o.string.stringNGrams(I("data", r, t8, e), I("dataSplits", r, t8, e), I("separator", r, t8, e), I("nGramWidths", r, t8, e), I("leftPad", r, t8, e), I("rightPad", r, t8, e), I("padWidth", r, t8, e), I("preserveShortSequences", r, t8, e));
        return [n, s];
      }
    case "StringSplit":
      {
        // Sparse-tensor style result: indices, values, dense shape.
        let {
          indices: n,
          values: s,
          shape: a
        } = o.string.stringSplit(I("input", r, t8, e), I("delimiter", r, t8, e), I("skipEmpty", r, t8, e));
        return [n, s, a];
      }
    case "StringToHashBucketFast":
      return [o.string.stringToHashBucketFast(I("input", r, t8, e), I("numBuckets", r, t8, e))];
    default:
      throw TypeError(`Node type ${r.op} is not implemented`);
  }
};
// Executor for "transformation"-category graph ops (shape/layout changes such as
// cast, reshape, pad, space<->batch, depth<->space, broadcast). r: graph node,
// t8: tensor-name -> tensor map, e: execution context, o: ops backend (defaults
// to the bundle-level `Je`). Returns the node's outputs as a tensor array.
var HT = (r, t8, e, o = Je) => {
  switch (r.op) {
    case "Cast":
      return [o.cast(I("x", r, t8, e), I("dtype", r, t8, e))];
    case "ExpandDims":
      {
        let n = I("axis", r, t8, e);
        return [o.expandDims(I("x", r, t8, e), n)];
      }
    case "Squeeze":
      {
        let n = I("axis", r, t8, e);
        return [o.squeeze(I("x", r, t8, e), n)];
      }
    case "Reshape":
      return [o.reshape(I("x", r, t8, e), I("shape", r, t8, e))];
    case "EnsureShape":
      return [o.ensureShape(I("x", r, t8, e), I("shape", r, t8, e))];
    case "MirrorPad":
      return [o.mirrorPad(I("x", r, t8, e), I("padding", r, t8, e), I("mode", r, t8, e))];
    case "PadV2":
    case "Pad":
      // PadV2 and Pad share the same kernel; constantValue fills padded cells.
      return [o.pad(I("x", r, t8, e), I("padding", r, t8, e), I("constantValue", r, t8, e))];
    case "SpaceToBatchND":
      {
        let n = I("blockShape", r, t8, e),
          s = I("paddings", r, t8, e);
        return [o.spaceToBatchND(I("x", r, t8, e), n, s)];
      }
    case "BatchToSpaceND":
      {
        let n = I("blockShape", r, t8, e),
          s = I("crops", r, t8, e);
        return [o.batchToSpaceND(I("x", r, t8, e), n, s)];
      }
    case "DepthToSpace":
      {
        let n = I("blockSize", r, t8, e),
          // Data format attr is normalized to upper case (e.g. "NHWC").
          s = I("dataFormat", r, t8, e).toUpperCase();
        return [o.depthToSpace(I("x", r, t8, e), n, s)];
      }
    case "BroadcastTo":
      return [o.broadcastTo(I("x", r, t8, e), I("shape", r, t8, e))];
    case "BroadcastArgs":
      return [o.broadcastArgs(I("s0", r, t8, e), I("s1", r, t8, e))];
    default:
      throw TypeError(`Node type ${r.op} is not implemented`);
  }
};
// Executes a single graph node by dispatching to its category-specific executor.
// r: node, t8: tensor-name -> tensor map, e: execution context, o: resource
// manager (only forwarded to the hash_table executor), n: scoping function
// (defaults to `De`) that wraps the executor call — presumably a tidy()-style
// wrapper that cleans up intermediate tensors (TODO confirm). Note that the
// "control", "dynamic", "hash_table" and "custom" categories are called WITHOUT
// the wrapper — their executors may return promises or retain state.
// Returns the node's outputs normalized to an array (or a promise of one).
function OS(r, t8, e, o, n = De) {
  let s = ((a, i, p) => {
    switch (a.category) {
      case "arithmetic":
        return n(() => CT(a, i, p));
      case "basic_math":
        return n(() => wT(a, i, p));
      case "control":
        return TT(a, i, p);
      case "convolution":
        return n(() => $T(a, i, p));
      case "creation":
        return n(() => ET(a, i, p));
      case "dynamic":
        return RT(a, i, p);
      case "evaluation":
        return n(() => DT(a, i, p));
      case "image":
        return n(() => PT(a, i, p));
      case "graph":
        return n(() => AT(a, i, p));
      case "logical":
        return n(() => OT(a, i, p));
      case "matrices":
        return n(() => MT(a, i, p));
      case "normalization":
        return n(() => LT(a, i, p));
      case "ragged":
        return n(() => BT(a, i, p));
      case "reduction":
        return n(() => zT(a, i, p));
      case "slice_join":
        return n(() => VT(a, i, p));
      case "sparse":
        return n(() => WT(a, i, p));
      case "spectral":
        return n(() => UT(a, i, p));
      case "string":
        return n(() => GT(a, i, p));
      case "transformation":
        return n(() => HT(a, i, p));
      case "hash_table":
        // Hash-table ops also need the resource manager `o`.
        return FT(a, i, p, o);
      case "custom":
        // Custom ops are looked up in the user-registered op registry (pf).
        let u = pf(a.op);
        if (u && u.customExecutor) return u.customExecutor(new wf(a, i, p));
        throw TypeError(`Custom op ${a.op} is not registered.`);
      default:
        throw TypeError(`Unknown op '${a.op}'. File an issue at https://github.com/tensorflow/tfjs/issues so we can add it, or register a custom execution with tf.registerOp()`);
    }
  })(r, t8, e);
  // Executors may return a tensor, a tensor array, or a promise of either —
  // normalize everything to (a promise of) an array.
  return y.isPromise(s) ? s.then(a => [].concat(a)) : [].concat(s);
}
// Execution context for the graph executor. Tracks the stack of control-flow
// frames (created by Enter/Exit/NextIteration ops) plus per-execution
// TensorArray / TensorList / function-executor state. The frame stack is
// serialized into string ids used to namespace tensors produced inside loops.
var Ll = class {
  // t8: weight map, e: TensorArray map, o: TensorList map, n: function map,
  // s: parse-node-name cache shared with the executor.
  constructor(t8 = {}, e = {}, o = {}, n = {}, s) {
    this.weightMap = t8, this.tensorArrayMap = e, this.tensorListMap = o, this.functionMap = n, this.parseNodeNameCache = s, this.rootContext = {
      id: 0,
      frameName: "",
      iterationId: 0
    }, this.contexts = [this.rootContext], this.lastId = 0, this.generateCurrentContextIds();
  }
  // Builds a fresh frame record for Enter ops.
  newFrame(t8, e) {
    return {
      id: t8,
      frameName: e,
      iterationId: 0
    };
  }
  // Replaces the whole frame stack; regenerates ids only when the stack
  // identity actually changed (comparison is by reference, not deep equality).
  set currentContext(t8) {
    this.contexts !== t8 && (this.contexts = t8, this.generateCurrentContextIds());
  }
  get currentContext() {
    return this.contexts;
  }
  // The most specific context id (first entry of the generated list).
  get currentContextId() {
    return this._currentContextIds[0];
  }
  get currentContextIds() {
    return this._currentContextIds;
  }
  // Rebuilds the list of context-id prefixes from most specific to least,
  // always ending with "" (the root context).
  generateCurrentContextIds() {
    let t8 = [];
    for (let e = 0; e < this.contexts.length - 1; e++) {
      let o = this.contexts.slice(0, this.contexts.length - e);
      t8.push(this.contextIdforContexts(o));
    }
    t8.push(""), this._currentContextIds = t8;
  }
  // Serializes a frame stack into an id string; the root frame (id 0,
  // iteration 0) contributes an empty segment.
  contextIdforContexts(t8) {
    return t8 ? t8.map(e => e.id === 0 && e.iterationId === 0 ? "" : `${e.frameName}-${e.iterationId}`).join("/") : "";
  }
  // Pushes a new frame (Enter op). The stack is copied before mutation so
  // previously captured `currentContext` references stay valid.
  enterFrame(t8) {
    this.contexts && (this.lastId++, this.contexts = this.contexts.slice(), this.contexts.push(this.newFrame(this.lastId, t8)), this._currentContextIds.unshift(this.contextIdforContexts(this.contexts)));
  }
  // Pops the innermost frame (Exit op); the root frame can never be popped.
  exitFrame() {
    if (this.contexts && this.contexts.length > 1) this.contexts = this.contexts.slice(), this.contexts.splice(-1), this.currentContextIds.shift();else throw new Error("Cannot exit frame, the context is empty");
  }
  // Advances the innermost frame's iteration counter (NextIteration op),
  // replacing the top frame with a fresh record carrying a new id.
  nextIteration() {
    if (this.contexts && this.contexts.length > 0) {
      this.contexts = this.contexts.slice(), this.lastId++;
      let t8 = Object.assign({}, this.contexts[this.contexts.length - 1]);
      t8.iterationId += 1, t8.id = this.lastId, this.contexts.splice(-1, 1, t8), this._currentContextIds.splice(0, 1, this.contextIdforContexts(this.contexts));
    } else throw new Error("Cannot increase frame iteration, the context is empty");
  }
  getWeight(t8) {
    return this.weightMap[t8];
  }
  addTensorArray(t8) {
    this.tensorArrayMap[t8.id] = t8;
  }
  getTensorArray(t8) {
    return this.tensorArrayMap[t8];
  }
  addTensorList(t8) {
    this.tensorListMap[t8.id] = t8;
  }
  getTensorList(t8) {
    return this.tensorListMap[t8];
  }
  // Closes all tensor arrays/lists; t8 is presumably a set of tensor ids to
  // keep alive (forwarded to clearAndClose) — TODO confirm.
  dispose(t8) {
    for (let e in this.tensorArrayMap) this.tensorArrayMap[e].clearAndClose(t8);
    for (let e in this.tensorListMap) this.tensorListMap[e].clearAndClose(t8);
  }
};
// Computes the execution subgraph needed to produce outputs t8 from inputs r.
// r: input tensor map (keyed by node name), t8: output nodes, e: weight map,
// o: init nodes (optional). Walks backwards from the outputs collecting:
// usedNodes (all visited node names), missingInputs (reachable leaf nodes that
// are neither provided inputs, weights, nor init nodes), and the first
// control-flow/dynamic/hash-table node found (dynamicNode) together with its
// already-visited children (syncInputs) — its presence forces async execution.
function MS(r, t8, e, o) {
  let n = /* @__PURE__ */new Set(),
    s = [],
    a = null,
    i = null,
    p = /* @__PURE__ */new Set(),
    // Tr(name) parses a node name; [0] is the base node name without output index.
    u = new Set(Object.keys(r).map(m => Tr(m)[0]));
  o = o || [];
  let c = new Set(o.map(m => Tr(m.name)[0])),
    l = [...t8];
  // DFS from the outputs toward the inputs.
  for (; l.length > 0;) {
    let m = l.pop();
    if ((gu(m) || k8(m) || N8(m)) && a == null && (a = m, i = a.children.map(d => d.name).filter(d => n.has(d))), n.add(m.name), e[m.name] == null && !u.has(m.name) && !c.has(m.name)) {
      if (m.inputs.length === 0) {
        // A reachable leaf with no value available anywhere: a missing input.
        s.push(m.name);
        continue;
      }
      // Enqueue unvisited parents.
      m.inputs.forEach(d => {
        p.has(d.name) || (p.add(d.name), l.push(d));
      });
    }
  }
  return {
    inputs: r,
    outputs: t8,
    usedNodes: n,
    missingInputs: s,
    dynamicNode: a,
    syncInputs: i
  };
}
// Produces a topologically ordered node list for execution. r: the graph,
// t8: the subgraph info returned by MS (usedNodes + inputs). Uses Kahn's
// algorithm seeded from the input/weight/init nodes, prunes the result to nodes
// reachable from the inputs (C8), and validates the ordering (w8) before
// returning it.
function KT(r, t8) {
  let {
    usedNodes: e,
    inputs: o
  } = t8,
    n = Object.keys(o).map(g => Tr(g)[0]).map(g => r.nodes[g]),
    s = r.initNodes || [],
    // Membership test against the used-node set; accepts a name or a node.
    a = g => e.has(typeof g == "string" ? g : g.name);
  // Dedupes a node list by name, keeping the last occurrence per name.
  function i(g) {
    return [...new Map(g.map(x => [x.name, x])).values()];
  }
  let p = i([...n, ...r.weights, ...s]).filter(a),
    u = i([...p, ...Object.values(r.nodes)]).filter(a),
    c = new Map(u.map(g => [g.name, g])),
    l = {};
  // Count in-degrees; children outside the used set are poisoned with +Infinity
  // so they can never reach zero and be scheduled.
  for (let g of u) {
    l[g.name] = l[g.name] || 0;
    for (let x of g.children) a(x) || (l[x.name] = Number.POSITIVE_INFINITY), l[x.name] = (l[x.name] || 0) + 1;
  }
  // Kahn's algorithm: start from zero-in-degree nodes and peel off children.
  let m = Object.entries(l).filter(([, g]) => g === 0).map(([g]) => g),
    d = [...m];
  for (; m.length > 0;) {
    let g = m.pop(),
      x = c.get(g);
    for (let b of x.children.filter(a)) --l[b.name] === 0 && (d.push(b.name), m.push(b.name));
  }
  let f = d.map(g => c.get(g)),
    h = C8(f, p);
  return w8(h, p), h;
}
// Prunes the node list `r` down to the nodes reachable from the input nodes
// `t8` by following child edges. Traversal is by name over a stack; children
// that are not part of `r` are never expanded. The result preserves the
// original ordering of `r`.
function C8(r, t8) {
  const byName = new Map();
  for (const node of r) byName.set(node.name, node);
  const reachable = new Set();
  const frontier = [];
  for (const input of t8) {
    reachable.add(input.name);
    frontier.push(input.name);
  }
  while (frontier.length > 0) {
    const current = byName.get(frontier.pop());
    for (const child of current.children) {
      if (byName.has(child.name) && !reachable.has(child.name)) {
        reachable.add(child.name);
        frontier.push(child.name);
      }
    }
  }
  return r.filter(node => reachable.has(node.name));
}
// Error thrown when the computed node execution order is internally
// inconsistent (an unreachable child/input, or a node scheduled on the wrong
// side of one of its dependencies).
var Cc = class extends Error {
  constructor(t8) {
    const message = `NodesExecutionOrderError: ${t8}`;
    super(message);
  }
};
// Validates a proposed execution order. r: ordered node list, t8: the graph's
// starting nodes (inputs/weights/init). Throws Cc (NodesExecutionOrderError)
// when a child or input falls outside the order, when a node is scheduled after
// one of its children, or when a non-start node runs before one of its inputs.
function w8(r, t8) {
  let e = new Map(r.map((i, p) => [i.name, p])),
    o = new Set(t8.map(i => i.name)),
    // n: is this a start node (name or node accepted)?
    n = i => o.has(typeof i == "string" ? i : i.name),
    s = new Set(r.map(i => i.name)),
    // a: is this node part of the ordered list?
    a = i => s.has(typeof i == "string" ? i : i.name);
  for (let i of r) {
    for (let p of i.children.filter(a)) {
      if (!e.has(p.name)) throw new Cc(`Child ${p.name} of node ${i.name} is unreachable.`);
      if (e.get(i.name) > e.get(p.name)) throw new Cc(`Node ${i.name} is scheduled to run after its child ${p.name}.`);
    }
    // Start nodes get their values externally, so their inputs are not checked.
    if (!n(i)) for (let p of i.inputs) {
      if (!e.has(p.name)) throw new Cc(`Input ${p.name} of node ${i.name} is unreachable.`);
      if (e.get(p.name) > e.get(i.name)) throw new Cc(`Node ${i.name} is scheduled to run before its input ${p.name}.`);
    }
  }
}
// Builds the "live until" map used for eager tensor disposal: for each node,
// the list of earlier nodes whose outputs can be disposed once that node has
// executed. r: topologically ordered node list. Control-flow nodes (gu) are
// treated as live forever (sentinel e = MAX_SAFE_INTEGER) and excluded.
function qT(r) {
  let t8 = new Map(r.map((i, p) => [i.name, p])),
    e = Number.MAX_SAFE_INTEGER,
    // Position of each node, or the "never dispose" sentinel for control flow.
    o = r.map((i, p) => gu(i) ? e : p),
    n = i => {
      let p = o[t8.get(i.name)];
      return p == null ? -1 : p;
    },
    // For each node: the latest position among itself and all its children,
    // i.e. the point after which its outputs are no longer needed.
    s = r.map((i, p) => i.children.map(n).reduce((u, c) => Math.max(u, c), o[p])),
    a = /* @__PURE__ */new Map();
  for (let i = 0; i < r.length; ++i) {
    let p = s[i];
    if (p === e) continue;
    let u = r[i],
      c = r[p];
    // c is the last consumer of u's outputs; record u under c's name.
    a.has(c.name) || a.set(c.name, []), a.get(c.name).push(u);
  }
  return a;
}
// Op names that participate in TensorFlow control flow; the executor handles
// these outside the normal tidy path.
var S8 = /* @__PURE__ */new Set(["Switch", "Merge", "Enter", "Exit", "NextIteration", "StatelessIf", "StatelessWhile", "if", "While"]);
// Ops whose output shapes are only known at runtime ("dynamic" ops).
var I8 = /* @__PURE__ */new Set(["NonMaxSuppressionV2", "NonMaxSuppressionV3", "NonMaxSuppressionV5", "Where"]);
// Ops that create or read hash-table resources.
var v8 = /* @__PURE__ */new Set(["HashTable", "HashTableV2", "LookupTableImport", "LookupTableImportV2", "LookupTableFind", "LookupTableFindV2", "LookupTableSize", "LookupTableSizeV2"]);
// True when the node is a control-flow op.
function gu(r) {
  const { op } = r;
  return S8.has(op);
}
// True when the node is a dynamic-output-shape op.
function k8(r) {
  const { op } = r;
  return I8.has(op);
}
// True when the node is a hash-table op.
function N8(r) {
  const { op } = r;
  return v8.has(op);
}
// Graph executor: runs a converted TF graph either synchronously (`execute`)
// or asynchronously (`executeAsync`, required when the graph contains
// control-flow / dynamic-shape ops). Child executors (for graph functions)
// delegate weight/function state to their parent. Minified helpers used
// throughout: Tr (parse node name -> [name, index]), zt (look up an output
// tensor by name), Ls (current-context node name), OS (execute one node),
// De (tidy scope), Rr (keep tensor), I (resolve node param).
var lp = class {
  // Weight tensor ids live on the root executor.
  get weightIds() {
    return this.parent ? this.parent.weightIds : this._weightIds;
  }
  get functionExecutorMap() {
    return this.parent ? this.parent.functionExecutorMap : this._functionExecutorMap;
  }
  get weightMap() {
    return this.parent ? this.parent.weightMap : this._weightMap;
  }
  // Setting the weight map also flattens all weight tensor ids for later
  // "do not dispose" checks.
  set weightMap(t8) {
    let e = Object.keys(t8).map(o => t8[o].map(n => n.id));
    this._weightIds = [].concat(...e), this._weightMap = t8;
  }
  set resourceManager(t8) {
    this._resourceManager = t8;
  }
  // Public input/output descriptors derived from the graph's placeholder nodes.
  get inputs() {
    return this._inputs.map(t8 => ({
      name: t8.name,
      shape: t8.attrParams.shape ? t8.attrParams.shape.value : void 0,
      dtype: t8.attrParams.dtype ? t8.attrParams.dtype.value : void 0
    }));
  }
  get outputs() {
    return this._outputs.map(t8 => ({
      name: t8.name,
      shape: t8.attrParams.shape ? t8.attrParams.shape.value : void 0,
      dtype: t8.attrParams.dtype ? t8.attrParams.dtype.value : void 0
    }));
  }
  get inputNodes() {
    return this._inputs.map(t8 => t8.signatureKey || t8.name);
  }
  // Output names; a defaultOutput index is appended as ":<index>".
  get outputNodes() {
    return this._outputs.map(t8 => {
      let e = t8.signatureKey || t8.name;
      return t8.defaultOutput ? `${e}:${t8.defaultOutput}` : e;
    });
  }
  get functions() {
    return Object.keys(this._functions).reduce((t8, e) => (t8[e] = this._functions[e].signature, t8), {});
  }
  // t8: the transformed graph; e: optional parent executor (set for graph
  // functions). A child executor is built for each graph function.
  constructor(t8, e) {
    this.graph = t8, this.parent = e, this.compiledMap = /* @__PURE__ */new Map(), this.parseNodeNameCache = /* @__PURE__ */new Map(), this._weightMap = {}, this.SEPARATOR = ",", this._functions = {}, this._functionExecutorMap = {}, this.keepIntermediateTensors = false, this._outputs = t8.outputs, this._inputs = t8.inputs, this._initNodes = t8.initNodes, this._signature = t8.signature, this._functions = t8.functions, t8.functions != null && Object.keys(t8.functions).forEach(o => {
      this._functionExecutorMap[o] = new lp(t8.functions[o], this);
    });
  }
  // Cache key for a compiled plan: sorted input names + sorted output names.
  getCompilationKey(t8, e) {
    let o = t8.map(s => s.name).sort(),
      n = e.map(s => s.name).sort();
    return o.join(this.SEPARATOR) + "--" + n.join(this.SEPARATOR);
  }
  // Builds the execution plan (ordered nodes + disposal map) for a given
  // inputs/outputs pair; throws when the graph needs async execution or when
  // required inputs are missing.
  compile(t8, e) {
    let o = MS(t8, e, this.weightMap, this._initNodes),
      {
        missingInputs: n,
        dynamicNode: s,
        syncInputs: a
      } = o;
    if (s != null) throw new Error(`This execution contains the node '${s.name}', which has the dynamic op '${s.op}'. Please use model.executeAsync() instead. Alternatively, to avoid the dynamic ops, specify the inputs [${a}]`);
    if (n.length > 0) {
      let u = e.map(l => l.name),
        c = Object.keys(t8);
      throw new Error(`Cannot compute the outputs [${u}] from the provided inputs [${c}]. Missing the following inputs: [${n}]`);
    }
    let i = KT(this.graph, o),
      p = qT(i);
    return {
      orderedNodes: i,
      nodeLiveUntilMap: p
    };
  }
  // Clones a tensor and marks the clone kept so tidy() will not dispose it.
  cloneAndKeepTensor(t8) {
    if (t8 == null) return null;
    let e = t8.clone();
    return Rr(e), e;
  }
  cloneTensorList(t8) {
    return t8 ? t8.map(o => this.cloneAndKeepTensor(o)) : null;
  }
  cloneTensorMap(t8) {
    return Object.fromEntries(Object.entries(t8).map(([e, o]) => [e, this.cloneTensorList(o)]));
  }
  // Synchronous execution: compiles (or reuses) a plan, runs nodes in order
  // inside a tidy scope, eagerly disposing intermediates via the live-until
  // map. t8: input tensor map, e: output names. Throws if any node returns a
  // promise (i.e. the graph actually needs executeAsync).
  execute(t8, e) {
    this.disposeIntermediateTensors(), t8 = this.mapInputs(t8);
    let o = Object.keys(t8).sort();
    this.checkInputs(t8), this.checkInputShapeAndType(t8), e = this.mapOutputs(e), this.checkOutputs(e);
    let n = o.map(m => this.graph.nodes[Tr(m)[0]]),
      s = e.map(m => Tr(m)[0]),
      a = new Set(s),
      i = s.map(m => this.graph.nodes[m]);
    i.length === 0 && (i = this._outputs);
    let p = this.getCompilationKey(n, i),
      u = this.compiledMap.get(p);
    u == null && (u = this.compile(t8, i), this.compiledMap.set(p, u));
    // KEEP_INTERMEDIATE_TENSORS is read defensively; a missing flag only warns.
    try {
      this.keepIntermediateTensors = A().getBool("KEEP_INTERMEDIATE_TENSORS");
    } catch (m) {
      this.keepIntermediateTensors = false, console.warn(m.message);
    }
    let c = {},
      l = {};
    return De(() => {
      let m = new Ll(this.weightMap, c, l, this.functionExecutorMap, this.parseNodeNameCache),
        d = Object.assign({}, this.weightMap);
      this.keepIntermediateTensors && (this.clonedTensorsMap = this.cloneTensorMap(this.weightMap)), Object.keys(t8).forEach(x => {
        let [b, C] = Tr(x, m),
          S = [];
        S[C] = t8[x], d[b] = S, this.keepIntermediateTensors && (this.clonedTensorsMap[b] = this.cloneTensorList(S));
      });
      let f = this.getFrozenTensorIds(d),
        {
          orderedNodes: h,
          nodeLiveUntilMap: g
        } = u;
      for (let x of h) {
        // Skip nodes whose values are already provided (inputs/weights).
        if (d[x.name]) continue;
        let b = OS(x, d, m, this._resourceManager);
        if (y.isPromise(b)) throw new Error(`The execution of the op '${x.op}' returned a promise. Please use model.executeAsync() instead.`);
        d[x.name] = b, this.keepIntermediateTensors && (this.clonedTensorsMap[x.name] = this.cloneTensorList(b)), this.checkTensorForDisposalWithNodeLiveUntilInfo(x, d, m, f, a, g.get(x.name));
      }
      // Only the root executor disposes the context's arrays/lists.
      return this.parent == null && m.dispose(f), e.map(x => zt(x, d, m));
    });
  }
  // Ids of tensors that must never be disposed (inputs and weights).
  getFrozenTensorIds(t8) {
    let e = [].concat.apply([], Object.keys(t8).map(o => t8[o]).map(o => o.map(n => n.id)));
    return new Set(e);
  }
  // Reference-counting disposal used by the async path: after node e runs,
  // decrement counts on its inputs' tensors and dispose those that reach zero.
  // t8: node name, o: tensor map, n: context, s: frozen ids, a: output-name
  // set, i: per-tensor reference counts.
  checkTensorForDisposal(t8, e, o, n, s, a, i) {
    if (!(gu(e) || a.has(t8))) {
      for (let p of o[t8]) p != null && (i[p.id] = (i[p.id] || 0) + e.children.length);
      for (let p of e.inputs) {
        if (gu(p)) continue;
        let u = fS(p.name, o, n);
        if (u != null) for (let c of u) {
          if (!c || c.kept || s.has(c.id)) continue;
          let l = i[c.id];
          l === 1 ? (c.dispose(), delete i[c.id]) : l != null && i[c.id]--;
        }
      }
    }
  }
  // Plan-driven disposal used by the sync path: after node t8 runs, dispose
  // the outputs of every node in its live-until list `a` (computed by qT),
  // except control-flow nodes, requested outputs, kept and frozen tensors.
  checkTensorForDisposalWithNodeLiveUntilInfo(t8, e, o, n, s, a) {
    function i(p) {
      return gu(p) || s.has(p.name);
    }
    if (!(gu(t8) || a == null)) for (let p of a) {
      if (i(p)) continue;
      let u = fS(p.name, e, o);
      for (let c of u) !c || c.kept || n.has(c.id) || c.dispose();
    }
  }
  async executeAsync(t8, e) {
    return this._executeAsync(t8, e);
  }
  // Disposes the cloned intermediate tensors kept from the previous run.
  disposeIntermediateTensors() {
    this.clonedTensorsMap && (Object.values(this.clonedTensorsMap).forEach(t8 => {
      for (let e of t8) e && !e.isDisposed && e.dispose();
    }), this.clonedTensorsMap = null);
  }
  getIntermediateTensors() {
    return this.clonedTensorsMap;
  }
  // Async execution core. o=true when called for a graph function (skips
  // input/output mapping and validation); n/s: TensorArray/TensorList maps
  // shared with the caller. Disposes every intermediate tensor that is not an
  // output, an input, or a weight.
  async _executeAsync(t8, e, o = false, n = {}, s = {}) {
    this.disposeIntermediateTensors(), o || (t8 = this.mapInputs(t8), this.checkInputs(t8), this.checkInputShapeAndType(t8), e = this.mapOutputs(e), this.checkOutputs(e));
    try {
      this.keepIntermediateTensors = A().getBool("KEEP_INTERMEDIATE_TENSORS");
    } catch (m) {
      this.keepIntermediateTensors = false, console.warn(m.message);
    }
    let a = new Ll(this.weightMap, n, s, this.functionExecutorMap, this.parseNodeNameCache);
    this.keepIntermediateTensors && (this.clonedTensorsMap = this.cloneTensorMap(this.weightMap));
    let i = await this.executeWithControlFlow(t8, a, e, o),
      p = e.map(m => zt(m, i, a)),
      u = p.map(m => m.id),
      c = Object.keys(t8).map(m => t8[m].id),
      l = /* @__PURE__ */new Set([...u, ...c, ...this.weightIds]);
    return Object.values(i).forEach(m => {
      m.forEach(d => {
        d && !d.isDisposed && !l.has(d.id) && d.dispose();
      });
    }), this.parent == null && a.dispose(l), p;
  }
  // Entry point used by control-flow ops to invoke a graph function; maps
  // positional args onto this executor's input names.
  async executeFunctionAsync(t8, e, o) {
    let n = t8.reduce((s, a, i) => (s[this.inputs[i].name] = a, s), {});
    return this._executeAsync(n, this.outputNodes, true, e, o);
  }
  // Dataflow-driven async scheduler: starts from inputs/weights/initNodes and
  // pushes children onto a stack as their inputs become available, awaiting
  // promised results batch by batch. Warns when used on a purely static graph.
  async executeWithControlFlow(t8, e, o, n) {
    let s = Object.keys(t8),
      a = s.map(S => this.graph.nodes[Tr(S)[0]]),
      i = o.map(S => Tr(S)[0]),
      p = new Set(i),
      u = i.map(S => this.graph.nodes[S]);
    u.length === 0 && (u = this._outputs);
    let {
      usedNodes: c,
      missingInputs: l,
      dynamicNode: m,
      syncInputs: d
    } = MS(t8, u, this.weightMap, this._initNodes),
      f = [...a, ...this.graph.weights, ...(this._initNodes || [])].map(S => ({
        node: S,
        contexts: e.currentContext
      })),
      h = Object.assign({}, this.weightMap);
    Object.keys(t8).forEach(S => {
      let [k, _] = Tr(S),
        E = [];
      E[_] = t8[S], h[k] = E;
    });
    let g = {},
      x = this.getFrozenTensorIds(h),
      b = {};
    for (; f.length > 0;) {
      let S = this.processStack(a, f, e, h, b, x, p, g, c);
      await Promise.all(S);
    }
    m == null && !n && console.warn("This model execution did not contain any nodes with control flow or dynamic output shapes. You can use model.execute() instead.");
    let C = u.filter(S => !gu(S) && !zt(S.name, h, e)).map(S => S.name);
    if (C.length > 0) {
      let S = "";
      throw m != null && (S = `Alternatively, to avoid the dynamic ops, use model.execute() and specify the inputs [${d}]`), new Error(`Cannot compute the outputs [${C}] from the provided inputs [${s}]. Consider providing the following inputs: [${l}]. ${S}`);
    }
    return h;
  }
  // Drains the ready stack once: executes each ready node under its saved
  // control-flow context, collecting promises (for async ops) to await.
  // Constant Enter nodes are stored under their context-independent name.
  processStack(t8, e, o, n, s, a, i, p, u) {
    let c = [];
    for (; e.length > 0;) {
      let l = e.pop();
      o.currentContext = l.contexts;
      let m = "";
      if (l.node.op === "Enter" && I("isConstant", l.node, n, o) && ([m] = Ls(l.node.name, o)), n[l.node.name] == null) {
        let d = OS(l.node, n, o, this._resourceManager);
        m || ([m] = Ls(l.node.name, o));
        let f = o.currentContext;
        y.isPromise(d) ? c.push(d.then(h => (n[m] = h, this.keepIntermediateTensors && (this.clonedTensorsMap[m] = this.cloneTensorList(h)), o.currentContext = f, this.checkTensorForDisposal(m, l.node, n, o, a, i, p), this.processChildNodes(l.node, e, o, n, s, u), h))) : (n[m] = d, this.keepIntermediateTensors && (this.clonedTensorsMap[m] = this.cloneTensorList(d)), this.checkTensorForDisposal(m, l.node, n, o, a, i, p), this.processChildNodes(l.node, e, o, n, s, u));
      } else this.processChildNodes(l.node, e, o, n, s, u);
    }
    return c;
  }
  // Pushes children whose inputs are now available. Merge nodes are ready when
  // ANY input is available; all other nodes require EVERY input.
  processChildNodes(t8, e, o, n, s, a) {
    t8.children.forEach(i => {
      let [p] = Ls(i.name, o);
      s[p] || !a.has(i.name) || (i.op === "Merge" ? i.inputNames.some(u => !!zt(u, n, o)) && (s[p] = true, e.push({
        contexts: o.currentContext,
        node: i
      })) : i.inputNames.every(u => !!zt(u, n, o)) && (s[p] = true, e.push({
        contexts: o.currentContext,
        node: i
      })));
    });
  }
  // Disposes all weight tensors owned by this executor.
  dispose() {
    Object.keys(this.weightMap).forEach(t8 => this.weightMap[t8].forEach(e => e.dispose()));
  }
  // Validates provided input tensors against the graph's declared shapes
  // (-1 entries are wildcards) and dtypes.
  checkInputShapeAndType(t8) {
    Object.keys(t8).forEach(e => {
      let o = t8[e],
        [n] = Tr(e),
        s = this.graph.nodes[n];
      if (s.attrParams.shape && s.attrParams.shape.value) {
        let a = s.attrParams.shape.value,
          i = a.length === o.shape.length && o.shape.every((p, u) => a[u] === -1 || a[u] === p);
        y.assert(i, () => `The shape of dict['${s.name}'] provided in model.execute(dict) must be [${a}], but was [${o.shape}]`);
      }
      s.attrParams.dtype && s.attrParams.dtype.value && y.assert(o.dtype === s.attrParams.dtype.value, () => `The dtype of dict['${s.name}'] provided in model.execute(dict) must be ${s.attrParams.dtype.value}, but was ${o.dtype}`);
    });
  }
  // Translates signature input keys to actual node names, where a signature
  // is present; unknown keys pass through unchanged.
  mapInputs(t8) {
    var e, o;
    let n = {};
    for (let s in t8) {
      let a = (o = (e = this._signature) === null || e === void 0 ? void 0 : e.inputs) === null || o === void 0 ? void 0 : o[s];
      a != null ? n[a.name] = t8[s] : n[s] = t8[s];
    }
    return n;
  }
  checkInputs(t8) {
    let e = Object.keys(t8).filter(o => {
      let [n] = Tr(o);
      return this.graph.nodes[n] == null;
    });
    if (e.length > 0) throw new Error(`The dict provided in model.execute(dict) has keys: [${e}] that are not part of graph`);
  }
  // Translates signature output keys to actual node names.
  mapOutputs(t8) {
    return t8.map(e => {
      var o, n;
      let s = (n = (o = this._signature) === null || o === void 0 ? void 0 : o.outputs) === null || n === void 0 ? void 0 : n[e];
      return s != null ? s.name : e;
    }, {});
  }
  checkOutputs(t8) {
    t8.forEach(e => {
      let [o] = Tr(e);
      if (!this.graph.nodes[o]) throw new Error(`The output '${e}' is not found in the graph`);
    });
  }
};
// Tracks hash-table resources created during graph execution so they can be
// looked up by name or by id, and disposed together when the model is released.
var kf = class {
  constructor(t8 = {}, e = {}) {
    // Table name -> handle tensor.
    this.hashTableNameToHandle = t8;
    // Table id -> hash-table instance.
    this.hashTableMap = e;
  }
  // Registers a hash table under the given name and under its own id.
  addHashTable(t8, e) {
    this.hashTableNameToHandle[t8] = e.handle;
    this.hashTableMap[e.id] = e;
  }
  // Returns the handle tensor registered under a name (undefined if absent).
  getHashTableHandleByName(t8) {
    return this.hashTableNameToHandle[t8];
  }
  // Returns the table instance registered under an id (undefined if absent).
  getHashTableById(t8) {
    return this.hashTableMap[t8];
  }
  // Closes every table and disposes every handle, emptying both maps.
  dispose() {
    for (const id of Object.keys(this.hashTableMap)) {
      this.hashTableMap[id].clearAndClose();
      delete this.hashTableMap[id];
    }
    for (const name of Object.keys(this.hashTableNameToHandle)) {
      this.hashTableNameToHandle[name].dispose();
      delete this.hashTableNameToHandle[name];
    }
  }
};
// Query suffix asking TFHub to serve the raw model file (see R8 below).
var T8 = "?tfjs-format=file";
// Filename of the graph-model topology JSON appended to TFHub module URLs.
var _8 = "model.json";
// Graph model: user-facing wrapper around the graph executor (lp). Handles
// IOHandler resolution, loading/saving artifacts, signature-based input/output
// mapping, an optional initializer graph (for resources such as hash tables),
// and structured-output key mapping.
var Bl = class {
  get modelVersion() {
    return this.version;
  }
  get inputNodes() {
    return this.executor.inputNodes;
  }
  get outputNodes() {
    return this.executor.outputNodes;
  }
  get inputs() {
    return this.executor.inputs;
  }
  get outputs() {
    return this.executor.outputs;
  }
  get weights() {
    return this.executor.weightMap;
  }
  get metadata() {
    return this.artifacts.userDefinedMetadata;
  }
  get modelSignature() {
    return this.signature;
  }
  get modelStructuredOutputKeys() {
    return this.structuredOutputKeys;
  }
  // t8: model URL or IOHandler, e: load options, o: io module (defaults to the
  // bundle-level `fi`).
  constructor(t8, e = {}, o = fi) {
    this.modelUrl = t8, this.loadOptions = e, this.version = "n/a", this.io = o, e == null && (this.loadOptions = {}), this.resourceManager = new kf();
  }
  // Resolves this.handler: a custom IOHandler is used as-is; requestInit forces
  // the browser HTTP handler; otherwise the io registry picks one (ambiguity
  // across multiple handlers is an error).
  findIOHandler() {
    let t8 = this.modelUrl;
    if (t8.load != null) this.handler = t8;else if (this.loadOptions.requestInit != null) this.handler = this.io.browserHTTPRequest(t8, this.loadOptions);else {
      let e = this.io.getLoadHandlers(t8, this.loadOptions);
      if (e.length === 0) e.push(this.io.browserHTTPRequest(t8, this.loadOptions));else if (e.length > 1) throw new Error(`Found more than one (${e.length}) load handlers for URL '${[t8]}'`);
      this.handler = e[0];
    }
  }
  // Loads the model artifacts; works for both sync and async IOHandlers.
  load() {
    if (this.findIOHandler(), this.handler.load == null) throw new Error("Cannot proceed with model loading because the IOHandler provided does not have the `load` method implemented.");
    let t8 = this.handler.load();
    return y.isPromise(t8) ? t8.then(e => this.loadSync(e)) : this.loadSync(t8);
  }
  // Builds the executor (and optional initializer executor) from loaded
  // artifacts. Signature and structuredOutputKeys may come from user metadata.
  loadSync(t8) {
    this.artifacts = t8;
    let e = this.artifacts.modelTopology,
      o = this.artifacts.signature;
    if (this.artifacts.userDefinedMetadata != null) {
      let s = this.artifacts.userDefinedMetadata;
      s.signature != null && (o = s.signature), s.structuredOutputKeys != null && (this.structuredOutputKeys = s.structuredOutputKeys);
    }
    this.signature = o, this.version = `${e.versions.producer}.${e.versions.minConsumer}`;
    let n = this.io.decodeWeights(this.artifacts.weightData, this.artifacts.weightSpecs);
    if (this.executor = new lp(Ml.Instance.transformGraph(e, this.signature)), this.executor.weightMap = this.convertTensorMapToTensorsMap(n), this.executor.resourceManager = this.resourceManager, t8.modelInitializer != null && t8.modelInitializer.node != null) {
      // The initializer graph shares weights and resources with the main graph.
      let s = Ml.Instance.transformGraph(t8.modelInitializer);
      this.initializer = new lp(s), this.initializer.weightMap = this.executor.weightMap, this.initializer.resourceManager = this.resourceManager, this.initializerSignature = t8.initializerSignature;
    }
    return true;
  }
  // Saves the loaded artifacts through a save handler resolved from a URL, or
  // a handler passed directly.
  async save(t8, e) {
    if (typeof t8 == "string") {
      let o = this.io.getSaveHandlers(t8);
      if (o.length === 0) throw new Error(`Cannot find any save handlers for URL '${t8}'`);
      if (o.length > 1) throw new Error(`Found more than one (${o.length}) save handlers for URL '${t8}'`);
      t8 = o[0];
    }
    if (t8.save == null) throw new Error("GraphModel.save() cannot proceed because the IOHandler provided does not have the `save` attribute defined.");
    return t8.save(this.artifacts);
  }
  // When structuredOutputKeys is defined, repackages positional outputs into a
  // keyed object; otherwise returns the result unchanged.
  addStructuredOutputNames(t8) {
    if (this.structuredOutputKeys) {
      let e = t8 instanceof ut ? [t8] : t8,
        o = {};
      return e.forEach((n, s) => o[this.structuredOutputKeys[s]] = n), o;
    }
    return t8;
  }
  predict(t8, e) {
    let o = this.execute(t8, this.outputNodes);
    return this.addStructuredOutputNames(o);
  }
  async predictAsync(t8, e) {
    let o = await this.executeAsync(t8, this.outputNodes);
    return this.addStructuredOutputNames(o);
  }
  // Normalizes user inputs to a name->tensor map, injecting captured resource
  // tensors for signature inputs that carry a resourceId.
  normalizeInputs(t8) {
    var e;
    if (!(t8 instanceof ut) && !Array.isArray(t8)) {
      // Already a dict: just fill in resource-backed entries.
      let s = (e = this.signature) === null || e === void 0 ? void 0 : e.inputs;
      if (s != null) for (let a in s) {
        let i = s[a];
        i.resourceId != null && (t8[a] = this.resourceIdToCapturedInput[i.resourceId]);
      }
      return t8;
    }
    t8 = Array.isArray(t8) ? t8 : [t8];
    let o = Object.keys(this.resourceIdToCapturedInput).length;
    if (t8.length + o !== this.inputNodes.length) throw new Error(`Input tensor count mismatch, the graph model has ${this.inputNodes.length - o} non-resource placeholders, while there are ${t8.length} input tensors provided.`);
    let n = 0;
    // Assign positional tensors to non-resource placeholders in node order.
    return this.inputNodes.reduce((s, a) => {
      var i, p, u;
      let c = (u = (p = (i = this.signature) === null || i === void 0 ? void 0 : i.inputs) === null || p === void 0 ? void 0 : p[a]) === null || u === void 0 ? void 0 : u.resourceId;
      return c != null ? s[a] = this.resourceIdToCapturedInput[c] : s[a] = t8[n++], s;
    }, {});
  }
  normalizeOutputs(t8) {
    return t8 = t8 || this.outputNodes, Array.isArray(t8) ? t8 : [t8];
  }
  // Runs the initializer graph (if any), scoped to its signature outputs.
  executeInitializerGraph() {
    return this.initializer == null ? [] : this.initializerSignature == null ? this.initializer.execute({}, []) : this.initializer.execute({}, Object.keys(this.initializerSignature.outputs));
  }
  async executeInitializerGraphAsync() {
    return this.initializer == null ? [] : this.initializerSignature == null ? this.initializer.executeAsync({}, []) : this.initializer.executeAsync({}, Object.keys(this.initializerSignature.outputs));
  }
  // Records the initializer's output tensors keyed by their signature
  // resourceId, for later injection as captured inputs.
  setResourceIdToCapturedInput(t8) {
    if (this.resourceIdToCapturedInput = {}, this.initializerSignature) {
      let e = this.initializerSignature.outputs,
        o = Object.keys(e);
      for (let n = 0; n < o.length; n++) {
        let s = o[n],
          a = e[s];
        this.resourceIdToCapturedInput[a.resourceId] = t8[n];
      }
    }
  }
  // Synchronous inference; runs the initializer graph lazily on first call.
  // Returns a single tensor when there is exactly one output.
  execute(t8, e) {
    this.resourceIdToCapturedInput == null && this.setResourceIdToCapturedInput(this.executeInitializerGraph()), t8 = this.normalizeInputs(t8), e = this.normalizeOutputs(e);
    let o = this.executor.execute(t8, e);
    return o.length > 1 ? o : o[0];
  }
  // Async inference; required for graphs with control-flow/dynamic ops.
  async executeAsync(t8, e) {
    this.resourceIdToCapturedInput == null && this.setResourceIdToCapturedInput(await this.executeInitializerGraphAsync()), t8 = this.normalizeInputs(t8), e = this.normalizeOutputs(e);
    let o = await this.executor.executeAsync(t8, e);
    return o.length > 1 ? o : o[0];
  }
  getIntermediateTensors() {
    return this.executor.getIntermediateTensors();
  }
  disposeIntermediateTensors() {
    this.executor.disposeIntermediateTensors();
  }
  // Wraps each decoded weight tensor in a single-element array, the layout the
  // executor's weightMap expects.
  convertTensorMapToTensorsMap(t8) {
    return Object.keys(t8).reduce((e, o) => (e[o] = [t8[o]], e), {});
  }
  // Releases executor weights, initializer state, captured resource tensors
  // (Mt disposes a tensor map) and hash-table resources.
  dispose() {
    this.executor.dispose(), this.initializer && (this.initializer.dispose(), this.resourceIdToCapturedInput && Mt(this.resourceIdToCapturedInput)), this.resourceManager.dispose();
  }
};
// Loads a graph model from a URL or a custom IOHandler and resolves to the
// ready-to-use model. r: model URL / IOHandler, t8: load options (fromTFHub
// rewrites TFHub module URLs to the raw model.json), e: io module (defaults to
// the bundle-level `fi`).
async function $8(r, t8 = {}, e = fi) {
  if (r == null) {
    throw new Error("modelUrl in loadGraphModel() cannot be null. Please provide a url or an IOHandler that loads the model");
  }
  if (t8 == null) t8 = {};
  if (t8.fromTFHub && typeof r == "string") {
    r = R8(r);
  }
  const model = new Bl(r, t8, e);
  await model.load();
  return model;
}
// Synchronous variant of loadGraphModel. Accepts one of three shapes:
// (a) a [modelJSON, weightsArrayBuffer] pair, (b) an object with a `load`
// method (a sync IOHandler), or (c) in-memory model artifacts
// (modelTopology + weightSpecs + weightData). `fi` is the bundle's io module.
function E8(r) {
  if (r == null) throw new Error("modelUrl in loadGraphModelSync() cannot be null. Please provide model artifacts or an IOHandler that loads the model");
  let t8;
  if (r instanceof Array) {
    // Case (a): validate the pair, then assemble artifacts and wrap them in an
    // in-memory sync IOHandler.
    let [o, n] = r;
    if (!o) throw new Error("modelJSON must be the first element of the array");
    if (!n || !(n instanceof ArrayBuffer)) throw new Error("An ArrayBuffer of weights must be the second element of the array");
    if (!("modelTopology" in o)) throw new Error("Model JSON is missing 'modelTopology'");
    if (!("weightsManifest" in o)) throw new Error("Model JSON is missing 'weightsManifest'");
    let s = fi.getWeightSpecs(o.weightsManifest),
      a = fi.getModelArtifactsForJSONSync(o, s, n);
    t8 = fi.fromMemorySync(a);
  } else if ("load" in r) t8 = r;else if ("modelTopology" in r && "weightSpecs" in r && "weightData" in r) t8 = fi.fromMemorySync(r);else throw new Error("Unknown model format");
  // Bl.load() is synchronous here because the handler is synchronous.
  let e = new Bl(t8);
  return e.load(), e;
}
// Builds the TF-Hub model.json URL from a hub module URL; _8 and T8 are
// path-suffix constants defined elsewhere in the bundle.
function R8(r) {
  const base = r.endsWith("/") ? r : `${r}/`;
  return `${base}${_8}${T8}`;
}
var D8 = "4.11.0"; | |
// Asserts that none of the given tensors (single tensor or array) are
// complex64 — the CPU backend kernels guarded by this do not support them.
function Q(r, t8) {
  const tensors = Array.isArray(r) ? r : [r];
  for (const tensor of tensors) {
    if (tensor != null) {
      y.assert(tensor.dtype !== "complex64", () => `${t8} does not support complex64 tensors in the CPU backend.`);
    }
  }
}
var A8 = Wt.whereImpl; | |
// Minified TensorFlow.js CPU backend class: stores tensor values in a map
// keyed by opaque dataId objects and implements the backend storage/refcount
// interface (write/read/move/dispose, complex64 split storage, etc.).
var xu = class extends ao {
nextDataId() {
return xu.nextDataId++;
}
constructor() {
// blockSize appears to be a blocking-size tuning constant; data maps
// dataId -> {values, dtype, refCount, complexTensorInfos?}.
super(), this.blockSize = 48, this.firstUse = true, this.data = new zo(this, pr());
}
write(t8, e, o) {
// On first write under Node, print a one-time hint to install tfjs-node.
this.firstUse && (this.firstUse = false, A().get("IS_NODE") && w.warn(`
============================
Hi, looks like you are running TensorFlow.js in Node.js. To speed things up dramatically, install our node backend, visit https://github.com/tensorflow/tfjs-node for more details.
============================`));
let n = {
id: this.nextDataId()
};
return this.data.set(n, {
values: t8,
dtype: o,
refCount: 1
}), n;
}
makeTensorInfo(t8, e, o) {
let n;
if (e === "string" && o != null && o.length > 0 && y.isString(o[0])) {
// String tensors are stored with each element encoded (y.encodeString).
let s = o.map(a => y.encodeString(a));
n = this.write(s, t8, e);
} else n = this.write(o, t8, e);
return {
dataId: n,
shape: t8,
dtype: e
};
}
refCount(t8) {
return this.data.has(t8) ? this.data.get(t8).refCount : 0;
}
incRef(t8) {
let e = this.data.get(t8);
e.refCount++;
}
decRef(t8) {
if (this.data.has(t8)) {
let e = this.data.get(t8);
e.refCount--;
}
}
move(t8, e, o, n, s) {
this.data.set(t8, {
values: e,
dtype: n,
refCount: s
});
}
numDataIds() {
return this.data.numDataIds();
}
async read(t8) {
// CPU data is already in memory, so async read just delegates to readSync.
return this.readSync(t8);
}
readSync(t8) {
let {
dtype: e,
complexTensorInfos: o
} = this.data.get(t8);
if (e === "complex64") {
// complex64 is stored as two separate real/imag tensors; interleave them.
let n = this.readSync(o.real.dataId),
s = this.readSync(o.imag.dataId);
return w.mergeRealAndImagArrays(n, s);
}
return y.convertBackendValuesAndArrayBuffer(this.data.get(t8).values, e);
}
bufferSync(t8) {
let e = this.readSync(t8.dataId);
if (t8.dtype === "string") try {
let o = e.map(n => y.decodeString(n));
return me(t8.shape, t8.dtype, o);
} catch (o) {
throw new Error("Failed to decode encoded string bytes into utf-8");
}
return me(t8.shape, t8.dtype, e);
}
makeOutput(t8, e, o) {
return pr().makeTensorFromTensorInfo(this.makeTensorInfo(e, o, t8), this);
}
disposeData(t8, e = false) {
// Decrements the refcount and deletes the entry when it reaches zero (or
// when e forces disposal); returns false while references remain.
if (this.data.has(t8)) {
if (this.data.get(t8).refCount--, !e && this.data.get(t8).refCount > 0) return false;
let {
complexTensorInfos: o
} = this.data.get(t8);
o != null && (this.disposeData(o.real.dataId, true), this.disposeData(o.imag.dataId, true)), this.data.delete(t8);
}
return true;
}
disposeIntermediateTensorInfo(t8) {
this.disposeData(t8.dataId);
}
async time(t8) {
let e = y.now();
return t8(), {
kernelMs: y.now() - e
};
}
memory() {
return {
unreliable: true,
reasons: ["The reported memory is an upper bound. Due to automatic garbage collection, the true allocated memory may be less."]
};
}
where(t8) {
Q([t8], "where");
let e = this.readSync(t8.dataId);
return A8(t8.shape, e);
}
dispose() {}
floatPrecision() {
return 32;
}
epsilon() {
return super.epsilon();
}
};
xu.nextDataId = 0; | |
// Tc: namespace object of shared CPU kernel implementations. qe() (an
// export-registration helper defined earlier in the bundle) attaches lazy
// getters so other backends can reuse these *Impl functions.
var Tc = {};
qe(Tc, {
addImpl: () => zS,
bincountImpl: () => Ic,
bincountReduceImpl: () => Nf,
bitwiseAndImpl: () => VS,
castImpl: () => BS,
ceilImpl: () => WS,
concatImpl: () => mp,
equalImpl: () => US,
expImpl: () => HS,
expm1Impl: () => qS,
floorDivImpl: () => XS,
floorImpl: () => jS,
gatherNdImpl: () => Tf,
gatherV2Impl: () => _f,
greaterEqualImpl: () => QS,
greaterImpl: () => YS,
lessEqualImpl: () => JS,
lessImpl: () => ZS,
linSpaceImpl: () => $f,
logImpl: () => eI,
maxImpl: () => Ef,
maximumImpl: () => tI,
minimumImpl: () => rI,
multiplyImpl: () => zl,
negImpl: () => oI,
notEqualImpl: () => nI,
prodImpl: () => sI,
raggedGatherImpl: () => Rf,
raggedRangeImpl: () => Df,
raggedTensorToTensorImpl: () => Af,
rangeImpl: () => fp,
rsqrtImpl: () => aI,
scatterImpl: () => zs,
sigmoidImpl: () => v_,
simpleAbsImpl: () => LS,
sliceImpl: () => hp,
sparseFillEmptyRowsImpl: () => Ff,
sparseReshapeImpl: () => Pf,
sparseSegmentReductionImpl: () => Nc,
sqrtImpl: () => T_,
squaredDifferenceImpl: () => uI,
staticRegexReplaceImpl: () => pI,
stridedSliceImpl: () => Of,
stringNGramsImpl: () => gp,
stringSplitImpl: () => xp,
stringToHashBucketFastImpl: () => yp,
subImpl: () => lI,
tileImpl: () => Mf,
topKImpl: () => Lf,
transposeImpl: () => vc,
uniqueImpl: () => bp
});
// simpleAbsImpl: element-wise |x| into a fresh Float32Array.
function LS(r) {
  const size = r.length;
  const result = new Float32Array(size);
  for (let i = 0; i < size; ++i) {
    result[i] = Math.abs(r[i]);
  }
  return result;
}
// cpu Abs kernel: maps |x| over the input's backing values and writes a new
// float32 output tensor with the same shape.
var F8 = r => {
  let {
    x: t8
  } = r.inputs,
    e = r.backend;
  Q(t8, "abs");
  // Fix: the original pre-allocated `new Float32Array(y.sizeFromShape(...))`
  // and immediately discarded it by reassigning `o = LS(n)`; LS already
  // allocates the output buffer, so the extra allocation was dead work.
  let n = e.data.get(t8.dataId).values,
    o = LS(n);
  return e.makeOutput(o, t8.shape, t8.dtype);
};
// cpu kernel registration record for Abs (kernel name constant Xs).
var jT = {
kernelName: Xs,
backendName: "cpu",
kernelFunc: F8
};
// Builds a broadcasting binary-op implementation from a scalar op `r`:
// returns (aShape, bShape, aVals, bVals, dtype) => [outVals, outShape].
function ze(r) {
return (t8, e, o, n, s) => {
let a = w.assertAndGetBroadcastShape(t8, e),
i = a.length,
p = y.computeStrides(a),
u = y.sizeFromShape(a),
c = y.getTypedArrayFromDType(s, u),
l = t8.length,
m = e.length,
d = y.computeStrides(t8),
f = y.computeStrides(e),
h = w.getBroadcastDims(t8, a),
g = w.getBroadcastDims(e, a);
// Fast path: no broadcast dims, so inputs are indexed cyclically; otherwise
// each output location is mapped back to input indices with the broadcast
// dimensions zeroed out.
if (h.length + g.length === 0) for (let x = 0; x < c.length; ++x) c[x] = r(o[x % o.length], n[x % n.length]);else for (let x = 0; x < c.length; ++x) {
let b = y.indexToLoc(x, i, p),
C = b.slice(-l);
h.forEach(E => C[E] = 0);
let S = y.locToIndex(C, l, d),
k = b.slice(-m);
g.forEach(E => k[E] = 0);
let _ = y.locToIndex(k, m, f);
c[x] = r(o[S], n[_]);
}
return [c, a];
};
}
// cpu Complex kernel: packs separate real/imag float tensors into one
// complex64 tensor info whose storage holds two new float32 tensor infos.
function Kt(r) {
let {
inputs: t8,
backend: e
} = r,
{
real: o,
imag: n
} = t8,
s = e.data.get(o.dataId).values,
a = e.data.get(n.dataId).values,
i = e.makeTensorInfo(o.shape, "complex64"),
p = e.data.get(i.dataId);
return p.complexTensorInfos = {
real: e.makeTensorInfo(o.shape, "float32", s),
imag: e.makeTensorInfo(n.shape, "float32", a)
}, i;
}
// cpu kernel registration record for Complex (kernel name constant Fi).
var XT = {
kernelName: Fi,
backendName: "cpu",
kernelFunc: Kt
};
// Creates a zero-filled tensor of the given shape/dtype on backend r;
// complex64 zeros are built from two float32 zero tensors via Kt.
function wc(r, t8, e = "float32") {
  if (e !== "complex64") {
    const zeros = y.makeZerosTypedArray(y.sizeFromShape(t8), e);
    return r.makeTensorInfo(t8, e, zeros);
  }
  const realPart = wc(r, t8, "float32");
  const imagPart = wc(r, t8, "float32");
  return Kt({
    inputs: {
      real: realPart,
      imag: imagPart
    },
    backend: r
  });
}
// cpu Identity kernel: bumps the input's refcount and returns an aliasing
// tensor info (same dataId, shape and dtype).
function mr(r) {
  const {
    inputs,
    backend
  } = r;
  const {
    x
  } = inputs;
  backend.incRef(x.dataId);
  return {
    dataId: x.dataId,
    shape: x.shape,
    dtype: x.dtype
  };
}
// cpu kernel registration record for Identity (kernel name constant wo).
var YT = {
kernelName: wo,
backendName: "cpu",
kernelFunc: mr
};
// cpu Real kernel: copies the real component of a complex64 tensor into a
// new tensor info.
function Ro(r) {
  const {
    inputs,
    backend
  } = r;
  const {
    input
  } = inputs;
  const realInfo = backend.data.get(input.dataId).complexTensorInfos.real;
  const realValues = backend.data.get(realInfo.dataId).values;
  return backend.makeTensorInfo(realInfo.shape, realInfo.dtype, realValues);
}
// cpu kernel registration record for Real (kernel name constant qi).
var QT = {
kernelName: qi,
backendName: "cpu",
kernelFunc: Ro
};
// castImpl: casts raw values to int32 or bool; other targets are handled by
// the Cast kernel (Do) before reaching here, so anything else throws.
// Returns [outShape, outDtype, outValues].
function BS(r, t8, e, o) {
if (o === "int32") {
let n = Int32Array.from(r);
return [t8, "int32", n];
}
if (o === "bool") {
// bool cast is implemented as (x !== 0) via the broadcasting not-equal op.
let n = y.toTypedArray([0], e),
[s, a] = ze((i, p) => i !== p ? 1 : 0)(t8, [], r, n, "bool");
return [a, "bool", s];
}
throw new Error(`Error in Cast: failed to cast ${e} to ${o}`);
}
// cpu Cast kernel. Handles four cases: cast to complex64 (float32 real +
// zero imag), cast from complex64 (drop imag, recurse on real), lossless
// casts (aliasing identity with a new dtype tag), and lossy casts via BS.
function Do(r) {
let {
inputs: t8,
backend: e,
attrs: o
} = r,
{
x: n
} = t8,
{
dtype: s
} = o;
if (s === "complex64") {
if (n.dtype === "complex64") return mr({
inputs: {
x: n
},
backend: e
});
// Zero imaginary part + float32 real part, combined with the Complex kernel.
let c = wc(e, n.shape, n.dtype),
l = Do({
inputs: {
x: n
},
backend: e,
attrs: {
dtype: "float32"
}
}),
m = Kt({
inputs: {
real: l,
imag: c
},
backend: e
});
return e.disposeIntermediateTensorInfo(c), e.disposeIntermediateTensorInfo(l), m;
}
if (n.dtype === "complex64") {
// Drop the imaginary part, then cast the real part to the target dtype.
let c = Ro({
inputs: {
input: n
},
backend: e
}),
l = Do({
inputs: {
x: c
},
backend: e,
attrs: {
dtype: s
}
});
return e.disposeIntermediateTensorInfo(c), l;
}
if (!y.hasEncodingLoss(n.dtype, s)) {
// Lossless cast: alias the data and just relabel the dtype.
let c = mr({
inputs: {
x: n
},
backend: e
});
return {
dataId: c.dataId,
shape: c.shape,
dtype: s
};
}
let a = e.data.get(n.dataId).values,
[i, p, u] = BS(a, n.shape, n.dtype, s);
return e.makeTensorInfo(i, p, u);
}
// cpu kernel registration record for Cast (kernel name constant bo).
var ZT = {
kernelName: bo,
backendName: "cpu",
kernelFunc: Do
};
// Binary kernel factory. r: op name (for the complex64 guard), t8: real
// implementation, e: optional complex64 implementation, o: optional forced
// output dtype. Without a complex impl, string tensors are decoded before
// the op; with one, both operands are cast to complex64 and the op runs on
// their real/imag parts separately.
function Ye(r, t8, e, o) {
return e == null ? ({
inputs: n,
backend: s
}) => {
let {
a,
b: i
} = n,
p = s;
Q([a, i], r);
let u = p.data.get(a.dataId).values,
c = p.data.get(i.dataId).values,
l = a.dtype === "string" ? w.fromUint8ToStringArray(u) : u,
m = a.dtype === "string" ? w.fromUint8ToStringArray(c) : c,
d = o || a.dtype,
[f, h] = t8(a.shape, i.shape, l, m, d);
return p.makeTensorInfo(h, d, f);
} : ({
inputs: n,
backend: s
}) => {
let {
a,
b: i
} = n,
p = s;
if (a.dtype === "complex64" || i.dtype === "complex64") {
// Cast both operands to complex64, apply the complex impl to the packed
// real/imag value arrays, then rebuild a complex tensor and clean up all
// intermediates.
let u = Do({
inputs: {
x: a
},
backend: p,
attrs: {
dtype: "complex64"
}
}),
c = p.data.get(u.dataId),
l = c.complexTensorInfos.real,
m = c.complexTensorInfos.imag,
d = p.data.get(l.dataId).values,
f = p.data.get(m.dataId).values,
h = Do({
inputs: {
x: i
},
backend: p,
attrs: {
dtype: "complex64"
}
}),
g = p.data.get(h.dataId),
x = g.complexTensorInfos.real,
b = g.complexTensorInfos.imag,
C = p.data.get(x.dataId).values,
S = p.data.get(b.dataId).values,
[k, _, E] = e(a.shape, i.shape, d, f, C, S),
R = p.makeTensorInfo(E, "float32", k),
D = p.makeTensorInfo(E, "float32", _),
P = Kt({
inputs: {
real: R,
imag: D
},
backend: p
});
return p.disposeIntermediateTensorInfo(u), p.disposeIntermediateTensorInfo(h), p.disposeIntermediateTensorInfo(R), p.disposeIntermediateTensorInfo(D), P;
} else {
let u = p.data.get(a.dataId).values,
c = p.data.get(i.dataId).values,
l = o || a.dtype,
[m, d] = t8(a.shape, i.shape, u, c, l);
return p.makeTensorInfo(d, l, m);
}
};
}
// Builds a broadcasting complex64 binary-op implementation from a scalar op
// r(aReal, aImag, bReal, bImag) -> {real, imag}. Inputs/outputs are packed
// interleaved [re, im, re, im, ...] arrays; returns [outReal, outImag, shape].
function Sc(r) {
return (t8, e, o, n, s, a) => {
let i = w.assertAndGetBroadcastShape(t8, e),
p = y.sizeFromShape(i),
u = i.length,
c = y.computeStrides(i),
l = y.getTypedArrayFromDType("float32", p),
m = y.getTypedArrayFromDType("float32", p),
d = w.getBroadcastDims(t8, i),
f = w.getBroadcastDims(e, i),
h = w.mergeRealAndImagArrays(o, n),
g = w.mergeRealAndImagArrays(s, a),
x = t8.length,
b = y.computeStrides(t8),
C = e.length,
S = y.computeStrides(e);
// Same fast/slow paths as ze(): cyclic indexing when no broadcast dims,
// otherwise per-output-location index mapping with broadcast dims zeroed.
if (d.length + f.length === 0) for (let k = 0; k < l.length; k++) {
let _ = k % h.length,
E = k % g.length,
R = r(h[_ * 2], h[_ * 2 + 1], g[E * 2], g[E * 2 + 1]);
l[k] = R.real, m[k] = R.imag;
} else for (let k = 0; k < l.length; k++) {
let _ = y.indexToLoc(k, u, c),
E = _.slice(-x);
d.forEach(M => E[M] = 0);
let R = y.locToIndex(E, x, b),
D = _.slice(-C);
f.forEach(M => D[M] = 0);
let P = y.locToIndex(D, C, S),
O = r(h[R * 2], h[R * 2 + 1], g[P * 2], g[P * 2 + 1]);
l[k] = O.real, m[k] = O.imag;
}
return [l, m, i];
};
}
// Add: real impl (zS), complex impl (P8), combined kernel (Oa) and its cpu
// registration record (JT).
var zS = ze((r, t8) => r + t8);
var P8 = Sc((r, t8, e, o) => ({
real: r + e,
imag: t8 + o
}));
var Oa = Ye(uo, zS, P8);
var JT = {
kernelName: uo,
backendName: "cpu",
kernelFunc: Oa
};
function Ic(r, t8, e, o, n) { | |
let s = y.sizeFromShape(o), | |
a = y.makeZerosTypedArray(n, e); | |
for (let i = 0; i < r.length; i++) { | |
let p = r[i]; | |
if (p < 0) throw new Error("Input x must be non-negative!"); | |
p >= n || (s > 0 ? a[p] += t8[i] : a[p] += 1); | |
} | |
return a; | |
} | |
// bincountReduceImpl: per-row bincount over a 2-D buffer r into an [rows, e]
// buffer; o (binaryOutput) writes 1 instead of counting/weight-summing.
function Nf(r, t8, e, o = false) {
let n = r.shape[0],
s = r.shape[1],
a = me([n, e], t8.dtype);
for (let i = 0; i < n; i++) for (let p = 0; p < s; p++) {
let u = r.get(i, p);
if (u < 0) throw new Error("Input x must be non-negative!");
u >= e || (o ? a.set(1, i, u) : t8.size > 0 ? a.set(a.get(i, u) + t8.get(i, p), i, u) : a.set(a.get(i, u) + 1, i, u));
}
return a;
}
// BitwiseAnd: element-wise r & t8, kernel and cpu registration record.
var VS = ze((r, t8) => r & t8);
var O8 = Ye(ja, VS);
var e_ = {
kernelName: ja,
backendName: "cpu",
kernelFunc: O8
};
// Builds a unary-op implementation from a scalar op r(value, attrs):
// returns (values, dtype, attrs) -> mapped values of the given dtype.
function Xt(r) {
  return (vals, dtype, attrs) => {
    const out = y.getArrayFromDType(dtype, vals.length);
    for (let i = 0; i < vals.length; ++i) {
      out[i] = r(vals[i], attrs);
    }
    return out;
  };
}
// Convenience: builds a cpu unary kernel straight from a scalar op by
// composing Xt (scalar -> array impl) with Ar (array impl -> kernel).
function Ie(r, t8, e) {
  return Ar(r, Xt(t8), e);
}
// Builds a cpu unary kernel from an implementation function t8; r is the op
// name (for the complex64 guard), e an optional forced output dtype. String
// tensors are decoded to JS strings before the impl runs.
function Ar(r, t8, e) {
return ({
inputs: o,
attrs: n,
backend: s
}) => {
let {
x: a
} = o;
Q(a, r);
let i = s,
p = i.data.get(a.dataId).values,
u;
if (a.dtype === "string") {
if (!Array.isArray(p)) throw new Error("String tensor's value was not an instance of Array");
u = w.fromUint8ToStringArray(p);
} else u = p;
let c = e || a.dtype,
l = t8(u, c, n);
return i.makeTensorInfo(a.shape, c, l);
};
}
// Ceil: unary Math.ceil impl, kernel and cpu registration record.
var WS = Xt(r => Math.ceil(r));
var M8 = Ar(tn, WS);
var t_ = {
kernelName: tn,
backendName: "cpu",
kernelFunc: M8
};
// concatImpl: concatenates inputs ({vals, shape} records) into one flat
// output of shape t8. When o is set (and dtype is not string) the inputs
// can simply be copied back-to-back; otherwise inputs are treated as 2-D
// [rows, cols] and written column-block by column-block.
function mp(r, t8, e, o) {
let n = y.getArrayFromDType(e, y.sizeFromShape(t8));
if (o && e !== "string") {
let s = 0;
r.forEach(a => {
let i = y.sizeFromShape(a.shape);
n.set(a.vals, s), s += i;
});
} else {
// s tracks the starting column of the current input within each output row.
let s = 0;
r.forEach(a => {
let i = e === "string" ? w.fromUint8ToStringArray(a.vals) : a.vals,
p = 0;
for (let u = 0; u < a.shape[0]; ++u) {
let c = u * t8[1] + s;
for (let l = 0; l < a.shape[1]; ++l) n[c + l] = i[p++];
}
s += a.shape[1];
});
}
return n;
}
// Equal: element-wise comparison producing a bool tensor, plus registration.
var US = ze((r, t8) => r === t8 ? 1 : 0);
var GS = Ye(yn, US, null, "bool");
var r_ = {
kernelName: yn,
backendName: "cpu",
kernelFunc: GS
};
// Exp: unary Math.exp impl with float32 output, plus cpu registration.
var HS = Xt(r => Math.exp(r));
var KS = Ar(bn, HS, "float32");
var o_ = {
kernelName: bn,
backendName: "cpu",
kernelFunc: KS
};
// Expm1: unary Math.expm1 impl plus cpu registration.
var qS = Xt(r => Math.expm1(r));
var L8 = Ar(Cn, qS);
var n_ = {
kernelName: Cn,
backendName: "cpu",
kernelFunc: L8
};
// Floor: unary Math.floor impl plus cpu registration.
var jS = Xt(r => Math.floor(r));
var B8 = Ar(Sn, jS);
var s_ = {
kernelName: Sn,
backendName: "cpu",
kernelFunc: B8
};
// FloorDiv: element-wise floor(a / b) with int32 output, plus registration.
var XS = ze((r, t8) => Math.floor(r / t8));
var z8 = Ye(In, XS, null, "int32");
var a_ = {
kernelName: In,
backendName: "cpu",
kernelFunc: z8
};
function Tf(r, t8, e, o, n, s, a, i, p) { | |
let u = me([o, s], e); | |
for (let c = 0; c < o; c++) { | |
let l = [], | |
m = 0; | |
for (let d = 0; d < n; d++) { | |
let f = r[c * n + d]; | |
m += f * a[d], l.push(f); | |
} | |
if (m < 0 || m >= p / s) throw new Error(`Invalid indices: ${l} does not index into ${i}`); | |
for (let d = 0; d < s; d++) u.values[c * s + d] = t8.get(...t8.indexToLoc(m * s + d)); | |
} | |
return u; | |
} | |
// gatherV2Impl: for each output location [batch, i, idx, ...] looks up the
// real source index from the indices buffer t8 at [batch, idx] and copies
// the corresponding value; out-of-range source indices leave zeros.
function _f(r, t8, e) {
let o = me(e, r.dtype);
for (let n = 0; n < o.size; ++n) {
let a = o.indexToLoc(n).slice(),
i = a[0],
p = a[2],
u = t8.locToIndex([i, p]);
a[2] = t8.values[u];
let c = r.locToIndex(a);
0 <= c && c < r.values.length && (o.values[n] = r.values[c]);
}
return o;
}
// Greater: element-wise comparison with bool output, plus registration.
var YS = ze((r, t8) => r > t8 ? 1 : 0);
var V8 = Ye(Nn, YS, null, "bool");
var i_ = {
kernelName: Nn,
backendName: "cpu",
kernelFunc: V8
};
// GreaterEqual: element-wise comparison with bool output, plus registration.
var QS = ze((r, t8) => r >= t8 ? 1 : 0);
var W8 = Ye(Tn, QS, null, "bool");
var u_ = {
kernelName: Tn,
backendName: "cpu",
kernelFunc: W8
};
// Less: element-wise comparison with bool output, plus registration.
var ZS = ze((r, t8) => r < t8 ? 1 : 0);
var U8 = Ye(Dn, ZS, null, "bool");
var p_ = {
kernelName: Dn,
backendName: "cpu",
kernelFunc: U8
};
// LessEqual: element-wise comparison with bool output, plus registration.
var JS = ze((r, t8) => r <= t8 ? 1 : 0);
var G8 = Ye(An, JS, null, "bool");
var c_ = {
kernelName: An,
backendName: "cpu",
kernelFunc: G8
};
// linSpaceImpl: e evenly spaced float32 values from r to t8 inclusive,
// built by cumulative addition of the step.
function $f(r, t8, e) {
  const step = (t8 - r) / (e - 1);
  const values = y.makeZerosTypedArray(e, "float32");
  values[0] = r;
  for (let i = 1; i < values.length; i++) {
    values[i] = values[i - 1] + step;
  }
  return values;
}
// Log: unary Math.log impl plus cpu registration.
var eI = Xt(r => Math.log(r));
var H8 = Ar(Pn, eI);
var l_ = {
kernelName: Pn,
backendName: "cpu",
kernelFunc: H8
};
// maxImpl: reduces each contiguous group of t8 values of r to its maximum.
// Note the NaN handling: a NaN in the group wins (and once picked, every
// later `u > NaN` comparison is false, so NaN propagates to the output).
function Ef(r, t8, e, o) {
let n = y.getTypedArrayFromDType(o, y.sizeFromShape(e));
for (let s = 0; s < n.length; ++s) {
let a = s * t8,
i = r[a];
for (let p = 0; p < t8; ++p) {
let u = r[a + p];
(Number.isNaN(u) || u > i) && (i = u);
}
n[s] = i;
}
return n;
}
// Maximum: element-wise Math.max plus cpu registration.
var tI = ze((r, t8) => Math.max(r, t8));
var K8 = Ye(Wn, tI);
var m_ = {
kernelName: Wn,
backendName: "cpu",
kernelFunc: K8
};
// Minimum: element-wise Math.min plus cpu registration.
var rI = ze((r, t8) => Math.min(r, t8));
var q8 = Ye(Kn, rI);
var d_ = {
kernelName: Kn,
backendName: "cpu",
kernelFunc: q8
};
// Multiply: real impl (zl), complex impl (j8, standard complex product),
// combined kernel (dp) and cpu registration record (f_).
var zl = ze((r, t8) => r * t8);
var j8 = Sc((r, t8, e, o) => ({
real: r * e - t8 * o,
imag: r * o + t8 * e
}));
var dp = Ye(Yn, zl, j8);
var f_ = {
kernelName: Yn,
backendName: "cpu",
kernelFunc: dp
};
// negImpl: computes -x as a broadcast multiply of the values by a scalar -1.
function oI(r, t8, e) {
let o = y.createScalarValue(-1, e);
return zl([], t8, o, r, e);
}
// cpu Neg kernel: negates the input's backing values via oI and wraps the
// result in a new tensor info of the same dtype.
function X8(r) {
  const {
    inputs,
    backend
  } = r;
  const {
    x
  } = inputs;
  Q(x, "neg");
  const xVals = backend.data.get(x.dataId).values;
  const [outVals, outShape] = oI(xVals, x.shape, x.dtype);
  return backend.makeTensorInfo(outShape, x.dtype, outVals);
}
// cpu kernel registration record for Neg (kernel name constant pa).
var h_ = {
kernelName: pa,
backendName: "cpu",
kernelFunc: X8
};
// NotEqual: element-wise comparison with bool output, plus registration.
var nI = ze((r, t8) => r !== t8 ? 1 : 0);
var Y8 = Ye(Qn, nI, null, "bool");
var g_ = {
kernelName: Qn,
backendName: "cpu",
kernelFunc: Y8
};
// transposeImpl: permutes values r of shape t8 into new shape n according to
// the permutation o, by mapping each input location to its permuted output
// location.
function vc(r, t8, e, o, n) {
let s = t8.length,
a = y.sizeFromShape(t8),
i = y.computeStrides(t8),
p = y.computeStrides(n),
u = y.getTypedArrayFromDType(e, y.sizeFromShape(n));
for (let c = 0; c < a; ++c) {
let l = y.indexToLoc(c, s, i),
m = new Array(l.length);
for (let f = 0; f < m.length; f++) m[f] = l[o[f]];
let d = y.locToIndex(m, s, p);
u[d] = r[c];
}
return u;
}
// cpu Transpose kernel: computes the permuted output shape from attrs.perm,
// transposes the backing values with vc, and writes a new tensor.
function It(r) {
let {
inputs: t8,
attrs: e,
backend: o
} = r,
{
x: n
} = t8,
{
perm: s
} = e;
Q(n, "transpose");
let a = n.shape.length,
i = new Array(a);
for (let l = 0; l < i.length; l++) i[l] = n.shape[s[l]];
let p = o.data.get(n.dataId).values,
u = vc(p, n.shape, n.dtype, s, i);
return {
dataId: o.write(u, i, n.dtype),
shape: i,
dtype: n.dtype
};
}
// cpu kernel registration record for Transpose (kernel name constant co).
var x_ = {
kernelName: co,
backendName: "cpu",
kernelFunc: It
};
// prodImpl: reduces shape r over axes o by multiplying each inner run of
// values; output dtype is the input dtype upgraded against int32 (dt).
function sI(r, t8, e, o) {
let [n, s] = w.computeOutAndReduceShapes(r, o),
a = dt(t8, "int32"),
i = y.makeZerosTypedArray(y.sizeFromShape(n), a),
p = y.sizeFromShape(s);
for (let u = 0; u < i.length; ++u) {
let c = u * p,
l = 1;
for (let m = 0; m < p; ++m) l *= e[c + m];
i[u] = l;
}
return {
outVals: i,
outShape: n,
outDtype: a
};
}
// cpu Prod kernel: transposes the reduced axes to the innermost positions if
// necessary, runs prodImpl, optionally re-expands kept dims, and cleans up
// the intermediate transposed tensor.
function Q8(r) {
let {
inputs: t8,
backend: e,
attrs: o
} = r,
{
x: n
} = t8,
{
axis: s,
keepDims: a
} = o;
Q(n, "prod");
let i = n.shape.length,
p = y.parseAxisParam(s, n.shape),
u = w.getAxesPermutation(p, i),
c = p,
l = n,
m = [];
u != null && (l = It({
inputs: {
x: n
},
backend: e,
attrs: {
perm: u
}
}), m.push(l), c = w.getInnerMostAxes(c.length, i));
let d = e.data.get(l.dataId).values,
{
outVals: f,
outShape: h,
outDtype: g
} = sI(l.shape, l.dtype, d, c),
x = h;
return a && (x = w.expandShapeToKeepDim(h, p)), m.forEach(b => e.disposeIntermediateTensorInfo(b)), e.makeTensorInfo(x, g, f);
}
// cpu kernel registration record for Prod (kernel name constant ns).
var y_ = {
kernelName: ns,
backendName: "cpu",
kernelFunc: Q8
};
// Validates gather indices: every value of r (shape t8) must lie in [0, e);
// the error message reports the multi-dimensional location of the offender.
function Z8(r, t8, e) {
  r.forEach((index, i) => {
    if (index >= 0 && index < e) {
      return;
    }
    const s = y.indexToLoc(i, t8.length, y.computeStrides(t8)).join(",");
    throw new Error(`indices[${s}] = ${index} is not in [0, ${e})`);
  });
}
// Validates nested ragged row splits: each splits array must be non-empty,
// start non-negatively, be sorted ascending, and not point past the length
// of the next level (or past t8 = numValues for the innermost level).
function J8(r, t8) {
  r.forEach((splits, level) => {
    const limit = level === r.length - 1 ? t8 : r[level + 1].length;
    if (splits.length === 0) {
      throw new Error("Ragged splits may not be empty");
    }
    if (splits[0] < 0) {
      throw new Error("Ragged splits must be non-negative");
    }
    if (splits[splits.length - 1] > limit) {
      throw new Error("Ragged splits must not point past values");
    }
    for (let j = 1; j < splits.length; ++j) {
      if (splits[j - 1] > splits[j]) {
        throw new Error("Ragged splits must be sorted in ascending order");
      }
    }
  });
}
// Ragged-gather bookkeeping: given gathered indices r, params dense shape
// prefix t8, nested splits e and numValues o, computes the output splits per
// dimension, the [begin, end) value slices to copy, and the total value
// count. Mirrors TF's MakeSplits for RaggedGather.
function eY(r, t8, e, o) {
let n = [],
s = 0,
a = t8.length - 1 + e.length,
i = new Array(a).fill(null).map(() => [0]);
J8(e, o);
let p = 1;
// Seed splits for the uniform (dense) outer dimensions.
for (let u = 0; u < t8.length - 1; ++u) {
p *= t8[u];
let c = t8[u + 1];
for (let l = 1; l < p + 1; ++l) i[u].push(l * c);
}
// Walk each gathered index through the nested splits, accumulating output
// splits and the contiguous value ranges to copy.
for (let u = 0; u < r.length; ++u) {
let c = r[u],
l = r[u] + 1;
for (let m = 0; m < e.length; ++m) {
let d = e[m],
f = m + t8.length - 1;
if (f >= 0) {
let h = i[f],
g = h[h.length - 1] - d[c];
for (let x = c; x < l; ++x) i[f].push(d[x + 1] + g);
}
c = d[c], l = d[l];
}
l !== c && (n.push([c, l]), s += l - c);
}
return {
outSplits: i,
valueSlices: n,
numValues: s
};
}
// Converts number[][] split lists into int32 typed arrays, one per level.
function tY(r) {
let t8 = [];
for (let e = 0; e < r.length; ++e) {
let o = r[e].length,
n = y.getArrayFromDType("int32", o);
t8.push(n), r[e].forEach((s, a) => n[a] = s);
}
return t8;
}
// Flattens shape r to exactly t8 dims: pads with 1s when too short, folds
// all trailing dims into the last position when too long.
function b_(r, t8) {
  const flat = r.slice(0, t8);
  while (flat.length < t8) {
    flat.push(1);
  }
  for (let axis = t8; axis < r.length; ++axis) {
    flat[t8 - 1] *= r[axis];
  }
  return flat;
}
// Copies the selected [begin, end) row slices of r (shape t8, flattened to
// 2-D via b_) into output n (shape s), o values per row.
function rY(r, t8, e, o, n, s) {
let a = b_(t8, 2)[1],
i = b_(s, 2)[1],
p = 0;
for (let u of e) for (let c = u[0]; c < u[1]; ++c) {
for (let l = 0; l < o; ++l) n[p * i + l] = r[c * a + l];
++p;
}
}
// Gathers the ragged values: allocates an output with first dim n (the new
// value count), copies the slices o out of r, returns [values, shape].
function oY(r, t8, e, o, n) {
let s = t8.slice();
s[0] = n;
let a = y.getArrayFromDType(e, y.sizeFromShape(s)),
i = r.length,
p = i === 0 ? 0 : i / t8[0];
return rY(r, t8, o, p, a, s), [a, s];
}
function Rf(r, t8, e, o, n, s, a, i) { | |
if (r.length === 0) throw new Error("paramsNestedSplits must be non empty"); | |
if (t8[0].length === 0) throw new Error("Split tensors must not be scalars"); | |
let p = t8[0][0] - 1; | |
if (Z8(s, a, p), o.length === 0) throw new Error("params.rank must be nonzero"); | |
let u = o[0], | |
{ | |
outSplits: c, | |
valueSlices: l, | |
numValues: m | |
} = eY(s, a, r, u), | |
d = tY(c), | |
f = oY(e, o, n, l, m); | |
return [d, f[0], f[1]]; | |
} | |
var C_ = 2147483647; | |
// raggedRangeImpl: builds ragged ranges from (possibly scalar) starts r,
// limits o and deltas s; returns [rtNestedSplits, rtDenseValues]. Scalar
// arguments broadcast over all rows; each row's length is
// ceil(|limit - start| / delta), clamped directionally to zero.
function Df(r, t8, e, o, n, s, a) {
if (t8.length > 1) throw new Error("starts must be a scalar or vector");
if (n.length > 1) throw new Error("limits must be a scalar or vector");
if (a.length > 1) throw new Error("deltas must be a scalar or vector");
let i = t8.length === 0,
p = n.length === 0,
u = a.length === 0,
c = [];
i || c.push(t8[0]), p || c.push(n[0]), u || c.push(a[0]);
for (let g = 1; g < c.length; ++g) if (c[g] !== c[g - 1]) throw new Error("starts, limits, and deltas must have the same shape");
let l = c.length === 0 ? 1 : c[0],
m = y.getArrayFromDType("int32", l + 1);
m[0] = 0;
// First pass: compute the splits (cumulative row lengths).
for (let g = 0; g < l; ++g) {
let x = i ? r[0] : r[g],
b = p ? o[0] : o[g],
C = u ? s[0] : s[g];
if (C === 0) throw new Error("Requires delta != 0");
let S;
if (C > 0 && b < x || C < 0 && b > x) S = 0;else if (S = Math.ceil(Math.abs((b - x) / C)), S > C_) throw new Error(`Requires ((limit - start) / delta) <= ${C_}`);
m[g + 1] = m[g] + S;
}
let d = m[l],
f = y.getArrayFromDType(e, d),
h = 0;
// Second pass: materialize the values.
for (let g = 0; g < l; ++g) {
let x = m[g + 1] - m[g],
b = i ? r[0] : r[g],
C = u ? s[0] : s[g];
for (let S = 0; S < x; ++S) f[h++] = b, b += C;
}
return [m, f];
}
var Ao = w.RowPartitionType; | |
// RaggedTensorToTensor CPU implementation: converts a ragged tensor (values
// plus row-partition tensors) into a dense tensor, padding with defaultValue.
// compute() returns [outputShape, outputValues].
var kc = class {
constructor(t8, e, o, n, s, a, i, p, u, c) {
this.shape = t8, this.shapeShape = e, this.values = o, this.valuesShape = n, this.valuesDType = s, this.defaultValue = a, this.defaultValueShape = i, this.rowPartitionValues = p, this.rowPartitionValuesShapes = u, this.rowPartitionTypes = w.getRowPartitionTypesHelper(c), this.raggedRank = w.getRaggedRank(this.rowPartitionTypes);
}
// When the first entry is FIRST_DIM_SIZE, partition tensors are offset by one.
getRowPartitionTypeByDimension(t8) {
return this.rowPartitionTypes[0] === Ao.FIRST_DIM_SIZE ? this.rowPartitionTypes[t8 + 1] : this.rowPartitionTypes[t8];
}
getRowPartitionTensor(t8) {
return this.rowPartitionTypes[0] === Ao.FIRST_DIM_SIZE ? this.rowPartitionValues[t8 + 1] : this.rowPartitionValues[t8];
}
// Widest row of dimension t8, used to size unknown (-1) output dims.
getMaxWidth(t8) {
let e = this.getRowPartitionTensor(t8 - 1);
switch (this.getRowPartitionTypeByDimension(t8 - 1)) {
case Ao.VALUE_ROWIDS:
return kc.getMaxWidthValueRowID(e);
case Ao.ROW_SPLITS:
return kc.getMaxWidthRowSplit(e);
default:
throw new Error(`Cannot handle partition type ${Ao[this.getRowPartitionTypeByDimension(t8 - 1)]}`);
}
}
static getMaxWidthRowSplit(t8) {
let e = t8.length;
if (e === 0 || e === 1) return 0;
let o = 0;
for (let n = 0; n < e - 1; ++n) {
let s = t8[n + 1] - t8[n];
s > o && (o = s);
}
return o;
}
static getMaxWidthValueRowID(t8) {
let e = t8.length;
if (e === 0) return 0;
// Track the longest run of equal row ids.
let o = 0,
n = t8[0],
s = 0;
for (let a = 1; a < e; ++a) {
let i = t8[a];
i !== n && (n = i, s = Math.max(a - o, s), o = a);
}
return Math.max(e - o, s);
}
tensorShapeFromTensor(t8, e, o = true) {
if (e.length === 0) {
if (t8[0] === -1) return [];
throw new Error("The only valid scalar shape tensor is the fully unknown shape specified as -1.");
}
return S_(t8, o);
}
calculateOutputSize(t8) {
let e = this.valuesShape,
o = this.defaultValueShape;
w.validateDefaultValueShape(o, e);
let n = this.tensorShapeFromTensor(this.shape, this.shapeShape),
a = w.combineRaggedTensorToTensorShapes(this.raggedRank, n, e);
// Unknown (-1) dims are filled from the first-dim size / max row widths.
a[0] < 0 && (a[0] = t8);
for (let i = 1; i <= this.raggedRank; ++i) a[i] < 0 && (a[i] = this.getMaxWidth(i));
return a;
}
// Output indices for the outermost dimension; -1 marks dropped rows.
calculateFirstParentOutputIndex(t8, e, o) {
let n = Math.min(t8, o),
s = [],
a = 0;
for (let i = 0; i < n; ++i, a += e) s.push(a);
for (let i = n; i < t8; ++i) s.push(-1);
return y.assert(s.length === t8, () => "Final length of result must be equal to firstDimension."), s;
}
calculateOutputIndexRowSplit(t8, e, o, n) {
let s = t8.length,
a = [];
for (let i = 0; i < s - 1; ++i) {
let p = t8[i + 1] - t8[i],
u = Math.min(n, p),
c = e[i];
c === -1 && (u = 0);
for (let l = 0; l < u; ++l) a.push(c), c += o;
for (let l = 0; l < p - u; ++l) a.push(-1);
}
if (s > 0 && a.length !== t8[s - 1]) throw new Error("Invalid row split size.");
return a;
}
calculateOutputIndexValueRowID(t8, e, o, n) {
let s = t8.length,
a = [];
if (s === 0) return [];
let i = 0,
p = t8[0];
if (p >= e.length) throw new Error(`Got currentValueRowId=${p}, which is not less than ${e.length}`);
let u = e[p];
a.push(u);
for (let c = 1; c < s; ++c) {
let l = t8[c];
if (l === p) u >= 0 && (++i, i < n ? u += o : u = -1);else {
if (i = 0, p = l, l >= e.length) throw new Error(`Got nextValueRowId=${l} which is not less than ${e.length}`);
u = e[l];
}
a.push(u);
}
if (a.length !== t8.length) throw new Error("Invalid row ids.");
return a;
}
calculateOutputIndex(t8, e, o, n) {
let s = this.getRowPartitionTensor(t8),
a = this.getRowPartitionTypeByDimension(t8);
switch (a) {
case Ao.VALUE_ROWIDS:
return this.calculateOutputIndexValueRowID(s, e, o, n);
case Ao.ROW_SPLITS:
if (s.length - 1 > e.length) throw new Error(`Row partition size is greater than output size: ${s.length - 1} > ${e.length}`);
return this.calculateOutputIndexRowSplit(s, e, o, n);
default:
throw new Error(`Unsupported partition type: ${Ao[a]}`);
}
}
getFirstDimensionSize() {
let t8 = this.rowPartitionValues[0];
if (this.rowPartitionTypes.length === 0) throw new Error("No row_partition_types given.");
let e = this.rowPartitionTypes[0];
switch (e) {
case Ao.FIRST_DIM_SIZE:
return t8[0];
case Ao.VALUE_ROWIDS:
throw new Error("Cannot handle VALUE_ROWIDS in first dimension.");
case Ao.ROW_SPLITS:
return this.rowPartitionValuesShapes[0][0] - 1;
default:
throw new Error(`Cannot handle type ${Ao[e]}`);
}
}
compute() {
if (this.rowPartitionValues[0].length <= 0) throw new Error("Invalid first partition input. Tensor requires at least one element.");
let e = this.getFirstDimensionSize(),
o = this.calculateOutputSize(e),
n = new Array(this.raggedRank + 1);
// n = strides (multipliers) for each ragged dimension of the output.
n[n.length - 1] = 1;
for (let p = n.length - 2; p >= 0; --p) n[p] = n[p + 1] * o[p + 1];
let s = S_(o, false),
a = y.getArrayFromDType(this.valuesDType, y.sizeFromShape(s));
if (n[0] * o[0] > 0) {
let p = this.calculateFirstParentOutputIndex(e, n[0], o[0]);
for (let u = 1; u <= this.raggedRank; ++u) p = this.calculateOutputIndex(u - 1, p, n[u], o[u]);
this.setOutput(this.raggedRank, p, a, s);
}
return [s, a];
}
// Copies value slices to their computed output offsets, filling gaps (and
// the tail) with defaultValue; -1 indices are skipped.
setOutput(t8, e, o, n) {
if (o.length === 0) return;
let s = this.values,
a = o,
i = n.slice();
i = i.slice(t8 + 1);
let p = y.sizeFromShape(i),
u = e.length,
c = this.defaultValue;
if (c.length !== p && c.length !== 1) {
// Broadcast the default value to the element size (uses tensor ops).
let f = this.defaultValueShape;
De(() => {
let h = W(c, f);
c = iu(h, i).dataSync();
});
}
let l = 0,
m = 0,
d = 0;
for (let f = 0; f <= u; ++f) {
let h = f < u ? e[f] : -1;
if (h === d) {
++d;
continue;
}
if (m < d) {
let g = s.subarray(l * p),
x = a.subarray(m * p),
b = (d - m) * p;
w_(x, g, b);
}
if (f >= u) {
let g = o.length;
h = Math.floor(g / p);
}
if (h > d) if (this.defaultValue.length === 1) a.subarray(d * p, h * p).fill(this.defaultValue[0]), d = h;else for (; h > d;) {
let g = a.slice(d * p);
w_(g, c, p), ++d;
}
h < 0 ? (l = f + 1, m = d) : (l = f, m = d, d = m + 1);
}
}
};
// Element-wise memcpy: copy the first `e` entries of `t8` into `r`.
function w_(r, t8, e) {
  let i = 0;
  while (i < e) {
    r[i] = t8[i];
    i += 1;
  }
}
// Validate a shape array: each dimension must be >= 0, or -1 when `t8`
// (unknown-dimension allowed) is true. Any allowed negative dimension is
// normalized to -1 in the returned copy; the input is never mutated.
function S_(r, t8) {
  const result = [];
  for (const dim of r) {
    if (dim >= 0) {
      result.push(dim);
      continue;
    }
    if (!t8) throw new Error(`Dimension ${dim} must be >= 0`);
    if (dim < -1) throw new Error(`Dimension ${dim} must be >= -1`);
    result.push(-1);
  }
  return result;
}
// Thin wrapper: run one ragged-to-dense computation via the kc class
// (constructed with the same ten positional arguments) and return its
// [shape, values] result.
function Af(r, t8, e, o, n, s, a, i, p, u) {
  const op = new kc(r, t8, e, o, n, s, a, i, p, u);
  return op.compute();
}
// Range implementation: build a typed array [r, r+e, r+2e, ...) up to t8.
// Degenerate ranges (start === stop, or a step pointing away from stop)
// yield an empty array. NOTE(review): the `e > 1` guard (not `e > 0`)
// mirrors upstream tfjs rangeImpl; a decreasing range with step 1 is
// deliberately normalized to step -1 below.
function fp(r, t8, e, o) {
  const sameStartStop = r === t8;
  const increasingWithNegativeStep = r < t8 && e < 0;
  const decreasingWithPositiveStep = t8 < r && e > 1;
  if (sameStartStop || increasingWithNegativeStep || decreasingWithPositiveStep) {
    return y.makeZerosTypedArray(0, o);
  }
  const numElements = Math.abs(Math.ceil((t8 - r) / e));
  const values = y.makeZerosTypedArray(numElements, o);
  if (t8 < r && e === 1) {
    e = -1;
  }
  values[0] = r;
  for (let idx = 1; idx < values.length; idx++) {
    values[idx] = values[idx - 1] + e;
  }
  return values;
}
// Rsqrt (1/sqrt(x)) CPU kernel: element-wise impl plus the kernel
// registration config object (kernelName `ms` presumably maps to the
// Rsqrt kernel name constant — minified).
var aI = Xt(r => 1 / Math.sqrt(r));
var nY = Ar(ms, aI);
var I_ = {
  kernelName: ms,
  backendName: "cpu",
  kernelFunc: nY
};
// Scatter implementation: writes slices of the updates tensor t8 into a
// [o/n, n]-shaped buffer at rows computed from the indices tensor r.
// p seeds the output (existing buffer or scalar fill); u selects
// accumulate (+=) versus overwrite semantics.
function zs(r, t8, e, o, n, s, a, i, p, u) {
  let c = [o / n, n],
    l = r.values,
    m = t8.values;
  if (o === 0) return me(e, t8.dtype);
  // Reuse an existing buffer when p is one, otherwise allocate and fill
  // with the scalar default (booleans coerced to 0/1 via unary +).
  let d = p instanceof tt ? p : me(c, t8.dtype);
  typeof p == "string" || typeof p == "number" ? d.values.fill(p) : typeof p == "boolean" && d.values.fill(+p);
  for (let f = 0; f < s; f++) {
    // Flatten the a-dimensional index at row f using strides i.
    let h = [],
      g = 0;
    for (let x = 0; x < a; x++) {
      let b = l[f * a + x];
      h.push(b), g += b * i[x];
    }
    if (g < 0 || g >= o / n) throw new Error(`Invalid indices: ${h} does not index into ${e}`);
    for (let x = 0; x < n; x++) u ? d.values[g * n + x] += m[f * n + x] : d.values[g * n + x] = t8.rank === 0 ? m[0] : m[f * n + x];
  }
  return d;
}
// Sigmoid (1/(1+e^-x)) CPU kernel: element-wise impl (v_), unary kernel
// func (iI, reused by the fused-activation dispatcher below), and the
// registration config.
var v_ = Xt(r => 1 / (1 + Math.exp(-r)));
var iI = Ie(Cs, r => 1 / (1 + Math.exp(-r)));
var k_ = {
  kernelName: Cs,
  backendName: "cpu",
  kernelFunc: iI
};
// Slice implementation: extract the region starting at `t8` with size `e`
// from a flat buffer `r` of shape `o` and dtype `n`. Contiguous slices
// take a fast subarray path; otherwise elements are copied one by one.
function hp(r, t8, e, o, n) {
  let s = ct.isSliceContinous(o, t8, e),
    a = y.sizeFromShape(e),
    i = y.computeStrides(o);
  if (s) {
    // Fast path: the slice is one contiguous run in the flat buffer.
    let l = ct.computeFlatOffset(t8, i);
    return n === "string" ? r.slice(l, l + a) : r.subarray(l, l + a);
  }
  // Slow path: decode strings if needed, then gather element-by-element.
  let p = n === "string" ? w.fromUint8ToStringArray(r) : r,
    u = me(o, n, p),
    c = me(e, n);
  for (let l = 0; l < c.size; ++l) {
    let m = c.indexToLoc(l),
      d = m.map((f, h) => f + t8[h]);
    c.set(u.get(...d), ...m);
  }
  return n === "string" ? w.fromStringArrayToUint8(c.values) : c.values;
}
// Slice CPU kernel entry point: normalizes begin/size params, validates
// them, and delegates to hp(). Registered below via the N_ config.
function Fo(r) {
  let {
      inputs: t8,
      backend: e,
      attrs: o
    } = r,
    {
      x: n
    } = t8,
    {
      begin: s,
      size: a
    } = o;
  Q(n, "slice");
  let [i, p] = ct.parseSliceParams(n, s, a);
  ct.assertParamsValid(n, i, p);
  let u = e.data.get(n.dataId).values,
    c = hp(u, i, p, n.shape, n.dtype);
  return e.makeTensorInfo(p, n.dtype, c);
}
// Kernel registration config for Slice on the CPU backend.
var N_ = {
  kernelName: ha,
  backendName: "cpu",
  kernelFunc: Fo
};
// sparseFillEmptyRows implementation (grounded by the error-helper names).
// Fills rows of a sparse tensor that have no entries with the default
// value `a`; returns [outputIndices, outputIndicesShape, outputValues,
// emptyRowIndicator, reverseIndexMap].
function Ff(r, t8, e, o, n, s, a) {
  let i = t8[0],
    p = s[0],
    u = new Array(p),
    c = new Array(i),
    l = t8[1];
  // Degenerate dense shape: zero rows.
  if (p === 0) {
    if (i !== 0) throw new Error(w.getSparseFillEmptyRowsIndicesDenseShapeMismatch(i));
    let g = y.getArrayFromDType(e, 0),
      x = y.getArrayFromDType(n, 0);
    return [g, [0, l], x, u, c];
  }
  // First pass: count entries per row, validate row ids, and track
  // whether the input rows are already in non-decreasing order (m).
  let m = true,
    d = 0,
    f = new Array(p).fill(0);
  for (let g = 0; g < i; ++g) {
    let x = r[g * l];
    if (x < 0) throw new Error(w.getSparseFillEmptyRowsNegativeIndexErrorMessage(g, x));
    if (x >= p) throw new Error(w.getSparseFillEmptyRowsOutOfRangeIndexErrorMessage(g, x, p));
    ++f[x], m = m && x >= d, d = x;
  }
  // Mark empty rows and build a running prefix sum of row counts in f.
  let h = true;
  for (let g = 0; g < p; ++g) {
    let x = f[g] === 0;
    u[g] = x, h = h && !x, f[g] = Math.max(f[g], 1), g > 0 && (f[g] += f[g - 1]);
  }
  if (h && m) {
    // Fast path: no empty rows and input already ordered — passthrough.
    let g = r,
      x = o;
    for (let b = 0; b < i; ++b) c[b] = b;
    return [g, [i, l], x, u, c];
  } else {
    // Slow path: scatter existing entries to their prefix-sum positions,
    // then insert one default-valued entry per empty row.
    let g = f[p - 1],
      x = y.getArrayFromDType(e, g * l),
      b = y.getArrayFromDType(n, g),
      C = new Array(p).fill(0);
    for (let S = 0; S < i; ++S) {
      let k = r[S * l],
        _ = C[k],
        E = (k === 0 ? 0 : f[k - 1]) + _;
      C[k]++;
      for (let R = 0; R < l; ++R) x[E * l + R] = r[S * l + R];
      b[E] = o[S], c[S] = E;
    }
    for (let S = 0; S < p; ++S) if (C[S] === 0) {
      let _ = S === 0 ? 0 : f[S - 1];
      x[_ * l + 0] = S;
      for (let E = 1; E < l; ++E) x[_ * l + E] = 0;
      b[_] = a;
    }
    return [x, [g, l], b, u, c];
  }
}
// sparseReshape implementation (grounded by the error-helper names).
// Maps sparse indices r (shape t8) from input dense shape o to the new
// shape n, resolving a single -1 output dimension; returns
// [newIndices, newIndicesShape, resolvedOutputShape].
function Pf(r, t8, e, o, n) {
  let s = y.sizeFromShape(o),
    a = t8[0],
    i = n.length,
    p = [],
    u = 1,
    c = -1;
  // Scan the requested output shape, tracking the product of known dims
  // (u) and the position of the single allowed -1 (c).
  for (let g = 0; g < i; ++g) {
    let x = n[g];
    if (x === -1) {
      if (c !== -1) throw new Error(w.getSparseReshapeMultipleNegativeOneOutputDimErrorMessage(c, g));
      c = g, p.push(1);
    } else {
      if (x < 0) throw new Error(w.getSparseReshapeNegativeOutputDimErrorMessage(g, x));
      u *= x, p.push(x);
    }
  }
  // Resolve the inferred dimension and check total sizes match.
  if (c !== -1) {
    if (u <= 0) throw new Error(w.getSparseReshapeEmptyTensorZeroOutputDimErrorMessage());
    let g = Math.trunc(s / u);
    if (u * g !== s) throw new Error(w.getSparseReshapeInputOutputMultipleErrorMessage(o, p));
    p[c] = g;
  }
  if (y.sizeFromShape(p) !== s) throw new Error(w.getSparseReshapeInputOutputMismatchErrorMessage(o, p));
  // Row-major strides for the input (d) and output (f) shapes.
  let m = o.length,
    d = [];
  if (m > 0) {
    d[m - 1] = 1;
    for (let g = m - 2; g >= 0; --g) d[g] = d[g + 1] * o[g + 1];
  }
  let f = [];
  if (i > 0) {
    f[i - 1] = 1;
    for (let g = i - 2; g >= 0; --g) f[g] = f[g + 1] * p[g + 1];
  }
  // Re-encode each index: flatten with input strides, unflatten with
  // output strides.
  let h = y.getArrayFromDType(e, a * i);
  for (let g = 0; g < a; ++g) {
    let x = 0;
    for (let b = 0; b < m; ++b) x += r[g * m + b] * d[b];
    for (let b = 0; b < i; ++b) h[g * i + b] = Math.trunc(x / f[b]), x %= f[b];
  }
  return [h, [a, i], p];
}
// Sparse segment reduction (sum, or mean when s=true) over rows of r
// selected by indices o, grouped by sorted segment ids n. `a` is the
// fill value for empty segments. Returns [outputValues, outputShape].
function Nc(r, t8, e, o, n, s = false, a = 0) {
  let i = o.length,
    p = [t8[0], r.length / t8[0]],
    u = p[1],
    l = i > 0 ? n[i - 1] + 1 : 0;
  if (l < 0) throw new Error(w.getSparseSegmentReductionNegativeSegmentIdsErrorMessage());
  let m = t8.slice();
  m[0] = l;
  let d = m.reduce((C, S) => C * S, 1),
    f = y.getArrayFromDType(e, d);
  // No indices: output is all fill value (if any segments exist).
  if (i === 0) return l > 0 && f.fill(a), [f, m];
  if (l <= 0) throw new Error(w.getSparseSegmentReductionNegativeSegmentIdsErrorMessage());
  // h..g-1: the run of indices sharing segment id b; x: next output row
  // to fill (used to back-fill skipped/empty segments with `a`).
  let h = 0,
    g = 1,
    x = 0,
    b = n[h];
  for (;;) {
    let C = 0;
    if (g < i) {
      if (C = n[g], b === C) {
        ++g;
        continue;
      }
      // Segment ids must be strictly increasing across runs.
      if (b >= C) throw new Error(w.getSparseSegmentReductionNonIncreasingSegmentIdsErrorMessage());
    }
    if (b < 0 || b >= l) throw new Error(w.getSparseSegmentReductionSegmentIdOutOfRangeErrorMessage(b, l));
    b > x && f.fill(a, x * u, b * u);
    // Accumulate all selected rows of this segment.
    for (let S = h; S < g; ++S) {
      let k = o[S];
      if (k < 0 || k >= p[0]) throw new Error(w.getSparseSegmentReductionIndicesOutOfRangeErrorMessage(S, o[S], p[0]));
      for (let _ = 0; _ < u; _++) f[b * u + _] += r[k * u + _];
    }
    // Mean mode: divide by the number of contributing rows.
    if (s) for (let S = 0; S < u; S++) f[b * u + S] /= g - h;
    if (h = g, ++g, x = b + 1, b = C, g > i) break;
  }
  // Fill any trailing empty segments.
  return x < l && f.fill(a, x * u, l * u), [f, m];
}
// Sqrt CPU kernel: element-wise impl and registration config.
var T_ = Xt(r => Math.sqrt(r));
var sY = Ie(Ss, r => Math.sqrt(r));
var __ = {
  kernelName: Ss,
  backendName: "cpu",
  kernelFunc: sY
};
// SquaredDifference ((a-b)^2) CPU kernel: binary impl and registration.
var uI = ze((r, t8) => {
  let e = r - t8;
  return e * e;
});
var aY = Ye(Ns, uI);
var $_ = {
  kernelName: Ns,
  backendName: "cpu",
  kernelFunc: aY
};
// StaticRegexReplace CPU kernel: per-string regex replacement driven by
// the attrs {pattern, replaceGlobal, rewrite}, plus registration config.
var pI = Xt((r, t8) => {
  let {
    pattern: e,
    replaceGlobal: o,
    rewrite: n
  } = t8;
  // replaceGlobal toggles the "g" flag on the compiled pattern.
  return r.replace(new RegExp(e, o ? "g" : ""), n);
});
var iY = Ar(Ou, pI);
var E_ = {
  kernelName: Ou,
  backendName: "cpu",
  kernelFunc: iY
};
// Strided gather: build a buffer of shape r whose element at location a
// is read from t8 at (a * strides e + begin o) per dimension — the core
// of a strided-slice style copy. NOTE(review): exact kernel this serves
// (stridedSlice) inferred from the stride/begin pattern — confirm.
function Of(r, t8, e, o) {
  let n = me(r, t8.dtype);
  for (let s = 0; s < n.size; s++) {
    let a = n.indexToLoc(s),
      i = new Array(a.length);
    for (let p = 0; p < i.length; p++) i[p] = a[p] * e[p] + o[p];
    n.set(t8.get(...i), ...a);
  }
  return n;
}
// String n-grams generator (operates on UTF-8 byte arrays). Configured
// with a separator, the n-gram widths to emit, left/right pad strings,
// a pad width (-1 = full padding), and a preserve-short-sequences flag.
var cI = class {
  constructor(t8, e, o, n, s, a) {
    this.separator = y.encodeString(t8), this.nGramWidths = e, this.leftPad = y.encodeString(o), this.rightPad = y.encodeString(n), this.padWidth = s, this.preserveShort = a;
  }
  // Effective pad count for an n-gram of width t8 (capped at width-1).
  getPadWidth(t8) {
    return Math.min(this.padWidth < 0 ? t8 - 1 : this.padWidth, t8 - 1);
  }
  // Number of n-grams of width e producible from t8 tokens with padding.
  getNumNGrams(t8, e) {
    let o = this.getPadWidth(e);
    return Math.max(0, t8 + 2 * o - e + 1);
  }
  // Write `s` n-grams of width `a` built from tokens t8[e..] into o[n..].
  // Each output is a fresh Uint8Array: pads + tokens joined by separator.
  createNGrams(t8, e, o, n, s, a) {
    for (let i = 0; i < s; ++i) {
      let p = this.getPadWidth(a),
        u = Math.max(0, p - i),
        c = Math.max(0, p - (s - (i + 1))),
        l = a - (u + c),
        m = e + (u > 0 ? 0 : i - p),
        d = 0;
      // First compute the exact byte length of this n-gram...
      d += u * this.leftPad.length;
      for (let b = 0; b < l; ++b) d += t8[m + b].length;
      d += c * this.rightPad.length;
      let f = u + c + l - 1;
      d += f * this.separator.length, o[n + i] = new Uint8Array(d);
      // ...then fill it: left pads, tokens, right pads, all separated.
      let h = o[n + i],
        g = 0,
        x = b => b.forEach(C => h[g++] = C);
      for (let b = 0; b < u; ++b) x(this.leftPad), x(this.separator);
      for (let b = 0; b < l - 1; ++b) x(t8[m + b]), x(this.separator);
      if (l > 0) {
        x(t8[m + l - 1]);
        for (let b = 0; b < c; ++b) x(this.separator), x(this.rightPad);
      } else {
        for (let b = 0; b < c - 1; ++b) x(this.rightPad), x(this.separator);
        x(this.rightPad);
      }
    }
  }
  // Main entry: t8 = flat token list, e = row splits. Validates splits,
  // then emits [nGrams, nGramsSplits] per ragged row.
  compute(t8, e) {
    let o = t8.length,
      n = e.length;
    if (n > 0) {
      let p = e[0];
      if (p !== 0) throw new Error(`First split value must be 0, got ${p}`);
      for (let u = 1; u < n; ++u) {
        let c = e[u] >= p;
        if (c = c && e[u] <= o, !c) throw new Error(`Invalid split value ${e[u]}, must be in [${p}, ${o}]`);
        p = e[u];
      }
      if (p !== o) throw new Error(`Last split value must be data size. Expected ${o}, got ${p}`);
    }
    let s = n - 1,
      a = y.getArrayFromDType("int32", n);
    // Empty data or splits: all-zero splits, empty output.
    if (o === 0 || n === 0) {
      let p = new Array(o);
      for (let u = 0; u <= s; ++u) a[u] = 0;
      return [p, a];
    }
    // First pass: per-row n-gram counts as a prefix sum in `a`.
    a[0] = 0;
    for (let p = 1; p <= s; ++p) {
      let u = e[p] - e[p - 1],
        c = 0;
      this.nGramWidths.forEach(l => {
        c += this.getNumNGrams(u, l);
      }), this.preserveShort && u > 0 && c === 0 && (c = 1), a[p] = a[p - 1] + c;
    }
    // Second pass: materialize the n-grams row by row.
    let i = new Array(a[s]);
    for (let p = 0; p < s; ++p) {
      let u = e[p],
        c = a[p];
      if (this.nGramWidths.forEach(l => {
        let m = e[p + 1] - e[p],
          d = this.getNumNGrams(m, l);
        this.createNGrams(t8, u, i, c, d, l), c += d;
      }), this.preserveShort && c === a[p]) {
        // Short row preserved as a single fully-padded n-gram.
        let l = e[p + 1] - e[p];
        if (l === 0) continue;
        let m = l + 2 * this.padWidth,
          d = 1;
        this.createNGrams(t8, u, i, c, d, m);
      }
    }
    return [i, a];
  }
};
// Thin wrapper: construct a string n-grams generator (class cI) from the
// config arguments and run it over the data/splits pair (r, t8).
function gp(r, t8, e, o, n, s, a, i) {
  const generator = new cI(e, o, n, s, a, i);
  return generator.compute(r, t8);
}
// Split the byte sequence `r` on any delimiter byte listed in `t8`,
// appending the resulting pieces (as subarray views) to `o`. When `e`
// (skip-empty) is true, zero-length pieces are dropped. An empty
// delimiter list splits into single-byte pieces; an empty input emits
// nothing.
function uY(r, t8, e, o) {
  if (!r.length) {
    return;
  }
  if (t8.length === 0) {
    // No delimiters: one single-element slice per byte.
    for (let pos = 0; pos < r.length; ++pos) {
      o.push(r.subarray(pos, pos + 1));
    }
    return;
  }
  if (t8.length === 1) {
    // Single delimiter: repeatedly cut at the next occurrence.
    const delim = t8[0];
    let rest = r;
    let hit = rest.indexOf(delim);
    while (hit !== -1) {
      const piece = rest.subarray(0, hit);
      if (!e || piece.length !== 0) {
        o.push(piece);
      }
      rest = rest.subarray(hit + 1);
      hit = rest.indexOf(delim);
    }
    if (!e || rest.length !== 0) {
      o.push(rest);
    }
    return;
  }
  // Multiple delimiters: single scan, cutting at every delimiter byte
  // (the virtual position r.length acts as a final cut point).
  let start = 0;
  for (let pos = 0; pos < r.length + 1; pos++) {
    if (pos === r.length || t8.indexOf(r[pos]) !== -1) {
      const piece = r.subarray(start, pos);
      if (!e || piece.length !== 0) {
        o.push(piece);
      }
      start = pos + 1;
    }
  }
}
// stringSplit implementation: split each input byte-string r[m] on the
// delimiter set t8 (e = skip-empty) and return the sparse representation
// [indices (row, col pairs), values, denseShape].
function xp(r, t8, e) {
  let o = r.length,
    n = [],
    s = 0,
    a = 0,
    i = new Array(o);
  // First pass: split every row, tracking per-row counts (i), the total
  // (s) and the widest row (a).
  for (let m = 0; m < o; ++m) {
    let d = n.length;
    uY(r[m], t8, e, n);
    let f = n.length - d;
    i[m] = f, s += f, a = Math.max(a, f);
  }
  // Second pass: emit (row, col) index pairs and the flat value list.
  let p = y.getArrayFromDType("int32", s * 2),
    u = new Array(s),
    c = [o, a],
    l = 0;
  for (let m = 0; m < o; ++m) for (let d = 0; d < i[m]; ++d) p[l * 2] = m, p[l * 2 + 1] = d, u[l] = n[l], ++l;
  return [p, u, c];
}
// stringToHashBucketFast implementation: hash each input string with the
// 64-bit fingerprint helper, modulo t8 buckets, into an int32 array.
function yp(r, t8) {
  let e = y.getArrayFromDType("int32", r.length);
  for (let o = 0; o < r.length; ++o) e[o] = y.fingerPrint64(r[o]).modulo(t8).getLowBitsUnsigned();
  return e;
}
// Sub (a-b) CPU kernel: real binary impl, complex variant (component-wise
// subtraction of real/imag parts), shared kernel func Vl, and the
// registration config.
var lI = ze((r, t8) => r - t8);
var pY = Sc((r, t8, e, o) => ({
  real: r - e,
  imag: t8 - o
}));
var Vl = Ye(_s, lI, pY);
var R_ = {
  kernelName: _s,
  backendName: "cpu",
  kernelFunc: Vl
};
// Tile implementation: repeat buffer r t8[d] times along each dimension d
// by mapping every output location back to (loc % inputDim) per axis.
function Mf(r, t8) {
  let e = new Array(r.rank);
  for (let n = 0; n < e.length; n++) e[n] = r.shape[n] * t8[n];
  let o = me(e, r.dtype);
  for (let n = 0; n < o.values.length; ++n) {
    let s = o.indexToLoc(n),
      a = new Array(r.rank);
    for (let p = 0; p < a.length; p++) a[p] = s[p] % r.shape[p];
    let i = r.locToIndex(a);
    o.values[n] = r.values[i];
  }
  return o;
}
// Comparator for {value, index} pairs: sorts by value descending, with
// ties broken by ascending original index (stable ordering for topK).
var Wl = (r, t8) => {
  const valueDelta = t8.value - r.value;
  if (valueDelta !== 0) {
    return valueDelta;
  }
  return r.index - t8.index;
};
// In-place selection: partially sorts r so that the element with rank t8
// (under comparator Wl, i.e. value-descending) is at position t8, with
// smaller-ranked elements before it. The sampling step for large ranges
// resembles the Floyd–Rivest SELECT refinement — NOTE(review): algorithm
// identification inferred from the log/exp pivot-window math; confirm.
function D_(r, t8, e = 0, o = r.length - 1) {
  for (; o > e;) {
    // For wide ranges, first recurse on a small sampled window around
    // the target rank to pick a better pivot.
    if (o - e > 600) {
      let i = o - e + 1,
        p = t8 - e + 1,
        u = Math.log(i),
        c = 0.5 * Math.exp(2 * u / 3),
        l = 0.5 * Math.sqrt(u * c * (i - c) / i) * Math.sign(p - i / 2),
        m = Math.max(e, Math.floor(t8 - p * c / i + l)),
        d = Math.min(o, Math.floor(t8 + (i - p) * c / i + l));
      D_(r, t8, m, d);
    }
    // Hoare-style partition around the pivot value r[t8].
    let n = r[t8],
      s = e,
      a = o;
    for (y.swap(r, e, t8), Wl(r[o], n) > 0 && y.swap(r, e, o); s < a;) {
      for (y.swap(r, s, a), s++, a--; Wl(r[s], n) < 0;) s = s + 1;
      for (; Wl(r[a], n) > 0;) a = a - 1;
    }
    // Narrow the active range to the side containing rank t8.
    Wl(r[e], n) === 0 ? y.swap(r, e, a) : (a = a + 1, y.swap(r, a, o)), a <= t8 && (e = a + 1), t8 <= a && (o = a - 1);
  }
}
// topK implementation: for each row of the innermost dimension, select
// the o largest values (via the D_ selection routine), optionally sorted
// (n), returning [valuesBuffer, indicesBuffer].
function Lf(r, t8, e, o, n) {
  let s = t8[t8.length - 1],
    [a, i] = [r.length / s, s],
    p = y.getTypedArrayFromDType(e, a * o),
    u = y.getTypedArrayFromDType("int32", a * o);
  for (let l = 0; l < a; l++) {
    let m = l * i,
      d = r.subarray(m, m + i),
      f = new Array(d.length);
    // Pair each value with its original index, select top-o, and sort
    // the winners only when requested.
    d.forEach((b, C) => f[C] = {
      value: b,
      index: C
    }), o < f.length && (D_(f, o), f = f.slice(0, o)), n && f.sort(Wl);
    let h = l * o,
      g = p.subarray(h, h + o),
      x = u.subarray(h, h + o);
    for (let b = 0; b < o; b++) g[b] = f[b].value, x[b] = f[b].index;
  }
  // Output shape: input shape with the last dimension replaced by o.
  let c = t8.slice();
  return c[c.length - 1] = o, [me(c, e, p), me(c, "int32", u)];
}
// unique implementation along axis t8: views the input as [outer, axis,
// inner], fingerprints each slice along the axis, and keeps the first
// occurrence of each distinct slice. Returns outputValues, outputShape,
// and the per-slice index mapping.
function bp(r, t8, e, o) {
  let n = y.parseAxisParam(t8, e)[0],
    s = [1, e[0], 1];
  for (let f = 0; f < n; f++) s[0] *= e[f];
  s[1] = e[n];
  for (let f = n + 1; f < e.length; f++) s[2] *= e[f];
  // a: fingerprint -> unique id; i: per-slice unique id; u: kept slices.
  let a = /* @__PURE__ */new Map(),
    i = new Int32Array(e[n]),
    p = new tt(s, o, r),
    u = [],
    c = s[0] === 1 && s[2] === 1;
  for (let f = 0; f < e[n]; f++) {
    let h;
    // 1-D fast path uses the raw value as the key; otherwise join the
    // slice's elements into a string fingerprint.
    if (c) h = r[f].toString();else {
      let x = [];
      for (let b = 0; b < s[0]; b++) for (let C = 0; C < s[2]; C++) x.push(p.get(b, f, C));
      h = x.join(",");
    }
    let g = a.get(h);
    if (g != null) i[f] = g;else {
      let x = a.size;
      a.set(h, x), i[f] = x, u.push(f);
    }
  }
  // Gather the kept slices into the output buffer.
  let l = s.slice();
  l[1] = a.size;
  let m = new tt(l, o);
  u.forEach((f, h) => {
    for (let g = 0; g < s[0]; g++) for (let x = 0; x < s[2]; x++) m.set(p.get(g, f, x), g, h, x);
  });
  let d = e.slice();
  return d[n] = l[1], {
    outputValues: m.values,
    outputShape: d,
    indices: i
  };
}
// Bundled tfjs-backend-cpu version string, and registration of the "cpu"
// backend factory at priority 1. NOTE(review): `su` presumably maps to
// registerBackend and `xu` to the MathBackendCPU class — minified names.
var cY = "4.11.0";
su("cpu", () => new xu(), 1);
// Elu CPU kernel (x for x>=0, e^x - 1 otherwise): unary kernel func
// (reused by the fused-activation dispatcher) and registration config.
var mI = Ie(gn, r => r >= 0 ? r : Math.exp(r) - 1);
var A_ = {
  kernelName: gn,
  backendName: "cpu",
  kernelFunc: mI
};
// LeakyRelu CPU kernel: x for x>=0, alpha*x otherwise; always produces a
// float32 output. Registered below via F_.
function dI(r) {
  let {
      inputs: t8,
      backend: e,
      attrs: o
    } = r,
    {
      x: n
    } = t8,
    {
      alpha: s
    } = o;
  Q([n], "leakyRelu");
  let a = y.sizeFromShape(n.shape),
    i = e.data.get(n.dataId).values,
    p = y.getTypedArrayFromDType("float32", a);
  for (let u = 0; u < i.length; u++) p[u] = i[u] < 0 ? s * i[u] : i[u];
  return e.makeTensorInfo(n.shape, "float32", p);
}
// Kernel registration config for LeakyRelu on the CPU backend.
var F_ = {
  kernelName: Rn,
  backendName: "cpu",
  kernelFunc: dI
};
// Prelu CPU kernel: x for x>=0, alpha*x otherwise, with `alpha` being a
// broadcastable tensor (lY is the broadcasting binary impl).
var lY = ze((r, t8) => r < 0 ? t8 * r : r);
function fI(r) {
  let {
      inputs: t8,
      backend: e
    } = r,
    {
      x: o,
      alpha: n
    } = t8;
  Q([o, n], "prelu");
  let s = e.data.get(o.dataId).values,
    a = e.data.get(n.dataId).values,
    [i, p] = lY(o.shape, n.shape, s, a, "float32");
  return e.makeTensorInfo(p, "float32", i);
}
// Kernel registration config for Prelu on the CPU backend.
var P_ = {
  kernelName: os,
  backendName: "cpu",
  kernelFunc: fI
};
// Relu (max(0, x)) CPU kernel func and registration config.
var hI = Ie(as, r => Math.max(0, r));
var O_ = {
  kernelName: as,
  backendName: "cpu",
  kernelFunc: hI
};
// Relu6 (clamp to [0, 6]) CPU kernel func and registration config.
var gI = Ie(ps, r => Math.min(Math.max(0, r), 6));
var M_ = {
  kernelName: ps,
  backendName: "cpu",
  kernelFunc: gI
};
// Fused-activation dispatcher for the CPU backend: applies the named
// activation `e` to tensor t8 (o = prelu weights, n = leakyrelu alpha)
// by delegating to the corresponding unary kernel func defined above.
function Cp(r, t8, e, o, n) {
  if (e === "linear") return mr({
    inputs: {
      x: t8
    },
    backend: r
  });
  if (e === "relu") return hI({
    inputs: {
      x: t8
    },
    backend: r
  });
  if (e === "elu") return mI({
    inputs: {
      x: t8
    },
    backend: r
  });
  if (e === "relu6") return gI({
    inputs: {
      x: t8
    },
    backend: r
  });
  if (e === "prelu") return fI({
    inputs: {
      x: t8,
      alpha: o
    },
    backend: r
  });
  if (e === "leakyrelu") return dI({
    inputs: {
      x: t8
    },
    backend: r,
    attrs: {
      alpha: n
    }
  });
  if (e === "sigmoid") return iI({
    inputs: {
      x: t8
    },
    backend: r
  });
  throw new Error(`Activation ${e} has not been implemented for the CPU backend.`);
}
// Reshape CPU kernel: infers any implicit (-1) dimension, checks element
// counts match, and returns a new TensorInfo sharing the input's dataId
// (no copy — the ref count is bumped instead). Complex tensors have
// their real/imag component shapes updated in place.
function Ve(r) {
  let {
      inputs: t8,
      backend: e,
      attrs: o
    } = r,
    {
      x: n
    } = t8,
    {
      shape: s
    } = o,
    a = y.sizeFromShape(n.shape),
    i = y.inferFromImplicitShape(s, a),
    p = y.sizeFromShape(i);
  y.assert(a === p, () => `The new shape (${i}) has ${p} elements and the old shape (${n.shape}) has ${a} elements. The new shape and old shape must have the same number of elements.`), e.incRef(n.dataId);
  let u = e.data.get(n.dataId);
  if (u.complexTensorInfos != null) {
    let c = u.complexTensorInfos.real,
      l = u.complexTensorInfos.imag;
    c.shape = i, l.shape = i;
  }
  return {
    dataId: n.dataId,
    shape: i,
    dtype: n.dtype
  };
}
// Kernel registration config for Reshape on the CPU backend.
var L_ = {
  kernelName: da,
  backendName: "cpu",
  kernelFunc: Ve
};
// BatchMatMul CPU kernel: reshapes both operands to rank-3 [batch, rows,
// cols] (honoring transposeA/transposeB via stride selection rather than
// materialized transposes), multiplies with a cache-blocked triple loop,
// and broadcasts batch dimensions.
function xI(r) {
  let {
      inputs: t8,
      backend: e,
      attrs: o
    } = r,
    {
      a: n,
      b: s
    } = t8,
    {
      transposeA: a,
      transposeB: i
    } = o;
  Q([n, s], "matMul");
  // c/l: inner (shared) dims; m/d: outer dims; C: broadcast result shape.
  let p = n.shape.length,
    u = s.shape.length,
    c = a ? n.shape[p - 2] : n.shape[p - 1],
    l = i ? s.shape[u - 1] : s.shape[u - 2],
    m = a ? n.shape[p - 1] : n.shape[p - 2],
    d = i ? s.shape[u - 2] : s.shape[u - 1],
    f = n.shape.slice(0, -2),
    h = s.shape.slice(0, -2),
    g = y.sizeFromShape(f),
    x = y.sizeFromShape(h),
    C = Ir.assertAndGetBroadcastShape(n.shape.slice(0, -2), s.shape.slice(0, -2)).concat([m, d]);
  y.assert(c === l, () => `Error in matMul: inner shapes (${c}) and (${l}) of Tensors with shapes ${n.shape} and ${s.shape} and transposeA=${a} and transposeB=${i} must match.`);
  // Collapse both operands to rank 3 for the kernel loop.
  let S = a ? [g, c, m] : [g, m, c],
    k = i ? [x, d, l] : [x, l, d],
    _ = Ve({
      inputs: {
        x: n
      },
      backend: e,
      attrs: {
        shape: S
      }
    }),
    E = Ve({
      inputs: {
        x: s
      },
      backend: e,
      attrs: {
        shape: k
      }
    }),
    R = a ? _.shape[1] : _.shape[2],
    D = a ? _.shape[2] : _.shape[1],
    P = i ? E.shape[1] : E.shape[2],
    O = Math.max(g, x),
    M = e.data.get(_.dataId).values,
    L = e.data.get(E.dataId).values,
    B = y.computeStrides(_.shape),
    z = y.computeStrides(E.shape),
    // Stride triples implement the transposes without copying data.
    [U, j, q] = a ? [B[0], 1, B[1]] : [B[0], B[1], 1],
    [Y, J, re] = i ? [1, z[1], z[0]] : [z[1], 1, z[0]],
    ne = D * P,
    ee = me([O, D, P], _.dtype),
    oe = ee.values,
    ie = e.blockSize;
  // Cache-blocked matmul: iterate output tiles of size ie x ie, with the
  // batch index wrapped (modulo) for broadcast batches.
  for (let le = 0; le < O; le++) {
    let be = le % g,
      _e = le % x;
    for (let ve = 0; ve < D; ve += ie) {
      let Fe = Math.min(ve + ie, D);
      for (let Pe = 0; Pe < P; Pe += ie) {
        let st = Math.min(Pe + ie, P);
        for (let lt = 0; lt < R; lt += ie) {
          let Ge = Math.min(lt + ie, R);
          for (let mt = ve; mt < Fe; mt++) for (let it = Pe; it < st; it++) {
            let gt = 0;
            for (let xt = lt; xt < Ge; xt++) {
              let Lr = M[be * U + mt * j + xt * q],
                Lt = L[xt * Y + it * J + _e * re];
              gt += Lr * Lt;
            }
            oe[le * ne + (mt * P + it)] += gt;
          }
        }
      }
    }
  }
  return e.disposeIntermediateTensorInfo(_), e.disposeIntermediateTensorInfo(E), e.makeTensorInfo(C, ee.dtype, ee.values);
}
// Kernel registration config for BatchMatMul on the CPU backend.
var B_ = {
  kernelName: Jo,
  backendName: "cpu",
  kernelFunc: xI
};
// Fused MatMul CPU kernel: matmul, then optional bias add, then optional
// activation (via Cp); intermediate tensors are disposed at the end.
function mY(r) {
  let {
      inputs: t8,
      backend: e,
      attrs: o
    } = r,
    {
      a: n,
      b: s,
      bias: a,
      preluActivationWeights: i
    } = t8,
    {
      transposeA: p,
      transposeB: u,
      activation: c,
      leakyreluAlpha: l
    } = o,
    m,
    d,
    f,
    h = [];
  m = xI({
    inputs: {
      a: n,
      b: s
    },
    attrs: {
      transposeA: p,
      transposeB: u
    },
    backend: e
  }), a && (d = Oa({
    inputs: {
      a: m,
      b: a
    },
    backend: e
  }), h.push(m), m = d), c && (f = Cp(e, m, c, i, l), h.push(m), m = f);
  for (let x of h) e.disposeIntermediateTensorInfo(x);
  return m;
}
// Kernel registration config for the fused matmul op on the CPU backend.
var z_ = {
  kernelName: Io,
  backendName: "cpu",
  kernelFunc: mY
};
// Acos CPU kernel func and registration config.
var dY = Ie(Wo, r => Math.acos(r));
var V_ = {
  kernelName: Wo,
  backendName: "cpu",
  kernelFunc: dY
};
// Acosh CPU kernel func and registration config.
var fY = Ie(Uo, r => Math.acosh(r));
var W_ = {
  kernelName: Uo,
  backendName: "cpu",
  kernelFunc: fY
};
// AddN CPU kernel: element-wise sum of all input tensors into a zeroed
// buffer shaped like the first input. NOTE(review): assumes all inputs
// share the first input's shape/dtype — presumably validated upstream.
function hY(r) {
  let {
      inputs: t8,
      backend: e
    } = r,
    o = t8;
  Q(t8, "addN");
  let n = o.map(i => e.data.get(i.dataId).values),
    s = me(o[0].shape, o[0].dtype),
    a = s.values;
  for (let i = 0; i < o.length; i++) {
    let p = n[i];
    for (let u = 0; u < a.length; u++) a[u] += p[u];
  }
  return e.makeTensorInfo(s.shape, s.dtype, s.values);
}
// Kernel registration config for AddN on the CPU backend.
var U_ = {
  kernelName: Go,
  backendName: "cpu",
  kernelFunc: hY
};
// All CPU kernel: logical-AND reduction over the given axes. Permutes
// reduced axes to the innermost position if needed, reduces each run,
// and optionally reshapes to keep the reduced dims (keepDims).
function gY(r) {
  let {
      inputs: t8,
      backend: e,
      attrs: o
    } = r,
    {
      x: n
    } = t8,
    {
      axis: s,
      keepDims: a
    } = o;
  Q(n, "all");
  let i = y.parseAxisParam(s, n.shape),
    p = i,
    u = w.getAxesPermutation(p, n.shape.length),
    c = n;
  // Transpose so reduced axes become the innermost dimensions.
  u != null && (c = It({
    inputs: {
      x: n
    },
    backend: e,
    attrs: {
      perm: u
    }
  }), p = w.getInnerMostAxes(p.length, n.shape.length)), w.assertAxesAreInnerMostDims("all", p, c.shape.length);
  let [l, m] = w.computeOutAndReduceShapes(c.shape, p),
    d = y.sizeFromShape(m),
    f = y.makeZerosTypedArray(y.sizeFromShape(l), c.dtype),
    h = e.data.get(c.dataId).values;
  // AND-fold each contiguous run of d reduced elements.
  for (let x = 0; x < f.length; ++x) {
    let b = x * d,
      C = h[b];
    for (let S = 0; S < d; ++S) {
      let k = h[b + S];
      C = C && k;
    }
    f[x] = C;
  }
  u != null && e.disposeIntermediateTensorInfo(c);
  let g = e.makeTensorInfo(l, c.dtype, f);
  if (a) {
    let x = w.expandShapeToKeepDim(l, i),
      b = Ve({
        inputs: {
          x: g
        },
        backend: e,
        attrs: {
          shape: x
        }
      });
    return e.disposeIntermediateTensorInfo(g), b;
  }
  return g;
}
// Kernel registration config for All on the CPU backend.
var G_ = {
  kernelName: Ho,
  backendName: "cpu",
  kernelFunc: gY
};
// Any CPU kernel: logical-OR reduction over the given axes. Mirrors the
// All kernel above except the fold uses || instead of &&.
function xY(r) {
  let {
      inputs: t8,
      backend: e,
      attrs: o
    } = r,
    {
      x: n
    } = t8,
    {
      axis: s,
      keepDims: a
    } = o;
  Q(n, "any");
  let i = y.parseAxisParam(s, n.shape),
    p = i,
    u = w.getAxesPermutation(p, n.shape.length),
    c = n;
  // Transpose so reduced axes become the innermost dimensions.
  u != null && (c = It({
    inputs: {
      x: n
    },
    backend: e,
    attrs: {
      perm: u
    }
  }), p = w.getInnerMostAxes(p.length, n.shape.length)), w.assertAxesAreInnerMostDims("any", p, c.shape.length);
  let [l, m] = w.computeOutAndReduceShapes(c.shape, p),
    d = y.sizeFromShape(m),
    f = y.makeZerosTypedArray(y.sizeFromShape(l), c.dtype),
    h = e.data.get(c.dataId).values;
  // OR-fold each contiguous run of d reduced elements.
  for (let x = 0; x < f.length; ++x) {
    let b = x * d,
      C = h[b];
    for (let S = 0; S < d; ++S) {
      let k = h[b + S];
      C = C || k;
    }
    f[x] = C;
  }
  u != null && e.disposeIntermediateTensorInfo(c);
  let g = e.makeTensorInfo(l, c.dtype, f);
  if (a) {
    let x = w.expandShapeToKeepDim(l, i),
      b = Ve({
        inputs: {
          x: g
        },
        backend: e,
        attrs: {
          shape: x
        }
      });
    return e.disposeIntermediateTensorInfo(g), b;
  }
  return g;
}
// Kernel registration config for Any on the CPU backend.
var H_ = {
  kernelName: Ko,
  backendName: "cpu",
  kernelFunc: xY
};
// ArgMax CPU kernel: index of the maximum along a single axis; ties go
// to the first occurrence (strict > comparison). Output dtype is int32.
function yY(r) {
  let {
      inputs: t8,
      backend: e,
      attrs: o
    } = r,
    {
      x: n
    } = t8,
    {
      axis: s
    } = o;
  Q(n, "argMax");
  let a = y.parseAxisParam(s, n.shape),
    i = w.getAxesPermutation(a, n.shape.length),
    p = n,
    u = [];
  // Permute the reduction axis innermost; only a single axis is used.
  i != null && (p = It({
    inputs: {
      x: n
    },
    backend: e,
    attrs: {
      perm: i
    }
  }), u.push(p), a = w.getInnerMostAxes(a.length, p.shape.length)), a = [a[0]], w.assertAxesAreInnerMostDims("argMax", a, p.shape.length);
  let [c, l] = w.computeOutAndReduceShapes(p.shape, a),
    m = y.sizeFromShape(c),
    d = y.makeZerosTypedArray(m, "int32"),
    f = y.sizeFromShape(l),
    h = e.data.get(p.dataId).values;
  for (let g = 0; g < d.length; ++g) {
    let x = g * f,
      b = h[x],
      C = 0;
    for (let S = 0; S < f; ++S) {
      let k = h[x + S];
      k > b && (b = k, C = S);
    }
    d[g] = C;
  }
  return u.forEach(g => e.disposeIntermediateTensorInfo(g)), e.makeTensorInfo(c, "int32", d);
}
// Kernel registration config for ArgMax on the CPU backend.
var K_ = {
  kernelName: Ys,
  backendName: "cpu",
  kernelFunc: yY
};
// ArgMin CPU kernel: index of the minimum along a single axis; mirrors
// ArgMax above with a strict < comparison. Output dtype is int32.
function bY(r) {
  let {
      inputs: t8,
      backend: e,
      attrs: o
    } = r,
    {
      x: n
    } = t8,
    {
      axis: s
    } = o;
  Q(n, "argMin");
  let a = y.parseAxisParam(s, n.shape),
    i = w.getAxesPermutation(a, n.shape.length),
    p = n,
    u = [];
  // Permute the reduction axis innermost; only a single axis is used.
  i != null && (p = It({
    inputs: {
      x: n
    },
    backend: e,
    attrs: {
      perm: i
    }
  }), u.push(p), a = w.getInnerMostAxes(a.length, p.shape.length)), a = [a[0]], w.assertAxesAreInnerMostDims("argMin", a, p.shape.length);
  let [c, l] = w.computeOutAndReduceShapes(p.shape, a),
    m = y.sizeFromShape(c),
    d = y.makeZerosTypedArray(m, "int32"),
    f = y.sizeFromShape(l),
    h = e.data.get(p.dataId).values;
  for (let g = 0; g < d.length; ++g) {
    let x = g * f,
      b = h[x],
      C = 0;
    for (let S = 0; S < f; ++S) {
      let k = h[x + S];
      k < b && (b = k, C = S);
    }
    d[g] = C;
  }
  return u.forEach(g => e.disposeIntermediateTensorInfo(g)), e.makeTensorInfo(c, "int32", d);
}
// Kernel registration config for ArgMin on the CPU backend.
var q_ = {
  kernelName: Qs,
  backendName: "cpu",
  kernelFunc: bY
};
// Asin CPU kernel func and registration config.
var CY = Ie(qo, r => Math.asin(r));
var j_ = {
  kernelName: qo,
  backendName: "cpu",
  kernelFunc: CY
};
// Asinh CPU kernel func and registration config.
var wY = Ie(jo, r => Math.asinh(r));
var X_ = {
  kernelName: jo,
  backendName: "cpu",
  kernelFunc: wY
};
// Atan CPU kernel func and registration config.
var SY = Ie(Xo, r => Math.atan(r));
var Y_ = {
  kernelName: Xo,
  backendName: "cpu",
  kernelFunc: SY
};
// Atan2 (binary) CPU kernel func and registration config.
var IY = ze((r, t8) => Math.atan2(r, t8));
var vY = Ye(Qo, IY);
var Q_ = {
  kernelName: Qo,
  backendName: "cpu",
  kernelFunc: vY
};
// Atanh CPU kernel func and registration config.
var kY = Ie(Yo, r => Math.atanh(r));
var Z_ = {
  kernelName: Yo,
  backendName: "cpu",
  kernelFunc: kY
};
// 2D pooling implementation (shared by max and avg pooling, selected by
// s === "max" | "avg"): slides the effective (dilated) filter window over
// the NHWC input r (strides o) per the conv info n.
function _c(r, t8, e, o, n, s) {
  let a = n.strideHeight,
    i = n.strideWidth,
    p = n.dilationHeight,
    u = n.dilationWidth,
    c = n.effectiveFilterHeight,
    l = n.effectiveFilterWidth,
    m = n.padInfo.top,
    d = n.padInfo.left,
    // Identity element: -Inf for max, +Inf for avg (avg never compares).
    f = s === "max" ? Number.NEGATIVE_INFINITY : Number.POSITIVE_INFINITY,
    h = me(n.outShape, e),
    g = h.values,
    x = n.outShape[1] * n.outShape[2] * n.outShape[3],
    b = n.outShape[2] * n.outShape[3],
    C = n.outShape[3];
  for (let S = 0; S < n.batchSize; ++S) {
    let k = S * x,
      _ = S * o[0];
    for (let E = 0; E < n.inChannels; ++E) for (let R = 0; R < n.outHeight; ++R) {
      // Window rows clipped to the (padded) input bounds.
      let D = R * a - m,
        P = Math.max(0, D),
        O = Math.min(n.inHeight, c + D),
        M = k + R * b;
      for (let L = 0; L < n.outWidth; ++L) {
        let B = L * i - d,
          z = Math.max(0, B),
          U = Math.min(n.inWidth, l + B),
          j = f,
          q = 0,
          Y = 0;
        for (let re = P; re < O; re += p) {
          let ne = _ + re * o[1];
          for (let ee = z; ee < U; ee += u) {
            let oe = ne + ee * o[2],
              ie = r[oe + E];
            s === "max" && ie > j ? j = ie : s === "avg" && (q += ie, Y++);
          }
          // NaN propagates: stop scanning once the max becomes NaN.
          if (isNaN(j)) break;
        }
        let J = M + L * C + E;
        g[J] = s === "avg" ? q / Y : j;
      }
    }
  }
  return h;
}
// Max-pool argmax positions: for every output cell, record the position
// of the window maximum. n selects flattened-input positions vs
// in-window offsets; s additionally includes the batch dimension in the
// flattened position.
function Bf(r, t8, e, o, n = false, s = false) {
  let a = me(o.outShape, "int32"),
    i = o.strideHeight,
    p = o.strideWidth,
    u = o.dilationHeight,
    c = o.dilationWidth,
    l = o.effectiveFilterHeight,
    m = o.effectiveFilterWidth,
    d = o.padInfo.top,
    f = o.padInfo.left,
    h = me(t8, e, r);
  for (let g = 0; g < o.batchSize; ++g) for (let x = 0; x < o.inChannels; ++x) for (let b = 0; b < o.outHeight; ++b) {
    // Advance the window start to the first in-bounds dilated row.
    let C = b * i - d,
      S = C;
    for (; S < 0;) S += u;
    let k = Math.min(o.inHeight, l + C);
    for (let _ = 0; _ < o.outWidth; ++_) {
      let E = _ * p - f,
        R = E;
      for (; R < 0;) R += c;
      let D = Math.min(o.inWidth, m + E),
        P = Number.NEGATIVE_INFINITY,
        O = -1;
      for (let M = S; M < k; M += u) {
        let L = M - C;
        for (let B = R; B < D; B += c) {
          let z = B - E,
            U = h.get(g, M, B, x);
          U > P && (P = U, n ? O = s ? ((g * o.inHeight + M) * o.inWidth + B) * o.inChannels + x : (M * o.inWidth + B) * o.inChannels + x : O = L * m + z);
        }
      }
      a.set(O, g, b, _, x);
    }
  }
  return a;
}
// 3D pooling implementation (max or avg per s) over NDHWC input r with
// strides o, per the 3D conv info n. Same structure as the 2D version
// with an extra depth dimension.
function zf(r, t8, e, o, n, s) {
  let a = n.strideDepth,
    i = n.strideHeight,
    p = n.strideWidth,
    u = n.dilationDepth,
    c = n.dilationHeight,
    l = n.dilationWidth,
    m = n.effectiveFilterDepth,
    d = n.effectiveFilterHeight,
    f = n.effectiveFilterWidth,
    h = n.padInfo.front,
    g = n.padInfo.top,
    x = n.padInfo.left,
    // Identity element: -Inf for max, +Inf for avg (avg never compares).
    b = s === "max" ? Number.NEGATIVE_INFINITY : Number.POSITIVE_INFINITY,
    C = me(n.outShape, e),
    S = C.values,
    k = n.outShape[1] * n.outShape[2] * n.outShape[3] * n.outShape[4],
    _ = n.outShape[2] * n.outShape[3] * n.outShape[4],
    E = n.outShape[3] * n.outShape[4],
    R = n.outShape[4];
  for (let D = 0; D < n.batchSize; ++D) {
    let P = D * k,
      O = D * o[0];
    for (let M = 0; M < n.inChannels; ++M) for (let L = 0; L < n.outDepth; ++L) {
      // Clip each window dimension to the in-bounds dilated positions.
      let B = L * a - h,
        z = B;
      for (; z < 0;) z += u;
      let U = Math.min(n.inDepth, m + B),
        j = P + L * _;
      for (let q = 0; q < n.outHeight; ++q) {
        let Y = q * i - g,
          J = Y;
        for (; J < 0;) J += c;
        let re = Math.min(n.inHeight, d + Y),
          ne = j + q * E;
        for (let ee = 0; ee < n.outWidth; ++ee) {
          let oe = ee * p - x,
            ie = oe;
          for (; ie < 0;) ie += l;
          let le = Math.min(n.inWidth, f + oe),
            be = ne + ee * R,
            _e = b,
            ve = 0,
            Fe = 0;
          for (let st = z; st < U; st += u) {
            let lt = O + st * o[1];
            for (let Ge = J; Ge < re; Ge += c) {
              let mt = lt + Ge * o[2];
              for (let it = ie; it < le; it += l) {
                let gt = mt + it * o[3],
                  xt = r[gt + M];
                // NaN propagates: abandon the window once max is NaN.
                if (s === "max" && xt > _e ? _e = xt : s === "avg" && (ve += xt, Fe++), isNaN(_e)) break;
              }
              if (isNaN(_e)) break;
            }
            if (isNaN(_e)) break;
          }
          let Pe = be + M;
          S[Pe] = s === "avg" ? ve / Math.max(Fe, 1) : _e;
        }
      }
    }
  }
  return C;
}
function J_(r, t8) { | |
let e = me(t8.outShape, "int32"), | |
o = t8.strideDepth, | |
n = t8.strideHeight, | |
s = t8.strideWidth, | |
a = t8.dilationDepth, | |
i = t8.dilationHeight, | |
p = t8.dilationWidth, | |
u = t8.effectiveFilterDepth, | |
c = t8.effectiveFilterHeight, | |
l = t8.effectiveFilterWidth, | |
m = t8.padInfo.front, | |
d = t8.padInfo.top, | |
f = t8.padInfo.left; | |
for (let h = 0; h < t8.batchSize; ++h) for (let g = 0; g < t8.inChannels; ++g) for (let x = 0; x < t8.outDepth; ++x) { | |
let b = x * o - m, | |
C = b; | |
for (; C < 0;) C += a; | |
let S = Math.min(t8.inDepth, u + b); | |
for (let k = 0; k < t8.outHeight; ++k) { | |
let _ = k * n - d, | |
E = _; | |
for (; E < 0;) E += i; | |
let R = Math.min(t8.inHeight, c + _); | |
for (let D = 0; D < t8.outWidth; ++D) { | |
let P = D * s - f, | |
O = P; | |
for (; O < 0;) O += p; | |
let M = Math.min(t8.inWidth, l + P), | |
L = Number.NEGATIVE_INFINITY, | |
B = -1; | |
for (let z = C; z < S; z += a) { | |
let U = z - b; | |
for (let j = E; j < R; j += i) { | |
let q = j - _; | |
for (let Y = O; Y < M; Y += p) { | |
let J = Y - P, | |
re = r.get(h, z, j, Y, g); | |
re >= L && (L = re, B = U * c * l + q * c + J); | |
} | |
} | |
} | |
e.set(B, h, x, k, D, g); | |
} | |
} | |
} | |
return e; | |
} | |
function NY(r) { | |
let { | |
inputs: t8, | |
backend: e, | |
attrs: o | |
} = r, | |
{ | |
x: n | |
} = t8; | |
Q(n, "avgPool"); | |
let { | |
filterSize: s, | |
strides: a, | |
pad: i, | |
dimRoundingMode: p | |
} = o, | |
u = 1; | |
y.assert(w.eitherStridesOrDilationsAreOne(a, u), () => `Error in avgPool: Either strides or dilations must be 1. Got strides ${a} and dilations '${u}'`); | |
let c = w.computePool2DInfo(n.shape, s, a, u, i, p), | |
l; | |
if (c.filterWidth === 1 && c.filterHeight === 1 && y.arraysEqual(c.inShape, c.outShape)) l = mr({ | |
inputs: { | |
x: n | |
}, | |
backend: e | |
});else { | |
let m = e.data.get(n.dataId).values, | |
d = y.computeStrides(n.shape), | |
f = _c(m, n.shape, n.dtype, d, c, "avg"); | |
l = e.makeTensorInfo(c.outShape, n.dtype, f.values); | |
} | |
return l; | |
} | |
var e$ = { | |
kernelName: Zo, | |
backendName: "cpu", | |
kernelFunc: NY | |
}; | |
function TY(r) { | |
let { | |
inputs: t8, | |
backend: e, | |
attrs: o | |
} = r, | |
{ | |
x: n | |
} = t8, | |
{ | |
filterSize: s, | |
strides: a, | |
pad: i, | |
dimRoundingMode: p, | |
dataFormat: u | |
} = o; | |
Q(n, "avgPool3d"); | |
let c = w.computePool3DInfo(n.shape, s, a, 1, i, p, u), | |
l = e.data.get(n.dataId).values, | |
m = zf(l, n.shape, n.dtype, y.computeStrides(n.shape), c, "avg"); | |
return e.makeTensorInfo(m.shape, "float32", m.values); | |
} | |
var t$ = { | |
kernelName: Zs, | |
backendName: "cpu", | |
kernelFunc: TY | |
}; | |
function _Y(r) { | |
let { | |
inputs: t8, | |
backend: e, | |
attrs: o | |
} = r, | |
{ | |
dy: n, | |
input: s | |
} = t8, | |
{ | |
filterSize: a, | |
strides: i, | |
pad: p, | |
dimRoundingMode: u | |
} = o; | |
Q([n, s], "avgPool3DGrad"); | |
let c = w.computePool3DInfo(s.shape, a, i, 1, p, u), | |
l = c.strideDepth, | |
m = c.strideHeight, | |
d = c.strideWidth, | |
f = c.filterDepth, | |
h = c.filterHeight, | |
g = c.filterWidth, | |
x = c.dilationDepth, | |
b = c.dilationHeight, | |
C = c.dilationWidth, | |
S = c.effectiveFilterDepth, | |
k = c.effectiveFilterHeight, | |
_ = c.effectiveFilterWidth, | |
E = S - 1 - c.padInfo.front, | |
R = _ - 1 - c.padInfo.left, | |
D = k - 1 - c.padInfo.top, | |
P = me(s.shape, "float32"), | |
O = 1 / (f * h * g), | |
M = e.bufferSync(n); | |
for (let L = 0; L < c.batchSize; ++L) for (let B = 0; B < c.inChannels; ++B) for (let z = 0; z < c.inDepth; ++z) for (let U = 0; U < c.inHeight; ++U) for (let j = 0; j < c.inWidth; ++j) { | |
let q = z - E, | |
Y = U - D, | |
J = j - R, | |
re = 0; | |
for (let ne = 0; ne < S; ne += x) { | |
let ee = (q + ne) / l; | |
if (!(ee < 0 || ee >= c.outDepth || Math.floor(ee) !== ee)) for (let oe = 0; oe < k; oe += b) { | |
let ie = (Y + oe) / m; | |
if (!(ie < 0 || ie >= c.outHeight || Math.floor(ie) !== ie)) for (let le = 0; le < _; le += C) { | |
let be = (J + le) / d; | |
if (be < 0 || be >= c.outWidth || Math.floor(be) !== be) continue; | |
let _e = M.get(L, ee, ie, be, B); | |
re += _e; | |
} | |
} | |
} | |
P.set(re * O, L, z, U, j, B); | |
} | |
return e.makeTensorInfo(P.shape, P.dtype, P.values); | |
} | |
var r$ = { | |
kernelName: Ai, | |
backendName: "cpu", | |
kernelFunc: _Y | |
}; | |
function $Y(r) { | |
let { | |
inputs: t8, | |
backend: e, | |
attrs: o | |
} = r, | |
{ | |
dy: n, | |
input: s | |
} = t8, | |
a = s; | |
Q([n, s], "avgPoolGrad"); | |
let { | |
filterSize: i, | |
strides: p, | |
pad: u | |
} = o, | |
c = w.computePool2DInfo(a.shape, i, p, 1, u), | |
l = c.strideHeight, | |
m = c.strideWidth, | |
d = c.filterHeight, | |
f = c.filterWidth, | |
h = c.dilationHeight, | |
g = c.dilationWidth, | |
x = c.effectiveFilterHeight, | |
b = c.effectiveFilterWidth, | |
C = b - 1 - c.padInfo.left, | |
S = x - 1 - c.padInfo.top, | |
k = me(a.shape, "float32"), | |
_ = 1 / (d * f), | |
E = e.data.get(n.dataId).values, | |
R = me(n.shape, "float32", E); | |
for (let D = 0; D < c.batchSize; ++D) for (let P = 0; P < c.inChannels; ++P) for (let O = 0; O < c.inHeight; ++O) for (let M = 0; M < c.inWidth; ++M) { | |
let L = O - S, | |
B = M - C, | |
z = 0; | |
for (let U = 0; U < x; U += h) { | |
let j = (L + U) / l; | |
if (!(j < 0 || j >= c.outHeight || Math.floor(j) !== j)) for (let q = 0; q < b; q += g) { | |
let Y = (B + q) / m; | |
if (Y < 0 || Y >= c.outWidth || Math.floor(Y) !== Y) continue; | |
let J = R.get(D, j, Y, P); | |
z += J; | |
} | |
} | |
k.set(z * _, D, O, M, P); | |
} | |
return e.makeTensorInfo(k.shape, k.dtype, k.values); | |
} | |
var o$ = { | |
kernelName: Di, | |
backendName: "cpu", | |
kernelFunc: $Y | |
}; | |
function EY(r) { | |
let { | |
inputs: t8, | |
backend: e, | |
attrs: o | |
} = r, | |
{ | |
x: n, | |
scale: s, | |
offset: a, | |
mean: i, | |
variance: p | |
} = t8; | |
y.assert(i.shape.length === p.shape.length, () => "Batch normalization gradient requires mean and variance to have equal ranks."), y.assert(a == null || i.shape.length === a.shape.length, () => "Batch normalization gradient requires mean and offset to have equal ranks."), y.assert(s == null || i.shape.length === s.shape.length, () => "Batch normalization gradient requires mean and scale to have equal ranks."), Q([n, i, p, s, a], "batchNorm"); | |
let { | |
varianceEpsilon: u | |
} = o; | |
u == null && (u = 1e-3); | |
let c = e.data.get(n.dataId).values, | |
l = e.data.get(i.dataId).values, | |
m = e.data.get(p.dataId).values, | |
d = s ? e.data.get(s.dataId).values : new Float32Array([1]), | |
f = a ? e.data.get(a.dataId).values : new Float32Array([0]), | |
h = new Float32Array(c.length), | |
g = f.length, | |
x = d.length, | |
b = m.length, | |
C = l.length, | |
S = 0, | |
k = 0, | |
_ = 0, | |
E = 0; | |
for (let R = 0; R < c.length; ++R) h[R] = f[S++] + (c[R] - l[k++]) * d[_++] / Math.sqrt(m[E++] + u), S >= g && (S = 0), k >= C && (k = 0), _ >= x && (_ = 0), E >= b && (E = 0); | |
return e.makeTensorInfo(n.shape, n.dtype, h); | |
} | |
var n$ = { | |
kernelName: vn, | |
backendName: "cpu", | |
kernelFunc: EY | |
}; | |
function RY(r) { | |
let { | |
inputs: t8, | |
backend: e, | |
attrs: o | |
} = r, | |
{ | |
x: n | |
} = t8, | |
{ | |
blockShape: s, | |
crops: a | |
} = o; | |
Q([n], "batchToSpaceND"); | |
let i = s.reduce((x, b) => x * b), | |
p = w.getReshaped(n.shape, s, i), | |
u = w.getPermuted(p.length, s.length), | |
c = w.getReshapedPermuted(n.shape, s, i), | |
l = w.getSliceBeginCoords(a, s.length), | |
m = w.getSliceSize(c, a, s.length), | |
d = Ve({ | |
inputs: { | |
x: n | |
}, | |
backend: e, | |
attrs: { | |
shape: p | |
} | |
}), | |
f = It({ | |
inputs: { | |
x: d | |
}, | |
backend: e, | |
attrs: { | |
perm: u | |
} | |
}), | |
h = Ve({ | |
inputs: { | |
x: f | |
}, | |
backend: e, | |
attrs: { | |
shape: c | |
} | |
}), | |
g = Fo({ | |
inputs: { | |
x: h | |
}, | |
backend: e, | |
attrs: { | |
begin: l, | |
size: m | |
} | |
}); | |
return e.disposeIntermediateTensorInfo(d), e.disposeIntermediateTensorInfo(f), e.disposeIntermediateTensorInfo(h), g; | |
} | |
var s$ = { | |
kernelName: Js, | |
backendName: "cpu", | |
kernelFunc: RY | |
}; | |
function DY(r) { | |
let { | |
inputs: t8, | |
backend: e, | |
attrs: o | |
} = r, | |
{ | |
x: n, | |
weights: s | |
} = t8, | |
{ | |
size: a | |
} = o, | |
i = e.data.get(n.dataId).values, | |
p = e.data.get(s.dataId).values, | |
u = Ic(i, p, s.dtype, s.shape, a); | |
return e.makeTensorInfo([a], s.dtype, u); | |
} | |
var a$ = { | |
kernelName: en, | |
backendName: "cpu", | |
kernelFunc: DY | |
}; | |
function AY(r) { | |
let { | |
inputs: t8, | |
backend: e | |
} = r, | |
{ | |
s0: o, | |
s1: n | |
} = t8, | |
s = e.data.get(o.dataId).values, | |
a = e.data.get(n.dataId).values, | |
i = w.assertAndGetBroadcastShape(Array.from(s), Array.from(a)); | |
return e.makeTensorInfo([i.length], "int32", Int32Array.from(i)); | |
} | |
var i$ = { | |
kernelName: ea, | |
backendName: "cpu", | |
kernelFunc: AY | |
}; | |
var FY = Ie(Co, (r, t8) => { | |
let e = t8; | |
return r > e.clipValueMax ? e.clipValueMax : r < e.clipValueMin ? e.clipValueMin : r; | |
}); | |
var u$ = { | |
kernelName: Co, | |
backendName: "cpu", | |
kernelFunc: FY | |
}; | |
var PY = r => { | |
let { | |
x: t8 | |
} = r.inputs, | |
e = r.backend, | |
o = new Float32Array(y.sizeFromShape(t8.shape)), | |
n = e.data.get(t8.dataId), | |
s = n.complexTensorInfos.real, | |
a = n.complexTensorInfos.imag, | |
i = e.data.get(s.dataId).values, | |
p = e.data.get(a.dataId).values; | |
for (let u = 0; u < i.length; u++) { | |
let c = i[u], | |
l = p[u]; | |
o[u] = Math.hypot(c, l); | |
} | |
return e.makeOutput(o, t8.shape, "float32"); | |
}; | |
var p$ = { | |
kernelName: Pi, | |
backendName: "cpu", | |
kernelFunc: PY | |
}; | |
function Ma(r) { | |
let { | |
inputs: t8, | |
backend: e | |
} = r, | |
{ | |
input: o | |
} = t8, | |
n = e.data.get(o.dataId).complexTensorInfos.imag, | |
s = e.data.get(n.dataId).values; | |
return e.makeTensorInfo(n.shape, n.dtype, s); | |
} | |
var c$ = { | |
kernelName: Gi, | |
backendName: "cpu", | |
kernelFunc: Ma | |
}; | |
function yu(r) { | |
let { | |
inputs: t8, | |
backend: e, | |
attrs: o | |
} = r, | |
{ | |
axis: n | |
} = o, | |
s = y.parseAxisParam(n, t8[0].shape)[0], | |
a = t8.map(h => h.shape); | |
w.assertParamsConsistent(a, s); | |
let i = w.computeOutShape(t8.map(h => h.shape), s); | |
if (y.sizeFromShape(i) === 0) return e.makeTensorInfo(i, t8[0].dtype, []); | |
let p = t8.filter(h => y.sizeFromShape(h.shape) > 0); | |
if (p.length === 1) return mr({ | |
inputs: { | |
x: p[0] | |
}, | |
backend: e | |
}); | |
if (p[0].dtype === "complex64") { | |
let h = p.map(S => Ro({ | |
inputs: { | |
input: S | |
}, | |
backend: e | |
})), | |
g = p.map(S => Ma({ | |
inputs: { | |
input: S | |
}, | |
backend: e | |
})), | |
x = yu({ | |
inputs: h, | |
backend: e, | |
attrs: { | |
axis: s | |
} | |
}), | |
b = yu({ | |
inputs: g, | |
backend: e, | |
attrs: { | |
axis: s | |
} | |
}), | |
C = Kt({ | |
inputs: { | |
real: x, | |
imag: b | |
}, | |
backend: e | |
}); | |
return h.forEach(S => e.disposeIntermediateTensorInfo(S)), g.forEach(S => e.disposeIntermediateTensorInfo(S)), e.disposeIntermediateTensorInfo(x), e.disposeIntermediateTensorInfo(b), C; | |
} | |
let u = p.map(h => { | |
let x = [-1, y.sizeFromShape(h.shape.slice(s))]; | |
return Ve({ | |
inputs: { | |
x: h | |
}, | |
backend: e, | |
attrs: { | |
shape: x | |
} | |
}); | |
}), | |
c = u.map(h => ({ | |
vals: e.data.get(h.dataId).values, | |
shape: h.shape | |
})); | |
i = w.computeOutShape(u.map(h => h.shape), 1); | |
let l = u[0].shape[0] === 1, | |
m = mp(c, i, t8[0].dtype, l), | |
d = w.computeOutShape(p.map(h => h.shape), s), | |
f = e.makeTensorInfo(d, t8[0].dtype, m); | |
return u.forEach(h => e.disposeIntermediateTensorInfo(h)), f; | |
} | |
var l$ = { | |
kernelName: ta, | |
backendName: "cpu", | |
kernelFunc: yu | |
}; | |
function yI(r) { | |
let { | |
inputs: t8, | |
backend: e, | |
attrs: o | |
} = r, | |
{ | |
x: n, | |
filter: s | |
} = t8, | |
{ | |
strides: a, | |
pad: i, | |
dataFormat: p, | |
dilations: u, | |
dimRoundingMode: c | |
} = o; | |
Q([n, s], "conv2d"); | |
let l = w.convertConv2DDataFormat(p), | |
m = w.computeConv2DInfo(n.shape, s.shape, a, u, i, c, false, l), | |
d = m.filterHeight, | |
f = m.filterWidth, | |
h = m.dilationHeight, | |
g = m.dilationWidth, | |
x = m.padInfo.left, | |
b = m.padInfo.top, | |
C = m.dataFormat === "channelsLast", | |
S = new tt(m.outShape, n.dtype), | |
k = y.computeStrides(n.shape), | |
_ = y.computeStrides(s.shape), | |
E = k[0], | |
R = C ? k[1] : k[2], | |
D = C ? k[2] : 1, | |
P = C ? 1 : k[1], | |
O = S.strides[0], | |
M = C ? S.strides[1] : S.strides[2], | |
L = C ? S.strides[2] : 1, | |
B = C ? 1 : S.strides[1], | |
z = e.data.get(n.dataId).values, | |
U = e.data.get(s.dataId).values, | |
j = S.values; | |
for (let q = 0; q < m.batchSize; ++q) { | |
let Y = q * E, | |
J = q * O; | |
for (let re = 0; re < m.outHeight; ++re) { | |
let ne = J + re * M, | |
ee = re * m.strideHeight - b; | |
for (let oe = 0; oe < d; ++oe) { | |
let ie = ee + oe * h; | |
if (ie < 0 || ie >= m.inHeight) continue; | |
let le = oe * _[0], | |
be = Y + ie * R; | |
for (let _e = 0; _e < m.outWidth; ++_e) { | |
let ve = ne + _e * L, | |
Fe = _e * m.strideWidth - x; | |
for (let Pe = 0; Pe < f; ++Pe) { | |
let st = Fe + Pe * g; | |
if (st < 0 || st >= m.inWidth) continue; | |
let lt = le + Pe * _[1], | |
Ge = be + st * D, | |
mt = lt; | |
for (let it = 0; it < m.inChannels; ++it) { | |
let gt = z[Ge + it * P]; | |
for (let xt = 0; xt < m.outChannels; ++xt) j[ve + xt * B] += gt * U[mt + xt]; | |
mt += m.outChannels; | |
} | |
} | |
} | |
} | |
} | |
} | |
return e.makeTensorInfo(S.shape, S.dtype, j); | |
} | |
var m$ = { | |
kernelName: rn, | |
backendName: "cpu", | |
kernelFunc: yI | |
}; | |
function OY(r) { | |
let { | |
inputs: t8, | |
backend: e, | |
attrs: o | |
} = r, | |
{ | |
x: n, | |
dy: s | |
} = t8, | |
{ | |
strides: a, | |
pad: i, | |
dataFormat: p, | |
dimRoundingMode: u, | |
filterShape: c | |
} = o; | |
Q([n, s], "conv2dBackpropFilter"); | |
let l = w.convertConv2DDataFormat(p), | |
m = w.computeConv2DInfo(n.shape, c, a, 1, i, u, false, l), | |
{ | |
strideHeight: d, | |
strideWidth: f, | |
filterHeight: h, | |
filterWidth: g | |
} = m, | |
x = m.dataFormat === "channelsLast", | |
b = new tt(m.filterShape, "float32"), | |
C = m.padInfo.left, | |
S = m.padInfo.top, | |
k = e.data.get(n.dataId).values, | |
_ = e.data.get(s.dataId).values, | |
E = new tt(n.shape, n.dtype, k), | |
R = new tt(s.shape, s.dtype, _); | |
for (let D = 0; D < h; ++D) { | |
let P = Math.max(0, Math.ceil((S - D) / d)), | |
O = Math.min(m.outHeight, (m.inHeight + S - D) / d); | |
for (let M = 0; M < g; ++M) { | |
let L = Math.max(0, Math.ceil((C - M) / f)), | |
B = Math.min(m.outWidth, (m.inWidth + C - M) / f); | |
for (let z = 0; z < m.inChannels; ++z) for (let U = 0; U < m.outChannels; ++U) { | |
let j = 0; | |
for (let q = 0; q < m.batchSize; ++q) for (let Y = P; Y < O; ++Y) { | |
let J = D + Y * d - S; | |
for (let re = L; re < B; ++re) { | |
let ne = M + re * f - C; | |
x ? j += E.get(q, J, ne, z) * R.get(q, Y, re, U) : j += E.get(q, z, J, ne) * R.get(q, U, Y, re); | |
} | |
} | |
b.set(j, D, M, z, U); | |
} | |
} | |
} | |
return e.makeTensorInfo(b.shape, b.dtype, b.values); | |
} | |
var d$ = { | |
kernelName: Oi, | |
backendName: "cpu", | |
kernelFunc: OY | |
}; | |
function MY(r) { | |
let { | |
inputs: t8, | |
backend: e, | |
attrs: o | |
} = r, | |
{ | |
dy: n, | |
filter: s | |
} = t8, | |
{ | |
inputShape: a, | |
strides: i, | |
pad: p, | |
dataFormat: u, | |
dimRoundingMode: c | |
} = o; | |
Q([n, s], "conv2dBackpropInput"); | |
let l = y.computeStrides(s.shape), | |
m = y.computeStrides(n.shape), | |
d = w.convertConv2DDataFormat(u), | |
f = w.computeConv2DInfo(a, s.shape, i, 1, p, c, false, d), | |
h = new tt(f.inShape, "float32"), | |
g = h.values, | |
x = e.data.get(n.dataId).values, | |
b = e.data.get(s.dataId).values, | |
[C, S, k] = l, | |
{ | |
batchSize: _, | |
filterHeight: E, | |
filterWidth: R, | |
inChannels: D, | |
inHeight: P, | |
inWidth: O, | |
outChannels: M, | |
outHeight: L, | |
outWidth: B, | |
strideHeight: z, | |
strideWidth: U | |
} = f; | |
d = f.dataFormat; | |
let j = E - 1 - f.padInfo.top, | |
q = R - 1 - f.padInfo.left, | |
Y = d === "channelsLast", | |
J = h.strides[0], | |
re = Y ? h.strides[1] : h.strides[2], | |
ne = Y ? h.strides[2] : 1, | |
ee = Y ? 1 : h.strides[1], | |
oe = m[0], | |
ie = Y ? m[1] : m[2], | |
le = Y ? m[2] : 1, | |
be = Y ? 1 : m[1]; | |
for (let _e = 0; _e < _; ++_e) for (let ve = 0; ve < D; ++ve) for (let Fe = 0; Fe < P; ++Fe) { | |
let Pe = Fe - j, | |
st = Math.max(0, Math.ceil(Pe / z)), | |
lt = Math.min(L, (E + Pe) / z); | |
for (let Ge = 0; Ge < O; ++Ge) { | |
let mt = Ge - q, | |
it = Math.max(0, Math.ceil(mt / U)), | |
gt = Math.min(B, (R + mt) / U), | |
xt = 0; | |
for (let Lt = st; Lt < lt; ++Lt) { | |
let to = Lt * z - Pe; | |
for (let nr = it; nr < gt; ++nr) { | |
let _t = nr * U - mt, | |
sr = oe * _e + ie * Lt + le * nr, | |
ar = C * (E - 1 - to) + S * (R - 1 - _t) + k * ve; | |
for (let ro = 0; ro < M; ++ro) { | |
let oo = x[sr + be * ro], | |
hr = b[ar + ro]; | |
xt += oo * hr; | |
} | |
} | |
} | |
let Lr = J * _e + re * Fe + ne * Ge + ee * ve; | |
g[Lr] = xt; | |
} | |
} | |
return e.makeTensorInfo(h.shape, h.dtype, h.values); | |
} | |
var f$ = { | |
kernelName: on, | |
backendName: "cpu", | |
kernelFunc: MY | |
}; | |
function LY(r) { | |
let { | |
inputs: t8, | |
backend: e, | |
attrs: o | |
} = r, | |
{ | |
x: n, | |
filter: s | |
} = t8, | |
{ | |
strides: a, | |
pad: i, | |
dilations: p | |
} = o; | |
Q([n, s], "conv3d"); | |
let u = w.computeConv3DInfo(n.shape, s.shape, a, p, i), | |
{ | |
filterDepth: c, | |
filterHeight: l, | |
filterWidth: m, | |
dilationDepth: d, | |
dilationHeight: f, | |
dilationWidth: h, | |
padInfo: g | |
} = u, | |
x = g.front, | |
b = g.left, | |
C = g.top, | |
S = new tt(u.outShape, n.dtype), | |
k = e.data.get(n.dataId).values, | |
_ = e.data.get(s.dataId).values, | |
E = S.values, | |
R = y.computeStrides(n.shape), | |
D = y.computeStrides(s.shape); | |
for (let P = 0; P < u.batchSize; ++P) { | |
let O = P * R[0], | |
M = P * S.strides[0]; | |
for (let L = 0; L < u.outDepth; ++L) { | |
let B = M + L * S.strides[1], | |
z = L * u.strideDepth - x; | |
for (let U = 0; U < c; ++U) { | |
let j = z + U * d; | |
if (j < 0 || j >= u.inDepth) continue; | |
let q = U * D[0], | |
Y = O + j * R[1]; | |
for (let J = 0; J < u.outHeight; ++J) { | |
let re = B + J * S.strides[2], | |
ne = J * u.strideHeight - C; | |
for (let ee = 0; ee < l; ++ee) { | |
let oe = ne + ee * f; | |
if (oe < 0 || oe >= u.inHeight) continue; | |
let ie = q + ee * D[1], | |
le = Y + oe * R[2]; | |
for (let be = 0; be < u.outWidth; ++be) { | |
let _e = re + be * u.outChannels, | |
ve = be * u.strideWidth - b; | |
for (let Fe = 0; Fe < m; ++Fe) { | |
let Pe = ve + Fe * h; | |
if (Pe < 0 || Pe >= u.inWidth) continue; | |
let st = ie + Fe * D[2], | |
lt = le + Pe * u.inChannels, | |
Ge = st; | |
for (let mt = 0; mt < u.inChannels; ++mt) { | |
let it = k[lt + mt]; | |
for (let gt = 0; gt < u.outChannels; ++gt) E[_e + gt] += it * _[Ge + gt]; | |
Ge += u.outChannels; | |
} | |
} | |
} | |
} | |
} | |
} | |
} | |
} | |
return e.makeTensorInfo(S.shape, S.dtype, S.values); | |
} | |
var h$ = { | |
kernelName: nn, | |
backendName: "cpu", | |
kernelFunc: LY | |
}; | |
function BY(r) { | |
let { | |
inputs: t8, | |
backend: e, | |
attrs: o | |
} = r, | |
{ | |
x: n, | |
dy: s | |
} = t8, | |
{ | |
strides: a, | |
pad: i, | |
filterShape: p | |
} = o; | |
Q([n, s], "conv3dBackpropFilterV2"); | |
let u = y.computeStrides(n.shape), | |
c = y.computeStrides(s.shape), | |
l = w.computeConv3DInfo(n.shape, p, a, 1, i), | |
m = l.strideDepth, | |
d = l.strideHeight, | |
f = l.strideWidth, | |
h = l.filterDepth, | |
g = l.filterHeight, | |
x = l.filterWidth, | |
b = new tt(l.filterShape, "float32"), | |
C = b.values, | |
[S, k, _, E] = b.strides, | |
R = e.data.get(s.dataId).values, | |
[D, P, O, M] = c, | |
L = e.data.get(n.dataId).values, | |
[B, z, U, j] = u, | |
q = l.padInfo.front, | |
Y = l.padInfo.left, | |
J = l.padInfo.top; | |
for (let re = 0; re < h; ++re) { | |
let ne = Math.max(0, Math.ceil((q - re) / m)), | |
ee = Math.min(l.outDepth, (l.inDepth + q - re) / m), | |
oe = re * S; | |
for (let ie = 0; ie < g; ++ie) { | |
let le = Math.max(0, Math.ceil((J - ie) / d)), | |
be = Math.min(l.outHeight, (l.inHeight + J - ie) / d), | |
_e = ie * k + oe; | |
for (let ve = 0; ve < x; ++ve) { | |
let Fe = Math.max(0, Math.ceil((Y - ve) / f)), | |
Pe = Math.min(l.outWidth, (l.inWidth + Y - ve) / f), | |
st = ve * _ + _e; | |
for (let lt = 0; lt < l.inChannels; ++lt) { | |
let Ge = lt * E + st; | |
for (let mt = 0; mt < l.outChannels; ++mt) { | |
let it = 0; | |
for (let gt = 0; gt < l.batchSize; ++gt) { | |
let xt = gt * B, | |
Lr = gt * D; | |
for (let Lt = ne; Lt < ee; ++Lt) { | |
let nr = (re + Lt * m - q) * z + xt, | |
_t = Lt * P + Lr; | |
for (let sr = le; sr < be; ++sr) { | |
let ro = (ie + sr * d - J) * U + nr, | |
oo = sr * O + _t; | |
for (let hr = Fe; hr < Pe; ++hr) { | |
let Bo = (ve + hr * f - Y) * j + ro, | |
Ks = hr * M + oo; | |
it += L[Bo + lt] * R[Ks + mt]; | |
} | |
} | |
} | |
} | |
C[Ge + mt] = it; | |
} | |
} | |
} | |
} | |
} | |
return e.makeTensorInfo(b.shape, b.dtype, b.values); | |
} | |
var g$ = { | |
kernelName: Xa, | |
backendName: "cpu", | |
kernelFunc: BY | |
}; | |
function zY(r) { | |
let { | |
inputs: t8, | |
backend: e, | |
attrs: o | |
} = r, | |
{ | |
dy: n, | |
filter: s | |
} = t8, | |
{ | |
pad: a, | |
strides: i, | |
inputShape: p | |
} = o; | |
Q([n], "conv3dBackpropInputV2"); | |
let u = y.computeStrides(n.shape), | |
c = y.computeStrides(s.shape), | |
l = w.computeConv3DInfo(p, s.shape, i, 1, a), | |
m = new tt(l.inShape, "float32"), | |
d = m.values, | |
[f, h, g, x] = m.strides, | |
b = e.data.get(n.dataId).values, | |
[C, S, k, _] = u, | |
E = e.data.get(s.dataId).values, | |
[R, D, P, O] = c, | |
{ | |
batchSize: M, | |
filterDepth: L, | |
filterHeight: B, | |
filterWidth: z, | |
inChannels: U, | |
inDepth: j, | |
inHeight: q, | |
inWidth: Y, | |
outChannels: J, | |
outDepth: re, | |
outHeight: ne, | |
outWidth: ee, | |
strideDepth: oe, | |
strideHeight: ie, | |
strideWidth: le | |
} = l, | |
be = L - 1 - l.padInfo.front, | |
_e = B - 1 - l.padInfo.top, | |
ve = z - 1 - l.padInfo.left; | |
for (let Fe = 0; Fe < M; ++Fe) for (let Pe = 0; Pe < U; ++Pe) for (let st = 0; st < j; ++st) { | |
let lt = st - be, | |
Ge = Math.max(0, Math.ceil(lt / oe)), | |
mt = Math.min(re, (L + lt) / oe); | |
for (let it = 0; it < q; ++it) { | |
let gt = it - _e, | |
xt = Math.max(0, Math.ceil(gt / ie)), | |
Lr = Math.min(ne, (B + gt) / ie); | |
for (let Lt = 0; Lt < Y; ++Lt) { | |
let to = Lt - ve, | |
nr = Math.max(0, Math.ceil(to / le)), | |
_t = Math.min(ee, (z + to) / le), | |
sr = 0; | |
for (let ar = Ge; ar < mt; ++ar) { | |
let ro = ar * oe - lt; | |
for (let oo = xt; oo < Lr; ++oo) { | |
let hr = oo * ie - gt; | |
for (let Wa = nr; Wa < _t; ++Wa) { | |
let Bo = Wa * le - to, | |
Ks = C * Fe + S * ar + k * oo + _ * Wa, | |
Yt = R * (L - 1 - ro) + D * (B - 1 - hr) + P * (z - 1 - Bo) + O * Pe; | |
for (let Ua = 0; Ua < J; ++Ua) { | |
let sl = b[Ks + Ua], | |
al = E[Yt + Ua]; | |
sr += sl * al; | |
} | |
} | |
} | |
} | |
d[f * Fe + h * st + g * it + x * Lt + Pe] = sr; | |
} | |
} | |
} | |
return e.makeTensorInfo(m.shape, m.dtype, m.values); | |
} | |
var x$ = { | |
kernelName: sn, | |
backendName: "cpu", | |
kernelFunc: zY | |
}; | |
var VY = Ie(an, r => Math.cos(r)); | |
var y$ = { | |
kernelName: an, | |
backendName: "cpu", | |
kernelFunc: VY | |
}; | |
var WY = Ie(un, r => Math.cosh(r)); | |
var b$ = { | |
kernelName: un, | |
backendName: "cpu", | |
kernelFunc: WY | |
}; | |
function UY(r) { | |
let { | |
inputs: t8, | |
backend: e, | |
attrs: o | |
} = r, | |
{ | |
image: n, | |
boxes: s, | |
boxInd: a | |
} = t8, | |
{ | |
cropSize: i, | |
method: p, | |
extrapolationValue: u | |
} = o, | |
[c, l, m, d] = n.shape, | |
f = s.shape[0], | |
[h, g] = i, | |
x = me([f, h, g, d], "float32"), | |
b = e.data.get(s.dataId).values, | |
C = e.data.get(a.dataId).values, | |
S = e.data.get(n.dataId).values, | |
k = y.computeStrides(n.shape), | |
_ = y.computeStrides(x.shape); | |
for (let E = 0; E < f; E++) { | |
let R = E * 4, | |
D = b[R], | |
P = b[R + 1], | |
O = b[R + 2], | |
M = b[R + 3], | |
L = C[E]; | |
if (L >= c) continue; | |
let B = h > 1 ? (O - D) * (l - 1) / (h - 1) : 0, | |
z = g > 1 ? (M - P) * (m - 1) / (g - 1) : 0; | |
for (let U = 0; U < h; U++) { | |
let j = h > 1 ? D * (l - 1) + U * B : 0.5 * (D + O) * (l - 1); | |
if (j < 0 || j > l - 1) { | |
for (let q = 0; q < g; q++) for (let Y = 0; Y < d; Y++) { | |
let J = Y + q * _[2] + U * _[1] + E * _[0]; | |
x.values[J] = u; | |
} | |
continue; | |
} | |
if (p === "bilinear") { | |
let q = Math.floor(j), | |
Y = Math.ceil(j), | |
J = j - q; | |
for (let re = 0; re < g; re++) { | |
let ne = g > 1 ? P * (m - 1) + re * z : 0.5 * (P + M) * (m - 1); | |
if (ne < 0 || ne > m - 1) { | |
for (let le = 0; le < d; le++) { | |
let be = le + re * _[2] + U * _[1] + E * _[0]; | |
x.values[be] = u; | |
} | |
continue; | |
} | |
let ee = Math.floor(ne), | |
oe = Math.ceil(ne), | |
ie = ne - ee; | |
for (let le = 0; le < d; le++) { | |
let be = le + ee * k[2] + q * k[1] + L * k[0], | |
_e = S[be]; | |
be = le + oe * k[2] + q * k[1] + L * k[0]; | |
let ve = S[be]; | |
be = le + ee * k[2] + Y * k[1] + L * k[0]; | |
let Fe = S[be]; | |
be = le + oe * k[2] + Y * k[1] + L * k[0]; | |
let Pe = S[be], | |
st = _e + (ve - _e) * ie, | |
lt = Fe + (Pe - Fe) * ie; | |
be = le + re * _[2] + U * _[1] + E * _[0], x.values[be] = st + (lt - st) * J; | |
} | |
} | |
} else for (let q = 0; q < g; ++q) { | |
let Y = g > 1 ? P * (m - 1) + q * z : 0.5 * (P + M) * (m - 1); | |
if (Y < 0 || Y > m - 1) { | |
for (let ne = 0; ne < d; ne++) { | |
let ee = ne + q * _[2] + U * _[1] + E * _[0]; | |
x.values[ee] = u; | |
} | |
continue; | |
} | |
let J = Math.round(Y), | |
re = Math.round(j); | |
for (let ne = 0; ne < d; ne++) { | |
let ee = ne + J * k[2] + re * k[1] + L * k[0], | |
oe = ne + q * _[2] + U * _[1] + E * _[0]; | |
x.values[oe] = S[ee]; | |
} | |
} | |
} | |
} | |
return e.makeTensorInfo(x.shape, x.dtype, x.values); | |
} | |
var C$ = { | |
kernelName: ln, | |
backendName: "cpu", | |
kernelFunc: UY | |
}; | |
function GY(r) { | |
let { | |
inputs: t8, | |
backend: e, | |
attrs: o | |
} = r, | |
{ | |
x: n | |
} = t8, | |
{ | |
axis: s, | |
exclusive: a, | |
reverse: i | |
} = o; | |
Q(n, "cumprod"); | |
let p = w.getAxesPermutation([s], n.shape.length), | |
u = n; | |
p != null && (u = It({ | |
inputs: { | |
x: n | |
}, | |
backend: e, | |
attrs: { | |
perm: p | |
} | |
})); | |
let c = w.getInnerMostAxes(1, n.shape.length)[0]; | |
if (c !== u.shape.length - 1) throw new Error(`backend.cumprod in CPU expects an inner-most axis=${u.shape.length - 1} but got axis=${c}`); | |
let l = dt(u.dtype, "int32"), | |
m = y.makeOnesTypedArray(y.sizeFromShape(u.shape), l), | |
d = e.data.get(u.dataId).values, | |
f = u.shape[u.shape.length - 1], | |
h = i ? (x, b) => x + f - b - 1 : (x, b) => x + b; | |
for (let x = 0; x < d.length; x += f) for (let b = 0; b < f; b++) { | |
let C = h(x, b); | |
if (b === 0) m[C] = a ? 1 : d[C];else { | |
let S = h(x, b - 1); | |
m[C] = a ? d[S] * m[S] : d[C] * m[S]; | |
} | |
} | |
let g = e.makeTensorInfo(u.shape, l, m); | |
if (p != null) { | |
let x = w.getUndoAxesPermutation(p), | |
b = It({ | |
inputs: { | |
x: g | |
}, | |
backend: e, | |
attrs: { | |
perm: x | |
} | |
}); | |
return e.disposeIntermediateTensorInfo(g), e.disposeIntermediateTensorInfo(u), b; | |
} | |
return g; | |
} | |
var w$ = { | |
kernelName: pn, | |
backendName: "cpu", | |
kernelFunc: GY | |
}; | |
function HY(r) { | |
let { | |
inputs: t8, | |
backend: e, | |
attrs: o | |
} = r, | |
{ | |
x: n | |
} = t8, | |
{ | |
axis: s, | |
exclusive: a, | |
reverse: i | |
} = o; | |
Q(n, "cumsum"); | |
let p = w.getAxesPermutation([s], n.shape.length), | |
u = n; | |
p != null && (u = It({ | |
inputs: { | |
x: n | |
}, | |
backend: e, | |
attrs: { | |
perm: p | |
} | |
})); | |
let c = w.getInnerMostAxes(1, n.shape.length)[0]; | |
if (c !== u.shape.length - 1) throw new Error(`backend.cumsum in CPU expects an inner-most axis=${u.shape.length - 1} but got axis=${c}`); | |
let l = dt(u.dtype, "int32"), | |
m = y.makeZerosTypedArray(y.sizeFromShape(u.shape), l), | |
d = e.data.get(u.dataId).values, | |
f = u.shape[u.shape.length - 1], | |
h = i ? (x, b) => x + f - b - 1 : (x, b) => x + b; | |
for (let x = 0; x < d.length; x += f) for (let b = 0; b < f; b++) { | |
let C = h(x, b); | |
if (b === 0) m[C] = a ? 0 : d[C];else { | |
let S = h(x, b - 1); | |
m[C] = a ? d[S] + m[S] : d[C] + m[S]; | |
} | |
} | |
let g = e.makeTensorInfo(u.shape, l, m); | |
if (p != null) { | |
let x = w.getUndoAxesPermutation(p), | |
b = It({ | |
inputs: { | |
x: g | |
}, | |
backend: e, | |
attrs: { | |
perm: x | |
} | |
}); | |
return e.disposeIntermediateTensorInfo(g), e.disposeIntermediateTensorInfo(u), b; | |
} | |
return g; | |
} | |
var S$ = { | |
kernelName: cn, | |
backendName: "cpu", | |
kernelFunc: HY | |
}; | |
function KY(r) { | |
let { | |
inputs: t8, | |
backend: e, | |
attrs: o | |
} = r, | |
{ | |
x: n, | |
weights: s | |
} = t8, | |
{ | |
size: a, | |
binaryOutput: i | |
} = o; | |
if (n.shape.length === 1) { | |
let p = e.data.get(n.dataId).values, | |
u = e.data.get(s.dataId).values, | |
c = Ic(p, u, s.dtype, s.shape, a); | |
return e.makeTensorInfo([a], s.dtype, c); | |
} else if (n.shape.length === 2) { | |
let p = e.bufferSync(n), | |
u = e.bufferSync(s), | |
c = Nf(p, u, a, i); | |
return e.makeTensorInfo(c.shape, s.dtype, c.values); | |
} | |
throw new Error(`Error in denseBincount: input must be at most rank 2, but got rank${n.shape.length}.`); | |
} | |
var I$ = { | |
kernelName: ra, | |
backendName: "cpu", | |
kernelFunc: KY | |
}; | |
function qY(r) { | |
let { | |
inputs: t8, | |
backend: e, | |
attrs: o | |
} = r, | |
{ | |
x: n | |
} = t8, | |
{ | |
blockSize: s, | |
dataFormat: a | |
} = o; | |
y.assert(a === "NHWC", () => `Only NHWC dataFormat supported on CPU for depthToSpace. Got ${a}`); | |
let i = n.shape[0], | |
p = n.shape[1], | |
u = n.shape[2], | |
c = n.shape[3], | |
l = p * s, | |
m = u * s, | |
d = c / (s * s), | |
f = e.data.get(n.dataId).values, | |
h = new Float32Array(i * l * m * d), | |
g = 0; | |
for (let x = 0; x < i; ++x) for (let b = 0; b < l; ++b) { | |
let C = Math.floor(b / s), | |
S = b % s; | |
for (let k = 0; k < m; ++k) { | |
let _ = Math.floor(k / s), | |
E = k % s, | |
R = (S * s + E) * d; | |
for (let D = 0; D < d; ++D) { | |
let O = D + R + c * (_ + u * (C + p * x)); | |
h[g++] = f[O]; | |
} | |
} | |
} | |
return e.makeTensorInfo([i, l, m, d], n.dtype, h); | |
} | |
var v$ = { | |
kernelName: mn, | |
backendName: "cpu", | |
kernelFunc: qY | |
}; | |
// DepthwiseConv2dNative (CPU kernel), NHWC. Each input channel is convolved
// with its own filter stack; channel multiplier k = outChannels / inChannels.
// Exported under the local name bI because the fused variant below reuses it.
function bI(r) {
  let {
      inputs: t8,
      backend: e,
      attrs: o
    } = r,
    {
      x: n,
      filter: s
    } = t8,
    {
      strides: a,
      pad: i,
      dilations: p,
      dimRoundingMode: u
    } = o;
  // Q guards against unsupported dtypes (complex) for this op.
  Q([n, s], "depthwiseConv2DNative");
  let c = y.computeStrides(n.shape),
    l = y.computeStrides(s.shape),
    m = p;
  // Default dilations to [1, 1]; strides and dilations cannot both exceed 1.
  m == null && (m = [1, 1]), y.assert(w.eitherStridesOrDilationsAreOne(a, m), () => `Error in depthwiseConv2d: Either strides or dilations must be 1. Got strides ${a} and dilations '${m}'`);
  let d = w.computeConv2DInfo(n.shape, s.shape, a, m, i, u, true),
    {
      filterHeight: f,
      filterWidth: h,
      dilationHeight: g,
      dilationWidth: x,
      padInfo: b
    } = d,
    C = b.left,
    S = b.top,
    k = d.outChannels / d.inChannels,
    _ = new tt(d.outShape, n.dtype),
    E = e.data.get(n.dataId).values,
    R = e.data.get(s.dataId).values,
    D = _.values;
  // Accumulate directly into the output buffer; rows/cols that fall outside
  // the (dilated) input are skipped via the `continue` guards.
  for (let P = 0; P < d.batchSize; ++P) {
    let O = P * c[0],
      M = P * _.strides[0];
    for (let L = 0; L < d.outHeight; ++L) {
      let B = M + L * _.strides[1],
        z = L * d.strideHeight - S;
      for (let U = 0; U < f; ++U) {
        let j = z + U * g;
        if (j < 0 || j >= d.inHeight) continue;
        let q = U * l[0],
          Y = O + j * c[1];
        for (let J = 0; J < d.outWidth; ++J) {
          let re = B + J * _.strides[2],
            ne = J * d.strideWidth - C;
          for (let ee = 0; ee < h; ++ee) {
            let oe = ne + ee * x;
            if (oe < 0 || oe >= d.inWidth) continue;
            let ie = q + ee * l[1],
              le = Y + oe * d.inChannels,
              be = re,
              _e = ie;
            // Inner product over channels; each input channel fans out to
            // k consecutive output channels.
            for (let ve = 0; ve < d.inChannels; ++ve) {
              let Fe = E[le + ve];
              for (let Pe = 0; Pe < k; ++Pe) D[be + Pe] += Fe * R[_e + Pe];
              be += k, _e += k;
            }
          }
        }
      }
    }
  }
  return e.makeTensorInfo(_.shape, _.dtype, _.values);
}
// CPU registration for DepthwiseConv2dNative.
var k$ = {
  kernelName: dn,
  backendName: "cpu",
  kernelFunc: bI
};
// DepthwiseConv2dNativeBackpropFilter (CPU kernel): gradient of a depthwise
// convolution with respect to the filter. `dy` (s) is the upstream gradient;
// the result has shape `filterShape` (c).
function jY(r) {
  let {
      inputs: t8,
      backend: e,
      attrs: o
    } = r,
    {
      x: n,
      dy: s
    } = t8,
    {
      strides: a,
      dilations: i,
      pad: p,
      dimRoundingMode: u,
      filterShape: c
    } = o;
  Q([n, s], "depthwiseConv2dNativeBackpropFilter");
  let l = w.computeConv2DInfo(n.shape, c, a, i, p, u, true),
    {
      strideHeight: m,
      strideWidth: d,
      filterHeight: f,
      filterWidth: h
    } = l,
    g = new tt(l.filterShape, "float32"),
    x = l.padInfo.left,
    b = l.padInfo.top,
    // C = channel multiplier (outChannels per inChannel).
    C = l.outChannels / l.inChannels,
    S = e.data.get(n.dataId).values,
    k = new tt(n.shape, n.dtype, S),
    _ = e.data.get(s.dataId).values,
    E = new tt(s.shape, s.dtype, _);
  // For each filter tap (R, O) and output channel B, sum x * dy over every
  // output position whose receptive field includes that tap. D/P and M/L
  // clamp the output range so the corresponding input index stays in bounds.
  for (let R = 0; R < f; ++R) {
    let D = Math.max(0, Math.ceil((b - R) / m)),
      P = Math.min(l.outHeight, (l.inHeight + b - R) / m);
    for (let O = 0; O < h; ++O) {
      let M = Math.max(0, Math.ceil((x - O) / d)),
        L = Math.min(l.outWidth, (l.inWidth + x - O) / d);
      for (let B = 0; B < l.outChannels; ++B) {
        // z = source input channel, U = multiplier slot within that channel.
        let z = Math.trunc(B / C),
          U = B % C,
          j = 0;
        for (let q = 0; q < l.batchSize; ++q) for (let Y = D; Y < P; ++Y) {
          let J = R + Y * m - b;
          for (let re = M; re < L; ++re) {
            let ne = O + re * d - x;
            j += k.get(q, J, ne, z) * E.get(q, Y, re, B);
          }
        }
        g.set(j, R, O, z, U);
      }
    }
  }
  return e.makeTensorInfo(g.shape, g.dtype, g.values);
}
// CPU registration for DepthwiseConv2dNativeBackpropFilter.
var N$ = {
  kernelName: Mi,
  backendName: "cpu",
  kernelFunc: jY
};
// DepthwiseConv2dNativeBackpropInput (CPU kernel): gradient of a depthwise
// convolution with respect to the input. `dy` (n) is the upstream gradient;
// the result has shape `inputShape` (c). Uses flat strides into dy / filter /
// output buffers rather than nd accessors.
function XY(r) {
  let {
      inputs: t8,
      backend: e,
      attrs: o
    } = r,
    {
      dy: n,
      filter: s
    } = t8,
    {
      strides: a,
      dilations: i,
      pad: p,
      dimRoundingMode: u,
      inputShape: c
    } = o;
  Q([n, s], "depthwiseConv2DNativeBackpropInput");
  let l = y.computeStrides(n.shape),
    m = y.computeStrides(s.shape),
    d = w.computeConv2DInfo(c, s.shape, a, i, p, u, true),
    f = new tt(d.inShape, "float32"),
    h = f.values,
    [g, x, b] = f.strides,
    C = e.data.get(n.dataId).values,
    [S, k, _] = l,
    E = e.data.get(s.dataId).values,
    [R, D, P] = m,
    {
      batchSize: O,
      filterHeight: M,
      filterWidth: L,
      inChannels: B,
      inHeight: z,
      inWidth: U,
      outChannels: j,
      outHeight: q,
      outWidth: Y,
      strideHeight: J,
      strideWidth: re
    } = d,
    // ne/ee: top/left offsets for the transposed (gradient) convolution.
    ne = M - 1 - d.padInfo.top,
    ee = L - 1 - d.padInfo.left,
    // oe = channel multiplier.
    oe = j / B;
  // For each input position, accumulate dy * filter over the output window
  // [ve, Fe) x [lt, Ge) that could have consumed this input element; the
  // filter is indexed back-to-front (M-1-gt, L-1-Lr) as in a correlation flip.
  for (let ie = 0; ie < O; ++ie) for (let le = 0; le < B; ++le) for (let be = 0; be < z; ++be) {
    let _e = be - ne,
      ve = Math.max(0, Math.ceil(_e / J)),
      Fe = Math.min(q, (M + _e) / J);
    for (let Pe = 0; Pe < U; ++Pe) {
      let st = Pe - ee,
        lt = Math.max(0, Math.ceil(st / re)),
        Ge = Math.min(Y, (L + st) / re),
        mt = 0;
      for (let it = ve; it < Fe; ++it) {
        let gt = it * J - _e;
        for (let xt = lt; xt < Ge; ++xt) {
          let Lr = xt * re - st,
            Lt = S * ie + k * it + _ * xt,
            to = R * (M - 1 - gt) + D * (L - 1 - Lr) + P * le;
          for (let nr = 0; nr < oe; ++nr) {
            let _t = le * oe + nr,
              sr = C[Lt + _t],
              ar = E[to + nr];
            mt += sr * ar;
          }
        }
      }
      h[g * ie + x * be + b * Pe + le] = mt;
    }
  }
  return e.makeTensorInfo(f.shape, f.dtype, f.values);
}
// CPU registration for DepthwiseConv2dNativeBackpropInput.
var T$ = {
  kernelName: Li,
  backendName: "cpu",
  kernelFunc: XY
};
// Diag (CPU kernel): builds a tensor whose shape is the input shape repeated
// twice ([...shape, ...shape]) with the input values on the main diagonal of
// the flattened [n, n] view and zeros elsewhere.
function YY(r) {
  let {
      inputs: t8,
      backend: e
    } = r,
    {
      x: o
    } = t8,
    n = y.sizeFromShape(o.shape),
    s = e.data.get(o.dataId).values,
    // me allocates a zero-filled buffer of the requested shape/dtype.
    a = me([n, n], o.dtype),
    i = a.values;
  // Scatter input values onto the diagonal of the n x n buffer.
  for (let u = 0; u < s.length; u++) i[u * n + u] = s[u];
  let p = [...o.shape, ...o.shape];
  return e.makeTensorInfo(p, a.dtype, a.values);
}
// CPU registration for Diag.
var _$ = {
  kernelName: oa,
  backendName: "cpu",
  kernelFunc: YY
};
// Dilation2D (CPU kernel): grayscale morphological dilation. For each output
// position and channel, takes the max over the filter window of
// input + filter (skipping out-of-bounds taps). NHWC only.
var $$ = {
  kernelName: fn,
  backendName: "cpu",
  kernelFunc: ({
    inputs: r,
    backend: t8,
    attrs: e
  }) => {
    let {
        x: o,
        filter: n
      } = r,
      {
        strides: s,
        pad: a,
        dilations: i
      } = e,
      p = t8,
      u = p.data.get(o.dataId).values,
      c = o.shape.length,
      l = p.data.get(n.dataId).values,
      m = n.shape.length,
      {
        batchSize: d,
        inHeight: f,
        inWidth: h,
        inChannels: g,
        outHeight: x,
        outWidth: b,
        padInfo: C,
        strideHeight: S,
        strideWidth: k,
        filterHeight: _,
        filterWidth: E,
        dilationHeight: R,
        dilationWidth: D,
        outShape: P
      } = w.computeDilation2DInfo(o.shape, n.shape, s, a, "NHWC", i),
      O = y.sizeFromShape(P),
      M = P.length,
      L = y.getArrayFromDType(o.dtype, O);
    for (let z = 0; z < d; ++z) for (let U = 0; U < x; ++U) {
      let j = U * S - C.top;
      for (let q = 0; q < b; ++q) {
        let Y = q * k - C.left;
        for (let J = 0; J < g; ++J) {
          // re: running max of (input + filter) over the window; seeded with
          // MIN_SAFE_INTEGER so any in-bounds tap replaces it.
          let re = Number.MIN_SAFE_INTEGER;
          for (let ee = 0; ee < _; ++ee) {
            let oe = j + ee * R;
            if (oe >= 0 && oe < f) for (let ie = 0; ie < E; ++ie) {
              let le = Y + ie * D;
              if (le >= 0 && le < h) {
                let be = y.locToIndex([z, oe, le, J], c, y.computeStrides(o.shape)),
                  _e = y.locToIndex([ee, ie, J], m, y.computeStrides(n.shape)),
                  ve = u[be] + l[_e];
                ve > re && (re = ve);
              }
            }
          }
          let ne = y.locToIndex([z, U, q, J], M, y.computeStrides(P));
          L[ne] = re;
        }
      }
    }
    return {
      dataId: p.write(y.toTypedArray(L, o.dtype), P, o.dtype),
      shape: P,
      dtype: o.dtype
    };
  }
};
// Dilation2DBackpropFilter (CPU kernel): routes each upstream gradient value
// dy[batch][row][col][chan] to the filter tap (J, re) that produced the max
// in the forward dilation at that position.
var E$ = {
  kernelName: zi,
  backendName: "cpu",
  kernelFunc: ({
    inputs: r,
    backend: t8,
    attrs: e
  }) => {
    let {
        x: o,
        filter: n,
        dy: s
      } = r,
      {
        strides: a,
        pad: i,
        dilations: p
      } = e,
      u = t8,
      // Nested-array views of the input and filter for readable 4-d indexing.
      c = y.toNestedArray(o.shape, u.data.get(o.dataId).values),
      l = y.toNestedArray(n.shape, u.data.get(n.dataId).values),
      {
        batchSize: m,
        inHeight: d,
        inWidth: f,
        inChannels: h,
        outHeight: g,
        outWidth: x,
        padInfo: b,
        strideHeight: C,
        strideWidth: S,
        filterHeight: k,
        filterWidth: _,
        dilationHeight: E,
        dilationWidth: R,
        outShape: D
      } = w.computeDilation2DInfo(o.shape, n.shape, a, i, "NHWC", p);
    y.assert(s.rank === D.length, () => `Error in ${zi}, dy must have the same rank as output ${D.length}, but got ${s.rank}`);
    let P = y.toNestedArray(D, u.data.get(s.dataId).values),
      O = y.makeZerosNestedTypedArray(n.shape, n.dtype);
    for (let L = 0; L < m; ++L) for (let B = 0; B < g; ++B) {
      let z = B * C - b.top;
      for (let U = 0; U < x; ++U) {
        let j = U * S - b.left;
        for (let q = 0; q < h; ++q) {
          // Re-run the forward argmax to find the winning filter tap (J, re).
          let Y = Number.MIN_SAFE_INTEGER,
            J = 0,
            re = 0;
          for (let ne = 0; ne < k; ++ne) {
            let ee = z + ne * E;
            if (ee >= 0 && ee < d) for (let oe = 0; oe < _; ++oe) {
              let ie = j + oe * R;
              if (ie >= 0 && ie < f) {
                let le = c[L][ee][ie][q] + l[ne][oe][q];
                le > Y && (Y = le, J = ne, re = oe);
              }
            }
          }
          O[J][re][q] += P[L][B][U][q];
        }
      }
    }
    return {
      dataId: u.write(y.toTypedArray(O, o.dtype), n.shape, n.dtype),
      shape: n.shape,
      dtype: n.dtype
    };
  }
};
// Dilation2DBackpropInput (CPU kernel): routes each upstream gradient value to
// the input position (J, re) that won the forward max. Mirrors the backprop-
// filter kernel above but accumulates into an input-shaped buffer.
var R$ = {
  kernelName: Bi,
  backendName: "cpu",
  kernelFunc: ({
    inputs: r,
    backend: t8,
    attrs: e
  }) => {
    let {
        x: o,
        filter: n,
        dy: s
      } = r,
      {
        strides: a,
        pad: i,
        dilations: p
      } = e,
      u = t8,
      c = y.toNestedArray(o.shape, u.data.get(o.dataId).values),
      l = y.toNestedArray(n.shape, u.data.get(n.dataId).values),
      {
        batchSize: m,
        inHeight: d,
        inWidth: f,
        inChannels: h,
        outHeight: g,
        outWidth: x,
        padInfo: b,
        strideHeight: C,
        strideWidth: S,
        filterHeight: k,
        filterWidth: _,
        dilationHeight: E,
        dilationWidth: R,
        outShape: D
      } = w.computeDilation2DInfo(o.shape, n.shape, a, i, "NHWC", p);
    y.assert(s.rank === D.length, () => `Error in ${Bi}, dy must have the same rank as output ${D.length}, but got ${s.rank}`);
    let P = y.toNestedArray(D, u.data.get(s.dataId).values),
      O = y.makeZerosNestedTypedArray(o.shape, o.dtype);
    for (let L = 0; L < m; ++L) for (let B = 0; B < g; ++B) {
      let z = B * C - b.top;
      for (let U = 0; U < x; ++U) {
        let j = U * S - b.left;
        for (let q = 0; q < h; ++q) {
          // Winning input coordinate defaults to the clamped window origin in
          // case no in-bounds tap beats MIN_SAFE_INTEGER.
          let Y = Number.MIN_SAFE_INTEGER,
            J = z < 0 ? 0 : z,
            re = j < 0 ? 0 : j;
          for (let ne = 0; ne < k; ++ne) {
            let ee = z + ne * E;
            if (ee >= 0 && ee < d) for (let oe = 0; oe < _; ++oe) {
              let ie = j + oe * R;
              if (ie >= 0 && ie < f) {
                let le = c[L][ee][ie][q] + l[ne][oe][q];
                le > Y && (Y = le, J = ee, re = ie);
              }
            }
          }
          O[L][J][re][q] += P[L][B][U][q];
        }
      }
    }
    return {
      dataId: u.write(y.toTypedArray(O, o.dtype), o.shape, o.dtype),
      shape: o.shape,
      dtype: o.dtype
    };
  }
};
// Draw (CPU kernel): renders a rank-2/3 image tensor into a canvas 2d context
// via putImageData. float32 values must lie in [0, 1]; int32 in [0, 255].
// Returns the input tensor unchanged (drawing is the side effect).
function QY(r) {
  let {
      inputs: t8,
      backend: e,
      attrs: o
    } = r,
    {
      image: n
    } = t8,
    {
      canvas: s,
      options: a
    } = o,
    {
      contextOptions: i,
      imageOptions: p
    } = a || {},
    u = (p == null ? void 0 : p.alpha) || 1,
    c = (i == null ? void 0 : i.contextType) || "2d";
  if (c !== "2d") throw new Error(`Context type ${i.contextType} is not supported by the CPU backend.`);
  let l = s.getContext(c, (i == null ? void 0 : i.contextAttributes) || {});
  if (l == null) throw new Error(`Could not get the context with ${c} type.`);
  let [m, d] = n.shape.slice(0, 2),
    // f = channel count; rank-2 tensors are treated as single-channel.
    f = n.shape.length === 2 ? 1 : n.shape[2],
    h = e.data.get(n.dataId).values,
    // g scales float values up to the 0-255 byte range; int32 passes through.
    g = n.dtype === "float32" ? 255 : 1,
    x = new Uint8ClampedArray(d * m * 4);
  for (let C = 0; C < m * d; ++C) {
    // RGBA defaults: black with the requested alpha.
    let S = [0, 0, 0, 255 * u];
    for (let _ = 0; _ < f; _++) {
      let E = h[C * f + _];
      if (n.dtype === "float32") {
        if (E < 0 || E > 1) throw new Error(`Tensor values for a float32 Tensor must be in the range [0 - 1] but encountered ${E}.`);
      } else if (n.dtype === "int32" && (E < 0 || E > 255)) throw new Error(`Tensor values for a int32 Tensor must be in the range [0 - 255] but encountered ${E}.`);
      // Grayscale input is broadcast to R, G and B.
      f === 1 ? (S[0] = E * g, S[1] = E * g, S[2] = E * g) : S[_] = E * g;
    }
    let k = C * 4;
    x[k + 0] = Math.round(S[0]), x[k + 1] = Math.round(S[1]), x[k + 2] = Math.round(S[2]), x[k + 3] = Math.round(S[3]);
  }
  s.width = d, s.height = m;
  let b = new ImageData(x, d, m);
  return l.putImageData(b, 0, 0), n;
}
// CPU registration for Draw.
var D$ = {
  kernelName: Pu,
  backendName: "cpu",
  kernelFunc: QY
};
// Sum (CPU kernel): reduces over `axis`, optionally keeping reduced dims.
// bool inputs are first cast to int32; non-inner axes are handled by
// transposing the reduced axes to the innermost positions first.
// Exported under the local name gi because the einsum kernel below reuses it.
function gi(r) {
  let {
      inputs: t8,
      backend: e,
      attrs: o
    } = r,
    {
      x: n
    } = t8,
    {
      axis: s,
      keepDims: a
    } = o;
  Q(n, "sum");
  let i;
  // Do = cast kernel, mr = identity/clone kernel.
  n.dtype === "bool" ? i = Do({
    inputs: {
      x: n
    },
    backend: e,
    attrs: {
      dtype: "int32"
    }
  }) : i = mr({
    inputs: {
      x: n
    },
    backend: e
  });
  let p = i.shape.length,
    u = y.parseAxisParam(s, i.shape),
    c = w.getAxesPermutation(u, p),
    l = u,
    m = i;
  // If the reduced axes are not innermost, transpose (It) so that they are.
  c != null && (m = It({
    inputs: {
      x: i
    },
    backend: e,
    attrs: {
      perm: c
    }
  }), l = w.getInnerMostAxes(l.length, p)), w.assertAxesAreInnerMostDims("sum", l, m.shape.length);
  let [d, f] = w.computeOutAndReduceShapes(m.shape, l),
    h = w.upcastType(m.dtype, "int32"),
    g = wc(e, d, h),
    x = y.sizeFromShape(f),
    b = e.data.get(g.dataId).values,
    C = e.data.get(m.dataId).values;
  // Each output element sums a contiguous run of x elements.
  for (let S = 0; S < b.length; ++S) {
    let k = S * x,
      _ = 0;
    for (let E = 0; E < x; ++E) _ += C[k + E];
    b[S] = _;
  }
  // keepDims: reshape (Ve) to reinsert size-1 reduced dims.
  if (a) {
    let S = w.expandShapeToKeepDim(g.shape, u),
      k = g;
    g = Ve({
      inputs: {
        x: g
      },
      backend: e,
      attrs: {
        shape: S
      }
    }), e.disposeIntermediateTensorInfo(k);
  }
  return e.disposeIntermediateTensorInfo(i), c != null && e.disposeIntermediateTensorInfo(m), g;
}
// CPU registration for Sum.
var A$ = {
  kernelName: Is,
  backendName: "cpu",
  kernelFunc: gi
};
// Einsum (CPU kernel): evaluates an einsum `equation` over the input tensor
// list by repeatedly transposing/reshaping operands to a common layout,
// multiplying them together (dp), and summing out contracted dims (gi).
// Intermediate tensors are tracked in f and disposed at the end.
function ZY(r) {
  let {
      inputs: t8,
      backend: e,
      attrs: o
    } = r,
    {
      equation: n
    } = o,
    s = t8,
    {
      allDims: a,
      summedDims: i,
      idDims: p
    } = w.decodeEinsumEquation(n, s.length);
  w.checkEinsumDimSizes(a.length, p, s);
  let {
      path: u,
      steps: c
    } = w.getEinsumComputePath(i, p),
    l = c.length,
    m = null,
    d = a.length,
    f = [];
  for (let h = 0; h < l; ++h) {
    for (let g of c[h]) {
      let {
          permutationIndices: x,
          expandDims: b
        } = w.getEinsumPermutation(d, p[g]),
        C;
      // Skip the transpose when the permutation is the identity.
      w.isIdentityPermutation(x) ? C = s[g] : (C = It({
        inputs: {
          x: s[g]
        },
        backend: e,
        attrs: {
          perm: x
        }
      }), f.push(C));
      // Insert size-1 dims so all operands share the same rank/layout.
      let S = C.shape.slice();
      for (let k = 0; k < b.length; ++k) S.splice(b[k], 0, 1);
      y.arraysEqual(C.shape, S) || (C = Ve({
        inputs: {
          x: C
        },
        backend: e,
        attrs: {
          shape: S
        }
      }), f.push(C)), m === null ? m = C : (m = dp({
        inputs: {
          a: C,
          b: m
        },
        backend: e
      }), f.push(m));
    }
    // After each step (except the last), sum out the dim scheduled by `path`.
    h < l - 1 && (u[h] >= 0 && (m = gi({
      inputs: {
        x: m
      },
      backend: e,
      attrs: {
        axis: u[h] - (a.length - d),
        keepDims: false
      }
    }), f.push(m)), d--);
  }
  // Release every intermediate except the final result.
  for (let h of f) h !== m && e.disposeIntermediateTensorInfo(h);
  return m;
}
// CPU registration for Einsum.
var F$ = {
  kernelName: Vi,
  backendName: "cpu",
  kernelFunc: ZY
};
// EluGrad (CPU kernel): gradient of ELU given the forward output `y` (n) and
// upstream gradient `dy` (o). Where y >= 0 the gradient passes through;
// otherwise it is scaled by (y + 1) (since y = exp(x) - 1 there).
function JY(r) {
  let {
      inputs: t8,
      backend: e
    } = r,
    {
      dy: o,
      y: n
    } = t8;
  Q([o, n], "eluGrad");
  let s = new Float32Array(y.sizeFromShape(n.shape)),
    a = e.data.get(n.dataId).values,
    i = e.data.get(o.dataId).values;
  for (let p = 0; p < a.length; ++p) {
    let u = a[p];
    u >= 0 ? s[p] = i[p] : s[p] = i[p] * (u + 1);
  }
  return e.makeTensorInfo(n.shape, "float32", s);
}
// CPU registration for EluGrad.
var P$ = {
  kernelName: Ya,
  backendName: "cpu",
  kernelFunc: JY
};
// Rational-approximation coefficients for erf, re-exported from the shared
// backend utils (the classic 5-term polynomial approximation of erf).
var eQ = w.ERF_P;
var tQ = w.ERF_A1;
var rQ = w.ERF_A2;
var oQ = w.ERF_A3;
var nQ = w.ERF_A4;
var sQ = w.ERF_A5;
// Erf (CPU kernel): erf is odd, so it is evaluated on |r| and the sign is
// re-applied. Ie wraps a scalar function into a unary element-wise kernel.
var aQ = Ie(xn, r => {
  let t8 = Math.sign(r),
    e = Math.abs(r),
    o = 1 / (1 + eQ * e);
  return t8 * (1 - ((((sQ * o + nQ) * o + oQ) * o + rQ) * o + tQ) * o * Math.exp(-e * e));
});
// CPU registration for Erf.
var O$ = {
  kernelName: xn,
  backendName: "cpu",
  kernelFunc: aQ
};
// ExpandDims (CPU kernel): inserts a size-1 dimension at `dim` (negative dims
// count from the end, range [-(rank+1), rank]) and delegates to reshape (Ve).
// Exported under the local name $c for reuse elsewhere in the bundle.
function $c(r) {
  let {
      inputs: t8,
      backend: e,
      attrs: o
    } = r,
    {
      input: n
    } = t8,
    {
      dim: s
    } = o,
    a = n.shape.length,
    i = n.shape.slice(),
    p = s;
  // Normalize a negative axis, splice in the new unit dim, then reshape.
  return s < 0 && (y.assert(-(a + 1) <= s, () => `Axis must be in the interval [${-(a + 1)}, ${a}]`), p = a + s + 1), i.splice(p, 0, 1), Ve({
    inputs: {
      x: n
    },
    backend: e,
    attrs: {
      shape: i
    }
  });
}
// CPU registration for ExpandDims.
var M$ = {
  kernelName: na,
  backendName: "cpu",
  kernelFunc: $c
};
// Element-wise division kernel: ze lifts the scalar (a, b) => a / b into a
// broadcasting binary impl, Ye wraps it as a kernel function, and Gl is the
// CPU registration (also reused directly by the FFT code below).
var iQ = ze((r, t8) => r / t8);
var Ul = Ye(hn, iQ);
var Gl = {
  kernelName: hn,
  backendName: "cpu",
  kernelFunc: Ul
};
// Batched 1-D FFT helper: `r` is a complex [batch, n] tensor, `t8` is
// presumably the inverse-transform flag (forwarded to uQ), `e` is the CPU
// backend. Each row is sliced out, transformed via uQ, and the results are
// merged back into one complex tensor of the same shape.
function Vf(r, t8, e) {
  let o = r.shape,
    n = o[0],
    s = o[1],
    // Complex tensors store separate real/imag component tensors.
    a = e.data.get(r.dataId),
    i = a.complexTensorInfos.real,
    p = a.complexTensorInfos.imag,
    u = [n, s],
    c = y.sizeFromShape(u),
    l = y.getTypedArrayFromDType("float32", c),
    m = y.getTypedArrayFromDType("float32", c);
  for (let g = 0; g < n; g++) {
    // Slice row g out of the real and imag parts (Fo = slice kernel),
    // recombine into a complex row (Kt = complex kernel), then transform.
    let x = Fo({
        inputs: {
          x: i
        },
        backend: e,
        attrs: {
          begin: [g, 0],
          size: [1, s]
        }
      }),
      b = Fo({
        inputs: {
          x: p
        },
        backend: e,
        attrs: {
          begin: [g, 0],
          size: [1, s]
        }
      }),
      C = Kt({
        inputs: {
          real: x,
          imag: b
        },
        backend: e
      }),
      {
        real: S,
        imag: k
      } = uQ(C, t8, e),
      _ = w.mergeRealAndImagArrays(S, k);
    for (let E = 0; E < s; E++) {
      let R = w.getComplexWithIndex(_, E);
      l[g * s + E] = R.real, m[g * s + E] = R.imag;
    }
    e.disposeIntermediateTensorInfo(x), e.disposeIntermediateTensorInfo(b), e.disposeIntermediateTensorInfo(C);
  }
  let d = e.makeTensorInfo(u, "float32", l),
    f = e.makeTensorInfo(u, "float32", m),
    h = Kt({
      inputs: {
        real: d,
        imag: f
      },
      backend: e
    });
  return e.disposeIntermediateTensorInfo(d), e.disposeIntermediateTensorInfo(f), h;
}
// Single-row FFT dispatcher: uses the fast radix-2 path (CI) when the length
// is a power of two, otherwise falls back to the naive O(n^2) DFT (cQ).
// `t8` is presumably the inverse flag; for the inverse radix-2 path the
// result is divided by n here (the naive path scales internally).
function uQ(r, t8, e) {
  let o = y.sizeFromShape(r.shape),
    n = e.data.get(r.dataId),
    s = e.data.get(n.complexTensorInfos.real.dataId).values,
    a = e.data.get(n.complexTensorInfos.imag.dataId).values;
  if (pQ(o)) {
    let i = CI(s, a, o, t8, e),
      p = [r.shape[0], r.shape[1]];
    if (t8) {
      // Inverse transform: scale both components by 1/n using the div kernel.
      let u = e.makeTensorInfo(p, "float32", i.real),
        c = e.makeTensorInfo(p, "float32", i.imag),
        l = e.makeTensorInfo([], "float32", y.createScalarValue(o, "float32")),
        m = mr({
          inputs: {
            x: l
          },
          backend: e
        }),
        d = Gl.kernelFunc({
          inputs: {
            a: u,
            b: l
          },
          backend: e
        }),
        f = Gl.kernelFunc({
          inputs: {
            a: c,
            b: m
          },
          backend: e
        }),
        h = e.data.get(d.dataId).values,
        g = e.data.get(f.dataId).values;
      return e.disposeIntermediateTensorInfo(u), e.disposeIntermediateTensorInfo(c), e.disposeIntermediateTensorInfo(l), e.disposeIntermediateTensorInfo(m), e.disposeIntermediateTensorInfo(d), e.disposeIntermediateTensorInfo(f), {
        real: h,
        imag: g
      };
    }
    return i;
  } else {
    // Non-power-of-two length: naive DFT on the interleaved representation.
    let i = w.mergeRealAndImagArrays(s, a),
      p = cQ(i, o, t8);
    return w.splitRealAndImagArrays(p);
  }
}
/**
 * Reports whether `r` has at most one bit set, i.e. it is an exact power of
 * two (and, as in the original, also returns true for 0). A power of two has
 * a single set bit, so clearing the lowest set bit via r & (r - 1) yields 0.
 */
function pQ(r) {
  const lowestBitCleared = r & (r - 1);
  return lowestBitCleared === 0;
}
// Recursive radix-2 Cooley-Tukey FFT over separate real/imag arrays of
// power-of-two length e. `o` is presumably the inverse flag (forwarded to
// w.exponents), `n` is the CPU backend. Splits the signal into even/odd
// halves, recurses, multiplies the odd half by the twiddle factors, and
// concatenates (even + t) with (even - t). All intermediates are backend
// tensors and are disposed before returning the raw value arrays.
function CI(r, t8, e, o, n) {
  // Base case: a length-1 transform is the identity.
  if (e === 1) return {
    real: r,
    imag: t8
  };
  let s = w.mergeRealAndImagArrays(r, t8),
    a = e / 2,
    // Even-indexed complex elements.
    i = w.complexWithEvenIndex(s),
    p = i.real,
    u = i.imag,
    c = [p.length],
    l = n.makeTensorInfo(c, "float32", p),
    m = n.makeTensorInfo(c, "float32", u),
    d = Kt({
      inputs: {
        real: l,
        imag: m
      },
      backend: n
    }),
    // Odd-indexed complex elements.
    f = w.complexWithOddIndex(s),
    h = f.real,
    g = f.imag,
    x = [h.length],
    b = n.makeTensorInfo(x, "float32", h),
    C = n.makeTensorInfo(x, "float32", g),
    S = Kt({
      inputs: {
        real: b,
        imag: C
      },
      backend: n
    }),
    // Recurse on the even half.
    k = CI(p, u, a, o, n),
    _ = k.real,
    E = k.imag,
    R = [_.length],
    D = n.makeTensorInfo(R, "float32", _),
    P = n.makeTensorInfo(R, "float32", E),
    O = Kt({
      inputs: {
        real: D,
        imag: P
      },
      backend: n
    }),
    // Recurse on the odd half.
    M = CI(h, g, a, o, n),
    L = M.real,
    B = M.imag,
    z = [L.length],
    U = n.makeTensorInfo(z, "float32", L),
    j = n.makeTensorInfo(z, "float32", B),
    q = Kt({
      inputs: {
        real: U,
        imag: j
      },
      backend: n
    }),
    // Twiddle factors e^(+-2*pi*i*k/e) as a complex tensor.
    Y = w.exponents(e, o),
    J = [Y.real.length],
    re = n.makeTensorInfo(J, "float32", Y.real),
    ne = n.makeTensorInfo(J, "float32", Y.imag),
    ee = Kt({
      inputs: {
        real: re,
        imag: ne
      },
      backend: n
    }),
    // t = twiddle * odd; butterfly: [even + t, even - t].
    oe = dp({
      inputs: {
        a: ee,
        b: q
      },
      backend: n
    }),
    ie = Oa({
      inputs: {
        a: O,
        b: oe
      },
      backend: n
    }),
    le = Vl({
      inputs: {
        a: O,
        b: oe
      },
      backend: n
    }),
    // Extract real (Ro) and imag (Ma) parts of both halves, then concat (yu).
    be = Ro({
      inputs: {
        input: ie
      },
      backend: n
    }),
    _e = Ro({
      inputs: {
        input: le
      },
      backend: n
    }),
    ve = Ma({
      inputs: {
        input: ie
      },
      backend: n
    }),
    Fe = Ma({
      inputs: {
        input: le
      },
      backend: n
    }),
    Pe = yu({
      inputs: [be, _e],
      backend: n,
      attrs: {
        axis: 0
      }
    }),
    st = yu({
      inputs: [ve, Fe],
      backend: n,
      attrs: {
        axis: 0
      }
    }),
    lt = n.data.get(Pe.dataId).values,
    Ge = n.data.get(st.dataId).values;
  // Release every intermediate tensor created above before returning.
  return n.disposeIntermediateTensorInfo(l), n.disposeIntermediateTensorInfo(m), n.disposeIntermediateTensorInfo(d), n.disposeIntermediateTensorInfo(b), n.disposeIntermediateTensorInfo(C), n.disposeIntermediateTensorInfo(S), n.disposeIntermediateTensorInfo(D), n.disposeIntermediateTensorInfo(P), n.disposeIntermediateTensorInfo(O), n.disposeIntermediateTensorInfo(U), n.disposeIntermediateTensorInfo(j), n.disposeIntermediateTensorInfo(q), n.disposeIntermediateTensorInfo(re), n.disposeIntermediateTensorInfo(ne), n.disposeIntermediateTensorInfo(ee), n.disposeIntermediateTensorInfo(oe), n.disposeIntermediateTensorInfo(ie), n.disposeIntermediateTensorInfo(le), n.disposeIntermediateTensorInfo(be), n.disposeIntermediateTensorInfo(ve), n.disposeIntermediateTensorInfo(_e), n.disposeIntermediateTensorInfo(Fe), n.disposeIntermediateTensorInfo(Pe), n.disposeIntermediateTensorInfo(st), {
    real: lt,
    imag: Ge
  };
}
/**
 * Naive O(n^2) discrete Fourier transform over an interleaved complex array.
 * `r` holds [re0, im0, re1, im1, ...], `t8` is the number of complex entries,
 * and a truthy `e` (presumably the inverse-transform flag) makes the result
 * be scaled by 1 / t8. Returns a freshly allocated interleaved Float32Array.
 */
function cQ(r, t8, e) {
  const out = new Float32Array(t8 * 2);
  for (let row = 0; row < t8; row++) {
    let sumReal = 0;
    let sumImag = 0;
    for (let col = 0; col < t8; col++) {
      const twiddle = w.exponent(row * col, t8, e);
      const term = w.getComplexWithIndex(r, col);
      // Complex multiply-accumulate: sum += term * twiddle.
      sumReal += term.real * twiddle.real - term.imag * twiddle.imag;
      sumImag += term.real * twiddle.imag + term.imag * twiddle.real;
    }
    if (e) {
      sumReal /= t8;
      sumImag /= t8;
    }
    w.assignToTypedArray(out, sumReal, sumImag, row);
  }
  return out;
}
// FFT (CPU kernel): flattens the input to [batch, innerDim], runs the batched
// forward transform (Vf with inverse=false), and reshapes back to the
// original shape.
function lQ(r) {
  let {
      inputs: t8,
      backend: e
    } = r,
    {
      input: o
    } = t8,
    n = y.sizeFromShape(o.shape),
    // Transform along the last dimension; everything else is batch.
    s = o.shape[o.shape.length - 1],
    a = n / s,
    i = Ve({
      inputs: {
        x: o
      },
      backend: e,
      attrs: {
        shape: [a, s]
      }
    }),
    p = Vf(i, false, e),
    u = Ve({
      inputs: {
        x: p
      },
      backend: e,
      attrs: {
        shape: o.shape
      }
    });
  return e.disposeIntermediateTensorInfo(i), e.disposeIntermediateTensorInfo(p), u;
}
// CPU registration for FFT.
var L$ = {
  kernelName: Wi,
  backendName: "cpu",
  kernelFunc: lQ
};
// Fill (CPU kernel): creates a tensor of `shape` where every element is
// `value`. If dtype is omitted it is inferred from the value.
function Hl(r) {
  let {
      backend: t8,
      attrs: e
    } = r,
    {
      shape: o,
      value: n,
      dtype: s
    } = e,
    a = s || y.inferDtype(n),
    i = y.getArrayFromDType(a, y.sizeFromShape(o));
  return mQ(i, n, a), t8.makeTensorInfo(o, a, i);
}
// CPU registration for Fill.
var B$ = {
  kernelName: sa,
  backendName: "cpu",
  kernelFunc: Hl
};
/**
 * Writes the value `t8` into every slot of the destination array `r`.
 * The dtype argument `e` is accepted for call-site compatibility but is not
 * consulted here.
 */
function mQ(r, t8, e) {
  for (let slot = 0; slot < r.length; ++slot) {
    r[slot] = t8;
  }
}
// FlipLeftRight (CPU kernel): mirrors a batch of NHWC images horizontally by
// reading each pixel from the mirrored column (width - col - 1).
var z$ = {
  kernelName: wn,
  backendName: "cpu",
  kernelFunc: ({
    inputs: r,
    attrs: t8,
    backend: e
  }) => {
    let {
        image: o
      } = r,
      n = e,
      s = y.getTypedArrayFromDType(o.dtype, y.sizeFromShape(o.shape)),
      // a/i/p/u = batch, height, width, channels.
      [a, i, p, u] = o.shape,
      c = n.data.get(o.dataId).values;
    for (let m = 0; m < a; m++) {
      let d = m * p * i * u;
      for (let f = 0; f < i; f++) {
        let h = f * (p * u);
        for (let g = 0; g < p; g++) {
          let x = g * u;
          for (let b = 0; b < u; b++) {
            // C = mirrored column index; falls back to the original pixel
            // only if the mirror is somehow out of range.
            let C = Math.round(p - g - 1),
              S = d + h + x + b,
              k = c[S];
            if (C >= 0 && C < p) {
              let _ = C * u,
                E = d + h + _ + b;
              k = c[E];
            }
            s[S] = k;
          }
        }
      }
    }
    return {
      dataId: n.write(s, o.shape, o.dtype),
      shape: o.shape,
      dtype: o.dtype
    };
  }
};
// FusedConv2D (CPU kernel): conv2d (yI) followed by an optional bias add and
// an optional activation (Cp applies relu/prelu/leakyrelu/etc.). For NCHW, a
// rank-1 bias / prelu-weights vector is first reshaped to [c, 1, 1] so the
// broadcast lines up with the channel dimension.
function dQ(r) {
  let {
      inputs: t8,
      backend: e,
      attrs: o
    } = r,
    {
      x: n,
      filter: s,
      bias: a,
      preluActivationWeights: i
    } = t8,
    {
      strides: p,
      pad: u,
      dataFormat: c,
      dilations: l,
      dimRoundingMode: m,
      activation: d,
      leakyreluAlpha: f
    } = o,
    h = yI({
      inputs: {
        x: n,
        filter: s
      },
      backend: e,
      attrs: {
        strides: p,
        pad: u,
        dataFormat: c,
        dilations: l,
        dimRoundingMode: m
      }
    });
  if (a) {
    let g = h;
    if (c === "NCHW" && a.shape.length === 1 && a.shape[0] !== 1) {
      // Reshape bias so it broadcasts over H and W in NCHW layout.
      let x = Ve({
        inputs: {
          x: a
        },
        backend: e,
        attrs: {
          shape: [a.shape[0], 1, 1]
        }
      });
      h = Oa({
        inputs: {
          a: h,
          b: x
        },
        backend: e
      }), e.disposeIntermediateTensorInfo(x);
    } else h = Oa({
      inputs: {
        a: h,
        b: a
      },
      backend: e
    });
    e.disposeIntermediateTensorInfo(g);
  }
  if (d) {
    let g = h;
    if (c === "NCHW" && d === "prelu" && i.shape.length === 1 && i.shape[0] !== 1) {
      // Same broadcast fix-up for per-channel prelu weights in NCHW.
      let x = Ve({
        inputs: {
          x: i
        },
        backend: e,
        attrs: {
          shape: [i.shape[0], 1, 1]
        }
      });
      h = Cp(e, h, d, x, f), e.disposeIntermediateTensorInfo(x);
    } else h = Cp(e, h, d, i, f);
    e.disposeIntermediateTensorInfo(g);
  }
  return h;
}
// CPU registration for FusedConv2D.
var V$ = {
  kernelName: vo,
  backendName: "cpu",
  kernelFunc: dQ
};
// FusedDepthwiseConv2D (CPU kernel): depthwise conv (bI, defined above)
// followed by an optional bias add (Oa) and optional activation (Cp).
// Unlike the fused conv2d above, no NCHW reshape of the bias is done here.
function fQ(r) {
  let {
      inputs: t8,
      backend: e,
      attrs: o
    } = r,
    {
      x: n,
      filter: s,
      bias: a,
      preluActivationWeights: i
    } = t8,
    {
      strides: p,
      pad: u,
      dataFormat: c,
      dilations: l,
      dimRoundingMode: m,
      activation: d,
      leakyreluAlpha: f
    } = o,
    h = bI({
      inputs: {
        x: n,
        filter: s
      },
      backend: e,
      attrs: {
        strides: p,
        pad: u,
        dataFormat: c,
        dilations: l,
        dimRoundingMode: m
      }
    });
  if (a) {
    let g = h;
    h = Oa({
      inputs: {
        a: h,
        b: a
      },
      backend: e
    }), e.disposeIntermediateTensorInfo(g);
  }
  if (d) {
    let g = h;
    h = Cp(e, h, d, i, f), e.disposeIntermediateTensorInfo(g);
  }
  return h;
}
// CPU registration for FusedDepthwiseConv2D.
var W$ = {
  kernelName: ko,
  backendName: "cpu",
  kernelFunc: fQ
};
// GatherNd (CPU kernel): gathers slices from `params` using the last
// dimension of `indices` as multi-dimensional coordinates. Shape validation
// and stride computation are delegated to w.prepareAndValidate; the actual
// gather loop lives in Tf (shared impl).
function hQ(r) {
  let {
      inputs: t8,
      backend: e
    } = r,
    {
      params: o,
      indices: n
    } = t8,
    s = y.sizeFromShape(o.shape),
    a = n.shape,
    i = a[a.length - 1],
    [p, u, c, l] = w.prepareAndValidate(o, n);
  // Zero slices to gather: return an empty tensor of the result shape.
  if (u === 0) return e.makeTensorInfo(p, o.dtype, []);
  let m = e.data.get(n.dataId).values,
    d = e.bufferSync(o),
    f = Tf(m, d, o.dtype, u, i, c, l, o.shape, s);
  return e.makeTensorInfo(p, o.dtype, f.values);
}
// CPU registration for GatherNd.
var U$ = {
  kernelName: kn,
  backendName: "cpu",
  kernelFunc: hQ
};
// GatherV2 (CPU kernel): gathers slices along `axis` with optional
// `batchDims`. Indices are range-checked, then both tensors are reshaped to
// a canonical 4-d [batch, outer, dim, slice] layout and the shared gather
// impl (_f) does the copy.
function gQ(r) {
  let {
      inputs: t8,
      backend: e,
      attrs: o
    } = r,
    {
      x: n,
      indices: s
    } = t8,
    {
      axis: a,
      batchDims: i
    } = o;
  Q([n, s], "gatherV2");
  let p = y.parseAxisParam(a, n.shape)[0],
    u = e.data.get(s.dataId).values,
    c = n.shape[p];
  // Validate every index against the gathered dimension's extent.
  for (let S = 0; S < u.length; ++S) {
    let k = u[S];
    y.assert(k <= c - 1 && k >= 0, () => `GatherV2: the index value ${k} is not in [0, ${c - 1}]`);
  }
  let l = i;
  i == null && (l = 0);
  let m = y.sizeFromShape(s.shape),
    d = w.segment_util.collectGatherOpShapeInfo(n, s, p, l),
    f = Ve({
      inputs: {
        x: n
      },
      backend: e,
      attrs: {
        shape: [d.batchSize, d.outerSize, d.dimSize, d.sliceSize]
      }
    }),
    h = Ve({
      inputs: {
        x: s
      },
      backend: e,
      attrs: {
        shape: [d.batchSize, m / d.batchSize]
      }
    }),
    g = [d.batchSize, d.outerSize, m / d.batchSize, d.sliceSize],
    x = e.bufferSync(h),
    b = e.bufferSync(f),
    C = _f(b, x, g);
  return e.disposeIntermediateTensorInfo(f), e.disposeIntermediateTensorInfo(h), e.makeTensorInfo(d.outputShape, C.dtype, C.values);
}
// CPU registration for GatherV2.
var G$ = {
  kernelName: aa,
  backendName: "cpu",
  kernelFunc: gQ
};
// IFFT (CPU kernel): identical structure to the FFT kernel above, but calls
// the batched transform with the inverse flag set (Vf(i, true, e)).
function xQ(r) {
  let {
      inputs: t8,
      backend: e
    } = r,
    {
      input: o
    } = t8,
    n = y.sizeFromShape(o.shape),
    s = o.shape[o.shape.length - 1],
    a = n / s,
    i = Ve({
      inputs: {
        x: o
      },
      backend: e,
      attrs: {
        shape: [a, s]
      }
    }),
    p = Vf(i, true, e),
    u = Ve({
      inputs: {
        x: p
      },
      backend: e,
      attrs: {
        shape: o.shape
      }
    });
  return e.disposeIntermediateTensorInfo(i), e.disposeIntermediateTensorInfo(p), u;
}
// CPU registration for IFFT.
var H$ = {
  kernelName: Ui,
  backendName: "cpu",
  kernelFunc: xQ
};
// Element-wise predicate kernels: Ie lifts a scalar function into a unary
// kernel producing a "bool"-dtype (1/0) output.
// IsFinite: 1 for finite numbers, 0 for NaN / +-Infinity.
var yQ = Ie(_n, r => Number.isFinite(r) ? 1 : 0, "bool");
var K$ = {
  kernelName: _n,
  backendName: "cpu",
  kernelFunc: yQ
};
// IsInf: 1 only for +Infinity or -Infinity.
var bQ = Ie($n, r => Math.abs(r) === 1 / 0 ? 1 : 0, "bool");
var q$ = {
  kernelName: $n,
  backendName: "cpu",
  kernelFunc: bQ
};
// IsNaN: 1 only for NaN.
var CQ = Ie(En, r => Number.isNaN(r) ? 1 : 0, "bool");
var j$ = {
  kernelName: En,
  backendName: "cpu",
  kernelFunc: CQ
};
// LinSpace (CPU kernel): `num` evenly spaced float32 values from `start` to
// `stop`; the numeric work is in the shared impl $f.
function wQ(r) {
  let {
      backend: t8,
      attrs: e
    } = r,
    {
      start: o,
      stop: n,
      num: s
    } = e,
    a = $f(o, n, s);
  return t8.makeTensorInfo([a.length], "float32", a);
}
// CPU registration for LinSpace.
var X$ = {
  kernelName: Fn,
  backendName: "cpu",
  kernelFunc: wQ
};
// Log1p (CPU kernel): element-wise log(1 + x) via the unary helper Ie.
var SQ = Ie(On, r => Math.log1p(r));
var Y$ = {
  kernelName: On,
  backendName: "cpu",
  kernelFunc: SQ
};
// LogicalAnd (CPU kernel): ze builds the broadcasting binary impl, Ye wraps
// it as a kernel; output dtype is "bool".
var IQ = ze((r, t8) => r && t8);
var vQ = Ye(Mn, IQ, null, "bool");
var Q$ = {
  kernelName: Mn,
  backendName: "cpu",
  kernelFunc: vQ
};
// LogicalNot (CPU kernel): unary 1/0 inversion.
var kQ = Ie(Ln, r => r ? 0 : 1, "bool");
var Z$ = {
  kernelName: Ln,
  backendName: "cpu",
  kernelFunc: kQ
};
// LogicalOr (CPU kernel).
var NQ = ze((r, t8) => r || t8);
var TQ = Ye(Bn, NQ, null, "bool");
var J$ = {
  kernelName: Bn,
  backendName: "cpu",
  kernelFunc: TQ
};
// LRN (CPU kernel): local response normalization across the channel (last)
// dimension of an NHWC tensor:
//   out = x * (bias + alpha * sumSq) ^ (-beta)
// where sumSq is the sum of squares over the depthRadius window around the
// element's channel.
function _Q(r) {
  let {
      inputs: t8,
      backend: e,
      attrs: o
    } = r,
    {
      x: n
    } = t8,
    {
      depthRadius: s,
      bias: a,
      alpha: i,
      beta: p
    } = o;
  Q(n, "LRN");
  let u = n.shape[3],
    c = u - 1,
    l = e.data.get(n.dataId).values,
    m = y.sizeFromShape(n.shape),
    d = new Float32Array(m);
  // Sum of squares over the channel window [g - s, g + s], clamped to the
  // channel range, for the flat element index h.
  function f(h) {
    let g = h % u,
      x = h - g + Math.max(0, g - s),
      b = h - g + Math.min(g + s, c),
      C = 0;
    for (; x <= b; x++) {
      let S = l[x];
      C += S * S;
    }
    return C;
  }
  for (let h = 0; h < m; h++) {
    let g = f(h),
      x = l[h] * Math.pow(a + i * g, -p);
    d[h] = x;
  }
  return e.makeTensorInfo(n.shape, n.dtype, d);
}
// CPU registration for LRN.
var eE = {
  kernelName: zn,
  backendName: "cpu",
  kernelFunc: _Q
};
// LRNGrad (CPU kernel): gradient of local response normalization. For each
// element b, distributes dy[b] over every channel E in its depthRadius
// window; the diagonal term additionally gets norm^(-beta).
function $Q(r) {
  let {
      inputs: t8,
      backend: e,
      attrs: o
    } = r,
    {
      x: n,
      y: s,
      dy: a
    } = t8,
    {
      depthRadius: i,
      bias: p,
      alpha: u,
      beta: c
    } = o;
  Q(a, "LRNGrad");
  let l = y.sizeFromShape(a.shape),
    m = a.shape[3],
    d = e.data.get(a.dataId).values,
    f = e.data.get(n.dataId).values,
    h = e.data.get(s.dataId).values,
    g = new Float32Array(l),
    x = l;
  for (let b = 0; b < x; b++) {
    // [S, k): channel window around element b, clamped to the channel range.
    let C = b % m,
      S = b - C + Math.max(0, C - i),
      k = b - C + Math.min(m, C + i + 1),
      _ = 0;
    for (let E = S; E < k; E++) _ += Math.pow(f[E], 2);
    // _ = alpha * sumSq + bias (the normalization denominator base).
    _ = u * _ + p;
    for (let E = S; E < k; E++) {
      let R = -2 * u * c * f[E] * h[b] / _;
      b === E && (R += Math.pow(_, -c)), R *= d[b], g[E] += R;
    }
  }
  return e.makeTensorInfo(a.shape, n.dtype, g);
}
// CPU registration for LRNGrad.
var tE = {
  kernelName: Qa,
  backendName: "cpu",
  kernelFunc: $Q
};
function wI(r) { | |
let { | |
inputs: t8, | |
backend: e, | |
attrs: o | |
} = r, | |
{ | |
x: n | |
} = t8, | |
{ | |
reductionIndices: s, | |
keepDims: a | |
} = o, | |
i = e, | |
p = n.shape, | |
u = p.length, | |
c = y.parseAxisParam(s, p), | |
l = c, | |
m = w.getAxesPermutation(l, u), | |
d = i.data.get(n.dataId).values; | |
if (m != null) { | |
let S = new Array(u); | |
for (let k = 0; k < S.length; k++) S[k] = p[m[k]]; | |
d = vc(d, p, n.dtype, m, S), l = w.getInnerMostAxes(l.length, u), p = S; | |
} | |
Q(n, "max"), w.assertAxesAreInnerMostDims("max", l, u); | |
let [f, h] = w.computeOutAndReduceShapes(p, l), | |
g = y.sizeFromShape(h), | |
x = Ef(d, g, f, n.dtype), | |
b = i.write(x, f, n.dtype), | |
C = f; | |
return a && (C = w.expandShapeToKeepDim(f, c)), { | |
dataId: b, | |
shape: C, | |
dtype: n.dtype | |
}; | |
} | |
var rE = { | |
kernelName: Vn, | |
backendName: "cpu", | |
kernelFunc: wI | |
}; | |
function EQ(r) { | |
let { | |
inputs: t8, | |
backend: e, | |
attrs: o | |
} = r, | |
{ | |
x: n | |
} = t8; | |
Q(n, "maxPool"); | |
let { | |
filterSize: s, | |
strides: a, | |
pad: i, | |
dimRoundingMode: p | |
} = o, | |
u = 1; | |
y.assert(w.eitherStridesOrDilationsAreOne(a, u), () => `Error in maxPool: Either strides or dilations must be 1. Got strides ${a} and dilations '${u}'`); | |
let c = w.computePool2DInfo(n.shape, s, a, u, i, p), | |
l; | |
if (c.filterWidth === 1 && c.filterHeight === 1 && y.arraysEqual(c.inShape, c.outShape)) l = mr({ | |
inputs: { | |
x: n | |
}, | |
backend: e | |
});else { | |
let m = e.data.get(n.dataId).values, | |
d = y.computeStrides(n.shape), | |
f = _c(m, n.shape, n.dtype, d, c, "max"); | |
l = e.makeTensorInfo(c.outShape, n.dtype, f.values); | |
} | |
return l; | |
} | |
var oE = { | |
kernelName: Un, | |
backendName: "cpu", | |
kernelFunc: EQ | |
}; | |
function RQ(r) { | |
let { | |
inputs: t8, | |
backend: e, | |
attrs: o | |
} = r, | |
{ | |
x: n | |
} = t8, | |
{ | |
filterSize: s, | |
strides: a, | |
pad: i, | |
dimRoundingMode: p, | |
dataFormat: u | |
} = o; | |
Q(n, "maxPool3d"); | |
let c = w.computePool3DInfo(n.shape, s, a, 1, i, p, u), | |
l = e.data.get(n.dataId).values, | |
m = zf(l, n.shape, n.dtype, y.computeStrides(n.shape), c, "max"); | |
return e.makeTensorInfo(m.shape, "float32", m.values); | |
} | |
var nE = { | |
kernelName: ia, | |
backendName: "cpu", | |
kernelFunc: RQ | |
}; | |
function DQ(r) { | |
let { | |
inputs: t8, | |
backend: e, | |
attrs: o | |
} = r, | |
{ | |
dy: n, | |
input: s | |
} = t8, | |
{ | |
filterSize: a, | |
strides: i, | |
pad: p, | |
dimRoundingMode: u | |
} = o; | |
Q([n, s], "maxPool3DGrad"); | |
let c = w.computePool3DInfo(s.shape, a, i, 1, p, u), | |
l = e.bufferSync(s), | |
m = J_(l, c), | |
d = c.strideDepth, | |
f = c.strideHeight, | |
h = c.strideWidth, | |
g = c.dilationDepth, | |
x = c.dilationHeight, | |
b = c.dilationWidth, | |
C = c.effectiveFilterDepth, | |
S = c.effectiveFilterHeight, | |
k = c.effectiveFilterWidth, | |
_ = C - 1 - c.padInfo.front, | |
E = k - 1 - c.padInfo.left, | |
R = S - 1 - c.padInfo.top, | |
D = me(s.shape, "float32"), | |
P = e.bufferSync(n); | |
for (let O = 0; O < c.batchSize; ++O) for (let M = 0; M < c.inChannels; ++M) for (let L = 0; L < c.inDepth; ++L) for (let B = 0; B < c.inHeight; ++B) for (let z = 0; z < c.inWidth; ++z) { | |
let U = L - _, | |
j = B - R, | |
q = z - E, | |
Y = 0; | |
for (let J = 0; J < C; J += g) { | |
let re = (U + J) / d; | |
if (!(re < 0 || re >= c.outDepth || Math.floor(re) !== re)) for (let ne = 0; ne < S; ne += x) { | |
let ee = (j + ne) / f; | |
if (!(ee < 0 || ee >= c.outHeight || Math.floor(ee) !== ee)) for (let oe = 0; oe < k; oe += b) { | |
let ie = (q + oe) / h; | |
if (ie < 0 || ie >= c.outWidth || Math.floor(ie) !== ie) continue; | |
let le = C * S * k - 1 - m.get(O, re, ee, ie, M), | |
be = J * S * k + ne * k + oe, | |
_e = le === be ? 1 : 0; | |
if (_e === 0) continue; | |
let ve = P.get(O, re, ee, ie, M); | |
Y += ve * _e; | |
} | |
} | |
} | |
D.set(Y, O, L, B, z, M); | |
} | |
return e.makeTensorInfo(D.shape, D.dtype, D.values); | |
} | |
var sE = { | |
kernelName: Ki, | |
backendName: "cpu", | |
kernelFunc: DQ | |
}; | |
function AQ(r) { | |
let { | |
inputs: t8, | |
backend: e, | |
attrs: o | |
} = r, | |
{ | |
dy: n, | |
input: s, | |
output: a | |
} = t8, | |
i = s; | |
Q([s, a], "maxPoolGrad"); | |
let { | |
filterSize: p, | |
strides: u, | |
pad: c, | |
dimRoundingMode: l | |
} = o, | |
m = w.computePool2DInfo(i.shape, p, u, 1, c, l), | |
d = e.data.get(i.dataId).values, | |
f = me(m.outShape, i.dtype, Bf(d, i.shape, i.dtype, m).values), | |
h = m.strideHeight, | |
g = m.strideWidth, | |
x = m.dilationHeight, | |
b = m.dilationWidth, | |
C = m.effectiveFilterHeight, | |
S = m.effectiveFilterWidth, | |
k = S - 1 - m.padInfo.left, | |
_ = C - 1 - m.padInfo.top, | |
E = me(i.shape, "float32"), | |
R = e.data.get(n.dataId).values, | |
D = me(n.shape, "float32", R); | |
for (let P = 0; P < m.batchSize; ++P) for (let O = 0; O < m.inChannels; ++O) for (let M = 0; M < m.inHeight; ++M) for (let L = 0; L < m.inWidth; ++L) { | |
let B = M - _, | |
z = L - k, | |
U = 0; | |
for (let j = 0; j < C; j += x) { | |
let q = (B + j) / h; | |
if (!(q < 0 || q >= m.outHeight || Math.floor(q) !== q)) for (let Y = 0; Y < S; Y += b) { | |
let J = (z + Y) / g; | |
if (J < 0 || J >= m.outWidth || Math.floor(J) !== J) continue; | |
let re = C * S - 1 - f.get(P, q, J, O), | |
ne = j * S + Y, | |
ee = re === ne ? 1 : 0; | |
if (ee === 0) continue; | |
let oe = D.get(P, q, J, O); | |
U += oe * ee; | |
} | |
} | |
E.set(U, P, M, L, O); | |
} | |
return e.makeTensorInfo(E.shape, E.dtype, E.values); | |
} | |
var aE = { | |
kernelName: Hi, | |
backendName: "cpu", | |
kernelFunc: AQ | |
}; | |
function iE(r, t8, e, o, n) { | |
let s = y.computeStrides(t8), | |
a = _c(r, t8, e, s, n, "max"), | |
i = Bf(r, t8, e, n, true, o); | |
return [a.values, i.values]; | |
} | |
var uE = { | |
kernelName: ua, | |
backendName: "cpu", | |
kernelFunc: ({ | |
inputs: r, | |
attrs: t8, | |
backend: e | |
}) => { | |
let { | |
x: o | |
} = r, | |
{ | |
filterSize: n, | |
strides: s, | |
pad: a, | |
includeBatchInIndex: i | |
} = t8, | |
p = e; | |
Q(o, "MaxPoolWithArgmax"); | |
let u = p.data.get(o.dataId).values, | |
c = w.computePool2DInfo(o.shape, n, s, [1, 1], a), | |
[l, m] = iE(u, o.shape, o.dtype, i, c), | |
d = p.write(l, c.outShape, o.dtype), | |
f = p.write(m, c.outShape, o.dtype); | |
return [{ | |
dataId: d, | |
shape: c.outShape, | |
dtype: o.dtype | |
}, { | |
dataId: f, | |
shape: c.outShape, | |
dtype: "int32" | |
}]; | |
} | |
}; | |
function FQ(r) { | |
let { | |
inputs: t8, | |
backend: e, | |
attrs: o | |
} = r, | |
{ | |
x: n | |
} = t8, | |
{ | |
axis: s, | |
keepDims: a | |
} = o, | |
i = y.parseAxisParam(s, n.shape), | |
u = w.computeOutAndReduceShapes(n.shape, i)[1], | |
c = y.sizeFromShape(u), | |
l = [], | |
m = e.makeTensorInfo([], "float32", new Float32Array([c])); | |
l.push(m); | |
let d = Do({ | |
inputs: { | |
x: n | |
}, | |
backend: e, | |
attrs: { | |
dtype: "float32" | |
} | |
}); | |
l.push(d); | |
let f = Ul({ | |
inputs: { | |
a: d, | |
b: m | |
}, | |
backend: e | |
}); | |
l.push(f); | |
let h = gi({ | |
inputs: { | |
x: f | |
}, | |
backend: e, | |
attrs: { | |
axis: s, | |
keepDims: a | |
} | |
}); | |
return l.forEach(g => e.disposeIntermediateTensorInfo(g)), h; | |
} | |
var pE = { | |
kernelName: Gn, | |
backendName: "cpu", | |
kernelFunc: FQ | |
}; | |
function PQ(r) { | |
let { | |
inputs: t8, | |
backend: e, | |
attrs: o | |
} = r, | |
{ | |
x: n | |
} = t8, | |
{ | |
axis: s, | |
keepDims: a | |
} = o; | |
Q(n, "min"); | |
let i = y.parseAxisParam(s, n.shape), | |
p = i, | |
u = w.getAxesPermutation(p, n.shape.length), | |
c = n; | |
u != null && (c = It({ | |
inputs: { | |
x: n | |
}, | |
backend: e, | |
attrs: { | |
perm: u | |
} | |
}), p = w.getInnerMostAxes(p.length, n.shape.length)), w.assertAxesAreInnerMostDims("min", p, c.shape.length); | |
let [l, m] = w.computeOutAndReduceShapes(c.shape, p), | |
d = y.sizeFromShape(m), | |
f = y.makeZerosTypedArray(y.sizeFromShape(l), c.dtype), | |
h = e.data.get(c.dataId).values; | |
for (let x = 0; x < f.length; ++x) { | |
let b = x * d, | |
C = h[b]; | |
for (let S = 0; S < d; ++S) { | |
let k = h[b + S]; | |
(Number.isNaN(k) || k < C) && (C = k); | |
} | |
f[x] = C; | |
} | |
u != null && e.disposeIntermediateTensorInfo(c); | |
let g = e.makeTensorInfo(l, c.dtype, f); | |
if (a) { | |
let x = w.expandShapeToKeepDim(l, i), | |
b = Ve({ | |
inputs: { | |
x: g | |
}, | |
backend: e, | |
attrs: { | |
shape: x | |
} | |
}); | |
return e.disposeIntermediateTensorInfo(g), b; | |
} | |
return g; | |
} | |
var cE = { | |
kernelName: Hn, | |
backendName: "cpu", | |
kernelFunc: PQ | |
}; | |
function OQ(r) { | |
let { | |
inputs: t8, | |
backend: e, | |
attrs: o | |
} = r, | |
{ | |
x: n | |
} = t8, | |
{ | |
paddings: s, | |
mode: a | |
} = o; | |
Q(n, "mirrorPad"); | |
let i = s.map((C, S) => C[0] + n.shape[S] + C[1]), | |
p = s.map(C => C[0]), | |
u = s.map((C, S) => C[0] + n.shape[S]), | |
c = a === "reflect" ? 0 : 1, | |
l = e.data.get(n.dataId).values, | |
m = n.shape.length, | |
d = y.computeStrides(n.shape), | |
f = y.sizeFromShape(i), | |
h = i.length, | |
g = y.computeStrides(i), | |
x = y.getTypedArrayFromDType(n.dtype, f); | |
for (let C = 0; C < f; C++) { | |
let S = y.indexToLoc(C, h, g); | |
for (let _ = 0; _ < h; _++) S[_] < p[_] ? S[_] = p[_] * 2 - S[_] - c : S[_] >= u[_] && (S[_] = (u[_] - 1) * 2 - S[_] + c); | |
S = S.map((_, E) => _ - p[E]); | |
let k = y.locToIndex(S, m, d); | |
x[C] = l[k]; | |
} | |
return { | |
dataId: e.write(x, i, n.dtype), | |
shape: i, | |
dtype: n.dtype | |
}; | |
} | |
var lE = { | |
kernelName: qn, | |
backendName: "cpu", | |
kernelFunc: OQ | |
}; | |
var MQ = ze((r, t8) => { | |
let e = r % t8; | |
return r < 0 && t8 < 0 || r >= 0 && t8 >= 0 ? e : (e + t8) % t8; | |
}); | |
var LQ = Ye(jn, MQ); | |
var mE = { | |
kernelName: jn, | |
backendName: "cpu", | |
kernelFunc: LQ | |
}; | |
var fE = Kp(qw()); | |
function SI(r) { | |
let { | |
inputs: t8, | |
backend: e, | |
attrs: o | |
} = r, | |
{ | |
logits: n | |
} = t8, | |
{ | |
dim: s | |
} = o, | |
a = n.shape.length, | |
i = s; | |
if (i === -1 && (i = a - 1), i !== a - 1) throw Error(`Softmax along a non-last dimension is not yet supported. Logits was rank ${a} and dim was ${i}`); | |
let p = y.parseAxisParam([i], n.shape), | |
u = wI({ | |
inputs: { | |
x: n | |
}, | |
backend: e, | |
attrs: { | |
reductionIndices: p, | |
keepDims: false | |
} | |
}), | |
c = w.expandShapeToKeepDim(u.shape, p), | |
l = Ve({ | |
inputs: { | |
x: u | |
}, | |
backend: e, | |
attrs: { | |
shape: c | |
} | |
}), | |
m = Vl({ | |
inputs: { | |
a: n, | |
b: l | |
}, | |
backend: e | |
}), | |
d = KS({ | |
inputs: { | |
x: m | |
}, | |
backend: e | |
}), | |
f = gi({ | |
inputs: { | |
x: d | |
}, | |
backend: e, | |
attrs: { | |
axis: p, | |
keepDims: false | |
} | |
}), | |
h = Ve({ | |
inputs: { | |
x: f | |
}, | |
backend: e, | |
attrs: { | |
shape: c | |
} | |
}), | |
g = Ul({ | |
inputs: { | |
a: d, | |
b: h | |
}, | |
backend: e | |
}); | |
return e.disposeIntermediateTensorInfo(u), e.disposeIntermediateTensorInfo(l), e.disposeIntermediateTensorInfo(m), e.disposeIntermediateTensorInfo(d), e.disposeIntermediateTensorInfo(f), e.disposeIntermediateTensorInfo(h), g; | |
} | |
var dE = { | |
kernelName: vs, | |
backendName: "cpu", | |
kernelFunc: SI | |
}; | |
function BQ(r) { | |
let { | |
inputs: t8, | |
backend: e, | |
attrs: o | |
} = r, | |
{ | |
logits: n | |
} = t8, | |
{ | |
numSamples: s, | |
seed: a, | |
normalized: i | |
} = o; | |
Q(n, "multinomial"); | |
let p = i ? n : SI({ | |
inputs: { | |
logits: n | |
}, | |
backend: e, | |
attrs: { | |
dim: -1 | |
} | |
}), | |
u = p.shape[0], | |
c = p.shape[1], | |
l = e.data.get(p.dataId).values, | |
m = [u, s], | |
d = y.makeZerosTypedArray(y.sizeFromShape(m), "int32"); | |
for (let f = 0; f < u; ++f) { | |
let h = f * c, | |
g = new Float32Array(c - 1); | |
g[0] = l[h]; | |
for (let C = 1; C < g.length; ++C) g[C] = g[C - 1] + l[h + C]; | |
let x = fE.alea(a.toString()), | |
b = f * s; | |
for (let C = 0; C < s; ++C) { | |
let S = x(); | |
d[b + C] = g.length; | |
for (let k = 0; k < g.length; k++) if (S < g[k]) { | |
d[b + C] = k; | |
break; | |
} | |
} | |
} | |
return i || e.disposeIntermediateTensorInfo(p), e.makeTensorInfo(m, "int32", d); | |
} | |
var hE = { | |
kernelName: Xn, | |
backendName: "cpu", | |
kernelFunc: BQ | |
}; | |
var zQ = Wt.nonMaxSuppressionV3Impl; | |
function VQ(r) { | |
let { | |
inputs: t8, | |
backend: e, | |
attrs: o | |
} = r, | |
{ | |
boxes: n, | |
scores: s | |
} = t8, | |
{ | |
maxOutputSize: a, | |
iouThreshold: i, | |
scoreThreshold: p | |
} = o; | |
Q(n, "NonMaxSuppression"); | |
let u = e.data.get(n.dataId).values, | |
c = e.data.get(s.dataId).values, | |
{ | |
selectedIndices: l | |
} = zQ(u, c, a, i, p); | |
return e.makeTensorInfo([l.length], "int32", new Int32Array(l)); | |
} | |
var gE = { | |
kernelName: Zn, | |
backendName: "cpu", | |
kernelFunc: VQ | |
}; | |
var WQ = Wt.nonMaxSuppressionV4Impl; | |
function UQ(r) { | |
let { | |
inputs: t8, | |
backend: e, | |
attrs: o | |
} = r, | |
{ | |
boxes: n, | |
scores: s | |
} = t8, | |
{ | |
maxOutputSize: a, | |
iouThreshold: i, | |
scoreThreshold: p, | |
padToMaxOutputSize: u | |
} = o; | |
Q(n, "NonMaxSuppressionPadded"); | |
let c = e.data.get(n.dataId).values, | |
l = e.data.get(s.dataId).values, | |
{ | |
selectedIndices: m, | |
validOutputs: d | |
} = WQ(c, l, a, i, p, u); | |
return [e.makeTensorInfo([m.length], "int32", new Int32Array(m)), e.makeTensorInfo([], "int32", new Int32Array([d]))]; | |
} | |
var xE = { | |
kernelName: Za, | |
backendName: "cpu", | |
kernelFunc: UQ | |
}; | |
var GQ = Wt.nonMaxSuppressionV5Impl; | |
function HQ(r) { | |
let { | |
inputs: t8, | |
backend: e, | |
attrs: o | |
} = r, | |
{ | |
boxes: n, | |
scores: s | |
} = t8, | |
{ | |
maxOutputSize: a, | |
iouThreshold: i, | |
scoreThreshold: p, | |
softNmsSigma: u | |
} = o; | |
Q(n, "NonMaxSuppressionWithScore"); | |
let c = e.data.get(n.dataId).values, | |
l = e.data.get(s.dataId).values, | |
m = a, | |
d = i, | |
f = p, | |
h = u, | |
{ | |
selectedIndices: g, | |
selectedScores: x | |
} = GQ(c, l, m, d, f, h); | |
return [e.makeTensorInfo([g.length], "int32", new Int32Array(g)), e.makeTensorInfo([x.length], "float32", new Float32Array(x))]; | |
} | |
var yE = { | |
kernelName: Jn, | |
backendName: "cpu", | |
kernelFunc: HQ | |
}; | |
function KQ(r) { | |
let { | |
inputs: t8, | |
backend: e, | |
attrs: o | |
} = r, | |
{ | |
indices: n | |
} = t8, | |
{ | |
dtype: s, | |
depth: a, | |
onValue: i, | |
offValue: p | |
} = o; | |
Q(n, "oneHot"); | |
let u = y.sizeFromShape(n.shape), | |
c = new Float32Array(u * a); | |
c.fill(p); | |
let l = e.data.get(n.dataId).values; | |
for (let m = 0; m < u; ++m) l[m] >= 0 && l[m] < a && (c[m * a + l[m]] = i); | |
return e.makeTensorInfo([...n.shape, a], s, c); | |
} | |
var bE = { | |
kernelName: es, | |
backendName: "cpu", | |
kernelFunc: KQ | |
}; | |
function Kl(r) { | |
let { | |
inputs: t8, | |
backend: e | |
} = r, | |
{ | |
x: o | |
} = t8; | |
if (o.dtype === "string") throw new Error("zerosLike is not supported for string tensors"); | |
if (o.dtype === "complex64") { | |
let n = Ro({ | |
inputs: { | |
input: o | |
}, | |
backend: e | |
}), | |
s = Kl({ | |
inputs: { | |
x: n | |
}, | |
backend: e | |
}), | |
a = Ma({ | |
inputs: { | |
input: o | |
}, | |
backend: e | |
}), | |
i = Kl({ | |
inputs: { | |
x: a | |
}, | |
backend: e | |
}), | |
p = Kt({ | |
inputs: { | |
real: s, | |
imag: i | |
}, | |
backend: e | |
}); | |
return e.disposeIntermediateTensorInfo(n), e.disposeIntermediateTensorInfo(s), e.disposeIntermediateTensorInfo(a), e.disposeIntermediateTensorInfo(i), p; | |
} else return Hl({ | |
backend: e, | |
attrs: { | |
shape: o.shape, | |
value: 0, | |
dtype: o.dtype | |
} | |
}); | |
} | |
var CE = { | |
kernelName: Sa, | |
backendName: "cpu", | |
kernelFunc: Kl | |
}; | |
function wE(r) { | |
let { | |
inputs: t8, | |
backend: e | |
} = r, | |
{ | |
x: o | |
} = t8; | |
if (o.dtype === "string") throw new Error("onesLike is not supported for string tensors"); | |
if (o.dtype === "complex64") { | |
let n = Ro({ | |
inputs: { | |
input: o | |
}, | |
backend: e | |
}), | |
s = wE({ | |
inputs: { | |
x: n | |
}, | |
backend: e | |
}), | |
a = Ma({ | |
inputs: { | |
input: o | |
}, | |
backend: e | |
}), | |
i = Kl({ | |
inputs: { | |
x: a | |
}, | |
backend: e | |
}), | |
p = Kt({ | |
inputs: { | |
real: s, | |
imag: i | |
}, | |
backend: e | |
}); | |
return e.disposeIntermediateTensorInfo(n), e.disposeIntermediateTensorInfo(s), e.disposeIntermediateTensorInfo(a), e.disposeIntermediateTensorInfo(i), p; | |
} else return Hl({ | |
backend: e, | |
attrs: { | |
shape: o.shape, | |
value: 1, | |
dtype: o.dtype | |
} | |
}); | |
} | |
var SE = { | |
kernelName: ca, | |
backendName: "cpu", | |
kernelFunc: wE | |
}; | |
function II(r) { | |
let { | |
inputs: t8, | |
backend: e, | |
attrs: o | |
} = r, | |
{ | |
axis: n | |
} = o; | |
if (t8.length === 1) return $c({ | |
inputs: { | |
input: t8[0] | |
}, | |
backend: e, | |
attrs: { | |
dim: n | |
} | |
}); | |
let s = t8[0].shape, | |
a = t8[0].dtype; | |
t8.forEach(c => { | |
y.assertShapesMatch(s, c.shape, "All tensors passed to stack must have matching shapes"), y.assert(a === c.dtype, () => "All tensors passed to stack must have matching dtypes"); | |
}); | |
let i = [], | |
p = t8.map(c => { | |
let l = $c({ | |
inputs: { | |
input: c | |
}, | |
backend: e, | |
attrs: { | |
dim: n | |
} | |
}); | |
return i.push(l), l; | |
}), | |
u = yu({ | |
inputs: p, | |
backend: e, | |
attrs: { | |
axis: n | |
} | |
}); | |
return i.forEach(c => e.disposeIntermediateTensorInfo(c)), u; | |
} | |
var IE = { | |
kernelName: la, | |
backendName: "cpu", | |
kernelFunc: II | |
}; | |
function qQ(r) { | |
let { | |
inputs: t8, | |
backend: e, | |
attrs: o | |
} = r, | |
{ | |
x: n | |
} = t8, | |
{ | |
paddings: s, | |
constantValue: a | |
} = o; | |
Q(n, "pad"); | |
let i = s.map((b, C) => b[0] + n.shape[C] + b[1]), | |
p = s.map(b => b[0]), | |
u = e.data.get(n.dataId).values, | |
c = y.sizeFromShape(n.shape), | |
l = n.shape.length, | |
m = y.computeStrides(n.shape), | |
d = y.sizeFromShape(i), | |
f = i.length, | |
h = y.computeStrides(i), | |
g = y.getTypedArrayFromDType(n.dtype, d); | |
a !== 0 && g.fill(a); | |
for (let b = 0; b < c; b++) { | |
let S = y.indexToLoc(b, l, m).map((_, E) => _ + p[E]), | |
k = y.locToIndex(S, f, h); | |
g[k] = u[b]; | |
} | |
return { | |
dataId: e.write(g, i, n.dtype), | |
shape: i, | |
dtype: n.dtype | |
}; | |
} | |
var Wf = { | |
kernelName: ts, | |
backendName: "cpu", | |
kernelFunc: qQ | |
}; | |
var jQ = ze((r, t8) => Math.pow(r, t8)); | |
var XQ = Ye(rs, jQ); | |
var vE = { | |
kernelName: rs, | |
backendName: "cpu", | |
kernelFunc: XQ | |
}; | |
function YQ(r) { | |
let { | |
inputs: t8, | |
backend: e, | |
attrs: o | |
} = r, | |
{ | |
paramsNestedSplits: n, | |
paramsDenseValues: s, | |
indices: a | |
} = t8, | |
{ | |
outputRaggedRank: i | |
} = o, | |
p = n.map(x => e.data.get(x.dataId).values), | |
u = n.map(x => x.shape), | |
c = e.data.get(s.dataId).values, | |
l = e.data.get(a.dataId).values, | |
[m, d, f] = Rf(p, u, c, s.shape, s.dtype, l, a.shape, i), | |
h = m.map(x => e.makeTensorInfo([x.length], "int32", x)), | |
g = e.makeTensorInfo(f, s.dtype, d); | |
return h.concat([g]); | |
} | |
var kE = { | |
kernelName: Qp, | |
backendName: "cpu", | |
kernelFunc: YQ | |
}; | |
function QQ(r) { | |
let { | |
inputs: t8, | |
backend: e | |
} = r, | |
{ | |
starts: o, | |
limits: n, | |
deltas: s | |
} = t8, | |
a = e.data.get(o.dataId).values, | |
i = e.data.get(n.dataId).values, | |
p = e.data.get(s.dataId).values, | |
[u, c] = Df(a, o.shape, o.dtype, i, n.shape, p, s.shape), | |
l = e.makeTensorInfo([u.length], "int32", u), | |
m = e.makeTensorInfo([c.length], o.dtype, c); | |
return [l, m]; | |
} | |
var NE = { | |
kernelName: Zp, | |
backendName: "cpu", | |
kernelFunc: QQ | |
}; | |
function ZQ(r) { | |
let { | |
inputs: t8, | |
backend: e, | |
attrs: o | |
} = r, | |
{ | |
shape: n, | |
values: s, | |
defaultValue: a, | |
rowPartitionTensors: i | |
} = t8, | |
{ | |
rowPartitionTypes: p | |
} = o, | |
u = e.data.get(n.dataId).values, | |
c = e.data.get(s.dataId).values, | |
l = e.data.get(a.dataId).values, | |
m = i.map(g => e.data.get(g.dataId).values), | |
d = i.map(g => g.shape), | |
[f, h] = Af(u, n.shape, c, s.shape, s.dtype, l, a.shape, m, d, p); | |
return e.makeTensorInfo(f, s.dtype, h); | |
} | |
var TE = { | |
kernelName: Jp, | |
backendName: "cpu", | |
kernelFunc: ZQ | |
}; | |
function JQ(r) { | |
let { | |
backend: t8, | |
attrs: e | |
} = r, | |
{ | |
start: o, | |
stop: n, | |
dtype: s, | |
step: a | |
} = e, | |
i = fp(o, n, a, s); | |
return t8.makeTensorInfo([i.length], s, i); | |
} | |
var _E = { | |
kernelName: ma, | |
backendName: "cpu", | |
kernelFunc: JQ | |
}; | |
var e7 = Ie(ss, r => 1 / r); | |
var $E = { | |
kernelName: ss, | |
backendName: "cpu", | |
kernelFunc: e7 | |
}; | |
function t7(r) { | |
let { | |
inputs: t8, | |
backend: e, | |
attrs: o | |
} = r, | |
{ | |
images: n | |
} = t8, | |
{ | |
alignCorners: s, | |
halfPixelCenters: a, | |
size: i | |
} = o; | |
Q(n, "resizeBilinear"); | |
let p = y.computeStrides(n.shape), | |
[u, c] = i, | |
[l, m, d, f] = n.shape, | |
h = e.data.get(n.dataId).values, | |
g = new Float32Array(y.sizeFromShape([l, u, c, f])), | |
x = [s && u > 1 ? m - 1 : m, s && c > 1 ? d - 1 : d], | |
b = [s && u > 1 ? u - 1 : u, s && c > 1 ? c - 1 : c], | |
C = 0, | |
S = x[0] / b[0], | |
k = x[1] / b[1]; | |
for (let _ = 0; _ < l; _++) for (let E = 0; E < u; E++) { | |
let R; | |
a ? R = S * (E + 0.5) - 0.5 : R = S * E; | |
let D = Math.max(0, Math.floor(R)), | |
P = R - D, | |
O = Math.min(m - 1, Math.ceil(R)), | |
M = _ * p[0] + D * p[1], | |
L = _ * p[0] + O * p[1]; | |
for (let B = 0; B < c; B++) { | |
let z; | |
a ? z = k * (B + 0.5) - 0.5 : z = k * B; | |
let U = Math.max(0, Math.floor(z)), | |
j = z - U, | |
q = Math.min(d - 1, Math.ceil(z)), | |
Y = M + U * p[2], | |
J = L + U * p[2], | |
re = M + q * p[2], | |
ne = L + q * p[2]; | |
for (let ee = 0; ee < f; ee++) { | |
let oe = h[Y + ee], | |
ie = h[J + ee], | |
le = h[re + ee], | |
be = h[ne + ee], | |
_e = oe + (le - oe) * j, | |
ve = ie + (be - ie) * j, | |
Fe = _e + (ve - _e) * P; | |
g[C++] = Fe; | |
} | |
} | |
} | |
return e.makeTensorInfo([l, u, c, f], "float32", g); | |
} | |
var EE = { | |
kernelName: us, | |
backendName: "cpu", | |
kernelFunc: t7 | |
}; | |
function r7(r) { | |
let { | |
inputs: t8, | |
backend: e, | |
attrs: o | |
} = r, | |
{ | |
images: n, | |
dy: s | |
} = t8, | |
{ | |
alignCorners: a | |
} = o; | |
Q([s, n], "resizeBilinearGrad"); | |
let i = y.computeStrides(n.shape), | |
[p, u, c, l] = n.shape, | |
[, m, d] = s.shape, | |
f = new Float32Array(p * u * c * l), | |
h = [a && m > 1 ? u - 1 : u, a && d > 1 ? c - 1 : c], | |
g = [a && m > 1 ? m - 1 : m, a && d > 1 ? d - 1 : d], | |
x = h[0] / g[0], | |
b = h[1] / g[1], | |
C = e.data.get(s.dataId).values, | |
S = 0; | |
for (let k = 0; k < p; k++) { | |
let _ = k * i[0]; | |
for (let E = 0; E < m; E++) { | |
let R = E * x, | |
D = Math.floor(R), | |
P = Math.min(Math.ceil(R), u - 1), | |
O = _ + D * i[1], | |
M = _ + P * i[1], | |
L = R - D, | |
B = 1 - L; | |
for (let z = 0; z < d; z++) { | |
let U = z * b, | |
j = Math.floor(U), | |
q = Math.min(Math.ceil(U), c - 1), | |
Y = U - j, | |
J = 1 - Y, | |
re = O + j * i[2], | |
ne = O + q * i[2], | |
ee = M + j * i[2], | |
oe = M + q * i[2], | |
ie = B * J, | |
le = B * Y, | |
be = L * J, | |
_e = L * Y; | |
for (let ve = 0; ve < l; ve++) { | |
let Fe = C[S++]; | |
f[re + ve] += Fe * ie, f[ne + ve] += Fe * le, f[ee + ve] += Fe * be, f[oe + ve] += Fe * _e; | |
} | |
} | |
} | |
} | |
return e.makeTensorInfo([p, c, u, l], "float32", f); | |
} | |
var RE = { | |
kernelName: ei, | |
backendName: "cpu", | |
kernelFunc: r7 | |
}; | |
function o7(r) { | |
let { | |
inputs: t8, | |
backend: e, | |
attrs: o | |
} = r, | |
{ | |
images: n | |
} = t8, | |
{ | |
alignCorners: s, | |
halfPixelCenters: a, | |
size: i | |
} = o; | |
Q(n, "resizeNearestNeighbor"); | |
let p = y.computeStrides(n.shape), | |
[u, c] = i, | |
[l, m, d, f] = n.shape, | |
h = e.data.get(n.dataId).values, | |
g = new Float32Array(l * u * c * f), | |
x = [s && u > 1 ? m - 1 : m, s && c > 1 ? d - 1 : d], | |
b = [s && u > 1 ? u - 1 : u, s && c > 1 ? c - 1 : c], | |
C = x[0] / b[0], | |
S = x[1] / b[1], | |
k = 0; | |
for (let _ = 0; _ < l; _++) { | |
let E = _ * p[0]; | |
for (let R = 0; R < u; R++) { | |
let D = a ? C * (R + 0.5) : C * R, | |
P = Math.min(m - 1, s ? Math.round(D) : Math.floor(D)); | |
a && (P = Math.max(0, P)); | |
let O = E + P * p[1]; | |
for (let M = 0; M < c; M++) { | |
let L = a ? S * (M + 0.5) : S * M, | |
B = Math.min(d - 1, s ? Math.round(L) : Math.floor(L)); | |
a && (B = Math.max(0, B)); | |
let z = O + B * p[2]; | |
for (let U = 0; U < f; U++) { | |
let j = h[z + U]; | |
g[k++] = j; | |
} | |
} | |
} | |
} | |
return e.makeTensorInfo([l, u, c, f], n.dtype, g); | |
} | |
var DE = { | |
kernelName: is, | |
backendName: "cpu", | |
kernelFunc: o7 | |
}; | |
function n7(r) { | |
let { | |
inputs: t8, | |
backend: e, | |
attrs: o | |
} = r, | |
{ | |
images: n, | |
dy: s | |
} = t8, | |
{ | |
alignCorners: a | |
} = o; | |
Q([s, n], "resizeNearestNeighborGrad"); | |
let i = y.computeStrides(n.shape), | |
p = y.computeStrides(s.shape), | |
[u, c, l, m] = n.shape, | |
[, d, f] = s.shape, | |
h = new Float32Array(u * c * l * m), | |
g = e.data.get(s.dataId).values, | |
x = [a && d > 1 ? c - 1 : c, a && f > 1 ? l - 1 : l], | |
b = [a && d > 1 ? d - 1 : d, a && f > 1 ? f - 1 : f], | |
C = x[0] / b[0], | |
S = x[1] / b[1], | |
k = 1 / C, | |
_ = 1 / S, | |
E = Math.ceil(k) * 2 + 2, | |
R = Math.ceil(_) * 2 + 2; | |
for (let D = 0; D < u; D++) { | |
let P = D * i[0]; | |
for (let O = 0; O < c; O++) { | |
let M = P + O * i[1], | |
L = Math.floor(O * k), | |
B = Math.floor(L - E / 2); | |
for (let z = 0; z < l; z++) { | |
let U = M + z * i[2], | |
j = Math.floor(z * _), | |
q = Math.floor(j - R / 2); | |
for (let Y = 0; Y < m; Y++) { | |
let J = 0; | |
for (let re = 0; re < E; re++) { | |
let ne = re + B; | |
if (ne < 0 || ne >= d) continue; | |
let ee = P + ne * p[1], | |
oe = ne * C, | |
ie = Math.min(c - 1, a ? Math.round(oe) : Math.floor(oe)); | |
if (O === ie) for (let le = 0; le < R; le++) { | |
let be = le + q; | |
if (be < 0 || be >= f) continue; | |
let _e = ee + be * p[2], | |
ve = be * S, | |
Fe = Math.min(l - 1, a ? Math.round(ve) : Math.floor(ve)); | |
z === Fe && (J += g[_e + Y]); | |
} | |
} | |
h[U + Y] = J; | |
} | |
} | |
} | |
} | |
return e.makeTensorInfo(n.shape, n.dtype, h); | |
} | |
var AE = { | |
kernelName: Ja, | |
backendName: "cpu", | |
kernelFunc: n7 | |
}; | |
function s7(r) { | |
let { | |
inputs: t8, | |
backend: e, | |
attrs: o | |
} = r, | |
{ | |
x: n | |
} = t8, | |
{ | |
dims: s | |
} = o; | |
Q(n, "reverse"); | |
let a = n.shape.length, | |
i = y.parseAxisParam(s, n.shape); | |
if (a === 0) return mr({ | |
inputs: { | |
x: n | |
}, | |
backend: e | |
}); | |
let p = new tt(n.shape, n.dtype), | |
u = e.bufferSync(n); | |
for (let c = 0; c < p.size; c++) { | |
let l = p.indexToLoc(c), | |
m = l.slice(); | |
i.forEach(d => m[d] = n.shape[d] - 1 - m[d]), p.set(u.get(...m), ...l); | |
} | |
return e.makeTensorInfo(p.shape, p.dtype, p.values); | |
} | |
var FE = { | |
kernelName: cs, | |
backendName: "cpu", | |
kernelFunc: s7 | |
}; | |
var PE = { | |
kernelName: As, | |
backendName: "cpu", | |
kernelFunc: ({ | |
inputs: r, | |
attrs: t8, | |
backend: e | |
}) => { | |
let { | |
image: o | |
} = r, | |
{ | |
radians: n, | |
fillValue: s, | |
center: a | |
} = t8, | |
i = e, | |
p = y.getTypedArrayFromDType(o.dtype, y.sizeFromShape(o.shape)), | |
[u, c, l, m] = o.shape, | |
[d, f] = w.getImageCenter(a, c, l), | |
h = 255, | |
g = Math.sin(n), | |
x = Math.cos(n), | |
b = i.data.get(o.dataId).values; | |
for (let S = 0; S < u; S++) { | |
let k = S * l * c * m; | |
for (let _ = 0; _ < c; _++) { | |
let E = _ * (l * m); | |
for (let R = 0; R < l; R++) { | |
let D = R * m; | |
for (let P = 0; P < m; P++) { | |
let O = [u, _, R, P], | |
M = O[2], | |
L = O[1], | |
B = (M - d) * x - (L - f) * g, | |
z = (M - d) * g + (L - f) * x; | |
B = Math.round(B + d), z = Math.round(z + f); | |
let U = s; | |
if (typeof s != "number" && (P === 3 ? U = h : U = s[P]), B >= 0 && B < l && z >= 0 && z < c) { | |
let q = z * (l * m), | |
Y = B * m, | |
J = k + q + Y + P; | |
U = b[J]; | |
} | |
let j = k + E + D + P; | |
p[j] = U; | |
} | |
} | |
} | |
} | |
return { | |
dataId: i.write(p, o.shape, o.dtype), | |
shape: o.shape, | |
dtype: o.dtype | |
}; | |
} | |
}; | |
var a7 = Ie(ls, r => { | |
let t8 = Math.floor(r); | |
return r - t8 < 0.5 ? Math.floor(r) : r - t8 > 0.5 ? Math.ceil(r) : t8 % 2 === 0 ? t8 : t8 + 1; | |
}); | |
var OE = { | |
kernelName: ls, | |
backendName: "cpu", | |
kernelFunc: a7 | |
}; | |
function i7(r) { | |
let { | |
inputs: t8, | |
backend: e, | |
attrs: o | |
} = r, | |
{ | |
indices: n, | |
updates: s | |
} = t8, | |
{ | |
shape: a | |
} = o, | |
{ | |
sliceRank: i, | |
numUpdates: p, | |
sliceSize: u, | |
strides: c, | |
outputSize: l | |
} = w.calculateShapes(s, n, a), | |
m = true, | |
d = e.bufferSync(n), | |
f = e.bufferSync(s), | |
h = zs(d, f, a, l, u, p, i, c, 0, m); | |
return e.makeTensorInfo(a, h.dtype, h.values); | |
} | |
var ME = { | |
kernelName: ds, | |
backendName: "cpu", | |
kernelFunc: i7 | |
}; | |
function u7(r, t8) { | |
let e = 0, | |
o = r.length, | |
n = 0; | |
for (; e < o;) n = Math.floor((e + o) / 2), r[n] < t8 ? e = n + 1 : o = n; | |
return o; | |
} | |
function p7(r, t8) { | |
let e = 0, | |
o = r.length, | |
n = 0; | |
for (; e < o;) n = Math.floor((e + o) / 2), r[n] <= t8 ? e = n + 1 : o = n; | |
return o; | |
} | |
function LE(r, t8, e, o, n, s) { | |
let a = y.getArrayFromDType("int32", e * n); | |
for (let i = 0; i < e; ++i) { | |
let p = r.slice(i * o, (i + 1) * o), | |
u = i * n; | |
for (let c = 0; c < n; ++c) a[u + c] = s === "left" ? u7(p, t8[c + u]) : p7(p, t8[c + u]); | |
} | |
return a; | |
} | |
function c7(r) { | |
let { | |
inputs: t8, | |
backend: e, | |
attrs: o | |
} = r, | |
{ | |
sortedSequence: n, | |
values: s | |
} = t8, | |
{ | |
side: a | |
} = o, | |
i = e.data.get(n.dataId).values, | |
p = e.data.get(s.dataId).values, | |
u = LE(i, p, n.shape[0], n.shape[1], s.shape[1], a); | |
return e.makeTensorInfo(s.shape, "int32", u); | |
} | |
var BE = { | |
kernelName: hs, | |
backendName: "cpu", | |
kernelFunc: c7 | |
}; | |
function l7(r) { | |
let { | |
inputs: t8, | |
backend: e | |
} = r, | |
{ | |
condition: o, | |
t: n, | |
e: s | |
} = t8; | |
Q([o, n, s], "select"); | |
let a = o.shape.length, | |
i = e.data.get(o.dataId).values, | |
p = e.data.get(n.dataId).values, | |
u = e.data.get(s.dataId).values, | |
c = dt(n.dtype, s.dtype), | |
l = y.makeZerosTypedArray(y.sizeFromShape(n.shape), c), | |
m = 0, | |
d = a === 0 || a > 1 || n.shape.length === 1 ? 1 : y.sizeFromShape(n.shape.slice(1)); | |
for (let f = 0; f < i.length; f++) for (let h = 0; h < d; h++) i[f] === 1 ? l[m++] = p[f] : l[m++] = u[f]; | |
return e.makeTensorInfo(n.shape, c, l); | |
} | |
var zE = { | |
kernelName: fa, | |
backendName: "cpu", | |
kernelFunc: l7 | |
}; | |
// --- Elementwise unary CPU kernels built with the Ie() unary-kernel factory ---
// Selu: scale * x for x >= 0, scaleAlpha * (exp(x) - 1) otherwise.
var m7 = w.SELU_SCALEALPHA;
var d7 = w.SELU_SCALE;
var f7 = Ie(gs, r => r >= 0 ? d7 * r : m7 * (Math.exp(r) - 1));
var VE = {
  kernelName: gs,
  backendName: "cpu",
  kernelFunc: f7
};
// Sign: -1 / 0 / +1 (NaN maps to 0 here, since neither comparison holds).
var h7 = Ie(bs, r => r < 0 ? -1 : r > 0 ? 1 : 0);
var WE = {
  kernelName: bs,
  backendName: "cpu",
  kernelFunc: h7
};
// Sin.
var g7 = Ie(xs, r => Math.sin(r));
var UE = {
  kernelName: xs,
  backendName: "cpu",
  kernelFunc: g7
};
// Sinh.
var x7 = Ie(ys, r => Math.sinh(r));
var GE = {
  kernelName: ys,
  backendName: "cpu",
  kernelFunc: x7
};
// Softplus with overflow/underflow cutoffs:
// y7 = 1.1920928955078125e-7 (float32 machine epsilon);
// HE = ln(epsilon) + 2, a negative threshold (~ -13.9).
var y7 = 11920928955078125e-23;
var HE = Math.log(y7) + 2;
var b7 = Ie(ws, r => {
  // t8: r is large (> -HE) so softplus(r) ~= r.
  // e: r is very negative (< HE) so softplus(r) ~= exp(r).
  // Otherwise use the exact formula log(1 + exp(r)).
  let t8 = r > -HE,
    e = r < HE,
    o = Math.exp(r),
    n;
  return e ? n = o : t8 ? n = r : n = Math.log(1 + o), n;
});
var KE = {
  kernelName: ws,
  backendName: "cpu",
  kernelFunc: b7
};
// CPU kernel for SpaceToBatchND, implemented as the canonical composition:
// pad (Wf) -> reshape (Ve) -> transpose (It) -> reshape, with intermediates
// disposed before returning the final tensor.
function C7(r) {
  let {
      inputs: t8,
      backend: e,
      attrs: o
    } = r,
    {
      x: n
    } = t8,
    {
      blockShape: s,
      paddings: a
    } = o;
  Q([n], "spaceToBatchND");
  let i = y.sizeFromShape(s),
    p = [[0, 0]];
  p.push(...a);
  // No padding on dims beyond the spatial block dims.
  for (let _ = 1 + s.length; _ < n.shape.length; ++_) p.push([0, 0]);
  // Step 1: zero-pad the spatial dims (Wf is the PadV2 kernel config).
  let u = Wf.kernelFunc({
      inputs: {
        x: n
      },
      backend: e,
      attrs: {
        paddings: p,
        constantValue: 0
      }
    }),
    // Step 2/3 shapes and permutation from the shared space-to-batch helpers.
    c = w.getReshaped(u.shape, s, i, false),
    l = w.getPermuted(c.length, s.length, false),
    m = w.getReshapedPermuted(u.shape, s, i, false),
    h = Ve({
      inputs: {
        x: u
      },
      backend: e,
      attrs: {
        shape: c
      }
    }),
    b = It({
      inputs: {
        x: h
      },
      backend: e,
      attrs: {
        perm: l
      }
    }),
    k = Ve({
      inputs: {
        x: b
      },
      backend: e,
      attrs: {
        shape: m
      }
    });
  return e.disposeIntermediateTensorInfo(u), e.disposeIntermediateTensorInfo(h), e.disposeIntermediateTensorInfo(b), k;
}
// Kernel registration config (ga = SpaceToBatchND kernel name).
var qE = {
  kernelName: ga,
  backendName: "cpu",
  kernelFunc: C7
};
// CPU kernel for SparseFillEmptyRows. Validates the sparse-tensor component
// ranks, then delegates to the shared implementation (Ff), returning
// [outputIndices, outputValues, emptyRowIndicator, reverseIndexMap].
function w7(r) {
  let {
      inputs: t8,
      backend: e
    } = r,
    {
      indices: o,
      values: n,
      denseShape: s,
      defaultValue: a
    } = t8;
  if (s.shape.length !== 1) throw new Error(`Dense shape must be a vector, saw:
${s.shape}`);
  if (o.shape.length !== 2) throw new Error(`Indices must be a matrix, saw:
${o.shape}`);
  if (n.shape.length !== 1) throw new Error(`Values must be a vector, saw:
${n.shape}`);
  if (a.shape.length !== 0) throw new Error(`Default value must be a scalar, saw:
${a.shape}`);
  let i = e.data.get(o.dataId).values,
    p = e.data.get(n.dataId).values,
    u = e.data.get(s.dataId).values,
    c = e.data.get(a.dataId).values[0],
    // l: new indices, m: new indices shape, d: new values,
    // f: per-row empty flags, h: reverse index map.
    [l, m, d, f, h] = Ff(i, o.shape, o.dtype, p, n.dtype, u, c);
  return [e.makeTensorInfo(m, o.dtype, l), e.makeTensorInfo([m[0]], n.dtype, d), e.makeTensorInfo([f.length], "bool", new Uint8Array(f.map(g => Number(g)))), e.makeTensorInfo([h.length], o.dtype, new Int32Array(h))];
}
// Kernel registration config (ji = SparseFillEmptyRows kernel name).
var jE = {
  kernelName: ji,
  backendName: "cpu",
  kernelFunc: w7
};
// CPU kernel for SparseReshape: maps sparse indices from an input dense shape
// to a target shape (Pf is the shared implementation). Returns
// [newIndices, resolvedOutputShape].
function S7(r) {
  let {
      inputs: t8,
      backend: e
    } = r,
    {
      inputIndices: o,
      inputShape: n,
      newShape: s
    } = t8;
  if (o.shape.length !== 2) throw new Error(`Input indices should be a matrix but received shape
${o.shape}`);
  if (n.shape.length !== 1) throw new Error(`Input shape should be a vector but received shape
${n.shape}`);
  if (s.shape.length !== 1) throw new Error(`Target shape should be a vector but received shape ${s.shape}`);
  let a = Array.from(e.data.get(n.dataId).values),
    i = e.data.get(o.dataId).values,
    p = Array.from(e.data.get(s.dataId).values),
    // u: remapped indices, c: their shape, l: resolved output shape
    // (with any -1 wildcard filled in by Pf).
    [u, c, l] = Pf(i, o.shape, o.dtype, a, p);
  return [e.makeTensorInfo(c, o.dtype, u), e.makeTensorInfo([l.length], s.dtype, new Int32Array(l))];
}
// Kernel registration config (ti = SparseReshape kernel name).
var XE = {
  kernelName: ti,
  backendName: "cpu",
  kernelFunc: S7
};
// CPU kernel for SparseSegmentMean. Shares its implementation (Nc) with
// SparseSegmentSum below; the trailing `true` selects mean reduction.
function I7(r) {
  let {
      inputs: t8,
      backend: e
    } = r,
    {
      data: o,
      indices: n,
      segmentIds: s
    } = t8;
  if (o.shape.length < 1) throw new Error("Data should be at least 1 dimensional but received scalar");
  if (n.shape.length !== 1) throw new Error(`Indices should be a vector but received shape
${n.shape}`);
  if (s.shape.length !== 1) throw new Error(`Segment ids should be a vector but received shape
${s.shape}`);
  if (n.shape[0] !== s.shape[0]) throw new Error("segmentIds and indices should have same size.");
  let a = e.data.get(o.dataId).values,
    i = e.data.get(n.dataId).values,
    p = e.data.get(s.dataId).values,
    [u, c] = Nc(a, o.shape, o.dtype, i, p, true);
  return e.makeTensorInfo(c, o.dtype, u);
}
// Kernel registration config (ya = SparseSegmentMean kernel name).
var YE = {
  kernelName: ya,
  backendName: "cpu",
  kernelFunc: I7
};
// CPU kernel for SparseSegmentSum: identical validation to I7 but calls Nc
// without the mean flag (sum reduction).
function v7(r) {
  let {
      inputs: t8,
      backend: e
    } = r,
    {
      data: o,
      indices: n,
      segmentIds: s
    } = t8;
  if (o.shape.length < 1) throw new Error("Data should be at least 1 dimensional but received scalar");
  if (n.shape.length !== 1) throw new Error(`Indices should be a vector but received shape
${n.shape}`);
  if (s.shape.length !== 1) throw new Error(`Segment ids should be a vector but received shape
${s.shape}`);
  if (n.shape[0] !== s.shape[0]) throw new Error("segmentIds and indices should have same size.");
  let a = e.data.get(o.dataId).values,
    i = e.data.get(n.dataId).values,
    p = e.data.get(s.dataId).values,
    [u, c] = Nc(a, o.shape, o.dtype, i, p);
  return e.makeTensorInfo(c, o.dtype, u);
}
// Kernel registration config (ba = SparseSegmentSum kernel name).
var QE = {
  kernelName: ba,
  backendName: "cpu",
  kernelFunc: v7
};
// CPU kernel for SparseToDense: scatters sparse values into a dense tensor of
// `outputShape`, with out-of-range slots taking the default value. zs is the
// shared scatter implementation; the switch exists only to give each dtype a
// monomorphic call (same arguments in every branch).
function k7(r) {
  let {
      inputs: t8,
      backend: e,
      attrs: o
    } = r,
    {
      sparseIndices: n,
      sparseValues: s,
      defaultValue: a
    } = t8,
    {
      outputShape: i
    } = o,
    {
      sliceRank: p,
      numUpdates: u,
      sliceSize: c,
      strides: l,
      outputSize: m
    } = w.calculateShapes(s, n, i),
    // d = false: duplicate indices overwrite rather than accumulate —
    // presumably the sumDupeIndices flag of zs; TODO confirm against zs.
    d = false,
    f = e.bufferSync(n),
    h;
  switch (s.dtype) {
    case "bool":
      {
        let g = e.bufferSync(s),
          x = !!e.data.get(a.dataId).values[0];
        h = zs(f, g, i, m, c, u, p, l, x, d);
        break;
      }
    case "float32":
      {
        let g = e.bufferSync(s),
          x = e.data.get(a.dataId).values[0];
        h = zs(f, g, i, m, c, u, p, l, x, d);
        break;
      }
    case "int32":
      {
        let g = e.bufferSync(s),
          x = e.data.get(a.dataId).values[0];
        h = zs(f, g, i, m, c, u, p, l, x, d);
        break;
      }
    case "string":
      {
        let g = e.bufferSync(s),
          x = y.decodeString(e.data.get(a.dataId).values[0]);
        h = zs(f, g, i, m, c, u, p, l, x, d);
        break;
      }
    default:
      throw new Error(`Unsupported type ${s.dtype}`);
  }
  return e.makeTensorInfo(i, h.dtype, h.values);
}
// Kernel registration config (ks = SparseToDense kernel name).
var ZE = {
  kernelName: ks,
  backendName: "cpu",
  kernelFunc: k7
};
// CPU kernel for SplitV: slices x into pieces along `axis`. Each piece is a
// Slice (Fo) whose begin offset advances by the piece size after the slice is
// taken (note the mutation of `u` inside map).
function N7(r) {
  let {
      inputs: t8,
      backend: e,
      attrs: o
    } = r,
    {
      x: n
    } = t8,
    {
      numOrSizeSplits: s,
      axis: a
    } = o,
    i = y.parseAxisParam(a, n.shape)[0],
    // p: resolved per-piece sizes along the split axis.
    p = w.prepareSplitSize(n, s, i),
    u = new Array(n.shape.length).fill(0),
    c = n.shape.slice();
  return p.map(l => {
    let m = [...c];
    m[i] = l;
    let d = Fo({
      inputs: {
        x: n
      },
      backend: e,
      attrs: {
        begin: u,
        size: m
      }
    });
    // Advance the begin offset for the next piece.
    return u[i] += l, d;
  });
}
// Kernel registration config (xa = SplitV kernel name).
var JE = {
  kernelName: xa,
  backendName: "cpu",
  kernelFunc: N7
};
// CPU kernel config for Square: squares every element of x and writes the
// result through the backend, returning a raw TensorInfo.
var eR = {
  kernelName: Xi,
  backendName: "cpu",
  kernelFunc: ({
    inputs: kernelInputs,
    backend: kernelBackend
  }) => {
    const {
      x: xInfo
    } = kernelInputs;
    const cpuBackend = kernelBackend;
    // Q validates the input for this kernel (helper defined earlier).
    Q(xInfo, "square");
    const srcValues = cpuBackend.data.get(xInfo.dataId).values;
    const outValues = new Float32Array(srcValues.length);
    for (let idx = 0; idx < srcValues.length; ++idx) {
      const value = srcValues[idx];
      outValues[idx] = value * value;
    }
    return {
      dataId: cpuBackend.write(outValues, xInfo.shape, xInfo.dtype),
      shape: xInfo.shape,
      dtype: xInfo.dtype
    };
  }
};
// Step (So) unary kernel: NaN stays NaN; positive maps to 1; everything else
// (including 0 and negatives) maps to attrs.alpha.
var T7 = Ie(So, (r, t8) => {
  let e = t8;
  return isNaN(r) ? NaN : r > 0 ? 1 : e.alpha;
});
var tR = {
  kernelName: So,
  backendName: "cpu",
  kernelFunc: T7
};
// CPU kernel for StridedSlice. Three paths based on ct.sliceInfo analysis:
// identity -> plain reshape; simple/dim0 slice -> Slice + reshape;
// general case -> dense strided copy via Of over a sync buffer.
function _7(r) {
  let {
      inputs: t8,
      backend: e,
      attrs: o
    } = r,
    {
      x: n
    } = t8,
    {
      begin: s,
      end: a,
      strides: i,
      beginMask: p,
      endMask: u,
      ellipsisMask: c,
      newAxisMask: l,
      shrinkAxisMask: m
    } = o;
  Q(n, "stridedSlice");
  let {
      finalShapeSparse: d,
      finalShape: f,
      isIdentity: h,
      sliceDim0: g,
      isSimpleSlice: x,
      begin: b,
      end: C,
      strides: S
    } = ct.sliceInfo(n.shape, s, a, i, p, u, c, l, m),
    k;
  if (h) k = Ve({
    inputs: {
      x: n
    },
    backend: e,
    attrs: {
      shape: f
    }
  });else if (g || x) {
    y.assert(n.shape.length >= 1, () => `Input must have rank at least 1, got: ${n.shape.length}`);
    let _ = ct.computeOutShape(b, C, S),
      E = Fo({
        inputs: {
          x: n
        },
        backend: e,
        attrs: {
          begin: b,
          size: _
        }
      });
    k = Ve({
      inputs: {
        x: E
      },
      backend: e,
      attrs: {
        shape: f
      }
    }), e.disposeIntermediateTensorInfo(E);
  } else {
    let _ = e.bufferSync(n),
      E = Of(d, _, S, b);
    k = e.makeTensorInfo(f, E.dtype, E.values);
  }
  return k;
}
// Kernel registration config (Ts = StridedSlice kernel name).
var rR = {
  kernelName: Ts,
  backendName: "cpu",
  kernelFunc: _7
};
// CPU kernel for StringNGrams over a ragged tensor (data + row splits); gp is
// the shared implementation. Returns [ngrams, ngramSplits].
function $7(r) {
  let {
      inputs: t8,
      backend: e,
      attrs: o
    } = r,
    {
      separator: n,
      nGramWidths: s,
      leftPad: a,
      rightPad: i,
      padWidth: p,
      preserveShortSequences: u
    } = o,
    {
      data: c,
      dataSplits: l
    } = t8,
    m = e.data.get(c.dataId).values,
    d = e.data.get(l.dataId).values,
    [f, h] = gp(m, d, n, s, a, i, p, u);
  return [e.makeTensorInfo([f.length], "string", f), e.makeTensorInfo(l.shape, "int32", h)];
}
// Kernel registration config (Ca = StringNGrams kernel name).
var oR = {
  kernelName: Ca,
  backendName: "cpu",
  kernelFunc: $7
};
// CPU kernel for StringSplit: splits a string vector on a scalar delimiter
// (xp is the shared implementation). Returns sparse [indices, values, shape].
function E7(r) {
  let {
      inputs: t8,
      backend: e,
      attrs: o
    } = r,
    {
      skipEmpty: n
    } = o,
    {
      input: s,
      delimiter: a
    } = t8;
  if (s.dtype !== "string") throw new Error("Input must be of datatype string");
  if (s.shape.length !== 1) throw new Error(`Input must be a vector, got shape: ${s.shape}`);
  if (a.shape.length !== 0) throw new Error(`Delimiter must be a scalar, got shape: ${a.shape}`);
  let i = e.data.get(s.dataId).values,
    p = e.data.get(a.dataId).values[0],
    [u, c, l] = xp(i, p, n),
    m = c.length;
  return [e.makeTensorInfo([m, 2], "int32", u), e.makeTensorInfo([m], "string", c), e.makeTensorInfo([2], "int32", new Int32Array(l))];
}
// Kernel registration config (Yi = StringSplit kernel name).
var nR = {
  kernelName: Yi,
  backendName: "cpu",
  kernelFunc: E7
};
// CPU kernel for StringToHashBucketFast: hashes each string into
// [0, numBuckets) (yp is the shared implementation).
function R7(r) {
  let {
      inputs: t8,
      backend: e,
      attrs: o
    } = r,
    {
      numBuckets: n
    } = o,
    {
      input: s
    } = t8;
  if (s.dtype !== "string") throw new Error("Input must be of datatype string");
  if (n <= 0) throw new Error("Number of buckets must be at least 1");
  let a = e.data.get(s.dataId).values,
    i = yp(a, n);
  return e.makeTensorInfo(s.shape, "int32", i);
}
// Kernel registration config (Qi = StringToHashBucketFast kernel name).
var sR = {
  kernelName: Qi,
  backendName: "cpu",
  kernelFunc: R7
};
// Tan and Tanh unary kernels.
var D7 = Ie($s, r => Math.tan(r));
var aR = {
  kernelName: $s,
  backendName: "cpu",
  kernelFunc: D7
};
var A7 = Ie(Es, r => Math.tanh(r));
var iR = {
  kernelName: Es,
  backendName: "cpu",
  kernelFunc: A7
};
// CPU kernel for TensorScatterUpdate: scatters `updates` at `indices` into a
// copy of `tensor` via the shared scatter implementation zs (here seeded with
// the existing tensor buffer f instead of a scalar default).
function F7(r) {
  let {
      inputs: t8,
      backend: e
    } = r,
    {
      tensor: o,
      indices: n,
      updates: s
    } = t8,
    {
      sliceRank: a,
      numUpdates: i,
      sliceSize: p,
      strides: u,
      outputSize: c
    } = w.calculateShapes(s, n, o.shape),
    // l = false: duplicate indices overwrite rather than accumulate —
    // presumably the sumDupeIndices flag of zs; TODO confirm against zs.
    l = false,
    m = e.bufferSync(n),
    d = e.bufferSync(s),
    f = e.bufferSync(o),
    h = zs(m, d, o.shape, c, p, i, a, u, f, l);
  return e.makeTensorInfo(o.shape, h.dtype, h.values);
}
// Kernel registration config (fs = TensorScatterUpdate kernel name).
var uR = {
  kernelName: fs,
  backendName: "cpu",
  kernelFunc: F7
};
// CPU kernel for Tile: repeats x `reps` times per axis (Mf is the shared
// buffer-based implementation).
function P7(r) {
  let {
      inputs: t8,
      backend: e,
      attrs: o
    } = r,
    {
      x: n
    } = t8,
    {
      reps: s
    } = o;
  Q(n, "tile");
  let a = Mf(e.bufferSync(n), s);
  return e.makeTensorInfo(a.shape, a.dtype, a.values);
}
// Kernel registration config (po = Tile kernel name).
var pR = {
  kernelName: po,
  backendName: "cpu",
  kernelFunc: P7
};
// CPU kernel for TopK: returns [values, indices] of the k largest entries
// along the last axis (Lf is the shared implementation).
function O7(r) {
  let {
      inputs: t8,
      backend: e,
      attrs: o
    } = r,
    {
      x: n
    } = t8,
    {
      k: s,
      sorted: a
    } = o;
  Q(n, "topk");
  let i = e.data.get(n.dataId).values,
    [p, u] = Lf(i, n.shape, n.dtype, s, a);
  return [e.makeTensorInfo(p.shape, p.dtype, p.values), e.makeTensorInfo(u.shape, u.dtype, u.values)];
}
// Kernel registration config (Rs = TopK kernel name).
var cR = {
  kernelName: Rs,
  backendName: "cpu",
  kernelFunc: O7
};
// CPU kernel for Transform: applies a projective transform (8 coefficients
// per image, or one shared transform when transforms.shape[0] === 1) to each
// image of a [batch, height, width, channels] tensor, sampling with
// "nearest" (W7) or "bilinear" (U7) interpolation and mapping out-of-range
// source coordinates per `fillMode` (lR).
//
// BUG FIX: the original returned from INSIDE the batch loop, so only the
// first image was ever transformed (remaining batches were left as fillValue)
// and the post-loop return was unreachable dead code that also reported the
// input shape instead of the output shape. The return now happens once, after
// every batch has been processed.
function M7(r) {
  let {
      inputs: t8,
      attrs: e,
      backend: o
    } = r,
    {
      image: n,
      transforms: s
    } = t8,
    {
      interpolation: a,
      fillMode: i,
      fillValue: p,
      outputShape: u
    } = e,
    // [batch, height, width, channels]; output spatial dims default to input.
    [c, l, m, d] = n.shape,
    [f, h] = u != null ? u : [l, m],
    g = [c, f, h, d],
    x = y.computeStrides(n.shape),
    b = x[0],
    C = x[1],
    S = x[2],
    k = y.computeStrides(g),
    _ = k[0],
    E = k[1],
    R = k[2],
    D = y.getTypedArrayFromDType(n.dtype, y.sizeFromShape(g));
  // Pre-fill with the fill value; pixels with a zero projective denominator
  // are skipped below and keep this value.
  D.fill(p);
  let P = o.data.get(n.dataId).values,
    O = o.data.get(s.dataId).values;
  for (let L = 0; L < c; ++L) {
    // One 8-coefficient transform per image, or a single shared one.
    let B = s.shape[0] === 1 ? O : O.subarray(L * 8, L * 8 + 8);
    for (let z = 0; z < f; ++z) for (let U = 0; U < h; ++U) for (let j = 0; j < d; ++j) {
      let q,
        Y = B[6] * U + B[7] * z + 1;
      if (Y === 0) continue;
      // Projected source coordinates, then fill-mode coordinate mapping.
      let J = (B[0] * U + B[1] * z + B[2]) / Y,
        re = (B[3] * U + B[4] * z + B[5]) / Y,
        ne = lR(J, m, i),
        ee = lR(re, l, i);
      switch (a) {
        case "nearest":
          q = W7(P, l, m, b, C, S, L, ee, ne, j, p);
          break;
        case "bilinear":
          q = U7(P, l, m, b, C, S, L, ee, ne, j, p);
          break;
        default:
          throw new Error(`Error in Transform: Expect 'nearest' or 'bilinear', but got ${a}`);
      }
      let oe = L * _ + z * E + U * R + j;
      D[oe] = q;
    }
  }
  // Return once, after ALL batches are processed (moved out of the loop).
  return o.makeTensorInfo(g, n.dtype, D);
}
// Kernel registration config (Ds = Transform kernel name).
var mR = {
  kernelName: Ds,
  backendName: "cpu",
  kernelFunc: M7
};
// Maps a (possibly out-of-range) source coordinate into the valid range for
// the given fill mode. Unrecognised modes behave like "constant".
function lR(coord, size, mode) {
  if (mode === "reflect") {
    return L7(coord, size);
  }
  if (mode === "wrap") {
    return B7(coord, size);
  }
  if (mode === "nearest") {
    return V7(coord, size);
  }
  // "constant" and any other value: leave the coordinate untouched.
  return z7(coord, size);
}
// "reflect" fill mode: mirrors an out-of-range coordinate back into
// [0, t8 - 1] by folding over a period of 2 * t8, then clamps.
function L7(r, t8) {
  let e = r;
  if (e < 0) {
    if (t8 <= 1) e = 0;else {
      let o = 2 * t8;
      // Shift into (-o, 0], then mirror around the boundary.
      e < o && (e = o * Math.trunc(-e / o) + e), e = e < -t8 ? e + o : -e - 1;
    }
  } else if (e > t8 - 1) if (t8 <= 1) e = 0;else {
    let o = 2 * t8;
    // Reduce modulo the period, then mirror the upper half.
    e -= o * Math.trunc(e / o), e >= t8 && (e = o - e - 1);
  }
  return y.clamp(0, e, t8 - 1);
}
// "wrap" fill mode: wraps an out-of-range coordinate periodically into
// [0, t8 - 1] (period t8 - 1 per the trunc step), then clamps.
function B7(r, t8) {
  let e = r;
  if (e < 0) {
    if (t8 <= 1) e = 0;else {
      let o = t8 - 1;
      e += t8 * (Math.trunc(-e / o) + 1);
    }
  } else if (e > t8 - 1) if (t8 <= 1) e = 0;else {
    let o = t8 - 1;
    e -= t8 * Math.trunc(e / o);
  }
  return y.clamp(0, e, t8 - 1);
}
// "constant" fill mode: the coordinate passes through unchanged; an
// out-of-range read later resolves to the fill value instead.
function z7(coord, size) {
  return coord;
}
// "nearest" fill mode: clamp the coordinate onto the valid [0, size - 1]
// range via the shared clamp helper.
function V7(coord, size) {
  return y.clamp(0, coord, size - 1);
}
// Reads one channel value from a flattened image buffer, returning the fill
// value when the (row, col) coordinate lies outside [0, height) x [0, width).
function ql(data, height, width, batchStride, rowStride, colStride, batch, row, col, channel, fillValue) {
  const withinBounds = row >= 0 && row < height && col >= 0 && col < width;
  if (!withinBounds) {
    return fillValue;
  }
  const flatIndex = batch * batchStride + row * rowStride + col * colStride + channel;
  return data[flatIndex];
}
// Nearest-neighbour sampling: rounds the fractional (i, p) = (row, col)
// coordinate and reads via the bounds-checked reader ql.
function W7(r, t8, e, o, n, s, a, i, p, u, c) {
  let l = Math.round(i),
    m = Math.round(p);
  return ql(r, t8, e, o, n, s, a, l, m, u, c);
}
// Bilinear sampling: blends the four pixels surrounding the fractional
// (i, p) coordinate, weighting by distance; out-of-range corner reads fall
// back to the fill value c through ql.
function U7(r, t8, e, o, n, s, a, i, p, u, c) {
  let l = Math.floor(i),
    m = Math.floor(p),
    d = l + 1,
    f = m + 1,
    // h: interpolation along columns at the top row; g: at the bottom row.
    h = (f - p) * ql(r, t8, e, o, n, s, a, l, m, u, c) + (p - m) * ql(r, t8, e, o, n, s, a, l, f, u, c),
    g = (f - p) * ql(r, t8, e, o, n, s, a, d, m, u, c) + (p - m) * ql(r, t8, e, o, n, s, a, d, f, u, c);
  return (d - i) * h + (i - l) * g;
}
// CPU kernel for Unique along `axis` (bp is the shared implementation).
// Returns [uniqueValues, indices] where indices maps each input element to
// its slot in the unique output.
function G7(r) {
  let {
      inputs: t8,
      attrs: e,
      backend: o
    } = r,
    {
      axis: n
    } = e,
    {
      x: s
    } = t8;
  Q(s, "unique");
  let a = o.data.get(s.dataId).values,
    {
      outputValues: i,
      outputShape: p,
      indices: u
    } = bp(a, n, s.shape, s.dtype);
  return [o.makeTensorInfo(p, s.dtype, i), o.makeTensorInfo([u.length], "int32", u)];
}
// Kernel registration config (Zi = Unique kernel name).
var dR = {
  kernelName: Zi,
  backendName: "cpu",
  kernelFunc: G7
};
// CPU kernel for Unpack: splits `value` into size-1 slices along `axis`,
// squeezing that axis out of each result via a reshape.
function H7(r) {
  let {
      inputs: t8,
      backend: e,
      attrs: o
    } = r,
    {
      value: n
    } = t8,
    {
      axis: s
    } = o;
  // Normalize a negative axis.
  s < 0 && (s += n.shape.length);
  let a = n.shape.length,
    i = n.shape[s],
    // p: output shape = input shape with the unpacked axis removed.
    p = new Array(a - 1),
    u = 0;
  for (let d = 0; d < a; d++) d !== s && (p[u++] = n.shape[d]);
  let c = new Array(a).fill(0),
    l = n.shape.slice();
  l[s] = 1;
  let m = new Array(i);
  for (let d = 0; d < m.length; d++) {
    c[s] = d;
    // Slice out element d along the axis, then reshape to drop the axis.
    let f = Fo({
      inputs: {
        x: n
      },
      backend: e,
      attrs: {
        begin: c,
        size: l
      }
    });
    m[d] = Ve({
      inputs: {
        x: f
      },
      backend: e,
      attrs: {
        shape: p
      }
    }), e.disposeIntermediateTensorInfo(f);
  }
  return m;
}
// Kernel registration config (wa = Unpack kernel name).
var fR = {
  kernelName: wa,
  backendName: "cpu",
  kernelFunc: H7
};
// CPU kernel for UnsortedSegmentSum, composed from other kernels:
// for each segment id k, build a mask (segmentIds == k), cast it to float32,
// multiply with x and sum over axis 0; finally pack the per-segment results.
// All intermediates are tracked in `c` and disposed before returning.
function K7(r) {
  let {
      inputs: t8,
      backend: e,
      attrs: o
    } = r,
    {
      x: n,
      segmentIds: s
    } = t8,
    {
      numSegments: a
    } = o;
  Q(n, "unsortedSegmentSum");
  let i = n.shape.length,
    p = s.shape.length,
    u = [],
    c = [],
    l = i - p,
    m = s;
  // Expand segmentIds with trailing dims ($c = ExpandDims) until it can
  // broadcast against x.
  for (let f = 0; f < l; ++f) {
    let h = $c({
      inputs: {
        input: m
      },
      backend: e,
      attrs: {
        dim: f + 1
      }
    });
    m = h, c.push(h);
  }
  for (let f = 0; f < a; ++f) {
    let h = y.createScalarValue(f, "int32"),
      g = e.makeTensorInfo([], "int32", h),
      // GS = Equal, Do = Cast, dp = Multiply, gi = Sum (axis 0).
      x = GS({
        inputs: {
          a: g,
          b: m
        },
        backend: e
      }),
      b = Do({
        inputs: {
          x
        },
        backend: e,
        attrs: {
          dtype: "float32"
        }
      }),
      C = dp({
        inputs: {
          a: b,
          b: n
        },
        backend: e
      }),
      S = gi({
        inputs: {
          x: C
        },
        backend: e,
        attrs: {
          axis: 0,
          keepDims: false
        }
      });
    u.push(S), c.push(g), c.push(x), c.push(b), c.push(C), c.push(S);
  }
  // II = Pack: stack the per-segment sums along a new leading axis.
  let d = II({
    inputs: u,
    backend: e,
    attrs: {
      axis: 0
    }
  });
  return c.forEach(f => e.disposeIntermediateTensorInfo(f)), d;
}
// Kernel registration config (Ji = UnsortedSegmentSum kernel name).
var hR = {
  kernelName: Ji,
  backendName: "cpu",
  kernelFunc: K7
};
// All CPU-backend kernel configs defined above, registered with the global
// kernel registry via ri() (registerKernel) so the engine can dispatch ops
// to the "cpu" backend.
var q7 = [z_, jT, V_, W_, JT, U_, G_, H_, K_, q_, j_, X_, Y_, Q_, Z_, e$, t$, r$, o$, B_, n$, s$, a$, e_, i$, ZT, t_, u$, XT, p$, l$, m$, d$, f$, h$, g$, x$, y$, b$, C$, w$, S$, I$, v$, k$, N$, T$, _$, $$, E$, R$, D$, F$, A_, P$, r_, O$, o_, M$, n_, L$, B$, z$, s_, a_, V$, W$, U$, G$, i_, u_, YT, H$, c$, K$, q$, j$, F_, p_, c_, X$, l_, Y$, Q$, Z$, J$, eE, tE, rE, m_, oE, nE, sE, aE, uE, pE, cE, d_, lE, mE, hE, f_, h_, gE, xE, yE, g_, bE, SE, IE, Wf, vE, P_, y_, kE, NE, TE, _E, QT, Gl, $E, O_, M_, L_, EE, RE, DE, AE, FE, PE, OE, I_, ME, BE, zE, VE, k_, WE, UE, GE, N_, dE, KE, qE, jE, XE, YE, QE, ZE, JE, __, eR, $_, E_, tR, rR, oR, nR, sR, R_, A$, aR, iR, uR, pR, cR, mR, x_, dR, fR, hR, CE];
for (let r of q7) ri(r);
// Bundled webgl_util module namespace: qe() (the bundler's export helper)
// installs getters mapping the public API names onto the minified
// implementations defined below in this file.
var Ac = {};
qe(Ac, {
  assertNotComplex: () => Vs,
  bindCanvasToFramebuffer: () => rZ,
  bindColorTextureToFramebuffer: () => Ql,
  bindTextureToProgramUniformSampler: () => BI,
  bindTextureUnit: () => bR,
  bindVertexBufferToProgramAttribute: () => jf,
  callAndCheck: () => ce,
  canBeRepresented: () => TI,
  createFragmentShader: () => $I,
  createFramebuffer: () => OI,
  createProgram: () => EI,
  createStaticIndexBuffer: () => AI,
  createStaticVertexBuffer: () => DI,
  createTexture: () => FI,
  createVertexShader: () => _I,
  getBatchDim: () => yi,
  getExtensionOrThrow: () => Ec,
  getFramebufferErrorMessage: () => CR,
  getMaxTexturesInShader: () => WI,
  getNumChannels: () => eZ,
  getProgramUniformLocation: () => LI,
  getProgramUniformLocationOrThrow: () => MI,
  getRowsCols: () => bi,
  getShapeAs3D: () => Dc,
  getTextureShapeFromLogicalShape: () => zI,
  getWebGLDisjointQueryTimerVersion: () => UI,
  getWebGLErrorMessage: () => yR,
  getWebGLMaxTextureSize: () => VI,
  hasExtension: () => qr,
  isCapableOfRenderingToFloatTexture: () => GI,
  isDownloadFloatTextureEnabled: () => HI,
  isReshapeFree: () => Cu,
  isWebGLFenceEnabled: () => KI,
  isWebGLVersionEnabled: () => Yf,
  linkProgram: () => RI,
  logShaderSourceAndInfoLog: () => qf,
  resetMaxTextureSize: () => oZ,
  resetMaxTexturesInShader: () => nZ,
  unbindColorTextureFromFramebuffer: () => Xf,
  unbindTextureUnit: () => tZ,
  validateFramebuffer: () => Rc,
  validateProgram: () => Yl,
  validateTextureSize: () => PI
});
// Cache of WebGL contexts keyed by WebGL version (1 or 2).
var wp = {};
// Context attributes used when creating contexts for compute (no alpha,
// depth, stencil, AA, etc. — rendering features are unnecessary overhead).
var Uf = {
  alpha: false,
  antialias: false,
  premultipliedAlpha: false,
  preserveDrawingBuffer: false,
  depth: false,
  stencil: false,
  failIfMajorPerformanceCaveat: true
};
// setWebGLContext: registers an externally-created context for a version.
function vI(r, t8) {
  wp[r] = t8;
}
// getWebGLContext: returns the cached context for the given WebGL version,
// creating one if needed (or rebuilding it when the cached one is lost).
// Each call also resets the fixed-function state this backend relies on.
function Kr(r, t8) {
  if (!(r in wp) || t8 != null) {
    let o = X7(r, t8);
    if (o !== null) wp[r] = o;else return console.log("Could not get context for WebGL version", r), null;
  }
  let e = wp[r];
  // A lost context is evicted and recreated via a recursive call.
  return e == null || e.isContextLost() ? (delete wp[r], Kr(r)) : (e.disable(e.DEPTH_TEST), e.disable(e.STENCIL_TEST), e.disable(e.BLEND), e.disable(e.DITHER), e.disable(e.POLYGON_OFFSET_FILL), e.disable(e.SAMPLE_COVERAGE), e.enable(e.SCISSOR_TEST), e.enable(e.CULL_FACE), e.cullFace(e.BACK), wp[r]);
}
// Creates a canvas to host a context: OffscreenCanvas for WebGL2 when
// available (and not Safari), otherwise a DOM canvas.
function j7(r) {
  if (!A().getBool("IS_SAFARI") && typeof OffscreenCanvas != "undefined" && r === 2) return new OffscreenCanvas(300, 150);
  if (typeof document != "undefined") return document.createElement("canvas");
  throw new Error("Cannot create a canvas in this context");
}
// Creates a WebGL1/WebGL2 rendering context on the given (or a fresh) canvas,
// evicting the cache entry if the context is ever lost.
function X7(r, t8) {
  if (r !== 1 && r !== 2) throw new Error("Cannot get WebGL rendering context, WebGL is disabled.");
  let e = t8 == null ? j7(r) : t8;
  return e.addEventListener("webglcontextlost", o => {
    o.preventDefault(), delete wp[r];
  }, false), A().getBool("SOFTWARE_WEBGL_ENABLED") && (Uf.failIfMajorPerformanceCaveat = false), r === 1 ? e.getContext("webgl", Uf) || e.getContext("experimental-webgl", Uf) : e.getContext("webgl2", Uf);
}
// TypeScript-style reverse-mapped enums (value -> name and name -> value).
// bu: texture layout kind.
var bu;
(function (r) {
  r[r.DENSE = 0] = "DENSE", r[r.SHARED_BATCH = 1] = "SHARED_BATCH";
})(bu || (bu = {}));
// dr: texture usage.
var dr;
(function (r) {
  r[r.RENDER = 0] = "RENDER", r[r.UPLOAD = 1] = "UPLOAD", r[r.PIXELS = 2] = "PIXELS", r[r.DOWNLOAD = 3] = "DOWNLOAD";
})(dr || (dr = {}));
// rr: physical texture storage format.
var rr;
(function (r) {
  r[r.UNPACKED_FLOAT16 = 0] = "UNPACKED_FLOAT16", r[r.UNPACKED_FLOAT32 = 1] = "UNPACKED_FLOAT32", r[r.PACKED_4X1_UNSIGNED_BYTE = 2] = "PACKED_4X1_UNSIGNED_BYTE", r[r.PACKED_2X2_FLOAT32 = 3] = "PACKED_2X2_FLOAT32", r[r.PACKED_2X2_FLOAT16 = 4] = "PACKED_2X2_FLOAT16";
})(rr || (rr = {}));
// Texture shape for an unpacked matrix: WebGL textures are addressed as
// [width, height] while the logical matrix is [rows, columns].
function Sp(rows, columns) {
  return [columns, rows];
}
// Number of array elements backing an unpacked rows x columns matrix.
function gR(rows, columns) {
  return rows * columns;
}
// Squarish texture shape large enough to hold size(shape) values packed four
// per texel.
function jl(shape) {
  const numValues = y.sizeFromShape(shape);
  const numTexels = Math.ceil(numValues / 4);
  return y.sizeToSquarishShape(numTexels);
}
// Packed-texture dimensions [width, height] for a rows x columns matrix:
// each texel covers a 2x2 block, and each dimension is at least 1.
function La(rows, columns) {
  const width = Math.max(1, Math.ceil(columns / 2));
  const height = Math.max(1, Math.ceil(rows / 2));
  return [width, height];
}
// Number of array elements (4 channels per texel) backing a packed matrix.
function xR(rows, columns) {
  const [width, height] = La(rows, columns);
  return width * height * 4;
}
// getTextureConfig: resolves the internal/texture formats and channel counts
// for this platform. WebGL2 uses sized red/RGBA float formats; WebGL1 falls
// back to RGBA everywhere (t8 is the optional OES_texture_half_float ext).
function Xl(r, t8) {
  let e = r,
    o,
    n,
    s,
    a,
    i,
    p,
    u,
    c,
    l,
    m;
  return A().getNumber("WEBGL_VERSION") === 2 ? (o = e.R32F, n = e.R16F, s = e.RGBA16F, a = e.RGBA32F, i = e.RED, u = 4, c = 1, l = e.HALF_FLOAT, m = e.FLOAT, p = e.RGBA8) : (o = r.RGBA, n = r.RGBA, s = r.RGBA, a = e.RGBA, i = r.RGBA, u = 4, c = 4, l = t8 != null ? t8.HALF_FLOAT_OES : null, m = r.FLOAT, p = r.RGBA), {
    internalFormatFloat: o,
    internalFormatHalfFloat: n,
    internalFormatPackedHalfFloat: s,
    internalFormatPackedFloat: a,
    textureFormatFloat: i,
    downloadTextureFormat: p,
    downloadUnpackNumChannels: u,
    defaultNumChannels: c,
    textureTypeHalfFloat: l,
    textureTypeFloat: m
  };
}
// callAndCheck: runs a GL call and, when the DEBUG flag is set, polls
// gl.getError() afterwards (Y7) — debug-only because getError stalls the GPU.
function ce(r, t8) {
  let e = t8();
  return A().getBool("DEBUG") && Y7(r), e;
}
function Y7(r) {
  let t8 = r.getError();
  if (t8 !== r.NO_ERROR) throw new Error("WebGL Error: " + yR(r, t8));
}
// Representable range for 16-bit float textures: Q7 = 5.96e-8 (smallest
// positive half-float subnormal), Z7 = 65504 (largest finite half float).
var Q7 = 596e-10;
var Z7 = 65504;
// canBeRepresented: true when float32 rendering is enabled, or the value is 0
// or within half-float magnitude range.
function TI(r) {
  return !!(A().getBool("WEBGL_RENDER_FLOAT32_ENABLED") || r === 0 || Q7 < Math.abs(r) && Math.abs(r) < Z7);
}
// Human-readable name for a WebGL error code returned by gl.getError().
function yR(gl, status) {
  if (status === gl.NO_ERROR) {
    return "NO_ERROR";
  }
  if (status === gl.INVALID_ENUM) {
    return "INVALID_ENUM";
  }
  if (status === gl.INVALID_VALUE) {
    return "INVALID_VALUE";
  }
  if (status === gl.INVALID_OPERATION) {
    return "INVALID_OPERATION";
  }
  if (status === gl.INVALID_FRAMEBUFFER_OPERATION) {
    return "INVALID_FRAMEBUFFER_OPERATION";
  }
  if (status === gl.OUT_OF_MEMORY) {
    return "OUT_OF_MEMORY";
  }
  if (status === gl.CONTEXT_LOST_WEBGL) {
    return "CONTEXT_LOST_WEBGL";
  }
  return `Unknown error code ${status}`;
}
// getExtensionOrThrow: fetches a WebGL extension, throwing when unsupported
// (xi is the null-checking wrapper defined below).
function Ec(r, t8) {
  return xi(r, () => r.getExtension(t8), 'Extension "' + t8 + '" not supported on this browser.');
}
// createVertexShader: compiles vertex shader source t8, logging the info log
// and throwing on compile failure.
function _I(r, t8) {
  let e = xi(r, () => r.createShader(r.VERTEX_SHADER), "Unable to create vertex WebGLShader.");
  if (ce(r, () => r.shaderSource(e, t8)), ce(r, () => r.compileShader(e)), r.getShaderParameter(e, r.COMPILE_STATUS) === false) throw console.log(r.getShaderInfoLog(e)), new Error("Failed to compile vertex shader.");
  return e;
}
// createFragmentShader: like _I but for fragment shaders; in
// ENGINE_COMPILE_ONLY mode the status check is skipped (compilation may still
// be in flight), otherwise failures are pretty-printed via qf.
function $I(r, t8) {
  let e = xi(r, () => r.createShader(r.FRAGMENT_SHADER), "Unable to create fragment WebGLShader.");
  if (ce(r, () => r.shaderSource(e, t8)), ce(r, () => r.compileShader(e)), A().get("ENGINE_COMPILE_ONLY")) return e;
  if (r.getShaderParameter(e, r.COMPILE_STATUS) === false) throw qf(t8, r.getShaderInfoLog(e)), new Error("Failed to compile fragment shader.");
  return e;
}
// Matches the "ERROR: <file>:<line>:" prefix of a GLSL compiler info log and
// captures the line number.
var J7 = /ERROR: [0-9]+:([0-9]+):/g;
// logShaderSourceAndInfoLog: pretty-prints shader source `r` alongside its
// compile info log `t8`, highlighting the line the compiler reported.
function qf(r, t8) {
  // BUG FIX: J7 is a shared global (/g) regex, and RegExp.prototype.exec is
  // stateful through lastIndex when the global flag is set. After a previous
  // successful match, a later call could start scanning mid-string and miss
  // the error line. Reset lastIndex so every call scans from the start.
  J7.lastIndex = 0;
  let e = J7.exec(t8);
  if (e == null) {
    console.log(`Couldn't parse line number in error: ${t8}`), console.log(r);
    return;
  }
  // o: 1-based line number from the log; a: source lines prefixed with
  // right-padded line numbers; i: widest rendered line (for the highlight).
  let o = +e[1],
    n = r.split(`
`),
    s = n.length.toString().length + 2,
    a = n.map((l, m) => y.rightPad((m + 1).toString(), s) + l),
    i = 0;
  for (let l = 0; l < a.length; l++) i = Math.max(a[l].length, i);
  // Lines before, at, and after the failing line.
  let p = a.slice(0, o - 1),
    u = a.slice(o - 1, o),
    c = a.slice(o);
  console.log(p.join(`
`)), console.log(t8.split(`
`)[0]), console.log(`%c ${y.rightPad(u[0], i)}`, "border:1px solid red; background-color:#e3d2d2; color:#a61717"), console.log(c.join(`
`));
}
// createProgram: wraps gl.createProgram with a null check.
function EI(r) {
  return xi(r, () => r.createProgram(), "Unable to create WebGLProgram.");
}
// linkProgram: links and verifies LINK_STATUS (skipped in compile-only mode,
// where linking may be deferred).
function RI(r, t8) {
  if (ce(r, () => r.linkProgram(t8)), !A().get("ENGINE_COMPILE_ONLY") && r.getProgramParameter(t8, r.LINK_STATUS) === false) throw console.log(r.getProgramInfoLog(t8)), new Error("Failed to link vertex and fragment shaders.");
}
// validateProgram: runs gl.validateProgram and throws on failure.
function Yl(r, t8) {
  if (ce(r, () => r.validateProgram(t8)), r.getProgramParameter(t8, r.VALIDATE_STATUS) === false) throw console.log(r.getProgramInfoLog(t8)), new Error("Shader program validation failed.");
}
// createStaticVertexBuffer: uploads t8 into a new STATIC_DRAW array buffer.
function DI(r, t8) {
  let e = xi(r, () => r.createBuffer(), "Unable to create WebGLBuffer");
  return ce(r, () => r.bindBuffer(r.ARRAY_BUFFER, e)), ce(r, () => r.bufferData(r.ARRAY_BUFFER, t8, r.STATIC_DRAW)), e;
}
// createStaticIndexBuffer: same as DI but for the element (index) buffer.
function AI(r, t8) {
  let e = xi(r, () => r.createBuffer(), "Unable to create WebGLBuffer");
  return ce(r, () => r.bindBuffer(r.ELEMENT_ARRAY_BUFFER, e)), ce(r, () => r.bufferData(r.ELEMENT_ARRAY_BUFFER, t8, r.STATIC_DRAW)), e;
}
// getNumChannels: WebGL2 can use single-channel (RED) textures; WebGL1 always
// uses 4-channel RGBA.
function eZ() {
  return A().getNumber("WEBGL_VERSION") === 2 ? 1 : 4;
}
// createTexture: wraps gl.createTexture with a null check.
function FI(r) {
  return xi(r, () => r.createTexture(), "Unable to create WebGLTexture.");
}
// validateTextureSize: rejects non-positive dimensions and dimensions beyond
// the platform's WEBGL_MAX_TEXTURE_SIZE.
function PI(r, t8) {
  let e = A().getNumber("WEBGL_MAX_TEXTURE_SIZE");
  if (r <= 0 || t8 <= 0) {
    let o = `[${r}x${t8}]`;
    throw new Error("Requested texture size " + o + " is invalid.");
  }
  if (r > e || t8 > e) {
    let o = `[${r}x${t8}]`,
      n = `[${e}x${e}]`;
    throw new Error("Requested texture size " + o + " greater than WebGL maximum on this browser / GPU " + n + ".");
  }
}
// createFramebuffer: wraps gl.createFramebuffer with a null check.
function OI(r) {
  return xi(r, () => r.createFramebuffer(), "Unable to create WebGLFramebuffer.");
}
// bindVertexBufferToProgramAttribute: binds buffer o to attribute e of
// program t8 (n components, stride s, offset a). Returns false when the
// attribute was optimized out of the program (location -1).
function jf(r, t8, e, o, n, s, a) {
  let i = r.getAttribLocation(t8, e);
  return i === -1 ? false : (ce(r, () => r.bindBuffer(r.ARRAY_BUFFER, o)), ce(r, () => r.vertexAttribPointer(i, n, r.FLOAT, false, s, a)), ce(r, () => r.enableVertexAttribArray(i)), true);
}
// bindTextureUnit: validates the unit index (wR), activates it and binds t8.
function bR(r, t8, e) {
  wR(r, e), ce(r, () => r.activeTexture(r.TEXTURE0 + e)), ce(r, () => r.bindTexture(r.TEXTURE_2D, t8));
}
// unbindTextureUnit: activates the unit and unbinds any 2-D texture.
function tZ(r, t8) {
  wR(r, t8), ce(r, () => r.activeTexture(r.TEXTURE0 + t8)), ce(r, () => r.bindTexture(r.TEXTURE_2D, null));
}
// getProgramUniformLocationOrThrow: throws when the uniform is not present.
function MI(r, t8, e) {
  return xi(r, () => r.getUniformLocation(t8, e), 'uniform "' + e + '" not present in program.');
}
// getProgramUniformLocation: plain lookup, may return null.
function LI(r, t8, e) {
  return r.getUniformLocation(t8, e);
}
// bindTextureToProgramUniformSampler: binds texture t8 to unit o and points
// sampler uniform e at that unit.
function BI(r, t8, e, o) {
  ce(r, () => bR(r, t8, o)), ce(r, () => r.uniform1i(e, o));
}
// bindCanvasToFramebuffer: targets the default framebuffer (the canvas) and
// sizes the viewport/scissor to it.
function rZ(r) {
  ce(r, () => r.bindFramebuffer(r.FRAMEBUFFER, null)), ce(r, () => r.viewport(0, 0, r.canvas.width, r.canvas.height)), ce(r, () => r.scissor(0, 0, r.canvas.width, r.canvas.height));
}
// bindColorTextureToFramebuffer: attaches texture t8 as COLOR_ATTACHMENT0 of
// framebuffer e.
function Ql(r, t8, e) {
  ce(r, () => r.bindFramebuffer(r.FRAMEBUFFER, e)), ce(r, () => r.framebufferTexture2D(r.FRAMEBUFFER, r.COLOR_ATTACHMENT0, r.TEXTURE_2D, t8, 0));
}
// unbindColorTextureFromFramebuffer: detaches COLOR_ATTACHMENT0.
function Xf(r, t8) {
  ce(r, () => r.bindFramebuffer(r.FRAMEBUFFER, t8)), ce(r, () => r.framebufferTexture2D(r.FRAMEBUFFER, r.COLOR_ATTACHMENT0, r.TEXTURE_2D, null, 0));
}
// validateFramebuffer: throws with a descriptive message when the currently
// bound framebuffer is incomplete.
function Rc(r) {
  let t8 = r.checkFramebufferStatus(r.FRAMEBUFFER);
  if (t8 !== r.FRAMEBUFFER_COMPLETE) throw new Error("Error binding framebuffer: " + CR(r, t8));
}
// Human-readable name for a framebuffer status code returned by
// gl.checkFramebufferStatus().
function CR(gl, status) {
  if (status === gl.FRAMEBUFFER_INCOMPLETE_ATTACHMENT) {
    return "FRAMEBUFFER_INCOMPLETE_ATTACHMENT";
  }
  if (status === gl.FRAMEBUFFER_INCOMPLETE_MISSING_ATTACHMENT) {
    return "FRAMEBUFFER_INCOMPLETE_MISSING_ATTACHMENT";
  }
  if (status === gl.FRAMEBUFFER_INCOMPLETE_DIMENSIONS) {
    return "FRAMEBUFFER_INCOMPLETE_DIMENSIONS";
  }
  if (status === gl.FRAMEBUFFER_UNSUPPORTED) {
    return "FRAMEBUFFER_UNSUPPORTED";
  }
  return `unknown error ${status}`;
}
// Runs a GL factory inside ce() error checking and throws with `message`
// when the result is null or undefined.
function xi(gl, factory, message) {
  const result = ce(gl, () => factory());
  if (result == null) {
    throw new Error(message);
  }
  return result;
}
// Validates that a texture unit index lands inside the legal
// [gl.TEXTURE0, gl.TEXTURE0 + max - 1] range.
// NOTE(review): MAX_COMBINED_TEXTURE_IMAGE_UNITS here is the GLenum constant
// read off the context object, not the value from getParameter — this mirrors
// the original; confirm intent before changing.
function wR(gl, textureUnit) {
  const highestUnit = gl.MAX_COMBINED_TEXTURE_IMAGE_UNITS - 1;
  const glTextureUnit = textureUnit + gl.TEXTURE0;
  if (glTextureUnit < gl.TEXTURE0 || glTextureUnit > highestUnit) {
    const validRange = `[gl.TEXTURE0, gl.TEXTURE${highestUnit}]`;
    throw new Error(`textureUnit must be in ${validRange}.`);
  }
}
// Product of all dimensions except the trailing `numTrailingDims` (default 2)
// — the "batch" portion of a shape viewed as batch x rows x columns.
function yi(shape, numTrailingDims = 2) {
  const batchPortion = shape.slice(0, shape.length - numTrailingDims);
  return y.sizeFromShape(batchPortion);
}
// Trailing [rows, columns] of a shape; a rank-1 shape is a single row.
// Throws on an empty shape.
function bi(shape) {
  if (shape.length === 0) throw Error("Cannot get rows and columns of an empty shape array.");
  const columns = shape[shape.length - 1];
  const rows = shape.length > 1 ? shape[shape.length - 2] : 1;
  return [rows, columns];
}
// Collapses an arbitrary shape to [batch, rows, columns]; scalar-like shapes
// ([] or [1]) stay [1, 1, 1].
function Dc(shape) {
  let shapeAs3D = [1, 1, 1];
  const isScalarLike = shape.length === 0 || shape.length === 1 && shape[0] === 1;
  if (!isScalarLike) {
    shapeAs3D = [yi(shape), ...bi(shape)];
  }
  return shapeAs3D;
}
// Computes the physical 2-D texture shape used to store a tensor of logical
// shape `r`, honoring the WEBGL_MAX_TEXTURE_SIZE and
// WEBGL_MAX_SIZE_FOR_NARROW_TEXTURE environment limits. When `t8` is true the
// texture is packed (2x2 values per texel), so limits are doubled and the
// trailing dims are rounded up to even. Falls back to a near-square layout
// (y.sizeToSquarishShape) when no direct row/column factorization fits.
function zI(r, t8 = false) {
  let e = A().getNumber("WEBGL_MAX_TEXTURE_SIZE"),
    o = A().getNumber("WEBGL_MAX_SIZE_FOR_NARROW_TEXTURE");
  // Auto-squarify: when the narrow limit is unset (Infinity) but the flag is
  // on, cap narrow textures at half the max texture size. For packed
  // textures, double limits and pad the last two dims to even numbers.
  o === 1 / 0 && A().getBool("WEBGL_AUTO_SQUARIFY_NARROW_TEXTURE_SHAPE") && (o = e / 2), t8 && (e = e * 2, o = o * 2, r = r.map((i, p) => p >= r.length - 2 ? y.nearestLargerEven(r[p]) : r[p]), r.length === 1 && (r = [2, r[0]])), r.length !== 2 && (r = y.squeezeShape(r).newShape);
  let n = y.sizeFromShape(r),
    s = null;
  // Try direct factorizations for ranks 1-4 where both texture dims fit `e`.
  r.length <= 1 && n <= e ? s = [1, n] : r.length === 2 && r[0] <= e && r[1] <= e ? s = r : r.length === 3 && r[0] * r[1] <= e && r[2] <= e ? s = [r[0] * r[1], r[2]] : r.length === 3 && r[0] <= e && r[1] * r[2] <= e ? s = [r[0], r[1] * r[2]] : r.length === 4 && r[0] * r[1] * r[2] <= e && r[3] <= e ? s = [r[0] * r[1] * r[2], r[3]] : r.length === 4 && r[0] <= e && r[1] * r[2] * r[3] <= e && (s = [r[0], r[1] * r[2] * r[3]]);
  // `a` flags a "too narrow" candidate: one dim exceeds the narrow limit `o`
  // while the other is minimal (<= 2 packed, <= 1 unpacked).
  let a = s != null && Math.max(...s) > o && Math.min(...s) <= (t8 ? 2 : 1) && Math.min(...s) > 0;
  if (s == null || a) if (t8) {
    // Packed fallback: squarish shape in texel units, then scaled back to values.
    let i = yi(r),
      p = 2,
      u = 2;
    r.length && ([p, u] = bi(r)), n = i * (p / 2) * (u / 2), s = y.sizeToSquarishShape(n).map(c => c * 2);
  } else s = y.sizeToSquarishShape(n);
  return s;
}
// True when `r` is an even number (remainder of division by 2 is zero).
function Gf(r) {
  const remainder = r % 2;
  return remainder === 0;
}
// Decides whether two shapes can share a packed-texture layout, looking only
// at the trailing two dimensions of each. Identical tails, empty tails, or
// any zero-sized dimension are trivially compatible; otherwise compatibility
// requires matching columns and even rows (with a relaxed rule for
// different-rank tails).
function Cu(r, t8) {
  const a = r.slice(-2);
  const b = t8.slice(-2);
  if (y.arraysEqual(a, b) || a.length === 0 || b.length === 0) return true;
  if (a[0] === 0 || a[1] === 0 || b[0] === 0 || b[1] === 0) return true;
  if (a.length !== b.length) {
    const lastA = a[a.length - 1];
    const lastB = b[b.length - 1];
    if (lastA === lastB) return true;
    if (Gf(lastA) && Gf(lastB) && (a[0] === 1 || b[0] === 1)) return true;
  }
  return a[1] === b[1] && Gf(a[0]) && Gf(b[0]);
}
// Lazily-populated WebGL capability caches (reset via the helpers below).
var Hf; // cached gl.MAX_TEXTURE_SIZE (filled by VI)
var Kf; // cached gl.MAX_TEXTURE_IMAGE_UNITS (filled by WI)
// Returns gl.MAX_TEXTURE_SIZE for WebGL version `r`, caching the queried
// value in the module-level `Hf`.
function VI(r) {
  if (Hf == null) {
    const ctx = Kr(r);
    Hf = ctx.getParameter(ctx.MAX_TEXTURE_SIZE);
  }
  return Hf;
}
// Clears the cached MAX_TEXTURE_SIZE so the next VI() call re-queries WebGL.
function oZ() {
  Hf = null;
}
// Clears the cached MAX_TEXTURE_IMAGE_UNITS so the next WI() call re-queries.
function nZ() {
  Kf = null;
}
// Returns the usable number of texture image units for WebGL version `r`,
// capped at 16; the raw gl.MAX_TEXTURE_IMAGE_UNITS query is cached in `Kf`.
function WI(r) {
  if (Kf == null) {
    const ctx = Kr(r);
    Kf = ctx.getParameter(ctx.MAX_TEXTURE_IMAGE_UNITS);
  }
  return Math.min(16, Kf);
}
// Reports which disjoint-timer-query extension generation is available for
// WebGL version `r`: 2 (WebGL2 flavor), 1 (WebGL1 flavor), or 0 (none).
function UI(r) {
  if (r === 0) {
    return 0;
  }
  const ctx = Kr(r);
  if (qr(ctx, "EXT_disjoint_timer_query_webgl2") && r === 2) {
    return 2;
  }
  if (qr(ctx, "EXT_disjoint_timer_query")) {
    return 1;
  }
  return 0;
}
// True when the WebGL context `r` exposes the extension named `t8`.
function qr(r, t8) {
  const extension = r.getExtension(t8);
  return extension != null;
}
// Probes whether a WebGL context of version `r` can be obtained at all.
// Logs and returns false when context creation throws.
function Yf(r) {
  try {
    const ctx = Kr(r);
    if (ctx != null) {
      return true;
    }
  } catch (t8) {
    console.log("Error when getting WebGL context: ", t8);
    return false;
  }
  return false;
}
// Whether float32 textures can be rendered to for WebGL version `r`:
// requires the version-appropriate float extension, then an actual
// framebuffer-completeness probe (NI).
function GI(r) {
  if (r === 0) {
    return false;
  }
  const ctx = Kr(r);
  const requiredExtension = r === 1 ? "OES_texture_float" : "EXT_color_buffer_float";
  if (!qr(ctx, requiredExtension)) {
    return false;
  }
  return NI(ctx);
}
// Whether float pixels can be read back for WebGL version `r`.
// WebGL1 needs OES_texture_float plus WEBGL_color_buffer_float; WebGL2
// accepts EXT_color_buffer_float, otherwise falls back to a half-float
// renderability probe (sZ) via EXT_color_buffer_half_float.
function HI(r) {
  if (r === 0) {
    return false;
  }
  const ctx = Kr(r);
  if (r === 1) {
    if (!qr(ctx, "OES_texture_float")) return false;
    if (!qr(ctx, "WEBGL_color_buffer_float")) return false;
    return NI(ctx);
  }
  if (qr(ctx, "EXT_color_buffer_float")) {
    return NI(ctx);
  }
  const halfFloatName = "EXT_color_buffer_half_float";
  if (qr(ctx, halfFloatName)) {
    const halfFloatExt = ctx.getExtension(halfFloatName);
    return sZ(ctx, halfFloatExt);
  }
  return false;
}
// Probes float32 renderability on context `r`: allocates a 1x1 float texture,
// attaches it to a framebuffer, and checks FRAMEBUFFER_COMPLETE. All GL
// objects are unbound and deleted before returning. GL call order is
// preserved from the original.
function NI(r) {
  const formats = Xl(r);
  const texture = r.createTexture();
  r.bindTexture(r.TEXTURE_2D, texture);
  const width = 1;
  const height = 1;
  r.texImage2D(r.TEXTURE_2D, 0, formats.internalFormatFloat, width, height, 0, formats.textureFormatFloat, formats.textureTypeFloat, null);
  const framebuffer = r.createFramebuffer();
  r.bindFramebuffer(r.FRAMEBUFFER, framebuffer);
  r.framebufferTexture2D(r.FRAMEBUFFER, r.COLOR_ATTACHMENT0, r.TEXTURE_2D, texture, 0);
  const isComplete = r.checkFramebufferStatus(r.FRAMEBUFFER) === r.FRAMEBUFFER_COMPLETE;
  r.bindTexture(r.TEXTURE_2D, null);
  r.bindFramebuffer(r.FRAMEBUFFER, null);
  r.deleteTexture(texture);
  r.deleteFramebuffer(framebuffer);
  return isComplete;
}
// Half-float variant of NI: probes renderability of a 1x1 half-float texture
// using the format constants derived from the EXT_color_buffer_half_float
// extension object `t8`. GL call order is preserved from the original.
function sZ(r, t8) {
  const formats = Xl(r, t8);
  const texture = r.createTexture();
  r.bindTexture(r.TEXTURE_2D, texture);
  const width = 1;
  const height = 1;
  r.texImage2D(r.TEXTURE_2D, 0, formats.internalFormatHalfFloat, width, height, 0, formats.textureFormatFloat, formats.textureTypeHalfFloat, null);
  const framebuffer = r.createFramebuffer();
  r.bindFramebuffer(r.FRAMEBUFFER, framebuffer);
  r.framebufferTexture2D(r.FRAMEBUFFER, r.COLOR_ATTACHMENT0, r.TEXTURE_2D, texture, 0);
  const isComplete = r.checkFramebufferStatus(r.FRAMEBUFFER) === r.FRAMEBUFFER_COMPLETE;
  r.bindTexture(r.TEXTURE_2D, null);
  r.bindFramebuffer(r.FRAMEBUFFER, null);
  r.deleteTexture(texture);
  r.deleteFramebuffer(framebuffer);
  return isComplete;
}
// Fence-sync availability: only WebGL2 contexts can expose fenceSync.
function KI(r) {
  if (r !== 2) {
    return false;
  }
  return Kr(r).fenceSync != null;
}
// Asserts that no tensor in `r` (a single tensor or an array; null entries
// are skipped) has dtype complex64, which the WebGL backend cannot handle
// for operation `t8`.
function Vs(r, t8) {
  const tensors = Array.isArray(r) ? r : [r];
  for (const e of tensors) {
    if (e == null) continue;
    y.assert(e.dtype !== "complex64", () => `${t8} does not support complex64 tensors in the WebGL backend.`);
  }
}
// Environment-flag registration for the WebGL backend. Defaults are evaluated
// lazily; several depend on the detected WEBGL_VERSION or on the WEBGL_PACK
// master switch.
var Se = A();
Se.registerFlag("HAS_WEBGL", () => Se.getNumber("WEBGL_VERSION") > 0);
// Prefer WebGL2, fall back to WebGL1, else 0 (no WebGL).
Se.registerFlag("WEBGL_VERSION", () => Yf(2) ? 2 : Yf(1) ? 1 : 0);
Se.registerFlag("WEBGL_CHECK_NUMERICAL_PROBLEMS", () => false);
Se.registerFlag("WEBGL_BUFFER_SUPPORTED", () => Se.get("WEBGL_VERSION") === 2);
Se.registerFlag("WEBGL_CPU_FORWARD", () => true);
Se.registerFlag("WEBGL_FORCE_F16_TEXTURES", () => false);
// Packing-related flags all default to the WEBGL_PACK master switch.
Se.registerFlag("WEBGL_PACK", () => Se.getBool("HAS_WEBGL"));
Se.registerFlag("WEBGL_PACK_NORMALIZATION", () => Se.getBool("WEBGL_PACK"));
Se.registerFlag("WEBGL_PACK_CLIP", () => Se.getBool("WEBGL_PACK"));
Se.registerFlag("WEBGL_PACK_DEPTHWISECONV", () => Se.getBool("WEBGL_PACK"));
Se.registerFlag("WEBGL_PACK_BINARY_OPERATIONS", () => Se.getBool("WEBGL_PACK"));
Se.registerFlag("WEBGL_PACK_UNARY_OPERATIONS", () => Se.getBool("WEBGL_PACK"));
Se.registerFlag("WEBGL_PACK_ARRAY_OPERATIONS", () => Se.getBool("WEBGL_PACK"));
Se.registerFlag("WEBGL_PACK_IMAGE_OPERATIONS", () => Se.getBool("WEBGL_PACK"));
Se.registerFlag("WEBGL_PACK_REDUCE", () => Se.getBool("WEBGL_PACK"));
Se.registerFlag("WEBGL_LAZILY_UNPACK", () => Se.getBool("WEBGL_PACK"));
Se.registerFlag("WEBGL_CONV_IM2COL", () => Se.getBool("WEBGL_PACK"));
Se.registerFlag("WEBGL_PACK_CONV2DTRANSPOSE", () => Se.getBool("WEBGL_PACK"));
// Capability flags backed by runtime WebGL probes (VI/WI/UI/GI/HI/KI above).
Se.registerFlag("WEBGL_MAX_TEXTURE_SIZE", () => VI(Se.getNumber("WEBGL_VERSION")));
Se.registerFlag("WEBGL_MAX_TEXTURES_IN_SHADER", () => WI(Se.getNumber("WEBGL_VERSION")));
Se.registerFlag("WEBGL_DISJOINT_QUERY_TIMER_EXTENSION_VERSION", () => {
  let r = Se.getNumber("WEBGL_VERSION");
  return r === 0 ? 0 : UI(r);
});
Se.registerFlag("WEBGL_DISJOINT_QUERY_TIMER_EXTENSION_RELIABLE", () => Se.getNumber("WEBGL_DISJOINT_QUERY_TIMER_EXTENSION_VERSION") > 0 && !ou.isMobile());
Se.registerFlag("WEBGL_RENDER_FLOAT32_CAPABLE", () => GI(Se.getNumber("WEBGL_VERSION")));
Se.registerFlag("WEBGL_RENDER_FLOAT32_ENABLED", () => Se.getBool("WEBGL_FORCE_F16_TEXTURES") ? false : Se.getBool("WEBGL_RENDER_FLOAT32_CAPABLE"));
Se.registerFlag("WEBGL_DOWNLOAD_FLOAT_ENABLED", () => HI(Se.getNumber("WEBGL_VERSION")));
Se.registerFlag("WEBGL_FENCE_API_ENABLED", () => KI(Se.getNumber("WEBGL_VERSION")));
Se.registerFlag("WEBGL_SIZE_UPLOAD_UNIFORM", () => Se.getBool("WEBGL_RENDER_FLOAT32_ENABLED") ? 4 : 0);
// Threshold flags are validated: must be -1 (disabled) or >= 0.
Se.registerFlag("WEBGL_DELETE_TEXTURE_THRESHOLD", () => -1, r => {
  if (typeof r != "number") throw new Error(`WEBGL_DELETE_TEXTURE_THRESHOLD must be a number but got ${r}.`);
  if (r < 0 && r !== -1) throw new Error(`WEBGL_DELETE_TEXTURE_THRESHOLD must be -1 (indicating never delete) or at least 0, but got ${r}.`);
});
Se.registerFlag("WEBGL_FLUSH_THRESHOLD", () => ou.isMobile() ? 1 : -1, r => {
  if (typeof r != "number") throw new Error(`WEBGL_FLUSH_THRESHOLD must be a number but got ${r}.`);
  if (r < 0 && r !== -1) throw new Error(`WEBGL_FLUSH_THRESHOLD must be -1 (indicating never manual flush) or at least 0, but got ${r}.`);
});
Se.registerFlag("CPU_HANDOFF_SIZE_THRESHOLD", () => 128);
Se.registerFlag("WEBGL_USE_SHAPES_UNIFORMS", () => false);
Se.registerFlag("TOPK_LAST_DIM_CPU_HANDOFF_SIZE_THRESHOLD", () => 1e5);
Se.registerFlag("TOPK_K_CPU_HANDOFF_THRESHOLD", () => 128);
Se.registerFlag("WEBGL_EXP_CONV", () => false);
Se.registerFlag("SOFTWARE_WEBGL_ENABLED", () => Se.getBool("IS_TEST"));
Se.registerFlag("WEBGL_MAX_SIZE_FOR_NARROW_TEXTURE", () => 1 / 0);
Se.registerFlag("WEBGL_AUTO_SQUARIFY_NARROW_TEXTURE_SHAPE", () => false);
Se.registerFlag("WEBGL2_ISNAN_CUSTOM", () => false);
Se.registerFlag("ENGINE_COMPILE_ONLY", () => false);
// Returns the GLSL-version-dependent shader "glue" used by the shader
// compiler: language keywords, the texture-sampling builtin name, the output
// variable, and polyfill snippets for isnan/isinf/round that differ between
// WebGL2 (GLSL ES 3.00) and WebGL1 (GLSL ES 1.00). The WEBGL2_ISNAN_CUSTOM
// flag swaps in a bit-pattern isnan for drivers with a broken builtin.
function vt() {
  let r, t8, e, o, n, s, a, i, p, u;
  return A().getNumber("WEBGL_VERSION") === 2 ? (r = "#version 300 es", t8 = "in", e = "out", o = "in", n = "texture", s = "outputColor", a = "out vec4 outputColor;", i = A().getBool("WEBGL2_ISNAN_CUSTOM") ? `
      bool isnan_custom(float val) {
        uint floatToUint = floatBitsToUint(val);
        return (floatToUint & 0x7fffffffu) > 0x7f800000u;
      }
      bvec4 isnan_custom(vec4 val) {
        return bvec4(isnan_custom(val.x),
          isnan_custom(val.y), isnan_custom(val.z), isnan_custom(val.w));
      }
      #define isnan(value) isnan_custom(value)
    ` : "", p = "", u = `
      #define round(value) newRound(value)
      int newRound(float value) {
        return int(floor(value + 0.5));
      }
      ivec4 newRound(vec4 value) {
        return ivec4(floor(value + vec4(0.5)));
      }
    `) : (r = "", t8 = "attribute", e = "varying", o = "varying", n = "texture2D", s = "gl_FragColor", a = "", i = `
      #define isnan(value) isnan_custom(value)
      bool isnan_custom(float val) {
        return (val > 0. || val < 1. || val == 0.) ? false : true;
      }
      bvec4 isnan_custom(vec4 val) {
        return bvec4(isnan(val.x), isnan(val.y), isnan(val.z), isnan(val.w));
      }
    `, p = `
      uniform float INFINITY;
      bool isinf(float val) {
        return abs(val) == INFINITY;
      }
      bvec4 isinf(vec4 val) {
        return equal(abs(val), vec4(INFINITY));
      }
    `, u = `
      int round(float value) {
        return int(floor(value + 0.5));
      }
      ivec4 round(vec4 value) {
        return ivec4(floor(value + vec4(0.5)));
      }
    `), {
    version: r,
    attribute: t8,
    varyingVs: e,
    varyingFs: o,
    texture2D: n,
    output: s,
    defineOutput: a,
    defineSpecialNaN: i,
    defineSpecialInf: p,
    defineRound: u
  };
}
// Emits GLSL that decomposes the flat variable `e` (default "index") into the
// coordinate variables named in `r` for a tensor of shape `t8`, using
// compile-time strides baked into the shader source.
function Ws(r, t8, e = "index") {
  const strides = y.computeStrides(t8);
  const statements = strides.map((n, s) => {
    const declare = `int ${r[s]} = ${e} / ${n}`;
    const isLast = s === strides.length - 1;
    const remainder = isLast ? `int ${r[s + 1]} = ${e} - ${r[s]} * ${n}` : `index -= ${r[s]} * ${n}`;
    return `${declare}; ${remainder};`;
  });
  return statements.join("");
}
// Like Ws, but reads strides from the `outShapeStrides` shader uniform instead
// of baking compile-time constants into the source.
function Ip(r, t8, e = "index") {
  const strides = y.computeStrides(t8);
  const statements = strides.map((n, s) => {
    const declare = `int ${r[s]} = ${e} / outShapeStrides[${s}]`;
    const isLast = s === strides.length - 1;
    const remainder = isLast ? `int ${r[s + 1]} = ${e} - ${r[s]} * outShapeStrides[${s}]` : `index -= ${r[s]} * outShapeStrides[${s}]`;
    return `${declare}; ${remainder};`;
  });
  return statements.join("");
}
// Builds GLSL stride expressions for the dimension indices in `r`, reading
// extents from the uniform array named `t8`: strides[i] is the textual
// product of t8[r[i+1]] .. t8[r[last]].
function aZ(r, t8) {
  const rank = r.length;
  const dims = r.map(s => `${t8}[${s}]`);
  const strides = new Array(rank - 1);
  strides[rank - 2] = dims[rank - 1];
  for (let s = rank - 3; s >= 0; --s) {
    strides[s] = `(${strides[s + 1]} * ${dims[s + 1]})`;
  }
  return strides;
}
function SR(r, t8, e = "index") { | |
let o = r.map((s, a) => a), | |
n = aZ(o, t8); | |
return n.map((s, a) => { | |
let i = `int ${r[a]} = ${e} / ${n[a]}`, | |
p = a === n.length - 1 ? `int ${r[a + 1]} = ${e} - ${r[a]} * ${n[a]}` : `index -= ${r[a]} * ${n[a]}`; | |
return `${i}; ${p};`; | |
}).join(""); | |
} | |
// Emits a GLSL getFlatIndex helper that linearizes an ivec3 coordinate using
// compile-time strides computed from shape `r`.
function Fc(r) {
  let t8 = y.computeStrides(r).map(e => e.toString());
  return `
    int getFlatIndex(ivec3 coords) {
      return coords.x * ${t8[0]} + coords.y * ${t8[1]} + coords.z;
    }
  `;
}
// Shape-uniform counterpart of Fc: getFlatIndex reads strides from the
// `outShapeStrides` uniform instead of compile-time constants.
function Pc() {
  return `
    int getFlatIndex(ivec3 coords) {
      return coords.x * outShapeStrides[0] + coords.y * outShapeStrides[1] + coords.z;
    }
  `;
}
// GLSL snippet that encodes a highp float into four bytes (IEEE-754 single
// precision packed into an RGBA texel, each channel scaled to 0..1). Used to
// download float data when float readback is unavailable. NaN maps to all-255,
// +/-overflow to +/-Infinity bit patterns, subnormals to zero.
var Qf = `
  const float FLOAT_MAX = 1.70141184e38;
  const float FLOAT_MIN = 1.17549435e-38;
  lowp vec4 encode_float(highp float v) {
    if (isnan(v)) {
      return vec4(255, 255, 255, 255);
    }
    highp float av = abs(v);
    if(av < FLOAT_MIN) {
      return vec4(0.0, 0.0, 0.0, 0.0);
    } else if(v > FLOAT_MAX) {
      return vec4(0.0, 0.0, 128.0, 127.0) / 255.0;
    } else if(v < -FLOAT_MAX) {
      return vec4(0.0, 0.0, 128.0, 255.0) / 255.0;
    }
    highp vec4 c = vec4(0,0,0,0);
    highp float e = floor(log2(av));
    highp float m = exp2(fract(log2(av))) - 1.0;
    c[2] = floor(128.0 * m);
    m -= c[2] / 128.0;
    c[1] = floor(32768.0 * m);
    m -= c[1] / 32768.0;
    c[0] = floor(8388608.0 * m);
    highp float ebias = e + 127.0;
    c[3] = floor(ebias / 2.0);
    ebias -= c[3] * 2.0;
    c[2] += floor(ebias) * 128.0;
    c[3] += 128.0 * step(0.0, -v);
    return c / 255.0;
  }
`;
// Local alias for the broadcast-dimension helper exposed on the shared
// backend-util object `w`.
var {
  getBroadcastDims: IR
} = w;
// Assembles the complete fragment-shader source for a program: uniform
// declarations for every input (plus optional `<name>Shape`/`<name>TexShape`
// uniforms and custom uniforms), per-input sampler functions, the
// output-coordinate helper, and the program's own user code.
// `r` = input infos, `t8` = output shape info, `e` = program description.
function vR(r, t8, e) {
  let o = [];
  // Declare uniforms per input: plain float (array) uniforms for uniform
  // inputs, otherwise a sampler2D plus a flat-offset int uniform. With shape
  // uniforms enabled, also declare shape/texShape uniforms sized by rank.
  if (r.forEach(d => {
    let f = y.sizeFromShape(d.shapeInfo.logicalShape);
    if (d.shapeInfo.isUniform ? o.push(`uniform float ${d.name}${f > 1 ? `[${f}]` : ""};`) : (o.push(`uniform sampler2D ${d.name};`), o.push(`uniform int offset${d.name};`)), e.enableShapeUniforms) {
      let {
        uniformShape: h
      } = Zf(e.packedInputs, d.shapeInfo.logicalShape, d.shapeInfo.texShape);
      switch (h.length) {
        case 1:
          o.push(`uniform int ${d.name}Shape;`);
          break;
        case 2:
          o.push(`uniform ivec2 ${d.name}Shape;`);
          break;
        case 3:
          o.push(`uniform ivec3 ${d.name}Shape;`);
          break;
        case 4:
          o.push(`uniform ivec4 ${d.name}Shape;`);
          break;
        default:
          break;
      }
      o.push(`uniform ivec2 ${d.name}TexShape;`);
    }
  }), e.enableShapeUniforms) {
    // Output shape/stride uniforms, sized by output rank.
    switch (t8.logicalShape.length) {
      case 1:
        o.push("uniform int outShape;");
        break;
      case 2:
        o.push("uniform ivec2 outShape;"), o.push("uniform int outShapeStrides;");
        break;
      case 3:
        o.push("uniform ivec3 outShape;"), o.push("uniform ivec2 outShapeStrides;");
        break;
      case 4:
        o.push("uniform ivec4 outShape;"), o.push("uniform ivec3 outShapeStrides;");
        break;
      default:
        break;
    }
    o.push("uniform ivec2 outTexShape;");
  }
  e.customUniforms && e.customUniforms.forEach(d => {
    o.push(`uniform ${d.type} ${d.name}${d.arrayIndex ? `[${d.arrayIndex}]` : ""};`);
  });
  // Stitch together: prologue, sampleTexture helper, setOutput (packed or
  // unpacked), uniforms, output-coords helper, input samplers, user code.
  let n = o.join(`
`),
    s = r.map(d => iZ(d, t8, e.packedInputs, e.enableShapeUniforms)).join(`
`),
    a = t8.texShape,
    i = vt(),
    p = cZ(i),
    u,
    c,
    l = dZ(i);
  return t8.isPacked ? (u = uZ(t8.logicalShape, a, e.enableShapeUniforms), c = mZ(i)) : (u = pZ(t8.logicalShape, a, e.enableShapeUniforms), c = lZ(i)), e.packedInputs && (l += xZ), [l, p, c, n, u, s, e.userCode].join(`
`);
}
// Picks the unpacked sampler-emitting routine for input `r` by logical rank
// (0-6 D). `t8` selects the shape-uniform code path where supported.
function Mc(r, t8 = false) {
  const rank = r.shapeInfo.logicalShape.length;
  if (rank === 0) return $Z(r, t8);
  if (rank === 1) return RZ(r, t8);
  if (rank === 2) return AZ(r, t8);
  if (rank === 3) return PZ(r, t8);
  if (rank === 4) return MZ(r, t8);
  if (rank === 5) return LZ(r);
  if (rank === 6) return BZ(r);
  throw new Error(`${rank}-D input sampling is not yet supported`);
}
// Picks the packed sampler-emitting routine for input `r` by logical rank;
// ranks above 3 share the generic OZ routine.
function kR(r, t8) {
  const rank = r.shapeInfo.logicalShape.length;
  if (rank === 0) return _Z(r);
  if (rank === 1) return EZ(r, t8);
  if (rank === 2) return DZ(r, t8);
  if (rank === 3) return FZ(r, t8);
  return OZ(r, t8);
}
// Emits the sampler source for input `r` and, when the input's rank does not
// exceed the output's (`t8`), appends the broadcast-aware "AtOutCoords"
// sampler. `e` selects the packed variants; `o` the shape-uniform path.
function iZ(r, t8, e = false, o) {
  let source = e ? kR(r, o) : Mc(r, o);
  const inputRank = r.shapeInfo.logicalShape.length;
  const outputRank = t8.logicalShape.length;
  if (inputRank <= outputRank) {
    source += e ? zZ(r, t8) : VZ(r, t8);
  }
  return source;
}
// Picks the packed getOutputCoords emitter by output rank `r.length`;
// ranks above 3 use the generic SZ emitter.
function uZ(r, t8, e) {
  const rank = r.length;
  if (rank === 0) return NR();
  if (rank === 1) return yZ(r, t8, e);
  if (rank === 2) return NZ(r, t8, e);
  if (rank === 3) return CZ(r, t8, e);
  return SZ(r, t8, e);
}
// Picks the unpacked getOutputCoords emitter by output rank `r.length`
// (0-6 D); throws for unsupported higher ranks.
function pZ(r, t8, e) {
  const rank = r.length;
  if (rank === 0) return NR();
  if (rank === 1) return bZ(r, t8, e);
  if (rank === 2) return TZ(r, t8, e);
  if (rank === 3) return wZ(r, t8, e);
  if (rank === 4) return IZ(r, t8, e);
  if (rank === 5) return vZ(r, t8);
  if (rank === 6) return kZ(r, t8);
  throw new Error(`${rank}-D output sampling is not yet supported`);
}
// Emits the sampleTexture helper that reads the red channel at a UV, using
// the version-appropriate texture builtin from the glue object `r` (see vt).
function cZ(r) {
  return `
    float sampleTexture(sampler2D textureSampler, vec2 uv) {
      return ${r.texture2D}(textureSampler, uv).r;
    }
  `;
}
// Emits the unpacked setOutput helper: writes a scalar into the red channel
// of the version-appropriate output variable from the glue object `r`.
function lZ(r) {
  return `
    void setOutput(float val) {
      ${r.output} = vec4(val, 0, 0, 0);
    }
  `;
}
// Emits the packed setOutput helper: writes a full vec4 texel to the
// version-appropriate output variable from the glue object `r`.
function mZ(r) {
  return `
    void setOutput(vec4 val) {
      ${r.output} = val;
    }
  `;
}
// Emits the common fragment-shader prologue: precision qualifiers, the
// resultUV varying, ivec5/ivec6 struct shims, NaN/Inf/round polyfills from
// the glue object `r`, integer imod/idiv helpers, a hash-based random(), and
// the shared UV-from-flat-index helpers (fZ/hZ/gZ below).
function dZ(r) {
  return `${r.version}
    precision highp float;
    precision highp int;
    precision highp sampler2D;
    ${r.varyingFs} vec2 resultUV;
    ${r.defineOutput}
    const vec2 halfCR = vec2(0.5, 0.5);
    struct ivec5
    {
      int x;
      int y;
      int z;
      int w;
      int u;
    };
    struct ivec6
    {
      int x;
      int y;
      int z;
      int w;
      int u;
      int v;
    };
    uniform float NAN;
    ${r.defineSpecialNaN}
    ${r.defineSpecialInf}
    ${r.defineRound}
    int imod(int x, int y) {
      return x - y * (x / y);
    }
    int idiv(int a, int b, float sign) {
      int res = a / b;
      int mod = imod(a, b);
      if (sign < 0. && mod != 0) {
        res -= 1;
      }
      return res;
    }
    //Based on the work of Dave Hoskins
    //https://www.shadertoy.com/view/4djSRW
    #define HASHSCALE1 443.8975
    float random(float seed){
      vec2 p = resultUV * seed;
      vec3 p3 = fract(vec3(p.xyx) * HASHSCALE1);
      p3 += dot(p3, p3.yzx + 19.19);
      return fract((p3.x + p3.y) * p3.z);
    }
    ${fZ}
    ${hZ}
    ${gZ}
  `;
}
// Shared GLSL helper: UV lookup from a flat index (unpacked), and the packed
// 1-D variant that addresses 2-value texels.
var fZ = `
  vec2 uvFromFlat(int texNumR, int texNumC, int index) {
    int texR = index / texNumC;
    int texC = index - texR * texNumC;
    return (vec2(texC, texR) + halfCR) / vec2(texNumC, texNumR);
  }
  vec2 packedUVfrom1D(int texNumR, int texNumC, int index) {
    int texelIndex = index / 2;
    int texR = texelIndex / texNumC;
    int texC = texelIndex - texR * texNumC;
    return (vec2(texC, texR) + halfCR) / vec2(texNumC, texNumR);
  }
`;
// Shared GLSL helper: UV lookup for a packed 2-D tensor (2x2-value texels).
var hZ = `
  vec2 packedUVfrom2D(int texelsInLogicalRow, int texNumR,
    int texNumC, int row, int col) {
    int texelIndex = (row / 2) * texelsInLogicalRow + (col / 2);
    int texR = texelIndex / texNumC;
    int texC = texelIndex - texR * texNumC;
    return (vec2(texC, texR) + halfCR) / vec2(texNumC, texNumR);
  }
`;
// Shared GLSL helper: UV lookup for a packed 3-D tensor (batched 2x2 texels).
var gZ = `
  vec2 packedUVfrom3D(int texNumR, int texNumC,
      int texelsInBatch, int texelsInLogicalRow, int b,
      int row, int col) {
    int index = b * texelsInBatch + (row / 2) * texelsInLogicalRow + (col / 2);
    int texR = index / texNumC;
    int texC = index - texR * texNumC;
    return (vec2(texC, texR) + halfCR) / vec2(texNumC, texNumR);
  }
`;
// Shared GLSL helper: selects one scalar channel out of a packed vec4 texel
// based on the parity of the inner coordinates.
var xZ = `
  float getChannel(vec4 frag, vec2 innerDims) {
    vec2 modCoord = mod(innerDims, 2.);
    return modCoord.x == 0. ?
      (modCoord.y == 0. ? frag.r : frag.g) :
      (modCoord.y == 0. ? frag.b : frag.a);
  }
  float getChannel(vec4 frag, int dim) {
    float modCoord = mod(float(dim), 2.);
    return modCoord == 0. ? frag.r : frag.g;
  }
`;
// getOutputCoords for a scalar (rank-0) output: the coordinate is always 0.
function NR() {
  return `
    int getOutputCoords() {
      return 0;
    }
  `;
}
// getOutputCoords for a packed 1-D output. Special-cases single-row and
// single-column texel layouts; `e` switches between compile-time texShape
// constants and the outTexShape uniform.
function yZ(r, t8, e) {
  let o = [Math.ceil(t8[0] / 2), Math.ceil(t8[1] / 2)];
  return o[0] === 1 ? e ? `
    int getOutputCoords() {
      return 2 * int(resultUV.x * ceil(float(outTexShape[1]) / 2.0));
    }
  ` : `
    int getOutputCoords() {
      return 2 * int(resultUV.x * ${o[1]}.0);
    }
  ` : o[1] === 1 ? e ? `
    int getOutputCoords() {
      return 2 * int(resultUV.y * ceil(float(outTexShape[0]) / 2.0));
    }
  ` : `
    int getOutputCoords() {
      return 2 * int(resultUV.y * ${o[0]}.0);
    }
  ` : e ? `
    int getOutputCoords() {
      ivec2 packedTexShape = ivec2(ceil(float(outTexShape[0]) / 2.0), ceil(float(outTexShape[1]) / 2.0));
      ivec2 resTexRC = ivec2(resultUV.yx *
        vec2(packedTexShape[0], packedTexShape[1]));
      return 2 * (resTexRC.x * packedTexShape[1] + resTexRC.y);
    }
  ` : `
    int getOutputCoords() {
      ivec2 resTexRC = ivec2(resultUV.yx *
        vec2(${o[0]}, ${o[1]}));
      return 2 * (resTexRC.x * ${o[1]} + resTexRC.y);
    }
  `;
}
// getOutputCoords for an unpacked 1-D output. Special-cases single-row and
// single-column textures; `e` selects the outTexShape-uniform variant.
function bZ(r, t8, e) {
  return t8[0] === 1 ? e ? `
    int getOutputCoords() {
      return int(resultUV.x * float(outTexShape[1]));
    }
  ` : `
    int getOutputCoords() {
      return int(resultUV.x * ${t8[1]}.0);
    }
  ` : t8[1] === 1 ? e ? `
    int getOutputCoords() {
      return int(resultUV.y * float(outTexShape[0]));
    }
  ` : `
    int getOutputCoords() {
      return int(resultUV.y * ${t8[0]}.0);
    }
  ` : e ? `
    int getOutputCoords() {
      ivec2 resTexRC = ivec2(resultUV.yx *
        vec2(outTexShape[0], outTexShape[1]));
      return resTexRC.x * outTexShape[1] + resTexRC.y;
    }
  ` : `
    int getOutputCoords() {
      ivec2 resTexRC = ivec2(resultUV.yx *
        vec2(${t8[0]}, ${t8[1]}));
      return resTexRC.x * ${t8[1]} + resTexRC.y;
    }
  `;
}
// getOutputCoords for a packed 3-D output: recovers (batch, row, col) from
// the packed texel index. `e` selects the shape/texShape-uniform variant;
// otherwise texel counts are baked in at compile time.
function CZ(r, t8, e) {
  if (e) return `
    ivec3 getOutputCoords() {
      ivec2 packedTexShape = ivec2(ceil(float(outTexShape[0]) / 2.0), ceil(float(outTexShape[1]) / 2.0));
      int texelsInLogicalRow = int(ceil(float(outShape[2]) / 2.0));
      int texelsInBatch = texelsInLogicalRow * int(ceil(float(outShape[1]) / 2.0));
      ivec2 resTexRC = ivec2(resultUV.yx *
        vec2(packedTexShape[0], packedTexShape[1]));
      int index = resTexRC.x * packedTexShape[1] + resTexRC.y;
      int b = index / texelsInBatch;
      index -= b * texelsInBatch;
      int r = 2 * (index / texelsInLogicalRow);
      int c = imod(index, texelsInLogicalRow) * 2;
      return ivec3(b, r, c);
    }
  `;
  let o = [Math.ceil(t8[0] / 2), Math.ceil(t8[1] / 2)],
    n = Math.ceil(r[2] / 2),
    s = n * Math.ceil(r[1] / 2);
  return `
    ivec3 getOutputCoords() {
      ivec2 resTexRC = ivec2(resultUV.yx *
        vec2(${o[0]}, ${o[1]}));
      int index = resTexRC.x * ${o[1]} + resTexRC.y;
      int b = index / ${s};
      index -= b * ${s};
      int r = 2 * (index / ${n});
      int c = imod(index, ${n}) * 2;
      return ivec3(b, r, c);
    }
  `;
}
// getOutputCoords for an unpacked 3-D output: flattens the fragment position
// to an index and decomposes it via Ip (uniform strides, when `e`) or Ws
// (compile-time strides).
function wZ(r, t8, e) {
  if (e) return `
    ivec3 getOutputCoords() {
      ivec2 resTexRC = ivec2(resultUV.yx *
        vec2(outTexShape[0], outTexShape[1]));
      int index = resTexRC.x * outTexShape[1] + resTexRC.y;
      ${Ip(["r", "c", "d"], r)}
      return ivec3(r, c, d);
    }
  `;
  let o = Ws(["r", "c", "d"], r);
  return `
    ivec3 getOutputCoords() {
      ivec2 resTexRC = ivec2(resultUV.yx *
        vec2(${t8[0]}, ${t8[1]}));
      int index = resTexRC.x * ${t8[1]} + resTexRC.y;
      ${o}
      return ivec3(r, c, d);
    }
  `;
}
// getOutputCoords for packed outputs of rank >= 4. The uniform path (`e`)
// handles rank 4 only; the compile-time path builds extra batch-decomposition
// statements for arbitrarily high ranks.
function SZ(r, t8, e) {
  if (e) return `
    ivec4 getOutputCoords() {
      ivec2 packedTexShape = ivec2(ceil(float(outTexShape[0]) / 2.0), ceil(float(outTexShape[1]) / 2.0));
      ivec2 resTexRC = ivec2(resultUV.yx *
        vec2(packedTexShape[0], packedTexShape[1]));
      int index = resTexRC.x * packedTexShape[1] + resTexRC.y;
      int texelsInLogicalRow = int(ceil(float(outShape[3]) / 2.0));
      int texelsInBatch = texelsInLogicalRow * int(ceil(float(outShape[2]) / 2.0));
      int texelsInBatchN = texelsInBatch * outShape[1];
      int b2 = index / texelsInBatchN;
      index -= b2 * texelsInBatchN;
      int b = index / texelsInBatch;
      index -= b * texelsInBatch;
      int r = 2 * (index / texelsInLogicalRow);
      int c = imod(index, texelsInLogicalRow) * 2;
      return ivec4(b2, b, r, c);
    }
  `;
  let o = [Math.ceil(t8[0] / 2), Math.ceil(t8[1] / 2)],
    n = Math.ceil(r[r.length - 1] / 2),
    s = n * Math.ceil(r[r.length - 2] / 2),
    a = s,
    i = "",
    p = "b, r, c";
  // Peel off one leading batch dimension per iteration, accumulating the
  // decomposition statements (`i`) and the coordinate list (`p`).
  for (let u = 2; u < r.length - 1; u++) a *= r[r.length - u - 1], i = `
      int b${u} = index / ${a};
      index -= b${u} * ${a};
    ` + i, p = `b${u}, ` + p;
  return `
    ivec${r.length} getOutputCoords() {
      ivec2 resTexRC = ivec2(resultUV.yx *
        vec2(${o[0]}, ${o[1]}));
      int index = resTexRC.x * ${o[1]} + resTexRC.y;
      ${i}
      int b = index / ${s};
      index -= b * ${s};
      int r = 2 * (index / ${n});
      int c = imod(index, ${n}) * 2;
      return ivec${r.length}(${p});
    }
  `;
}
// getOutputCoords for an unpacked 4-D output: flat index decomposed via Ip
// (uniform strides, when `e`) or Ws (compile-time strides).
function IZ(r, t8, e) {
  if (e) return `
    ivec4 getOutputCoords() {
      ivec2 resTexRC = ivec2(resultUV.yx *
        vec2(outTexShape[0], outTexShape[1]));
      int index = resTexRC.x * outTexShape[1] + resTexRC.y;
      ${Ip(["r", "c", "d", "d2"], r)}
      return ivec4(r, c, d, d2);
    }
  `;
  let o = Ws(["r", "c", "d", "d2"], r);
  return `
    ivec4 getOutputCoords() {
      ivec2 resTexRC = ivec2(resultUV.yx *
        vec2(${t8[0]}, ${t8[1]}));
      int index = resTexRC.x * ${t8[1]} + resTexRC.y;
      ${o}
      return ivec4(r, c, d, d2);
    }
  `;
}
// getOutputCoords for an unpacked 5-D output (compile-time strides only; no
// shape-uniform variant exists at this rank). Uses the ivec5 struct shim.
function vZ(r, t8) {
  let e = Ws(["r", "c", "d", "d2", "d3"], r);
  return `
    ivec5 getOutputCoords() {
      ivec2 resTexRC = ivec2(resultUV.yx * vec2(${t8[0]},
                             ${t8[1]}));
      int index = resTexRC.x * ${t8[1]} + resTexRC.y;
      ${e}
      ivec5 outShape = ivec5(r, c, d, d2, d3);
      return outShape;
    }
  `;
}
// getOutputCoords for an unpacked 6-D output (compile-time strides only).
// Uses the ivec6 struct shim.
function kZ(r, t8) {
  let e = Ws(["r", "c", "d", "d2", "d3", "d4"], r);
  return `
    ivec6 getOutputCoords() {
      ivec2 resTexRC = ivec2(resultUV.yx *
        vec2(${t8[0]}, ${t8[1]}));
      int index = resTexRC.x * ${t8[1]} + resTexRC.y;
      ${e}
      ivec6 result = ivec6(r, c, d, d2, d3, d4);
      return result;
    }
  `;
}
// getOutputCoords for a packed 2-D output. When the logical shape equals the
// texture shape the texel coordinates map directly; otherwise the packed
// texel index is decomposed into (row, col). `e` selects uniforms.
function NZ(r, t8, e) {
  let o = [Math.ceil(t8[0] / 2), Math.ceil(t8[1] / 2)];
  if (y.arraysEqual(r, t8)) return e ? `
    ivec2 getOutputCoords() {
      ivec2 packedTexShape = ivec2(ceil(float(outTexShape[0]) / 2.0), ceil(float(outTexShape[1]) / 2.0));
      return 2 * ivec2(resultUV.yx * vec2(packedTexShape[0], packedTexShape[1]));
    }
  ` : `
    ivec2 getOutputCoords() {
      return 2 * ivec2(resultUV.yx * vec2(${o[0]}, ${o[1]}));
    }
  `;
  let n = Math.ceil(r[1] / 2);
  return e ? `
    ivec2 getOutputCoords() {
      ivec2 packedTexShape = ivec2(ceil(float(outTexShape[0]) / 2.0), ceil(float(outTexShape[1]) / 2.0));
      int texelsInLogicalRow = int(ceil(float(outShape[1]) / 2.0));
      ivec2 resTexRC = ivec2(resultUV.yx *
        vec2(packedTexShape[0], packedTexShape[1]));
      int index = resTexRC.x * packedTexShape[1] + resTexRC.y;
      int r = 2 * (index / texelsInLogicalRow);
      int c = imod(index, texelsInLogicalRow) * 2;
      return ivec2(r, c);
    }
  ` : `
    ivec2 getOutputCoords() {
      ivec2 resTexRC = ivec2(resultUV.yx *
        vec2(${o[0]}, ${o[1]}));
      int index = resTexRC.x * ${o[1]} + resTexRC.y;
      int r = 2 * (index / ${n});
      int c = imod(index, ${n}) * 2;
      return ivec2(r, c);
    }
  `;
}
// getOutputCoords for an unpacked 2-D output. Fast paths: logical shape equals
// texture shape (direct mapping), single-column output, single-row output;
// otherwise a general flat-index decomposition. `e` selects uniforms.
function TZ(r, t8, e) {
  return y.arraysEqual(r, t8) ? e ? `
    ivec2 getOutputCoords() {
      return ivec2(resultUV.yx * vec2(outTexShape[0], outTexShape[1]));
    }
  ` : `
    ivec2 getOutputCoords() {
      return ivec2(resultUV.yx * vec2(${t8[0]}, ${t8[1]}));
    }
  ` : r[1] === 1 ? e ? `
    ivec2 getOutputCoords() {
      ivec2 resTexRC = ivec2(resultUV.yx *
        vec2(outTexShape[0], outTexShape[1]));
      int index = resTexRC.x * outTexShape[1] + resTexRC.y;
      return ivec2(index, 0);
    }
  ` : `
    ivec2 getOutputCoords() {
      ivec2 resTexRC = ivec2(resultUV.yx *
        vec2(${t8[0]}, ${t8[1]}));
      int index = resTexRC.x * ${t8[1]} + resTexRC.y;
      return ivec2(index, 0);
    }
  ` : r[0] === 1 ? e ? `
    ivec2 getOutputCoords() {
      ivec2 resTexRC = ivec2(resultUV.yx *
        vec2(outTexShape[0], outTexShape[1]));
      int index = resTexRC.x * outTexShape[1] + resTexRC.y;
      return ivec2(0, index);
    }
  ` : `
    ivec2 getOutputCoords() {
      ivec2 resTexRC = ivec2(resultUV.yx *
        vec2(${t8[0]}, ${t8[1]}));
      int index = resTexRC.x * ${t8[1]} + resTexRC.y;
      return ivec2(0, index);
    }
  ` : e ? `
    ivec2 getOutputCoords() {
      ivec2 resTexRC = ivec2(resultUV.yx *
        vec2(outTexShape[0], outTexShape[1]));
      int index = resTexRC.x * outTexShape[1] + resTexRC.y;
      int r = index / outShape[1];
      int c = index - r * outShape[1];
      return ivec2(r, c);
    }
  ` : `
    ivec2 getOutputCoords() {
      ivec2 resTexRC = ivec2(resultUV.yx *
        vec2(${t8[0]}, ${t8[1]}));
      int index = resTexRC.x * ${t8[1]} + resTexRC.y;
      int r = index / ${r[1]};
      int c = index - r * ${r[1]};
      return ivec2(r, c);
    }
  `;
}
// Name of the flat-offset uniform declared for the input texture named `r`
// (matches the `uniform int offset<name>;` declarations emitted in vR).
function vp(r) {
  const uniformName = `offset${r}`;
  return uniformName;
}
// Packed sampler for a scalar input: a get<Name>() function that reads the
// single texel at the texture center (halfCR).
function _Z(r) {
  let t8 = r.name,
    e = "get" + t8.charAt(0).toUpperCase() + t8.slice(1),
    o = vt();
  return `
    vec4 ${e}() {
      return ${o.texture2D}(${t8}, halfCR);
    }
  `;
}
// Unpacked sampler for a scalar input. Uniform inputs return the value
// directly; 1x1 textures read the center texel; otherwise the flat-offset
// uniform is converted to a UV (texShape from uniforms when `t8`, else baked).
function $Z(r, t8) {
  let e = r.name,
    o = "get" + e.charAt(0).toUpperCase() + e.slice(1);
  if (r.shapeInfo.isUniform) return `float ${o}() {return ${e};}`;
  let [n, s] = r.shapeInfo.texShape;
  if (n === 1 && s === 1) return `
    float ${o}() {
      return sampleTexture(${e}, halfCR);
    }
  `;
  let a = vp(e);
  if (t8) return `
    float ${o}() {
      vec2 uv = uvFromFlat(${e}TexShape[0], ${e}TexShape[1], ${a});
      return sampleTexture(${e}, uv);
    }
  `;
  let [i, p] = r.shapeInfo.texShape;
  return `
    float ${o}() {
      vec2 uv = uvFromFlat(${i}, ${p}, ${a});
      return sampleTexture(${e}, uv);
    }
  `;
}
// Packed sampler for a 1-D input: get<Name>(index) returns the vec4 texel
// containing that element, addressed via packedUVfrom1D. `t8` selects the
// texShape-uniform variant over baked-in texel counts.
function EZ(r, t8) {
  let e = r.name,
    o = "get" + e.charAt(0).toUpperCase() + e.slice(1),
    n = r.shapeInfo.texShape,
    s = vt();
  if (t8) return `
    vec4 ${o}(int index) {
      ivec2 packedTexShape = ivec2(ceil(float(${e}TexShape[0]) / 2.0), ceil(float(${e}TexShape[1]) / 2.0));
      vec2 uv = packedUVfrom1D(
        packedTexShape[0], packedTexShape[1], index);
      return ${s.texture2D}(${e}, uv);
    }
  `;
  let a = [Math.ceil(n[0] / 2), Math.ceil(n[1] / 2)];
  return `
    vec4 ${o}(int index) {
      vec2 uv = packedUVfrom1D(
        ${a[0]}, ${a[1]}, index);
      return ${s.texture2D}(${e}, uv);
    }
  `;
}
// Unpacked sampler for a 1-D input. Uniform inputs delegate to Lc; 1x1
// textures read the center; single-column/single-row textures compute the UV
// along one axis; the general case uses uvFromFlat. Every texture path adds
// the input's flat-offset uniform (vp). `t8` selects texShape uniforms.
function RZ(r, t8) {
  let e = r.name,
    o = "get" + e.charAt(0).toUpperCase() + e.slice(1);
  if (r.shapeInfo.isUniform) return `
    float ${o}(int index) {
      ${Lc(r)}
    }
  `;
  let n = r.shapeInfo.texShape,
    s = n[0],
    a = n[1];
  if (a === 1 && s === 1) return `
    float ${o}(int index) {
      return sampleTexture(${e}, halfCR);
    }
  `;
  let i = vp(e);
  return a === 1 ? t8 ? `
    float ${o}(int index) {
      vec2 uv = vec2(0.5, (float(index + ${i}) + 0.5) / float(${e}TexShape[0]));
      return sampleTexture(${e}, uv);
    }
  ` : `
    float ${o}(int index) {
      vec2 uv = vec2(0.5, (float(index + ${i}) + 0.5) / ${s}.0);
      return sampleTexture(${e}, uv);
    }
  ` : s === 1 ? t8 ? `
    float ${o}(int index) {
      vec2 uv = vec2((float(index + ${i}) + 0.5) / float(${e}TexShape[1]), 0.5);
      return sampleTexture(${e}, uv);
    }
  ` : `
    float ${o}(int index) {
      vec2 uv = vec2((float(index + ${i}) + 0.5) / ${a}.0, 0.5);
      return sampleTexture(${e}, uv);
    }
  ` : t8 ? `
    float ${o}(int index) {
      vec2 uv = uvFromFlat(${e}TexShape[0], ${e}TexShape[1], index + ${i});
      return sampleTexture(${e}, uv);
    }
  ` : `
    float ${o}(int index) {
      vec2 uv = uvFromFlat(${s}, ${a}, index + ${i});
      return sampleTexture(${e}, uv);
    }
  `;
}
// Packed sampler for a 2-D input: get<Name>(row, col) returns the vec4 texel.
// When the logical shape equals the texture shape the coordinates map
// directly; otherwise the packed texel is addressed via packedUVfrom2D.
// `t8` selects shape/texShape uniforms over baked-in constants.
function DZ(r, t8) {
  let e = r.shapeInfo.logicalShape,
    o = r.name,
    n = "get" + o.charAt(0).toUpperCase() + o.slice(1),
    s = r.shapeInfo.texShape,
    a = s[0],
    i = s[1],
    p = vt();
  if (s != null && y.arraysEqual(e, s)) return t8 ? `
    vec4 ${n}(int row, int col) {
      vec2 uv = (vec2(col, row) + halfCR) / vec2(${o}TexShape[1], ${o}TexShape[0]);
      return ${p.texture2D}(${o}, uv);
    }
  ` : `
    vec4 ${n}(int row, int col) {
      vec2 uv = (vec2(col, row) + halfCR) / vec2(${i}.0, ${a}.0);
      return ${p.texture2D}(${o}, uv);
    }
  `;
  if (t8) return `
    vec4 ${n}(int row, int col) {
      ivec2 packedTexShape = ivec2(ceil(float(${o}TexShape[0]) / 2.0), ceil(float(${o}TexShape[1]) / 2.0));
      int valuesPerRow = int(ceil(float(${o}Shape[1]) / 2.0));
      vec2 uv = packedUVfrom2D(valuesPerRow, packedTexShape[0], packedTexShape[1], row, col);
      return ${p.texture2D}(${o}, uv);
    }
  `;
  let u = [Math.ceil(s[0] / 2), Math.ceil(s[1] / 2)],
    c = Math.ceil(e[1] / 2);
  return `
    vec4 ${n}(int row, int col) {
      vec2 uv = packedUVfrom2D(${c}, ${u[0]}, ${u[1]}, row, col);
      return ${p.texture2D}(${o}, uv);
    }
  `;
}
// Emits GLSL for an unpacked 2-D sampler `get<Name>(row, col)` returning a
// single float. Chooses among several uv-computation strategies depending on
// how the logical shape relates to the physical texture shape. `t8` selects
// shape-uniform codegen (shapes read from uniforms instead of baked literals).
function AZ(r, t8) {
  let e = r.shapeInfo.logicalShape,
    o = r.name,
    n = "get" + o.charAt(0).toUpperCase() + o.slice(1),
    s = r.shapeInfo.texShape;
  // Fast path: logical shape equals texture shape — direct texel addressing.
  if (s != null && y.arraysEqual(e, s)) {
    if (t8) return `
    float ${n}(int row, int col) {
      vec2 uv = (vec2(col, row) + halfCR) / vec2(${o}TexShape[1], ${o}TexShape[0]);
      return sampleTexture(${o}, uv);
    }
  `;
    let m = s[0],
      d = s[1];
    return `
    float ${n}(int row, int col) {
      vec2 uv = (vec2(col, row) + halfCR) / vec2(${d}.0, ${m}.0);
      return sampleTexture(${o}, uv);
    }
  `;
  }
  // If squeezing removes size-1 dims, delegate to a lower-rank sampler and
  // emit a thin forwarding wrapper.
  let {
    newShape: a,
    keptDims: i
  } = y.squeezeShape(e),
    p = a;
  if (p.length < e.length) {
    let m = Bc(r, p),
      d = ["row", "col"];
    return `
      ${Mc(m, t8)}
      float ${n}(int row, int col) {
        return ${n}(${zc(d, i)});
      }
    `;
  }
  // Uniform-array inputs: flatten (row, col) to an index and scan the array.
  if (r.shapeInfo.isUniform) return `
    float ${n}(int row, int col) {
      int index = round(dot(vec2(row, col), vec2(${e[1]}, 1)));
      ${Lc(r)}
    }
  `;
  let u = s[0],
    c = s[1],
    l = vp(o); // flat-offset expression for sliced tensors (defined elsewhere)
  // Degenerate single-column / single-row textures use a 1-D uv along the
  // non-trivial axis; otherwise fall through to generic flat indexing.
  return c === 1 ? t8 ? `
    float ${n}(int row, int col) {
      float index = dot(vec3(row, col, ${l}), vec3(${o}Shape[1], 1, 1));
      vec2 uv = vec2(0.5, (index + 0.5) / float(${o}TexShape[0]));
      return sampleTexture(${o}, uv);
    }
  ` : `
    float ${n}(int row, int col) {
      float index = dot(vec3(row, col, ${l}), vec3(${e[1]}, 1, 1));
      vec2 uv = vec2(0.5, (index + 0.5) / ${u}.0);
      return sampleTexture(${o}, uv);
    }
  ` : u === 1 ? t8 ? `
    float ${n}(int row, int col) {
      float index = dot(vec3(row, col, ${l}), vec3(${o}Shape[1], 1, 1));
      vec2 uv = vec2((index + 0.5) / float(${o}TexShape[1]), 0.5);
      return sampleTexture(${o}, uv);
    }
  ` : `
    float ${n}(int row, int col) {
      float index = dot(vec3(row, col, ${l}), vec3(${e[1]}, 1, 1));
      vec2 uv = vec2((index + 0.5) / ${c}.0, 0.5);
      return sampleTexture(${o}, uv);
    }
  ` : t8 ? `
    float ${n}(int row, int col) {
      // Explicitly use integer operations as dot() only works on floats.
      int index = row * ${o}Shape[1] + col + ${l};
      vec2 uv = uvFromFlat(${o}TexShape[0], ${o}TexShape[1], index);
      return sampleTexture(${o}, uv);
    }
  ` : `
    float ${n}(int row, int col) {
      // Explicitly use integer operations as dot() only works on floats.
      int index = row * ${e[1]} + col + ${l};
      vec2 uv = uvFromFlat(${u}, ${c}, index);
      return sampleTexture(${o}, uv);
    }
  `;
}
// Emits GLSL for a packed 3-D sampler `get<Name>(b, row, col)` returning a
// vec4 texel. A leading batch of 1 is squeezed away and forwarded to the
// packed 2-D sampler. `t8` selects shape-uniform codegen.
function FZ(r, t8) {
  let e = r.shapeInfo.logicalShape,
    o = r.name,
    n = "get" + o.charAt(0).toUpperCase() + o.slice(1),
    s = r.shapeInfo.texShape,
    a = [Math.ceil(s[0] / 2), Math.ceil(s[1] / 2)];
  // Batch of one: delegate to the rank-2 packed sampler (dims 1 and 2 kept).
  if (e[0] === 1) {
    let m = e.slice(1),
      d = [1, 2],
      f = Bc(r, m),
      h = ["b", "row", "col"];
    return `
        ${kR(f, t8)}
        vec4 ${n}(int b, int row, int col) {
          return ${n}(${zc(h, d)});
        }
      `;
  }
  let i = vt(); // GLSL-dialect helpers (texture2D name etc.)
  // Shape-uniform variant: compute packed dims in the shader.
  if (t8) return `
    vec4 ${n}(int b, int row, int col) {
      ivec2 packedTexShape = ivec2(ceil(float(${o}TexShape[0]) / 2.0), ceil(float(${o}TexShape[1]) / 2.0));
      int valuesPerRow = int(ceil(float(${o}Shape[2]) / 2.0));
      int texelsInBatch = valuesPerRow * int(ceil(float(${o}Shape[1]) / 2.0));
      vec2 uv = packedUVfrom3D(
        packedTexShape[0], packedTexShape[1], texelsInBatch, valuesPerRow, b, row, col);
      return ${i.texture2D}(${o}, uv);
    }
  `;
  // Literal variant: packed dims precomputed in JS.
  let p = a[0],
    u = a[1],
    c = Math.ceil(e[2] / 2),
    l = c * Math.ceil(e[1] / 2);
  return `
    vec4 ${n}(int b, int row, int col) {
      vec2 uv = packedUVfrom3D(
        ${p}, ${u}, ${l}, ${c}, b, row, col);
      return ${i.texture2D}(${o}, uv);
    }
  `;
}
// Emits GLSL for an unpacked 3-D sampler `get<Name>(row, col, depth)`.
// Picks a uv strategy based on how the texture row length lines up with the
// tensor strides. `t8` selects shape-uniform codegen.
function PZ(r, t8) {
  let e = r.shapeInfo.logicalShape,
    o = r.name,
    n = "get" + o.charAt(0).toUpperCase() + o.slice(1),
    s = e[1] * e[2],
    a = e[2],
    {
      newShape: i,
      keptDims: p
    } = y.squeezeShape(e),
    u = i;
  // Squeeze out size-1 dims and forward to a lower-rank sampler.
  if (u.length < e.length) {
    let h = Bc(r, u),
      g = ["row", "col", "depth"];
    return `
        ${Mc(h, t8)}
        float ${n}(int row, int col, int depth) {
          return ${n}(${zc(g, p)});
        }
      `;
  }
  // Uniform-array inputs: flatten coords and scan the uniform array.
  if (r.shapeInfo.isUniform) return `
    float ${n}(int row, int col, int depth) {
      int index = round(dot(vec3(row, col, depth),
                        vec3(${s}, ${a}, 1)));
      ${Lc(r)}
    }
  `;
  let c = r.shapeInfo.texShape,
    l = c[0],
    m = c[1],
    d = r.shapeInfo.flatOffset;
  // Texture row == stride0: one tensor row per texture row.
  if (m === s && d == null) return t8 ? `
  float ${n}(int row, int col, int depth) {
    int stride1 = ${o}Shape[2];
    float texR = float(row);
    float texC = dot(vec2(col, depth), vec2(stride1, 1));
    vec2 uv = (vec2(texC, texR) + halfCR) /
               vec2(${o}TexShape[1], ${o}TexShape[0]);
    return sampleTexture(${o}, uv);
  }
  ` : `
    float ${n}(int row, int col, int depth) {
      float texR = float(row);
      float texC = dot(vec2(col, depth), vec2(${a}, 1));
      vec2 uv = (vec2(texC, texR) + halfCR) /
                 vec2(${m}.0, ${l}.0);
      return sampleTexture(${o}, uv);
    }
  `;
  // Texture row == innermost dim: one (row, col) slice per texture row.
  if (m === a && d == null) return t8 ? `
  float ${n}(int row, int col, int depth) {
    float texR = dot(vec2(row, col), vec2(${o}Shape[1], 1));
    float texC = float(depth);
    vec2 uv = (vec2(texC, texR) + halfCR) / vec2(${o}TexShape[1], ${o}TexShape[0]);
    return sampleTexture(${o}, uv);
  }
  ` : `
  float ${n}(int row, int col, int depth) {
    float texR = dot(vec2(row, col), vec2(${e[1]}, 1));
    float texC = float(depth);
    vec2 uv = (vec2(texC, texR) + halfCR) / vec2(${m}.0, ${l}.0);
    return sampleTexture(${o}, uv);
  }
  `;
  // Generic case: flat index (with slice offset) converted via uvFromFlat.
  let f = vp(o);
  return t8 ? `
  float ${n}(int row, int col, int depth) {
    // Explicitly use integer operations as dot() only works on floats.
    int stride0 = ${o}Shape[1] * ${o}Shape[2];
    int stride1 = ${o}Shape[2];
    int index = row * stride0 + col * stride1 + depth + ${f};
    vec2 uv = uvFromFlat(${o}TexShape[0], ${o}TexShape[1], index);
    return sampleTexture(${o}, uv);
    }
  ` : `
      float ${n}(int row, int col, int depth) {
        // Explicitly use integer operations as dot() only works on floats.
        int index = row * ${s} + col * ${a} + depth + ${f};
        vec2 uv = uvFromFlat(${l}, ${m}, index);
        return sampleTexture(${o}, uv);
      }
  `;
}
// Emits GLSL for a packed N-D (rank >= 4) sampler. The shape-uniform variant
// is fixed at rank 4 (`b2, b, row, col`); the literal variant grows the
// parameter list (`b2, b3, ...`) to match the input's rank.
function OZ(r, t8) {
  let e = r.name,
    o = "get" + e.charAt(0).toUpperCase() + e.slice(1),
    n = vt(); // GLSL-dialect helpers (texture2D name etc.)
  if (t8) return `
    vec4 ${o}(int b2, int b, int row, int col) {
      int valuesPerRow = int(ceil(float(${e}Shape[3]) / 2.0));
      int texelsInBatch = valuesPerRow * int(ceil(float(${e}Shape[2]) / 2.0));
      int index = b * texelsInBatch + (row / 2) * valuesPerRow + (col / 2);
      texelsInBatch *= ${e}Shape[1];
      index = b2 * texelsInBatch + index;
      ivec2 packedTexShape = ivec2(ceil(float(${e}TexShape[0]) / 2.0), ceil(float(${e}TexShape[1]) / 2.0));
      int texR = index / packedTexShape[1];
      int texC = index - texR * packedTexShape[1];
      vec2 uv = (vec2(texC, texR) + halfCR) / vec2(packedTexShape[1], packedTexShape[0]); return ${n.texture2D}(${e}, uv);
    }
  `;
  let s = r.shapeInfo.logicalShape,
    a = s.length,
    i = r.shapeInfo.texShape,
    p = [Math.ceil(i[0] / 2), Math.ceil(i[1] / 2)],
    u = p[0],
    c = p[1],
    l = Math.ceil(s[a - 1] / 2),
    m = l * Math.ceil(s[a - 2] / 2),
    d = "int b, int row, int col",
    f = `b * ${m} + (row / 2) * ${l} + (col / 2)`;
  // Prepend one `b<h>` parameter (and its stride term) per extra batch dim.
  for (let h = 2; h < a - 1; h++) d = `int b${h}, ` + d, m *= s[a - h - 1], f = `b${h} * ${m} + ` + f;
  return `
    vec4 ${o}(${d}) {
      int index = ${f};
      int texR = index / ${c};
      int texC = index - texR * ${c};
      vec2 uv = (vec2(texC, texR) + halfCR) / vec2(${c}, ${u});
      return ${n.texture2D}(${e}, uv);
    }
  `;
}
// Emits GLSL for an unpacked 4-D sampler
// `get<Name>(row, col, depth, depth2)`. Mirrors the rank-3 generator with an
// extra stride level. `t8` selects shape-uniform codegen.
function MZ(r, t8) {
  let e = r.shapeInfo.logicalShape,
    o = r.name,
    n = "get" + o.charAt(0).toUpperCase() + o.slice(1),
    s = e[3],
    a = e[2] * s,
    i = e[1] * a,
    {
      newShape: p,
      keptDims: u
    } = y.squeezeShape(e);
  // Squeeze out size-1 dims and forward to a lower-rank sampler.
  if (p.length < e.length) {
    let b = Bc(r, p),
      C = ["row", "col", "depth", "depth2"];
    return `
      ${Mc(b, t8)}
      float ${n}(int row, int col, int depth, int depth2) {
        return ${n}(${zc(C, u)});
      }
    `;
  }
  // Uniform-array inputs: flatten coords and scan the uniform array.
  if (r.shapeInfo.isUniform) return `
    float ${n}(int row, int col, int depth, int depth2) {
      int index = round(dot(vec4(row, col, depth, depth2),
                        vec4(${i}, ${a}, ${s}, 1)));
      ${Lc(r)}
    }
  `;
  let c = r.shapeInfo.flatOffset,
    l = r.shapeInfo.texShape,
    m = l[0],
    d = l[1],
    f = `int stride2 = ${o}Shape[3];`,
    h = `int stride1 = ${o}Shape[2] * stride2;`,
    g = `int stride0 = ${o}Shape[1] * stride1;`;
  // Texture row == stride0: one outermost slice per texture row.
  if (d === i && c == null) return t8 ? `
  float ${n}(int row, int col, int depth, int depth2) {
    ${f}
    ${h}
    float texR = float(row);
    float texC =
        dot(vec3(col, depth, depth2),
            vec3(stride1, stride2, 1));
    vec2 uv = (vec2(texC, texR) + halfCR) /
               vec2(${o}TexShape[1], ${o}TexShape[0]);
    return sampleTexture(${o}, uv);
  }
  ` : `
    float ${n}(int row, int col, int depth, int depth2) {
      float texR = float(row);
      float texC =
          dot(vec3(col, depth, depth2),
              vec3(${a}, ${s}, 1));
      vec2 uv = (vec2(texC, texR) + halfCR) /
                vec2(${d}.0, ${m}.0);
      return sampleTexture(${o}, uv);
    }
  `;
  // Texture row == innermost dim: depth2 indexes the texture column.
  if (d === s && c == null) return t8 ? `
  float ${n}(int row, int col, int depth, int depth2) {
    float texR = dot(vec3(row, col, depth),
                     vec3(${o}Shape[1] * ${o}Shape[2], ${o}Shape[2], 1));
    float texC = float(depth2);
    vec2 uv = (vec2(texC, texR) + halfCR) /
              vec2(${o}TexShape[1], ${o}TexShape[0]);
    return sampleTexture(${o}, uv);
  }
  ` : `
    float ${n}(int row, int col, int depth, int depth2) {
      float texR = dot(vec3(row, col, depth),
                       vec3(${e[1] * e[2]}, ${e[2]}, 1));
      float texC = float(depth2);
      vec2 uv = (vec2(texC, texR) + halfCR) /
                vec2(${d}.0, ${m}.0);
      return sampleTexture(${o}, uv);
    }
  `;
  // Generic case: flat index (with slice offset) via uvFromFlat.
  let x = vp(o);
  return t8 ? `
  float ${n}(int row, int col, int depth, int depth2) {
    // Explicitly use integer operations as dot() only works on floats.
    ${f}
    ${h}
    ${g}
    int index = row * stride0 + col * stride1 +
        depth * stride2 + depth2;
    vec2 uv = uvFromFlat(${o}TexShape[0], ${o}TexShape[1], index + ${x});
    return sampleTexture(${o}, uv);
  }
  ` : `
    float ${n}(int row, int col, int depth, int depth2) {
      // Explicitly use integer operations as dot() only works on floats.
      int index = row * ${i} + col * ${a} +
          depth * ${s} + depth2;
      vec2 uv = uvFromFlat(${m}, ${d}, index + ${x});
      return sampleTexture(${o}, uv);
    }
  `;
}
// Emits GLSL for an unpacked 5-D sampler
// `get<Name>(row, col, depth, depth2, depth3)`. Note: unlike the lower-rank
// generators this one takes no shape-uniform flag — shapes are always baked
// in as literals.
function LZ(r) {
  let t8 = r.shapeInfo.logicalShape,
    e = r.name,
    o = "get" + e.charAt(0).toUpperCase() + e.slice(1),
    n = t8[4],
    s = t8[3] * n,
    a = t8[2] * s,
    i = t8[1] * a,
    {
      newShape: p,
      keptDims: u
    } = y.squeezeShape(t8);
  // Squeeze out size-1 dims and forward to a lower-rank sampler.
  if (p.length < t8.length) {
    let h = Bc(r, p),
      g = ["row", "col", "depth", "depth2", "depth3"];
    return `
      ${Mc(h)}
      float ${o}(int row, int col, int depth, int depth2, int depth3) {
        return ${o}(${zc(g, u)});
      }
    `;
  }
  // Uniform-array inputs. NOTE(review): this computes `float index` without
  // round(), unlike the 4-D/6-D generators — presumably relies on exact
  // float arithmetic for small indices; confirm against upstream.
  if (r.shapeInfo.isUniform) return `
    float ${o}(int row, int col, int depth, int depth2, int depth3) {
      float index = dot(
        vec4(row, col, depth, depth2),
        vec4(${i}, ${a}, ${s}, ${n})) +
        depth3;
      ${Lc(r)}
    }
  `;
  let c = r.shapeInfo.flatOffset,
    l = r.shapeInfo.texShape,
    m = l[0],
    d = l[1];
  // Texture row == stride0: one outermost slice per texture row.
  if (d === i && c == null) return `
    float ${o}(int row, int col, int depth, int depth2, int depth3) {
      int texR = row;
      float texC = dot(vec4(col, depth, depth2, depth3),
                       vec4(${a}, ${s}, ${n}, 1));
      vec2 uv = (vec2(texC, texR) + halfCR) /
                vec2(${d}.0, ${m}.0);
      return sampleTexture(${e}, uv);
    }
  `;
  // Texture row == innermost dim: depth3 indexes the texture column.
  if (d === n && c == null) return `
    float ${o}(int row, int col, int depth, int depth2, int depth3) {
      float texR = dot(
        vec4(row, col, depth, depth2),
        vec4(${t8[1] * t8[2] * t8[3]},
             ${t8[2] * t8[3]}, ${t8[3]}, 1));
      int texC = depth3;
      vec2 uv = (vec2(texC, texR) + halfCR) /
                vec2(${d}.0, ${m}.0);
      return sampleTexture(${e}, uv);
    }
  `;
  // Generic case: flat index (with slice offset) via uvFromFlat.
  let f = vp(e);
  return `
    float ${o}(int row, int col, int depth, int depth2, int depth3) {
      // Explicitly use integer operations as dot() only works on floats.
      int index = row * ${i} + col * ${a} + depth * ${s} +
          depth2 * ${n} + depth3 + ${f};
      vec2 uv = uvFromFlat(${m}, ${d}, index);
      return sampleTexture(${e}, uv);
    }
  `;
}
// Emits GLSL for an unpacked 6-D sampler
// `get<Name>(row, col, depth, depth2, depth3, depth4)`. Like the 5-D
// generator, shapes are always baked in as literals (no shape-uniform flag).
function BZ(r) {
  let t8 = r.shapeInfo.logicalShape,
    e = r.name,
    o = "get" + e.charAt(0).toUpperCase() + e.slice(1),
    {
      newShape: n,
      keptDims: s
    } = y.squeezeShape(t8);
  // Squeeze out size-1 dims and forward to a lower-rank sampler.
  if (n.length < t8.length) {
    let g = Bc(r, n),
      x = ["row", "col", "depth", "depth2", "depth3", "depth4"];
    return `
      ${Mc(g)}
      float ${o}(int row, int col, int depth,
                 int depth2, int depth3, int depth4) {
        return ${o}(${zc(x, s)});
      }
    `;
  }
  // Strides for dims 4..0 (innermost to outermost).
  let a = t8[5],
    i = t8[4] * a,
    p = t8[3] * i,
    u = t8[2] * p,
    c = t8[1] * u;
  // Uniform-array inputs: flatten coords with two dot products.
  if (r.shapeInfo.isUniform) return `
    float ${o}(int row, int col, int depth,
               int depth2, int depth3, int depth4) {
      int index = round(dot(
        vec4(row, col, depth, depth2),
        vec4(${c}, ${u}, ${p}, ${i})) +
        dot(
          vec2(depth3, depth4),
          vec2(${a}, 1)));
      ${Lc(r)}
    }
  `;
  let l = r.shapeInfo.flatOffset,
    m = r.shapeInfo.texShape,
    d = m[0],
    f = m[1];
  // Texture row == stride0: one outermost slice per texture row.
  if (f === c && l == null) return `
    float ${o}(int row, int col, int depth,
               int depth2, int depth3, int depth4) {
      int texR = row;
      float texC = dot(vec4(col, depth, depth2, depth3),
        vec4(${u}, ${p}, ${i}, ${a})) +
             float(depth4);
      vec2 uv = (vec2(texC, texR) + halfCR) /
                 vec2(${f}.0, ${d}.0);
      return sampleTexture(${e}, uv);
    }
  `;
  // Texture row == innermost dim: depth4 indexes the texture column.
  if (f === a && l == null) return `
    float ${o}(int row, int col, int depth,
               int depth2, int depth3, int depth4) {
      float texR = dot(vec4(row, col, depth, depth2),
        vec4(${t8[1] * t8[2] * t8[3] * t8[4]},
             ${t8[2] * t8[3] * t8[4]},
             ${t8[3] * t8[4]},
             ${t8[4]})) + float(depth3);
      int texC = depth4;
      vec2 uv = (vec2(texC, texR) + halfCR) /
                vec2(${f}.0, ${d}.0);
      return sampleTexture(${e}, uv);
    }
  `;
  // Generic case: flat index (with slice offset) via uvFromFlat.
  let h = vp(e);
  return `
    float ${o}(int row, int col, int depth,
               int depth2, int depth3, int depth4) {
      // Explicitly use integer operations as dot() only works on floats.
      int index = row * ${c} + col * ${u} + depth * ${p} +
          depth2 * ${i} + depth3 * ${a} + depth4 + ${h};
      vec2 uv = uvFromFlat(${d}, ${f}, index);
      return sampleTexture(${e}, uv);
    }
  `;
}
// Builds the GLSL snippet that reads element `index` out of a uniform-backed
// input. Scalars are returned directly; larger inputs are linear-scanned
// because GLSL ES 1.0 forbids dynamic indexing of uniform arrays.
function Lc(r) {
  const uniformName = r.name;
  const numElements = y.sizeFromShape(r.shapeInfo.logicalShape);
  if (numElements < 2) {
    return `return ${uniformName};`;
  }
  return `
    for (int i = 0; i < ${numElements}; i++) {
      if (i == index) {
        return ${uniformName}[i];
      }
    }
  `;
}
// Emits GLSL for a packed `get<Name>AtOutCoords()` that samples input `r` at
// the output's coordinates (`t8` is the output shape info), zeroing out
// broadcast dimensions and fixing up the returned vec4 channels when the
// input's packing layout differs from the output's due to broadcasting.
function zZ(r, t8) {
  let e = r.name,
    o = e.charAt(0).toUpperCase() + e.slice(1),
    n = "get" + o + "AtOutCoords",
    s = r.shapeInfo.logicalShape.length,
    a = t8.logicalShape.length,
    i = IR(r.shapeInfo.logicalShape, t8.logicalShape), // broadcast dims (helper defined elsewhere)
    p = Re(a),
    u = a - s,
    c,
    l = ["x", "y", "z", "w", "u", "v"];
  // Zero the output coords along broadcast dims before sampling the input.
  s === 0 ? c = "" : a < 2 && i.length >= 1 ? c = "coords = 0;" : c = i.map(b => `coords.${l[b + u]} = 0;`).join(`
`);
  let m = "";
  // Argument list for get<Name>: input coords taken from the trailing
  // components of the output coords.
  a < 2 && s > 0 ? m = "coords" : m = r.shapeInfo.logicalShape.map((b, C) => `coords.${l[C + u]}`).join(", ");
  let d = "return outputValue;",
    h = y.sizeFromShape(r.shapeInfo.logicalShape) === 1,
    x = y.sizeFromShape(t8.logicalShape) === 1;
  // Channel fix-ups: duplicate/splat components when broadcasting crosses a
  // packed 2x2 texel boundary (last two dims).
  if (s === 1 && !h && !x) d = `
      return vec4(outputValue.xy, outputValue.xy);
    `;else if (h && !x) a === 1 ? d = `
      return vec4(outputValue.x, outputValue.x, 0., 0.);
    ` : d = `
      return vec4(outputValue.x);
    `;else if (i.length) {
    let b = s - 2,
      C = s - 1;
    i.indexOf(b) > -1 && i.indexOf(C) > -1 ? d = "return vec4(outputValue.x);" : i.indexOf(b) > -1 ? d = "return vec4(outputValue.x, outputValue.y, outputValue.x, outputValue.y);" : i.indexOf(C) > -1 && (d = "return vec4(outputValue.xx, outputValue.zz);");
  }
  return `
    vec4 ${n}() {
      ${p} coords = getOutputCoords();
      ${c}
      vec4 outputValue = get${o}(${m});
      ${d}
    }
  `;
}
// Emits GLSL for an unpacked `get<Name>AtOutCoords()`. When the input and
// output textures have identical layout, samples directly at resultUV;
// otherwise reads the output coords, zeroes broadcast dims and forwards to
// the input's get<Name> sampler.
function VZ(r, t8) {
  let e = r.name,
    o = e.charAt(0).toUpperCase() + e.slice(1),
    n = "get" + o + "AtOutCoords",
    s = t8.texShape,
    a = r.shapeInfo.texShape,
    i = r.shapeInfo.logicalShape.length,
    p = t8.logicalShape.length;
  // Fast path: same rank, same texture shape, no slicing — sample in place.
  if (!r.shapeInfo.isUniform && i === p && r.shapeInfo.flatOffset == null && y.arraysEqual(a, s)) return `
    float ${n}() {
      return sampleTexture(${e}, resultUV);
    }
  `;
  let u = Re(p),
    c = IR(r.shapeInfo.logicalShape, t8.logicalShape), // broadcast dims
    l = p - i,
    m,
    d = ["x", "y", "z", "w", "u", "v"];
  // Zero the output coords along broadcast dims before sampling the input.
  i === 0 ? m = "" : p < 2 && c.length >= 1 ? m = "coords = 0;" : m = c.map(h => `coords.${d[h + l]} = 0;`).join(`
`);
  let f = "";
  return p < 2 && i > 0 ? f = "coords" : f = r.shapeInfo.logicalShape.map((h, g) => `coords.${d[g + l]}`).join(", "), `
    float ${n}() {
      ${u} coords = getOutputCoords();
      ${m}
      return get${o}(${f});
    }
  `;
}
// Maps a tensor rank to the GLSL integer-vector type used for its coords.
// Rank 0/1 collapse to a plain `int`; ranks 5 and 6 use the custom
// `ivec5`/`ivec6` struct names emitted elsewhere in the shader preamble.
// Throws for unsupported ranks (> 6).
function Re(r) {
  if (r <= 1) {
    return "int";
  }
  switch (r) {
    case 2:
      return "ivec2";
    case 3:
      return "ivec3";
    case 4:
      return "ivec4";
    case 5:
      return "ivec5";
    case 6:
      return "ivec6";
    default:
      throw Error(`GPU for rank ${r} is not yet supported`);
  }
}
// Decides which shape to upload as the `<name>Shape` uniform.
// `r`: whether the program consumes packed inputs; `t8`: the input's logical
// shape; `e`: the output's logical shape. When squeezing size-1 dims lowers
// the rank (unpacked case), or a packed rank-3 input has a leading batch of
// 1, the squeezed shape is used so fewer sampler variants are needed.
function Zf(r, t8, e) {
  const { newShape, keptDims } = y.squeezeShape(t8);
  const rank = t8.length;
  // Packed rank-3 tensor with batch 1: drop the batch dim instead of the
  // generic squeeze result.
  const isPackedBatchOfOne = r && rank === 3 && t8[0] === 1;
  const squeezedShape = isPackedBatchOfOne ? t8.slice(1) : newShape;
  const useSqueezeShape =
    (!r && rank > 1 && !y.arraysEqual(t8, e) && newShape.length < rank) ||
    isPackedBatchOfOne;
  return {
    useSqueezeShape,
    uniformShape: useSqueezeShape ? squeezedShape : t8,
    keptDims
  };
}
// Returns a deep copy of the input descriptor `r` whose logical shape is
// replaced by `t8`; the original descriptor is left untouched. The JSON
// round-trip clone only preserves plain data, matching the original code.
function Bc(r, t8) {
  const clone = JSON.parse(JSON.stringify(r));
  clone.shapeInfo.logicalShape = t8;
  return clone;
}
// Selects the entries of `r` at the indices listed in `t8` and renders them
// as a comma-separated GLSL argument list.
function zc(r, t8) {
  const picked = [];
  for (const idx of t8) {
    picked.push(r[idx]);
  }
  return picked.join(", ");
}
// Compiles a WebGL program: builds shape infos for the inputs `e` and output
// `o`, generates the fragment-shader source for program descriptor `t8` on
// GPGPU context `r`, compiles and links it, and returns the binary record.
// Under ENGINE_COMPILE_ONLY the uniform locations are left null (deferred to
// a later qI call); otherwise they are resolved immediately.
function _R(r, t8, e, o) {
  let n = e.map((c, l) => {
    // Per-input shape descriptor; uniforms have no texture backing.
    let m = {
      logicalShape: c.shape,
      texShape: c.isUniform ? null : c.texData.texShape,
      isUniform: c.isUniform,
      isPacked: c.isUniform ? false : c.texData.isPacked,
      flatOffset: null
    };
    // Sliced tensors carry a flat offset into the shared texture.
    return c.texData != null && c.texData.slice != null && c.texData.slice.flatOffset > 0 && (m.flatOffset = c.texData.slice.flatOffset), {
      name: t8.variableNames[l],
      shapeInfo: m
    };
  }),
    s = n.map(c => c.shapeInfo),
    a = {
      logicalShape: o.shape,
      texShape: o.texData.texShape,
      isUniform: false,
      isPacked: o.texData.isPacked,
      flatOffset: null
    },
    i = vR(n, a, t8), // full fragment-shader source (generator defined elsewhere)
    p = $I(r.gl, i), // compiled fragment shader
    u = r.createProgram(p);
  return A().get("ENGINE_COMPILE_ONLY") ? {
    program: t8,
    fragmentShader: p,
    source: i,
    webGLProgram: u,
    inShapeInfos: s,
    outShapeInfo: a,
    variablesLocations: null,
    customUniformLocations: null,
    infLoc: null,
    nanLoc: null,
    outShapeLocation: null,
    outShapeStridesLocation: null,
    outTexShapeLocation: null
  } : (r.buildVao(u), Object.assign({
    program: t8,
    fragmentShader: p,
    source: i,
    webGLProgram: u,
    inShapeInfos: s,
    outShapeInfo: a
  }, qI(r, t8, u)));
}
// Resolves all uniform locations for a linked program `e` of descriptor `t8`
// on context `r`: NAN/INFINITY constants (INFINITY only on WebGL1), each
// input's sampler/offset (plus Shape/TexShape when shape uniforms are on),
// the output shape uniforms, and any program-declared custom uniforms.
// Lookups use shouldThrow=false, so missing/optimized-out uniforms are null.
function qI(r, t8, e) {
  let o = [],
    n = [],
    s,
    a,
    i,
    p = null,
    u = null;
  u = r.getUniformLocation(e, "NAN", false), A().getNumber("WEBGL_VERSION") === 1 && (p = r.getUniformLocation(e, "INFINITY", false));
  let c = false; // never throw on missing uniforms
  for (let l of t8.variableNames) {
    let m = {
      name: l,
      uniform: r.getUniformLocation(e, l, c),
      offset: r.getUniformLocation(e, `offset${l}`, c)
    };
    t8.enableShapeUniforms && (m.shape = r.getUniformLocation(e, `${l}Shape`, c), m.texShape = r.getUniformLocation(e, `${l}TexShape`, c)), o.push(m);
  }
  if (t8.enableShapeUniforms && (s = r.getUniformLocation(e, "outShape", c), i = r.getUniformLocation(e, "outShapeStrides", c), a = r.getUniformLocation(e, "outTexShape", c)), t8.customUniforms) for (let l of t8.customUniforms) n.push(r.getUniformLocation(e, l.name, c));
  return {
    variablesLocations: o,
    customUniformLocations: n,
    infLoc: p,
    nanLoc: u,
    outShapeLocation: s,
    outShapeStridesLocation: i,
    outTexShapeLocation: a
  };
}
// Validates that the shapes a binary was compiled with (`r`, list of shape
// infos) match the tensors it is now being executed with (`t8`). Throws on
// any mismatch in count, logical shape, or texture shape; a pair where both
// sides are uniforms skips the texture-shape check.
function TR(r, t8) {
  if (r.length !== t8.length) throw Error(`Binary was compiled with ${r.length} inputs, but was executed with ${t8.length} inputs`);
  r.forEach((e, o) => {
    let n = e.logicalShape,
      s = t8[o],
      a = s.shape;
    if (!y.arraysEqual(n, a)) throw Error(`Binary was compiled with different shapes than the current args. Shapes ${n} and ${a} must match`);
    // Uniform-backed on both sides: no texture to compare.
    if (e.isUniform && s.isUniform) return;
    let i = e.texShape,
      p = s.isUniform ? null : s.texData.texShape;
    if (!y.arraysEqual(i, p)) throw Error(`Binary was compiled with different texture shapes than the current args. Shape ${i} and ${p} must match`);
  });
}
function $R(r, t8, e, o, n) { | |
t8.program.enableShapeUniforms || (TR(t8.inShapeInfos, e), TR([t8.outShapeInfo], [o])); | |
let s = o.texData.texture, | |
a = o.texData.texShape; | |
o.texData.isPacked ? r.setOutputPackedMatrixTexture(s.texture, a[0], a[1]) : r.setOutputMatrixTexture(s.texture, a[0], a[1]), r.setProgram(t8.webGLProgram), r.bindVertexArray(t8.webGLProgram.vao), A().getNumber("WEBGL_VERSION") === 1 && t8.infLoc !== null && r.gl.uniform1f(t8.infLoc, 1 / 0), t8.nanLoc !== null && r.gl.uniform1f(t8.nanLoc, NaN); | |
for (let p = 0; p < e.length; ++p) { | |
let u = e[p], | |
{ | |
uniform: c, | |
offset: l, | |
shape: m, | |
texShape: d | |
} = t8.variablesLocations[p]; | |
if (m) { | |
let { | |
uniformShape: f | |
} = Zf(t8.program.packedInputs, u.shape, u.texData.texShape); | |
switch (f.length) { | |
case 1: | |
r.gl.uniform1iv(m, new Int32Array(f)); | |
break; | |
case 2: | |
r.gl.uniform2iv(m, new Int32Array(f)); | |
break; | |
case 3: | |
r.gl.uniform3iv(m, new Int32Array(f)); | |
break; | |
case 4: | |
r.gl.uniform4iv(m, new Int32Array(f)); | |
break; | |
default: | |
break; | |
} | |
} | |
if (d && r.gl.uniform2i(d, u.texData.texShape[0], u.texData.texShape[1]), c != null) { | |
if (u.isUniform) { | |
if (y.sizeFromShape(u.shape) < 2) r.gl.uniform1f(c, u.uniformValues[0]);else { | |
let f = u.uniformValues; | |
f instanceof Float32Array || (f = new Float32Array(f)), r.gl.uniform1fv(c, f); | |
} | |
continue; | |
} | |
u.texData.slice != null && l != null && r.gl.uniform1i(l, u.texData.slice.flatOffset), r.setInputMatrixTexture(u.texData.texture.texture, c, p); | |
} | |
} | |
let i = t8.outShapeLocation; | |
if (i) switch (o.shape.length) { | |
case 1: | |
r.gl.uniform1iv(i, new Int32Array(o.shape)); | |
break; | |
case 2: | |
r.gl.uniform2iv(i, new Int32Array(o.shape)); | |
break; | |
case 3: | |
r.gl.uniform3iv(i, new Int32Array(o.shape)); | |
break; | |
case 4: | |
r.gl.uniform4iv(i, new Int32Array(o.shape)); | |
break; | |
default: | |
break; | |
} | |
if (t8.outShapeStridesLocation) { | |
let p = y.computeStrides(o.shape); | |
switch (o.shape.length) { | |
case 2: | |
r.gl.uniform1iv(t8.outShapeStridesLocation, new Int32Array(p)); | |
break; | |
case 3: | |
r.gl.uniform2iv(t8.outShapeStridesLocation, new Int32Array(p)); | |
break; | |
case 4: | |
r.gl.uniform3iv(t8.outShapeStridesLocation, new Int32Array(p)); | |
break; | |
default: | |
break; | |
} | |
} | |
if (t8.outTexShapeLocation && r.gl.uniform2i(t8.outTexShapeLocation, o.texData.texShape[0], o.texData.texShape[1]), t8.program.customUniforms && n) for (let p = 0; p < t8.program.customUniforms.length; ++p) { | |
let u = t8.program.customUniforms[p], | |
c = t8.customUniformLocations[p], | |
l = n[p]; | |
if (u.type === "float") r.gl.uniform1fv(c, l);else if (u.type === "vec2") r.gl.uniform2fv(c, l);else if (u.type === "vec3") r.gl.uniform3fv(c, l);else if (u.type === "vec4") r.gl.uniform4fv(c, l);else if (u.type === "int") r.gl.uniform1iv(c, l);else if (u.type === "ivec2") r.gl.uniform2iv(c, l);else if (u.type === "ivec3") r.gl.uniform3iv(c, l);else if (u.type === "ivec4") r.gl.uniform4iv(c, l);else throw Error(`uniform type ${u.type} is not supported yet.`); | |
} | |
r.executeProgram(); | |
} | |
function ER(r, t8, e) { | |
let o = ""; | |
t8.concat(e).forEach(a => { | |
let i = a.texData != null && a.texData.slice != null && a.texData.slice.flatOffset > 0; | |
if (r.enableShapeUniforms && !a.isUniform) { | |
let p = a.texData.texShape, | |
{ | |
useSqueezeShape: u, | |
uniformShape: c, | |
keptDims: l | |
} = Zf(r.packedInputs, a.shape, p), | |
m = "", | |
d = "", | |
f = ""; | |
if (c.length === 1 && r.packedInputs) { | |
let k = [Math.ceil(p[0] / 2), Math.ceil(p[1] / 2)]; | |
m = `${k[0] > 1}_${k[1] > 1}`; | |
} else if (c.length === 2 && !r.packedInputs) d = `${c[0] > 1}_${c[1] > 1}`;else if (c.length > 2 && !r.packedInputs) { | |
let k = y.computeStrides(c); | |
f = `${k[0] === p[1]}_${k[k.length - 1] === p[1]}`; | |
} | |
let h = a.shape.length, | |
g = c.length === 2 && y.arraysEqual(a.shape, p), | |
x = y.sizeFromShape(a.shape) === 1, | |
b = w.getBroadcastDims(a.shape, e.shape), | |
C = !r.packedInputs && h === e.shape.length && y.arraysEqual(p, e.texData.texShape), | |
S = r.packedInputs || c.length > 2 ? "" : `${p[0] > 1}_${p[1] > 1}`; | |
o += `${h}_${C}_${u ? l : ""}_${c.length}_${x}_${b}_${g}_${m}_${d}_${f}_${S}_${i}`; | |
} else { | |
let p = a.isUniform ? "uniform" : a.texData.texShape; | |
o += `${a.shape}_${p}_${i}`; | |
} | |
}); | |
let n = r.userCode, | |
s = r.constructor.name; | |
return s += "_" + o + "_" + n + `${A().getNumber("WEBGL_VERSION")}`, s; | |
} | |
// Returns whether shape-uniform codegen should be enabled for a program
// whose output rank is `r`: requires the WEBGL_USE_SHAPES_UNIFORMS flag and
// rank at most 4.
function pt(r) {
  const flagEnabled = A().getBool("WEBGL_USE_SHAPES_UNIFORMS");
  return flagEnabled && r <= 4;
}
// Program that re-packs an unpacked rank-3 input "A" into a densely packed
// output texture: each output texel gathers 4 consecutive flat-indexed
// elements via getA. `texShape` is passed as a custom uniform.
var Jf = class {
  constructor(t8) {
    this.variableNames = ["A"], this.packedInputs = false, this.packedOutput = true, this.outPackingScheme = bu.DENSE, this.customUniforms = [{
      name: "texShape",
      type: "ivec2"
    }];
    let e = vt(); // GLSL-dialect helpers (output variable name etc.)
    // Ip/Ws (defined elsewhere) emit the flat-index -> (r, c, d) decomposition,
    // with or without shape uniforms.
    this.outputShape = t8, this.enableShapeUniforms = pt(this.outputShape.length), this.userCode = `
      ivec3 outCoordsFromFlatIndex(int index) {
        ${this.enableShapeUniforms ? Ip(["r", "c", "d"], t8) : Ws(["r", "c", "d"], t8)}
        return ivec3(r, c, d);
      }

      void main() {
        ivec2 resTexRC = ivec2(resultUV.yx * vec2(texShape[0], texShape[1]));
        int index = 4 * (resTexRC.x * texShape[1] + resTexRC.y);

        vec4 result = vec4(0.);

        for (int i=0; i<4; i++) {
          int flatIndex = index + i;
          ivec3 rc = outCoordsFromFlatIndex(flatIndex);
          result[i] = getA(rc.x, rc.y, rc.z);
        }

        ${e.output} = result;
      }
    `;
  }
};
// Variant of Jf for packed inputs: reads the packed texel via getA and then
// extracts the wanted scalar channel with getChannel before writing it into
// the densely packed output.
var eh = class {
  constructor(t8) {
    this.variableNames = ["A"], this.packedInputs = true, this.packedOutput = true, this.outPackingScheme = bu.DENSE, this.customUniforms = [{
      name: "texShape",
      type: "ivec2"
    }];
    let e = vt(); // GLSL-dialect helpers (output variable name etc.)
    this.outputShape = t8, this.enableShapeUniforms = pt(this.outputShape.length), this.userCode = `
      ivec3 outCoordsFromFlatIndex(int index) {
        ${this.enableShapeUniforms ? Ip(["r", "c", "d"], t8) : Ws(["r", "c", "d"], t8)}
        return ivec3(r, c, d);
      }

      void main() {
        ivec2 resTexRC = ivec2(resultUV.yx * vec2(texShape[0], texShape[1]));
        int index = 4 * (resTexRC.x * texShape[1] + resTexRC.y);

        vec4 result = vec4(0.);

        for (int i=0; i<4; i++) {
          int flatIndex = index + i;
          ivec3 rc = outCoordsFromFlatIndex(flatIndex);
          result[i] = getChannel(getA(rc.x, rc.y, rc.z), vec2(rc.y, rc.z));
        }

        ${e.output} = result;
      }
    `;
  }
};
// Download program for unpacked inputs: encodes each float value into the
// four bytes of an RGBA texel (via the `encode_float` GLSL helper contained
// in the `Qf` snippet) so it can be read back over a byte readPixels path.
var th = class {
  constructor(t8) {
    this.variableNames = ["A"], this.outTexUsage = dr.DOWNLOAD;
    let e = vt(); // GLSL-dialect helpers (output variable name etc.)
    this.outputShape = t8, this.userCode = `
      ${Qf}

      void main() {
        float x = getAAtOutCoords();
        ${e.output} = encode_float(x);
      }
    `;
  }
};
// Download program for packed inputs: extracts the scalar channel for the
// current output coordinate from the packed texel, then byte-encodes it with
// `encode_float` (from the `Qf` snippet) for readback.
var rh = class {
  constructor(t8) {
    this.variableNames = ["A"], this.packedInputs = true, this.packedOutput = false, this.outTexUsage = dr.DOWNLOAD;
    let e = vt(); // GLSL-dialect helpers (output variable name etc.)
    this.outputShape = t8, this.userCode = `
      ${Qf}

      void main() {
        ivec3 coords = getOutputCoords();
        float x = getChannel(getAAtOutCoords(), vec2(coords.y, coords.z));
        ${e.output} = encode_float(x);
      }
    `;
  }
};
// Maps an RGBA channel letter to its component index within a texel; used by
// the pixel-decode program below to select channels by name.
var GZ = {
  R: 0,
  G: 1,
  B: 2,
  A: 3
};
// Decodes pixel data from texture "A" into an unpacked float output. `e`
// rescales byte values back to 0-255; `o` is the channel order string (e.g.
// "RGBA") — the flat element index modulo its length selects which texel
// channel holds the value.
var Zl = class {
  constructor(t8, e = false, o = "RGBA") {
    this.variableNames = ["A"], this.customUniforms = [{
      name: "texShape",
      type: "ivec2"
    }];
    let n = vt(); // GLSL-dialect helpers (texture2D/output names)
    this.outputShape = t8, this.enableShapeUniforms = pt(this.outputShape.length);
    let s = "result";
    // Undo the 0-1 normalization applied when uint8 data was uploaded.
    e && (s = "floor(result * 255. + 0.5)");
    // One branch per channel letter, mapped to a component via GZ.
    let a = "";
    for (let i = 0; i < o.length; i++) {
      let p = o[i];
      a += `
          if(offset == ${i}) {
            result = values[${GZ[p]}];
          }`;
    }
    // Pc/Fc (defined elsewhere) emit the getFlatIndex helper, with or
    // without shape uniforms.
    this.userCode = `
      ${this.enableShapeUniforms ? Pc() : Fc(t8)}

      void main() {
        ivec3 coords = getOutputCoords();

        int flatIndex = getFlatIndex(coords);
        float result = 0.;
        int offset = imod(flatIndex, ${o.length});

        flatIndex = idiv(flatIndex, ${o.length}, 1.);

        int r = flatIndex / texShape[1];
        if (r < texShape[0]) {
          int c = imod(flatIndex, texShape[1]);
          vec2 uv = (vec2(c, r) + halfCR) / vec2(texShape[1], texShape[0]);
          vec4 values = ${n.texture2D}(A, uv);
          ${a}
        }
        ${n.output} = vec4(${s}, 0., 0., 0.);
      }
    `;
  }
};
// Packed variant of the pixel-decode program: each output texel covers a 2x2
// logical block, so the generated code loops over the four (row, col) local
// offsets, bounds-checks against the output shape, fetches the source texel
// holding that flat index, and copies the right RGBA component into the
// matching result slot. `e` rescales byte values back to 0-255.
var oh = class {
  constructor(t8, e = false) {
    this.variableNames = ["A"], this.packedInputs = false, this.packedOutput = true, this.customUniforms = [{
      name: "texShape",
      type: "ivec2"
    }];
    let o = vt(); // GLSL-dialect helpers (texture2D/output names)
    this.outputShape = t8, this.enableShapeUniforms = pt(this.outputShape.length);
    let n = "",
      s = "result";
    // Undo the 0-1 normalization applied when uint8 data was uploaded.
    e && (s = "floor(result * 255. + 0.5)");
    // Unrolled 2x2 block: `a` walks rows, `i` walks cols, `p` is the result
    // component index.
    for (let a = 0; a <= 1; a++) for (let i = 0; i <= 1; i++) {
      let p = a * 2 + i;
      n += `
          localCoords = coords;
          if(localCoords[2] + ${i} < ${this.enableShapeUniforms ? "outShape[2]" : `${t8[2]}`}) {
          localCoords[2] += ${i};
          if (localCoords[1] + ${a} < ${this.enableShapeUniforms ? "outShape[1]" : `${t8[1]}`}) {
            localCoords[1] += ${a};

            flatIndex = getFlatIndex(localCoords);
            offset = imod(flatIndex, 4);

            flatIndex = idiv(flatIndex, 4, 1.);

            int r = flatIndex / texShape[1];
            int c = imod(flatIndex, texShape[1]);
            vec2 uv = (vec2(c, r) + halfCR) / vec2(texShape[1], texShape[0]);
            values = ${o.texture2D}(A, uv);

            if (offset == 0) {
              result[${p}] = values[0];
            } else if (offset == 1) {
              result[${p}] = values[1];
            } else if (offset == 2) {
              result[${p}] = values[2];
            } else {
              result[${p}] = values[3];
            }
          }
        }
        `;
    }
    // Pc/Fc (defined elsewhere) emit the getFlatIndex helper, with or
    // without shape uniforms.
    this.userCode = `
      ${this.enableShapeUniforms ? Pc() : Fc(t8)}

      void main() {
        ivec3 coords = getOutputCoords();

        vec4 result = vec4(0.);
        int flatIndex, r, c, offset;
        ivec3 localCoords;
        vec2 uv;
        vec4 values;

        ${n}

        ${o.output} = ${s};
      }
    `;
  }
};
// Namespace object for the WebGL texture/buffer helpers below; `qe` is the
// bundler's export helper and registers each public name as a lazy getter.
var cv = {};
qe(cv, {
  bindVertexProgramAttributeStreams: () => rv,
  createBufferFromOutputTexture: () => sv,
  createFloat16MatrixTexture: () => ZI,
  createFloat16PackedMatrixTexture: () => tv,
  createFloat32MatrixTexture: () => QI,
  createIndexBuffer: () => YI,
  createPackedMatrixTexture: () => ev,
  createUnsignedBytesMatrixTexture: () => JI,
  createVertexBuffer: () => XI,
  createVertexShader: () => jI,
  downloadByteEncodedFloatMatrixFromOutputTexture: () => iv,
  downloadFloat32MatrixFromBuffer: () => av,
  downloadMatrixFromPackedOutputTexture: () => pv,
  downloadPackedMatrixFromBuffer: () => uv,
  getInternalFormatForFloat16MatrixTexture: () => sh,
  getInternalFormatForFloat16PackedMatrixTexture: () => uh,
  getInternalFormatForFloat32MatrixTexture: () => nh,
  getInternalFormatForPackedMatrixTexture: () => ih,
  getInternalFormatForUnsignedBytesMatrixTexture: () => ah,
  uploadDenseMatrixToTexture: () => ov,
  uploadPixelDataToTexture: () => nv
});
// createVertexShader: compiles the single vertex shader shared by every GPGPU
// program. It draws a full-screen quad (positions come from the vertex buffer
// built by XI) and forwards the `uv` attribute to the fragment stage as
// `resultUV`. `vt()` supplies GLSL-version-specific keywords (version pragma,
// attribute/varying spellings); `_I` compiles the source on context `r`.
function jI(r) {
  let t8 = vt(),
    e = `${t8.version}
    precision highp float;
    ${t8.attribute} vec3 clipSpacePos;
    ${t8.attribute} vec2 uv;
    ${t8.varyingVs} vec2 resultUV;
    void main() {
      gl_Position = vec4(clipSpacePos, 1);
      resultUV = uv;
    }`;
  return _I(r, e);
}
// createVertexBuffer: vertex buffer for the full-screen quad. Interleaved
// layout per vertex is [clipSpacePos.xyz, uv.xy] (stride 20 bytes), four
// vertices covering clip space.
function XI(r) {
  const quadVertexData = new Float32Array([-1, 1, 0, 0, 1, -1, -1, 0, 0, 0, 1, 1, 0, 1, 1, 1, -1, 0, 1, 0]);
  return DI(r, quadVertexData);
}
// createIndexBuffer: index buffer drawing the quad as two triangles.
function YI(r) {
  const quadTriangleIndices = new Uint16Array([0, 1, 2, 2, 1, 3]);
  return AI(r, quadTriangleIndices);
}
// Shared texture factory: allocates a t8 x e (width x height) 2D texture with
// NEAREST filtering and edge-clamped wrapping. WebGL1 uses mutable texImage2D
// storage; WebGL2 uses immutable texStorage2D. Returns the texture handle and
// its physical shape as [height, width]. `ce` wraps each GL call for debug
// error checking.
function Jl(r, t8, e, o, n, s) {
  PI(t8, e);
  const texture = FI(r);
  const target = r.TEXTURE_2D;
  ce(r, () => r.bindTexture(target, texture));
  ce(r, () => r.texParameteri(target, r.TEXTURE_WRAP_S, r.CLAMP_TO_EDGE));
  ce(r, () => r.texParameteri(target, r.TEXTURE_WRAP_T, r.CLAMP_TO_EDGE));
  ce(r, () => r.texParameteri(target, r.TEXTURE_MIN_FILTER, r.NEAREST));
  ce(r, () => r.texParameteri(target, r.TEXTURE_MAG_FILTER, r.NEAREST));
  if (A().getNumber("WEBGL_VERSION") === 1) {
    ce(r, () => r.texImage2D(target, 0, o, t8, e, 0, n, s, null));
  } else {
    ce(r, () => r.texStorage2D(target, 1, o, t8, e));
  }
  ce(r, () => r.bindTexture(r.TEXTURE_2D, null));
  return {
    texture,
    texShape: [e, t8]
  };
}
// getInternalFormatForFloat32MatrixTexture: internal format used for dense
// float32 matrix textures, taken from the texture config.
function nh(r) {
  const { internalFormatFloat } = r;
  return internalFormatFloat;
}
// createFloat32MatrixTexture: dense float32 texture for a logical matrix of
// t8 rows x e columns; Sp maps logical dims to the physical texture size.
function QI(r, t8, e, o) {
  const [physicalWidth, physicalHeight] = Sp(t8, e);
  return Jl(r, physicalWidth, physicalHeight, nh(o), o.textureFormatFloat, r.FLOAT);
}
// getInternalFormatForFloat16MatrixTexture: internal format used for dense
// float16 matrix textures, taken from the texture config.
function sh(r) {
  const { internalFormatHalfFloat } = r;
  return internalFormatHalfFloat;
}
// createFloat16MatrixTexture: dense half-float texture for a logical matrix
// of t8 rows x e columns.
function ZI(r, t8, e, o) {
  const [physicalWidth, physicalHeight] = Sp(t8, e);
  return Jl(r, physicalWidth, physicalHeight, sh(o), o.textureFormatFloat, o.textureTypeHalfFloat);
}
// getInternalFormatForUnsignedBytesMatrixTexture: format used when floats are
// byte-encoded for download, taken from the texture config.
function ah(r) {
  const { downloadTextureFormat } = r;
  return downloadTextureFormat;
}
// createUnsignedBytesMatrixTexture: RGBA/UNSIGNED_BYTE texture used for the
// byte-encoded float download path.
function JI(r, t8, e, o) {
  const [physicalWidth, physicalHeight] = Sp(t8, e);
  return Jl(r, physicalWidth, physicalHeight, ah(o), r.RGBA, r.UNSIGNED_BYTE);
}
// getInternalFormatForPackedMatrixTexture: internal format for packed
// (2x2 values per texel) float32 textures.
function ih(r) {
  const { internalFormatPackedFloat } = r;
  return internalFormatPackedFloat;
}
// createPackedMatrixTexture: packed float32 texture; La maps logical dims to
// the packed physical texture size.
function ev(r, t8, e, o) {
  const [physicalWidth, physicalHeight] = La(t8, e);
  return Jl(r, physicalWidth, physicalHeight, ih(o), r.RGBA, r.FLOAT);
}
// getInternalFormatForFloat16PackedMatrixTexture: internal format for packed
// half-float textures.
function uh(r) {
  const { internalFormatPackedHalfFloat } = r;
  return internalFormatPackedHalfFloat;
}
// createFloat16PackedMatrixTexture: packed half-float texture for a logical
// t8 x e matrix.
function tv(r, t8, e, o) {
  const [physicalWidth, physicalHeight] = La(t8, e);
  return Jl(r, physicalWidth, physicalHeight, uh(o), r.RGBA, o.textureTypeHalfFloat);
}
// bindVertexProgramAttributeStreams: binds the quad vertex buffer `e` and
// wires program `t8`'s two attributes into it. Layout: 20-byte stride with
// vec3 clipSpacePos at offset 0 and vec2 uv at offset 12. Returns true only
// when both attributes were bound successfully.
function rv(r, t8, e) {
  ce(r, () => r.bindBuffer(r.ARRAY_BUFFER, e));
  const positionBound = jf(r, t8, "clipSpacePos", e, 3, 20, 0);
  return positionBound && jf(r, t8, "uv", e, 2, 20, 12);
}
// uploadDenseMatrixToTexture: copies dense data `n` into texture `t8` of
// physical size e x o. The data is staged into a 4-channel buffer sized to
// the full texture, so trailing texels beyond `n.length` are zero-filled.
function ov(r, t8, e, o, n, s) {
  ce(r, () => r.bindTexture(r.TEXTURE_2D, t8));
  let staging, texelType, uploadFormat;
  if (n instanceof Uint8Array) {
    staging = new Uint8Array(e * o * 4);
    texelType = r.UNSIGNED_BYTE;
    uploadFormat = r.RGBA;
  } else {
    staging = new Float32Array(e * o * 4);
    texelType = r.FLOAT;
    uploadFormat = s.internalFormatPackedFloat;
  }
  staging.set(n);
  // WebGL2 textures use immutable storage, so only texSubImage2D is legal.
  if (A().getNumber("WEBGL_VERSION") === 2) {
    ce(r, () => r.texSubImage2D(r.TEXTURE_2D, 0, 0, 0, e, o, r.RGBA, texelType, staging));
  } else {
    ce(r, () => r.texImage2D(r.TEXTURE_2D, 0, uploadFormat, e, o, 0, r.RGBA, texelType, staging));
  }
  ce(r, () => r.bindTexture(r.TEXTURE_2D, null));
}
// uploadPixelDataToTexture: uploads pixel data into texture `t8`. `e` is
// either an ImageData-like object ({data: Uint8Array, width, height}) or a
// DOM pixel source passed straight to the GL upload call.
function nv(r, t8, e) {
  ce(r, () => r.bindTexture(r.TEXTURE_2D, t8));
  if (e.data instanceof Uint8Array) {
    if (A().getNumber("WEBGL_VERSION") === 2) {
      ce(r, () => r.texSubImage2D(r.TEXTURE_2D, 0, 0, 0, e.width, e.height, r.RGBA, r.UNSIGNED_BYTE, e.data));
    } else {
      ce(r, () => r.texImage2D(r.TEXTURE_2D, 0, r.RGBA, e.width, e.height, 0, r.RGBA, r.UNSIGNED_BYTE, e.data));
    }
  } else if (A().getNumber("WEBGL_VERSION") === 2) {
    // WebGL2 overload taking a TexImageSource directly.
    ce(r, () => r.texSubImage2D(r.TEXTURE_2D, 0, 0, 0, r.RGBA, r.UNSIGNED_BYTE, e));
  } else {
    ce(r, () => r.texImage2D(r.TEXTURE_2D, 0, r.RGBA, r.RGBA, r.UNSIGNED_BYTE, e));
  }
  ce(r, () => r.bindTexture(r.TEXTURE_2D, null));
}
// createBufferFromOutputTexture: snapshots the currently bound output texture
// (t8 rows x e columns of packed RGBA floats) into a new STREAM_READ
// pixel-pack buffer so it can be read back asynchronously later.
function sv(r, t8, e, o) {
  const packBuffer = r.createBuffer();
  ce(r, () => r.bindBuffer(r.PIXEL_PACK_BUFFER, packBuffer));
  const sizeBytes = 4 * 4 * t8 * e; // 4 channels x 4 bytes per float
  ce(r, () => r.bufferData(r.PIXEL_PACK_BUFFER, sizeBytes, r.STREAM_READ));
  // With PIXEL_PACK_BUFFER bound, readPixels writes into the buffer at offset 0.
  ce(r, () => r.readPixels(0, 0, e, t8, r.RGBA, r.FLOAT, 0));
  ce(r, () => r.bindBuffer(r.PIXEL_PACK_BUFFER, null));
  return packBuffer;
}
// downloadFloat32MatrixFromBuffer: reads `e` float32 values out of
// pixel-pack buffer `t8` on WebGL2 context `r`.
function av(r, t8, e) {
  const gl2 = r;
  const values = new Float32Array(e);
  gl2.bindBuffer(gl2.PIXEL_PACK_BUFFER, t8);
  gl2.getBufferSubData(gl2.PIXEL_PACK_BUFFER, 0, values);
  gl2.bindBuffer(gl2.PIXEL_PACK_BUFFER, null);
  return values;
}
// downloadByteEncodedFloatMatrixFromOutputTexture: reads the output texture
// back as UNSIGNED_BYTE pixels, then reinterprets the raw bytes as float32
// (each logical float was encoded across the 4 byte channels of a texel).
function iv(r, t8, e, o) {
  const [physicalWidth, physicalHeight] = Sp(t8, e);
  const channelsPerTexel = 4;
  const encodedBytes = new Uint8Array(gR(t8 * e, channelsPerTexel));
  ce(r, () => r.readPixels(0, 0, physicalWidth, physicalHeight, o.downloadTextureFormat, r.UNSIGNED_BYTE, encodedBytes));
  return new Float32Array(encodedBytes.buffer);
}
// downloadPackedMatrixFromBuffer: reads a packed matrix out of pixel-pack
// buffer `t8`. Only the physical dims (s, a) are used to size the result
// (via xR); the remaining parameters are kept for interface compatibility.
function uv(r, t8, e, o, n, s, a, i) {
  const gl2 = r;
  const values = new Float32Array(xR(s, a));
  gl2.bindBuffer(gl2.PIXEL_PACK_BUFFER, t8);
  gl2.getBufferSubData(gl2.PIXEL_PACK_BUFFER, 0, values);
  gl2.bindBuffer(gl2.PIXEL_PACK_BUFFER, null);
  return values;
}
// downloadMatrixFromPackedOutputTexture: synchronous readback of a packed
// RGBA float matrix (t8 rows x e columns) from the bound framebuffer.
function pv(r, t8, e) {
  const packedValues = new Float32Array(t8 * e * 4);
  ce(r, () => r.readPixels(0, 0, e, t8, r.RGBA, r.FLOAT, packedValues));
  return packedValues;
}
// GPGPU execution context (minified build of tfjs-backend-webgl's
// GPGPUContext). Wraps a WebGL1/2 rendering context and owns the shared quad
// vertex/index buffers, a framebuffer for render-to-texture, float-texture
// extension handles, and the fence/query machinery used for async readback
// and timing. `ce` wraps GL calls for debug error checking; `A()` is the
// global environment/flags accessor.
var kp = class {
  constructor(t8) {
    this.outputTexture = null, this.program = null, this.disposed = false, this.itemsToPoll = [];
    let e = A().getNumber("WEBGL_VERSION");
    // Use the caller-supplied context if given (after validating its version
    // via vI), otherwise create a fresh one with Kr. Afterwards build
    // version-appropriate VAO helpers: native on WebGL2, via the
    // OES_vertex_array_object extension on WebGL1.
    if (t8 != null ? (this.gl = t8, vI(e, t8)) : this.gl = Kr(e), t8 = this.gl, A().getNumber("WEBGL_VERSION") === 2) {
      let s = t8;
      this.createVertexArray = () => ce(s, () => s.createVertexArray()), this.bindVertexArray = a => ce(s, () => s.bindVertexArray(a)), this.deleteVertexArray = a => ce(s, () => s.deleteVertexArray(a)), this.getVertexArray = () => ce(s, () => s.getParameter(s.VERTEX_ARRAY_BINDING));
    } else if (t8 != null) {
      let s = t8.getExtension("OES_vertex_array_object");
      if (s == null) throw new Error("All WebGL1 implementations are expected to offer OES_vertex_array_object.");
      this.createVertexArray = () => ce(t8, () => s.createVertexArrayOES()), this.bindVertexArray = a => ce(t8, () => s.bindVertexArrayOES(a)), this.deleteVertexArray = a => ce(t8, () => s.deleteVertexArrayOES(a)), this.getVertexArray = () => ce(t8, () => t8.getParameter(s.VERTEX_ARRAY_BINDING_OES));
    }
    let o = "WEBGL_color_buffer_float",
      n = "EXT_color_buffer_half_float";
    // Acquire float-texture / float-renderable capability extensions,
    // preferring full float and falling back to half float. The
    // WEBGL_FORCE_F16_TEXTURES flag turns a missing half-float capability
    // into a hard error instead of a silent fallback.
    if (this.parallelCompilationExtension = this.gl.getExtension("KHR_parallel_shader_compile"), A().getNumber("WEBGL_VERSION") === 1) {
      let s = "OES_texture_float",
        a = "OES_texture_half_float";
      if (this.textureFloatExtension = Ec(this.gl, s), qr(this.gl, a)) this.textureHalfFloatExtension = Ec(this.gl, a);else if (A().get("WEBGL_FORCE_F16_TEXTURES")) throw new Error("GL context does not support half float textures, yet the environment flag WEBGL_FORCE_F16_TEXTURES is set to true.");
      if (this.colorBufferFloatExtension = this.gl.getExtension(o), qr(this.gl, n)) this.colorBufferHalfFloatExtension = Ec(this.gl, n);else if (A().get("WEBGL_FORCE_F16_TEXTURES")) throw new Error("GL context does not support color renderable half floats, yet the environment flag WEBGL_FORCE_F16_TEXTURES is set to true.");
    } else if (o = "EXT_color_buffer_float", qr(this.gl, o)) this.colorBufferFloatExtension = this.gl.getExtension(o);else if (qr(this.gl, n)) this.colorBufferHalfFloatExtension = this.gl.getExtension(n);else throw new Error("GL context does not support color renderable floats");
    // Shared resources reused by every program executed on this context.
    this.vertexBuffer = XI(this.gl), this.indexBuffer = YI(this.gl), this.framebuffer = OI(this.gl), this.textureConfig = Xl(this.gl, this.textureHalfFloatExtension);
  }
  // Debug mode is re-read from the environment each time so it can be toggled.
  get debug() {
    return A().getBool("DEBUG");
  }
  // Releases the framebuffer and index buffer; warns (but proceeds) if a
  // program or output texture is still bound, since those are likely leaks.
  dispose() {
    if (this.disposed) return;
    this.program != null && console.warn("Disposing a GPGPUContext that still has a bound WebGLProgram. This is probably a resource leak, delete the program with GPGPUContext.deleteProgram before disposing."), this.outputTexture != null && console.warn("Disposing a GPGPUContext that still has a bound output matrix texture. This is probably a resource leak, delete the output matrix texture with GPGPUContext.deleteMatrixTexture before disposing.");
    let t8 = this.gl;
    ce(t8, () => t8.finish()), ce(t8, () => t8.bindFramebuffer(t8.FRAMEBUFFER, null)), ce(t8, () => t8.deleteFramebuffer(this.framebuffer)), ce(t8, () => t8.bindBuffer(t8.ARRAY_BUFFER, null)), ce(t8, () => t8.bindBuffer(t8.ELEMENT_ARRAY_BUFFER, null)), ce(t8, () => t8.deleteBuffer(this.indexBuffer)), this.disposed = true;
  }
  // --- Texture factories: thin guards around the module-level helpers. ---
  createFloat32MatrixTexture(t8, e) {
    return this.throwIfDisposed(), QI(this.gl, t8, e, this.textureConfig);
  }
  createFloat16MatrixTexture(t8, e) {
    return this.throwIfDisposed(), ZI(this.gl, t8, e, this.textureConfig);
  }
  createUnsignedBytesMatrixTexture(t8, e) {
    return this.throwIfDisposed(), JI(this.gl, t8, e, this.textureConfig);
  }
  uploadPixelDataToTexture(t8, e) {
    this.throwIfDisposed(), nv(this.gl, t8, e);
  }
  uploadDenseMatrixToTexture(t8, e, o, n) {
    this.throwIfDisposed(), ov(this.gl, t8, e, o, n, this.textureConfig);
  }
  createFloat16PackedMatrixTexture(t8, e) {
    return this.throwIfDisposed(), tv(this.gl, t8, e, this.textureConfig);
  }
  createPackedMatrixTexture(t8, e) {
    return this.throwIfDisposed(), ev(this.gl, t8, e, this.textureConfig);
  }
  // Deletes a texture; if it is the current output texture, detach it from
  // the framebuffer first (Xf) and clear the bookkeeping.
  deleteMatrixTexture(t8) {
    this.throwIfDisposed(), this.outputTexture === t8 && (Xf(this.gl, this.framebuffer), this.outputTexture = null), ce(this.gl, () => this.gl.deleteTexture(t8));
  }
  downloadByteEncodedFloatMatrixFromOutputTexture(t8, e, o) {
    return this.downloadMatrixDriver(t8, () => iv(this.gl, e, o, this.textureConfig));
  }
  downloadPackedMatrixFromBuffer(t8, e, o, n, s, a) {
    return uv(this.gl, t8, e, o, n, s, a, this.textureConfig);
  }
  downloadFloat32MatrixFromBuffer(t8, e) {
    return av(this.gl, t8, e);
  }
  // Snapshots texture t8 into a pixel-pack buffer while it is temporarily
  // attached to the framebuffer.
  createBufferFromTexture(t8, e, o) {
    this.bindTextureToFrameBuffer(t8);
    let n = sv(this.gl, e, o, this.textureConfig);
    return this.unbindTextureToFrameBuffer(), n;
  }
  createAndWaitForFence() {
    let t8 = this.createFence(this.gl);
    return this.pollFence(t8);
  }
  // Creates a completion marker: a real GL fence when WEBGL_FENCE_API_ENABLED,
  // else a disjoint-timer query, else a no-op that reports done immediately.
  createFence(t8) {
    let e, o;
    if (A().getBool("WEBGL_FENCE_API_ENABLED")) {
      let n = t8,
        s = n.fenceSync(n.SYNC_GPU_COMMANDS_COMPLETE, 0);
      t8.flush(), o = () => {
        let a = n.clientWaitSync(s, 0, 0);
        return a === n.ALREADY_SIGNALED || a === n.CONDITION_SATISFIED;
      }, e = s;
    } else A().getNumber("WEBGL_DISJOINT_QUERY_TIMER_EXTENSION_VERSION") > 0 ? (e = this.beginQuery(), this.endQuery(), o = () => this.isQueryAvailable(e, A().getNumber("WEBGL_DISJOINT_QUERY_TIMER_EXTENSION_VERSION"))) : o = () => true;
    return {
      query: e,
      isFencePassed: o
    };
  }
  downloadMatrixFromPackedTexture(t8, e, o) {
    return this.downloadMatrixDriver(t8, () => pv(this.gl, e, o));
  }
  // Links fragment shader t8 with the shared vertex shader and attaches a
  // fresh VAO to the resulting program object.
  createProgram(t8) {
    this.throwIfDisposed();
    let e = this.gl;
    this.vertexShader == null && (this.vertexShader = jI(e));
    let o = EI(e);
    ce(e, () => e.attachShader(o, this.vertexShader)), ce(e, () => e.attachShader(o, t8)), RI(e, o);
    let n = Object.assign(o, {
      vao: this.createVertexArray()
    });
    return this.debug && Yl(e, n), n;
  }
  // Binds the program's VAO and wires the shared quad buffers into it.
  buildVao(t8) {
    this.setProgram(t8), this.bindVertexArray(t8.vao);
    let e = this.gl;
    ce(e, () => e.bindBuffer(e.ELEMENT_ARRAY_BUFFER, this.indexBuffer)), rv(e, t8, this.vertexBuffer);
  }
  deleteProgram(t8) {
    this.throwIfDisposed(), t8 === this.program && (this.program = null), t8 != null && (ce(this.gl, () => this.gl.deleteProgram(t8)), this.deleteVertexArray(t8.vao));
  }
  setProgram(t8) {
    this.throwIfDisposed(), this.program = t8, this.program != null && this.debug && Yl(this.gl, this.program), ce(this.gl, () => this.gl.useProgram(t8));
  }
  // o=true uses the throwing lookup (MI); o=false the non-throwing one (LI).
  getUniformLocation(t8, e, o = true) {
    return this.throwIfDisposed(), o ? MI(this.gl, t8, e) : LI(this.gl, t8, e);
  }
  getAttributeLocation(t8, e) {
    return this.throwIfDisposed(), ce(this.gl, () => this.gl.getAttribLocation(t8, e));
  }
  getUniformLocationNoThrow(t8, e) {
    return this.throwIfDisposed(), this.gl.getUniformLocation(t8, e);
  }
  setInputMatrixTexture(t8, e, o) {
    this.throwIfDisposed(), this.throwIfNoProgram(), BI(this.gl, t8, e, o);
  }
  setOutputMatrixTexture(t8, e, o) {
    // Note the (rows, columns) -> (width, height) reorder into the driver.
    this.setOutputMatrixTextureDriver(t8, o, e);
  }
  setOutputPackedMatrixTexture(t8, e, o) {
    this.throwIfDisposed();
    let [n, s] = La(e, o);
    this.setOutputMatrixTextureDriver(t8, n, s);
  }
  setOutputMatrixWriteRegion(t8, e, o, n) {
    // Reorders (startRow=t8, numRows=e, startCol=o, numCols=n) into the
    // driver's scissor-style (x, y, width, height) argument order.
    this.setOutputMatrixWriteRegionDriver(o, t8, n, e);
  }
  setOutputPackedMatrixWriteRegion(t8, e, o, n) {
    throw new Error("setOutputPackedMatrixWriteRegion not implemented.");
  }
  debugValidate() {
    this.program != null && Yl(this.gl, this.program), Rc(this.gl);
  }
  // Draws the full-screen quad (2 triangles, 6 indices) with the current
  // program; in debug mode first verifies the VAO was not swapped out.
  executeProgram() {
    this.throwIfDisposed(), this.throwIfNoProgram();
    let t8 = this.gl;
    if (this.debug) {
      let e = this.getVertexArray();
      console.assert(e === this.program.vao, "VAO changed between setProgram and executeProgram!"), this.debugValidate();
    }
    ce(t8, () => t8.drawElements(t8.TRIANGLES, 6, t8.UNSIGNED_SHORT, 0));
  }
  blockUntilAllProgramsCompleted() {
    this.throwIfDisposed(), ce(this.gl, () => this.gl.finish());
  }
  // Lazily resolves the disjoint-timer-query extension matching the WebGL
  // version and caches it.
  getQueryTimerExtension() {
    return this.disjointQueryTimerExtension == null && (this.disjointQueryTimerExtension = Ec(this.gl, A().getNumber("WEBGL_DISJOINT_QUERY_TIMER_EXTENSION_VERSION") === 2 ? "EXT_disjoint_timer_query_webgl2" : "EXT_disjoint_timer_query")), this.disjointQueryTimerExtension;
  }
  getQueryTimerExtensionWebGL2() {
    return this.getQueryTimerExtension();
  }
  getQueryTimerExtensionWebGL1() {
    return this.getQueryTimerExtension();
  }
  // Starts a TIME_ELAPSED timer query using the native WebGL2 API or the
  // WebGL1 *EXT extension entry points.
  beginQuery() {
    if (A().getNumber("WEBGL_DISJOINT_QUERY_TIMER_EXTENSION_VERSION") === 2) {
      let o = this.gl,
        n = this.getQueryTimerExtensionWebGL2(),
        s = o.createQuery();
      return o.beginQuery(n.TIME_ELAPSED_EXT, s), s;
    }
    let t8 = this.getQueryTimerExtensionWebGL1(),
      e = t8.createQueryEXT();
    return t8.beginQueryEXT(t8.TIME_ELAPSED_EXT, e), e;
  }
  endQuery() {
    if (A().getNumber("WEBGL_DISJOINT_QUERY_TIMER_EXTENSION_VERSION") === 2) {
      let e = this.gl,
        o = this.getQueryTimerExtensionWebGL2();
      e.endQuery(o.TIME_ELAPSED_EXT);
      return;
    }
    let t8 = this.getQueryTimerExtensionWebGL1();
    t8.endQueryEXT(t8.TIME_ELAPSED_EXT);
  }
  // Polls (also resolving if the context gets disposed) then reads the timer.
  async waitForQueryAndGetTime(t8) {
    return await y.repeatedTry(() => this.disposed || this.isQueryAvailable(t8, A().getNumber("WEBGL_DISJOINT_QUERY_TIMER_EXTENSION_VERSION"))), this.getQueryTime(t8, A().getNumber("WEBGL_DISJOINT_QUERY_TIMER_EXTENSION_VERSION"));
  }
  // Returns elapsed time in milliseconds (query result is nanoseconds), or
  // null when no timer extension is present (e === 0).
  getQueryTime(t8, e) {
    if (e === 0) return null;
    if (e === 2) {
      let o = this.gl;
      return o.getQueryParameter(t8, o.QUERY_RESULT) / 1e6;
    } else {
      let o = this.getQueryTimerExtensionWebGL1();
      return o.getQueryObjectEXT(t8, o.QUERY_RESULT_EXT) / 1e6;
    }
  }
  // A query only counts as available when the GPU timer was not disjoint;
  // the disjoint flag is latched on first read.
  isQueryAvailable(t8, e) {
    if (e === 0) return true;
    if (e === 2) {
      let o = this.gl,
        n = this.getQueryTimerExtensionWebGL2(),
        s = o.getQueryParameter(t8, o.QUERY_RESULT_AVAILABLE);
      return this.disjoint == null && (this.disjoint = this.gl.getParameter(n.GPU_DISJOINT_EXT)), s && !this.disjoint;
    } else {
      let o = this.getQueryTimerExtensionWebGL1(),
        n = o.getQueryObjectEXT(t8, o.QUERY_RESULT_AVAILABLE_EXT);
      return this.disjoint == null && (this.disjoint = this.gl.getParameter(o.GPU_DISJOINT_EXT)), n && !this.disjoint;
    }
  }
  pollFence(t8) {
    return new Promise(e => {
      this.addItemToPoll(() => t8.isFencePassed(), () => e());
    });
  }
  // Resolves every item in the leading run of completed fences (HZ returns
  // the last index of that run) and keeps the rest queued.
  pollItems() {
    let t8 = HZ(this.itemsToPoll.map(e => e.isDoneFn));
    for (let e = 0; e <= t8; ++e) {
      let {
        resolveFn: o
      } = this.itemsToPoll[e];
      o();
    }
    this.itemsToPoll = this.itemsToPoll.slice(t8 + 1);
  }
  // Enqueues a fence; only the first enqueue kicks off the polling loop
  // (later items piggyback on the already-running repeatedTry).
  addItemToPoll(t8, e) {
    if (this.itemsToPoll.push({
      isDoneFn: t8,
      resolveFn: e
    }), this.itemsToPoll.length > 1) return;
    let o;
    "setTimeoutCustom" in A().platform && (o = A().platform.setTimeoutCustom.bind(A().platform)), y.repeatedTry(() => (this.pollItems(), this.itemsToPoll.length === 0), () => 0, null, o);
  }
  bindTextureToFrameBuffer(t8) {
    this.throwIfDisposed(), Ql(this.gl, t8, this.framebuffer), this.debug && Rc(this.gl);
  }
  // Restores the previous output texture as the framebuffer attachment, or
  // detaches entirely when there is none.
  unbindTextureToFrameBuffer() {
    this.outputTexture != null ? (Ql(this.gl, this.outputTexture, this.framebuffer), this.debug && Rc(this.gl)) : Xf(this.gl, this.framebuffer);
  }
  // Runs a readback callback with texture t8 temporarily attached.
  downloadMatrixDriver(t8, e) {
    this.bindTextureToFrameBuffer(t8);
    let o = e();
    return this.unbindTextureToFrameBuffer(), o;
  }
  // Attaches t8 as the render target and sizes viewport/scissor to e x o.
  setOutputMatrixTextureDriver(t8, e, o) {
    this.throwIfDisposed();
    let n = this.gl;
    Ql(n, t8, this.framebuffer), this.debug && Rc(n), this.outputTexture = t8, ce(n, () => n.viewport(0, 0, e, o)), ce(n, () => n.scissor(0, 0, e, o));
  }
  setOutputMatrixWriteRegionDriver(t8, e, o, n) {
    this.throwIfDisposed(), ce(this.gl, () => this.gl.scissor(t8, e, o, n));
  }
  throwIfDisposed() {
    if (this.disposed) throw new Error("Attempted to use disposed GPGPUContext.");
  }
  throwIfNoProgram() {
    if (this.program == null) throw new Error("No GPU program is currently set.");
  }
};
// Returns the index of the last predicate in the leading run of predicates
// that return true; -1 when the first predicate fails or the list is empty.
// Used by pollItems to resolve fences strictly in FIFO order.
function HZ(r) {
  let lastDoneIndex = -1;
  for (const isDone of r) {
    if (!isDone()) {
      break;
    }
    lastDoneIndex++;
  }
  return lastDoneIndex;
}
// Pulls the CPU backend's shared kernel implementations out of the `Tc`
// module (tfjs-backend-cpu's "shared" bundle) under local minified aliases.
// The WebGL backend uses these as CPU forwarding paths (e.g. for small
// tensors and string/ragged/sparse ops that have no GPU kernel).
var {
  addImpl: RR,
  bincountImpl: ph,
  bincountReduceImpl: DR,
  bitwiseAndImpl: AR,
  castImpl: FR,
  ceilImpl: PR,
  concatImpl: OR,
  equalImpl: MR,
  expImpl: LR,
  expm1Impl: BR,
  floorImpl: zR,
  gatherNdImpl: VR,
  gatherV2Impl: WR,
  greaterImpl: UR,
  greaterEqualImpl: GR,
  lessImpl: HR,
  lessEqualImpl: KR,
  linSpaceImpl: qR,
  logImpl: jR,
  maxImpl: XR,
  maximumImpl: YR,
  minimumImpl: QR,
  multiplyImpl: ZR,
  negImpl: JR,
  notEqualImpl: eD,
  prodImpl: tD,
  raggedGatherImpl: rD,
  raggedRangeImpl: oD,
  raggedTensorToTensorImpl: nD,
  rangeImpl: sD,
  rsqrtImpl: aD,
  scatterImpl: iD,
  sigmoidImpl: uD,
  simpleAbsImpl: ch,
  sliceImpl: pD,
  sparseFillEmptyRowsImpl: cD,
  sparseReshapeImpl: lD,
  sparseSegmentReductionImpl: lh,
  sqrtImpl: mD,
  staticRegexReplaceImpl: dD,
  stridedSliceImpl: fD,
  stringNGramsImpl: hD,
  stringSplitImpl: gD,
  stringToHashBucketFastImpl: xD,
  subImpl: yD,
  tileImpl: bD,
  topKImpl: CD,
  transposeImpl: Np,
  uniqueImpl: wD
} = Tc;
// Builds the first `t8` packed-channel accessor strings for GLSL variable
// `r`, e.g. lv("rc", 3) -> ["rc.x", "rc.y", "rc.z"]. Supports up to the six
// conventional component names.
function lv(r, t8) {
  const componentNames = ["x", "y", "z", "w", "u", "v"].slice(0, t8);
  const accessors = [];
  for (const component of componentNames) {
    accessors.push(`${r}.${component}`);
  }
  return accessors;
}
// Coordinate accessor list for a rank-`t8` value: rank 1 references the
// variable directly; higher ranks expand into channel accessors via lv.
function Dt(r, t8) {
  if (t8 === 1) {
    return [r];
  }
  return lv(r, t8);
}
// Joins the first `r` coordinate names from `t8` with commas for use inside
// generated GLSL; rank 1 always uses the scalar coordinate name "rc".
function SD(r, t8) {
  if (r === 1) {
    return "rc";
  }
  let joined = "";
  for (let axis = 0; axis < r; axis++) {
    joined += t8[axis];
    if (axis < r - 1) {
      joined += ",";
    }
  }
  return joined;
}
// Pack program: GLSL generator that reads an unpacked tensor A (one value
// per texel) and writes it in packed layout (a 2x2 block of values per RGBA
// texel). Rank 0 packs the scalar into the first channel; higher ranks pack
// the two innermost dimensions, zero-filling texels past the logical edges.
// `pt` decides whether shapes come in as uniforms or compile-time constants.
var mh = class {
  constructor(t8) {
    if (this.variableNames = ["A"], this.packedInputs = false, this.packedOutput = true, this.outputShape = t8, this.rank = t8.length, this.enableShapeUniforms = pt(this.outputShape.length), this.rank === 0) this.userCode = `
      void main() {
        setOutput(vec4(getA(), 0., 0., 0.));
      }
    `;else {
      let e = Dt("rc", this.rank),
        o = Re(this.rank),
        n = this.getOutOfBoundsCondition(e),
        s = this.getSetup(e),
        a = this.getOutput(e);
      this.userCode = `
        void main() {
          ${o} rc = getOutputCoords();
          if(${n}) {
            setOutput(vec4(0));
          } else {
            ${s}
            setOutput(vec4(${a}));
          }
        }
      `;
    }
  }
  // Builds the four source-coordinate strings for the 2x2 block in channel
  // order: (r,c), (r,cp1), (rp1,c), (rp1,cp1), each prefixed with the outer
  // (batch) coordinates when rank > 2.
  getSourceCoordsArr(t8) {
    let e = [];
    for (let o = 0; o <= 1; o++) for (let n = 0; n <= 1; n++) {
      let s = `${o === 0 ? "r" : "rp1"}, ${n === 0 ? "c" : "cp1"}`;
      for (let a = 2; a < this.rank; a++) s = `${t8[t8.length - 1 - a]},` + s;
      e.push(s);
    }
    return e;
  }
  // GLSL condition that is true when the output coordinate lies outside the
  // logical shape; only the two innermost dims can run past the edge here.
  getOutOfBoundsCondition(t8) {
    if (this.rank === 1) return `rc > ${this.enableShapeUniforms ? "outShape" : this.outputShape[0]}`;
    let e = "";
    for (let o = this.rank - 2; o < this.rank; o++) e += `${t8[o]} >= ${this.enableShapeUniforms ? `outShape[${o}]` : this.outputShape[o]}`, o < this.rank - 1 && (e += "||");
    return e;
  }
  // GLSL prologue declaring r/c, their +1 neighbors, and flags that mark
  // when the neighbor falls off the logical edge of the tensor.
  getSetup(t8) {
    if (this.rank === 1) return "";
    let e = t8.slice(-2),
      o = this.enableShapeUniforms ? `outShape[${this.rank} - 1]` : this.outputShape[this.rank - 1],
      n = this.enableShapeUniforms ? `outShape[${this.rank} - 2]` : this.outputShape[this.rank - 2];
    return `
      int r = ${e[0]};
      int c = ${e[1]};
      int rp1 = r + 1;
      int cp1 = c + 1;
      bool cEdge = cp1 >= ${o};
      bool rEdge = rp1 >= ${n};
    `;
  }
  // The four vec4 components of the packed texel; out-of-range neighbors are
  // replaced with 0 so edge texels are zero-padded.
  getOutput(t8) {
    let e = this.getSourceCoordsArr(t8);
    return this.rank === 1 ? `getA(rc), (rc + 1 >= ${this.enableShapeUniforms ? "outShape" : this.outputShape[0]} ? 0. : getA(rc + 1)), 0, 0` : `getA(${e[0]}),
        cEdge ? 0. : getA(${e[1]}),
        rEdge ? 0. : getA(${e[2]}),
        rEdge || cEdge ? 0. : getA(${e[3]})`;
  }
};
var Vc = class { | |
constructor(t8, e) { | |
this.variableNames = ["A"], this.packedInputs = true, this.packedOutput = true, this.customUniforms = [{ | |
name: "inputShape", | |
type: "ivec3" | |
}], this.outputShape = t8, this.enableShapeUniforms = pt(this.outputShape.length); | |
let o = ""; | |
for (let n = 0; n < 4; n++) { | |
let s = "thisRC = rc;"; | |
n % 2 === 1 && (s += "thisRC.z += 1;"), n > 1 && (s += "thisRC.y += 1;"), o += ` | |
${s} | |
${n > 0 ? "if(thisRC.y < rows && thisRC.z < cols){" : ""} | |
int flatIndex = getFlatIndex(thisRC); | |
ivec3 inputRC = inputCoordsFromReshapedOutCoords(flatIndex); | |
vec2 inputRCInnerDims = vec2(float(inputRC.y),float(inputRC.z)); | |
result[${n}] = | |
getChannel(getA(inputRC.x, inputRC.y, inputRC.z), inputRCInnerDims); | |
${n > 0 ? "}" : ""} | |
`; | |
Sign up for free
to join this conversation on GitHub.
Already have an account?
Sign in to comment