diff --git a/graalwasm/graalwasm-tensorflow/.gitattributes b/graalwasm/graalwasm-tensorflow/.gitattributes new file mode 100644 index 00000000..3b41682a --- /dev/null +++ b/graalwasm/graalwasm-tensorflow/.gitattributes @@ -0,0 +1,2 @@ +/mvnw text eol=lf +*.cmd text eol=crlf diff --git a/graalwasm/graalwasm-tensorflow/.gitignore b/graalwasm/graalwasm-tensorflow/.gitignore new file mode 100644 index 00000000..04bff45a --- /dev/null +++ b/graalwasm/graalwasm-tensorflow/.gitignore @@ -0,0 +1,34 @@ +HELP.md +target/ +!.mvn/wrapper/maven-wrapper.jar +!**/src/main/**/target/ +!**/src/test/**/target/ +/**/src/main/js/node_modules/ + +### STS ### +.apt_generated +.classpath +.factorypath +.project +.settings +.springBeans +.sts4-cache + +### IntelliJ IDEA ### +.idea +*.iws +*.iml +*.ipr + +### NetBeans ### +/nbproject/private/ +/nbbuild/ +/dist/ +/nbdist/ +/.nb-gradle/ +build/ +!**/src/main/**/build/ +!**/src/test/**/build/ + +### VS Code ### +.vscode/ diff --git a/graalwasm/graalwasm-tensorflow/.mvn/wrapper/maven-wrapper.properties b/graalwasm/graalwasm-tensorflow/.mvn/wrapper/maven-wrapper.properties new file mode 100644 index 00000000..d58dfb70 --- /dev/null +++ b/graalwasm/graalwasm-tensorflow/.mvn/wrapper/maven-wrapper.properties @@ -0,0 +1,19 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. 
See the License for the +# specific language governing permissions and limitations +# under the License. +wrapperVersion=3.3.2 +distributionType=only-script +distributionUrl=https://repo.maven.apache.org/maven2/org/apache/maven/apache-maven/3.9.9/apache-maven-3.9.9-bin.zip diff --git a/graalwasm/graalwasm-tensorflow/README.md b/graalwasm/graalwasm-tensorflow/README.md new file mode 100644 index 00000000..aa6434b7 --- /dev/null +++ b/graalwasm/graalwasm-tensorflow/README.md @@ -0,0 +1,15 @@ +# Tensorflow with GraalWasm Spring-boot Demo + +This is a simple Spring Boot application that predicts house prices using a JavaScript function executed via GraalVM, with a web frontend built using Thymeleaf. +This demo illustrates how GraalWasm can be used to embed tensorflow-wasm . The demo also uses GraalJS to access the Tensorflow module through the WebAssembly JavaScript API. + +## Run the Application + +To start the demo, simply run: + +```bash +mvn spring-boot:run +``` + +When the demo runs, open the following URLs in a browser: +Go to: http://localhost:8080 \ No newline at end of file diff --git a/graalwasm/graalwasm-tensorflow/mvnw b/graalwasm/graalwasm-tensorflow/mvnw new file mode 100755 index 00000000..19529ddf --- /dev/null +++ b/graalwasm/graalwasm-tensorflow/mvnw @@ -0,0 +1,259 @@ +#!/bin/sh +# ---------------------------------------------------------------------------- +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +# ---------------------------------------------------------------------------- + +# ---------------------------------------------------------------------------- +# Apache Maven Wrapper startup batch script, version 3.3.2 +# +# Optional ENV vars +# ----------------- +# JAVA_HOME - location of a JDK home dir, required when download maven via java source +# MVNW_REPOURL - repo url base for downloading maven distribution +# MVNW_USERNAME/MVNW_PASSWORD - user and password for downloading maven +# MVNW_VERBOSE - true: enable verbose log; debug: trace the mvnw script; others: silence the output +# ---------------------------------------------------------------------------- + +set -euf +[ "${MVNW_VERBOSE-}" != debug ] || set -x + +# OS specific support. +native_path() { printf %s\\n "$1"; } +case "$(uname)" in +CYGWIN* | MINGW*) + [ -z "${JAVA_HOME-}" ] || JAVA_HOME="$(cygpath --unix "$JAVA_HOME")" + native_path() { cygpath --path --windows "$1"; } + ;; +esac + +# set JAVACMD and JAVACCMD +set_java_home() { + # For Cygwin and MinGW, ensure paths are in Unix format before anything is touched + if [ -n "${JAVA_HOME-}" ]; then + if [ -x "$JAVA_HOME/jre/sh/java" ]; then + # IBM's JDK on AIX uses strange locations for the executables + JAVACMD="$JAVA_HOME/jre/sh/java" + JAVACCMD="$JAVA_HOME/jre/sh/javac" + else + JAVACMD="$JAVA_HOME/bin/java" + JAVACCMD="$JAVA_HOME/bin/javac" + + if [ ! -x "$JAVACMD" ] || [ ! -x "$JAVACCMD" ]; then + echo "The JAVA_HOME environment variable is not defined correctly, so mvnw cannot run." 
>&2 + echo "JAVA_HOME is set to \"$JAVA_HOME\", but \"\$JAVA_HOME/bin/java\" or \"\$JAVA_HOME/bin/javac\" does not exist." >&2 + return 1 + fi + fi + else + JAVACMD="$( + 'set' +e + 'unset' -f command 2>/dev/null + 'command' -v java + )" || : + JAVACCMD="$( + 'set' +e + 'unset' -f command 2>/dev/null + 'command' -v javac + )" || : + + if [ ! -x "${JAVACMD-}" ] || [ ! -x "${JAVACCMD-}" ]; then + echo "The java/javac command does not exist in PATH nor is JAVA_HOME set, so mvnw cannot run." >&2 + return 1 + fi + fi +} + +# hash string like Java String::hashCode +hash_string() { + str="${1:-}" h=0 + while [ -n "$str" ]; do + char="${str%"${str#?}"}" + h=$(((h * 31 + $(LC_CTYPE=C printf %d "'$char")) % 4294967296)) + str="${str#?}" + done + printf %x\\n $h +} + +verbose() { :; } +[ "${MVNW_VERBOSE-}" != true ] || verbose() { printf %s\\n "${1-}"; } + +die() { + printf %s\\n "$1" >&2 + exit 1 +} + +trim() { + # MWRAPPER-139: + # Trims trailing and leading whitespace, carriage returns, tabs, and linefeeds. + # Needed for removing poorly interpreted newline sequences when running in more + # exotic environments such as mingw bash on Windows. 
+ printf "%s" "${1}" | tr -d '[:space:]' +} + +# parse distributionUrl and optional distributionSha256Sum, requires .mvn/wrapper/maven-wrapper.properties +while IFS="=" read -r key value; do + case "${key-}" in + distributionUrl) distributionUrl=$(trim "${value-}") ;; + distributionSha256Sum) distributionSha256Sum=$(trim "${value-}") ;; + esac +done <"${0%/*}/.mvn/wrapper/maven-wrapper.properties" +[ -n "${distributionUrl-}" ] || die "cannot read distributionUrl property in ${0%/*}/.mvn/wrapper/maven-wrapper.properties" + +case "${distributionUrl##*/}" in +maven-mvnd-*bin.*) + MVN_CMD=mvnd.sh _MVNW_REPO_PATTERN=/maven/mvnd/ + case "${PROCESSOR_ARCHITECTURE-}${PROCESSOR_ARCHITEW6432-}:$(uname -a)" in + *AMD64:CYGWIN* | *AMD64:MINGW*) distributionPlatform=windows-amd64 ;; + :Darwin*x86_64) distributionPlatform=darwin-amd64 ;; + :Darwin*arm64) distributionPlatform=darwin-aarch64 ;; + :Linux*x86_64*) distributionPlatform=linux-amd64 ;; + *) + echo "Cannot detect native platform for mvnd on $(uname)-$(uname -m), use pure java version" >&2 + distributionPlatform=linux-amd64 + ;; + esac + distributionUrl="${distributionUrl%-bin.*}-$distributionPlatform.zip" + ;; +maven-mvnd-*) MVN_CMD=mvnd.sh _MVNW_REPO_PATTERN=/maven/mvnd/ ;; +*) MVN_CMD="mvn${0##*/mvnw}" _MVNW_REPO_PATTERN=/org/apache/maven/ ;; +esac + +# apply MVNW_REPOURL and calculate MAVEN_HOME +# maven home pattern: ~/.m2/wrapper/dists/{apache-maven-,maven-mvnd--}/ +[ -z "${MVNW_REPOURL-}" ] || distributionUrl="$MVNW_REPOURL$_MVNW_REPO_PATTERN${distributionUrl#*"$_MVNW_REPO_PATTERN"}" +distributionUrlName="${distributionUrl##*/}" +distributionUrlNameMain="${distributionUrlName%.*}" +distributionUrlNameMain="${distributionUrlNameMain%-bin}" +MAVEN_USER_HOME="${MAVEN_USER_HOME:-${HOME}/.m2}" +MAVEN_HOME="${MAVEN_USER_HOME}/wrapper/dists/${distributionUrlNameMain-}/$(hash_string "$distributionUrl")" + +exec_maven() { + unset MVNW_VERBOSE MVNW_USERNAME MVNW_PASSWORD MVNW_REPOURL || : + exec "$MAVEN_HOME/bin/$MVN_CMD" 
"$@" || die "cannot exec $MAVEN_HOME/bin/$MVN_CMD" +} + +if [ -d "$MAVEN_HOME" ]; then + verbose "found existing MAVEN_HOME at $MAVEN_HOME" + exec_maven "$@" +fi + +case "${distributionUrl-}" in +*?-bin.zip | *?maven-mvnd-?*-?*.zip) ;; +*) die "distributionUrl is not valid, must match *-bin.zip or maven-mvnd-*.zip, but found '${distributionUrl-}'" ;; +esac + +# prepare tmp dir +if TMP_DOWNLOAD_DIR="$(mktemp -d)" && [ -d "$TMP_DOWNLOAD_DIR" ]; then + clean() { rm -rf -- "$TMP_DOWNLOAD_DIR"; } + trap clean HUP INT TERM EXIT +else + die "cannot create temp dir" +fi + +mkdir -p -- "${MAVEN_HOME%/*}" + +# Download and Install Apache Maven +verbose "Couldn't find MAVEN_HOME, downloading and installing it ..." +verbose "Downloading from: $distributionUrl" +verbose "Downloading to: $TMP_DOWNLOAD_DIR/$distributionUrlName" + +# select .zip or .tar.gz +if ! command -v unzip >/dev/null; then + distributionUrl="${distributionUrl%.zip}.tar.gz" + distributionUrlName="${distributionUrl##*/}" +fi + +# verbose opt +__MVNW_QUIET_WGET=--quiet __MVNW_QUIET_CURL=--silent __MVNW_QUIET_UNZIP=-q __MVNW_QUIET_TAR='' +[ "${MVNW_VERBOSE-}" != true ] || __MVNW_QUIET_WGET='' __MVNW_QUIET_CURL='' __MVNW_QUIET_UNZIP='' __MVNW_QUIET_TAR=v + +# normalize http auth +case "${MVNW_PASSWORD:+has-password}" in +'') MVNW_USERNAME='' MVNW_PASSWORD='' ;; +has-password) [ -n "${MVNW_USERNAME-}" ] || MVNW_USERNAME='' MVNW_PASSWORD='' ;; +esac + +if [ -z "${MVNW_USERNAME-}" ] && command -v wget >/dev/null; then + verbose "Found wget ... using wget" + wget ${__MVNW_QUIET_WGET:+"$__MVNW_QUIET_WGET"} "$distributionUrl" -O "$TMP_DOWNLOAD_DIR/$distributionUrlName" || die "wget: Failed to fetch $distributionUrl" +elif [ -z "${MVNW_USERNAME-}" ] && command -v curl >/dev/null; then + verbose "Found curl ... 
using curl" + curl ${__MVNW_QUIET_CURL:+"$__MVNW_QUIET_CURL"} -f -L -o "$TMP_DOWNLOAD_DIR/$distributionUrlName" "$distributionUrl" || die "curl: Failed to fetch $distributionUrl" +elif set_java_home; then + verbose "Falling back to use Java to download" + javaSource="$TMP_DOWNLOAD_DIR/Downloader.java" + targetZip="$TMP_DOWNLOAD_DIR/$distributionUrlName" + cat >"$javaSource" <<-END + public class Downloader extends java.net.Authenticator + { + protected java.net.PasswordAuthentication getPasswordAuthentication() + { + return new java.net.PasswordAuthentication( System.getenv( "MVNW_USERNAME" ), System.getenv( "MVNW_PASSWORD" ).toCharArray() ); + } + public static void main( String[] args ) throws Exception + { + setDefault( new Downloader() ); + java.nio.file.Files.copy( java.net.URI.create( args[0] ).toURL().openStream(), java.nio.file.Paths.get( args[1] ).toAbsolutePath().normalize() ); + } + } + END + # For Cygwin/MinGW, switch paths to Windows format before running javac and java + verbose " - Compiling Downloader.java ..." + "$(native_path "$JAVACCMD")" "$(native_path "$javaSource")" || die "Failed to compile Downloader.java" + verbose " - Running Downloader.java ..." + "$(native_path "$JAVACMD")" -cp "$(native_path "$TMP_DOWNLOAD_DIR")" Downloader "$distributionUrl" "$(native_path "$targetZip")" +fi + +# If specified, validate the SHA-256 sum of the Maven distribution zip file +if [ -n "${distributionSha256Sum-}" ]; then + distributionSha256Result=false + if [ "$MVN_CMD" = mvnd.sh ]; then + echo "Checksum validation is not supported for maven-mvnd." >&2 + echo "Please disable validation by removing 'distributionSha256Sum' from your maven-wrapper.properties." 
>&2 + exit 1 + elif command -v sha256sum >/dev/null; then + if echo "$distributionSha256Sum $TMP_DOWNLOAD_DIR/$distributionUrlName" | sha256sum -c >/dev/null 2>&1; then + distributionSha256Result=true + fi + elif command -v shasum >/dev/null; then + if echo "$distributionSha256Sum $TMP_DOWNLOAD_DIR/$distributionUrlName" | shasum -a 256 -c >/dev/null 2>&1; then + distributionSha256Result=true + fi + else + echo "Checksum validation was requested but neither 'sha256sum' or 'shasum' are available." >&2 + echo "Please install either command, or disable validation by removing 'distributionSha256Sum' from your maven-wrapper.properties." >&2 + exit 1 + fi + if [ $distributionSha256Result = false ]; then + echo "Error: Failed to validate Maven distribution SHA-256, your Maven distribution might be compromised." >&2 + echo "If you updated your Maven version, you need to update the specified distributionSha256Sum property." >&2 + exit 1 + fi +fi + +# unzip and move +if command -v unzip >/dev/null; then + unzip ${__MVNW_QUIET_UNZIP:+"$__MVNW_QUIET_UNZIP"} "$TMP_DOWNLOAD_DIR/$distributionUrlName" -d "$TMP_DOWNLOAD_DIR" || die "failed to unzip" +else + tar xzf${__MVNW_QUIET_TAR:+"$__MVNW_QUIET_TAR"} "$TMP_DOWNLOAD_DIR/$distributionUrlName" -C "$TMP_DOWNLOAD_DIR" || die "failed to untar" +fi +printf %s\\n "$distributionUrl" >"$TMP_DOWNLOAD_DIR/$distributionUrlNameMain/mvnw.url" +mv -- "$TMP_DOWNLOAD_DIR/$distributionUrlNameMain" "$MAVEN_HOME" || [ -d "$MAVEN_HOME" ] || die "fail to move MAVEN_HOME" + +clean || : +exec_maven "$@" diff --git a/graalwasm/graalwasm-tensorflow/mvnw.cmd b/graalwasm/graalwasm-tensorflow/mvnw.cmd new file mode 100644 index 00000000..249bdf38 --- /dev/null +++ b/graalwasm/graalwasm-tensorflow/mvnw.cmd @@ -0,0 +1,149 @@ +<# : batch portion +@REM ---------------------------------------------------------------------------- +@REM Licensed to the Apache Software Foundation (ASF) under one +@REM or more contributor license agreements. 
See the NOTICE file +@REM distributed with this work for additional information +@REM regarding copyright ownership. The ASF licenses this file +@REM to you under the Apache License, Version 2.0 (the +@REM "License"); you may not use this file except in compliance +@REM with the License. You may obtain a copy of the License at +@REM +@REM http://www.apache.org/licenses/LICENSE-2.0 +@REM +@REM Unless required by applicable law or agreed to in writing, +@REM software distributed under the License is distributed on an +@REM "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +@REM KIND, either express or implied. See the License for the +@REM specific language governing permissions and limitations +@REM under the License. +@REM ---------------------------------------------------------------------------- + +@REM ---------------------------------------------------------------------------- +@REM Apache Maven Wrapper startup batch script, version 3.3.2 +@REM +@REM Optional ENV vars +@REM MVNW_REPOURL - repo url base for downloading maven distribution +@REM MVNW_USERNAME/MVNW_PASSWORD - user and password for downloading maven +@REM MVNW_VERBOSE - true: enable verbose log; others: silence the output +@REM ---------------------------------------------------------------------------- + +@IF "%__MVNW_ARG0_NAME__%"=="" (SET __MVNW_ARG0_NAME__=%~nx0) +@SET __MVNW_CMD__= +@SET __MVNW_ERROR__= +@SET __MVNW_PSMODULEP_SAVE=%PSModulePath% +@SET PSModulePath= +@FOR /F "usebackq tokens=1* delims==" %%A IN (`powershell -noprofile "& {$scriptDir='%~dp0'; $script='%__MVNW_ARG0_NAME__%'; icm -ScriptBlock ([Scriptblock]::Create((Get-Content -Raw '%~f0'))) -NoNewScope}"`) DO @( + IF "%%A"=="MVN_CMD" (set __MVNW_CMD__=%%B) ELSE IF "%%B"=="" (echo %%A) ELSE (echo %%A=%%B) +) +@SET PSModulePath=%__MVNW_PSMODULEP_SAVE% +@SET __MVNW_PSMODULEP_SAVE= +@SET __MVNW_ARG0_NAME__= +@SET MVNW_USERNAME= +@SET MVNW_PASSWORD= +@IF NOT "%__MVNW_CMD__%"=="" (%__MVNW_CMD__% %*) +@echo Cannot start maven from 
wrapper >&2 && exit /b 1 +@GOTO :EOF +: end batch / begin powershell #> + +$ErrorActionPreference = "Stop" +if ($env:MVNW_VERBOSE -eq "true") { + $VerbosePreference = "Continue" +} + +# calculate distributionUrl, requires .mvn/wrapper/maven-wrapper.properties +$distributionUrl = (Get-Content -Raw "$scriptDir/.mvn/wrapper/maven-wrapper.properties" | ConvertFrom-StringData).distributionUrl +if (!$distributionUrl) { + Write-Error "cannot read distributionUrl property in $scriptDir/.mvn/wrapper/maven-wrapper.properties" +} + +switch -wildcard -casesensitive ( $($distributionUrl -replace '^.*/','') ) { + "maven-mvnd-*" { + $USE_MVND = $true + $distributionUrl = $distributionUrl -replace '-bin\.[^.]*$',"-windows-amd64.zip" + $MVN_CMD = "mvnd.cmd" + break + } + default { + $USE_MVND = $false + $MVN_CMD = $script -replace '^mvnw','mvn' + break + } +} + +# apply MVNW_REPOURL and calculate MAVEN_HOME +# maven home pattern: ~/.m2/wrapper/dists/{apache-maven-,maven-mvnd--}/ +if ($env:MVNW_REPOURL) { + $MVNW_REPO_PATTERN = if ($USE_MVND) { "/org/apache/maven/" } else { "/maven/mvnd/" } + $distributionUrl = "$env:MVNW_REPOURL$MVNW_REPO_PATTERN$($distributionUrl -replace '^.*'+$MVNW_REPO_PATTERN,'')" +} +$distributionUrlName = $distributionUrl -replace '^.*/','' +$distributionUrlNameMain = $distributionUrlName -replace '\.[^.]*$','' -replace '-bin$','' +$MAVEN_HOME_PARENT = "$HOME/.m2/wrapper/dists/$distributionUrlNameMain" +if ($env:MAVEN_USER_HOME) { + $MAVEN_HOME_PARENT = "$env:MAVEN_USER_HOME/wrapper/dists/$distributionUrlNameMain" +} +$MAVEN_HOME_NAME = ([System.Security.Cryptography.MD5]::Create().ComputeHash([byte[]][char[]]$distributionUrl) | ForEach-Object {$_.ToString("x2")}) -join '' +$MAVEN_HOME = "$MAVEN_HOME_PARENT/$MAVEN_HOME_NAME" + +if (Test-Path -Path "$MAVEN_HOME" -PathType Container) { + Write-Verbose "found existing MAVEN_HOME at $MAVEN_HOME" + Write-Output "MVN_CMD=$MAVEN_HOME/bin/$MVN_CMD" + exit $? +} + +if (! 
$distributionUrlNameMain -or ($distributionUrlName -eq $distributionUrlNameMain)) { + Write-Error "distributionUrl is not valid, must end with *-bin.zip, but found $distributionUrl" +} + +# prepare tmp dir +$TMP_DOWNLOAD_DIR_HOLDER = New-TemporaryFile +$TMP_DOWNLOAD_DIR = New-Item -Itemtype Directory -Path "$TMP_DOWNLOAD_DIR_HOLDER.dir" +$TMP_DOWNLOAD_DIR_HOLDER.Delete() | Out-Null +trap { + if ($TMP_DOWNLOAD_DIR.Exists) { + try { Remove-Item $TMP_DOWNLOAD_DIR -Recurse -Force | Out-Null } + catch { Write-Warning "Cannot remove $TMP_DOWNLOAD_DIR" } + } +} + +New-Item -Itemtype Directory -Path "$MAVEN_HOME_PARENT" -Force | Out-Null + +# Download and Install Apache Maven +Write-Verbose "Couldn't find MAVEN_HOME, downloading and installing it ..." +Write-Verbose "Downloading from: $distributionUrl" +Write-Verbose "Downloading to: $TMP_DOWNLOAD_DIR/$distributionUrlName" + +$webclient = New-Object System.Net.WebClient +if ($env:MVNW_USERNAME -and $env:MVNW_PASSWORD) { + $webclient.Credentials = New-Object System.Net.NetworkCredential($env:MVNW_USERNAME, $env:MVNW_PASSWORD) +} +[Net.ServicePointManager]::SecurityProtocol = [Net.SecurityProtocolType]::Tls12 +$webclient.DownloadFile($distributionUrl, "$TMP_DOWNLOAD_DIR/$distributionUrlName") | Out-Null + +# If specified, validate the SHA-256 sum of the Maven distribution zip file +$distributionSha256Sum = (Get-Content -Raw "$scriptDir/.mvn/wrapper/maven-wrapper.properties" | ConvertFrom-StringData).distributionSha256Sum +if ($distributionSha256Sum) { + if ($USE_MVND) { + Write-Error "Checksum validation is not supported for maven-mvnd. `nPlease disable validation by removing 'distributionSha256Sum' from your maven-wrapper.properties." 
+ } + Import-Module $PSHOME\Modules\Microsoft.PowerShell.Utility -Function Get-FileHash + if ((Get-FileHash "$TMP_DOWNLOAD_DIR/$distributionUrlName" -Algorithm SHA256).Hash.ToLower() -ne $distributionSha256Sum) { + Write-Error "Error: Failed to validate Maven distribution SHA-256, your Maven distribution might be compromised. If you updated your Maven version, you need to update the specified distributionSha256Sum property." + } +} + +# unzip and move +Expand-Archive "$TMP_DOWNLOAD_DIR/$distributionUrlName" -DestinationPath "$TMP_DOWNLOAD_DIR" | Out-Null +Rename-Item -Path "$TMP_DOWNLOAD_DIR/$distributionUrlNameMain" -NewName $MAVEN_HOME_NAME | Out-Null +try { + Move-Item -Path "$TMP_DOWNLOAD_DIR/$MAVEN_HOME_NAME" -Destination $MAVEN_HOME_PARENT | Out-Null +} catch { + if (! (Test-Path -Path "$MAVEN_HOME" -PathType Container)) { + Write-Error "fail to move MAVEN_HOME" + } +} finally { + try { Remove-Item $TMP_DOWNLOAD_DIR -Recurse -Force | Out-Null } + catch { Write-Warning "Cannot remove $TMP_DOWNLOAD_DIR" } +} + +Write-Output "MVN_CMD=$MAVEN_HOME/bin/$MVN_CMD" diff --git a/graalwasm/graalwasm-tensorflow/pom.xml b/graalwasm/graalwasm-tensorflow/pom.xml new file mode 100644 index 00000000..cc4c0549 --- /dev/null +++ b/graalwasm/graalwasm-tensorflow/pom.xml @@ -0,0 +1,161 @@ + + + 4.0.0 + + org.springframework.boot + spring-boot-starter-parent + 3.4.5 + + + com.example + Tensorflow + 0.0.1-SNAPSHOT + Tensorflow + Demo project for Spring Boot + + + + + + + + + + + + + + + 21 + + + + org.springframework.boot + spring-boot-starter-data-jpa + + + org.springframework.boot + spring-boot-starter-web + + + org.graalvm.polyglot + polyglot + 24.2.0 + + + org.graalvm.js + js-language + 24.2.0 + + + org.springframework.boot + spring-boot-starter-thymeleaf + + + + org.graalvm.wasm + wasm-language + 24.2.0 + + + org.graalvm.truffle + truffle-api + 24.2.0 + + + org.graalvm.truffle + truffle-runtime + 24.2.0 + + + org.projectlombok + lombok + true + + + org.springframework.boot + 
spring-boot-starter-test + test + + + org.openjdk.jmh + jmh-core + 1.37 + + + org.openjdk.jmh + jmh-generator-annprocess + 1.37 + + + + + + + com.github.eirslett + frontend-maven-plugin + 1.15.0 + + + v21.7.2 + src/main/js + target + + + + + + install node and npm + install-node-and-npm + + + + + npm install + npm + + + + + webpack build + webpack + + --mode production + + ${project.build.outputDirectory}/bundle + + + + + + + org.springframework.boot + spring-boot-maven-plugin + + + + org.projectlombok + lombok + + + + + + org.apache.maven.plugins + maven-compiler-plugin + 3.8.1 + + + + org.openjdk.jmh + jmh-generator-annprocess + 1.37 + + + + + + + + diff --git a/graalwasm/graalwasm-tensorflow/src/main/java/com/example/Tensorflow/BenchmarkClass.java b/graalwasm/graalwasm-tensorflow/src/main/java/com/example/Tensorflow/BenchmarkClass.java new file mode 100644 index 00000000..34e7a35e --- /dev/null +++ b/graalwasm/graalwasm-tensorflow/src/main/java/com/example/Tensorflow/BenchmarkClass.java @@ -0,0 +1,60 @@ +package com.example.Tensorflow; + +import org.graalvm.polyglot.Context; +import org.openjdk.jmh.annotations.*; +import org.openjdk.jmh.infra.Blackhole; + +import java.io.IOException; +import java.util.List; +import java.util.concurrent.ThreadLocalRandom; +import java.util.concurrent.TimeUnit; + +@Warmup(iterations = 6, time = 10) +@Measurement(iterations = 6, time = 10) +@Fork(1) +@BenchmarkMode(Mode.AverageTime) +@OutputTimeUnit(TimeUnit.MILLISECONDS) +@State(Scope.Benchmark) +public class BenchmarkClass { + ContextPool contextPool; + + public BenchmarkClass() { + try { + this.contextPool = new ContextPool(); + } catch (IOException e) { + throw new RuntimeException("Failed to initialize ContextPool", e); + } + } + + @Benchmark + public void predict(Blackhole blackhole){ + Context context = contextPool.getContext(); + try { + PredictFunction predictFn = context.getBindings("js").getMember("predictHouse").as(PredictFunction.class); + + double[] houseFeatures = 
new double[12]; + for (int i = 0; i < houseFeatures.length; i++) { + houseFeatures[i] = ThreadLocalRandom.current().nextDouble(1, 5000); + } + + Promise>> prediction = predictFn.apply(houseFeatures); + + prediction.then(result -> blackhole.consume(result.get(0).get(0))); + } finally { + contextPool.release(context); + } + } + + public interface PredictFunction { + Promise>> apply(double[] houseFeatures); + } + + public interface Promise { + void then(Callback callback); + } + + @FunctionalInterface + public interface Callback { + void accept(T result); + } +} \ No newline at end of file diff --git a/graalwasm/graalwasm-tensorflow/src/main/java/com/example/Tensorflow/ContextPool.java b/graalwasm/graalwasm-tensorflow/src/main/java/com/example/Tensorflow/ContextPool.java new file mode 100644 index 00000000..92b54573 --- /dev/null +++ b/graalwasm/graalwasm-tensorflow/src/main/java/com/example/Tensorflow/ContextPool.java @@ -0,0 +1,140 @@ +package com.example.Tensorflow; + +import org.graalvm.polyglot.Context; +import org.graalvm.polyglot.HostAccess; +import org.graalvm.polyglot.Source; +import org.graalvm.polyglot.Value; +import org.springframework.stereotype.Component; + +import java.io.IOException; +import java.nio.file.Files; +import java.nio.file.Paths; +import java.util.HashMap; +import java.util.Map; +import java.util.concurrent.BlockingQueue; +import java.util.concurrent.LinkedBlockingQueue; + +@Component +public class ContextPool { + private final BlockingQueue contexts; + + private static Map getLanguageOptions() { + Map options = new HashMap<>(); + options.put("js.ecmascript-version", "2023"); + options.put("js.top-level-await", "true"); + options.put("js.webassembly", "true"); + options.put("js.performance", "true"); + options.put("js.commonjs-require", "true"); + options.put("js.esm-eval-returns-exports", "true"); + options.put("js.unhandled-rejections", "throw"); + options.put("js.commonjs-require-cwd", Paths.get("./").toAbsolutePath().toString()); + 
return options; + } + + public ContextPool () throws IOException { + + Context context = Context.newBuilder("js", "wasm") + .allowAllAccess(true) + .options(getLanguageOptions()) + .build(); + + + ExcelizePool excelizePool = new ExcelizePool(); + Context context1 = excelizePool.getContext(); + byte[] excelBytes = Files.readAllBytes(Paths.get("./src/main/resources/data.xlsx")); + + + Value readFunc = context1.getBindings("js").getMember("readExcel"); + Value bufferArray = readFunc.execute(excelBytes); + + + byte[] tsfjswasm = Files.readAllBytes(Paths.get("./src/main/resources/tfjs-backend-wasm-simd.wasm")); + context.getBindings("js").putMember("tsfwasm", tsfjswasm); + + String polyfill= """ + (() => { + const NativeURL = globalThis.URL; + + class FakeURL { + constructor(input, base) { + this.href = input; + } + + toString() { + return this.href; + } + } + + globalThis.URL = FakeURL; + + globalThis.fetch = async function (url) { + const tsfwasm = './tfjs-backend-wasm-simd.wasm' + const target = (typeof url === 'object' && 'href' in url) ? url.href : url; + if (target === tsfwasm) { + return { + async arrayBuffer() { + return globalThis.tsfwasm; + }, + ok: true, + status: 200, + }; + } + else { + throw new Error(`Unhandled fetch to: ${target}`); + } + }; + })(); + if (typeof WebAssembly.instantiateStreaming !== "function") { + WebAssembly.instantiateStreaming = async (sourcePromise, importObject) => { + // Assume `globalThis.tsfwasm` is already a Uint8Array or ArrayBuffer + const buffer = globalThis.tsfwasm instanceof Uint8Array + ? 
globalThis.tsfwasm.buffer + : globalThis.tsfwasm; + + return WebAssembly.instantiate(new Uint8Array(buffer), importObject); + }; + } + globalThis.self = globalThis; + globalThis.window = globalThis; + globalThis.document = { body: {} } + globalThis.window.location = { href: '' } + """ + ; + context.eval("js",polyfill); + Source bundleSrc = Source.newBuilder("js",ContextPool.class.getResource("/bundle/bundle.mjs")).build(); + context.eval(bundleSrc); + context.getBindings("js").getMember("trainModel").execute(bufferArray); + System.out.println( context.getBindings("js").getMember("savedArtifacts")); + + + int maxThreads = Runtime.getRuntime().availableProcessors(); + contexts = new LinkedBlockingQueue<>(maxThreads); + for (int i = 0; i < maxThreads; i++) { + Context modelContext = Context.newBuilder("js", "wasm") + .allowHostAccess(HostAccess.ALL) + .allowAllAccess(true) + .options(getLanguageOptions()) + .build(); + + modelContext.getBindings("js").putMember("tsfwasm", tsfjswasm); + modelContext.eval("js",polyfill); + + modelContext.eval(bundleSrc); + Value mdl = context.getBindings("js").getMember("savedArtifacts"); + modelContext.getBindings("js").putMember("savedArtifacts",mdl); + this.contexts.add(modelContext); + + } + } + + public Context getContext() { + try { + return contexts.take(); + } catch (InterruptedException e) { + throw new RuntimeException(e); + } + } + void release(Context context) { + contexts.add(context); + } +} diff --git a/graalwasm/graalwasm-tensorflow/src/main/java/com/example/Tensorflow/Controller.java b/graalwasm/graalwasm-tensorflow/src/main/java/com/example/Tensorflow/Controller.java new file mode 100644 index 00000000..32af5e07 --- /dev/null +++ b/graalwasm/graalwasm-tensorflow/src/main/java/com/example/Tensorflow/Controller.java @@ -0,0 +1,69 @@ +package com.example.Tensorflow; + +import org.graalvm.polyglot.Context; +import org.graalvm.polyglot.Value; +import org.springframework.ui.Model; +import 
org.springframework.web.bind.annotation.PostMapping; +import org.springframework.web.bind.annotation.RequestParam; +import org.graalvm.polyglot.proxy.ProxyExecutable; +import org.springframework.web.context.request.async.DeferredResult; + +import java.util.stream.DoubleStream; + +@org.springframework.stereotype.Controller +public class Controller { + private final ContextPool contextPool; + public Controller(ContextPool contextPool){ + + this.contextPool = contextPool; + } + + @PostMapping("/predict") + public DeferredResult predictPrice( + @RequestParam double bedrooms, + @RequestParam double bathrooms, + @RequestParam double sqftLiving, + @RequestParam double sqftLot, + @RequestParam double floors, + @RequestParam double waterfront, + @RequestParam double view, + @RequestParam double condition, + @RequestParam double sqftAbove, + @RequestParam double sqftBasement, + @RequestParam double yrBuilt, + @RequestParam double yrRenovated, + Model model) { + DeferredResult deferredResult = new DeferredResult<>(); + + double[] newHouse = { + bedrooms, bathrooms, sqftLiving, sqftLot, + floors, waterfront, view, condition, + sqftAbove, sqftBasement, yrBuilt, yrRenovated + }; + + Context context = contextPool.getContext(); + try { + Value predictFn = context.getBindings("js").getMember("predictHouse"); + + Value jsArray = context.eval("js", "Array"); + Value input = jsArray.newInstance((Object[]) DoubleStream.of(newHouse).boxed().toArray()); + + Value prediction = predictFn.execute(input); + + prediction.invokeMember("then", (ProxyExecutable) result -> { + + System.out.println("results from java side" + result); + double price = result[0].getArrayElement(0).getArrayElement(0).asDouble(); + long roundedPrice = Math.round(price); + model.addAttribute("predictedPrice", roundedPrice); + deferredResult.setResult("index"); + return null; + }); + }finally { + contextPool.release(context); + } + + + return deferredResult; + } +} diff --git 
a/graalwasm/graalwasm-tensorflow/src/main/java/com/example/Tensorflow/ExcelizePool.java b/graalwasm/graalwasm-tensorflow/src/main/java/com/example/Tensorflow/ExcelizePool.java new file mode 100644 index 00000000..94a3d896 --- /dev/null +++ b/graalwasm/graalwasm-tensorflow/src/main/java/com/example/Tensorflow/ExcelizePool.java @@ -0,0 +1,72 @@ +package com.example.Tensorflow; + +import org.graalvm.polyglot.*; +import org.springframework.stereotype.Component; + +import java.io.IOException; +import java.net.URI; +import java.nio.charset.StandardCharsets; +import java.nio.file.Files; +import java.nio.file.Paths; +import java.util.HashMap; +import java.util.Map; + +@Component + +public class ExcelizePool { + private final Context context; + + public ExcelizePool() throws IOException { + + // Use regular file paths + byte[] excelizeWasmBytes = Files.readAllBytes(Paths.get("src/main/resources/excelize.wasm")); + String test = Files.readString(Paths.get("src/main/resources/excelize.js"), StandardCharsets.UTF_8); + String prep = Files.readString(Paths.get("src/main/resources/excelize_prep.js"), StandardCharsets.UTF_8); + String excelizeLib = Files.readString(Paths.get("src/main/resources/excelize_m.js"), StandardCharsets.UTF_8); + + System.out.println("Executing excelize read..."); + + // Configure engine options + Map options = new HashMap<>(); + options.put("js.webassembly", "true"); + options.put("js.text-encoding","true"); + + Map engineOptions = new HashMap<>(); + //engineOptions.put("engine.CompilerThreads", "1"); + engineOptions.put("engine.MultiTier", "true"); + engineOptions.put("engine.Mode", "throughput"); + + Engine engine = Engine.newBuilder("js", "wasm") + .allowExperimentalOptions(true) + .options(engineOptions) + .build(); + + // Build the context + Context context = Context.newBuilder("js", "wasm") + .engine(engine) + .allowAllAccess(true) + .options(options) + .build(); + + + context.eval(Source.newBuilder("js", prep, "prep.js").build()); + + // Evaluate 
the Excelize WASM module + Source excelizeModule = Source.newBuilder("js", excelizeLib, "excelize.mjs") + .mimeType("application/javascript+module") + .uri(URI.create("excelize.mjs")) + .build(); + Value excelizeMod = context.eval(excelizeModule); + context.getPolyglotBindings().putMember("excelize", excelizeMod); + context.getBindings("js").putMember("wasmBytes", excelizeWasmBytes); + + // Evaluate test script + context.eval(Source.newBuilder("js", test, "excelize.js").build()); + + this.context = context; + } + + public Context getContext() { + return context; + } +} diff --git a/graalwasm/graalwasm-tensorflow/src/main/java/com/example/Tensorflow/MainBenchmark.java b/graalwasm/graalwasm-tensorflow/src/main/java/com/example/Tensorflow/MainBenchmark.java new file mode 100644 index 00000000..ca79c0c1 --- /dev/null +++ b/graalwasm/graalwasm-tensorflow/src/main/java/com/example/Tensorflow/MainBenchmark.java @@ -0,0 +1,7 @@ +package com.example.Tensorflow; + +public class MainBenchmark { + public static void main(String[] args) throws Exception { + org.openjdk.jmh.Main.main(args); + } +} diff --git a/graalwasm/graalwasm-tensorflow/src/main/java/com/example/Tensorflow/TensorflowApplication.java b/graalwasm/graalwasm-tensorflow/src/main/java/com/example/Tensorflow/TensorflowApplication.java new file mode 100644 index 00000000..1a260a91 --- /dev/null +++ b/graalwasm/graalwasm-tensorflow/src/main/java/com/example/Tensorflow/TensorflowApplication.java @@ -0,0 +1,22 @@ +package com.example.Tensorflow; + +import org.springframework.boot.SpringApplication; +import org.springframework.boot.autoconfigure.SpringBootApplication; +import org.springframework.boot.autoconfigure.jdbc.DataSourceAutoConfiguration; +import org.springframework.boot.autoconfigure.jdbc.DataSourceTransactionManagerAutoConfiguration; +import org.springframework.boot.autoconfigure.orm.jpa.HibernateJpaAutoConfiguration; + +@SpringBootApplication( + exclude = { + DataSourceAutoConfiguration.class, + 
DataSourceTransactionManagerAutoConfiguration.class, + HibernateJpaAutoConfiguration.class + } +) +public class TensorflowApplication { + + public static void main(String[] args) { + SpringApplication.run(TensorflowApplication.class, args); + } + +} diff --git a/graalwasm/graalwasm-tensorflow/src/main/js/main.mjs b/graalwasm/graalwasm-tensorflow/src/main/js/main.mjs new file mode 100644 index 00000000..217a790e --- /dev/null +++ b/graalwasm/graalwasm-tensorflow/src/main/js/main.mjs @@ -0,0 +1,133 @@ +import 'fast-text-encoding'; +import * as tf from '@tensorflow/tfjs'; +import { setWasmPaths } from '@tensorflow/tfjs-backend-wasm'; + +// Set backend to WebAssembly +setWasmPaths('./'); +await tf.setBackend('wasm'); +await tf.ready(); +globalThis.predictHouse = async function(houseFeatures) { + const model = await tf.loadLayersModel({ + load: async () => savedArtifacts + }); + const inputTensor = tf.tensor2d([houseFeatures]); + + const prediction = model.predict(inputTensor); + prediction.print(); + return prediction.array(); +}; + +let model; // + +globalThis.savedArtifacts = null; +globalThis.trainModel = function(datasetPromise) { + datasetPromise.then(dataset => { + const inputs = []; + const prices = []; + + const dataRows = dataset.slice(1); // Skip header + dataRows.forEach(row => { + const numbers = row.map(val => { + const num = Number(val); + return isNaN(num) ? 
0 : num; + }); + + const [price, ...features] = numbers; + prices.push([price]); + inputs.push(features); + }); + + const featureTensor = tf.tensor2d(inputs); + const min = featureTensor.min(0); + const max = featureTensor.max(0); + const range = max.sub(min); + const safeRange = range.add(tf.tensor1d(Array(range.shape[0]).fill(1e-7))); + const normalizedFeatures = featureTensor.sub(min).div(safeRange); + + const labelTensor = tf.tensor2d(prices); + + model = tf.sequential(); + model.add(tf.layers.dense({ inputShape: [normalizedFeatures.shape[1]], units: 12, activation: 'relu' })); + model.add(tf.layers.dense({ units: 6, activation: 'relu' })); + model.add(tf.layers.dense({ units: 1 })); + + model.compile({ optimizer: 'adam', loss: 'meanSquaredError' }); + + model.fit(normalizedFeatures, labelTensor, { + epochs: 200, + batchSize: 1, + verbose: 1, + }).then(() => { + model.save({ + async save(modelArtifacts) { + savedArtifacts = modelArtifacts; + return { + modelArtifactsInfo: { + dateSaved: new Date(), + modelTopologyType: 'JSON', + } + }; + } + }).then(() => { + Polyglot.export("savedArtifacts", savedArtifacts); + }); + }); + }); +}; + +/* +async function save(modelArtifacts) { + if (modelArtifacts.modelTopology instanceof ArrayBuffer) { + throw new Error('BrowserLocalStorage.save() does not support saving model topology ' + + 'in binary formats yet.'); + } + else { + const topology = JSON.stringify(modelArtifacts.modelTopology); + const weightSpecs = JSON.stringify(modelArtifacts.weightSpecs); + const modelArtifactsInfo = getModelArtifactsInfoForJSON(modelArtifacts); + // TODO(mattsoulanille): Support saving models over 2GB that exceed + // Chrome's ArrayBuffer size limit. 
+ const weightBuffer = CompositeArrayBuffer.join(modelArtifacts.weightData); + try { + this.LS.setItem(this.keys.info, JSON.stringify(modelArtifactsInfo)); + this.LS.setItem(this.keys.topology, topology); + this.LS.setItem(this.keys.weightSpecs, weightSpecs); + this.LS.setItem(this.keys.weightData, arrayBufferToBase64String(weightBuffer)); + // Note that JSON.stringify doesn't write out keys that have undefined + // values, so for some keys, we set undefined instead of a null-ish + // value. + const metadata = { + format: modelArtifacts.format, + generatedBy: modelArtifacts.generatedBy, + convertedBy: modelArtifacts.convertedBy, + signature: modelArtifacts.signature != null ? + modelArtifacts.signature : + undefined, + userDefinedMetadata: modelArtifacts.userDefinedMetadata != null ? + modelArtifacts.userDefinedMetadata : + undefined, + modelInitializer: modelArtifacts.modelInitializer != null ? + modelArtifacts.modelInitializer : + undefined, + initializerSignature: modelArtifacts.initializerSignature != null ? + modelArtifacts.initializerSignature : + undefined, + trainingConfig: modelArtifacts.trainingConfig != null ? + modelArtifacts.trainingConfig : + undefined + }; + this.LS.setItem(this.keys.modelMetadata, JSON.stringify(metadata)); + return { modelArtifactsInfo }; + } + catch (err) { + // If saving failed, clean up all items saved so far. 
+ removeItems(this.keys); + throw new Error(`Failed to save model '${this.modelPath}' to local storage: ` + + `size quota being exceeded is a possible cause of this failure: ` + + `modelTopologyBytes=${modelArtifactsInfo.modelTopologyBytes}, ` + + `weightSpecsBytes=${modelArtifactsInfo.weightSpecsBytes}, ` + + `weightDataBytes=${modelArtifactsInfo.weightDataBytes}.`); + } + } +} +*/ \ No newline at end of file diff --git a/graalwasm/graalwasm-tensorflow/src/main/js/package-lock.json b/graalwasm/graalwasm-tensorflow/src/main/js/package-lock.json new file mode 100644 index 00000000..b1554b08 --- /dev/null +++ b/graalwasm/graalwasm-tensorflow/src/main/js/package-lock.json @@ -0,0 +1,11490 @@ +{ + "name": "js", + "version": "1.0.0", + "lockfileVersion": 3, + "requires": true, + "packages": { + "": { + "name": "js", + "version": "1.0.0", + "license": "ISC", + "dependencies": { + "@tensorflow/tfjs": "^4.22.0", + "@tensorflow/tfjs-backend-wasm": "^4.22.0", + "assert": "^2.1.0", + "browserify-zlib": "^0.2.0", + "fast-text-encoding": "^1.0.6", + "stream-browserify": "^3.0.0", + "util": "^0.12.5" + }, + "devDependencies": { + "@babel/core": "^7.26.10", + "@babel/preset-env": "^7.26.9", + "@webpack-cli/generators": "^3.0.7", + "babel-loader": "^10.0.0", + "ts-loader": "^9.5.2", + "typescript": "^5.8.3" + } + }, + "node_modules/@ampproject/remapping": { + "version": "2.3.0", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/@ampproject/remapping/-/remapping-2.3.0.tgz", + "integrity": "sha512-30iZtAPgz+LTIYoeivqYo853f02jBYSd5uGnGpkFV0M3xOt9aN73erkgYAmZU43x4VfqcnLxW9Kpg3R5LC4YYw==", + "dev": true, + "license": "Apache-2.0", + "dependencies": { + "@jridgewell/gen-mapping": "^0.3.5", + "@jridgewell/trace-mapping": "^0.3.24" + }, + "engines": { + "node": ">=6.0.0" + } + }, + "node_modules/@babel/code-frame": { + "version": "7.26.2", + "resolved": 
"https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/@babel/code-frame/-/code-frame-7.26.2.tgz", + "integrity": "sha512-RJlIHRueQgwWitWgF8OdFYGZX328Ax5BCemNGlqHfplnRT9ESi8JkFlvaVYbS+UubVY6dpv87Fs2u5M29iNFVQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/helper-validator-identifier": "^7.25.9", + "js-tokens": "^4.0.0", + "picocolors": "^1.0.0" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/compat-data": { + "version": "7.26.8", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/@babel/compat-data/-/compat-data-7.26.8.tgz", + "integrity": "sha512-oH5UPLMWR3L2wEFLnFJ1TZXqHufiTKAiLfqw5zkhS4dKXLJ10yVztfil/twG8EDTA4F/tvVNw9nOl4ZMslB8rQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/core": { + "version": "7.26.10", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/@babel/core/-/core-7.26.10.tgz", + "integrity": "sha512-vMqyb7XCDMPvJFFOaT9kxtiRh42GwlZEg1/uIgtZshS5a/8OaduUfCi7kynKgc3Tw/6Uo2D+db9qBttghhmxwQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@ampproject/remapping": "^2.2.0", + "@babel/code-frame": "^7.26.2", + "@babel/generator": "^7.26.10", + "@babel/helper-compilation-targets": "^7.26.5", + "@babel/helper-module-transforms": "^7.26.0", + "@babel/helpers": "^7.26.10", + "@babel/parser": "^7.26.10", + "@babel/template": "^7.26.9", + "@babel/traverse": "^7.26.10", + "@babel/types": "^7.26.10", + "convert-source-map": "^2.0.0", + "debug": "^4.1.0", + "gensync": "^1.0.0-beta.2", + "json5": "^2.2.3", + "semver": "^6.3.1" + }, + "engines": { + "node": ">=6.9.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/babel" + } + }, + "node_modules/@babel/core/node_modules/semver": { + "version": "6.3.1", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/semver/-/semver-6.3.1.tgz", + "integrity": 
"sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA==", + "dev": true, + "license": "ISC", + "bin": { + "semver": "bin/semver.js" + } + }, + "node_modules/@babel/generator": { + "version": "7.27.0", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/@babel/generator/-/generator-7.27.0.tgz", + "integrity": "sha512-VybsKvpiN1gU1sdMZIp7FcqphVVKEwcuj02x73uvcHE0PTihx1nlBcowYWhDwjpoAXRv43+gDzyggGnn1XZhVw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/parser": "^7.27.0", + "@babel/types": "^7.27.0", + "@jridgewell/gen-mapping": "^0.3.5", + "@jridgewell/trace-mapping": "^0.3.25", + "jsesc": "^3.0.2" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-annotate-as-pure": { + "version": "7.25.9", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/@babel/helper-annotate-as-pure/-/helper-annotate-as-pure-7.25.9.tgz", + "integrity": "sha512-gv7320KBUFJz1RnylIg5WWYPRXKZ884AGkYpgpWW02TH66Dl+HaC1t1CKd0z3R4b6hdYEcmrNZHUmfCP+1u3/g==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/types": "^7.25.9" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-compilation-targets": { + "version": "7.27.0", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/@babel/helper-compilation-targets/-/helper-compilation-targets-7.27.0.tgz", + "integrity": "sha512-LVk7fbXml0H2xH34dFzKQ7TDZ2G4/rVTOrq9V+icbbadjbVxxeFeDsNHv2SrZeWoA+6ZiTyWYWtScEIW07EAcA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/compat-data": "^7.26.8", + "@babel/helper-validator-option": "^7.25.9", + "browserslist": "^4.24.0", + "lru-cache": "^5.1.1", + "semver": "^6.3.1" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-compilation-targets/node_modules/lru-cache": { + "version": "5.1.1", + "resolved": 
"https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/lru-cache/-/lru-cache-5.1.1.tgz", + "integrity": "sha512-KpNARQA3Iwv+jTA0utUVVbrh+Jlrr1Fv0e56GGzAFOXN7dk/FviaDW8LHmK52DlcH4WP2n6gI8vN1aesBFgo9w==", + "dev": true, + "license": "ISC", + "dependencies": { + "yallist": "^3.0.2" + } + }, + "node_modules/@babel/helper-compilation-targets/node_modules/semver": { + "version": "6.3.1", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/semver/-/semver-6.3.1.tgz", + "integrity": "sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA==", + "dev": true, + "license": "ISC", + "bin": { + "semver": "bin/semver.js" + } + }, + "node_modules/@babel/helper-compilation-targets/node_modules/yallist": { + "version": "3.1.1", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/yallist/-/yallist-3.1.1.tgz", + "integrity": "sha512-a4UGQaWPH59mOXUYnAG2ewncQS4i4F43Tv3JoAM+s2VDAmS9NsK8GpDMLrCHPksFT7h3K6TOoUNn2pb7RoXx4g==", + "dev": true, + "license": "ISC" + }, + "node_modules/@babel/helper-create-class-features-plugin": { + "version": "7.27.0", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/@babel/helper-create-class-features-plugin/-/helper-create-class-features-plugin-7.27.0.tgz", + "integrity": "sha512-vSGCvMecvFCd/BdpGlhpXYNhhC4ccxyvQWpbGL4CWbvfEoLFWUZuSuf7s9Aw70flgQF+6vptvgK2IfOnKlRmBg==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/helper-annotate-as-pure": "^7.25.9", + "@babel/helper-member-expression-to-functions": "^7.25.9", + "@babel/helper-optimise-call-expression": "^7.25.9", + "@babel/helper-replace-supers": "^7.26.5", + "@babel/helper-skip-transparent-expression-wrappers": "^7.25.9", + "@babel/traverse": "^7.27.0", + "semver": "^6.3.1" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0" + } + }, + 
"node_modules/@babel/helper-create-class-features-plugin/node_modules/semver": { + "version": "6.3.1", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/semver/-/semver-6.3.1.tgz", + "integrity": "sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA==", + "dev": true, + "license": "ISC", + "bin": { + "semver": "bin/semver.js" + } + }, + "node_modules/@babel/helper-create-regexp-features-plugin": { + "version": "7.27.0", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/@babel/helper-create-regexp-features-plugin/-/helper-create-regexp-features-plugin-7.27.0.tgz", + "integrity": "sha512-fO8l08T76v48BhpNRW/nQ0MxfnSdoSKUJBMjubOAYffsVuGG5qOfMq7N6Es7UJvi7Y8goXXo07EfcHZXDPuELQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/helper-annotate-as-pure": "^7.25.9", + "regexpu-core": "^6.2.0", + "semver": "^6.3.1" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0" + } + }, + "node_modules/@babel/helper-create-regexp-features-plugin/node_modules/semver": { + "version": "6.3.1", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/semver/-/semver-6.3.1.tgz", + "integrity": "sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA==", + "dev": true, + "license": "ISC", + "bin": { + "semver": "bin/semver.js" + } + }, + "node_modules/@babel/helper-define-polyfill-provider": { + "version": "0.6.4", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/@babel/helper-define-polyfill-provider/-/helper-define-polyfill-provider-0.6.4.tgz", + "integrity": "sha512-jljfR1rGnXXNWnmQg2K3+bvhkxB51Rl32QRaOTuwwjviGrHzIbSc8+x9CpraDtbT7mfyjXObULP4w/adunNwAw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/helper-compilation-targets": "^7.22.6", + "@babel/helper-plugin-utils": "^7.22.5", + "debug": "^4.1.1", + 
"lodash.debounce": "^4.0.8", + "resolve": "^1.14.2" + }, + "peerDependencies": { + "@babel/core": "^7.4.0 || ^8.0.0-0 <8.0.0" + } + }, + "node_modules/@babel/helper-member-expression-to-functions": { + "version": "7.25.9", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/@babel/helper-member-expression-to-functions/-/helper-member-expression-to-functions-7.25.9.tgz", + "integrity": "sha512-wbfdZ9w5vk0C0oyHqAJbc62+vet5prjj01jjJ8sKn3j9h3MQQlflEdXYvuqRWjHnM12coDEqiC1IRCi0U/EKwQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/traverse": "^7.25.9", + "@babel/types": "^7.25.9" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-module-imports": { + "version": "7.25.9", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/@babel/helper-module-imports/-/helper-module-imports-7.25.9.tgz", + "integrity": "sha512-tnUA4RsrmflIM6W6RFTLFSXITtl0wKjgpnLgXyowocVPrbYrLUXSBXDgTs8BlbmIzIdlBySRQjINYs2BAkiLtw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/traverse": "^7.25.9", + "@babel/types": "^7.25.9" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-module-transforms": { + "version": "7.26.0", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/@babel/helper-module-transforms/-/helper-module-transforms-7.26.0.tgz", + "integrity": "sha512-xO+xu6B5K2czEnQye6BHA7DolFFmS3LB7stHZFaOLb1pAwO1HWLS8fXA+eh0A2yIvltPVmx3eNNDBJA2SLHXFw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/helper-module-imports": "^7.25.9", + "@babel/helper-validator-identifier": "^7.25.9", + "@babel/traverse": "^7.25.9" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0" + } + }, + "node_modules/@babel/helper-optimise-call-expression": { + "version": "7.25.9", + "resolved": 
"https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/@babel/helper-optimise-call-expression/-/helper-optimise-call-expression-7.25.9.tgz", + "integrity": "sha512-FIpuNaz5ow8VyrYcnXQTDRGvV6tTjkNtCK/RYNDXGSLlUD6cBuQTSw43CShGxjvfBTfcUA/r6UhUCbtYqkhcuQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/types": "^7.25.9" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-plugin-utils": { + "version": "7.26.5", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/@babel/helper-plugin-utils/-/helper-plugin-utils-7.26.5.tgz", + "integrity": "sha512-RS+jZcRdZdRFzMyr+wcsaqOmld1/EqTghfaBGQQd/WnRdzdlvSZ//kF7U8VQTxf1ynZ4cjUcYgjVGx13ewNPMg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-remap-async-to-generator": { + "version": "7.25.9", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/@babel/helper-remap-async-to-generator/-/helper-remap-async-to-generator-7.25.9.tgz", + "integrity": "sha512-IZtukuUeBbhgOcaW2s06OXTzVNJR0ybm4W5xC1opWFFJMZbwRj5LCk+ByYH7WdZPZTt8KnFwA8pvjN2yqcPlgw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/helper-annotate-as-pure": "^7.25.9", + "@babel/helper-wrap-function": "^7.25.9", + "@babel/traverse": "^7.25.9" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0" + } + }, + "node_modules/@babel/helper-replace-supers": { + "version": "7.26.5", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/@babel/helper-replace-supers/-/helper-replace-supers-7.26.5.tgz", + "integrity": "sha512-bJ6iIVdYX1YooY2X7w1q6VITt+LnUILtNk7zT78ykuwStx8BauCzxvFqFaHjOpW1bVnSUM1PN1f0p5P21wHxvg==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/helper-member-expression-to-functions": "^7.25.9", + "@babel/helper-optimise-call-expression": "^7.25.9", + "@babel/traverse": "^7.26.5" + }, + 
"engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0" + } + }, + "node_modules/@babel/helper-skip-transparent-expression-wrappers": { + "version": "7.25.9", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/@babel/helper-skip-transparent-expression-wrappers/-/helper-skip-transparent-expression-wrappers-7.25.9.tgz", + "integrity": "sha512-K4Du3BFa3gvyhzgPcntrkDgZzQaq6uozzcpGbOO1OEJaI+EJdqWIMTLgFgQf6lrfiDFo5FU+BxKepI9RmZqahA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/traverse": "^7.25.9", + "@babel/types": "^7.25.9" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-string-parser": { + "version": "7.25.9", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/@babel/helper-string-parser/-/helper-string-parser-7.25.9.tgz", + "integrity": "sha512-4A/SCr/2KLd5jrtOMFzaKjVtAei3+2r/NChoBNoZ3EyP/+GlhoaEGoWOZUmFmoITP7zOJyHIMm+DYRd8o3PvHA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-validator-identifier": { + "version": "7.25.9", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/@babel/helper-validator-identifier/-/helper-validator-identifier-7.25.9.tgz", + "integrity": "sha512-Ed61U6XJc3CVRfkERJWDz4dJwKe7iLmmJsbOGu9wSloNSFttHV0I8g6UAgb7qnK5ly5bGLPd4oXZlxCdANBOWQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-validator-option": { + "version": "7.25.9", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/@babel/helper-validator-option/-/helper-validator-option-7.25.9.tgz", + "integrity": "sha512-e/zv1co8pp55dNdEcCynfj9X7nyUKUXoUEwfXqaZt0omVOmDe9oOTdKStH4GmAw6zxMFs50ZayuMfHDKlO7Tfw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helper-wrap-function": { + "version": "7.25.9", + 
"resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/@babel/helper-wrap-function/-/helper-wrap-function-7.25.9.tgz", + "integrity": "sha512-ETzz9UTjQSTmw39GboatdymDq4XIQbR8ySgVrylRhPOFpsd+JrKHIuF0de7GCWmem+T4uC5z7EZguod7Wj4A4g==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/template": "^7.25.9", + "@babel/traverse": "^7.25.9", + "@babel/types": "^7.25.9" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/helpers": { + "version": "7.27.0", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/@babel/helpers/-/helpers-7.27.0.tgz", + "integrity": "sha512-U5eyP/CTFPuNE3qk+WZMxFkp/4zUzdceQlfzf7DdGdhp+Fezd7HD+i8Y24ZuTMKX3wQBld449jijbGq6OdGNQg==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/template": "^7.27.0", + "@babel/types": "^7.27.0" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/parser": { + "version": "7.27.0", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/@babel/parser/-/parser-7.27.0.tgz", + "integrity": "sha512-iaepho73/2Pz7w2eMS0Q5f83+0RKI7i4xmiYeBmDzfRVbQtTOG7Ts0S4HzJVsTMGI9keU8rNfuZr8DKfSt7Yyg==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/types": "^7.27.0" + }, + "bin": { + "parser": "bin/babel-parser.js" + }, + "engines": { + "node": ">=6.0.0" + } + }, + "node_modules/@babel/plugin-bugfix-firefox-class-in-computed-class-key": { + "version": "7.25.9", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/@babel/plugin-bugfix-firefox-class-in-computed-class-key/-/plugin-bugfix-firefox-class-in-computed-class-key-7.25.9.tgz", + "integrity": "sha512-ZkRyVkThtxQ/J6nv3JFYv1RYY+JT5BvU0y3k5bWrmuG4woXypRa4PXmm9RhOwodRkYFWqC0C0cqcJ4OqR7kW+g==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/helper-plugin-utils": "^7.25.9", + "@babel/traverse": "^7.25.9" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + 
"@babel/core": "^7.0.0" + } + }, + "node_modules/@babel/plugin-bugfix-safari-class-field-initializer-scope": { + "version": "7.25.9", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/@babel/plugin-bugfix-safari-class-field-initializer-scope/-/plugin-bugfix-safari-class-field-initializer-scope-7.25.9.tgz", + "integrity": "sha512-MrGRLZxLD/Zjj0gdU15dfs+HH/OXvnw/U4jJD8vpcP2CJQapPEv1IWwjc/qMg7ItBlPwSv1hRBbb7LeuANdcnw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/helper-plugin-utils": "^7.25.9" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0" + } + }, + "node_modules/@babel/plugin-bugfix-safari-id-destructuring-collision-in-function-expression": { + "version": "7.25.9", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/@babel/plugin-bugfix-safari-id-destructuring-collision-in-function-expression/-/plugin-bugfix-safari-id-destructuring-collision-in-function-expression-7.25.9.tgz", + "integrity": "sha512-2qUwwfAFpJLZqxd02YW9btUCZHl+RFvdDkNfZwaIJrvB8Tesjsk8pEQkTvGwZXLqXUx/2oyY3ySRhm6HOXuCug==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/helper-plugin-utils": "^7.25.9" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0" + } + }, + "node_modules/@babel/plugin-bugfix-v8-spread-parameters-in-optional-chaining": { + "version": "7.25.9", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/@babel/plugin-bugfix-v8-spread-parameters-in-optional-chaining/-/plugin-bugfix-v8-spread-parameters-in-optional-chaining-7.25.9.tgz", + "integrity": "sha512-6xWgLZTJXwilVjlnV7ospI3xi+sl8lN8rXXbBD6vYn3UYDlGsag8wrZkKcSI8G6KgqKP7vNFaDgeDnfAABq61g==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/helper-plugin-utils": "^7.25.9", + "@babel/helper-skip-transparent-expression-wrappers": "^7.25.9", + "@babel/plugin-transform-optional-chaining": "^7.25.9" + }, + 
"engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.13.0" + } + }, + "node_modules/@babel/plugin-bugfix-v8-static-class-fields-redefine-readonly": { + "version": "7.25.9", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/@babel/plugin-bugfix-v8-static-class-fields-redefine-readonly/-/plugin-bugfix-v8-static-class-fields-redefine-readonly-7.25.9.tgz", + "integrity": "sha512-aLnMXYPnzwwqhYSCyXfKkIkYgJ8zv9RK+roo9DkTXz38ynIhd9XCbN08s3MGvqL2MYGVUGdRQLL/JqBIeJhJBg==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/helper-plugin-utils": "^7.25.9", + "@babel/traverse": "^7.25.9" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0" + } + }, + "node_modules/@babel/plugin-proposal-private-property-in-object": { + "version": "7.21.0-placeholder-for-preset-env.2", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/@babel/plugin-proposal-private-property-in-object/-/plugin-proposal-private-property-in-object-7.21.0-placeholder-for-preset-env.2.tgz", + "integrity": "sha512-SOSkfJDddaM7mak6cPEpswyTRnuRltl429hMraQEglW+OkovnCzsiszTmsrlY//qLFjCpQDFRvjdm2wA5pPm9w==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-import-assertions": { + "version": "7.26.0", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/@babel/plugin-syntax-import-assertions/-/plugin-syntax-import-assertions-7.26.0.tgz", + "integrity": "sha512-QCWT5Hh830hK5EQa7XzuqIkQU9tT/whqbDz7kuaZMHFl1inRRg7JnuAEOQ0Ur0QUl0NufCk1msK2BeY79Aj/eg==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/helper-plugin-utils": "^7.25.9" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-import-attributes": { + "version": 
"7.26.0", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/@babel/plugin-syntax-import-attributes/-/plugin-syntax-import-attributes-7.26.0.tgz", + "integrity": "sha512-e2dttdsJ1ZTpi3B9UYGLw41hifAubg19AtCu/2I/F1QNVclOBr1dYpTdmdyZ84Xiz43BS/tCUkMAZNLv12Pi+A==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/helper-plugin-utils": "^7.25.9" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-syntax-unicode-sets-regex": { + "version": "7.18.6", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/@babel/plugin-syntax-unicode-sets-regex/-/plugin-syntax-unicode-sets-regex-7.18.6.tgz", + "integrity": "sha512-727YkEAPwSIQTv5im8QHz3upqp92JTWhidIC81Tdx4VJYIte/VndKf1qKrfnnhPLiPghStWfvC/iFaMCQu7Nqg==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/helper-create-regexp-features-plugin": "^7.18.6", + "@babel/helper-plugin-utils": "^7.18.6" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0" + } + }, + "node_modules/@babel/plugin-transform-arrow-functions": { + "version": "7.25.9", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/@babel/plugin-transform-arrow-functions/-/plugin-transform-arrow-functions-7.25.9.tgz", + "integrity": "sha512-6jmooXYIwn9ca5/RylZADJ+EnSxVUS5sjeJ9UPk6RWRzXCmOJCy6dqItPJFpw2cuCangPK4OYr5uhGKcmrm5Qg==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/helper-plugin-utils": "^7.25.9" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-async-generator-functions": { + "version": "7.26.8", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/@babel/plugin-transform-async-generator-functions/-/plugin-transform-async-generator-functions-7.26.8.tgz", + "integrity": 
"sha512-He9Ej2X7tNf2zdKMAGOsmg2MrFc+hfoAhd3po4cWfo/NWjzEAKa0oQruj1ROVUdl0e6fb6/kE/G3SSxE0lRJOg==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/helper-plugin-utils": "^7.26.5", + "@babel/helper-remap-async-to-generator": "^7.25.9", + "@babel/traverse": "^7.26.8" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-async-to-generator": { + "version": "7.25.9", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/@babel/plugin-transform-async-to-generator/-/plugin-transform-async-to-generator-7.25.9.tgz", + "integrity": "sha512-NT7Ejn7Z/LjUH0Gv5KsBCxh7BH3fbLTV0ptHvpeMvrt3cPThHfJfst9Wrb7S8EvJ7vRTFI7z+VAvFVEQn/m5zQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/helper-module-imports": "^7.25.9", + "@babel/helper-plugin-utils": "^7.25.9", + "@babel/helper-remap-async-to-generator": "^7.25.9" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-block-scoped-functions": { + "version": "7.26.5", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/@babel/plugin-transform-block-scoped-functions/-/plugin-transform-block-scoped-functions-7.26.5.tgz", + "integrity": "sha512-chuTSY+hq09+/f5lMj8ZSYgCFpppV2CbYrhNFJ1BFoXpiWPnnAb7R0MqrafCpN8E1+YRrtM1MXZHJdIx8B6rMQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/helper-plugin-utils": "^7.26.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-block-scoping": { + "version": "7.27.0", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/@babel/plugin-transform-block-scoping/-/plugin-transform-block-scoping-7.27.0.tgz", + "integrity": 
"sha512-u1jGphZ8uDI2Pj/HJj6YQ6XQLZCNjOlprjxB5SVz6rq2T6SwAR+CdrWK0CP7F+9rDVMXdB0+r6Am5G5aobOjAQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/helper-plugin-utils": "^7.26.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-class-properties": { + "version": "7.25.9", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/@babel/plugin-transform-class-properties/-/plugin-transform-class-properties-7.25.9.tgz", + "integrity": "sha512-bbMAII8GRSkcd0h0b4X+36GksxuheLFjP65ul9w6C3KgAamI3JqErNgSrosX6ZPj+Mpim5VvEbawXxJCyEUV3Q==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/helper-create-class-features-plugin": "^7.25.9", + "@babel/helper-plugin-utils": "^7.25.9" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-class-static-block": { + "version": "7.26.0", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/@babel/plugin-transform-class-static-block/-/plugin-transform-class-static-block-7.26.0.tgz", + "integrity": "sha512-6J2APTs7BDDm+UMqP1useWqhcRAXo0WIoVj26N7kPFB6S73Lgvyka4KTZYIxtgYXiN5HTyRObA72N2iu628iTQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/helper-create-class-features-plugin": "^7.25.9", + "@babel/helper-plugin-utils": "^7.25.9" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.12.0" + } + }, + "node_modules/@babel/plugin-transform-classes": { + "version": "7.25.9", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/@babel/plugin-transform-classes/-/plugin-transform-classes-7.25.9.tgz", + "integrity": "sha512-mD8APIXmseE7oZvZgGABDyM34GUmK45Um2TXiBUt7PnuAxrgoSVf123qUzPxEr/+/BHrRn5NMZCdE2m/1F8DGg==", + "dev": true, + "license": "MIT", + "dependencies": { + 
"@babel/helper-annotate-as-pure": "^7.25.9", + "@babel/helper-compilation-targets": "^7.25.9", + "@babel/helper-plugin-utils": "^7.25.9", + "@babel/helper-replace-supers": "^7.25.9", + "@babel/traverse": "^7.25.9", + "globals": "^11.1.0" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-computed-properties": { + "version": "7.25.9", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/@babel/plugin-transform-computed-properties/-/plugin-transform-computed-properties-7.25.9.tgz", + "integrity": "sha512-HnBegGqXZR12xbcTHlJ9HGxw1OniltT26J5YpfruGqtUHlz/xKf/G2ak9e+t0rVqrjXa9WOhvYPz1ERfMj23AA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/helper-plugin-utils": "^7.25.9", + "@babel/template": "^7.25.9" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-destructuring": { + "version": "7.25.9", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/@babel/plugin-transform-destructuring/-/plugin-transform-destructuring-7.25.9.tgz", + "integrity": "sha512-WkCGb/3ZxXepmMiX101nnGiU+1CAdut8oHyEOHxkKuS1qKpU2SMXE2uSvfz8PBuLd49V6LEsbtyPhWC7fnkgvQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/helper-plugin-utils": "^7.25.9" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-dotall-regex": { + "version": "7.25.9", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/@babel/plugin-transform-dotall-regex/-/plugin-transform-dotall-regex-7.25.9.tgz", + "integrity": "sha512-t7ZQ7g5trIgSRYhI9pIJtRl64KHotutUJsh4Eze5l7olJv+mRSg4/MmbZ0tv1eeqRbdvo/+trvJD/Oc5DmW2cA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/helper-create-regexp-features-plugin": "^7.25.9", + 
"@babel/helper-plugin-utils": "^7.25.9" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-duplicate-keys": { + "version": "7.25.9", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/@babel/plugin-transform-duplicate-keys/-/plugin-transform-duplicate-keys-7.25.9.tgz", + "integrity": "sha512-LZxhJ6dvBb/f3x8xwWIuyiAHy56nrRG3PeYTpBkkzkYRRQ6tJLu68lEF5VIqMUZiAV7a8+Tb78nEoMCMcqjXBw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/helper-plugin-utils": "^7.25.9" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-duplicate-named-capturing-groups-regex": { + "version": "7.25.9", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/@babel/plugin-transform-duplicate-named-capturing-groups-regex/-/plugin-transform-duplicate-named-capturing-groups-regex-7.25.9.tgz", + "integrity": "sha512-0UfuJS0EsXbRvKnwcLjFtJy/Sxc5J5jhLHnFhy7u4zih97Hz6tJkLU+O+FMMrNZrosUPxDi6sYxJ/EA8jDiAog==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/helper-create-regexp-features-plugin": "^7.25.9", + "@babel/helper-plugin-utils": "^7.25.9" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0" + } + }, + "node_modules/@babel/plugin-transform-dynamic-import": { + "version": "7.25.9", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/@babel/plugin-transform-dynamic-import/-/plugin-transform-dynamic-import-7.25.9.tgz", + "integrity": "sha512-GCggjexbmSLaFhqsojeugBpeaRIgWNTcgKVq/0qIteFEqY2A+b9QidYadrWlnbWQUrW5fn+mCvf3tr7OeBFTyg==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/helper-plugin-utils": "^7.25.9" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + 
"node_modules/@babel/plugin-transform-exponentiation-operator": { + "version": "7.26.3", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/@babel/plugin-transform-exponentiation-operator/-/plugin-transform-exponentiation-operator-7.26.3.tgz", + "integrity": "sha512-7CAHcQ58z2chuXPWblnn1K6rLDnDWieghSOEmqQsrBenH0P9InCUtOJYD89pvngljmZlJcz3fcmgYsXFNGa1ZQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/helper-plugin-utils": "^7.25.9" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-export-namespace-from": { + "version": "7.25.9", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/@babel/plugin-transform-export-namespace-from/-/plugin-transform-export-namespace-from-7.25.9.tgz", + "integrity": "sha512-2NsEz+CxzJIVOPx2o9UsW1rXLqtChtLoVnwYHHiB04wS5sgn7mrV45fWMBX0Kk+ub9uXytVYfNP2HjbVbCB3Ww==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/helper-plugin-utils": "^7.25.9" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-for-of": { + "version": "7.26.9", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/@babel/plugin-transform-for-of/-/plugin-transform-for-of-7.26.9.tgz", + "integrity": "sha512-Hry8AusVm8LW5BVFgiyUReuoGzPUpdHQQqJY5bZnbbf+ngOHWuCuYFKw/BqaaWlvEUrF91HMhDtEaI1hZzNbLg==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/helper-plugin-utils": "^7.26.5", + "@babel/helper-skip-transparent-expression-wrappers": "^7.25.9" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-function-name": { + "version": "7.25.9", + "resolved": 
"https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/@babel/plugin-transform-function-name/-/plugin-transform-function-name-7.25.9.tgz", + "integrity": "sha512-8lP+Yxjv14Vc5MuWBpJsoUCd3hD6V9DgBon2FVYL4jJgbnVQ9fTgYmonchzZJOVNgzEgbxp4OwAf6xz6M/14XA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/helper-compilation-targets": "^7.25.9", + "@babel/helper-plugin-utils": "^7.25.9", + "@babel/traverse": "^7.25.9" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-json-strings": { + "version": "7.25.9", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/@babel/plugin-transform-json-strings/-/plugin-transform-json-strings-7.25.9.tgz", + "integrity": "sha512-xoTMk0WXceiiIvsaquQQUaLLXSW1KJ159KP87VilruQm0LNNGxWzahxSS6T6i4Zg3ezp4vA4zuwiNUR53qmQAw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/helper-plugin-utils": "^7.25.9" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-literals": { + "version": "7.25.9", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/@babel/plugin-transform-literals/-/plugin-transform-literals-7.25.9.tgz", + "integrity": "sha512-9N7+2lFziW8W9pBl2TzaNht3+pgMIRP74zizeCSrtnSKVdUl8mAjjOP2OOVQAfZ881P2cNjDj1uAMEdeD50nuQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/helper-plugin-utils": "^7.25.9" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-logical-assignment-operators": { + "version": "7.25.9", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/@babel/plugin-transform-logical-assignment-operators/-/plugin-transform-logical-assignment-operators-7.25.9.tgz", + "integrity": 
"sha512-wI4wRAzGko551Y8eVf6iOY9EouIDTtPb0ByZx+ktDGHwv6bHFimrgJM/2T021txPZ2s4c7bqvHbd+vXG6K948Q==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/helper-plugin-utils": "^7.25.9" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-member-expression-literals": { + "version": "7.25.9", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/@babel/plugin-transform-member-expression-literals/-/plugin-transform-member-expression-literals-7.25.9.tgz", + "integrity": "sha512-PYazBVfofCQkkMzh2P6IdIUaCEWni3iYEerAsRWuVd8+jlM1S9S9cz1dF9hIzyoZ8IA3+OwVYIp9v9e+GbgZhA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/helper-plugin-utils": "^7.25.9" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-modules-amd": { + "version": "7.25.9", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/@babel/plugin-transform-modules-amd/-/plugin-transform-modules-amd-7.25.9.tgz", + "integrity": "sha512-g5T11tnI36jVClQlMlt4qKDLlWnG5pP9CSM4GhdRciTNMRgkfpo5cR6b4rGIOYPgRRuFAvwjPQ/Yk+ql4dyhbw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/helper-module-transforms": "^7.25.9", + "@babel/helper-plugin-utils": "^7.25.9" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-modules-commonjs": { + "version": "7.26.3", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/@babel/plugin-transform-modules-commonjs/-/plugin-transform-modules-commonjs-7.26.3.tgz", + "integrity": "sha512-MgR55l4q9KddUDITEzEFYn5ZsGDXMSsU9E+kh7fjRXTIC3RHqfCo8RPRbyReYJh44HQ/yomFkqbOFohXvDCiIQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/helper-module-transforms": "^7.26.0", + 
"@babel/helper-plugin-utils": "^7.25.9" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-modules-systemjs": { + "version": "7.25.9", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/@babel/plugin-transform-modules-systemjs/-/plugin-transform-modules-systemjs-7.25.9.tgz", + "integrity": "sha512-hyss7iIlH/zLHaehT+xwiymtPOpsiwIIRlCAOwBB04ta5Tt+lNItADdlXw3jAWZ96VJ2jlhl/c+PNIQPKNfvcA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/helper-module-transforms": "^7.25.9", + "@babel/helper-plugin-utils": "^7.25.9", + "@babel/helper-validator-identifier": "^7.25.9", + "@babel/traverse": "^7.25.9" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-modules-umd": { + "version": "7.25.9", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/@babel/plugin-transform-modules-umd/-/plugin-transform-modules-umd-7.25.9.tgz", + "integrity": "sha512-bS9MVObUgE7ww36HEfwe6g9WakQ0KF07mQF74uuXdkoziUPfKyu/nIm663kz//e5O1nPInPFx36z7WJmJ4yNEw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/helper-module-transforms": "^7.25.9", + "@babel/helper-plugin-utils": "^7.25.9" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-named-capturing-groups-regex": { + "version": "7.25.9", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/@babel/plugin-transform-named-capturing-groups-regex/-/plugin-transform-named-capturing-groups-regex-7.25.9.tgz", + "integrity": "sha512-oqB6WHdKTGl3q/ItQhpLSnWWOpjUJLsOCLVyeFgeTktkBSCiurvPOsyt93gibI9CmuKvTUEtWmG5VhZD+5T/KA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/helper-create-regexp-features-plugin": "^7.25.9", + 
"@babel/helper-plugin-utils": "^7.25.9" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0" + } + }, + "node_modules/@babel/plugin-transform-new-target": { + "version": "7.25.9", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/@babel/plugin-transform-new-target/-/plugin-transform-new-target-7.25.9.tgz", + "integrity": "sha512-U/3p8X1yCSoKyUj2eOBIx3FOn6pElFOKvAAGf8HTtItuPyB+ZeOqfn+mvTtg9ZlOAjsPdK3ayQEjqHjU/yLeVQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/helper-plugin-utils": "^7.25.9" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-nullish-coalescing-operator": { + "version": "7.26.6", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/@babel/plugin-transform-nullish-coalescing-operator/-/plugin-transform-nullish-coalescing-operator-7.26.6.tgz", + "integrity": "sha512-CKW8Vu+uUZneQCPtXmSBUC6NCAUdya26hWCElAWh5mVSlSRsmiCPUUDKb3Z0szng1hiAJa098Hkhg9o4SE35Qw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/helper-plugin-utils": "^7.26.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-numeric-separator": { + "version": "7.25.9", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/@babel/plugin-transform-numeric-separator/-/plugin-transform-numeric-separator-7.25.9.tgz", + "integrity": "sha512-TlprrJ1GBZ3r6s96Yq8gEQv82s8/5HnCVHtEJScUj90thHQbwe+E5MLhi2bbNHBEJuzrvltXSru+BUxHDoog7Q==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/helper-plugin-utils": "^7.25.9" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-object-rest-spread": { + "version": "7.25.9", + "resolved": 
"https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/@babel/plugin-transform-object-rest-spread/-/plugin-transform-object-rest-spread-7.25.9.tgz", + "integrity": "sha512-fSaXafEE9CVHPweLYw4J0emp1t8zYTXyzN3UuG+lylqkvYd7RMrsOQ8TYx5RF231be0vqtFC6jnx3UmpJmKBYg==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/helper-compilation-targets": "^7.25.9", + "@babel/helper-plugin-utils": "^7.25.9", + "@babel/plugin-transform-parameters": "^7.25.9" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-object-super": { + "version": "7.25.9", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/@babel/plugin-transform-object-super/-/plugin-transform-object-super-7.25.9.tgz", + "integrity": "sha512-Kj/Gh+Rw2RNLbCK1VAWj2U48yxxqL2x0k10nPtSdRa0O2xnHXalD0s+o1A6a0W43gJ00ANo38jxkQreckOzv5A==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/helper-plugin-utils": "^7.25.9", + "@babel/helper-replace-supers": "^7.25.9" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-optional-catch-binding": { + "version": "7.25.9", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/@babel/plugin-transform-optional-catch-binding/-/plugin-transform-optional-catch-binding-7.25.9.tgz", + "integrity": "sha512-qM/6m6hQZzDcZF3onzIhZeDHDO43bkNNlOX0i8n3lR6zLbu0GN2d8qfM/IERJZYauhAHSLHy39NF0Ctdvcid7g==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/helper-plugin-utils": "^7.25.9" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-optional-chaining": { + "version": "7.25.9", + "resolved": 
"https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/@babel/plugin-transform-optional-chaining/-/plugin-transform-optional-chaining-7.25.9.tgz", + "integrity": "sha512-6AvV0FsLULbpnXeBjrY4dmWF8F7gf8QnvTEoO/wX/5xm/xE1Xo8oPuD3MPS+KS9f9XBEAWN7X1aWr4z9HdOr7A==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/helper-plugin-utils": "^7.25.9", + "@babel/helper-skip-transparent-expression-wrappers": "^7.25.9" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-parameters": { + "version": "7.25.9", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/@babel/plugin-transform-parameters/-/plugin-transform-parameters-7.25.9.tgz", + "integrity": "sha512-wzz6MKwpnshBAiRmn4jR8LYz/g8Ksg0o80XmwZDlordjwEk9SxBzTWC7F5ef1jhbrbOW2DJ5J6ayRukrJmnr0g==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/helper-plugin-utils": "^7.25.9" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-private-methods": { + "version": "7.25.9", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/@babel/plugin-transform-private-methods/-/plugin-transform-private-methods-7.25.9.tgz", + "integrity": "sha512-D/JUozNpQLAPUVusvqMxyvjzllRaF8/nSrP1s2YGQT/W4LHK4xxsMcHjhOGTS01mp9Hda8nswb+FblLdJornQw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/helper-create-class-features-plugin": "^7.25.9", + "@babel/helper-plugin-utils": "^7.25.9" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-private-property-in-object": { + "version": "7.25.9", + "resolved": 
"https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/@babel/plugin-transform-private-property-in-object/-/plugin-transform-private-property-in-object-7.25.9.tgz", + "integrity": "sha512-Evf3kcMqzXA3xfYJmZ9Pg1OvKdtqsDMSWBDzZOPLvHiTt36E75jLDQo5w1gtRU95Q4E5PDttrTf25Fw8d/uWLw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/helper-annotate-as-pure": "^7.25.9", + "@babel/helper-create-class-features-plugin": "^7.25.9", + "@babel/helper-plugin-utils": "^7.25.9" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-property-literals": { + "version": "7.25.9", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/@babel/plugin-transform-property-literals/-/plugin-transform-property-literals-7.25.9.tgz", + "integrity": "sha512-IvIUeV5KrS/VPavfSM/Iu+RE6llrHrYIKY1yfCzyO/lMXHQ+p7uGhonmGVisv6tSBSVgWzMBohTcvkC9vQcQFA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/helper-plugin-utils": "^7.25.9" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-regenerator": { + "version": "7.27.0", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/@babel/plugin-transform-regenerator/-/plugin-transform-regenerator-7.27.0.tgz", + "integrity": "sha512-LX/vCajUJQDqE7Aum/ELUMZAY19+cDpghxrnyt5I1tV6X5PyC86AOoWXWFYFeIvauyeSA6/ktn4tQVn/3ZifsA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/helper-plugin-utils": "^7.26.5", + "regenerator-transform": "^0.15.2" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-regexp-modifiers": { + "version": "7.26.0", + "resolved": 
"https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/@babel/plugin-transform-regexp-modifiers/-/plugin-transform-regexp-modifiers-7.26.0.tgz", + "integrity": "sha512-vN6saax7lrA2yA/Pak3sCxuD6F5InBjn9IcrIKQPjpsLvuHYLVroTxjdlVRHjjBWxKOqIwpTXDkOssYT4BFdRw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/helper-create-regexp-features-plugin": "^7.25.9", + "@babel/helper-plugin-utils": "^7.25.9" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0" + } + }, + "node_modules/@babel/plugin-transform-reserved-words": { + "version": "7.25.9", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/@babel/plugin-transform-reserved-words/-/plugin-transform-reserved-words-7.25.9.tgz", + "integrity": "sha512-7DL7DKYjn5Su++4RXu8puKZm2XBPHyjWLUidaPEkCUBbE7IPcsrkRHggAOOKydH1dASWdcUBxrkOGNxUv5P3Jg==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/helper-plugin-utils": "^7.25.9" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-shorthand-properties": { + "version": "7.25.9", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/@babel/plugin-transform-shorthand-properties/-/plugin-transform-shorthand-properties-7.25.9.tgz", + "integrity": "sha512-MUv6t0FhO5qHnS/W8XCbHmiRWOphNufpE1IVxhK5kuN3Td9FT1x4rx4K42s3RYdMXCXpfWkGSbCSd0Z64xA7Ng==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/helper-plugin-utils": "^7.25.9" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-spread": { + "version": "7.25.9", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/@babel/plugin-transform-spread/-/plugin-transform-spread-7.25.9.tgz", + "integrity": 
"sha512-oNknIB0TbURU5pqJFVbOOFspVlrpVwo2H1+HUIsVDvp5VauGGDP1ZEvO8Nn5xyMEs3dakajOxlmkNW7kNgSm6A==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/helper-plugin-utils": "^7.25.9", + "@babel/helper-skip-transparent-expression-wrappers": "^7.25.9" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-sticky-regex": { + "version": "7.25.9", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/@babel/plugin-transform-sticky-regex/-/plugin-transform-sticky-regex-7.25.9.tgz", + "integrity": "sha512-WqBUSgeVwucYDP9U/xNRQam7xV8W5Zf+6Eo7T2SRVUFlhRiMNFdFz58u0KZmCVVqs2i7SHgpRnAhzRNmKfi2uA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/helper-plugin-utils": "^7.25.9" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-template-literals": { + "version": "7.26.8", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/@babel/plugin-transform-template-literals/-/plugin-transform-template-literals-7.26.8.tgz", + "integrity": "sha512-OmGDL5/J0CJPJZTHZbi2XpO0tyT2Ia7fzpW5GURwdtp2X3fMmN8au/ej6peC/T33/+CRiIpA8Krse8hFGVmT5Q==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/helper-plugin-utils": "^7.26.5" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-typeof-symbol": { + "version": "7.27.0", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/@babel/plugin-transform-typeof-symbol/-/plugin-transform-typeof-symbol-7.27.0.tgz", + "integrity": "sha512-+LLkxA9rKJpNoGsbLnAgOCdESl73vwYn+V6b+5wHbrE7OGKVDPHIQvbFSzqE6rwqaCw2RE+zdJrlLkcf8YOA0w==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/helper-plugin-utils": "^7.26.5" + }, + "engines": { + "node": 
">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-unicode-escapes": { + "version": "7.25.9", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/@babel/plugin-transform-unicode-escapes/-/plugin-transform-unicode-escapes-7.25.9.tgz", + "integrity": "sha512-s5EDrE6bW97LtxOcGj1Khcx5AaXwiMmi4toFWRDP9/y0Woo6pXC+iyPu/KuhKtfSrNFd7jJB+/fkOtZy6aIC6Q==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/helper-plugin-utils": "^7.25.9" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-unicode-property-regex": { + "version": "7.25.9", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/@babel/plugin-transform-unicode-property-regex/-/plugin-transform-unicode-property-regex-7.25.9.tgz", + "integrity": "sha512-Jt2d8Ga+QwRluxRQ307Vlxa6dMrYEMZCgGxoPR8V52rxPyldHu3hdlHspxaqYmE7oID5+kB+UKUB/eWS+DkkWg==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/helper-create-regexp-features-plugin": "^7.25.9", + "@babel/helper-plugin-utils": "^7.25.9" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-unicode-regex": { + "version": "7.25.9", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/@babel/plugin-transform-unicode-regex/-/plugin-transform-unicode-regex-7.25.9.tgz", + "integrity": "sha512-yoxstj7Rg9dlNn9UQxzk4fcNivwv4nUYz7fYXBaKxvw/lnmPuOm/ikoELygbYq68Bls3D/D+NBPHiLwZdZZ4HA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/helper-create-regexp-features-plugin": "^7.25.9", + "@babel/helper-plugin-utils": "^7.25.9" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/plugin-transform-unicode-sets-regex": { + "version": 
"7.25.9", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/@babel/plugin-transform-unicode-sets-regex/-/plugin-transform-unicode-sets-regex-7.25.9.tgz", + "integrity": "sha512-8BYqO3GeVNHtx69fdPshN3fnzUNLrWdHhk/icSwigksJGczKSizZ+Z6SBCxTs723Fr5VSNorTIK7a+R2tISvwQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/helper-create-regexp-features-plugin": "^7.25.9", + "@babel/helper-plugin-utils": "^7.25.9" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0" + } + }, + "node_modules/@babel/preset-env": { + "version": "7.26.9", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/@babel/preset-env/-/preset-env-7.26.9.tgz", + "integrity": "sha512-vX3qPGE8sEKEAZCWk05k3cpTAE3/nOYca++JA+Rd0z2NCNzabmYvEiSShKzm10zdquOIAVXsy2Ei/DTW34KlKQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/compat-data": "^7.26.8", + "@babel/helper-compilation-targets": "^7.26.5", + "@babel/helper-plugin-utils": "^7.26.5", + "@babel/helper-validator-option": "^7.25.9", + "@babel/plugin-bugfix-firefox-class-in-computed-class-key": "^7.25.9", + "@babel/plugin-bugfix-safari-class-field-initializer-scope": "^7.25.9", + "@babel/plugin-bugfix-safari-id-destructuring-collision-in-function-expression": "^7.25.9", + "@babel/plugin-bugfix-v8-spread-parameters-in-optional-chaining": "^7.25.9", + "@babel/plugin-bugfix-v8-static-class-fields-redefine-readonly": "^7.25.9", + "@babel/plugin-proposal-private-property-in-object": "7.21.0-placeholder-for-preset-env.2", + "@babel/plugin-syntax-import-assertions": "^7.26.0", + "@babel/plugin-syntax-import-attributes": "^7.26.0", + "@babel/plugin-syntax-unicode-sets-regex": "^7.18.6", + "@babel/plugin-transform-arrow-functions": "^7.25.9", + "@babel/plugin-transform-async-generator-functions": "^7.26.8", + "@babel/plugin-transform-async-to-generator": "^7.25.9", + "@babel/plugin-transform-block-scoped-functions": "^7.26.5", + 
"@babel/plugin-transform-block-scoping": "^7.25.9", + "@babel/plugin-transform-class-properties": "^7.25.9", + "@babel/plugin-transform-class-static-block": "^7.26.0", + "@babel/plugin-transform-classes": "^7.25.9", + "@babel/plugin-transform-computed-properties": "^7.25.9", + "@babel/plugin-transform-destructuring": "^7.25.9", + "@babel/plugin-transform-dotall-regex": "^7.25.9", + "@babel/plugin-transform-duplicate-keys": "^7.25.9", + "@babel/plugin-transform-duplicate-named-capturing-groups-regex": "^7.25.9", + "@babel/plugin-transform-dynamic-import": "^7.25.9", + "@babel/plugin-transform-exponentiation-operator": "^7.26.3", + "@babel/plugin-transform-export-namespace-from": "^7.25.9", + "@babel/plugin-transform-for-of": "^7.26.9", + "@babel/plugin-transform-function-name": "^7.25.9", + "@babel/plugin-transform-json-strings": "^7.25.9", + "@babel/plugin-transform-literals": "^7.25.9", + "@babel/plugin-transform-logical-assignment-operators": "^7.25.9", + "@babel/plugin-transform-member-expression-literals": "^7.25.9", + "@babel/plugin-transform-modules-amd": "^7.25.9", + "@babel/plugin-transform-modules-commonjs": "^7.26.3", + "@babel/plugin-transform-modules-systemjs": "^7.25.9", + "@babel/plugin-transform-modules-umd": "^7.25.9", + "@babel/plugin-transform-named-capturing-groups-regex": "^7.25.9", + "@babel/plugin-transform-new-target": "^7.25.9", + "@babel/plugin-transform-nullish-coalescing-operator": "^7.26.6", + "@babel/plugin-transform-numeric-separator": "^7.25.9", + "@babel/plugin-transform-object-rest-spread": "^7.25.9", + "@babel/plugin-transform-object-super": "^7.25.9", + "@babel/plugin-transform-optional-catch-binding": "^7.25.9", + "@babel/plugin-transform-optional-chaining": "^7.25.9", + "@babel/plugin-transform-parameters": "^7.25.9", + "@babel/plugin-transform-private-methods": "^7.25.9", + "@babel/plugin-transform-private-property-in-object": "^7.25.9", + "@babel/plugin-transform-property-literals": "^7.25.9", + 
"@babel/plugin-transform-regenerator": "^7.25.9", + "@babel/plugin-transform-regexp-modifiers": "^7.26.0", + "@babel/plugin-transform-reserved-words": "^7.25.9", + "@babel/plugin-transform-shorthand-properties": "^7.25.9", + "@babel/plugin-transform-spread": "^7.25.9", + "@babel/plugin-transform-sticky-regex": "^7.25.9", + "@babel/plugin-transform-template-literals": "^7.26.8", + "@babel/plugin-transform-typeof-symbol": "^7.26.7", + "@babel/plugin-transform-unicode-escapes": "^7.25.9", + "@babel/plugin-transform-unicode-property-regex": "^7.25.9", + "@babel/plugin-transform-unicode-regex": "^7.25.9", + "@babel/plugin-transform-unicode-sets-regex": "^7.25.9", + "@babel/preset-modules": "0.1.6-no-external-plugins", + "babel-plugin-polyfill-corejs2": "^0.4.10", + "babel-plugin-polyfill-corejs3": "^0.11.0", + "babel-plugin-polyfill-regenerator": "^0.6.1", + "core-js-compat": "^3.40.0", + "semver": "^6.3.1" + }, + "engines": { + "node": ">=6.9.0" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0" + } + }, + "node_modules/@babel/preset-env/node_modules/semver": { + "version": "6.3.1", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/semver/-/semver-6.3.1.tgz", + "integrity": "sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA==", + "dev": true, + "license": "ISC", + "bin": { + "semver": "bin/semver.js" + } + }, + "node_modules/@babel/preset-modules": { + "version": "0.1.6-no-external-plugins", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/@babel/preset-modules/-/preset-modules-0.1.6-no-external-plugins.tgz", + "integrity": "sha512-HrcgcIESLm9aIR842yhJ5RWan/gebQUJ6E/E5+rf0y9o6oj7w0Br+sWuL6kEQ/o/AdfvR1Je9jG18/gnpwjEyA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/helper-plugin-utils": "^7.0.0", + "@babel/types": "^7.4.4", + "esutils": "^2.0.2" + }, + "peerDependencies": { + "@babel/core": "^7.0.0-0 || ^8.0.0-0 <8.0.0" + } + }, + 
"node_modules/@babel/runtime": { + "version": "7.27.0", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/@babel/runtime/-/runtime-7.27.0.tgz", + "integrity": "sha512-VtPOkrdPHZsKc/clNqyi9WUA8TINkZ4cGk63UUE3u4pmB2k+ZMQRDuIOagv8UVd6j7k0T3+RRIb7beKTebNbcw==", + "dev": true, + "license": "MIT", + "dependencies": { + "regenerator-runtime": "^0.14.0" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/runtime/node_modules/regenerator-runtime": { + "version": "0.14.1", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/regenerator-runtime/-/regenerator-runtime-0.14.1.tgz", + "integrity": "sha512-dYnhHh0nJoMfnkZs6GmmhFknAGRrLznOu5nc9ML+EJxGvrx6H7teuevqVqCuPcPK//3eDrrjQhehXVx9cnkGdw==", + "dev": true, + "license": "MIT" + }, + "node_modules/@babel/template": { + "version": "7.27.0", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/@babel/template/-/template-7.27.0.tgz", + "integrity": "sha512-2ncevenBqXI6qRMukPlXwHKHchC7RyMuu4xv5JBXRfOGVcTy1mXCD12qrp7Jsoxll1EV3+9sE4GugBVRjT2jFA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/code-frame": "^7.26.2", + "@babel/parser": "^7.27.0", + "@babel/types": "^7.27.0" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/traverse": { + "version": "7.27.0", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/@babel/traverse/-/traverse-7.27.0.tgz", + "integrity": "sha512-19lYZFzYVQkkHkl4Cy4WrAVcqBkgvV2YM2TU3xG6DIwO7O3ecbDPfW3yM3bjAGcqcQHi+CCtjMR3dIEHxsd6bA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/code-frame": "^7.26.2", + "@babel/generator": "^7.27.0", + "@babel/parser": "^7.27.0", + "@babel/template": "^7.27.0", + "@babel/types": "^7.27.0", + "debug": "^4.3.1", + "globals": "^11.1.0" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@babel/types": { + "version": "7.27.0", + "resolved": 
"https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/@babel/types/-/types-7.27.0.tgz", + "integrity": "sha512-H45s8fVLYjbhFH62dIJ3WtmJ6RSPt/3DRO0ZcT2SUiYiQyz3BLVb9ADEnLl91m74aQPS3AzzeajZHYOalWe3bg==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/helper-string-parser": "^7.25.9", + "@babel/helper-validator-identifier": "^7.25.9" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@discoveryjs/json-ext": { + "version": "0.5.7", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/@discoveryjs/json-ext/-/json-ext-0.5.7.tgz", + "integrity": "sha512-dBVuXR082gk3jsFp7Rd/JI4kytwGHecnCoTtXFb7DB6CNHp4rg5k1bhg0nWdLGLnOV71lmDzGQaLMy8iPLY0pw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=10.0.0" + } + }, + "node_modules/@gar/promisify": { + "version": "1.1.3", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/@gar/promisify/-/promisify-1.1.3.tgz", + "integrity": "sha512-k2Ty1JcVojjJFwrg/ThKi2ujJ7XNLYaFGNB/bWT9wGR+oSMJHMa5w+CUq6p/pVrKeNNgA7pCqEcjSnHVoqJQFw==", + "dev": true, + "license": "MIT" + }, + "node_modules/@isaacs/cliui": { + "version": "8.0.2", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/@isaacs/cliui/-/cliui-8.0.2.tgz", + "integrity": "sha512-O8jcjabXaleOG9DQ0+ARXWZBTfnP4WNAqzuiJK7ll44AmxGKv/J2M4TPjxjY3znBCfvBXFzucm1twdyFybFqEA==", + "dev": true, + "license": "ISC", + "dependencies": { + "string-width": "^5.1.2", + "string-width-cjs": "npm:string-width@^4.2.0", + "strip-ansi": "^7.0.1", + "strip-ansi-cjs": "npm:strip-ansi@^6.0.1", + "wrap-ansi": "^8.1.0", + "wrap-ansi-cjs": "npm:wrap-ansi@^7.0.0" + }, + "engines": { + "node": ">=12" + } + }, + "node_modules/@isaacs/cliui/node_modules/ansi-regex": { + "version": "6.1.0", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/ansi-regex/-/ansi-regex-6.1.0.tgz", + "integrity": 
"sha512-7HSX4QQb4CspciLpVFwyRe79O3xsIZDDLER21kERQ71oaPodF8jL725AgJMFAYbooIqolJoRLuM81SpeUkpkvA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/chalk/ansi-regex?sponsor=1" + } + }, + "node_modules/@isaacs/cliui/node_modules/ansi-styles": { + "version": "6.2.1", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/ansi-styles/-/ansi-styles-6.2.1.tgz", + "integrity": "sha512-bN798gFfQX+viw3R7yrGWRqnrN2oRkEkUjjl4JNn4E8GxxbjtG3FbrEIIY3l8/hrwUwIeCZvi4QuOTP4MErVug==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/chalk/ansi-styles?sponsor=1" + } + }, + "node_modules/@isaacs/cliui/node_modules/emoji-regex": { + "version": "9.2.2", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/emoji-regex/-/emoji-regex-9.2.2.tgz", + "integrity": "sha512-L18DaJsXSUk2+42pv8mLs5jJT2hqFkFE4j21wOmgbUqsZ2hL72NsUU785g9RXgo3s0ZNgVl42TiHp3ZtOv/Vyg==", + "dev": true, + "license": "MIT" + }, + "node_modules/@isaacs/cliui/node_modules/string-width": { + "version": "5.1.2", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/string-width/-/string-width-5.1.2.tgz", + "integrity": "sha512-HnLOCR3vjcY8beoNLtcjZ5/nxn2afmME6lhrDrebokqMap+XbeW8n9TXpPDOqdGK5qcI3oT0GKTW6wC7EMiVqA==", + "dev": true, + "license": "MIT", + "dependencies": { + "eastasianwidth": "^0.2.0", + "emoji-regex": "^9.2.2", + "strip-ansi": "^7.0.1" + }, + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/@isaacs/cliui/node_modules/strip-ansi": { + "version": "7.1.0", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/strip-ansi/-/strip-ansi-7.1.0.tgz", + "integrity": "sha512-iq6eVVI64nQQTRYq2KtEg2d2uU7LElhTJwsH4YzIHZshxlgZms/wIc4VoDQTlG/IvVIrBKG06CrZnp0qv7hkcQ==", + "dev": true, + "license": "MIT", + 
"dependencies": { + "ansi-regex": "^6.0.1" + }, + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/chalk/strip-ansi?sponsor=1" + } + }, + "node_modules/@isaacs/cliui/node_modules/wrap-ansi": { + "version": "8.1.0", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/wrap-ansi/-/wrap-ansi-8.1.0.tgz", + "integrity": "sha512-si7QWI6zUMq56bESFvagtmzMdGOtoxfR+Sez11Mobfc7tm+VkUckk9bW2UeffTGVUbOksxmSw0AA2gs8g71NCQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "ansi-styles": "^6.1.0", + "string-width": "^5.0.1", + "strip-ansi": "^7.0.1" + }, + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/chalk/wrap-ansi?sponsor=1" + } + }, + "node_modules/@isaacs/string-locale-compare": { + "version": "1.1.0", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/@isaacs/string-locale-compare/-/string-locale-compare-1.1.0.tgz", + "integrity": "sha512-SQ7Kzhh9+D+ZW9MA0zkYv3VXhIDNx+LzM6EJ+/65I3QY+enU6Itte7E5XX7EWrqLW2FN4n06GWzBnPoC3th2aQ==", + "dev": true, + "license": "ISC" + }, + "node_modules/@jridgewell/gen-mapping": { + "version": "0.3.8", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/@jridgewell/gen-mapping/-/gen-mapping-0.3.8.tgz", + "integrity": "sha512-imAbBGkb+ebQyxKgzv5Hu2nmROxoDOXHh80evxdoXNOrvAnVx7zimzc1Oo5h9RlfV4vPXaE2iM5pOFbvOCClWA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jridgewell/set-array": "^1.2.1", + "@jridgewell/sourcemap-codec": "^1.4.10", + "@jridgewell/trace-mapping": "^0.3.24" + }, + "engines": { + "node": ">=6.0.0" + } + }, + "node_modules/@jridgewell/resolve-uri": { + "version": "3.1.2", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/@jridgewell/resolve-uri/-/resolve-uri-3.1.2.tgz", + "integrity": "sha512-bRISgCIjP20/tbWSPWMEi54QVPRZExkuD9lJL+UIxUKtwVJA8wW1Trb1jMs1RFXo1CBTNZ/5hpC9QvmKWdopKw==", + "dev": true, + "license": "MIT", + "engines": 
{ + "node": ">=6.0.0" + } + }, + "node_modules/@jridgewell/set-array": { + "version": "1.2.1", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/@jridgewell/set-array/-/set-array-1.2.1.tgz", + "integrity": "sha512-R8gLRTZeyp03ymzP/6Lil/28tGeGEzhx1q2k703KGWRAI1VdvPIXdG70VJc2pAMw3NA6JKL5hhFu1sJX0Mnn/A==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6.0.0" + } + }, + "node_modules/@jridgewell/source-map": { + "version": "0.3.6", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/@jridgewell/source-map/-/source-map-0.3.6.tgz", + "integrity": "sha512-1ZJTZebgqllO79ue2bm3rIGud/bOe0pP5BjSRCRxxYkEZS8STV7zN84UBbiYu7jy+eCKSnVIUgoWWE/tt+shMQ==", + "dev": true, + "license": "MIT", + "peer": true, + "dependencies": { + "@jridgewell/gen-mapping": "^0.3.5", + "@jridgewell/trace-mapping": "^0.3.25" + } + }, + "node_modules/@jridgewell/sourcemap-codec": { + "version": "1.5.0", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/@jridgewell/sourcemap-codec/-/sourcemap-codec-1.5.0.tgz", + "integrity": "sha512-gv3ZRaISU3fjPAgNsriBRqGWQL6quFx04YMPW/zD8XMLsU32mhCCbfbO6KZFLjvYpCZ8zyDEgqsgf+PwPaM7GQ==", + "dev": true, + "license": "MIT" + }, + "node_modules/@jridgewell/trace-mapping": { + "version": "0.3.25", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/@jridgewell/trace-mapping/-/trace-mapping-0.3.25.tgz", + "integrity": "sha512-vNk6aEwybGtawWmy/PzwnGDOjCkLWSD2wqvjGGAgOAwCGWySYXfYoxt00IJkTF+8Lb57DwOb3Aa0o9CApepiYQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@jridgewell/resolve-uri": "^3.1.0", + "@jridgewell/sourcemap-codec": "^1.4.14" + } + }, + "node_modules/@nodelib/fs.scandir": { + "version": "2.1.5", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/@nodelib/fs.scandir/-/fs.scandir-2.1.5.tgz", + "integrity": 
"sha512-vq24Bq3ym5HEQm2NKCr3yXDwjc7vTsEThRDnkp2DK9p1uqLR+DHurm/NOTo0KG7HYHU7eppKZj3MyqYuMBf62g==", + "dev": true, + "license": "MIT", + "dependencies": { + "@nodelib/fs.stat": "2.0.5", + "run-parallel": "^1.1.9" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/@nodelib/fs.stat": { + "version": "2.0.5", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/@nodelib/fs.stat/-/fs.stat-2.0.5.tgz", + "integrity": "sha512-RkhPPp2zrqDAQA/2jNhnztcPAlv64XdhIp7a7454A5ovI7Bukxgt7MX7udwAu3zg1DcpPU0rz3VV1SeaqvY4+A==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 8" + } + }, + "node_modules/@nodelib/fs.walk": { + "version": "1.2.8", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/@nodelib/fs.walk/-/fs.walk-1.2.8.tgz", + "integrity": "sha512-oGB+UxlgWcgQkgwo8GcEGwemoTFt3FIO9ababBmaGwXIoBKZ+GTy0pP185beGg7Llih/NSHSV2XAs1lnznocSg==", + "dev": true, + "license": "MIT", + "dependencies": { + "@nodelib/fs.scandir": "2.1.5", + "fastq": "^1.6.0" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/@npmcli/arborist": { + "version": "4.3.1", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/@npmcli/arborist/-/arborist-4.3.1.tgz", + "integrity": "sha512-yMRgZVDpwWjplorzt9SFSaakWx6QIK248Nw4ZFgkrAy/GvJaFRaSZzE6nD7JBK5r8g/+PTxFq5Wj/sfciE7x+A==", + "dev": true, + "license": "ISC", + "dependencies": { + "@isaacs/string-locale-compare": "^1.1.0", + "@npmcli/installed-package-contents": "^1.0.7", + "@npmcli/map-workspaces": "^2.0.0", + "@npmcli/metavuln-calculator": "^2.0.0", + "@npmcli/move-file": "^1.1.0", + "@npmcli/name-from-folder": "^1.0.1", + "@npmcli/node-gyp": "^1.0.3", + "@npmcli/package-json": "^1.0.1", + "@npmcli/run-script": "^2.0.0", + "bin-links": "^3.0.0", + "cacache": "^15.0.3", + "common-ancestor-path": "^1.0.1", + "json-parse-even-better-errors": "^2.3.1", + "json-stringify-nice": "^1.1.4", + "mkdirp": "^1.0.4", + "mkdirp-infer-owner": "^2.0.0", + 
"npm-install-checks": "^4.0.0", + "npm-package-arg": "^8.1.5", + "npm-pick-manifest": "^6.1.0", + "npm-registry-fetch": "^12.0.1", + "pacote": "^12.0.2", + "parse-conflict-json": "^2.0.1", + "proc-log": "^1.0.0", + "promise-all-reject-late": "^1.0.0", + "promise-call-limit": "^1.0.1", + "read-package-json-fast": "^2.0.2", + "readdir-scoped-modules": "^1.1.0", + "rimraf": "^3.0.2", + "semver": "^7.3.5", + "ssri": "^8.0.1", + "treeverse": "^1.0.4", + "walk-up-path": "^1.0.0" + }, + "bin": { + "arborist": "bin/index.js" + }, + "engines": { + "node": "^12.13.0 || ^14.15.0 || >=16" + } + }, + "node_modules/@npmcli/fs": { + "version": "1.1.1", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/@npmcli/fs/-/fs-1.1.1.tgz", + "integrity": "sha512-8KG5RD0GVP4ydEzRn/I4BNDuxDtqVbOdm8675T49OIG/NGhaK0pjPX7ZcDlvKYbA+ulvVK3ztfcF4uBdOxuJbQ==", + "dev": true, + "license": "ISC", + "dependencies": { + "@gar/promisify": "^1.0.1", + "semver": "^7.3.5" + } + }, + "node_modules/@npmcli/git": { + "version": "2.1.0", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/@npmcli/git/-/git-2.1.0.tgz", + "integrity": "sha512-/hBFX/QG1b+N7PZBFs0bi+evgRZcK9nWBxQKZkGoXUT5hJSwl5c4d7y8/hm+NQZRPhQ67RzFaj5UM9YeyKoryw==", + "dev": true, + "license": "ISC", + "dependencies": { + "@npmcli/promise-spawn": "^1.3.2", + "lru-cache": "^6.0.0", + "mkdirp": "^1.0.4", + "npm-pick-manifest": "^6.1.1", + "promise-inflight": "^1.0.1", + "promise-retry": "^2.0.1", + "semver": "^7.3.5", + "which": "^2.0.2" + } + }, + "node_modules/@npmcli/installed-package-contents": { + "version": "1.0.7", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/@npmcli/installed-package-contents/-/installed-package-contents-1.0.7.tgz", + "integrity": "sha512-9rufe0wnJusCQoLpV9ZPKIVP55itrM5BxOXs10DmdbRfgWtHy1LDyskbwRnBghuB0PrF7pNPOqREVtpz4HqzKw==", + "dev": true, + "license": "ISC", + "dependencies": { + "npm-bundled": "^1.1.1", + 
"npm-normalize-package-bin": "^1.0.1" + }, + "bin": { + "installed-package-contents": "index.js" + }, + "engines": { + "node": ">= 10" + } + }, + "node_modules/@npmcli/map-workspaces": { + "version": "2.0.4", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/@npmcli/map-workspaces/-/map-workspaces-2.0.4.tgz", + "integrity": "sha512-bMo0aAfwhVwqoVM5UzX1DJnlvVvzDCHae821jv48L1EsrYwfOZChlqWYXEtto/+BkBXetPbEWgau++/brh4oVg==", + "dev": true, + "license": "ISC", + "dependencies": { + "@npmcli/name-from-folder": "^1.0.1", + "glob": "^8.0.1", + "minimatch": "^5.0.1", + "read-package-json-fast": "^2.0.3" + }, + "engines": { + "node": "^12.13.0 || ^14.15.0 || >=16.0.0" + } + }, + "node_modules/@npmcli/map-workspaces/node_modules/brace-expansion": { + "version": "2.0.1", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/brace-expansion/-/brace-expansion-2.0.1.tgz", + "integrity": "sha512-XnAIvQ8eM+kC6aULx6wuQiwVsnzsi9d3WxzV3FpWTGA19F621kwdbsAcFKXgKUHZWsy+mY6iL1sHTxWEFCytDA==", + "dev": true, + "license": "MIT", + "dependencies": { + "balanced-match": "^1.0.0" + } + }, + "node_modules/@npmcli/map-workspaces/node_modules/minimatch": { + "version": "5.1.6", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/minimatch/-/minimatch-5.1.6.tgz", + "integrity": "sha512-lKwV/1brpG6mBUFHtb7NUmtABCb2WZZmm2wNiOA5hAb8VdCS4B3dtMWyvcoViccwAW/COERjXLt0zP1zXUN26g==", + "dev": true, + "license": "ISC", + "dependencies": { + "brace-expansion": "^2.0.1" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/@npmcli/metavuln-calculator": { + "version": "2.0.0", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/@npmcli/metavuln-calculator/-/metavuln-calculator-2.0.0.tgz", + "integrity": "sha512-VVW+JhWCKRwCTE+0xvD6p3uV4WpqocNYYtzyvenqL/u1Q3Xx6fGTJ+6UoIoii07fbuEO9U3IIyuGY0CYHDv1sg==", + "dev": true, + "license": "ISC", + "dependencies": { + "cacache": "^15.0.5", + 
"json-parse-even-better-errors": "^2.3.1", + "pacote": "^12.0.0", + "semver": "^7.3.2" + }, + "engines": { + "node": "^12.13.0 || ^14.15.0 || >=16" + } + }, + "node_modules/@npmcli/move-file": { + "version": "1.1.2", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/@npmcli/move-file/-/move-file-1.1.2.tgz", + "integrity": "sha512-1SUf/Cg2GzGDyaf15aR9St9TWlb+XvbZXWpDx8YKs7MLzMH/BCeopv+y9vzrzgkfykCGuWOlSu3mZhj2+FQcrg==", + "deprecated": "This functionality has been moved to @npmcli/fs", + "dev": true, + "license": "MIT", + "dependencies": { + "mkdirp": "^1.0.4", + "rimraf": "^3.0.2" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/@npmcli/name-from-folder": { + "version": "1.0.1", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/@npmcli/name-from-folder/-/name-from-folder-1.0.1.tgz", + "integrity": "sha512-qq3oEfcLFwNfEYOQ8HLimRGKlD8WSeGEdtUa7hmzpR8Sa7haL1KVQrvgO6wqMjhWFFVjgtrh1gIxDz+P8sjUaA==", + "dev": true, + "license": "ISC" + }, + "node_modules/@npmcli/node-gyp": { + "version": "1.0.3", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/@npmcli/node-gyp/-/node-gyp-1.0.3.tgz", + "integrity": "sha512-fnkhw+fmX65kiLqk6E3BFLXNC26rUhK90zVwe2yncPliVT/Qos3xjhTLE59Df8KnPlcwIERXKVlU1bXoUQ+liA==", + "dev": true, + "license": "ISC" + }, + "node_modules/@npmcli/package-json": { + "version": "1.0.1", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/@npmcli/package-json/-/package-json-1.0.1.tgz", + "integrity": "sha512-y6jnu76E9C23osz8gEMBayZmaZ69vFOIk8vR1FJL/wbEJ54+9aVG9rLTjQKSXfgYZEr50nw1txBBFfBZZe+bYg==", + "dev": true, + "license": "ISC", + "dependencies": { + "json-parse-even-better-errors": "^2.3.1" + } + }, + "node_modules/@npmcli/promise-spawn": { + "version": "1.3.2", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/@npmcli/promise-spawn/-/promise-spawn-1.3.2.tgz", + "integrity": 
"sha512-QyAGYo/Fbj4MXeGdJcFzZ+FkDkomfRBrPM+9QYJSg+PxgAUL+LU3FneQk37rKR2/zjqkCV1BLHccX98wRXG3Sg==", + "dev": true, + "license": "ISC", + "dependencies": { + "infer-owner": "^1.0.4" + } + }, + "node_modules/@npmcli/run-script": { + "version": "2.0.0", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/@npmcli/run-script/-/run-script-2.0.0.tgz", + "integrity": "sha512-fSan/Pu11xS/TdaTpTB0MRn9guwGU8dye+x56mEVgBEd/QsybBbYcAL0phPXi8SGWFEChkQd6M9qL4y6VOpFig==", + "dev": true, + "license": "ISC", + "dependencies": { + "@npmcli/node-gyp": "^1.0.2", + "@npmcli/promise-spawn": "^1.3.2", + "node-gyp": "^8.2.0", + "read-package-json-fast": "^2.0.1" + } + }, + "node_modules/@octokit/auth-token": { + "version": "2.5.0", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/@octokit/auth-token/-/auth-token-2.5.0.tgz", + "integrity": "sha512-r5FVUJCOLl19AxiuZD2VRZ/ORjp/4IN98Of6YJoJOkY75CIBuYfmiNHGrDwXr+aLGG55igl9QrxX3hbiXlLb+g==", + "dev": true, + "license": "MIT", + "dependencies": { + "@octokit/types": "^6.0.3" + } + }, + "node_modules/@octokit/core": { + "version": "3.6.0", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/@octokit/core/-/core-3.6.0.tgz", + "integrity": "sha512-7RKRKuA4xTjMhY+eG3jthb3hlZCsOwg3rztWh75Xc+ShDWOfDDATWbeZpAHBNRpm4Tv9WgBMOy1zEJYXG6NJ7Q==", + "dev": true, + "license": "MIT", + "dependencies": { + "@octokit/auth-token": "^2.4.4", + "@octokit/graphql": "^4.5.8", + "@octokit/request": "^5.6.3", + "@octokit/request-error": "^2.0.5", + "@octokit/types": "^6.0.3", + "before-after-hook": "^2.2.0", + "universal-user-agent": "^6.0.0" + } + }, + "node_modules/@octokit/endpoint": { + "version": "6.0.12", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/@octokit/endpoint/-/endpoint-6.0.12.tgz", + "integrity": "sha512-lF3puPwkQWGfkMClXb4k/eUT/nZKQfxinRWJrdZaJO85Dqwo/G0yOC434Jr2ojwafWJMYqFGFa5ms4jJUgujdA==", + "dev": true, + "license": "MIT", + 
"dependencies": { + "@octokit/types": "^6.0.3", + "is-plain-object": "^5.0.0", + "universal-user-agent": "^6.0.0" + } + }, + "node_modules/@octokit/endpoint/node_modules/is-plain-object": { + "version": "5.0.0", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/is-plain-object/-/is-plain-object-5.0.0.tgz", + "integrity": "sha512-VRSzKkbMm5jMDoKLbltAkFQ5Qr7VDiTFGXxYFXXowVj387GeGNOCsOH6Msy00SGZ3Fp84b1Naa1psqgcCIEP5Q==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/@octokit/graphql": { + "version": "4.8.0", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/@octokit/graphql/-/graphql-4.8.0.tgz", + "integrity": "sha512-0gv+qLSBLKF0z8TKaSKTsS39scVKF9dbMxJpj3U0vC7wjNWFuIpL/z76Qe2fiuCbDRcJSavkXsVtMS6/dtQQsg==", + "dev": true, + "license": "MIT", + "dependencies": { + "@octokit/request": "^5.6.0", + "@octokit/types": "^6.0.3", + "universal-user-agent": "^6.0.0" + } + }, + "node_modules/@octokit/openapi-types": { + "version": "12.11.0", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/@octokit/openapi-types/-/openapi-types-12.11.0.tgz", + "integrity": "sha512-VsXyi8peyRq9PqIz/tpqiL2w3w80OgVMwBHltTml3LmVvXiphgeqmY9mvBw9Wu7e0QWk/fqD37ux8yP5uVekyQ==", + "dev": true, + "license": "MIT" + }, + "node_modules/@octokit/plugin-paginate-rest": { + "version": "2.21.3", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/@octokit/plugin-paginate-rest/-/plugin-paginate-rest-2.21.3.tgz", + "integrity": "sha512-aCZTEf0y2h3OLbrgKkrfFdjRL6eSOo8komneVQJnYecAxIej7Bafor2xhuDJOIFau4pk0i/P28/XgtbyPF0ZHw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@octokit/types": "^6.40.0" + }, + "peerDependencies": { + "@octokit/core": ">=2" + } + }, + "node_modules/@octokit/plugin-request-log": { + "version": "1.0.4", + "resolved": 
"https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/@octokit/plugin-request-log/-/plugin-request-log-1.0.4.tgz", + "integrity": "sha512-mLUsMkgP7K/cnFEw07kWqXGF5LKrOkD+lhCrKvPHXWDywAwuDUeDwWBpc69XK3pNX0uKiVt8g5z96PJ6z9xCFA==", + "dev": true, + "license": "MIT", + "peerDependencies": { + "@octokit/core": ">=3" + } + }, + "node_modules/@octokit/plugin-rest-endpoint-methods": { + "version": "5.16.2", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/@octokit/plugin-rest-endpoint-methods/-/plugin-rest-endpoint-methods-5.16.2.tgz", + "integrity": "sha512-8QFz29Fg5jDuTPXVtey05BLm7OB+M8fnvE64RNegzX7U+5NUXcOcnpTIK0YfSHBg8gYd0oxIq3IZTe9SfPZiRw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@octokit/types": "^6.39.0", + "deprecation": "^2.3.1" + }, + "peerDependencies": { + "@octokit/core": ">=3" + } + }, + "node_modules/@octokit/request": { + "version": "5.6.3", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/@octokit/request/-/request-5.6.3.tgz", + "integrity": "sha512-bFJl0I1KVc9jYTe9tdGGpAMPy32dLBXXo1dS/YwSCTL/2nd9XeHsY616RE3HPXDVk+a+dBuzyz5YdlXwcDTr2A==", + "dev": true, + "license": "MIT", + "dependencies": { + "@octokit/endpoint": "^6.0.1", + "@octokit/request-error": "^2.1.0", + "@octokit/types": "^6.16.1", + "is-plain-object": "^5.0.0", + "node-fetch": "^2.6.7", + "universal-user-agent": "^6.0.0" + } + }, + "node_modules/@octokit/request-error": { + "version": "2.1.0", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/@octokit/request-error/-/request-error-2.1.0.tgz", + "integrity": "sha512-1VIvgXxs9WHSjicsRwq8PlR2LR2x6DwsJAaFgzdi0JfJoGSO8mYI/cHJQ+9FbN21aa+DrgNLnwObmyeSC8Rmpg==", + "dev": true, + "license": "MIT", + "dependencies": { + "@octokit/types": "^6.0.3", + "deprecation": "^2.0.0", + "once": "^1.4.0" + } + }, + "node_modules/@octokit/request/node_modules/is-plain-object": { + "version": "5.0.0", + "resolved": 
"https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/is-plain-object/-/is-plain-object-5.0.0.tgz", + "integrity": "sha512-VRSzKkbMm5jMDoKLbltAkFQ5Qr7VDiTFGXxYFXXowVj387GeGNOCsOH6Msy00SGZ3Fp84b1Naa1psqgcCIEP5Q==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/@octokit/rest": { + "version": "18.12.0", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/@octokit/rest/-/rest-18.12.0.tgz", + "integrity": "sha512-gDPiOHlyGavxr72y0guQEhLsemgVjwRePayJ+FcKc2SJqKUbxbkvf5kAZEWA/MKvsfYlQAMVzNJE3ezQcxMJ2Q==", + "dev": true, + "license": "MIT", + "dependencies": { + "@octokit/core": "^3.5.1", + "@octokit/plugin-paginate-rest": "^2.16.8", + "@octokit/plugin-request-log": "^1.0.4", + "@octokit/plugin-rest-endpoint-methods": "^5.12.0" + } + }, + "node_modules/@octokit/types": { + "version": "6.41.0", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/@octokit/types/-/types-6.41.0.tgz", + "integrity": "sha512-eJ2jbzjdijiL3B4PrSQaSjuF2sPEQPVCPzBvTHJD9Nz+9dw2SGH4K4xeQJ77YfTq5bRQ+bD8wT11JbeDPmxmGg==", + "dev": true, + "license": "MIT", + "dependencies": { + "@octokit/openapi-types": "^12.11.0" + } + }, + "node_modules/@pkgjs/parseargs": { + "version": "0.11.0", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/@pkgjs/parseargs/-/parseargs-0.11.0.tgz", + "integrity": "sha512-+1VkjdD0QBLPodGrJUeqarH8VAIvQODIbwh9XpP5Syisf7YoQgsJKPNFoqqLQlu+VQ/tVSshMR6loPMn8U+dPg==", + "dev": true, + "license": "MIT", + "optional": true, + "engines": { + "node": ">=14" + } + }, + "node_modules/@sigstore/bundle": { + "version": "1.1.0", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/@sigstore/bundle/-/bundle-1.1.0.tgz", + "integrity": "sha512-PFutXEy0SmQxYI4texPw3dd2KewuNqv7OuK1ZFtY2fM754yhvG2KdgwIhRnoEE2uHdtdGNQ8s0lb94dW9sELog==", + "dev": true, + "license": "Apache-2.0", + "dependencies": { + "@sigstore/protobuf-specs": 
"^0.2.0" + }, + "engines": { + "node": "^14.17.0 || ^16.13.0 || >=18.0.0" + } + }, + "node_modules/@sigstore/protobuf-specs": { + "version": "0.2.1", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/@sigstore/protobuf-specs/-/protobuf-specs-0.2.1.tgz", + "integrity": "sha512-XTWVxnWJu+c1oCshMLwnKvz8ZQJJDVOlciMfgpJBQbThVjKTCG8dwyhgLngBD2KN0ap9F/gOV8rFDEx8uh7R2A==", + "dev": true, + "license": "Apache-2.0", + "engines": { + "node": "^14.17.0 || ^16.13.0 || >=18.0.0" + } + }, + "node_modules/@sigstore/sign": { + "version": "1.0.0", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/@sigstore/sign/-/sign-1.0.0.tgz", + "integrity": "sha512-INxFVNQteLtcfGmcoldzV6Je0sbbfh9I16DM4yJPw3j5+TFP8X6uIiA18mvpEa9yyeycAKgPmOA3X9hVdVTPUA==", + "dev": true, + "license": "Apache-2.0", + "dependencies": { + "@sigstore/bundle": "^1.1.0", + "@sigstore/protobuf-specs": "^0.2.0", + "make-fetch-happen": "^11.0.1" + }, + "engines": { + "node": "^14.17.0 || ^16.13.0 || >=18.0.0" + } + }, + "node_modules/@sigstore/sign/node_modules/@npmcli/fs": { + "version": "3.1.1", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/@npmcli/fs/-/fs-3.1.1.tgz", + "integrity": "sha512-q9CRWjpHCMIh5sVyefoD1cA7PkvILqCZsnSOEUUivORLjxCO/Irmue2DprETiNgEqktDBZaM1Bi+jrarx1XdCg==", + "dev": true, + "license": "ISC", + "dependencies": { + "semver": "^7.3.5" + }, + "engines": { + "node": "^14.17.0 || ^16.13.0 || >=18.0.0" + } + }, + "node_modules/@sigstore/sign/node_modules/@tootallnate/once": { + "version": "2.0.0", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/@tootallnate/once/-/once-2.0.0.tgz", + "integrity": "sha512-XCuKFP5PS55gnMVu3dty8KPatLqUoy/ZYzDzAGCQ8JNFCkLXzmI7vNHCR+XpbZaMWQK/vQubr7PkYq8g470J/A==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 10" + } + }, + "node_modules/@sigstore/sign/node_modules/brace-expansion": { + "version": "2.0.1", + "resolved": 
"https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/brace-expansion/-/brace-expansion-2.0.1.tgz", + "integrity": "sha512-XnAIvQ8eM+kC6aULx6wuQiwVsnzsi9d3WxzV3FpWTGA19F621kwdbsAcFKXgKUHZWsy+mY6iL1sHTxWEFCytDA==", + "dev": true, + "license": "MIT", + "dependencies": { + "balanced-match": "^1.0.0" + } + }, + "node_modules/@sigstore/sign/node_modules/cacache": { + "version": "17.1.4", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/cacache/-/cacache-17.1.4.tgz", + "integrity": "sha512-/aJwG2l3ZMJ1xNAnqbMpA40of9dj/pIH3QfiuQSqjfPJF747VR0J/bHn+/KdNnHKc6XQcWt/AfRSBft82W1d2A==", + "dev": true, + "license": "ISC", + "dependencies": { + "@npmcli/fs": "^3.1.0", + "fs-minipass": "^3.0.0", + "glob": "^10.2.2", + "lru-cache": "^7.7.1", + "minipass": "^7.0.3", + "minipass-collect": "^1.0.2", + "minipass-flush": "^1.0.5", + "minipass-pipeline": "^1.2.4", + "p-map": "^4.0.0", + "ssri": "^10.0.0", + "tar": "^6.1.11", + "unique-filename": "^3.0.0" + }, + "engines": { + "node": "^14.17.0 || ^16.13.0 || >=18.0.0" + } + }, + "node_modules/@sigstore/sign/node_modules/cacache/node_modules/minipass": { + "version": "7.1.2", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/minipass/-/minipass-7.1.2.tgz", + "integrity": "sha512-qOOzS1cBTWYF4BH8fVePDBOO9iptMnGUEZwNc/cMWnTV2nVLZ7VoNWEPHkYczZA0pdoA7dl6e7FL659nX9S2aw==", + "dev": true, + "license": "ISC", + "engines": { + "node": ">=16 || 14 >=14.17" + } + }, + "node_modules/@sigstore/sign/node_modules/fs-minipass": { + "version": "3.0.3", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/fs-minipass/-/fs-minipass-3.0.3.tgz", + "integrity": "sha512-XUBA9XClHbnJWSfBzjkm6RvPsyg3sryZt06BEQoXcF7EK/xpGaQYJgQKDJSUH5SGZ76Y7pFx1QBnXz09rU5Fbw==", + "dev": true, + "license": "ISC", + "dependencies": { + "minipass": "^7.0.3" + }, + "engines": { + "node": "^14.17.0 || ^16.13.0 || >=18.0.0" + } + }, + 
"node_modules/@sigstore/sign/node_modules/fs-minipass/node_modules/minipass": { + "version": "7.1.2", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/minipass/-/minipass-7.1.2.tgz", + "integrity": "sha512-qOOzS1cBTWYF4BH8fVePDBOO9iptMnGUEZwNc/cMWnTV2nVLZ7VoNWEPHkYczZA0pdoA7dl6e7FL659nX9S2aw==", + "dev": true, + "license": "ISC", + "engines": { + "node": ">=16 || 14 >=14.17" + } + }, + "node_modules/@sigstore/sign/node_modules/glob": { + "version": "10.4.5", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/glob/-/glob-10.4.5.tgz", + "integrity": "sha512-7Bv8RF0k6xjo7d4A/PxYLbUCfb6c+Vpd2/mB2yRDlew7Jb5hEXiCD9ibfO7wpk8i4sevK6DFny9h7EYbM3/sHg==", + "dev": true, + "license": "ISC", + "dependencies": { + "foreground-child": "^3.1.0", + "jackspeak": "^3.1.2", + "minimatch": "^9.0.4", + "minipass": "^7.1.2", + "package-json-from-dist": "^1.0.0", + "path-scurry": "^1.11.1" + }, + "bin": { + "glob": "dist/esm/bin.mjs" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/@sigstore/sign/node_modules/glob/node_modules/minipass": { + "version": "7.1.2", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/minipass/-/minipass-7.1.2.tgz", + "integrity": "sha512-qOOzS1cBTWYF4BH8fVePDBOO9iptMnGUEZwNc/cMWnTV2nVLZ7VoNWEPHkYczZA0pdoA7dl6e7FL659nX9S2aw==", + "dev": true, + "license": "ISC", + "engines": { + "node": ">=16 || 14 >=14.17" + } + }, + "node_modules/@sigstore/sign/node_modules/http-proxy-agent": { + "version": "5.0.0", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/http-proxy-agent/-/http-proxy-agent-5.0.0.tgz", + "integrity": "sha512-n2hY8YdoRE1i7r6M0w9DIw5GgZN0G25P8zLCRQ8rjXtTU3vsNFBI/vWK/UIeE6g5MUUz6avwAPXmL6Fy9D/90w==", + "dev": true, + "license": "MIT", + "dependencies": { + "@tootallnate/once": "2", + "agent-base": "6", + "debug": "4" + }, + "engines": { + "node": ">= 6" + } + }, + 
"node_modules/@sigstore/sign/node_modules/lru-cache": { + "version": "7.18.3", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/lru-cache/-/lru-cache-7.18.3.tgz", + "integrity": "sha512-jumlc0BIUrS3qJGgIkWZsyfAM7NCWiBcCDhnd+3NNM5KbBmLTgHVfWBcg6W+rLUsIpzpERPsvwUP7CckAQSOoA==", + "dev": true, + "license": "ISC", + "engines": { + "node": ">=12" + } + }, + "node_modules/@sigstore/sign/node_modules/make-fetch-happen": { + "version": "11.1.1", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/make-fetch-happen/-/make-fetch-happen-11.1.1.tgz", + "integrity": "sha512-rLWS7GCSTcEujjVBs2YqG7Y4643u8ucvCJeSRqiLYhesrDuzeuFIk37xREzAsfQaqzl8b9rNCE4m6J8tvX4Q8w==", + "dev": true, + "license": "ISC", + "dependencies": { + "agentkeepalive": "^4.2.1", + "cacache": "^17.0.0", + "http-cache-semantics": "^4.1.1", + "http-proxy-agent": "^5.0.0", + "https-proxy-agent": "^5.0.0", + "is-lambda": "^1.0.1", + "lru-cache": "^7.7.1", + "minipass": "^5.0.0", + "minipass-fetch": "^3.0.0", + "minipass-flush": "^1.0.5", + "minipass-pipeline": "^1.2.4", + "negotiator": "^0.6.3", + "promise-retry": "^2.0.1", + "socks-proxy-agent": "^7.0.0", + "ssri": "^10.0.0" + }, + "engines": { + "node": "^14.17.0 || ^16.13.0 || >=18.0.0" + } + }, + "node_modules/@sigstore/sign/node_modules/minimatch": { + "version": "9.0.5", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/minimatch/-/minimatch-9.0.5.tgz", + "integrity": "sha512-G6T0ZX48xgozx7587koeX9Ys2NYy6Gmv//P89sEte9V9whIapMNF4idKxnW2QtCcLiTWlb/wfCabAtAFWhhBow==", + "dev": true, + "license": "ISC", + "dependencies": { + "brace-expansion": "^2.0.1" + }, + "engines": { + "node": ">=16 || 14 >=14.17" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/@sigstore/sign/node_modules/minipass": { + "version": "5.0.0", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/minipass/-/minipass-5.0.0.tgz", + 
"integrity": "sha512-3FnjYuehv9k6ovOEbyOswadCDPX1piCfhV8ncmYtHOjuPwylVWsghTLo7rabjC3Rx5xD4HDx8Wm1xnMF7S5qFQ==", + "dev": true, + "license": "ISC", + "engines": { + "node": ">=8" + } + }, + "node_modules/@sigstore/sign/node_modules/minipass-fetch": { + "version": "3.0.5", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/minipass-fetch/-/minipass-fetch-3.0.5.tgz", + "integrity": "sha512-2N8elDQAtSnFV0Dk7gt15KHsS0Fyz6CbYZ360h0WTYV1Ty46li3rAXVOQj1THMNLdmrD9Vt5pBPtWtVkpwGBqg==", + "dev": true, + "license": "MIT", + "dependencies": { + "minipass": "^7.0.3", + "minipass-sized": "^1.0.3", + "minizlib": "^2.1.2" + }, + "engines": { + "node": "^14.17.0 || ^16.13.0 || >=18.0.0" + }, + "optionalDependencies": { + "encoding": "^0.1.13" + } + }, + "node_modules/@sigstore/sign/node_modules/minipass-fetch/node_modules/minipass": { + "version": "7.1.2", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/minipass/-/minipass-7.1.2.tgz", + "integrity": "sha512-qOOzS1cBTWYF4BH8fVePDBOO9iptMnGUEZwNc/cMWnTV2nVLZ7VoNWEPHkYczZA0pdoA7dl6e7FL659nX9S2aw==", + "dev": true, + "license": "ISC", + "engines": { + "node": ">=16 || 14 >=14.17" + } + }, + "node_modules/@sigstore/sign/node_modules/socks-proxy-agent": { + "version": "7.0.0", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/socks-proxy-agent/-/socks-proxy-agent-7.0.0.tgz", + "integrity": "sha512-Fgl0YPZ902wEsAyiQ+idGd1A7rSFx/ayC1CQVMw5P+EQx2V0SgpGtf6OKFhVjPflPUl9YMmEOnmfjCdMUsygww==", + "dev": true, + "license": "MIT", + "dependencies": { + "agent-base": "^6.0.2", + "debug": "^4.3.3", + "socks": "^2.6.2" + }, + "engines": { + "node": ">= 10" + } + }, + "node_modules/@sigstore/sign/node_modules/ssri": { + "version": "10.0.6", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/ssri/-/ssri-10.0.6.tgz", + "integrity": "sha512-MGrFH9Z4NP9Iyhqn16sDtBpRRNJ0Y2hNa6D65h736fVSaPCHr4DM4sWUNvVaSuC+0OBGhwsrydQwmgfg5LncqQ==", + 
"dev": true, + "license": "ISC", + "dependencies": { + "minipass": "^7.0.3" + }, + "engines": { + "node": "^14.17.0 || ^16.13.0 || >=18.0.0" + } + }, + "node_modules/@sigstore/sign/node_modules/ssri/node_modules/minipass": { + "version": "7.1.2", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/minipass/-/minipass-7.1.2.tgz", + "integrity": "sha512-qOOzS1cBTWYF4BH8fVePDBOO9iptMnGUEZwNc/cMWnTV2nVLZ7VoNWEPHkYczZA0pdoA7dl6e7FL659nX9S2aw==", + "dev": true, + "license": "ISC", + "engines": { + "node": ">=16 || 14 >=14.17" + } + }, + "node_modules/@sigstore/sign/node_modules/unique-filename": { + "version": "3.0.0", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/unique-filename/-/unique-filename-3.0.0.tgz", + "integrity": "sha512-afXhuC55wkAmZ0P18QsVE6kp8JaxrEokN2HGIoIVv2ijHQd419H0+6EigAFcIzXeMIkcIkNBpB3L/DXB3cTS/g==", + "dev": true, + "license": "ISC", + "dependencies": { + "unique-slug": "^4.0.0" + }, + "engines": { + "node": "^14.17.0 || ^16.13.0 || >=18.0.0" + } + }, + "node_modules/@sigstore/sign/node_modules/unique-slug": { + "version": "4.0.0", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/unique-slug/-/unique-slug-4.0.0.tgz", + "integrity": "sha512-WrcA6AyEfqDX5bWige/4NQfPZMtASNVxdmWR76WESYQVAACSgWcR6e9i0mofqqBxYFtL4oAxPIptY73/0YE1DQ==", + "dev": true, + "license": "ISC", + "dependencies": { + "imurmurhash": "^0.1.4" + }, + "engines": { + "node": "^14.17.0 || ^16.13.0 || >=18.0.0" + } + }, + "node_modules/@sigstore/tuf": { + "version": "1.0.3", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/@sigstore/tuf/-/tuf-1.0.3.tgz", + "integrity": "sha512-2bRovzs0nJZFlCN3rXirE4gwxCn97JNjMmwpecqlbgV9WcxX7WRuIrgzx/X7Ib7MYRbyUTpBYE0s2x6AmZXnlg==", + "dev": true, + "license": "Apache-2.0", + "dependencies": { + "@sigstore/protobuf-specs": "^0.2.0", + "tuf-js": "^1.1.7" + }, + "engines": { + "node": "^14.17.0 || ^16.13.0 || >=18.0.0" + } + }, + 
"node_modules/@tensorflow/tfjs": { + "version": "4.22.0", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/@tensorflow/tfjs/-/tfjs-4.22.0.tgz", + "integrity": "sha512-0TrIrXs6/b7FLhLVNmfh8Sah6JgjBPH4mZ8JGb7NU6WW+cx00qK5BcAZxw7NCzxj6N8MRAIfHq+oNbPUNG5VAg==", + "license": "Apache-2.0", + "dependencies": { + "@tensorflow/tfjs-backend-cpu": "4.22.0", + "@tensorflow/tfjs-backend-webgl": "4.22.0", + "@tensorflow/tfjs-converter": "4.22.0", + "@tensorflow/tfjs-core": "4.22.0", + "@tensorflow/tfjs-data": "4.22.0", + "@tensorflow/tfjs-layers": "4.22.0", + "argparse": "^1.0.10", + "chalk": "^4.1.0", + "core-js": "3.29.1", + "regenerator-runtime": "^0.13.5", + "yargs": "^16.0.3" + }, + "bin": { + "tfjs-custom-module": "dist/tools/custom_module/cli.js" + } + }, + "node_modules/@tensorflow/tfjs-backend-cpu": { + "version": "4.22.0", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/@tensorflow/tfjs-backend-cpu/-/tfjs-backend-cpu-4.22.0.tgz", + "integrity": "sha512-1u0FmuLGuRAi8D2c3cocHTASGXOmHc/4OvoVDENJayjYkS119fcTcQf4iHrtLthWyDIPy3JiPhRrZQC9EwnhLw==", + "license": "Apache-2.0", + "dependencies": { + "@types/seedrandom": "^2.4.28", + "seedrandom": "^3.0.5" + }, + "engines": { + "yarn": ">= 1.3.2" + }, + "peerDependencies": { + "@tensorflow/tfjs-core": "4.22.0" + } + }, + "node_modules/@tensorflow/tfjs-backend-wasm": { + "version": "4.22.0", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/@tensorflow/tfjs-backend-wasm/-/tfjs-backend-wasm-4.22.0.tgz", + "integrity": "sha512-/IYhReRIp4jg/wYW0OwbbJZG8ON87mbz0PgkiP3CdcACRSvUN0h8rvC0O3YcDtkTQtFWF/tcXq/KlVDyV49wmA==", + "license": "Apache-2.0", + "dependencies": { + "@tensorflow/tfjs-backend-cpu": "4.22.0", + "@types/emscripten": "~0.0.34" + }, + "peerDependencies": { + "@tensorflow/tfjs-core": "4.22.0" + } + }, + "node_modules/@tensorflow/tfjs-backend-webgl": { + "version": "4.22.0", + "resolved": 
"https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/@tensorflow/tfjs-backend-webgl/-/tfjs-backend-webgl-4.22.0.tgz", + "integrity": "sha512-H535XtZWnWgNwSzv538czjVlbJebDl5QTMOth4RXr2p/kJ1qSIXE0vZvEtO+5EC9b00SvhplECny2yDewQb/Yg==", + "license": "Apache-2.0", + "dependencies": { + "@tensorflow/tfjs-backend-cpu": "4.22.0", + "@types/offscreencanvas": "~2019.3.0", + "@types/seedrandom": "^2.4.28", + "seedrandom": "^3.0.5" + }, + "engines": { + "yarn": ">= 1.3.2" + }, + "peerDependencies": { + "@tensorflow/tfjs-core": "4.22.0" + } + }, + "node_modules/@tensorflow/tfjs-converter": { + "version": "4.22.0", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/@tensorflow/tfjs-converter/-/tfjs-converter-4.22.0.tgz", + "integrity": "sha512-PT43MGlnzIo+YfbsjM79Lxk9lOq6uUwZuCc8rrp0hfpLjF6Jv8jS84u2jFb+WpUeuF4K33ZDNx8CjiYrGQ2trQ==", + "license": "Apache-2.0", + "peerDependencies": { + "@tensorflow/tfjs-core": "4.22.0" + } + }, + "node_modules/@tensorflow/tfjs-core": { + "version": "4.22.0", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/@tensorflow/tfjs-core/-/tfjs-core-4.22.0.tgz", + "integrity": "sha512-LEkOyzbknKFoWUwfkr59vSB68DMJ4cjwwHgicXN0DUi3a0Vh1Er3JQqCI1Hl86GGZQvY8ezVrtDIvqR1ZFW55A==", + "license": "Apache-2.0", + "dependencies": { + "@types/long": "^4.0.1", + "@types/offscreencanvas": "~2019.7.0", + "@types/seedrandom": "^2.4.28", + "@webgpu/types": "0.1.38", + "long": "4.0.0", + "node-fetch": "~2.6.1", + "seedrandom": "^3.0.5" + }, + "engines": { + "yarn": ">= 1.3.2" + } + }, + "node_modules/@tensorflow/tfjs-core/node_modules/@types/offscreencanvas": { + "version": "2019.7.3", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/@types/offscreencanvas/-/offscreencanvas-2019.7.3.tgz", + "integrity": "sha512-ieXiYmgSRXUDeOntE1InxjWyvEelZGP63M+cGuquuRLuIKKT1osnkXjxev9B7d1nXSug5vpunx+gNlbVxMlC9A==", + "license": "MIT" + }, + 
"node_modules/@tensorflow/tfjs-core/node_modules/node-fetch": { + "version": "2.6.13", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/node-fetch/-/node-fetch-2.6.13.tgz", + "integrity": "sha512-StxNAxh15zr77QvvkmveSQ8uCQ4+v5FkvNTj0OESmiHu+VRi/gXArXtkWMElOsOUNLtUEvI4yS+rdtOHZTwlQA==", + "license": "MIT", + "dependencies": { + "whatwg-url": "^5.0.0" + }, + "engines": { + "node": "4.x || >=6.0.0" + }, + "peerDependencies": { + "encoding": "^0.1.0" + }, + "peerDependenciesMeta": { + "encoding": { + "optional": true + } + } + }, + "node_modules/@tensorflow/tfjs-data": { + "version": "4.22.0", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/@tensorflow/tfjs-data/-/tfjs-data-4.22.0.tgz", + "integrity": "sha512-dYmF3LihQIGvtgJrt382hSRH4S0QuAp2w1hXJI2+kOaEqo5HnUPG0k5KA6va+S1yUhx7UBToUKCBHeLHFQRV4w==", + "license": "Apache-2.0", + "dependencies": { + "@types/node-fetch": "^2.1.2", + "node-fetch": "~2.6.1", + "string_decoder": "^1.3.0" + }, + "peerDependencies": { + "@tensorflow/tfjs-core": "4.22.0", + "seedrandom": "^3.0.5" + } + }, + "node_modules/@tensorflow/tfjs-data/node_modules/node-fetch": { + "version": "2.6.13", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/node-fetch/-/node-fetch-2.6.13.tgz", + "integrity": "sha512-StxNAxh15zr77QvvkmveSQ8uCQ4+v5FkvNTj0OESmiHu+VRi/gXArXtkWMElOsOUNLtUEvI4yS+rdtOHZTwlQA==", + "license": "MIT", + "dependencies": { + "whatwg-url": "^5.0.0" + }, + "engines": { + "node": "4.x || >=6.0.0" + }, + "peerDependencies": { + "encoding": "^0.1.0" + }, + "peerDependenciesMeta": { + "encoding": { + "optional": true + } + } + }, + "node_modules/@tensorflow/tfjs-layers": { + "version": "4.22.0", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/@tensorflow/tfjs-layers/-/tfjs-layers-4.22.0.tgz", + "integrity": "sha512-lybPj4ZNj9iIAPUj7a8ZW1hg8KQGfqWLlCZDi9eM/oNKCCAgchiyzx8OrYoWmRrB+AM6VNEeIT+2gZKg5ReihA==", + "license": 
"Apache-2.0 AND MIT", + "peerDependencies": { + "@tensorflow/tfjs-core": "4.22.0" + } + }, + "node_modules/@tootallnate/once": { + "version": "1.1.2", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/@tootallnate/once/-/once-1.1.2.tgz", + "integrity": "sha512-RbzJvlNzmRq5c3O09UipeuXno4tA1FE6ikOjxZK0tuxVv3412l64l5t1W5pj4+rJq9vpkm/kwiR07aZXnsKPxw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 6" + } + }, + "node_modules/@tufjs/canonical-json": { + "version": "1.0.0", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/@tufjs/canonical-json/-/canonical-json-1.0.0.tgz", + "integrity": "sha512-QTnf++uxunWvG2z3UFNzAoQPHxnSXOwtaI3iJ+AohhV+5vONuArPjJE7aPXPVXfXJsqrVbZBu9b81AJoSd09IQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": "^14.17.0 || ^16.13.0 || >=18.0.0" + } + }, + "node_modules/@tufjs/models": { + "version": "1.0.4", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/@tufjs/models/-/models-1.0.4.tgz", + "integrity": "sha512-qaGV9ltJP0EO25YfFUPhxRVK0evXFIAGicsVXuRim4Ed9cjPxYhNnNJ49SFmbeLgtxpslIkX317IgpfcHPVj/A==", + "dev": true, + "license": "MIT", + "dependencies": { + "@tufjs/canonical-json": "1.0.0", + "minimatch": "^9.0.0" + }, + "engines": { + "node": "^14.17.0 || ^16.13.0 || >=18.0.0" + } + }, + "node_modules/@tufjs/models/node_modules/brace-expansion": { + "version": "2.0.1", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/brace-expansion/-/brace-expansion-2.0.1.tgz", + "integrity": "sha512-XnAIvQ8eM+kC6aULx6wuQiwVsnzsi9d3WxzV3FpWTGA19F621kwdbsAcFKXgKUHZWsy+mY6iL1sHTxWEFCytDA==", + "dev": true, + "license": "MIT", + "dependencies": { + "balanced-match": "^1.0.0" + } + }, + "node_modules/@tufjs/models/node_modules/minimatch": { + "version": "9.0.5", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/minimatch/-/minimatch-9.0.5.tgz", + "integrity": 
"sha512-G6T0ZX48xgozx7587koeX9Ys2NYy6Gmv//P89sEte9V9whIapMNF4idKxnW2QtCcLiTWlb/wfCabAtAFWhhBow==", + "dev": true, + "license": "ISC", + "dependencies": { + "brace-expansion": "^2.0.1" + }, + "engines": { + "node": ">=16 || 14 >=14.17" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/@types/emscripten": { + "version": "0.0.34", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/@types/emscripten/-/emscripten-0.0.34.tgz", + "integrity": "sha512-QSb9ojDincskc+uKMI0KXp8e1NALFINCrMlp8VGKGcTSxeEyRTTKyjWw75NYrCZHUsVEEEpr1tYHpbtaC++/sQ==", + "license": "MIT" + }, + "node_modules/@types/eslint": { + "version": "9.6.1", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/@types/eslint/-/eslint-9.6.1.tgz", + "integrity": "sha512-FXx2pKgId/WyYo2jXw63kk7/+TY7u7AziEJxJAnSFzHlqTAS3Ync6SvgYAN/k4/PQpnnVuzoMuVnByKK2qp0ag==", + "dev": true, + "license": "MIT", + "peer": true, + "dependencies": { + "@types/estree": "*", + "@types/json-schema": "*" + } + }, + "node_modules/@types/eslint-scope": { + "version": "3.7.7", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/@types/eslint-scope/-/eslint-scope-3.7.7.tgz", + "integrity": "sha512-MzMFlSLBqNF2gcHWO0G1vP/YQyfvrxZ0bF+u7mzUdZ1/xK4A4sru+nraZz5i3iEIk1l1uyicaDVTB4QbbEkAYg==", + "dev": true, + "license": "MIT", + "peer": true, + "dependencies": { + "@types/eslint": "*", + "@types/estree": "*" + } + }, + "node_modules/@types/estree": { + "version": "1.0.7", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/@types/estree/-/estree-1.0.7.tgz", + "integrity": "sha512-w28IoSUCJpidD/TGviZwwMJckNESJZXFu7NBZ5YJ4mEUnNraUn9Pm8HSZm/jDF1pDWYKspWE7oVphigUPRakIQ==", + "dev": true, + "license": "MIT", + "peer": true + }, + "node_modules/@types/expect": { + "version": "1.20.4", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/@types/expect/-/expect-1.20.4.tgz", + 
"integrity": "sha512-Q5Vn3yjTDyCMV50TB6VRIbQNxSE4OmZR86VSbGaNpfUolm0iePBB4KdEEHmxoY5sT2+2DIvXW0rvMDP2nHZ4Mg==", + "dev": true, + "license": "MIT" + }, + "node_modules/@types/json-schema": { + "version": "7.0.15", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/@types/json-schema/-/json-schema-7.0.15.tgz", + "integrity": "sha512-5+fP8P8MFNC+AyZCDxrB2pkZFPGzqQWUzpSeuuVLvm8VMcorNYavBqoFcxK8bQz4Qsbn4oUEEem4wDLfcysGHA==", + "dev": true, + "license": "MIT", + "peer": true + }, + "node_modules/@types/long": { + "version": "4.0.2", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/@types/long/-/long-4.0.2.tgz", + "integrity": "sha512-MqTGEo5bj5t157U6fA/BiDynNkn0YknVdh48CMPkTSpFTVmvao5UQmm7uEF6xBEo7qIMAlY/JSleYaE6VOdpaA==", + "license": "MIT" + }, + "node_modules/@types/minimatch": { + "version": "3.0.5", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/@types/minimatch/-/minimatch-3.0.5.tgz", + "integrity": "sha512-Klz949h02Gz2uZCMGwDUSDS1YBlTdDDgbWHi+81l29tQALUtvz4rAYi5uoVhE5Lagoq6DeqAUlbrHvW/mXDgdQ==", + "dev": true, + "license": "MIT" + }, + "node_modules/@types/node": { + "version": "22.14.1", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/@types/node/-/node-22.14.1.tgz", + "integrity": "sha512-u0HuPQwe/dHrItgHHpmw3N2fYCR6x4ivMNbPHRkBVP4CvN+kiRrKHWk3i8tXiO/joPwXLMYvF9TTF0eqgHIuOw==", + "license": "MIT", + "dependencies": { + "undici-types": "~6.21.0" + } + }, + "node_modules/@types/node-fetch": { + "version": "2.6.12", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/@types/node-fetch/-/node-fetch-2.6.12.tgz", + "integrity": "sha512-8nneRWKCg3rMtF69nLQJnOYUcbafYeFSjqkw3jCRLsqkWFlHaoQrr5mXmofFGOx3DKn7UfmBMyov8ySvLRVldA==", + "license": "MIT", + "dependencies": { + "@types/node": "*", + "form-data": "^4.0.0" + } + }, + "node_modules/@types/normalize-package-data": { + "version": "2.4.4", + "resolved": 
"https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/@types/normalize-package-data/-/normalize-package-data-2.4.4.tgz", + "integrity": "sha512-37i+OaWTh9qeK4LSHPsyRC7NahnGotNuZvjLSgcPzblpHB3rrCJxAOgI5gCdKm7coonsaX1Of0ILiTcnZjbfxA==", + "dev": true, + "license": "MIT" + }, + "node_modules/@types/offscreencanvas": { + "version": "2019.3.0", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/@types/offscreencanvas/-/offscreencanvas-2019.3.0.tgz", + "integrity": "sha512-esIJx9bQg+QYF0ra8GnvfianIY8qWB0GBx54PK5Eps6m+xTj86KLavHv6qDhzKcu5UUOgNfJ2pWaIIV7TRUd9Q==", + "license": "MIT" + }, + "node_modules/@types/seedrandom": { + "version": "2.4.34", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/@types/seedrandom/-/seedrandom-2.4.34.tgz", + "integrity": "sha512-ytDiArvrn/3Xk6/vtylys5tlY6eo7Ane0hvcx++TKo6RxQXuVfW0AF/oeWqAj9dN29SyhtawuXstgmPlwNcv/A==", + "license": "MIT" + }, + "node_modules/@types/vinyl": { + "version": "2.0.12", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/@types/vinyl/-/vinyl-2.0.12.tgz", + "integrity": "sha512-Sr2fYMBUVGYq8kj3UthXFAu5UN6ZW+rYr4NACjZQJvHvj+c8lYv0CahmZ2P/r7iUkN44gGUBwqxZkrKXYPb7cw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@types/expect": "^1.20.4", + "@types/node": "*" + } + }, + "node_modules/@webassemblyjs/ast": { + "version": "1.14.1", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/@webassemblyjs/ast/-/ast-1.14.1.tgz", + "integrity": "sha512-nuBEDgQfm1ccRp/8bCQrx1frohyufl4JlbMMZ4P1wpeOfDhF6FQkxZJ1b/e+PLwr6X1Nhw6OLme5usuBWYBvuQ==", + "dev": true, + "license": "MIT", + "peer": true, + "dependencies": { + "@webassemblyjs/helper-numbers": "1.13.2", + "@webassemblyjs/helper-wasm-bytecode": "1.13.2" + } + }, + "node_modules/@webassemblyjs/floating-point-hex-parser": { + "version": "1.13.2", + "resolved": 
"https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/@webassemblyjs/floating-point-hex-parser/-/floating-point-hex-parser-1.13.2.tgz", + "integrity": "sha512-6oXyTOzbKxGH4steLbLNOu71Oj+C8Lg34n6CqRvqfS2O71BxY6ByfMDRhBytzknj9yGUPVJ1qIKhRlAwO1AovA==", + "dev": true, + "license": "MIT", + "peer": true + }, + "node_modules/@webassemblyjs/helper-api-error": { + "version": "1.13.2", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/@webassemblyjs/helper-api-error/-/helper-api-error-1.13.2.tgz", + "integrity": "sha512-U56GMYxy4ZQCbDZd6JuvvNV/WFildOjsaWD3Tzzvmw/mas3cXzRJPMjP83JqEsgSbyrmaGjBfDtV7KDXV9UzFQ==", + "dev": true, + "license": "MIT", + "peer": true + }, + "node_modules/@webassemblyjs/helper-buffer": { + "version": "1.14.1", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/@webassemblyjs/helper-buffer/-/helper-buffer-1.14.1.tgz", + "integrity": "sha512-jyH7wtcHiKssDtFPRB+iQdxlDf96m0E39yb0k5uJVhFGleZFoNw1c4aeIcVUPPbXUVJ94wwnMOAqUHyzoEPVMA==", + "dev": true, + "license": "MIT", + "peer": true + }, + "node_modules/@webassemblyjs/helper-numbers": { + "version": "1.13.2", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/@webassemblyjs/helper-numbers/-/helper-numbers-1.13.2.tgz", + "integrity": "sha512-FE8aCmS5Q6eQYcV3gI35O4J789wlQA+7JrqTTpJqn5emA4U2hvwJmvFRC0HODS+3Ye6WioDklgd6scJ3+PLnEA==", + "dev": true, + "license": "MIT", + "peer": true, + "dependencies": { + "@webassemblyjs/floating-point-hex-parser": "1.13.2", + "@webassemblyjs/helper-api-error": "1.13.2", + "@xtuc/long": "4.2.2" + } + }, + "node_modules/@webassemblyjs/helper-wasm-bytecode": { + "version": "1.13.2", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/@webassemblyjs/helper-wasm-bytecode/-/helper-wasm-bytecode-1.13.2.tgz", + "integrity": "sha512-3QbLKy93F0EAIXLh0ogEVR6rOubA9AoZ+WRYhNbFyuB70j3dRdwH9g+qXhLAO0kiYGlg3TxDV+I4rQTr/YNXkA==", + "dev": true, + "license": 
"MIT", + "peer": true + }, + "node_modules/@webassemblyjs/helper-wasm-section": { + "version": "1.14.1", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/@webassemblyjs/helper-wasm-section/-/helper-wasm-section-1.14.1.tgz", + "integrity": "sha512-ds5mXEqTJ6oxRoqjhWDU83OgzAYjwsCV8Lo/N+oRsNDmx/ZDpqalmrtgOMkHwxsG0iI//3BwWAErYRHtgn0dZw==", + "dev": true, + "license": "MIT", + "peer": true, + "dependencies": { + "@webassemblyjs/ast": "1.14.1", + "@webassemblyjs/helper-buffer": "1.14.1", + "@webassemblyjs/helper-wasm-bytecode": "1.13.2", + "@webassemblyjs/wasm-gen": "1.14.1" + } + }, + "node_modules/@webassemblyjs/ieee754": { + "version": "1.13.2", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/@webassemblyjs/ieee754/-/ieee754-1.13.2.tgz", + "integrity": "sha512-4LtOzh58S/5lX4ITKxnAK2USuNEvpdVV9AlgGQb8rJDHaLeHciwG4zlGr0j/SNWlr7x3vO1lDEsuePvtcDNCkw==", + "dev": true, + "license": "MIT", + "peer": true, + "dependencies": { + "@xtuc/ieee754": "^1.2.0" + } + }, + "node_modules/@webassemblyjs/leb128": { + "version": "1.13.2", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/@webassemblyjs/leb128/-/leb128-1.13.2.tgz", + "integrity": "sha512-Lde1oNoIdzVzdkNEAWZ1dZ5orIbff80YPdHx20mrHwHrVNNTjNr8E3xz9BdpcGqRQbAEa+fkrCb+fRFTl/6sQw==", + "dev": true, + "license": "Apache-2.0", + "peer": true, + "dependencies": { + "@xtuc/long": "4.2.2" + } + }, + "node_modules/@webassemblyjs/utf8": { + "version": "1.13.2", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/@webassemblyjs/utf8/-/utf8-1.13.2.tgz", + "integrity": "sha512-3NQWGjKTASY1xV5m7Hr0iPeXD9+RDobLll3T9d2AO+g3my8xy5peVyjSag4I50mR1bBSN/Ct12lo+R9tJk0NZQ==", + "dev": true, + "license": "MIT", + "peer": true + }, + "node_modules/@webassemblyjs/wasm-edit": { + "version": "1.14.1", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/@webassemblyjs/wasm-edit/-/wasm-edit-1.14.1.tgz", 
+ "integrity": "sha512-RNJUIQH/J8iA/1NzlE4N7KtyZNHi3w7at7hDjvRNm5rcUXa00z1vRz3glZoULfJ5mpvYhLybmVcwcjGrC1pRrQ==", + "dev": true, + "license": "MIT", + "peer": true, + "dependencies": { + "@webassemblyjs/ast": "1.14.1", + "@webassemblyjs/helper-buffer": "1.14.1", + "@webassemblyjs/helper-wasm-bytecode": "1.13.2", + "@webassemblyjs/helper-wasm-section": "1.14.1", + "@webassemblyjs/wasm-gen": "1.14.1", + "@webassemblyjs/wasm-opt": "1.14.1", + "@webassemblyjs/wasm-parser": "1.14.1", + "@webassemblyjs/wast-printer": "1.14.1" + } + }, + "node_modules/@webassemblyjs/wasm-gen": { + "version": "1.14.1", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/@webassemblyjs/wasm-gen/-/wasm-gen-1.14.1.tgz", + "integrity": "sha512-AmomSIjP8ZbfGQhumkNvgC33AY7qtMCXnN6bL2u2Js4gVCg8fp735aEiMSBbDR7UQIj90n4wKAFUSEd0QN2Ukg==", + "dev": true, + "license": "MIT", + "peer": true, + "dependencies": { + "@webassemblyjs/ast": "1.14.1", + "@webassemblyjs/helper-wasm-bytecode": "1.13.2", + "@webassemblyjs/ieee754": "1.13.2", + "@webassemblyjs/leb128": "1.13.2", + "@webassemblyjs/utf8": "1.13.2" + } + }, + "node_modules/@webassemblyjs/wasm-opt": { + "version": "1.14.1", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/@webassemblyjs/wasm-opt/-/wasm-opt-1.14.1.tgz", + "integrity": "sha512-PTcKLUNvBqnY2U6E5bdOQcSM+oVP/PmrDY9NzowJjislEjwP/C4an2303MCVS2Mg9d3AJpIGdUFIQQWbPds0Sw==", + "dev": true, + "license": "MIT", + "peer": true, + "dependencies": { + "@webassemblyjs/ast": "1.14.1", + "@webassemblyjs/helper-buffer": "1.14.1", + "@webassemblyjs/wasm-gen": "1.14.1", + "@webassemblyjs/wasm-parser": "1.14.1" + } + }, + "node_modules/@webassemblyjs/wasm-parser": { + "version": "1.14.1", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/@webassemblyjs/wasm-parser/-/wasm-parser-1.14.1.tgz", + "integrity": "sha512-JLBl+KZ0R5qB7mCnud/yyX08jWFw5MsoalJ1pQ4EdFlgj9VdXKGuENGsiCIjegI1W7p91rUlcB/LB5yRJKNTcQ==", + "dev": 
true, + "license": "MIT", + "peer": true, + "dependencies": { + "@webassemblyjs/ast": "1.14.1", + "@webassemblyjs/helper-api-error": "1.13.2", + "@webassemblyjs/helper-wasm-bytecode": "1.13.2", + "@webassemblyjs/ieee754": "1.13.2", + "@webassemblyjs/leb128": "1.13.2", + "@webassemblyjs/utf8": "1.13.2" + } + }, + "node_modules/@webassemblyjs/wast-printer": { + "version": "1.14.1", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/@webassemblyjs/wast-printer/-/wast-printer-1.14.1.tgz", + "integrity": "sha512-kPSSXE6De1XOR820C90RIo2ogvZG+c3KiHzqUoO/F34Y2shGzesfqv7o57xrxovZJH/MetF5UjroJ/R/3isoiw==", + "dev": true, + "license": "MIT", + "peer": true, + "dependencies": { + "@webassemblyjs/ast": "1.14.1", + "@xtuc/long": "4.2.2" + } + }, + "node_modules/@webgpu/types": { + "version": "0.1.38", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/@webgpu/types/-/types-0.1.38.tgz", + "integrity": "sha512-7LrhVKz2PRh+DD7+S+PVaFd5HxaWQvoMqBbsV9fNJO1pjUs1P8bM2vQVNfk+3URTqbuTI7gkXi0rfsN0IadoBA==", + "license": "BSD-3-Clause" + }, + "node_modules/@webpack-cli/configtest": { + "version": "2.1.1", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/@webpack-cli/configtest/-/configtest-2.1.1.tgz", + "integrity": "sha512-wy0mglZpDSiSS0XHrVR+BAdId2+yxPSoJW8fsna3ZpYSlufjvxnP4YbKTCBZnNIcGN4r6ZPXV55X4mYExOfLmw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=14.15.0" + }, + "peerDependencies": { + "webpack": "5.x.x", + "webpack-cli": "5.x.x" + } + }, + "node_modules/@webpack-cli/generators": { + "version": "3.0.7", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/@webpack-cli/generators/-/generators-3.0.7.tgz", + "integrity": "sha512-H4dlEX8CzO5EHBYYZQop9x4w6lG9FenSF/1spLRlvRAULDgTs0VfmwOuwp03tTLml9jpMsouuVw6vEN8KpwE/w==", + "dev": true, + "license": "MIT", + "dependencies": { + "webpack-cli": "^5.1.4", + "yeoman-environment": "^3.9.1", + 
"yeoman-generator": "^5.7.0" + }, + "engines": { + "node": ">=14.15.0" + }, + "peerDependencies": { + "webpack": "5.x.x", + "webpack-cli": "5.x.x" + }, + "peerDependenciesMeta": { + "prettier": { + "optional": true + } + } + }, + "node_modules/@webpack-cli/info": { + "version": "2.0.2", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/@webpack-cli/info/-/info-2.0.2.tgz", + "integrity": "sha512-zLHQdI/Qs1UyT5UBdWNqsARasIA+AaF8t+4u2aS2nEpBQh2mWIVb8qAklq0eUENnC5mOItrIB4LiS9xMtph18A==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=14.15.0" + }, + "peerDependencies": { + "webpack": "5.x.x", + "webpack-cli": "5.x.x" + } + }, + "node_modules/@webpack-cli/serve": { + "version": "2.0.5", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/@webpack-cli/serve/-/serve-2.0.5.tgz", + "integrity": "sha512-lqaoKnRYBdo1UgDX8uF24AfGMifWK19TxPmM5FHc2vAGxrJ/qtyUyFBWoY1tISZdelsQ5fBcOusifo5o5wSJxQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=14.15.0" + }, + "peerDependencies": { + "webpack": "5.x.x", + "webpack-cli": "5.x.x" + }, + "peerDependenciesMeta": { + "webpack-dev-server": { + "optional": true + } + } + }, + "node_modules/@xtuc/ieee754": { + "version": "1.2.0", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/@xtuc/ieee754/-/ieee754-1.2.0.tgz", + "integrity": "sha512-DX8nKgqcGwsc0eJSqYt5lwP4DH5FlHnmuWWBRy7X0NcaGR0ZtuyeESgMwTYVEtxmsNGY+qit4QYT/MIYTOTPeA==", + "dev": true, + "license": "BSD-3-Clause", + "peer": true + }, + "node_modules/@xtuc/long": { + "version": "4.2.2", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/@xtuc/long/-/long-4.2.2.tgz", + "integrity": "sha512-NuHqBY1PB/D8xU6s/thBgOAiAP7HOYDQ32+BFZILJ8ivkUkAHQnWfn6WhL79Owj1qmUnoN/YPhktdIoucipkAQ==", + "dev": true, + "license": "Apache-2.0", + "peer": true + }, + "node_modules/abbrev": { + "version": "1.1.1", + "resolved": 
"https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/abbrev/-/abbrev-1.1.1.tgz", + "integrity": "sha512-nne9/IiQ/hzIhY6pdDnbBtz7DjPTKrY00P/zvPSm5pOFkl6xuGrGnXn/VtTNNfNtAfZ9/1RtehkszU9qcTii0Q==", + "dev": true, + "license": "ISC" + }, + "node_modules/abort-controller": { + "version": "3.0.0", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/abort-controller/-/abort-controller-3.0.0.tgz", + "integrity": "sha512-h8lQ8tacZYnR3vNQTgibj+tODHI5/+l06Au2Pcriv/Gmet0eaj4TwWH41sO9wnHDiQsEj19q0drzdWdeAHtweg==", + "dev": true, + "license": "MIT", + "dependencies": { + "event-target-shim": "^5.0.0" + }, + "engines": { + "node": ">=6.5" + } + }, + "node_modules/acorn": { + "version": "8.14.1", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/acorn/-/acorn-8.14.1.tgz", + "integrity": "sha512-OvQ/2pUDKmgfCg++xsTX1wGxfTaszcHVcTctW4UJB4hibJx2HXxxO5UmVgyjMa+ZDsiaf5wWLXYpRWMmBI0QHg==", + "dev": true, + "license": "MIT", + "peer": true, + "bin": { + "acorn": "bin/acorn" + }, + "engines": { + "node": ">=0.4.0" + } + }, + "node_modules/agent-base": { + "version": "6.0.2", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/agent-base/-/agent-base-6.0.2.tgz", + "integrity": "sha512-RZNwNclF7+MS/8bDg70amg32dyeZGZxiDuQmZxKLAlQjr3jGyLx+4Kkk58UO7D2QdgFIQCovuSuZESne6RG6XQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "debug": "4" + }, + "engines": { + "node": ">= 6.0.0" + } + }, + "node_modules/agentkeepalive": { + "version": "4.6.0", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/agentkeepalive/-/agentkeepalive-4.6.0.tgz", + "integrity": "sha512-kja8j7PjmncONqaTsB8fQ+wE2mSU2DJ9D4XKoJ5PFWIdRMa6SLSN1ff4mOr4jCbfRSsxR4keIiySJU0N9T5hIQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "humanize-ms": "^1.2.1" + }, + "engines": { + "node": ">= 8.0.0" + } + }, + "node_modules/aggregate-error": { + "version": "3.1.0", + "resolved": 
"https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/aggregate-error/-/aggregate-error-3.1.0.tgz", + "integrity": "sha512-4I7Td01quW/RpocfNayFdFVk1qSuoh0E7JrbRJ16nH01HhKFQ88INq9Sd+nd72zqRySlr9BmDA8xlEJ6vJMrYA==", + "dev": true, + "license": "MIT", + "dependencies": { + "clean-stack": "^2.0.0", + "indent-string": "^4.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/ajv": { + "version": "8.17.1", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/ajv/-/ajv-8.17.1.tgz", + "integrity": "sha512-B/gBuNg5SiMTrPkC+A2+cW0RszwxYmn6VYxB/inlBStS5nx6xHIt/ehKRhIMhqusl7a8LjQoZnjCs5vhwxOQ1g==", + "dev": true, + "license": "MIT", + "peer": true, + "dependencies": { + "fast-deep-equal": "^3.1.3", + "fast-uri": "^3.0.1", + "json-schema-traverse": "^1.0.0", + "require-from-string": "^2.0.2" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/epoberezkin" + } + }, + "node_modules/ajv-formats": { + "version": "2.1.1", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/ajv-formats/-/ajv-formats-2.1.1.tgz", + "integrity": "sha512-Wx0Kx52hxE7C18hkMEggYlEifqWZtYaRgouJor+WMdPnQyEK13vgEWyVNup7SoeeoLMsr4kf5h6dOW11I15MUA==", + "dev": true, + "license": "MIT", + "peer": true, + "dependencies": { + "ajv": "^8.0.0" + }, + "peerDependencies": { + "ajv": "^8.0.0" + }, + "peerDependenciesMeta": { + "ajv": { + "optional": true + } + } + }, + "node_modules/ajv-keywords": { + "version": "5.1.0", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/ajv-keywords/-/ajv-keywords-5.1.0.tgz", + "integrity": "sha512-YCS/JNFAUyr5vAuhk1DWm1CBxRHW9LbJ2ozWeemrIqpbsqKjHVxYPyi5GC0rjZIT5JxJ3virVTS8wk4i/Z+krw==", + "dev": true, + "license": "MIT", + "peer": true, + "dependencies": { + "fast-deep-equal": "^3.1.3" + }, + "peerDependencies": { + "ajv": "^8.8.2" + } + }, + "node_modules/ansi-escapes": { + "version": "4.3.2", + "resolved": 
"https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/ansi-escapes/-/ansi-escapes-4.3.2.tgz", + "integrity": "sha512-gKXj5ALrKWQLsYG9jlTRmR/xKluxHV+Z9QEwNIgCfM1/uwPMCuzVVnh5mwTd+OuBZcwSIMbqssNWRm1lE51QaQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "type-fest": "^0.21.3" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/ansi-regex": { + "version": "5.0.1", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/ansi-regex/-/ansi-regex-5.0.1.tgz", + "integrity": "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==", + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/ansi-styles": { + "version": "4.3.0", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/ansi-styles/-/ansi-styles-4.3.0.tgz", + "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", + "license": "MIT", + "dependencies": { + "color-convert": "^2.0.1" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/chalk/ansi-styles?sponsor=1" + } + }, + "node_modules/aproba": { + "version": "2.0.0", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/aproba/-/aproba-2.0.0.tgz", + "integrity": "sha512-lYe4Gx7QT+MKGbDsA+Z+he/Wtef0BiwDOlK/XkBrdfsh9J/jPPXbX0tE9x9cl27Tmu5gg3QUbUrQYa/y+KOHPQ==", + "dev": true, + "license": "ISC" + }, + "node_modules/are-we-there-yet": { + "version": "2.0.0", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/are-we-there-yet/-/are-we-there-yet-2.0.0.tgz", + "integrity": "sha512-Ci/qENmwHnsYo9xKIcUJN5LeDKdJ6R1Z1j9V/J5wyq8nh/mYPEpIKJbBZXtZjG04HiK7zV/p6Vs9952MrMeUIw==", + "deprecated": "This package is no longer supported.", + "dev": true, + "license": "ISC", + "dependencies": { + "delegates": "^1.0.0", + 
"readable-stream": "^3.6.0" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/are-we-there-yet/node_modules/readable-stream": { + "version": "3.6.2", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/readable-stream/-/readable-stream-3.6.2.tgz", + "integrity": "sha512-9u/sniCrY3D5WdsERHzHE4G2YCXqoG5FTHUiCC4SIbr6XcLZBY05ya9EKjYek9O5xOAwjGq+1JdGBAS7Q9ScoA==", + "dev": true, + "license": "MIT", + "dependencies": { + "inherits": "^2.0.3", + "string_decoder": "^1.1.1", + "util-deprecate": "^1.0.1" + }, + "engines": { + "node": ">= 6" + } + }, + "node_modules/argparse": { + "version": "1.0.10", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/argparse/-/argparse-1.0.10.tgz", + "integrity": "sha512-o5Roy6tNG4SL/FOkCAN6RzjiakZS25RLYFrcMttJqbdd8BWrnA+fGz57iN5Pb06pvBGvl5gQ0B48dJlslXvoTg==", + "license": "MIT", + "dependencies": { + "sprintf-js": "~1.0.2" + } + }, + "node_modules/argparse/node_modules/sprintf-js": { + "version": "1.0.3", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/sprintf-js/-/sprintf-js-1.0.3.tgz", + "integrity": "sha512-D9cPgkvLlV3t3IzL0D0YLvGA9Ahk4PcvVwUbN0dSGr1aP0Nrt4AEnTUbuGvquEC0mA64Gqt1fzirlRs5ibXx8g==", + "license": "BSD-3-Clause" + }, + "node_modules/array-differ": { + "version": "3.0.0", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/array-differ/-/array-differ-3.0.0.tgz", + "integrity": "sha512-THtfYS6KtME/yIAhKjZ2ul7XI96lQGHRputJQHO80LAWQnuGP4iCIN8vdMRboGbIEYBwU33q8Tch1os2+X0kMg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/array-union": { + "version": "2.1.0", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/array-union/-/array-union-2.1.0.tgz", + "integrity": "sha512-HGyxoOTYUyCM6stUe6EJgnd4EoewAI7zMdfqO+kGjnlZmBDz/cR5pf8r/cR4Wq60sL/p0IkcjUEEPwS3GFrIyw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + 
} + }, + "node_modules/arrify": { + "version": "2.0.1", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/arrify/-/arrify-2.0.1.tgz", + "integrity": "sha512-3duEwti880xqi4eAMN8AyR4a0ByT90zoYdLlevfrvU43vb0YZwZVfxOgxWrLXXXpyugL0hNZc9G6BiB5B3nUug==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/asap": { + "version": "2.0.6", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/asap/-/asap-2.0.6.tgz", + "integrity": "sha512-BSHWgDSAiKs50o2Re8ppvp3seVHXSRM44cdSsT9FfNEUUZLOGWVCsiWaRPWM1Znn+mqZ1OfVZ3z3DWEzSp7hRA==", + "dev": true, + "license": "MIT" + }, + "node_modules/assert": { + "version": "2.1.0", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/assert/-/assert-2.1.0.tgz", + "integrity": "sha512-eLHpSK/Y4nhMJ07gDaAzoX/XAKS8PSaojml3M0DM4JpV1LAi5JOJ/p6H/XWrl8L+DzVEvVCW1z3vWAaB9oTsQw==", + "license": "MIT", + "dependencies": { + "call-bind": "^1.0.2", + "is-nan": "^1.3.2", + "object-is": "^1.1.5", + "object.assign": "^4.1.4", + "util": "^0.12.5" + } + }, + "node_modules/async": { + "version": "3.2.6", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/async/-/async-3.2.6.tgz", + "integrity": "sha512-htCUDlxyyCLMgaM3xXg0C0LW2xqfuQ6p05pCEIsXuyQ+a1koYKTuBMzRNwmybfLgvJDMd0r1LTn4+E0Ti6C2AA==", + "dev": true, + "license": "MIT" + }, + "node_modules/asynckit": { + "version": "0.4.0", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/asynckit/-/asynckit-0.4.0.tgz", + "integrity": "sha512-Oei9OH4tRh0YqU3GxhX79dM/mwVgvbZJaSNaRk+bshkj0S5cfHcgYakreBjrHwatXKbz+IoIdYLxrKim2MjW0Q==", + "license": "MIT" + }, + "node_modules/available-typed-arrays": { + "version": "1.0.7", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/available-typed-arrays/-/available-typed-arrays-1.0.7.tgz", + "integrity": 
"sha512-wvUjBtSGN7+7SjNpq/9M2Tg350UZD3q62IFZLbRAR1bSMlCo1ZaeW+BJ+D090e4hIIZLBcTDWe4Mh4jvUDajzQ==", + "license": "MIT", + "dependencies": { + "possible-typed-array-names": "^1.0.0" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/babel-loader": { + "version": "10.0.0", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/babel-loader/-/babel-loader-10.0.0.tgz", + "integrity": "sha512-z8jt+EdS61AMw22nSfoNJAZ0vrtmhPRVi6ghL3rCeRZI8cdNYFiV5xeV3HbE7rlZZNmGH8BVccwWt8/ED0QOHA==", + "dev": true, + "license": "MIT", + "dependencies": { + "find-up": "^5.0.0" + }, + "engines": { + "node": "^18.20.0 || ^20.10.0 || >=22.0.0" + }, + "peerDependencies": { + "@babel/core": "^7.12.0", + "webpack": ">=5.61.0" + } + }, + "node_modules/babel-loader/node_modules/find-up": { + "version": "5.0.0", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/find-up/-/find-up-5.0.0.tgz", + "integrity": "sha512-78/PXT1wlLLDgTzDs7sjq9hzz0vXD+zn+7wypEe4fXQxCmdmqfGsEPQxmiCSQI3ajFV91bVSsvNtrJRiW6nGng==", + "dev": true, + "license": "MIT", + "dependencies": { + "locate-path": "^6.0.0", + "path-exists": "^4.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/babel-loader/node_modules/locate-path": { + "version": "6.0.0", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/locate-path/-/locate-path-6.0.0.tgz", + "integrity": "sha512-iPZK6eYjbxRu3uB4/WZ3EsEIMJFMqAoopl3R+zuq0UjcAm/MO6KCweDgPfP3elTztoKP3KtnVHxTn2NHBSDVUw==", + "dev": true, + "license": "MIT", + "dependencies": { + "p-locate": "^5.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/babel-loader/node_modules/p-limit": { + "version": "3.1.0", + "resolved": 
"https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/p-limit/-/p-limit-3.1.0.tgz", + "integrity": "sha512-TYOanM3wGwNGsZN2cVTYPArw454xnXj5qmWF1bEoAc4+cU/ol7GVh7odevjp1FNHduHc3KZMcFduxU5Xc6uJRQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "yocto-queue": "^0.1.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/babel-loader/node_modules/p-locate": { + "version": "5.0.0", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/p-locate/-/p-locate-5.0.0.tgz", + "integrity": "sha512-LaNjtRWUBY++zB5nE/NwcaoMylSPk+S+ZHNB1TzdbMJMny6dynpAGt7X/tl/QYq3TIeE6nxHppbo2LGymrG5Pw==", + "dev": true, + "license": "MIT", + "dependencies": { + "p-limit": "^3.0.2" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/babel-plugin-polyfill-corejs2": { + "version": "0.4.13", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/babel-plugin-polyfill-corejs2/-/babel-plugin-polyfill-corejs2-0.4.13.tgz", + "integrity": "sha512-3sX/eOms8kd3q2KZ6DAhKPc0dgm525Gqq5NtWKZ7QYYZEv57OQ54KtblzJzH1lQF/eQxO8KjWGIK9IPUJNus5g==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/compat-data": "^7.22.6", + "@babel/helper-define-polyfill-provider": "^0.6.4", + "semver": "^6.3.1" + }, + "peerDependencies": { + "@babel/core": "^7.4.0 || ^8.0.0-0 <8.0.0" + } + }, + "node_modules/babel-plugin-polyfill-corejs2/node_modules/semver": { + "version": "6.3.1", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/semver/-/semver-6.3.1.tgz", + "integrity": "sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA==", + "dev": true, + "license": "ISC", + "bin": { + "semver": "bin/semver.js" + } + }, + "node_modules/babel-plugin-polyfill-corejs3": { + "version": "0.11.1", + "resolved": 
"https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/babel-plugin-polyfill-corejs3/-/babel-plugin-polyfill-corejs3-0.11.1.tgz", + "integrity": "sha512-yGCqvBT4rwMczo28xkH/noxJ6MZ4nJfkVYdoDaC/utLtWrXxv27HVrzAeSbqR8SxDsp46n0YF47EbHoixy6rXQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/helper-define-polyfill-provider": "^0.6.3", + "core-js-compat": "^3.40.0" + }, + "peerDependencies": { + "@babel/core": "^7.4.0 || ^8.0.0-0 <8.0.0" + } + }, + "node_modules/babel-plugin-polyfill-regenerator": { + "version": "0.6.4", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/babel-plugin-polyfill-regenerator/-/babel-plugin-polyfill-regenerator-0.6.4.tgz", + "integrity": "sha512-7gD3pRadPrbjhjLyxebmx/WrFYcuSjZ0XbdUujQMZ/fcE9oeewk2U/7PCvez84UeuK3oSjmPZ0Ch0dlupQvGzw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/helper-define-polyfill-provider": "^0.6.4" + }, + "peerDependencies": { + "@babel/core": "^7.4.0 || ^8.0.0-0 <8.0.0" + } + }, + "node_modules/balanced-match": { + "version": "1.0.2", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/balanced-match/-/balanced-match-1.0.2.tgz", + "integrity": "sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw==", + "dev": true, + "license": "MIT" + }, + "node_modules/base64-js": { + "version": "1.5.1", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/base64-js/-/base64-js-1.5.1.tgz", + "integrity": "sha512-AKpaYlHn8t4SVbOHCy+b5+KKgvR4vrsD8vbvrbiQJps7fKDTkjkDry6ji0rUJjC0kzbNePLwzxq8iypo41qeWA==", + "dev": true, + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ], + "license": "MIT" + }, + "node_modules/before-after-hook": { + "version": "2.2.3", + "resolved": 
"https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/before-after-hook/-/before-after-hook-2.2.3.tgz", + "integrity": "sha512-NzUnlZexiaH/46WDhANlyR2bXRopNg4F/zuSA3OpZnllCUgRaOF2znDioDWrmbNVsuZk6l9pMquQB38cfBZwkQ==", + "dev": true, + "license": "Apache-2.0" + }, + "node_modules/bin-links": { + "version": "3.0.3", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/bin-links/-/bin-links-3.0.3.tgz", + "integrity": "sha512-zKdnMPWEdh4F5INR07/eBrodC7QrF5JKvqskjz/ZZRXg5YSAZIbn8zGhbhUrElzHBZ2fvEQdOU59RHcTG3GiwA==", + "dev": true, + "license": "ISC", + "dependencies": { + "cmd-shim": "^5.0.0", + "mkdirp-infer-owner": "^2.0.0", + "npm-normalize-package-bin": "^2.0.0", + "read-cmd-shim": "^3.0.0", + "rimraf": "^3.0.0", + "write-file-atomic": "^4.0.0" + }, + "engines": { + "node": "^12.13.0 || ^14.15.0 || >=16.0.0" + } + }, + "node_modules/bin-links/node_modules/npm-normalize-package-bin": { + "version": "2.0.0", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/npm-normalize-package-bin/-/npm-normalize-package-bin-2.0.0.tgz", + "integrity": "sha512-awzfKUO7v0FscrSpRoogyNm0sajikhBWpU0QMrW09AMi9n1PoKU6WaIqUzuJSQnpciZZmJ/jMZ2Egfmb/9LiWQ==", + "dev": true, + "license": "ISC", + "engines": { + "node": "^12.13.0 || ^14.15.0 || >=16.0.0" + } + }, + "node_modules/binaryextensions": { + "version": "4.19.0", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/binaryextensions/-/binaryextensions-4.19.0.tgz", + "integrity": "sha512-DRxnVbOi/1OgA5pA9EDiRT8gvVYeqfuN7TmPfLyt6cyho3KbHCi3EtDQf39TTmGDrR5dZ9CspdXhPkL/j/WGbg==", + "dev": true, + "license": "Artistic-2.0", + "engines": { + "node": ">=0.8" + }, + "funding": { + "url": "https://bevry.me/fund" + } + }, + "node_modules/bl": { + "version": "4.1.0", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/bl/-/bl-4.1.0.tgz", + "integrity": 
"sha512-1W07cM9gS6DcLperZfFSj+bWLtaPGSOHWhPiGzXmvVJbRLdG82sH/Kn8EtW1VqWVA54AKf2h5k5BbnIbwF3h6w==", + "dev": true, + "license": "MIT", + "dependencies": { + "buffer": "^5.5.0", + "inherits": "^2.0.4", + "readable-stream": "^3.4.0" + } + }, + "node_modules/bl/node_modules/readable-stream": { + "version": "3.6.2", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/readable-stream/-/readable-stream-3.6.2.tgz", + "integrity": "sha512-9u/sniCrY3D5WdsERHzHE4G2YCXqoG5FTHUiCC4SIbr6XcLZBY05ya9EKjYek9O5xOAwjGq+1JdGBAS7Q9ScoA==", + "dev": true, + "license": "MIT", + "dependencies": { + "inherits": "^2.0.3", + "string_decoder": "^1.1.1", + "util-deprecate": "^1.0.1" + }, + "engines": { + "node": ">= 6" + } + }, + "node_modules/brace-expansion": { + "version": "1.1.11", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/brace-expansion/-/brace-expansion-1.1.11.tgz", + "integrity": "sha512-iCuPHDFgrHX7H2vEI/5xpz07zSHB00TpugqhmYtVmMO6518mCuRMoOYFldEBl0g187ufozdaHgWKcYFb61qGiA==", + "dev": true, + "license": "MIT", + "dependencies": { + "balanced-match": "^1.0.0", + "concat-map": "0.0.1" + } + }, + "node_modules/braces": { + "version": "3.0.3", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/braces/-/braces-3.0.3.tgz", + "integrity": "sha512-yQbXgO/OSZVD2IsiLlro+7Hf6Q18EJrKSEsdoMzKePKXct3gvD8oLcOQdIzGupr5Fj+EDe8gO/lxc1BzfMpxvA==", + "dev": true, + "license": "MIT", + "dependencies": { + "fill-range": "^7.1.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/browserify-zlib": { + "version": "0.2.0", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/browserify-zlib/-/browserify-zlib-0.2.0.tgz", + "integrity": "sha512-Z942RysHXmJrhqk88FmKBVq/v5tqmSkDz7p54G/MGyjMnCFFnC79XWNbg+Vta8W6Wb2qtSZTSxIGkJrRpCFEiA==", + "license": "MIT", + "dependencies": { + "pako": "~1.0.5" + } + }, + "node_modules/browserslist": { + "version": "4.24.4", + "resolved": 
"https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/browserslist/-/browserslist-4.24.4.tgz", + "integrity": "sha512-KDi1Ny1gSePi1vm0q4oxSF8b4DR44GF4BbmS2YdhPLOEqd8pDviZOGH/GsmRwoWJ2+5Lr085X7naowMwKHDG1A==", + "dev": true, + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/browserslist" + }, + { + "type": "tidelift", + "url": "https://tidelift.com/funding/github/npm/browserslist" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "license": "MIT", + "dependencies": { + "caniuse-lite": "^1.0.30001688", + "electron-to-chromium": "^1.5.73", + "node-releases": "^2.0.19", + "update-browserslist-db": "^1.1.1" + }, + "bin": { + "browserslist": "cli.js" + }, + "engines": { + "node": "^6 || ^7 || ^8 || ^9 || ^10 || ^11 || ^12 || >=13.7" + } + }, + "node_modules/buffer": { + "version": "5.7.1", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/buffer/-/buffer-5.7.1.tgz", + "integrity": "sha512-EHcyIPBQ4BSGlvjB16k5KgAJ27CIsHY/2JBmCRReo48y9rQ3MaUzWX3KVlBa4U7MyX02HdVj0K7C3WaB3ju7FQ==", + "dev": true, + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ], + "license": "MIT", + "dependencies": { + "base64-js": "^1.3.1", + "ieee754": "^1.1.13" + } + }, + "node_modules/buffer-from": { + "version": "1.1.2", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/buffer-from/-/buffer-from-1.1.2.tgz", + "integrity": "sha512-E+XQCRwSbaaiChtv6k6Dwgc+bx+Bs6vuKJHHl5kox/BaKbhiXzqQOwK4cO22yElGp2OCmjwVhT3HmxgyPGnJfQ==", + "dev": true, + "license": "MIT", + "peer": true + }, + "node_modules/builtins": { + "version": "1.0.3", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/builtins/-/builtins-1.0.3.tgz", + "integrity": 
"sha512-uYBjakWipfaO/bXI7E8rq6kpwHRZK5cNYrUv2OzZSI/FvmdMyXJ2tG9dKcjEC5YHmHpUAwsargWIZNWdxb/bnQ==", + "dev": true, + "license": "MIT" + }, + "node_modules/cacache": { + "version": "15.3.0", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/cacache/-/cacache-15.3.0.tgz", + "integrity": "sha512-VVdYzXEn+cnbXpFgWs5hTT7OScegHVmLhJIR8Ufqk3iFD6A6j5iSX1KuBTfNEv4tdJWE2PzA6IVFtcLC7fN9wQ==", + "dev": true, + "license": "ISC", + "dependencies": { + "@npmcli/fs": "^1.0.0", + "@npmcli/move-file": "^1.0.1", + "chownr": "^2.0.0", + "fs-minipass": "^2.0.0", + "glob": "^7.1.4", + "infer-owner": "^1.0.4", + "lru-cache": "^6.0.0", + "minipass": "^3.1.1", + "minipass-collect": "^1.0.2", + "minipass-flush": "^1.0.5", + "minipass-pipeline": "^1.2.2", + "mkdirp": "^1.0.3", + "p-map": "^4.0.0", + "promise-inflight": "^1.0.1", + "rimraf": "^3.0.2", + "ssri": "^8.0.1", + "tar": "^6.0.2", + "unique-filename": "^1.1.1" + }, + "engines": { + "node": ">= 10" + } + }, + "node_modules/cacache/node_modules/glob": { + "version": "7.2.3", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/glob/-/glob-7.2.3.tgz", + "integrity": "sha512-nFR0zLpU2YCaRxwoCJvL6UvCH2JFyFVIvwTLsIf21AuHlMskA1hhTdk+LlYJtOlYt9v6dvszD2BGRqBL+iQK9Q==", + "deprecated": "Glob versions prior to v9 are no longer supported", + "dev": true, + "license": "ISC", + "dependencies": { + "fs.realpath": "^1.0.0", + "inflight": "^1.0.4", + "inherits": "2", + "minimatch": "^3.1.1", + "once": "^1.3.0", + "path-is-absolute": "^1.0.0" + }, + "engines": { + "node": "*" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/call-bind": { + "version": "1.0.8", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/call-bind/-/call-bind-1.0.8.tgz", + "integrity": "sha512-oKlSFMcMwpUg2ednkhQ454wfWiU/ul3CkJe/PEHcTKuiX6RpbehUiFMXu13HalGZxfUwCQzZG747YXBn1im9ww==", + "license": "MIT", + "dependencies": { + "call-bind-apply-helpers": 
"^1.0.0", + "es-define-property": "^1.0.0", + "get-intrinsic": "^1.2.4", + "set-function-length": "^1.2.2" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/call-bind-apply-helpers": { + "version": "1.0.2", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/call-bind-apply-helpers/-/call-bind-apply-helpers-1.0.2.tgz", + "integrity": "sha512-Sp1ablJ0ivDkSzjcaJdxEunN5/XvksFJ2sMBFfq6x0ryhQV/2b/KwFe21cMpmHtPOSij8K99/wSfoEuTObmuMQ==", + "license": "MIT", + "dependencies": { + "es-errors": "^1.3.0", + "function-bind": "^1.1.2" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/call-bound": { + "version": "1.0.4", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/call-bound/-/call-bound-1.0.4.tgz", + "integrity": "sha512-+ys997U96po4Kx/ABpBCqhA9EuxJaQWDQg7295H4hBphv3IZg0boBKuwYpt4YXp6MZ5AmZQnU/tyMTlRpaSejg==", + "license": "MIT", + "dependencies": { + "call-bind-apply-helpers": "^1.0.2", + "get-intrinsic": "^1.3.0" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/caniuse-lite": { + "version": "1.0.30001715", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/caniuse-lite/-/caniuse-lite-1.0.30001715.tgz", + "integrity": "sha512-7ptkFGMm2OAOgvZpwgA4yjQ5SQbrNVGdRjzH0pBdy1Fasvcr+KAeECmbCAECzTuDuoX0FCY8KzUxjf9+9kfZEw==", + "dev": true, + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/browserslist" + }, + { + "type": "tidelift", + "url": "https://tidelift.com/funding/github/npm/caniuse-lite" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "license": "CC-BY-4.0" + }, + "node_modules/chalk": { + "version": "4.1.2", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/chalk/-/chalk-4.1.2.tgz", + "integrity": 
"sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==", + "license": "MIT", + "dependencies": { + "ansi-styles": "^4.1.0", + "supports-color": "^7.1.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/chalk?sponsor=1" + } + }, + "node_modules/chalk/node_modules/supports-color": { + "version": "7.2.0", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/supports-color/-/supports-color-7.2.0.tgz", + "integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==", + "license": "MIT", + "dependencies": { + "has-flag": "^4.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/chardet": { + "version": "0.7.0", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/chardet/-/chardet-0.7.0.tgz", + "integrity": "sha512-mT8iDcrh03qDGRRmoA2hmBJnxpllMR+0/0qlzjqZES6NdiWDcZkCNAk4rPFZ9Q85r27unkiNNg8ZOiwZXBHwcA==", + "dev": true, + "license": "MIT" + }, + "node_modules/chownr": { + "version": "2.0.0", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/chownr/-/chownr-2.0.0.tgz", + "integrity": "sha512-bIomtDF5KGpdogkLd9VspvFzk9KfpyyGlS8YFVZl7TGPBHL5snIOnxeshwVgPteQ9b4Eydl+pVbIyE1DcvCWgQ==", + "dev": true, + "license": "ISC", + "engines": { + "node": ">=10" + } + }, + "node_modules/chrome-trace-event": { + "version": "1.0.4", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/chrome-trace-event/-/chrome-trace-event-1.0.4.tgz", + "integrity": "sha512-rNjApaLzuwaOTjCiT8lSDdGN1APCiqkChLMJxJPWLunPAt5fy8xgU9/jNOchV84wfIxrA0lRQB7oCT8jrn/wrQ==", + "dev": true, + "license": "MIT", + "peer": true, + "engines": { + "node": ">=6.0" + } + }, + "node_modules/clean-stack": { + "version": "2.2.0", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/clean-stack/-/clean-stack-2.2.0.tgz", + "integrity": 
"sha512-4diC9HaTE+KRAMWhDhrGOECgWZxoevMc5TlkObMqNSsVU62PYzXZ/SMTjzyGAFF1YusgxGcSWTEXBhp0CPwQ1A==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "node_modules/cli-cursor": { + "version": "3.1.0", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/cli-cursor/-/cli-cursor-3.1.0.tgz", + "integrity": "sha512-I/zHAwsKf9FqGoXM4WWRACob9+SNukZTd94DWF57E4toouRulbCxcUh6RKUEOQlYTHJnzkPMySvPNaaSLNfLZw==", + "dev": true, + "license": "MIT", + "dependencies": { + "restore-cursor": "^3.1.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/cli-spinners": { + "version": "2.9.2", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/cli-spinners/-/cli-spinners-2.9.2.tgz", + "integrity": "sha512-ywqV+5MmyL4E7ybXgKys4DugZbX0FC6LnwrhjuykIjnK9k8OQacQ7axGKnjDXWNhns0xot3bZI5h55H8yo9cJg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/cli-table": { + "version": "0.3.11", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/cli-table/-/cli-table-0.3.11.tgz", + "integrity": "sha512-IqLQi4lO0nIB4tcdTpN4LCB9FI3uqrJZK7RC515EnhZ6qBaglkIgICb1wjeAqpdoOabm1+SuQtkXIPdYC93jhQ==", + "dev": true, + "dependencies": { + "colors": "1.0.3" + }, + "engines": { + "node": ">= 0.2.0" + } + }, + "node_modules/cli-width": { + "version": "3.0.0", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/cli-width/-/cli-width-3.0.0.tgz", + "integrity": "sha512-FxqpkPPwu1HjuN93Omfm4h8uIanXofW0RxVEW3k5RKx+mJJYSthzNhp32Kzxxy3YAEZ/Dc/EWN1vZRY0+kOhbw==", + "dev": true, + "license": "ISC", + "engines": { + "node": ">= 10" + } + }, + "node_modules/cliui": { + "version": "7.0.4", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/cliui/-/cliui-7.0.4.tgz", + "integrity": 
"sha512-OcRE68cOsVMXp1Yvonl/fzkQOyjLSu/8bhPDfQt0e0/Eb283TKP20Fs2MqoPsr9SwA595rRCA+QMzYc9nBP+JQ==", + "license": "ISC", + "dependencies": { + "string-width": "^4.2.0", + "strip-ansi": "^6.0.0", + "wrap-ansi": "^7.0.0" + } + }, + "node_modules/cliui/node_modules/wrap-ansi": { + "version": "7.0.0", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/wrap-ansi/-/wrap-ansi-7.0.0.tgz", + "integrity": "sha512-YVGIj2kamLSTxw6NsZjoBxfSwsn0ycdesmc4p+Q21c5zPuZ1pl+NfxVdxPtdHvmNVOQ6XSYG4AUtyt/Fi7D16Q==", + "license": "MIT", + "dependencies": { + "ansi-styles": "^4.0.0", + "string-width": "^4.1.0", + "strip-ansi": "^6.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/wrap-ansi?sponsor=1" + } + }, + "node_modules/clone": { + "version": "2.1.2", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/clone/-/clone-2.1.2.tgz", + "integrity": "sha512-3Pe/CF1Nn94hyhIYpjtiLhdCoEoz0DqQ+988E9gmeEdQZlojxnOb74wctFyuwWQHzqyf9X7C7MG8juUpqBJT8w==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.8" + } + }, + "node_modules/clone-buffer": { + "version": "1.0.0", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/clone-buffer/-/clone-buffer-1.0.0.tgz", + "integrity": "sha512-KLLTJWrvwIP+OPfMn0x2PheDEP20RPUcGXj/ERegTgdmPEZylALQldygiqrPPu8P45uNuPs7ckmReLY6v/iA5g==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 0.10" + } + }, + "node_modules/clone-deep": { + "version": "4.0.1", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/clone-deep/-/clone-deep-4.0.1.tgz", + "integrity": "sha512-neHB9xuzh/wk0dIHweyAXv2aPGZIVk3pLMe+/RNzINf17fe0OG96QroktYAUm7SM1PBnzTabaLboqqxDyMU+SQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "is-plain-object": "^2.0.4", + "kind-of": "^6.0.2", + "shallow-clone": "^3.0.0" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/clone-stats": { + "version": 
"1.0.0", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/clone-stats/-/clone-stats-1.0.0.tgz", + "integrity": "sha512-au6ydSpg6nsrigcZ4m8Bc9hxjeW+GJ8xh5G3BJCMt4WXe1H10UNaVOamqQTmrx1kjVuxAHIQSNU6hY4Nsn9/ag==", + "dev": true, + "license": "MIT" + }, + "node_modules/cloneable-readable": { + "version": "1.1.3", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/cloneable-readable/-/cloneable-readable-1.1.3.tgz", + "integrity": "sha512-2EF8zTQOxYq70Y4XKtorQupqF0m49MBz2/yf5Bj+MHjvpG3Hy7sImifnqD6UA+TKYxeSV+u6qqQPawN5UvnpKQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "inherits": "^2.0.1", + "process-nextick-args": "^2.0.0", + "readable-stream": "^2.3.5" + } + }, + "node_modules/cloneable-readable/node_modules/readable-stream": { + "version": "2.3.8", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/readable-stream/-/readable-stream-2.3.8.tgz", + "integrity": "sha512-8p0AUk4XODgIewSi0l8Epjs+EVnWiK7NoDIEGU0HhE7+ZyY8D1IMY7odu5lRrFXGg71L15KG8QrPmum45RTtdA==", + "dev": true, + "license": "MIT", + "dependencies": { + "core-util-is": "~1.0.0", + "inherits": "~2.0.3", + "isarray": "~1.0.0", + "process-nextick-args": "~2.0.0", + "safe-buffer": "~5.1.1", + "string_decoder": "~1.1.1", + "util-deprecate": "~1.0.1" + } + }, + "node_modules/cloneable-readable/node_modules/safe-buffer": { + "version": "5.1.2", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/safe-buffer/-/safe-buffer-5.1.2.tgz", + "integrity": "sha512-Gd2UZBJDkXlY7GbJxfsE8/nvKkUEU1G38c1siN6QP6a9PT9MmHB8GnpscSmMJSoF8LOIrt8ud/wPtojys4G6+g==", + "dev": true, + "license": "MIT" + }, + "node_modules/cloneable-readable/node_modules/string_decoder": { + "version": "1.1.1", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/string_decoder/-/string_decoder-1.1.1.tgz", + "integrity": 
"sha512-n/ShnvDi6FHbbVfviro+WojiFzv+s8MPMHBczVePfUpDJLwoLT0ht1l4YwBCbi8pJAveEEdnkHyPyTP/mzRfwg==", + "dev": true, + "license": "MIT", + "dependencies": { + "safe-buffer": "~5.1.0" + } + }, + "node_modules/cmd-shim": { + "version": "5.0.0", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/cmd-shim/-/cmd-shim-5.0.0.tgz", + "integrity": "sha512-qkCtZ59BidfEwHltnJwkyVZn+XQojdAySM1D1gSeh11Z4pW1Kpolkyo53L5noc0nrxmIvyFwTmJRo4xs7FFLPw==", + "dev": true, + "license": "ISC", + "dependencies": { + "mkdirp-infer-owner": "^2.0.0" + }, + "engines": { + "node": "^12.13.0 || ^14.15.0 || >=16.0.0" + } + }, + "node_modules/color-convert": { + "version": "2.0.1", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/color-convert/-/color-convert-2.0.1.tgz", + "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", + "license": "MIT", + "dependencies": { + "color-name": "~1.1.4" + }, + "engines": { + "node": ">=7.0.0" + } + }, + "node_modules/color-name": { + "version": "1.1.4", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/color-name/-/color-name-1.1.4.tgz", + "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==", + "license": "MIT" + }, + "node_modules/color-support": { + "version": "1.1.3", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/color-support/-/color-support-1.1.3.tgz", + "integrity": "sha512-qiBjkpbMLO/HL68y+lh4q0/O1MZFj2RX6X/KmMa3+gJD3z+WwI1ZzDHysvqHGS3mP6mznPckpXmw1nI9cJjyRg==", + "dev": true, + "license": "ISC", + "bin": { + "color-support": "bin.js" + } + }, + "node_modules/colorette": { + "version": "2.0.20", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/colorette/-/colorette-2.0.20.tgz", + "integrity": 
"sha512-IfEDxwoWIjkeXL1eXcDiow4UbKjhLdq6/EuSVR9GMN7KVH3r9gQ83e73hsz1Nd1T3ijd5xv1wcWRYO+D6kCI2w==", + "dev": true, + "license": "MIT" + }, + "node_modules/colors": { + "version": "1.0.3", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/colors/-/colors-1.0.3.tgz", + "integrity": "sha512-pFGrxThWcWQ2MsAz6RtgeWe4NK2kUE1WfsrvvlctdII745EW9I0yflqhe7++M5LEc7bV2c/9/5zc8sFcpL0Drw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.1.90" + } + }, + "node_modules/combined-stream": { + "version": "1.0.8", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/combined-stream/-/combined-stream-1.0.8.tgz", + "integrity": "sha512-FQN4MRfuJeHf7cBbBMJFXhKSDq+2kAArBlmRBvcvFE5BB1HZKXtSFASDhdlz9zOYwxh8lDdnvmMOe/+5cdoEdg==", + "license": "MIT", + "dependencies": { + "delayed-stream": "~1.0.0" + }, + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/commander": { + "version": "2.20.3", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/commander/-/commander-2.20.3.tgz", + "integrity": "sha512-GpVkmM8vF2vQUkj2LvZmD35JxeJOLCwJ9cUkugyk2nuhbv3+mJvpLYYt+0+USMxE+oj+ey/lJEnhZw75x/OMcQ==", + "dev": true, + "license": "MIT", + "peer": true + }, + "node_modules/common-ancestor-path": { + "version": "1.0.1", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/common-ancestor-path/-/common-ancestor-path-1.0.1.tgz", + "integrity": "sha512-L3sHRo1pXXEqX8VU28kfgUY+YGsk09hPqZiZmLacNib6XNTCM8ubYeT7ryXQw8asB1sKgcU5lkB7ONug08aB8w==", + "dev": true, + "license": "ISC" + }, + "node_modules/commondir": { + "version": "1.0.1", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/commondir/-/commondir-1.0.1.tgz", + "integrity": "sha512-W9pAhw0ja1Edb5GVdIF1mjZw/ASI0AlShXM83UUGe2DVr5TdAPEA1OA8m/g8zWp9x6On7gqufY+FatDbC3MDQg==", + "dev": true, + "license": "MIT" + }, + "node_modules/concat-map": { + "version": "0.0.1", + "resolved": 
"https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/concat-map/-/concat-map-0.0.1.tgz", + "integrity": "sha512-/Srv4dswyQNBfohGpz9o6Yb3Gz3SrUDqBH5rTuhGR7ahtlbYKnVxw2bCFMRljaA7EXHaXZ8wsHdodFvbkhKmqg==", + "dev": true, + "license": "MIT" + }, + "node_modules/console-control-strings": { + "version": "1.1.0", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/console-control-strings/-/console-control-strings-1.1.0.tgz", + "integrity": "sha512-ty/fTekppD2fIwRvnZAVdeOiGd1c7YXEixbgJTNzqcxJWKQnjJ/V1bNEEE6hygpM3WjwHFUVK6HTjWSzV4a8sQ==", + "dev": true, + "license": "ISC" + }, + "node_modules/convert-source-map": { + "version": "2.0.0", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/convert-source-map/-/convert-source-map-2.0.0.tgz", + "integrity": "sha512-Kvp459HrV2FEJ1CAsi1Ku+MY3kasH19TFykTz2xWmMeq6bk2NU3XXvfJ+Q61m0xktWwt+1HSYf3JZsTms3aRJg==", + "dev": true, + "license": "MIT" + }, + "node_modules/core-js": { + "version": "3.29.1", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/core-js/-/core-js-3.29.1.tgz", + "integrity": "sha512-+jwgnhg6cQxKYIIjGtAHq2nwUOolo9eoFZ4sHfUH09BLXBgxnH4gA0zEd+t+BO2cNB8idaBtZFcFTRjQJRJmAw==", + "hasInstallScript": true, + "license": "MIT", + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/core-js" + } + }, + "node_modules/core-js-compat": { + "version": "3.41.0", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/core-js-compat/-/core-js-compat-3.41.0.tgz", + "integrity": "sha512-RFsU9LySVue9RTwdDVX/T0e2Y6jRYWXERKElIjpuEOEnxaXffI0X7RUwVzfYLfzuLXSNJDYoRYUAmRUcyln20A==", + "dev": true, + "license": "MIT", + "dependencies": { + "browserslist": "^4.24.4" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/core-js" + } + }, + "node_modules/core-util-is": { + "version": "1.0.3", + "resolved": 
"https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/core-util-is/-/core-util-is-1.0.3.tgz", + "integrity": "sha512-ZQBvi1DcpJ4GDqanjucZ2Hj3wEO5pZDS89BWbkcrvdxksJorwUDDZamX9ldFkp9aw2lmBDLgkObEA4DWNJ9FYQ==", + "dev": true, + "license": "MIT" + }, + "node_modules/cross-spawn": { + "version": "7.0.6", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/cross-spawn/-/cross-spawn-7.0.6.tgz", + "integrity": "sha512-uV2QOWP2nWzsy2aMp8aRibhi9dlzF5Hgh5SHaB9OiTGEyDTiJJyx0uy51QXdyWbtAHNua4XJzUKca3OzKUd3vA==", + "dev": true, + "license": "MIT", + "dependencies": { + "path-key": "^3.1.0", + "shebang-command": "^2.0.0", + "which": "^2.0.1" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/dargs": { + "version": "7.0.0", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/dargs/-/dargs-7.0.0.tgz", + "integrity": "sha512-2iy1EkLdlBzQGvbweYRFxmFath8+K7+AKB0TlhHWkNuH+TmovaMH/Wp7V7R4u7f4SnX3OgLsU9t1NI9ioDnUpg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/dateformat": { + "version": "4.6.3", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/dateformat/-/dateformat-4.6.3.tgz", + "integrity": "sha512-2P0p0pFGzHS5EMnhdxQi7aJN+iMheud0UhG4dlE1DLAlvL8JHjJJTX/CSm4JXwV0Ka5nGk3zC5mcb5bUQUxxMA==", + "dev": true, + "license": "MIT", + "engines": { + "node": "*" + } + }, + "node_modules/debug": { + "version": "4.4.0", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/debug/-/debug-4.4.0.tgz", + "integrity": "sha512-6WTZ/IxCY/T6BALoZHaE4ctp9xm+Z5kY/pzYaCHRFeyVhojxlrm+46y68HA6hr0TcwEssoxNiDEUJQjfPZ/RYA==", + "dev": true, + "license": "MIT", + "dependencies": { + "ms": "^2.1.3" + }, + "engines": { + "node": ">=6.0" + }, + "peerDependenciesMeta": { + "supports-color": { + "optional": true + } + } + }, + "node_modules/debuglog": { + "version": "1.0.1", + "resolved": 
"https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/debuglog/-/debuglog-1.0.1.tgz", + "integrity": "sha512-syBZ+rnAK3EgMsH2aYEOLUW7mZSY9Gb+0wUMCFsZvcmiz+HigA0LOcq/HoQqVuGG+EKykunc7QG2bzrponfaSw==", + "deprecated": "Package no longer supported. Contact Support at https://www.npmjs.com/support for more info.", + "dev": true, + "license": "MIT", + "engines": { + "node": "*" + } + }, + "node_modules/deep-extend": { + "version": "0.6.0", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/deep-extend/-/deep-extend-0.6.0.tgz", + "integrity": "sha512-LOHxIOaPYdHlJRtCQfDIVZtfw/ufM8+rVj649RIHzcm/vGwQRXFt6OPqIFWsm2XEMrNIEtWR64sY1LEKD2vAOA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=4.0.0" + } + }, + "node_modules/defaults": { + "version": "1.0.4", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/defaults/-/defaults-1.0.4.tgz", + "integrity": "sha512-eFuaLoy/Rxalv2kr+lqMlUnrDWV+3j4pljOIJgLIhI058IQfWJ7vXhyEIHu+HtC738klGALYxOKDO0bQP3tg8A==", + "dev": true, + "license": "MIT", + "dependencies": { + "clone": "^1.0.2" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/defaults/node_modules/clone": { + "version": "1.0.4", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/clone/-/clone-1.0.4.tgz", + "integrity": "sha512-JQHZ2QMW6l3aH/j6xCqQThY/9OH4D/9ls34cgkUBiEeocRTU04tHfKPBsUK1PqZCUQM7GiA0IIXJSuXHI64Kbg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.8" + } + }, + "node_modules/define-data-property": { + "version": "1.1.4", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/define-data-property/-/define-data-property-1.1.4.tgz", + "integrity": "sha512-rBMvIzlpA8v6E+SJZoo++HAYqsLrkg7MSfIinMPFhmkorw7X+dOXVJQs+QT69zGkzMyfDnIMN2Wid1+NbL3T+A==", + "license": "MIT", + "dependencies": { + "es-define-property": "^1.0.0", + "es-errors": "^1.3.0", + "gopd": "^1.0.1" + 
}, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/define-properties": { + "version": "1.2.1", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/define-properties/-/define-properties-1.2.1.tgz", + "integrity": "sha512-8QmQKqEASLd5nx0U1B1okLElbUuuttJ/AnYmRXbbbGDWh6uS208EjD4Xqq/I9wK7u0v6O08XhTWnt5XtEbR6Dg==", + "license": "MIT", + "dependencies": { + "define-data-property": "^1.0.1", + "has-property-descriptors": "^1.0.0", + "object-keys": "^1.1.1" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/delayed-stream": { + "version": "1.0.0", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/delayed-stream/-/delayed-stream-1.0.0.tgz", + "integrity": "sha512-ZySD7Nf91aLB0RxL4KGrKHBXl7Eds1DAmEdcoVawXnLD7SDhpNgtuII2aAkg7a7QS41jxPSZ17p4VdGnMHk3MQ==", + "license": "MIT", + "engines": { + "node": ">=0.4.0" + } + }, + "node_modules/delegates": { + "version": "1.0.0", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/delegates/-/delegates-1.0.0.tgz", + "integrity": "sha512-bd2L678uiWATM6m5Z1VzNCErI3jiGzt6HGY8OVICs40JQq/HALfbyNJmp0UDakEY4pMMaN0Ly5om/B1VI/+xfQ==", + "dev": true, + "license": "MIT" + }, + "node_modules/deprecation": { + "version": "2.3.1", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/deprecation/-/deprecation-2.3.1.tgz", + "integrity": "sha512-xmHIy4F3scKVwMsQ4WnVaS8bHOx0DmVwRywosKhaILI0ywMDWPtBSku2HNxRvF7jtwDRsoEwYQSfbxj8b7RlJQ==", + "dev": true, + "license": "ISC" + }, + "node_modules/dezalgo": { + "version": "1.0.4", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/dezalgo/-/dezalgo-1.0.4.tgz", + "integrity": "sha512-rXSP0bf+5n0Qonsb+SVVfNfIsimO4HEtmnIpPHY8Q1UCzKlQrDMfdobr8nJOOsRgWCyMRqeSBQzmWUMq7zvVig==", + "dev": true, + "license": "ISC", + 
"dependencies": { + "asap": "^2.0.0", + "wrappy": "1" + } + }, + "node_modules/diff": { + "version": "5.2.0", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/diff/-/diff-5.2.0.tgz", + "integrity": "sha512-uIFDxqpRZGZ6ThOk84hEfqWoHx2devRFvpTZcTHur85vImfaxUbTW9Ryh4CpCuDnToOP1CEtXKIgytHBPVff5A==", + "dev": true, + "license": "BSD-3-Clause", + "engines": { + "node": ">=0.3.1" + } + }, + "node_modules/dir-glob": { + "version": "3.0.1", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/dir-glob/-/dir-glob-3.0.1.tgz", + "integrity": "sha512-WkrWp9GR4KXfKGYzOLmTuGVi1UWFfws377n9cc55/tb6DuqyF6pcQ5AbiHEshaDpY9v6oaSr2XCDidGmMwdzIA==", + "dev": true, + "license": "MIT", + "dependencies": { + "path-type": "^4.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/dunder-proto": { + "version": "1.0.1", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/dunder-proto/-/dunder-proto-1.0.1.tgz", + "integrity": "sha512-KIN/nDJBQRcXw0MLVhZE9iQHmG68qAVIBg9CqmUYjmQIhgij9U5MFvrqkUL5FbtyyzZuOeOt0zdeRe4UY7ct+A==", + "license": "MIT", + "dependencies": { + "call-bind-apply-helpers": "^1.0.1", + "es-errors": "^1.3.0", + "gopd": "^1.2.0" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/eastasianwidth": { + "version": "0.2.0", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/eastasianwidth/-/eastasianwidth-0.2.0.tgz", + "integrity": "sha512-I88TYZWc9XiYHRQ4/3c5rjjfgkjhLyW2luGIheGERbNQ6OY7yTybanSpDXZa8y7VUP9YmDcYa+eyq4ca7iLqWA==", + "dev": true, + "license": "MIT" + }, + "node_modules/ejs": { + "version": "3.1.10", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/ejs/-/ejs-3.1.10.tgz", + "integrity": "sha512-UeJmFfOrAQS8OJWPZ4qtgHyWExa088/MtK5UEyoJGFH67cDEXkZSviOiKRCZ4Xij0zxI3JECgYs3oKx+AizQBA==", + "dev": true, + "license": "Apache-2.0", + "dependencies": { + "jake": "^10.8.5" + }, + "bin": { + "ejs": "bin/cli.js" 
+ }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/electron-to-chromium": { + "version": "1.5.140", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/electron-to-chromium/-/electron-to-chromium-1.5.140.tgz", + "integrity": "sha512-o82Rj+ONp4Ip7Cl1r7lrqx/pXhbp/lh9DpKcMNscFJdh8ebyRofnc7Sh01B4jx403RI0oqTBvlZ7OBIZLMr2+Q==", + "dev": true, + "license": "ISC" + }, + "node_modules/emoji-regex": { + "version": "8.0.0", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/emoji-regex/-/emoji-regex-8.0.0.tgz", + "integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==", + "license": "MIT" + }, + "node_modules/encoding": { + "version": "0.1.13", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/encoding/-/encoding-0.1.13.tgz", + "integrity": "sha512-ETBauow1T35Y/WZMkio9jiM0Z5xjHHmJ4XmjZOq1l/dXz3lr2sRn87nJy20RupqSh1F2m3HHPSp8ShIPQJrJ3A==", + "license": "MIT", + "optional": true, + "dependencies": { + "iconv-lite": "^0.6.2" + } + }, + "node_modules/encoding/node_modules/iconv-lite": { + "version": "0.6.3", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/iconv-lite/-/iconv-lite-0.6.3.tgz", + "integrity": "sha512-4fCk79wshMdzMp2rH06qWrJE4iolqLhCUH+OiuIgU++RB0+94NlDL81atO7GX55uUKueo0txHNtvEyI6D7WdMw==", + "license": "MIT", + "optional": true, + "dependencies": { + "safer-buffer": ">= 2.1.2 < 3.0.0" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/enhanced-resolve": { + "version": "5.18.1", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/enhanced-resolve/-/enhanced-resolve-5.18.1.tgz", + "integrity": "sha512-ZSW3ma5GkcQBIpwZTSRAI8N71Uuwgs93IezB7mf7R60tC8ZbJideoDNKjHn2O9KIlx6rkGTTEk1xUCK2E1Y2Yg==", + "dev": true, + "license": "MIT", + "dependencies": { + "graceful-fs": "^4.2.4", + "tapable": "^2.2.0" + }, + "engines": { + "node": ">=10.13.0" + } 
+ }, + "node_modules/env-paths": { + "version": "2.2.1", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/env-paths/-/env-paths-2.2.1.tgz", + "integrity": "sha512-+h1lkLKhZMTYjog1VEpJNG7NZJWcuc2DDk/qsqSTRRCOXiLjeQ1d1/udrUGhqMxUgAlwKNZ0cf2uqan5GLuS2A==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "node_modules/envinfo": { + "version": "7.14.0", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/envinfo/-/envinfo-7.14.0.tgz", + "integrity": "sha512-CO40UI41xDQzhLB1hWyqUKgFhs250pNcGbyGKe1l/e4FSaI/+YE4IMG76GDt0In67WLPACIITC+sOi08x4wIvg==", + "dev": true, + "license": "MIT", + "bin": { + "envinfo": "dist/cli.js" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/err-code": { + "version": "2.0.3", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/err-code/-/err-code-2.0.3.tgz", + "integrity": "sha512-2bmlRpNKBxT/CRmPOlyISQpNj+qSeYvcym/uT0Jx2bMOlKLtSy1ZmLuVxSEKKyor/N5yhvp/ZiG1oE3DEYMSFA==", + "dev": true, + "license": "MIT" + }, + "node_modules/error": { + "version": "10.4.0", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/error/-/error-10.4.0.tgz", + "integrity": "sha512-YxIFEJuhgcICugOUvRx5th0UM+ActZ9sjY0QJmeVwsQdvosZ7kYzc9QqS0Da3R5iUmgU5meGIxh0xBeZpMVeLw==", + "dev": true + }, + "node_modules/error-ex": { + "version": "1.3.2", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/error-ex/-/error-ex-1.3.2.tgz", + "integrity": "sha512-7dFHNmqeFSEt2ZBsCriorKnn3Z2pj+fd9kmI6QoWw4//DL+icEBfc0U7qJCisqrTsKTjw4fNFy2pW9OqStD84g==", + "dev": true, + "license": "MIT", + "dependencies": { + "is-arrayish": "^0.2.1" + } + }, + "node_modules/es-define-property": { + "version": "1.0.1", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/es-define-property/-/es-define-property-1.0.1.tgz", + "integrity": 
"sha512-e3nRfgfUZ4rNGL232gUgX06QNyyez04KdjFrF+LTRoOXmrOgFKDg4BCdsjW8EnT69eqdYGmRpJwiPVYNrCaW3g==", + "license": "MIT", + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/es-errors": { + "version": "1.3.0", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/es-errors/-/es-errors-1.3.0.tgz", + "integrity": "sha512-Zf5H2Kxt2xjTvbJvP2ZWLEICxA6j+hAmMzIlypy4xcBg1vKVnx89Wy0GbS+kf5cwCVFFzdCFh2XSCFNULS6csw==", + "license": "MIT", + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/es-module-lexer": { + "version": "1.7.0", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/es-module-lexer/-/es-module-lexer-1.7.0.tgz", + "integrity": "sha512-jEQoCwk8hyb2AZziIOLhDqpm5+2ww5uIE6lkO/6jcOCusfk6LhMHpXXfBLXTZ7Ydyt0j4VoUQv6uGNYbdW+kBA==", + "dev": true, + "license": "MIT", + "peer": true + }, + "node_modules/es-object-atoms": { + "version": "1.1.1", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/es-object-atoms/-/es-object-atoms-1.1.1.tgz", + "integrity": "sha512-FGgH2h8zKNim9ljj7dankFPcICIK9Cp5bm+c2gQSYePhpaG5+esrLODihIorn+Pe6FGJzWhXQotPv73jTaldXA==", + "license": "MIT", + "dependencies": { + "es-errors": "^1.3.0" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/es-set-tostringtag": { + "version": "2.1.0", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/es-set-tostringtag/-/es-set-tostringtag-2.1.0.tgz", + "integrity": "sha512-j6vWzfrGVfyXxge+O0x5sh6cvxAog0a/4Rdd2K36zCMV5eJ+/+tOAngRO8cODMNWbVRdVlmGZQL2YS3yR8bIUA==", + "license": "MIT", + "dependencies": { + "es-errors": "^1.3.0", + "get-intrinsic": "^1.2.6", + "has-tostringtag": "^1.0.2", + "hasown": "^2.0.2" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/escalade": { + "version": "3.2.0", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/escalade/-/escalade-3.2.0.tgz", + "integrity": 
"sha512-WUj2qlxaQtO4g6Pq5c29GTcWGDyd8itL8zTlipgECz3JesAiiOKotd8JU6otB3PACgG6xkJUyVhboMS+bje/jA==", + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "node_modules/escape-string-regexp": { + "version": "4.0.0", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/escape-string-regexp/-/escape-string-regexp-4.0.0.tgz", + "integrity": "sha512-TtpcNJ3XAzx3Gq8sWRzJaVajRs0uVxA2YAkdb1jm2YkPz4G6egUFAyA3n5vtEIZefPk5Wa4UXbKuS5fKkJWdgA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/eslint-scope": { + "version": "5.1.1", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/eslint-scope/-/eslint-scope-5.1.1.tgz", + "integrity": "sha512-2NxwbF/hZ0KpepYN0cNbo+FN6XoK7GaHlQhgx/hIZl6Va0bF45RQOOwhLIy8lQDbuCiadSLCBnH2CFYquit5bw==", + "dev": true, + "license": "BSD-2-Clause", + "peer": true, + "dependencies": { + "esrecurse": "^4.3.0", + "estraverse": "^4.1.1" + }, + "engines": { + "node": ">=8.0.0" + } + }, + "node_modules/esprima": { + "version": "4.0.1", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/esprima/-/esprima-4.0.1.tgz", + "integrity": "sha512-eGuFFw7Upda+g4p+QHvnW0RyTX/SVeJBDM/gCtMARO0cLuT2HcEKnTPvhjV6aGeqrCB/sbNop0Kszm0jsaWU4A==", + "dev": true, + "license": "BSD-2-Clause", + "bin": { + "esparse": "bin/esparse.js", + "esvalidate": "bin/esvalidate.js" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/esrecurse": { + "version": "4.3.0", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/esrecurse/-/esrecurse-4.3.0.tgz", + "integrity": "sha512-KmfKL3b6G+RXvP8N1vr3Tq1kL/oCFgn2NYXEtqP8/L3pKapUA4G8cFVaoF3SU323CD4XypR/ffioHmkti6/Tag==", + "dev": true, + "license": "BSD-2-Clause", + "peer": true, + "dependencies": { + "estraverse": "^5.2.0" + }, + "engines": { + "node": ">=4.0" + } + }, + 
"node_modules/esrecurse/node_modules/estraverse": { + "version": "5.3.0", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/estraverse/-/estraverse-5.3.0.tgz", + "integrity": "sha512-MMdARuVEQziNTeJD8DgMqmhwR11BRQ/cBP+pLtYdSTnf3MIO8fFeiINEbX36ZdNlfU/7A9f3gUw49B3oQsvwBA==", + "dev": true, + "license": "BSD-2-Clause", + "peer": true, + "engines": { + "node": ">=4.0" + } + }, + "node_modules/estraverse": { + "version": "4.3.0", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/estraverse/-/estraverse-4.3.0.tgz", + "integrity": "sha512-39nnKffWz8xN1BU/2c79n9nB9HDzo0niYUqx6xyqUnyoAnQyyWpOTdZEeiCch8BBu515t4wp9ZmgVfVhn9EBpw==", + "dev": true, + "license": "BSD-2-Clause", + "peer": true, + "engines": { + "node": ">=4.0" + } + }, + "node_modules/esutils": { + "version": "2.0.3", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/esutils/-/esutils-2.0.3.tgz", + "integrity": "sha512-kVscqXk4OCp68SZ0dkgEKVi6/8ij300KBWTJq32P/dYeWTSwK41WyTxalN1eRmA5Z9UU/LX9D7FWSmV9SAYx6g==", + "dev": true, + "license": "BSD-2-Clause", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/event-target-shim": { + "version": "5.0.1", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/event-target-shim/-/event-target-shim-5.0.1.tgz", + "integrity": "sha512-i/2XbnSz/uxRCU6+NdVJgKWDTM427+MqYbkQzD321DuCQJUqOuJKIA0IM2+W2xtYHdKOmZ4dR6fExsd4SXL+WQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "node_modules/eventemitter3": { + "version": "4.0.7", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/eventemitter3/-/eventemitter3-4.0.7.tgz", + "integrity": "sha512-8guHBZCwKnFhYdHr2ysuRWErTwhoN2X8XELRlrRwpmfeY2jjuUN4taQMsULKUVo1K4DvZl+0pgfyoysHxvmvEw==", + "dev": true, + "license": "MIT" + }, + "node_modules/events": { + "version": "3.3.0", + "resolved": 
"https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/events/-/events-3.3.0.tgz", + "integrity": "sha512-mQw+2fkQbALzQ7V0MY0IqdnXNOeTtP4r0lN9z7AAawCXgqea7bDii20AYrIBrFd/Hx0M2Ocz6S111CaFkUcb0Q==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.8.x" + } + }, + "node_modules/execa": { + "version": "5.1.1", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/execa/-/execa-5.1.1.tgz", + "integrity": "sha512-8uSpZZocAZRBAPIEINJj3Lo9HyGitllczc27Eh5YYojjMFMn8yHMDMaUHE2Jqfq05D/wucwI4JGURyXt1vchyg==", + "dev": true, + "license": "MIT", + "dependencies": { + "cross-spawn": "^7.0.3", + "get-stream": "^6.0.0", + "human-signals": "^2.1.0", + "is-stream": "^2.0.0", + "merge-stream": "^2.0.0", + "npm-run-path": "^4.0.1", + "onetime": "^5.1.2", + "signal-exit": "^3.0.3", + "strip-final-newline": "^2.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sindresorhus/execa?sponsor=1" + } + }, + "node_modules/exponential-backoff": { + "version": "3.1.2", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/exponential-backoff/-/exponential-backoff-3.1.2.tgz", + "integrity": "sha512-8QxYTVXUkuy7fIIoitQkPwGonB8F3Zj8eEO8Sqg9Zv/bkI7RJAzowee4gr81Hak/dUTpA2Z7VfQgoijjPNlUZA==", + "dev": true, + "license": "Apache-2.0" + }, + "node_modules/external-editor": { + "version": "3.1.0", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/external-editor/-/external-editor-3.1.0.tgz", + "integrity": "sha512-hMQ4CX1p1izmuLYyZqLMO/qGNw10wSv9QDCPfzXfyFrOaCSSoRfqE1Kf1s5an66J5JZC62NewG+mK49jOCtQew==", + "dev": true, + "license": "MIT", + "dependencies": { + "chardet": "^0.7.0", + "iconv-lite": "^0.4.24", + "tmp": "^0.0.33" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/fast-deep-equal": { + "version": "3.1.3", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/fast-deep-equal/-/fast-deep-equal-3.1.3.tgz", + 
"integrity": "sha512-f3qQ9oQy9j2AhBe/H9VC91wLmKBCCU/gDOnKNAYG5hswO7BLKj09Hc5HYNz9cGI++xlpDCIgDaitVs03ATR84Q==", + "dev": true, + "license": "MIT", + "peer": true + }, + "node_modules/fast-glob": { + "version": "3.3.3", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/fast-glob/-/fast-glob-3.3.3.tgz", + "integrity": "sha512-7MptL8U0cqcFdzIzwOTHoilX9x5BrNqye7Z/LuC7kCMRio1EMSyqRK3BEAUD7sXRq4iT4AzTVuZdhgQ2TCvYLg==", + "dev": true, + "license": "MIT", + "dependencies": { + "@nodelib/fs.stat": "^2.0.2", + "@nodelib/fs.walk": "^1.2.3", + "glob-parent": "^5.1.2", + "merge2": "^1.3.0", + "micromatch": "^4.0.8" + }, + "engines": { + "node": ">=8.6.0" + } + }, + "node_modules/fast-text-encoding": { + "version": "1.0.6", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/fast-text-encoding/-/fast-text-encoding-1.0.6.tgz", + "integrity": "sha512-VhXlQgj9ioXCqGstD37E/HBeqEGV/qOD/kmbVG8h5xKBYvM1L3lR1Zn4555cQ8GkYbJa8aJSipLPndE1k6zK2w==", + "license": "Apache-2.0" + }, + "node_modules/fast-uri": { + "version": "3.0.6", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/fast-uri/-/fast-uri-3.0.6.tgz", + "integrity": "sha512-Atfo14OibSv5wAp4VWNsFYE1AchQRTv9cBGWET4pZWHzYshFSS9NQI6I57rdKn9croWVMbYFbLhJ+yJvmZIIHw==", + "dev": true, + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/fastify" + }, + { + "type": "opencollective", + "url": "https://opencollective.com/fastify" + } + ], + "license": "BSD-3-Clause", + "peer": true + }, + "node_modules/fastest-levenshtein": { + "version": "1.0.16", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/fastest-levenshtein/-/fastest-levenshtein-1.0.16.tgz", + "integrity": "sha512-eRnCtTTtGZFpQCwhJiUOuxPQWRXVKYDn0b2PeHfXL6/Zi53SLAzAHfVhVWK2AryC/WH05kGfxhFIPvTF0SXQzg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 4.9.1" + } + }, + "node_modules/fastq": { + "version": "1.19.1", + 
"resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/fastq/-/fastq-1.19.1.tgz", + "integrity": "sha512-GwLTyxkCXjXbxqIhTsMI2Nui8huMPtnxg7krajPJAjnEG/iiOS7i+zCtWGZR9G0NBKbXKh6X9m9UIsYX/N6vvQ==", + "dev": true, + "license": "ISC", + "dependencies": { + "reusify": "^1.0.4" + } + }, + "node_modules/figures": { + "version": "3.2.0", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/figures/-/figures-3.2.0.tgz", + "integrity": "sha512-yaduQFRKLXYOGgEn6AZau90j3ggSOyiqXU0F9JZfeXYhNa+Jk4X+s45A2zg5jns87GAFa34BBm2kXw4XpNcbdg==", + "dev": true, + "license": "MIT", + "dependencies": { + "escape-string-regexp": "^1.0.5" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/figures/node_modules/escape-string-regexp": { + "version": "1.0.5", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/escape-string-regexp/-/escape-string-regexp-1.0.5.tgz", + "integrity": "sha512-vbRorB5FUQWvla16U8R/qgaFIya2qGzwDrNmCZuYKrbdSUMG6I1ZCGQRefkRVhuOkIGVne7BQ35DSfo1qvJqFg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.8.0" + } + }, + "node_modules/filelist": { + "version": "1.0.4", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/filelist/-/filelist-1.0.4.tgz", + "integrity": "sha512-w1cEuf3S+DrLCQL7ET6kz+gmlJdbq9J7yXCSjK/OZCPA+qEN1WyF4ZAf0YYJa4/shHJra2t/d/r8SV4Ji+x+8Q==", + "dev": true, + "license": "Apache-2.0", + "dependencies": { + "minimatch": "^5.0.1" + } + }, + "node_modules/filelist/node_modules/brace-expansion": { + "version": "2.0.1", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/brace-expansion/-/brace-expansion-2.0.1.tgz", + "integrity": "sha512-XnAIvQ8eM+kC6aULx6wuQiwVsnzsi9d3WxzV3FpWTGA19F621kwdbsAcFKXgKUHZWsy+mY6iL1sHTxWEFCytDA==", + "dev": true, + "license": "MIT", + "dependencies": { + "balanced-match": "^1.0.0" + } + }, + 
"node_modules/filelist/node_modules/minimatch": { + "version": "5.1.6", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/minimatch/-/minimatch-5.1.6.tgz", + "integrity": "sha512-lKwV/1brpG6mBUFHtb7NUmtABCb2WZZmm2wNiOA5hAb8VdCS4B3dtMWyvcoViccwAW/COERjXLt0zP1zXUN26g==", + "dev": true, + "license": "ISC", + "dependencies": { + "brace-expansion": "^2.0.1" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/fill-range": { + "version": "7.1.1", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/fill-range/-/fill-range-7.1.1.tgz", + "integrity": "sha512-YsGpe3WHLK8ZYi4tWDg2Jy3ebRz2rXowDxnld4bkQB00cc/1Zw9AWnC0i9ztDJitivtQvaI9KaLyKrc+hBW0yg==", + "dev": true, + "license": "MIT", + "dependencies": { + "to-regex-range": "^5.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/find-up": { + "version": "4.1.0", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/find-up/-/find-up-4.1.0.tgz", + "integrity": "sha512-PpOwAdQ/YlXQ2vj8a3h8IipDuYRi3wceVQQGYWxNINccq40Anw7BlsEXCMbt1Zt+OLA6Fq9suIpIWD0OsnISlw==", + "dev": true, + "license": "MIT", + "dependencies": { + "locate-path": "^5.0.0", + "path-exists": "^4.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/find-yarn-workspace-root2": { + "version": "1.2.16", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/find-yarn-workspace-root2/-/find-yarn-workspace-root2-1.2.16.tgz", + "integrity": "sha512-hr6hb1w8ePMpPVUK39S4RlwJzi+xPLuVuG8XlwXU3KD5Yn3qgBWVfy3AzNlDhWvE1EORCE65/Qm26rFQt3VLVA==", + "dev": true, + "license": "Apache-2.0", + "dependencies": { + "micromatch": "^4.0.2", + "pkg-dir": "^4.2.0" + } + }, + "node_modules/first-chunk-stream": { + "version": "2.0.0", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/first-chunk-stream/-/first-chunk-stream-2.0.0.tgz", + "integrity": 
"sha512-X8Z+b/0L4lToKYq+lwnKqi9X/Zek0NibLpsJgVsSxpoYq7JtiCtRb5HqKVEjEw/qAb/4AKKRLOwwKHlWNpm2Eg==", + "dev": true, + "license": "MIT", + "dependencies": { + "readable-stream": "^2.0.2" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/first-chunk-stream/node_modules/readable-stream": { + "version": "2.3.8", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/readable-stream/-/readable-stream-2.3.8.tgz", + "integrity": "sha512-8p0AUk4XODgIewSi0l8Epjs+EVnWiK7NoDIEGU0HhE7+ZyY8D1IMY7odu5lRrFXGg71L15KG8QrPmum45RTtdA==", + "dev": true, + "license": "MIT", + "dependencies": { + "core-util-is": "~1.0.0", + "inherits": "~2.0.3", + "isarray": "~1.0.0", + "process-nextick-args": "~2.0.0", + "safe-buffer": "~5.1.1", + "string_decoder": "~1.1.1", + "util-deprecate": "~1.0.1" + } + }, + "node_modules/first-chunk-stream/node_modules/safe-buffer": { + "version": "5.1.2", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/safe-buffer/-/safe-buffer-5.1.2.tgz", + "integrity": "sha512-Gd2UZBJDkXlY7GbJxfsE8/nvKkUEU1G38c1siN6QP6a9PT9MmHB8GnpscSmMJSoF8LOIrt8ud/wPtojys4G6+g==", + "dev": true, + "license": "MIT" + }, + "node_modules/first-chunk-stream/node_modules/string_decoder": { + "version": "1.1.1", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/string_decoder/-/string_decoder-1.1.1.tgz", + "integrity": "sha512-n/ShnvDi6FHbbVfviro+WojiFzv+s8MPMHBczVePfUpDJLwoLT0ht1l4YwBCbi8pJAveEEdnkHyPyTP/mzRfwg==", + "dev": true, + "license": "MIT", + "dependencies": { + "safe-buffer": "~5.1.0" + } + }, + "node_modules/flat": { + "version": "5.0.2", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/flat/-/flat-5.0.2.tgz", + "integrity": "sha512-b6suED+5/3rTpUBdG1gupIl8MPFCAMA0QXwmljLhvCUKcUvdE4gWky9zpuGCcXHOsz4J9wPGNWq6OKpmIzz3hQ==", + "dev": true, + "license": "BSD-3-Clause", + "bin": { + "flat": "cli.js" + } + }, + "node_modules/for-each": { + "version": 
"0.3.5", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/for-each/-/for-each-0.3.5.tgz", + "integrity": "sha512-dKx12eRCVIzqCxFGplyFKJMPvLEWgmNtUrpTiJIR5u97zEhRG8ySrtboPHZXx7daLxQVrl643cTzbab2tkQjxg==", + "license": "MIT", + "dependencies": { + "is-callable": "^1.2.7" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/foreground-child": { + "version": "3.3.1", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/foreground-child/-/foreground-child-3.3.1.tgz", + "integrity": "sha512-gIXjKqtFuWEgzFRJA9WCQeSJLZDjgJUOMCMzxtvFq/37KojM1BFGufqsCy0r4qSQmYLsZYMeyRqzIWOMup03sw==", + "dev": true, + "license": "ISC", + "dependencies": { + "cross-spawn": "^7.0.6", + "signal-exit": "^4.0.1" + }, + "engines": { + "node": ">=14" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/foreground-child/node_modules/signal-exit": { + "version": "4.1.0", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/signal-exit/-/signal-exit-4.1.0.tgz", + "integrity": "sha512-bzyZ1e88w9O1iNJbKnOlvYTrWPDl46O1bG0D3XInv+9tkPrxrN8jUUTiFlDkkmKWgn1M6CfIA13SuGqOa9Korw==", + "dev": true, + "license": "ISC", + "engines": { + "node": ">=14" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/form-data": { + "version": "4.0.2", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/form-data/-/form-data-4.0.2.tgz", + "integrity": "sha512-hGfm/slu0ZabnNt4oaRZ6uREyfCj6P4fT/n6A1rGV+Z0VdGXjfOhVUpkn6qVQONHGIFwmveGXyDs75+nr6FM8w==", + "license": "MIT", + "dependencies": { + "asynckit": "^0.4.0", + "combined-stream": "^1.0.8", + "es-set-tostringtag": "^2.1.0", + "mime-types": "^2.1.12" + }, + "engines": { + "node": ">= 6" + } + }, + "node_modules/fs-minipass": { + "version": "2.1.0", + "resolved": 
"https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/fs-minipass/-/fs-minipass-2.1.0.tgz", + "integrity": "sha512-V/JgOLFCS+R6Vcq0slCuaeWEdNC3ouDlJMNIsacH2VtALiu9mV4LPrHc5cDl8k5aw6J8jwgWWpiTo5RYhmIzvg==", + "dev": true, + "license": "ISC", + "dependencies": { + "minipass": "^3.0.0" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/fs.realpath": { + "version": "1.0.0", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/fs.realpath/-/fs.realpath-1.0.0.tgz", + "integrity": "sha512-OO0pH2lK6a0hZnAdau5ItzHPI6pUlvI7jMVnxUQRtw4owF2wk8lOSabtGDCTP4Ggrg2MbGnWO9X8K1t4+fGMDw==", + "dev": true, + "license": "ISC" + }, + "node_modules/function-bind": { + "version": "1.1.2", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/function-bind/-/function-bind-1.1.2.tgz", + "integrity": "sha512-7XHNxH7qX9xG5mIwxkhumTox/MIRNcOgDrxWsMt2pAr23WHp6MrRlN7FBSFpCpr+oVO0F744iUgR82nJMfG2SA==", + "license": "MIT", + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/gauge": { + "version": "3.0.2", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/gauge/-/gauge-3.0.2.tgz", + "integrity": "sha512-+5J6MS/5XksCuXq++uFRsnUd7Ovu1XenbeuIuNRJxYWjgQbPuFhT14lAvsWfqfAmnwluf1OwMjz39HjfLPci0Q==", + "deprecated": "This package is no longer supported.", + "dev": true, + "license": "ISC", + "dependencies": { + "aproba": "^1.0.3 || ^2.0.0", + "color-support": "^1.1.2", + "console-control-strings": "^1.0.0", + "has-unicode": "^2.0.1", + "object-assign": "^4.1.1", + "signal-exit": "^3.0.0", + "string-width": "^4.2.3", + "strip-ansi": "^6.0.1", + "wide-align": "^1.1.2" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/gensync": { + "version": "1.0.0-beta.2", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/gensync/-/gensync-1.0.0-beta.2.tgz", + "integrity": 
"sha512-3hN7NaskYvMDLQY55gnW3NQ+mesEAepTqlg+VEbj7zzqEMBVNhzcGYYeqFo/TlYz6eQiFcp1HcsCZO+nGgS8zg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/get-caller-file": { + "version": "2.0.5", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/get-caller-file/-/get-caller-file-2.0.5.tgz", + "integrity": "sha512-DyFP3BM/3YHTQOCUL/w0OZHR0lpKeGrxotcHWcqNEdnltqFwXVfhEBQ94eIo34AfQpo0rGki4cyIiftY06h2Fg==", + "license": "ISC", + "engines": { + "node": "6.* || 8.* || >= 10.*" + } + }, + "node_modules/get-intrinsic": { + "version": "1.3.0", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/get-intrinsic/-/get-intrinsic-1.3.0.tgz", + "integrity": "sha512-9fSjSaos/fRIVIp+xSJlE6lfwhES7LNtKaCBIamHsjr2na1BiABJPo0mOjjz8GJDURarmCPGqaiVg5mfjb98CQ==", + "license": "MIT", + "dependencies": { + "call-bind-apply-helpers": "^1.0.2", + "es-define-property": "^1.0.1", + "es-errors": "^1.3.0", + "es-object-atoms": "^1.1.1", + "function-bind": "^1.1.2", + "get-proto": "^1.0.1", + "gopd": "^1.2.0", + "has-symbols": "^1.1.0", + "hasown": "^2.0.2", + "math-intrinsics": "^1.1.0" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/get-proto": { + "version": "1.0.1", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/get-proto/-/get-proto-1.0.1.tgz", + "integrity": "sha512-sTSfBjoXBp89JvIKIefqw7U2CCebsc74kiY6awiGogKtoSGbgjYE/G/+l9sF3MWFPNc9IcoOC4ODfKHfxFmp0g==", + "license": "MIT", + "dependencies": { + "dunder-proto": "^1.0.1", + "es-object-atoms": "^1.0.0" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/get-stream": { + "version": "6.0.1", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/get-stream/-/get-stream-6.0.1.tgz", + "integrity": "sha512-ts6Wi+2j3jQjqi70w5AlN8DFnkSwC+MqmxEzdEALB2qXZYV3X/b1CTfgPLGJNMeAWxdPfU8FO1ms3NUfaHCPYg==", + 
"dev": true, + "license": "MIT", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/github-username": { + "version": "6.0.0", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/github-username/-/github-username-6.0.0.tgz", + "integrity": "sha512-7TTrRjxblSI5l6adk9zd+cV5d6i1OrJSo3Vr9xdGqFLBQo0mz5P9eIfKCDJ7eekVGGFLbce0qbPSnktXV2BjDQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@octokit/rest": "^18.0.6" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/glob": { + "version": "8.1.0", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/glob/-/glob-8.1.0.tgz", + "integrity": "sha512-r8hpEjiQEYlF2QU0df3dS+nxxSIreXQS1qRhMJM0Q5NDdR386C7jb7Hwwod8Fgiuex+k0GFjgft18yvxm5XoCQ==", + "deprecated": "Glob versions prior to v9 are no longer supported", + "dev": true, + "license": "ISC", + "dependencies": { + "fs.realpath": "^1.0.0", + "inflight": "^1.0.4", + "inherits": "2", + "minimatch": "^5.0.1", + "once": "^1.3.0" + }, + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/glob-parent": { + "version": "5.1.2", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/glob-parent/-/glob-parent-5.1.2.tgz", + "integrity": "sha512-AOIgSQCepiJYwP3ARnGx+5VnTu2HBYdzbGP45eLw1vr3zB3vZLeyed1sC9hnbcOc9/SrMyM5RPQrkGz4aS9Zow==", + "dev": true, + "license": "ISC", + "dependencies": { + "is-glob": "^4.0.1" + }, + "engines": { + "node": ">= 6" + } + }, + "node_modules/glob-to-regexp": { + "version": "0.4.1", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/glob-to-regexp/-/glob-to-regexp-0.4.1.tgz", + "integrity": "sha512-lkX1HJXwyMcprw/5YUZc2s7DrpAiHB21/V+E1rHUrVNokkvB6bqMzT0VfV6/86ZNabt1k14YOIaT7nDvOX3Iiw==", + "dev": true, + "license": 
"BSD-2-Clause", + "peer": true + }, + "node_modules/glob/node_modules/brace-expansion": { + "version": "2.0.1", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/brace-expansion/-/brace-expansion-2.0.1.tgz", + "integrity": "sha512-XnAIvQ8eM+kC6aULx6wuQiwVsnzsi9d3WxzV3FpWTGA19F621kwdbsAcFKXgKUHZWsy+mY6iL1sHTxWEFCytDA==", + "dev": true, + "license": "MIT", + "dependencies": { + "balanced-match": "^1.0.0" + } + }, + "node_modules/glob/node_modules/minimatch": { + "version": "5.1.6", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/minimatch/-/minimatch-5.1.6.tgz", + "integrity": "sha512-lKwV/1brpG6mBUFHtb7NUmtABCb2WZZmm2wNiOA5hAb8VdCS4B3dtMWyvcoViccwAW/COERjXLt0zP1zXUN26g==", + "dev": true, + "license": "ISC", + "dependencies": { + "brace-expansion": "^2.0.1" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/globals": { + "version": "11.12.0", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/globals/-/globals-11.12.0.tgz", + "integrity": "sha512-WOBp/EEGUiIsJSp7wcv/y6MO+lV9UoncWqxuFfm8eBwzWNgyfBd6Gz+IeKQ9jCmyhoH99g15M3T+QaVHFjizVA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=4" + } + }, + "node_modules/globby": { + "version": "11.1.0", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/globby/-/globby-11.1.0.tgz", + "integrity": "sha512-jhIXaOzy1sb8IyocaruWSn1TjmnBVs8Ayhcy83rmxNJ8q2uWKCAj3CnJY+KpGSXCueAPc0i05kVvVKtP1t9S3g==", + "dev": true, + "license": "MIT", + "dependencies": { + "array-union": "^2.1.0", + "dir-glob": "^3.0.1", + "fast-glob": "^3.2.9", + "ignore": "^5.2.0", + "merge2": "^1.4.1", + "slash": "^3.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/gopd": { + "version": "1.2.0", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/gopd/-/gopd-1.2.0.tgz", + "integrity": 
"sha512-ZUKRh6/kUFoAiTAtTYPZJ3hw9wNxx+BIBOijnlG9PnrJsCcSjs1wyyD6vJpaYtgnzDrKYRSqf3OO6Rfa93xsRg==", + "license": "MIT", + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/graceful-fs": { + "version": "4.2.11", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/graceful-fs/-/graceful-fs-4.2.11.tgz", + "integrity": "sha512-RbJ5/jmFcNNCcDV5o9eTnBLJ/HszWV0P73bc+Ff4nS/rJj+YaS6IGyiOL0VoBYX+l1Wrl3k63h/KrH+nhJ0XvQ==", + "dev": true, + "license": "ISC" + }, + "node_modules/grouped-queue": { + "version": "2.0.0", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/grouped-queue/-/grouped-queue-2.0.0.tgz", + "integrity": "sha512-/PiFUa7WIsl48dUeCvhIHnwNmAAzlI/eHoJl0vu3nsFA366JleY7Ff8EVTplZu5kO0MIdZjKTTnzItL61ahbnw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8.0.0" + } + }, + "node_modules/has-flag": { + "version": "4.0.0", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/has-flag/-/has-flag-4.0.0.tgz", + "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==", + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/has-property-descriptors": { + "version": "1.0.2", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/has-property-descriptors/-/has-property-descriptors-1.0.2.tgz", + "integrity": "sha512-55JNKuIW+vq4Ke1BjOTjM2YctQIvCT7GFzHwmfZPGo5wnrgkid0YQtnAleFSqumZm4az3n2BS+erby5ipJdgrg==", + "license": "MIT", + "dependencies": { + "es-define-property": "^1.0.0" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/has-symbols": { + "version": "1.1.0", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/has-symbols/-/has-symbols-1.1.0.tgz", + "integrity": 
"sha512-1cDNdwJ2Jaohmb3sg4OmKaMBwuC48sYni5HUw2DvsC8LjGTLK9h+eb1X6RyuOHe4hT0ULCW68iomhjUoKUqlPQ==", + "license": "MIT", + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/has-tostringtag": { + "version": "1.0.2", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/has-tostringtag/-/has-tostringtag-1.0.2.tgz", + "integrity": "sha512-NqADB8VjPFLM2V0VvHUewwwsw0ZWBaIdgo+ieHtK3hasLz4qeCRjYcqfB6AQrBggRKppKF8L52/VqdVsO47Dlw==", + "license": "MIT", + "dependencies": { + "has-symbols": "^1.0.3" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/has-unicode": { + "version": "2.0.1", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/has-unicode/-/has-unicode-2.0.1.tgz", + "integrity": "sha512-8Rf9Y83NBReMnx0gFzA8JImQACstCYWUplepDa9xprwwtmgEZUF0h/i5xSA625zB/I37EtrswSST6OXxwaaIJQ==", + "dev": true, + "license": "ISC" + }, + "node_modules/hasown": { + "version": "2.0.2", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/hasown/-/hasown-2.0.2.tgz", + "integrity": "sha512-0hJU9SCPvmMzIBdZFqNPXWa6dqh7WdH0cII9y+CyS8rG3nL48Bclra9HmKhVVUHyPWNH5Y7xDwAB7bfgSjkUMQ==", + "license": "MIT", + "dependencies": { + "function-bind": "^1.1.2" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/hosted-git-info": { + "version": "4.1.0", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/hosted-git-info/-/hosted-git-info-4.1.0.tgz", + "integrity": "sha512-kyCuEOWjJqZuDbRHzL8V93NzQhwIB71oFWSyzVo+KPZI+pnQPPxucdkrOZvkLRnrf5URsQM+IJ09Dw29cRALIA==", + "dev": true, + "license": "ISC", + "dependencies": { + "lru-cache": "^6.0.0" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/http-cache-semantics": { + "version": "4.1.1", + "resolved": 
"https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/http-cache-semantics/-/http-cache-semantics-4.1.1.tgz", + "integrity": "sha512-er295DKPVsV82j5kw1Gjt+ADA/XYHsajl82cGNQG2eyoPkvgUhX+nDIyelzhIWbbsXP39EHcI6l5tYs2FYqYXQ==", + "dev": true, + "license": "BSD-2-Clause" + }, + "node_modules/http-proxy-agent": { + "version": "4.0.1", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/http-proxy-agent/-/http-proxy-agent-4.0.1.tgz", + "integrity": "sha512-k0zdNgqWTGA6aeIRVpvfVob4fL52dTfaehylg0Y4UvSySvOq/Y+BOyPrgpUrA7HylqvU8vIZGsRuXmspskV0Tg==", + "dev": true, + "license": "MIT", + "dependencies": { + "@tootallnate/once": "1", + "agent-base": "6", + "debug": "4" + }, + "engines": { + "node": ">= 6" + } + }, + "node_modules/https-proxy-agent": { + "version": "5.0.1", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/https-proxy-agent/-/https-proxy-agent-5.0.1.tgz", + "integrity": "sha512-dFcAjpTQFgoLMzC2VwU+C/CbS7uRL0lWmxDITmqm7C+7F0Odmj6s9l6alZc6AELXhrnggM2CeWSXHGOdX2YtwA==", + "dev": true, + "license": "MIT", + "dependencies": { + "agent-base": "6", + "debug": "4" + }, + "engines": { + "node": ">= 6" + } + }, + "node_modules/human-signals": { + "version": "2.1.0", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/human-signals/-/human-signals-2.1.0.tgz", + "integrity": "sha512-B4FFZ6q/T2jhhksgkbEW3HBvWIfDW85snkQgawt07S7J5QXTk6BkNV+0yAeZrM5QpMAdYlocGoljn0sJ/WQkFw==", + "dev": true, + "license": "Apache-2.0", + "engines": { + "node": ">=10.17.0" + } + }, + "node_modules/humanize-ms": { + "version": "1.2.1", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/humanize-ms/-/humanize-ms-1.2.1.tgz", + "integrity": "sha512-Fl70vYtsAFb/C06PTS9dZBo7ihau+Tu/DNCk/OyHhea07S+aeMWpFFkUaXRa8fI+ScZbEI8dfSxwY7gxZ9SAVQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "ms": "^2.0.0" + } + }, + "node_modules/iconv-lite": { + "version": "0.4.24", + 
"resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/iconv-lite/-/iconv-lite-0.4.24.tgz", + "integrity": "sha512-v3MXnZAcvnywkTUEZomIActle7RXXeedOR31wwl7VlyoXO4Qi9arvSenNQWne1TcRwhCL1HwLI21bEqdpj8/rA==", + "dev": true, + "license": "MIT", + "dependencies": { + "safer-buffer": ">= 2.1.2 < 3" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/ieee754": { + "version": "1.2.1", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/ieee754/-/ieee754-1.2.1.tgz", + "integrity": "sha512-dcyqhDvX1C46lXZcVqCpK+FtMRQVdIMN6/Df5js2zouUsqG7I6sFxitIC+7KYK29KdXOLHdu9zL4sFnoVQnqaA==", + "dev": true, + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ], + "license": "BSD-3-Clause" + }, + "node_modules/ignore": { + "version": "5.3.2", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/ignore/-/ignore-5.3.2.tgz", + "integrity": "sha512-hsBTNUqQTDwkWtcdYI2i06Y/nUBEsNEDJKjWdigLvegy8kDuJAS8uRlpkkcQpyEXL0Z/pjDy5HBmMjRCJ2gq+g==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 4" + } + }, + "node_modules/ignore-walk": { + "version": "4.0.1", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/ignore-walk/-/ignore-walk-4.0.1.tgz", + "integrity": "sha512-rzDQLaW4jQbh2YrOFlJdCtX8qgJTehFRYiUB2r1osqTeDzV/3+Jh8fz1oAPzUThf3iku8Ds4IDqawI5d8mUiQw==", + "dev": true, + "license": "ISC", + "dependencies": { + "minimatch": "^3.0.4" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/import-local": { + "version": "3.2.0", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/import-local/-/import-local-3.2.0.tgz", + "integrity": "sha512-2SPlun1JUPWoM6t3F0dw0FkCF/jWY8kttcY4f599GLTSjh2OCuuhdTkJQsEcZzBqbXZGKMK2OqW1oZsjtf/gQA==", + "dev": true, + 
"license": "MIT", + "dependencies": { + "pkg-dir": "^4.2.0", + "resolve-cwd": "^3.0.0" + }, + "bin": { + "import-local-fixture": "fixtures/cli.js" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/imurmurhash": { + "version": "0.1.4", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/imurmurhash/-/imurmurhash-0.1.4.tgz", + "integrity": "sha512-JmXMZ6wuvDmLiHEml9ykzqO6lwFbof0GG4IkcGaENdCRDDmMVnny7s5HsIgHCbaq0w2MyPhDqkhTUgS2LU2PHA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.8.19" + } + }, + "node_modules/indent-string": { + "version": "4.0.0", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/indent-string/-/indent-string-4.0.0.tgz", + "integrity": "sha512-EdDDZu4A2OyIK7Lr/2zG+w5jmbuk1DVBnEwREQvBzspBJkCEbRa8GxU1lghYcaGJCnRWibjDXlq779X1/y5xwg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/infer-owner": { + "version": "1.0.4", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/infer-owner/-/infer-owner-1.0.4.tgz", + "integrity": "sha512-IClj+Xz94+d7irH5qRyfJonOdfTzuDaifE6ZPWfx0N0+/ATZCbuTPq2prFl526urkQd90WyUKIh1DfBQ2hMz9A==", + "dev": true, + "license": "ISC" + }, + "node_modules/inflight": { + "version": "1.0.6", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/inflight/-/inflight-1.0.6.tgz", + "integrity": "sha512-k92I/b08q4wvFscXCLvqfsHCrjrF7yiXsQuIVvVE7N82W3+aqpzuUdBbfhWcy/FZR3/4IgflMgKLOsvPDrGCJA==", + "deprecated": "This module is not supported, and leaks memory. Do not use it. 
Check out lru-cache if you want a good and tested way to coalesce async requests by a key value, which is much more comprehensive and powerful.", + "dev": true, + "license": "ISC", + "dependencies": { + "once": "^1.3.0", + "wrappy": "1" + } + }, + "node_modules/inherits": { + "version": "2.0.4", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/inherits/-/inherits-2.0.4.tgz", + "integrity": "sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ==", + "license": "ISC" + }, + "node_modules/inquirer": { + "version": "8.2.6", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/inquirer/-/inquirer-8.2.6.tgz", + "integrity": "sha512-M1WuAmb7pn9zdFRtQYk26ZBoY043Sse0wVDdk4Bppr+JOXyQYybdtvK+l9wUibhtjdjvtoiNy8tk+EgsYIUqKg==", + "dev": true, + "license": "MIT", + "dependencies": { + "ansi-escapes": "^4.2.1", + "chalk": "^4.1.1", + "cli-cursor": "^3.1.0", + "cli-width": "^3.0.0", + "external-editor": "^3.0.3", + "figures": "^3.0.0", + "lodash": "^4.17.21", + "mute-stream": "0.0.8", + "ora": "^5.4.1", + "run-async": "^2.4.0", + "rxjs": "^7.5.5", + "string-width": "^4.1.0", + "strip-ansi": "^6.0.0", + "through": "^2.3.6", + "wrap-ansi": "^6.0.1" + }, + "engines": { + "node": ">=12.0.0" + } + }, + "node_modules/interpret": { + "version": "3.1.1", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/interpret/-/interpret-3.1.1.tgz", + "integrity": "sha512-6xwYfHbajpoF0xLW+iwLkhwgvLoZDfjYfoFNu8ftMoXINzwuymNLd9u/KmwtdT2GbR+/Cz66otEGEVVUHX9QLQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=10.13.0" + } + }, + "node_modules/ip-address": { + "version": "9.0.5", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/ip-address/-/ip-address-9.0.5.tgz", + "integrity": "sha512-zHtQzGojZXTwZTHQqra+ETKd4Sn3vgi7uBmlPoXVWZqYvuKmtI0l/VZTjqGmJY9x88GGOaZ9+G9ES8hC4T4X8g==", + "dev": true, + "license": "MIT", + "dependencies": 
{ + "jsbn": "1.1.0", + "sprintf-js": "^1.1.3" + }, + "engines": { + "node": ">= 12" + } + }, + "node_modules/is-arguments": { + "version": "1.2.0", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/is-arguments/-/is-arguments-1.2.0.tgz", + "integrity": "sha512-7bVbi0huj/wrIAOzb8U1aszg9kdi3KN/CyU19CTI7tAoZYEZoL9yCDXpbXN+uPsuWnP02cyug1gleqq+TU+YCA==", + "license": "MIT", + "dependencies": { + "call-bound": "^1.0.2", + "has-tostringtag": "^1.0.2" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/is-arrayish": { + "version": "0.2.1", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/is-arrayish/-/is-arrayish-0.2.1.tgz", + "integrity": "sha512-zz06S8t0ozoDXMG+ube26zeCTNXcKIPJZJi8hBrF4idCLms4CG9QtK7qBl1boi5ODzFpjswb5JPmHCbMpjaYzg==", + "dev": true, + "license": "MIT" + }, + "node_modules/is-callable": { + "version": "1.2.7", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/is-callable/-/is-callable-1.2.7.tgz", + "integrity": "sha512-1BC0BVFhS/p0qtw6enp8e+8OD0UrK0oFLztSjNzhcKA3WDuJxxAPXzPuPtKkjEY9UUoEWlX/8fgKeu2S8i9JTA==", + "license": "MIT", + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/is-core-module": { + "version": "2.16.1", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/is-core-module/-/is-core-module-2.16.1.tgz", + "integrity": "sha512-UfoeMA6fIJ8wTYFEUjelnaGI67v6+N7qXJEvQuIGa99l4xsCruSYOVSQ0uPANn4dAzm8lkYPaKLrrijLq7x23w==", + "dev": true, + "license": "MIT", + "dependencies": { + "hasown": "^2.0.2" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/is-extglob": { + "version": "2.1.1", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/is-extglob/-/is-extglob-2.1.1.tgz", + 
"integrity": "sha512-SbKbANkN603Vi4jEZv49LeVJMn4yGwsbzZworEoyEiutsN3nJYdbO36zfhGJ6QEDpOZIFkDtnq5JRxmvl3jsoQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/is-fullwidth-code-point": { + "version": "3.0.0", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/is-fullwidth-code-point/-/is-fullwidth-code-point-3.0.0.tgz", + "integrity": "sha512-zymm5+u+sCsSWyD9qNaejV3DFvhCKclKdizYaJUuHA83RLjb7nSuGnddCHGv0hk+KY7BMAlsWeK4Ueg6EV6XQg==", + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/is-generator-function": { + "version": "1.1.0", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/is-generator-function/-/is-generator-function-1.1.0.tgz", + "integrity": "sha512-nPUB5km40q9e8UfN/Zc24eLlzdSf9OfKByBw9CIdw4H1giPMeA0OIJvbchsCu4npfI2QcMVBsGEBHKZ7wLTWmQ==", + "license": "MIT", + "dependencies": { + "call-bound": "^1.0.3", + "get-proto": "^1.0.0", + "has-tostringtag": "^1.0.2", + "safe-regex-test": "^1.1.0" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/is-glob": { + "version": "4.0.3", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/is-glob/-/is-glob-4.0.3.tgz", + "integrity": "sha512-xelSayHH36ZgE7ZWhli7pW34hNbNl8Ojv5KVmkJD4hBdD3th8Tfk9vYasLM+mXWOZhFkgZfxhLSnrwRr4elSSg==", + "dev": true, + "license": "MIT", + "dependencies": { + "is-extglob": "^2.1.1" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/is-interactive": { + "version": "1.0.0", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/is-interactive/-/is-interactive-1.0.0.tgz", + "integrity": "sha512-2HvIEKRoqS62guEC+qBjpvRubdX910WCMuJTZ+I9yvqKU2/12eSL549HMwtabb4oupdj2sMP50k+XJfB/8JE6w==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/is-lambda": { + "version": "1.0.1", + "resolved": 
"https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/is-lambda/-/is-lambda-1.0.1.tgz", + "integrity": "sha512-z7CMFGNrENq5iFB9Bqo64Xk6Y9sg+epq1myIcdHaGnbMTYOxvzsEtdYqQUylB7LxfkvgrrjP32T6Ywciio9UIQ==", + "dev": true, + "license": "MIT" + }, + "node_modules/is-nan": { + "version": "1.3.2", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/is-nan/-/is-nan-1.3.2.tgz", + "integrity": "sha512-E+zBKpQ2t6MEo1VsonYmluk9NxGrbzpeeLC2xIViuO2EjU2xsXsBPwTr3Ykv9l08UYEVEdWeRZNouaZqF6RN0w==", + "license": "MIT", + "dependencies": { + "call-bind": "^1.0.0", + "define-properties": "^1.1.3" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/is-number": { + "version": "7.0.0", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/is-number/-/is-number-7.0.0.tgz", + "integrity": "sha512-41Cifkg6e8TylSpdtTpeLVMqvSBEVzTttHvERD741+pnZ8ANv0004MRL43QKPDlK9cGvNp6NZWZUBlbGXYxxng==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.12.0" + } + }, + "node_modules/is-plain-obj": { + "version": "2.1.0", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/is-plain-obj/-/is-plain-obj-2.1.0.tgz", + "integrity": "sha512-YWnfyRwxL/+SsrWYfOpUtz5b3YD+nyfkHvjbcanzk8zgyO4ASD67uVMRt8k5bM4lLMDnXfriRhOpemw+NfT1eA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/is-plain-object": { + "version": "2.0.4", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/is-plain-object/-/is-plain-object-2.0.4.tgz", + "integrity": "sha512-h5PpgXkWitc38BBMYawTYMWJHFZJVnBquFE57xFpjB8pJFiF6gZ+bU+WyI/yqXiFR5mdLsgYNaPe8uao6Uv9Og==", + "dev": true, + "license": "MIT", + "dependencies": { + "isobject": "^3.0.1" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/is-regex": { + "version": "1.2.1", + "resolved": 
"https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/is-regex/-/is-regex-1.2.1.tgz", + "integrity": "sha512-MjYsKHO5O7mCsmRGxWcLWheFqN9DJ/2TmngvjKXihe6efViPqc274+Fx/4fYj/r03+ESvBdTXK0V6tA3rgez1g==", + "license": "MIT", + "dependencies": { + "call-bound": "^1.0.2", + "gopd": "^1.2.0", + "has-tostringtag": "^1.0.2", + "hasown": "^2.0.2" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/is-scoped": { + "version": "2.1.0", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/is-scoped/-/is-scoped-2.1.0.tgz", + "integrity": "sha512-Cv4OpPTHAK9kHYzkzCrof3VJh7H/PrG2MBUMvvJebaaUMbqhm0YAtXnvh0I3Hnj2tMZWwrRROWLSgfJrKqWmlQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "scoped-regex": "^2.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/is-stream": { + "version": "2.0.1", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/is-stream/-/is-stream-2.0.1.tgz", + "integrity": "sha512-hFoiJiTl63nn+kstHGBtewWSKnQLpyb155KHheA1l39uvtO9nWIop1p3udqPcUd/xbF1VLMO4n7OI6p7RbngDg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/is-typed-array": { + "version": "1.1.15", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/is-typed-array/-/is-typed-array-1.1.15.tgz", + "integrity": "sha512-p3EcsicXjit7SaskXHs1hA91QxgTw46Fv6EFKKGS5DRFLD8yKnohjF3hxoju94b/OcMZoQukzpPpBE9uLVKzgQ==", + "license": "MIT", + "dependencies": { + "which-typed-array": "^1.1.16" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/is-unicode-supported": { + "version": "0.1.0", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/is-unicode-supported/-/is-unicode-supported-0.1.0.tgz", + 
"integrity": "sha512-knxG2q4UC3u8stRGyAVJCOdxFmv5DZiRcdlIaAQXAbSfJya+OhopNotLQrstBhququ4ZpuKbDc/8S6mgXgPFPw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/is-utf8": { + "version": "0.2.1", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/is-utf8/-/is-utf8-0.2.1.tgz", + "integrity": "sha512-rMYPYvCzsXywIsldgLaSoPlw5PfoB/ssr7hY4pLfcodrA5M/eArza1a9VmTiNIBNMjOGr1Ow9mTyU2o69U6U9Q==", + "dev": true, + "license": "MIT" + }, + "node_modules/isarray": { + "version": "1.0.0", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/isarray/-/isarray-1.0.0.tgz", + "integrity": "sha512-VLghIWNM6ELQzo7zwmcg0NmTVyWKYjvIeM83yjp0wRDTmUnrM678fQbcKBo6n2CJEF0szoG//ytg+TKla89ALQ==", + "dev": true, + "license": "MIT" + }, + "node_modules/isbinaryfile": { + "version": "4.0.10", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/isbinaryfile/-/isbinaryfile-4.0.10.tgz", + "integrity": "sha512-iHrqe5shvBUcFbmZq9zOQHBoeOhZJu6RQGrDpBgenUm/Am+F3JM2MgQj+rK3Z601fzrL5gLZWtAPH2OBaSVcyw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 8.0.0" + }, + "funding": { + "url": "https://github.com/sponsors/gjtorikian/" + } + }, + "node_modules/isexe": { + "version": "2.0.0", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/isexe/-/isexe-2.0.0.tgz", + "integrity": "sha512-RHxMLp9lnKHGHRng9QFhRCMbYAcVpn69smSGcq3f36xjgVVWThj4qqLbTLlq7Ssj8B+fIQ1EuCEGI2lKsyQeIw==", + "dev": true, + "license": "ISC" + }, + "node_modules/isobject": { + "version": "3.0.1", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/isobject/-/isobject-3.0.1.tgz", + "integrity": "sha512-WhB9zCku7EGTj/HQQRz5aUQEUeoQZH2bWcltRErOpymJ4boYE6wL9Tbr23krRPSZ+C5zqNSrSw+Cc7sZZ4b7vg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + 
"node_modules/jackspeak": { + "version": "3.4.3", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/jackspeak/-/jackspeak-3.4.3.tgz", + "integrity": "sha512-OGlZQpz2yfahA/Rd1Y8Cd9SIEsqvXkLVoSw/cgwhnhFMDbsQFeZYoJJ7bIZBS9BcamUW96asq/npPWugM+RQBw==", + "dev": true, + "license": "BlueOak-1.0.0", + "dependencies": { + "@isaacs/cliui": "^8.0.2" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + }, + "optionalDependencies": { + "@pkgjs/parseargs": "^0.11.0" + } + }, + "node_modules/jake": { + "version": "10.9.2", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/jake/-/jake-10.9.2.tgz", + "integrity": "sha512-2P4SQ0HrLQ+fw6llpLnOaGAvN2Zu6778SJMrCUwns4fOoG9ayrTiZk3VV8sCPkVZF8ab0zksVpS8FDY5pRCNBA==", + "dev": true, + "license": "Apache-2.0", + "dependencies": { + "async": "^3.2.3", + "chalk": "^4.0.2", + "filelist": "^1.0.4", + "minimatch": "^3.1.2" + }, + "bin": { + "jake": "bin/cli.js" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/jest-worker": { + "version": "27.5.1", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/jest-worker/-/jest-worker-27.5.1.tgz", + "integrity": "sha512-7vuh85V5cdDofPyxn58nrPjBktZo0u9x1g8WtjQol+jZDaE+fhN+cIvTj11GndBnMnyfrUOG1sZQxCdjKh+DKg==", + "dev": true, + "license": "MIT", + "peer": true, + "dependencies": { + "@types/node": "*", + "merge-stream": "^2.0.0", + "supports-color": "^8.0.0" + }, + "engines": { + "node": ">= 10.13.0" + } + }, + "node_modules/js-tokens": { + "version": "4.0.0", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/js-tokens/-/js-tokens-4.0.0.tgz", + "integrity": "sha512-RdJUflcE3cUzKiMqQgsCu06FPu9UdIJO0beYbPhHN4k6apgJtifcoCtT9bcxOpYBtpD2kCM6Sbzg4CausW/PKQ==", + "dev": true, + "license": "MIT" + }, + "node_modules/js-yaml": { + "version": "3.14.1", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/js-yaml/-/js-yaml-3.14.1.tgz", + 
"integrity": "sha512-okMH7OXXJ7YrN9Ok3/SXrnu4iX9yOk+25nqX4imS2npuvTYDmo/QEZoqwZkYaIDk3jVvBOTOIEgEhaLOynBS9g==", + "dev": true, + "license": "MIT", + "dependencies": { + "argparse": "^1.0.7", + "esprima": "^4.0.0" + }, + "bin": { + "js-yaml": "bin/js-yaml.js" + } + }, + "node_modules/jsbn": { + "version": "1.1.0", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/jsbn/-/jsbn-1.1.0.tgz", + "integrity": "sha512-4bYVV3aAMtDTTu4+xsDYa6sy9GyJ69/amsu9sYF2zqjiEoZA5xJi3BrfX3uY+/IekIu7MwdObdbDWpoZdBv3/A==", + "dev": true, + "license": "MIT" + }, + "node_modules/jsesc": { + "version": "3.1.0", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/jsesc/-/jsesc-3.1.0.tgz", + "integrity": "sha512-/sM3dO2FOzXjKQhJuo0Q173wf2KOo8t4I8vHy6lF9poUp7bKT0/NHE8fPX23PwfhnykfqnC2xRxOnVw5XuGIaA==", + "dev": true, + "license": "MIT", + "bin": { + "jsesc": "bin/jsesc" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/json-parse-even-better-errors": { + "version": "2.3.1", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/json-parse-even-better-errors/-/json-parse-even-better-errors-2.3.1.tgz", + "integrity": "sha512-xyFwyhro/JEof6Ghe2iz2NcXoj2sloNsWr/XsERDK/oiPCfaNhl5ONfp+jQdAZRQQ0IJWNzH9zIZF7li91kh2w==", + "dev": true, + "license": "MIT" + }, + "node_modules/json-schema-traverse": { + "version": "1.0.0", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/json-schema-traverse/-/json-schema-traverse-1.0.0.tgz", + "integrity": "sha512-NM8/P9n3XjXhIZn1lLhkFaACTOURQXjWhV4BA/RnOv8xvgqtqpAX9IO4mRQxSx1Rlo4tqzeqb0sOlruaOy3dug==", + "dev": true, + "license": "MIT", + "peer": true + }, + "node_modules/json-stringify-nice": { + "version": "1.1.4", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/json-stringify-nice/-/json-stringify-nice-1.1.4.tgz", + "integrity": 
"sha512-5Z5RFW63yxReJ7vANgW6eZFGWaQvnPE3WNmZoOJrSkGju2etKA2L5rrOa1sm877TVTFt57A80BH1bArcmlLfPw==", + "dev": true, + "license": "ISC", + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/json5": { + "version": "2.2.3", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/json5/-/json5-2.2.3.tgz", + "integrity": "sha512-XmOWe7eyHYH14cLdVPoyg+GOH3rYX++KpzrylJwSW98t3Nk+U8XOl8FWKOgwtzdb8lXGf6zYwDUzeHMWfxasyg==", + "dev": true, + "license": "MIT", + "bin": { + "json5": "lib/cli.js" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/jsonparse": { + "version": "1.3.1", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/jsonparse/-/jsonparse-1.3.1.tgz", + "integrity": "sha512-POQXvpdL69+CluYsillJ7SUhKvytYjW9vG/GKpnf+xP8UWgYEM/RaMzHHofbALDiKbbP1W8UEYmgGl39WkPZsg==", + "dev": true, + "engines": [ + "node >= 0.2.0" + ], + "license": "MIT" + }, + "node_modules/just-diff": { + "version": "5.2.0", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/just-diff/-/just-diff-5.2.0.tgz", + "integrity": "sha512-6ufhP9SHjb7jibNFrNxyFZ6od3g+An6Ai9mhGRvcYe8UJlH0prseN64M+6ZBBUoKYHZsitDP42gAJ8+eVWr3lw==", + "dev": true, + "license": "MIT" + }, + "node_modules/just-diff-apply": { + "version": "5.5.0", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/just-diff-apply/-/just-diff-apply-5.5.0.tgz", + "integrity": "sha512-OYTthRfSh55WOItVqwpefPtNt2VdKsq5AnAK6apdtR6yCH8pr0CmSr710J0Mf+WdQy7K/OzMy7K2MgAfdQURDw==", + "dev": true, + "license": "MIT" + }, + "node_modules/kind-of": { + "version": "6.0.3", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/kind-of/-/kind-of-6.0.3.tgz", + "integrity": "sha512-dcS1ul+9tmeD95T+x28/ehLgd9mENa3LsvDTtzm3vyBEO7RPptvAD+t44WVXaUjTBRcrpFeFlC8WCruUR456hw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/lines-and-columns": { + 
"version": "1.2.4", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/lines-and-columns/-/lines-and-columns-1.2.4.tgz", + "integrity": "sha512-7ylylesZQ/PV29jhEDl3Ufjo6ZX7gCqJr5F7PKrqc93v7fzSymt1BpwEU8nAUXs8qzzvqhbjhK5QZg6Mt/HkBg==", + "dev": true, + "license": "MIT" + }, + "node_modules/load-yaml-file": { + "version": "0.2.0", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/load-yaml-file/-/load-yaml-file-0.2.0.tgz", + "integrity": "sha512-OfCBkGEw4nN6JLtgRidPX6QxjBQGQf72q3si2uvqyFEMbycSFFHwAZeXx6cJgFM9wmLrf9zBwCP3Ivqa+LLZPw==", + "dev": true, + "license": "MIT", + "dependencies": { + "graceful-fs": "^4.1.5", + "js-yaml": "^3.13.0", + "pify": "^4.0.1", + "strip-bom": "^3.0.0" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/load-yaml-file/node_modules/pify": { + "version": "4.0.1", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/pify/-/pify-4.0.1.tgz", + "integrity": "sha512-uB80kBFb/tfd68bVleG9T5GGsGPjJrLAUpR5PZIrhBnIaRTQRjqdJSsIKkOP6OAIFbj7GOrcudc5pNjZ+geV2g==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "node_modules/load-yaml-file/node_modules/strip-bom": { + "version": "3.0.0", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/strip-bom/-/strip-bom-3.0.0.tgz", + "integrity": "sha512-vavAMRXOgBVNF6nyEEmL3DBK19iRpDcoIwW+swQ+CbGiu7lju6t+JklA1MHweoWtadgt4ISVUsXLyDq34ddcwA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=4" + } + }, + "node_modules/loader-runner": { + "version": "4.3.0", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/loader-runner/-/loader-runner-4.3.0.tgz", + "integrity": "sha512-3R/1M+yS3j5ou80Me59j7F9IMs4PXs3VqRrm0TU3AbKPxlmpoY1TNscJV/oGJXo8qCatFGTfDbY6W6ipGOYXfg==", + "dev": true, + "license": "MIT", + "peer": true, + "engines": { + "node": ">=6.11.5" + } + }, + "node_modules/locate-path": { + "version": "5.0.0", + 
"resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/locate-path/-/locate-path-5.0.0.tgz", + "integrity": "sha512-t7hw9pI+WvuwNJXwk5zVHpyhIqzg2qTlklJOf0mVxGSbe3Fp2VieZcduNYjaLDoy6p9uGpQEGWG87WpMKlNq8g==", + "dev": true, + "license": "MIT", + "dependencies": { + "p-locate": "^4.1.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/lodash": { + "version": "4.17.21", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/lodash/-/lodash-4.17.21.tgz", + "integrity": "sha512-v2kDEe57lecTulaDIuNTPy3Ry4gLGJ6Z1O3vE1krgXZNrsQ+LFTGHVxVjcXPs17LhbZVGedAJv8XZ1tvj5FvSg==", + "dev": true, + "license": "MIT" + }, + "node_modules/lodash.debounce": { + "version": "4.0.8", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/lodash.debounce/-/lodash.debounce-4.0.8.tgz", + "integrity": "sha512-FT1yDzDYEoYWhnSGnpE/4Kj1fLZkDFyqRb7fNt6FdYOSxlUWAtp42Eh6Wb0rGIv/m9Bgo7x4GhQbm5Ys4SG5ow==", + "dev": true, + "license": "MIT" + }, + "node_modules/log-symbols": { + "version": "4.1.0", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/log-symbols/-/log-symbols-4.1.0.tgz", + "integrity": "sha512-8XPvpAA8uyhfteu8pIvQxpJZ7SYYdpUivZpGy6sFsBuKRY/7rQGavedeB8aK+Zkyq6upMFVL/9AW6vOYzfRyLg==", + "dev": true, + "license": "MIT", + "dependencies": { + "chalk": "^4.1.0", + "is-unicode-supported": "^0.1.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/long": { + "version": "4.0.0", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/long/-/long-4.0.0.tgz", + "integrity": "sha512-XsP+KhQif4bjX1kbuSiySJFNAehNxgLb6hPRGJ9QsUr8ajHkuXGdrHmFUTUUXhDwVX2R5bY4JNZEwbUiMhV+MA==", + "license": "Apache-2.0" + }, + "node_modules/lru-cache": { + "version": "6.0.0", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/lru-cache/-/lru-cache-6.0.0.tgz", + 
"integrity": "sha512-Jo6dJ04CmSjuznwJSS3pUeWmd/H0ffTlkXXgwZi+eq1UCmqQwCh+eLsYOYCwY991i2Fah4h1BEMCx4qThGbsiA==", + "dev": true, + "license": "ISC", + "dependencies": { + "yallist": "^4.0.0" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/make-fetch-happen": { + "version": "9.1.0", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/make-fetch-happen/-/make-fetch-happen-9.1.0.tgz", + "integrity": "sha512-+zopwDy7DNknmwPQplem5lAZX/eCOzSvSNNcSKm5eVwTkOBzoktEfXsa9L23J/GIRhxRsaxzkPEhrJEpE2F4Gg==", + "dev": true, + "license": "ISC", + "dependencies": { + "agentkeepalive": "^4.1.3", + "cacache": "^15.2.0", + "http-cache-semantics": "^4.1.0", + "http-proxy-agent": "^4.0.1", + "https-proxy-agent": "^5.0.0", + "is-lambda": "^1.0.1", + "lru-cache": "^6.0.0", + "minipass": "^3.1.3", + "minipass-collect": "^1.0.2", + "minipass-fetch": "^1.3.2", + "minipass-flush": "^1.0.5", + "minipass-pipeline": "^1.2.4", + "negotiator": "^0.6.2", + "promise-retry": "^2.0.1", + "socks-proxy-agent": "^6.0.0", + "ssri": "^8.0.0" + }, + "engines": { + "node": ">= 10" + } + }, + "node_modules/math-intrinsics": { + "version": "1.1.0", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/math-intrinsics/-/math-intrinsics-1.1.0.tgz", + "integrity": "sha512-/IXtbwEk5HTPyEwyKX6hGkYXxM9nbj64B+ilVJnC/R6B0pH5G4V3b0pVbL7DBj4tkhBAppbQUlf6F6Xl9LHu1g==", + "license": "MIT", + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/mem-fs": { + "version": "2.3.0", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/mem-fs/-/mem-fs-2.3.0.tgz", + "integrity": "sha512-GftCCBs6EN8sz3BoWO1bCj8t7YBtT713d8bUgbhg9Iel5kFSqnSvCK06TYIDJAtJ51cSiWkM/YemlT0dfoFycw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@types/node": "^15.6.2", + "@types/vinyl": "^2.0.4", + "vinyl": "^2.0.1", + "vinyl-file": "^3.0.0" + }, + "engines": { + "node": ">=12" + } + }, + "node_modules/mem-fs-editor": { + "version": "9.7.0", + 
"resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/mem-fs-editor/-/mem-fs-editor-9.7.0.tgz", + "integrity": "sha512-ReB3YD24GNykmu4WeUL/FDIQtkoyGB6zfJv60yfCo3QjKeimNcTqv2FT83bP0ccs6uu+sm5zyoBlspAzigmsdg==", + "dev": true, + "license": "MIT", + "dependencies": { + "binaryextensions": "^4.16.0", + "commondir": "^1.0.1", + "deep-extend": "^0.6.0", + "ejs": "^3.1.8", + "globby": "^11.1.0", + "isbinaryfile": "^5.0.0", + "minimatch": "^7.2.0", + "multimatch": "^5.0.0", + "normalize-path": "^3.0.0", + "textextensions": "^5.13.0" + }, + "engines": { + "node": ">=12.10.0" + }, + "peerDependencies": { + "mem-fs": "^2.1.0" + }, + "peerDependenciesMeta": { + "mem-fs": { + "optional": true + } + } + }, + "node_modules/mem-fs-editor/node_modules/brace-expansion": { + "version": "2.0.1", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/brace-expansion/-/brace-expansion-2.0.1.tgz", + "integrity": "sha512-XnAIvQ8eM+kC6aULx6wuQiwVsnzsi9d3WxzV3FpWTGA19F621kwdbsAcFKXgKUHZWsy+mY6iL1sHTxWEFCytDA==", + "dev": true, + "license": "MIT", + "dependencies": { + "balanced-match": "^1.0.0" + } + }, + "node_modules/mem-fs-editor/node_modules/isbinaryfile": { + "version": "5.0.4", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/isbinaryfile/-/isbinaryfile-5.0.4.tgz", + "integrity": "sha512-YKBKVkKhty7s8rxddb40oOkuP0NbaeXrQvLin6QMHL7Ypiy2RW9LwOVrVgZRyOrhQlayMd9t+D8yDy8MKFTSDQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 18.0.0" + }, + "funding": { + "url": "https://github.com/sponsors/gjtorikian/" + } + }, + "node_modules/mem-fs-editor/node_modules/minimatch": { + "version": "7.4.6", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/minimatch/-/minimatch-7.4.6.tgz", + "integrity": "sha512-sBz8G/YjVniEz6lKPNpKxXwazJe4c19fEfV2GDMX6AjFz+MX9uDWIZW8XreVhkFW3fkIdTv/gxWr/Kks5FFAVw==", + "dev": true, + "license": "ISC", + "dependencies": { + "brace-expansion": 
"^2.0.1" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/mem-fs/node_modules/@types/node": { + "version": "15.14.9", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/@types/node/-/node-15.14.9.tgz", + "integrity": "sha512-qjd88DrCxupx/kJD5yQgZdcYKZKSIGBVDIBE1/LTGcNm3d2Np/jxojkdePDdfnBHJc5W7vSMpbJ1aB7p/Py69A==", + "dev": true, + "license": "MIT" + }, + "node_modules/merge-stream": { + "version": "2.0.0", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/merge-stream/-/merge-stream-2.0.0.tgz", + "integrity": "sha512-abv/qOcuPfk3URPfDzmZU1LKmuw8kT+0nIHvKrKgFrwifol/doWcdA4ZqsWQ8ENrFKkd67Mfpo/LovbIUsbt3w==", + "dev": true, + "license": "MIT" + }, + "node_modules/merge2": { + "version": "1.4.1", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/merge2/-/merge2-1.4.1.tgz", + "integrity": "sha512-8q7VEgMJW4J8tcfVPy8g09NcQwZdbwFEqhe/WZkoIzjn/3TGDwtOCYtXGxA3O8tPzpczCCDgv+P2P5y00ZJOOg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 8" + } + }, + "node_modules/micromatch": { + "version": "4.0.8", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/micromatch/-/micromatch-4.0.8.tgz", + "integrity": "sha512-PXwfBhYu0hBCPw8Dn0E+WDYb7af3dSLVWKi3HGv84IdF4TyFoC0ysxFd0Goxw7nSv4T/PzEJQxsYsEiFCKo2BA==", + "dev": true, + "license": "MIT", + "dependencies": { + "braces": "^3.0.3", + "picomatch": "^2.3.1" + }, + "engines": { + "node": ">=8.6" + } + }, + "node_modules/mime-db": { + "version": "1.52.0", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/mime-db/-/mime-db-1.52.0.tgz", + "integrity": "sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg==", + "license": "MIT", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/mime-types": { + "version": "2.1.35", + "resolved": 
"https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/mime-types/-/mime-types-2.1.35.tgz", + "integrity": "sha512-ZDY+bPm5zTTF+YpCrAU9nK0UgICYPT0QtT1NZWFv4s++TNkcgVaT0g6+4R2uI4MjQjzysHB1zxuWL50hzaeXiw==", + "license": "MIT", + "dependencies": { + "mime-db": "1.52.0" + }, + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/mimic-fn": { + "version": "2.1.0", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/mimic-fn/-/mimic-fn-2.1.0.tgz", + "integrity": "sha512-OqbOk5oEQeAZ8WXWydlu9HJjz9WVdEIvamMCcXmuqUYjTknH/sqsWvhQ3vgwKFRR1HpjvNBKQ37nbJgYzGqGcg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "node_modules/minimatch": { + "version": "3.1.2", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/minimatch/-/minimatch-3.1.2.tgz", + "integrity": "sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==", + "dev": true, + "license": "ISC", + "dependencies": { + "brace-expansion": "^1.1.7" + }, + "engines": { + "node": "*" + } + }, + "node_modules/minimist": { + "version": "1.2.8", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/minimist/-/minimist-1.2.8.tgz", + "integrity": "sha512-2yyAR8qBkN3YuheJanUpWC5U3bb5osDywNB8RzDVlDwDHbocAJveqqj1u8+SVD7jkWT4yvsHCpWqqWqAxb0zCA==", + "dev": true, + "license": "MIT", + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/minipass": { + "version": "3.3.6", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/minipass/-/minipass-3.3.6.tgz", + "integrity": "sha512-DxiNidxSEK+tHG6zOIklvNOwm3hvCrbUrdtzY74U6HKTJxvIDfOUL5W5P2Ghd3DTkhhKPYGqeNUIh5qcM4YBfw==", + "dev": true, + "license": "ISC", + "dependencies": { + "yallist": "^4.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/minipass-collect": { + "version": "1.0.2", + "resolved": 
"https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/minipass-collect/-/minipass-collect-1.0.2.tgz", + "integrity": "sha512-6T6lH0H8OG9kITm/Jm6tdooIbogG9e0tLgpY6mphXSm/A9u8Nq1ryBG+Qspiub9LjWlBPsPS3tWQ/Botq4FdxA==", + "dev": true, + "license": "ISC", + "dependencies": { + "minipass": "^3.0.0" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/minipass-fetch": { + "version": "1.4.1", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/minipass-fetch/-/minipass-fetch-1.4.1.tgz", + "integrity": "sha512-CGH1eblLq26Y15+Azk7ey4xh0J/XfJfrCox5LDJiKqI2Q2iwOLOKrlmIaODiSQS8d18jalF6y2K2ePUm0CmShw==", + "dev": true, + "license": "MIT", + "dependencies": { + "minipass": "^3.1.0", + "minipass-sized": "^1.0.3", + "minizlib": "^2.0.0" + }, + "engines": { + "node": ">=8" + }, + "optionalDependencies": { + "encoding": "^0.1.12" + } + }, + "node_modules/minipass-flush": { + "version": "1.0.5", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/minipass-flush/-/minipass-flush-1.0.5.tgz", + "integrity": "sha512-JmQSYYpPUqX5Jyn1mXaRwOda1uQ8HP5KAT/oDSLCzt1BYRhQU0/hDtsB1ufZfEEzMZ9aAVmsBw8+FWsIXlClWw==", + "dev": true, + "license": "ISC", + "dependencies": { + "minipass": "^3.0.0" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/minipass-json-stream": { + "version": "1.0.2", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/minipass-json-stream/-/minipass-json-stream-1.0.2.tgz", + "integrity": "sha512-myxeeTm57lYs8pH2nxPzmEEg8DGIgW+9mv6D4JZD2pa81I/OBjeU7PtICXV6c9eRGTA5JMDsuIPUZRCyBMYNhg==", + "dev": true, + "license": "MIT", + "dependencies": { + "jsonparse": "^1.3.1", + "minipass": "^3.0.0" + } + }, + "node_modules/minipass-pipeline": { + "version": "1.2.4", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/minipass-pipeline/-/minipass-pipeline-1.2.4.tgz", + "integrity": 
"sha512-xuIq7cIOt09RPRJ19gdi4b+RiNvDFYe5JH+ggNvBqGqpQXcru3PcRmOZuHBKWK1Txf9+cQ+HMVN4d6z46LZP7A==", + "dev": true, + "license": "ISC", + "dependencies": { + "minipass": "^3.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/minipass-sized": { + "version": "1.0.3", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/minipass-sized/-/minipass-sized-1.0.3.tgz", + "integrity": "sha512-MbkQQ2CTiBMlA2Dm/5cY+9SWFEN8pzzOXi6rlM5Xxq0Yqbda5ZQy9sU75a673FE9ZK0Zsbr6Y5iP6u9nktfg2g==", + "dev": true, + "license": "ISC", + "dependencies": { + "minipass": "^3.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/minizlib": { + "version": "2.1.2", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/minizlib/-/minizlib-2.1.2.tgz", + "integrity": "sha512-bAxsR8BVfj60DWXHE3u30oHzfl4G7khkSuPW+qvpd7jFRHm7dLxOjUk1EHACJ/hxLY8phGJ0YhYHZo7jil7Qdg==", + "dev": true, + "license": "MIT", + "dependencies": { + "minipass": "^3.0.0", + "yallist": "^4.0.0" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/mkdirp": { + "version": "1.0.4", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/mkdirp/-/mkdirp-1.0.4.tgz", + "integrity": "sha512-vVqVZQyf3WLx2Shd0qJ9xuvqgAyKPLAiqITEtqW0oIUjzo3PePDd6fW9iFz30ef7Ysp/oiWqbhszeGWW2T6Gzw==", + "dev": true, + "license": "MIT", + "bin": { + "mkdirp": "bin/cmd.js" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/mkdirp-infer-owner": { + "version": "2.0.0", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/mkdirp-infer-owner/-/mkdirp-infer-owner-2.0.0.tgz", + "integrity": "sha512-sdqtiFt3lkOaYvTXSRIUjkIdPTcxgv5+fgqYE/5qgwdw12cOrAuzzgzvVExIkH/ul1oeHN3bCLOWSG3XOqbKKw==", + "dev": true, + "license": "ISC", + "dependencies": { + "chownr": "^2.0.0", + "infer-owner": "^1.0.4", + "mkdirp": "^1.0.3" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/ms": { + "version": "2.1.3", + "resolved": 
"https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/ms/-/ms-2.1.3.tgz", + "integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==", + "dev": true, + "license": "MIT" + }, + "node_modules/multimatch": { + "version": "5.0.0", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/multimatch/-/multimatch-5.0.0.tgz", + "integrity": "sha512-ypMKuglUrZUD99Tk2bUQ+xNQj43lPEfAeX2o9cTteAmShXy2VHDJpuwu1o0xqoKCt9jLVAvwyFKdLTPXKAfJyA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@types/minimatch": "^3.0.3", + "array-differ": "^3.0.0", + "array-union": "^2.1.0", + "arrify": "^2.0.1", + "minimatch": "^3.0.4" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/mute-stream": { + "version": "0.0.8", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/mute-stream/-/mute-stream-0.0.8.tgz", + "integrity": "sha512-nnbWWOkoWyUsTjKrhgD0dcz22mdkSnpYqbEjIm2nhwhuxlSkpywJmBo8h0ZqJdkp73mb90SssHkN4rsRaBAfAA==", + "dev": true, + "license": "ISC" + }, + "node_modules/negotiator": { + "version": "0.6.4", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/negotiator/-/negotiator-0.6.4.tgz", + "integrity": "sha512-myRT3DiWPHqho5PrJaIRyaMv2kgYf0mUVgBNOYMuCH5Ki1yEiQaf/ZJuQ62nvpc44wL5WDbTX7yGJi1Neevw8w==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/neo-async": { + "version": "2.6.2", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/neo-async/-/neo-async-2.6.2.tgz", + "integrity": "sha512-Yd3UES5mWCSqR+qNT93S3UoYUkqAZ9lLg8a7g9rimsWmYGK8cVToA4/sF3RrshdyV3sAGMXVUmpMYOw+dLpOuw==", + "dev": true, + "license": "MIT", + "peer": true + }, + "node_modules/node-fetch": { + "version": "2.7.0", + "resolved": 
"https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/node-fetch/-/node-fetch-2.7.0.tgz", + "integrity": "sha512-c4FRfUm/dbcWZ7U+1Wq0AwCyFL+3nt2bEw05wfxSz+DWpWsitgmSgYmy2dQdWyKC1694ELPqMs/YzUSNozLt8A==", + "dev": true, + "license": "MIT", + "dependencies": { + "whatwg-url": "^5.0.0" + }, + "engines": { + "node": "4.x || >=6.0.0" + }, + "peerDependencies": { + "encoding": "^0.1.0" + }, + "peerDependenciesMeta": { + "encoding": { + "optional": true + } + } + }, + "node_modules/node-gyp": { + "version": "8.4.1", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/node-gyp/-/node-gyp-8.4.1.tgz", + "integrity": "sha512-olTJRgUtAb/hOXG0E93wZDs5YiJlgbXxTwQAFHyNlRsXQnYzUaF2aGgujZbw+hR8aF4ZG/rST57bWMWD16jr9w==", + "dev": true, + "license": "MIT", + "dependencies": { + "env-paths": "^2.2.0", + "glob": "^7.1.4", + "graceful-fs": "^4.2.6", + "make-fetch-happen": "^9.1.0", + "nopt": "^5.0.0", + "npmlog": "^6.0.0", + "rimraf": "^3.0.2", + "semver": "^7.3.5", + "tar": "^6.1.2", + "which": "^2.0.2" + }, + "bin": { + "node-gyp": "bin/node-gyp.js" + }, + "engines": { + "node": ">= 10.12.0" + } + }, + "node_modules/node-gyp/node_modules/are-we-there-yet": { + "version": "3.0.1", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/are-we-there-yet/-/are-we-there-yet-3.0.1.tgz", + "integrity": "sha512-QZW4EDmGwlYur0Yyf/b2uGucHQMa8aFUP7eu9ddR73vvhFyt4V0Vl3QHPcTNJ8l6qYOBdxgXdnBXQrHilfRQBg==", + "deprecated": "This package is no longer supported.", + "dev": true, + "license": "ISC", + "dependencies": { + "delegates": "^1.0.0", + "readable-stream": "^3.6.0" + }, + "engines": { + "node": "^12.13.0 || ^14.15.0 || >=16.0.0" + } + }, + "node_modules/node-gyp/node_modules/gauge": { + "version": "4.0.4", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/gauge/-/gauge-4.0.4.tgz", + "integrity": "sha512-f9m+BEN5jkg6a0fZjleidjN51VE1X+mPFQ2DJ0uv1V39oCLCbsGe6yjbBnp7eK7z/+GAon99a3nHuqbuuthyPg==", + 
"deprecated": "This package is no longer supported.", + "dev": true, + "license": "ISC", + "dependencies": { + "aproba": "^1.0.3 || ^2.0.0", + "color-support": "^1.1.3", + "console-control-strings": "^1.1.0", + "has-unicode": "^2.0.1", + "signal-exit": "^3.0.7", + "string-width": "^4.2.3", + "strip-ansi": "^6.0.1", + "wide-align": "^1.1.5" + }, + "engines": { + "node": "^12.13.0 || ^14.15.0 || >=16.0.0" + } + }, + "node_modules/node-gyp/node_modules/glob": { + "version": "7.2.3", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/glob/-/glob-7.2.3.tgz", + "integrity": "sha512-nFR0zLpU2YCaRxwoCJvL6UvCH2JFyFVIvwTLsIf21AuHlMskA1hhTdk+LlYJtOlYt9v6dvszD2BGRqBL+iQK9Q==", + "deprecated": "Glob versions prior to v9 are no longer supported", + "dev": true, + "license": "ISC", + "dependencies": { + "fs.realpath": "^1.0.0", + "inflight": "^1.0.4", + "inherits": "2", + "minimatch": "^3.1.1", + "once": "^1.3.0", + "path-is-absolute": "^1.0.0" + }, + "engines": { + "node": "*" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/node-gyp/node_modules/npmlog": { + "version": "6.0.2", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/npmlog/-/npmlog-6.0.2.tgz", + "integrity": "sha512-/vBvz5Jfr9dT/aFWd0FIRf+T/Q2WBsLENygUaFUqstqsycmZAP/t5BvFJTK0viFmSUxiUKTUplWy5vt+rvKIxg==", + "deprecated": "This package is no longer supported.", + "dev": true, + "license": "ISC", + "dependencies": { + "are-we-there-yet": "^3.0.0", + "console-control-strings": "^1.1.0", + "gauge": "^4.0.3", + "set-blocking": "^2.0.0" + }, + "engines": { + "node": "^12.13.0 || ^14.15.0 || >=16.0.0" + } + }, + "node_modules/node-gyp/node_modules/readable-stream": { + "version": "3.6.2", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/readable-stream/-/readable-stream-3.6.2.tgz", + "integrity": 
"sha512-9u/sniCrY3D5WdsERHzHE4G2YCXqoG5FTHUiCC4SIbr6XcLZBY05ya9EKjYek9O5xOAwjGq+1JdGBAS7Q9ScoA==", + "dev": true, + "license": "MIT", + "dependencies": { + "inherits": "^2.0.3", + "string_decoder": "^1.1.1", + "util-deprecate": "^1.0.1" + }, + "engines": { + "node": ">= 6" + } + }, + "node_modules/node-releases": { + "version": "2.0.19", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/node-releases/-/node-releases-2.0.19.tgz", + "integrity": "sha512-xxOWJsBKtzAq7DY0J+DTzuz58K8e7sJbdgwkbMWQe8UYB6ekmsQ45q0M/tJDsGaZmbC+l7n57UV8Hl5tHxO9uw==", + "dev": true, + "license": "MIT" + }, + "node_modules/nopt": { + "version": "5.0.0", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/nopt/-/nopt-5.0.0.tgz", + "integrity": "sha512-Tbj67rffqceeLpcRXrT7vKAN8CwfPeIBgM7E6iBkmKLV7bEMwpGgYLGv0jACUsECaa/vuxP0IjEont6umdMgtQ==", + "dev": true, + "license": "ISC", + "dependencies": { + "abbrev": "1" + }, + "bin": { + "nopt": "bin/nopt.js" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/normalize-package-data": { + "version": "2.5.0", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/normalize-package-data/-/normalize-package-data-2.5.0.tgz", + "integrity": "sha512-/5CMN3T0R4XTj4DcGaexo+roZSdSFW/0AOOTROrjxzCG1wrWXEsGbRKevjlIL+ZDE4sZlJr5ED4YW0yqmkK+eA==", + "dev": true, + "license": "BSD-2-Clause", + "dependencies": { + "hosted-git-info": "^2.1.4", + "resolve": "^1.10.0", + "semver": "2 || 3 || 4 || 5", + "validate-npm-package-license": "^3.0.1" + } + }, + "node_modules/normalize-package-data/node_modules/hosted-git-info": { + "version": "2.8.9", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/hosted-git-info/-/hosted-git-info-2.8.9.tgz", + "integrity": "sha512-mxIDAb9Lsm6DoOJ7xH+5+X4y1LU/4Hi50L9C5sIswK3JzULS4bwk1FvjdBgvYR4bzT4tuUQiC15FE2f5HbLvYw==", + "dev": true, + "license": "ISC" + }, + "node_modules/normalize-package-data/node_modules/semver": { 
+ "version": "5.7.2", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/semver/-/semver-5.7.2.tgz", + "integrity": "sha512-cBznnQ9KjJqU67B52RMC65CMarK2600WFnbkcaiwWq3xy/5haFJlshgnpjovMVJ+Hff49d8GEn0b87C5pDQ10g==", + "dev": true, + "license": "ISC", + "bin": { + "semver": "bin/semver" + } + }, + "node_modules/normalize-path": { + "version": "3.0.0", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/normalize-path/-/normalize-path-3.0.0.tgz", + "integrity": "sha512-6eZs5Ls3WtCisHWp9S2GUy8dqkpGi4BVSz3GaqiE6ezub0512ESztXUwUB6C6IKbQkY2Pnb/mD4WYojCRwcwLA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/npm-bundled": { + "version": "1.1.2", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/npm-bundled/-/npm-bundled-1.1.2.tgz", + "integrity": "sha512-x5DHup0SuyQcmL3s7Rx/YQ8sbw/Hzg0rj48eN0dV7hf5cmQq5PXIeioroH3raV1QC1yh3uTYuMThvEQF3iKgGQ==", + "dev": true, + "license": "ISC", + "dependencies": { + "npm-normalize-package-bin": "^1.0.1" + } + }, + "node_modules/npm-install-checks": { + "version": "4.0.0", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/npm-install-checks/-/npm-install-checks-4.0.0.tgz", + "integrity": "sha512-09OmyDkNLYwqKPOnbI8exiOZU2GVVmQp7tgez2BPi5OZC8M82elDAps7sxC4l//uSUtotWqoEIDwjRvWH4qz8w==", + "dev": true, + "license": "BSD-2-Clause", + "dependencies": { + "semver": "^7.1.1" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/npm-normalize-package-bin": { + "version": "1.0.1", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/npm-normalize-package-bin/-/npm-normalize-package-bin-1.0.1.tgz", + "integrity": "sha512-EPfafl6JL5/rU+ot6P3gRSCpPDW5VmIzX959Ob1+ySFUuuYHWHekXpwdUZcKP5C+DS4GEtdJluwBjnsNDl+fSA==", + "dev": true, + "license": "ISC" + }, + "node_modules/npm-package-arg": { + "version": "8.1.5", + "resolved": 
"https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/npm-package-arg/-/npm-package-arg-8.1.5.tgz", + "integrity": "sha512-LhgZrg0n0VgvzVdSm1oiZworPbTxYHUJCgtsJW8mGvlDpxTM1vSJc3m5QZeUkhAHIzbz3VCHd/R4osi1L1Tg/Q==", + "dev": true, + "license": "ISC", + "dependencies": { + "hosted-git-info": "^4.0.1", + "semver": "^7.3.4", + "validate-npm-package-name": "^3.0.0" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/npm-packlist": { + "version": "3.0.0", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/npm-packlist/-/npm-packlist-3.0.0.tgz", + "integrity": "sha512-L/cbzmutAwII5glUcf2DBRNY/d0TFd4e/FnaZigJV6JD85RHZXJFGwCndjMWiiViiWSsWt3tiOLpI3ByTnIdFQ==", + "dev": true, + "license": "ISC", + "dependencies": { + "glob": "^7.1.6", + "ignore-walk": "^4.0.1", + "npm-bundled": "^1.1.1", + "npm-normalize-package-bin": "^1.0.1" + }, + "bin": { + "npm-packlist": "bin/index.js" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/npm-packlist/node_modules/glob": { + "version": "7.2.3", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/glob/-/glob-7.2.3.tgz", + "integrity": "sha512-nFR0zLpU2YCaRxwoCJvL6UvCH2JFyFVIvwTLsIf21AuHlMskA1hhTdk+LlYJtOlYt9v6dvszD2BGRqBL+iQK9Q==", + "deprecated": "Glob versions prior to v9 are no longer supported", + "dev": true, + "license": "ISC", + "dependencies": { + "fs.realpath": "^1.0.0", + "inflight": "^1.0.4", + "inherits": "2", + "minimatch": "^3.1.1", + "once": "^1.3.0", + "path-is-absolute": "^1.0.0" + }, + "engines": { + "node": "*" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/npm-pick-manifest": { + "version": "6.1.1", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/npm-pick-manifest/-/npm-pick-manifest-6.1.1.tgz", + "integrity": "sha512-dBsdBtORT84S8V8UTad1WlUyKIY9iMsAmqxHbLdeEeBNMLQDlDWWra3wYUx9EBEIiG/YwAy0XyNHDd2goAsfuA==", + "dev": true, + "license": "ISC", + 
"dependencies": { + "npm-install-checks": "^4.0.0", + "npm-normalize-package-bin": "^1.0.1", + "npm-package-arg": "^8.1.2", + "semver": "^7.3.4" + } + }, + "node_modules/npm-registry-fetch": { + "version": "12.0.2", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/npm-registry-fetch/-/npm-registry-fetch-12.0.2.tgz", + "integrity": "sha512-Df5QT3RaJnXYuOwtXBXS9BWs+tHH2olvkCLh6jcR/b/u3DvPMlp3J0TvvYwplPKxHMOwfg287PYih9QqaVFoKA==", + "dev": true, + "license": "ISC", + "dependencies": { + "make-fetch-happen": "^10.0.1", + "minipass": "^3.1.6", + "minipass-fetch": "^1.4.1", + "minipass-json-stream": "^1.0.1", + "minizlib": "^2.1.2", + "npm-package-arg": "^8.1.5" + }, + "engines": { + "node": "^12.13.0 || ^14.15.0 || >=16" + } + }, + "node_modules/npm-registry-fetch/node_modules/@npmcli/fs": { + "version": "2.1.2", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/@npmcli/fs/-/fs-2.1.2.tgz", + "integrity": "sha512-yOJKRvohFOaLqipNtwYB9WugyZKhC/DZC4VYPmpaCzDBrA8YpK3qHZ8/HGscMnE4GqbkLNuVcCnxkeQEdGt6LQ==", + "dev": true, + "license": "ISC", + "dependencies": { + "@gar/promisify": "^1.1.3", + "semver": "^7.3.5" + }, + "engines": { + "node": "^12.13.0 || ^14.15.0 || >=16.0.0" + } + }, + "node_modules/npm-registry-fetch/node_modules/@npmcli/move-file": { + "version": "2.0.1", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/@npmcli/move-file/-/move-file-2.0.1.tgz", + "integrity": "sha512-mJd2Z5TjYWq/ttPLLGqArdtnC74J6bOzg4rMDnN+p1xTacZ2yPRCk2y0oSWQtygLR9YVQXgOcONrwtnk3JupxQ==", + "deprecated": "This functionality has been moved to @npmcli/fs", + "dev": true, + "license": "MIT", + "dependencies": { + "mkdirp": "^1.0.4", + "rimraf": "^3.0.2" + }, + "engines": { + "node": "^12.13.0 || ^14.15.0 || >=16.0.0" + } + }, + "node_modules/npm-registry-fetch/node_modules/@tootallnate/once": { + "version": "2.0.0", + "resolved": 
"https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/@tootallnate/once/-/once-2.0.0.tgz", + "integrity": "sha512-XCuKFP5PS55gnMVu3dty8KPatLqUoy/ZYzDzAGCQ8JNFCkLXzmI7vNHCR+XpbZaMWQK/vQubr7PkYq8g470J/A==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 10" + } + }, + "node_modules/npm-registry-fetch/node_modules/cacache": { + "version": "16.1.3", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/cacache/-/cacache-16.1.3.tgz", + "integrity": "sha512-/+Emcj9DAXxX4cwlLmRI9c166RuL3w30zp4R7Joiv2cQTtTtA+jeuCAjH3ZlGnYS3tKENSrKhAzVVP9GVyzeYQ==", + "dev": true, + "license": "ISC", + "dependencies": { + "@npmcli/fs": "^2.1.0", + "@npmcli/move-file": "^2.0.0", + "chownr": "^2.0.0", + "fs-minipass": "^2.1.0", + "glob": "^8.0.1", + "infer-owner": "^1.0.4", + "lru-cache": "^7.7.1", + "minipass": "^3.1.6", + "minipass-collect": "^1.0.2", + "minipass-flush": "^1.0.5", + "minipass-pipeline": "^1.2.4", + "mkdirp": "^1.0.4", + "p-map": "^4.0.0", + "promise-inflight": "^1.0.1", + "rimraf": "^3.0.2", + "ssri": "^9.0.0", + "tar": "^6.1.11", + "unique-filename": "^2.0.0" + }, + "engines": { + "node": "^12.13.0 || ^14.15.0 || >=16.0.0" + } + }, + "node_modules/npm-registry-fetch/node_modules/http-proxy-agent": { + "version": "5.0.0", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/http-proxy-agent/-/http-proxy-agent-5.0.0.tgz", + "integrity": "sha512-n2hY8YdoRE1i7r6M0w9DIw5GgZN0G25P8zLCRQ8rjXtTU3vsNFBI/vWK/UIeE6g5MUUz6avwAPXmL6Fy9D/90w==", + "dev": true, + "license": "MIT", + "dependencies": { + "@tootallnate/once": "2", + "agent-base": "6", + "debug": "4" + }, + "engines": { + "node": ">= 6" + } + }, + "node_modules/npm-registry-fetch/node_modules/lru-cache": { + "version": "7.18.3", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/lru-cache/-/lru-cache-7.18.3.tgz", + "integrity": 
"sha512-jumlc0BIUrS3qJGgIkWZsyfAM7NCWiBcCDhnd+3NNM5KbBmLTgHVfWBcg6W+rLUsIpzpERPsvwUP7CckAQSOoA==", + "dev": true, + "license": "ISC", + "engines": { + "node": ">=12" + } + }, + "node_modules/npm-registry-fetch/node_modules/make-fetch-happen": { + "version": "10.2.1", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/make-fetch-happen/-/make-fetch-happen-10.2.1.tgz", + "integrity": "sha512-NgOPbRiaQM10DYXvN3/hhGVI2M5MtITFryzBGxHM5p4wnFxsVCbxkrBrDsk+EZ5OB4jEOT7AjDxtdF+KVEFT7w==", + "dev": true, + "license": "ISC", + "dependencies": { + "agentkeepalive": "^4.2.1", + "cacache": "^16.1.0", + "http-cache-semantics": "^4.1.0", + "http-proxy-agent": "^5.0.0", + "https-proxy-agent": "^5.0.0", + "is-lambda": "^1.0.1", + "lru-cache": "^7.7.1", + "minipass": "^3.1.6", + "minipass-collect": "^1.0.2", + "minipass-fetch": "^2.0.3", + "minipass-flush": "^1.0.5", + "minipass-pipeline": "^1.2.4", + "negotiator": "^0.6.3", + "promise-retry": "^2.0.1", + "socks-proxy-agent": "^7.0.0", + "ssri": "^9.0.0" + }, + "engines": { + "node": "^12.13.0 || ^14.15.0 || >=16.0.0" + } + }, + "node_modules/npm-registry-fetch/node_modules/make-fetch-happen/node_modules/minipass-fetch": { + "version": "2.1.2", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/minipass-fetch/-/minipass-fetch-2.1.2.tgz", + "integrity": "sha512-LT49Zi2/WMROHYoqGgdlQIZh8mLPZmOrN2NdJjMXxYe4nkN6FUyuPuOAOedNJDrx0IRGg9+4guZewtp8hE6TxA==", + "dev": true, + "license": "MIT", + "dependencies": { + "minipass": "^3.1.6", + "minipass-sized": "^1.0.3", + "minizlib": "^2.1.2" + }, + "engines": { + "node": "^12.13.0 || ^14.15.0 || >=16.0.0" + }, + "optionalDependencies": { + "encoding": "^0.1.13" + } + }, + "node_modules/npm-registry-fetch/node_modules/socks-proxy-agent": { + "version": "7.0.0", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/socks-proxy-agent/-/socks-proxy-agent-7.0.0.tgz", + "integrity": 
"sha512-Fgl0YPZ902wEsAyiQ+idGd1A7rSFx/ayC1CQVMw5P+EQx2V0SgpGtf6OKFhVjPflPUl9YMmEOnmfjCdMUsygww==", + "dev": true, + "license": "MIT", + "dependencies": { + "agent-base": "^6.0.2", + "debug": "^4.3.3", + "socks": "^2.6.2" + }, + "engines": { + "node": ">= 10" + } + }, + "node_modules/npm-registry-fetch/node_modules/ssri": { + "version": "9.0.1", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/ssri/-/ssri-9.0.1.tgz", + "integrity": "sha512-o57Wcn66jMQvfHG1FlYbWeZWW/dHZhJXjpIcTfXldXEk5nz5lStPo3mK0OJQfGR3RbZUlbISexbljkJzuEj/8Q==", + "dev": true, + "license": "ISC", + "dependencies": { + "minipass": "^3.1.1" + }, + "engines": { + "node": "^12.13.0 || ^14.15.0 || >=16.0.0" + } + }, + "node_modules/npm-registry-fetch/node_modules/unique-filename": { + "version": "2.0.1", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/unique-filename/-/unique-filename-2.0.1.tgz", + "integrity": "sha512-ODWHtkkdx3IAR+veKxFV+VBkUMcN+FaqzUUd7IZzt+0zhDZFPFxhlqwPF3YQvMHx1TD0tdgYl+kuPnJ8E6ql7A==", + "dev": true, + "license": "ISC", + "dependencies": { + "unique-slug": "^3.0.0" + }, + "engines": { + "node": "^12.13.0 || ^14.15.0 || >=16.0.0" + } + }, + "node_modules/npm-registry-fetch/node_modules/unique-slug": { + "version": "3.0.0", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/unique-slug/-/unique-slug-3.0.0.tgz", + "integrity": "sha512-8EyMynh679x/0gqE9fT9oilG+qEt+ibFyqjuVTsZn1+CMxH+XLlpvr2UZx4nVcCwTpx81nICr2JQFkM+HPLq4w==", + "dev": true, + "license": "ISC", + "dependencies": { + "imurmurhash": "^0.1.4" + }, + "engines": { + "node": "^12.13.0 || ^14.15.0 || >=16.0.0" + } + }, + "node_modules/npm-run-path": { + "version": "4.0.1", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/npm-run-path/-/npm-run-path-4.0.1.tgz", + "integrity": "sha512-S48WzZW777zhNIrn7gxOlISNAqi9ZC/uQFnRdbeIHhZhCA6UqpkOT8T1G7BvfdgP4Er8gF4sUbaS0i7QvIfCWw==", + "dev": true, + "license": 
"MIT", + "dependencies": { + "path-key": "^3.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/npmlog": { + "version": "5.0.1", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/npmlog/-/npmlog-5.0.1.tgz", + "integrity": "sha512-AqZtDUWOMKs1G/8lwylVjrdYgqA4d9nu8hc+0gzRxlDb1I10+FHBGMXs6aiQHFdCUUlqH99MUMuLfzWDNDtfxw==", + "deprecated": "This package is no longer supported.", + "dev": true, + "license": "ISC", + "dependencies": { + "are-we-there-yet": "^2.0.0", + "console-control-strings": "^1.1.0", + "gauge": "^3.0.0", + "set-blocking": "^2.0.0" + } + }, + "node_modules/object-assign": { + "version": "4.1.1", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/object-assign/-/object-assign-4.1.1.tgz", + "integrity": "sha512-rJgTQnkUnH1sFw8yT6VSU3zD3sWmu6sZhIseY8VX+GRu3P6F7Fu+JNDoXfklElbLJSnc3FUQHVe4cU5hj+BcUg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/object-is": { + "version": "1.1.6", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/object-is/-/object-is-1.1.6.tgz", + "integrity": "sha512-F8cZ+KfGlSGi09lJT7/Nd6KJZ9ygtvYC0/UYYLI9nmQKLMnydpB9yvbv9K1uSkEu7FU9vYPmVwLg328tX+ot3Q==", + "license": "MIT", + "dependencies": { + "call-bind": "^1.0.7", + "define-properties": "^1.2.1" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/object-keys": { + "version": "1.1.1", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/object-keys/-/object-keys-1.1.1.tgz", + "integrity": "sha512-NuAESUOUMrlIXOfHKzD6bpPu3tYt3xvjNdRIQ+FeT0lNb4K8WR70CaDxhuNguS2XG+GjkyMwOzsN5ZktImfhLA==", + "license": "MIT", + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/object.assign": { + "version": "4.1.7", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/object.assign/-/object.assign-4.1.7.tgz", 
+ "integrity": "sha512-nK28WOo+QIjBkDduTINE4JkF/UJJKyf2EJxvJKfblDpyg0Q+pkOHNTL0Qwy6NP6FhE/EnzV73BxxqcJaXY9anw==", + "license": "MIT", + "dependencies": { + "call-bind": "^1.0.8", + "call-bound": "^1.0.3", + "define-properties": "^1.2.1", + "es-object-atoms": "^1.0.0", + "has-symbols": "^1.1.0", + "object-keys": "^1.1.1" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/once": { + "version": "1.4.0", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/once/-/once-1.4.0.tgz", + "integrity": "sha512-lNaJgI+2Q5URQBkccEKHTQOPaXdUxnZZElQTZY0MFUAuaEqe1E+Nyvgdz/aIyNi6Z9MzO5dv1H8n58/GELp3+w==", + "dev": true, + "license": "ISC", + "dependencies": { + "wrappy": "1" + } + }, + "node_modules/onetime": { + "version": "5.1.2", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/onetime/-/onetime-5.1.2.tgz", + "integrity": "sha512-kbpaSSGJTWdAY5KPVeMOKXSrPtr8C8C7wodJbcsd51jRnmD+GZu8Y0VoU6Dm5Z4vWr0Ig/1NKuWRKf7j5aaYSg==", + "dev": true, + "license": "MIT", + "dependencies": { + "mimic-fn": "^2.1.0" + }, + "engines": { + "node": ">=6" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/ora": { + "version": "5.4.1", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/ora/-/ora-5.4.1.tgz", + "integrity": "sha512-5b6Y85tPxZZ7QytO+BQzysW31HJku27cRIlkbAXaNx+BdcVi+LlRFmVXzeF6a7JCwJpyw5c4b+YSVImQIrBpuQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "bl": "^4.1.0", + "chalk": "^4.1.0", + "cli-cursor": "^3.1.0", + "cli-spinners": "^2.5.0", + "is-interactive": "^1.0.0", + "is-unicode-supported": "^0.1.0", + "log-symbols": "^4.1.0", + "strip-ansi": "^6.0.0", + "wcwidth": "^1.0.1" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/os-tmpdir": { + "version": "1.0.2", + "resolved": 
"https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/os-tmpdir/-/os-tmpdir-1.0.2.tgz", + "integrity": "sha512-D2FR03Vir7FIu45XBY20mTb+/ZSWB00sjU9jdQXt83gDrI4Ztz5Fs7/yy74g2N5SVQY4xY1qDr4rNddwYRVX0g==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/p-finally": { + "version": "1.0.0", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/p-finally/-/p-finally-1.0.0.tgz", + "integrity": "sha512-LICb2p9CB7FS+0eR1oqWnHhp0FljGLZCWBE9aix0Uye9W8LTQPwMTYVGWQWIw9RdQiDg4+epXQODwIYJtSJaow==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=4" + } + }, + "node_modules/p-limit": { + "version": "2.3.0", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/p-limit/-/p-limit-2.3.0.tgz", + "integrity": "sha512-//88mFWSJx8lxCzwdAABTJL2MyWB12+eIY7MDL2SqLmAkeKU9qxRvWuSyTjm3FUmpBEMuFfckAIqEaVGUDxb6w==", + "dev": true, + "license": "MIT", + "dependencies": { + "p-try": "^2.0.0" + }, + "engines": { + "node": ">=6" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/p-locate": { + "version": "4.1.0", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/p-locate/-/p-locate-4.1.0.tgz", + "integrity": "sha512-R79ZZ/0wAxKGu3oYMlz8jy/kbhsNrS7SKZ7PxEHBgJ5+F2mtFW2fK2cOtBh1cHYkQsbzFV7I+EoRKe6Yt0oK7A==", + "dev": true, + "license": "MIT", + "dependencies": { + "p-limit": "^2.2.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/p-map": { + "version": "4.0.0", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/p-map/-/p-map-4.0.0.tgz", + "integrity": "sha512-/bjOqmgETBYB5BoEeGVea8dmvHb2m9GLy1E9W43yeyfP6QQCZGFNa+XRceJEuDB6zqr+gKpIAmlLebMpykw/MQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "aggregate-error": "^3.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + 
"node_modules/p-queue": { + "version": "6.6.2", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/p-queue/-/p-queue-6.6.2.tgz", + "integrity": "sha512-RwFpb72c/BhQLEXIZ5K2e+AhgNVmIejGlTgiB9MzZ0e93GRvqZ7uSi0dvRF7/XIXDeNkra2fNHBxTyPDGySpjQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "eventemitter3": "^4.0.4", + "p-timeout": "^3.2.0" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/p-timeout": { + "version": "3.2.0", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/p-timeout/-/p-timeout-3.2.0.tgz", + "integrity": "sha512-rhIwUycgwwKcP9yTOOFK/AKsAopjjCakVqLHePO3CC6Mir1Z99xT+R63jZxAT5lFZLa2inS5h+ZS2GvR99/FBg==", + "dev": true, + "license": "MIT", + "dependencies": { + "p-finally": "^1.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/p-transform": { + "version": "1.3.0", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/p-transform/-/p-transform-1.3.0.tgz", + "integrity": "sha512-UJKdSzgd3KOnXXAtqN5+/eeHcvTn1hBkesEmElVgvO/NAYcxAvmjzIGmnNd3Tb/gRAvMBdNRFD4qAWdHxY6QXg==", + "dev": true, + "license": "Apache-2.0", + "dependencies": { + "debug": "^4.3.2", + "p-queue": "^6.6.2" + }, + "engines": { + "node": ">=12.10.0" + } + }, + "node_modules/p-try": { + "version": "2.2.0", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/p-try/-/p-try-2.2.0.tgz", + "integrity": "sha512-R4nPAVTAU0B9D35/Gk3uJf/7XYbQcyohSKdvAxIRSNghFl4e71hVoGnBNQz9cWaXxO2I10KTC+3jMdvvoKw6dQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "node_modules/package-json-from-dist": { + "version": "1.0.1", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/package-json-from-dist/-/package-json-from-dist-1.0.1.tgz", + "integrity": 
"sha512-UEZIS3/by4OC8vL3P2dTXRETpebLI2NiI5vIrjaD/5UtrkFX/tNbwjTSRAGC/+7CAo2pIcBaRgWmcBBHcsaCIw==", + "dev": true, + "license": "BlueOak-1.0.0" + }, + "node_modules/pacote": { + "version": "12.0.3", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/pacote/-/pacote-12.0.3.tgz", + "integrity": "sha512-CdYEl03JDrRO3x18uHjBYA9TyoW8gy+ThVcypcDkxPtKlw76e4ejhYB6i9lJ+/cebbjpqPW/CijjqxwDTts8Ow==", + "dev": true, + "license": "ISC", + "dependencies": { + "@npmcli/git": "^2.1.0", + "@npmcli/installed-package-contents": "^1.0.6", + "@npmcli/promise-spawn": "^1.2.0", + "@npmcli/run-script": "^2.0.0", + "cacache": "^15.0.5", + "chownr": "^2.0.0", + "fs-minipass": "^2.1.0", + "infer-owner": "^1.0.4", + "minipass": "^3.1.3", + "mkdirp": "^1.0.3", + "npm-package-arg": "^8.0.1", + "npm-packlist": "^3.0.0", + "npm-pick-manifest": "^6.0.0", + "npm-registry-fetch": "^12.0.0", + "promise-retry": "^2.0.1", + "read-package-json-fast": "^2.0.1", + "rimraf": "^3.0.2", + "ssri": "^8.0.1", + "tar": "^6.1.0" + }, + "bin": { + "pacote": "lib/bin.js" + }, + "engines": { + "node": "^12.13.0 || ^14.15.0 || >=16" + } + }, + "node_modules/pako": { + "version": "1.0.11", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/pako/-/pako-1.0.11.tgz", + "integrity": "sha512-4hLB8Py4zZce5s4yd9XzopqwVv/yGNhV1Bl8NTmCq1763HeK2+EwVTv+leGeL13Dnh2wfbqowVPXCIO0z4taYw==", + "license": "(MIT AND Zlib)" + }, + "node_modules/parse-conflict-json": { + "version": "2.0.2", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/parse-conflict-json/-/parse-conflict-json-2.0.2.tgz", + "integrity": "sha512-jDbRGb00TAPFsKWCpZZOT93SxVP9nONOSgES3AevqRq/CHvavEBvKAjxX9p5Y5F0RZLxH9Ufd9+RwtCsa+lFDA==", + "dev": true, + "license": "ISC", + "dependencies": { + "json-parse-even-better-errors": "^2.3.1", + "just-diff": "^5.0.1", + "just-diff-apply": "^5.2.0" + }, + "engines": { + "node": "^12.13.0 || ^14.15.0 || >=16.0.0" + } + }, + 
"node_modules/parse-json": { + "version": "5.2.0", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/parse-json/-/parse-json-5.2.0.tgz", + "integrity": "sha512-ayCKvm/phCGxOkYRSCM82iDwct8/EonSEgCSxWxD7ve6jHggsFl4fZVQBPRNgQoKiuV/odhFrGzQXZwbifC8Rg==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/code-frame": "^7.0.0", + "error-ex": "^1.3.1", + "json-parse-even-better-errors": "^2.3.0", + "lines-and-columns": "^1.1.6" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/path-exists": { + "version": "4.0.0", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/path-exists/-/path-exists-4.0.0.tgz", + "integrity": "sha512-ak9Qy5Q7jYb2Wwcey5Fpvg2KoAc/ZIhLSLOSBmRmygPsGwkVVt0fZa0qrtMz+m6tJTAHfZQ8FnmB4MG4LWy7/w==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/path-is-absolute": { + "version": "1.0.1", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/path-is-absolute/-/path-is-absolute-1.0.1.tgz", + "integrity": "sha512-AVbw3UJ2e9bq64vSaS9Am0fje1Pa8pbGqTTsmXfaIiMpnr5DlDhfJOuLj9Sf95ZPVDAUerDfEk88MPmPe7UCQg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/path-key": { + "version": "3.1.1", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/path-key/-/path-key-3.1.1.tgz", + "integrity": "sha512-ojmeN0qd+y0jszEtoY48r0Peq5dwMEkIlCOu6Q5f41lfkswXuKtYrhgoTpLnyIcHm24Uhqx+5Tqm2InSwLhE6Q==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/path-parse": { + "version": "1.0.7", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/path-parse/-/path-parse-1.0.7.tgz", + "integrity": "sha512-LDJzPVEEEPR+y48z93A0Ed0yXb8pAByGWo/k5YYdYgpY2/2EsOsksJrq7lOHxryrVOn1ejG6oAp8ahvOIQD8sw==", + "dev": true, + "license": 
"MIT" + }, + "node_modules/path-scurry": { + "version": "1.11.1", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/path-scurry/-/path-scurry-1.11.1.tgz", + "integrity": "sha512-Xa4Nw17FS9ApQFJ9umLiJS4orGjm7ZzwUrwamcGQuHSzDyth9boKDaycYdDcZDuqYATXw4HFXgaqWTctW/v1HA==", + "dev": true, + "license": "BlueOak-1.0.0", + "dependencies": { + "lru-cache": "^10.2.0", + "minipass": "^5.0.0 || ^6.0.2 || ^7.0.0" + }, + "engines": { + "node": ">=16 || 14 >=14.18" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/path-scurry/node_modules/lru-cache": { + "version": "10.4.3", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/lru-cache/-/lru-cache-10.4.3.tgz", + "integrity": "sha512-JNAzZcXrCt42VGLuYz0zfAzDfAvJWW6AfYlDBQyDV5DClI2m5sAmK+OIO7s59XfsRsWHp02jAJrRadPRGTt6SQ==", + "dev": true, + "license": "ISC" + }, + "node_modules/path-scurry/node_modules/minipass": { + "version": "7.1.2", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/minipass/-/minipass-7.1.2.tgz", + "integrity": "sha512-qOOzS1cBTWYF4BH8fVePDBOO9iptMnGUEZwNc/cMWnTV2nVLZ7VoNWEPHkYczZA0pdoA7dl6e7FL659nX9S2aw==", + "dev": true, + "license": "ISC", + "engines": { + "node": ">=16 || 14 >=14.17" + } + }, + "node_modules/path-type": { + "version": "4.0.0", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/path-type/-/path-type-4.0.0.tgz", + "integrity": "sha512-gDKb8aZMDeD/tZWs9P6+q0J9Mwkdl6xMV8TjnGP3qJVJ06bdMgkbBlLU8IdfOsIsFz2BW1rNVT3XuNEl8zPAvw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/picocolors": { + "version": "1.1.1", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/picocolors/-/picocolors-1.1.1.tgz", + "integrity": "sha512-xceH2snhtb5M9liqDsmEw56le376mTZkEX/jEb/RxNFyegNul7eNslCXP9FDj/Lcu0X8KEyMceP2ntpaHrDEVA==", + "dev": true, + "license": "ISC" + }, + 
"node_modules/picomatch": { + "version": "2.3.1", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/picomatch/-/picomatch-2.3.1.tgz", + "integrity": "sha512-JU3teHTNjmE2VCGFzuY8EXzCDVwEqB2a8fsIvwaStHhAWJEeVd1o1QD80CU6+ZdEXXSLbSsuLwJjkCBWqRQUVA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8.6" + }, + "funding": { + "url": "https://github.com/sponsors/jonschlinkert" + } + }, + "node_modules/pify": { + "version": "2.3.0", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/pify/-/pify-2.3.0.tgz", + "integrity": "sha512-udgsAY+fTnvv7kI7aaxbqwWNb0AHiB0qBO89PZKPkoTmGOgdbrHDKD+0B2X4uTfJ/FT1R09r9gTsjUjNJotuog==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/pkg-dir": { + "version": "4.2.0", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/pkg-dir/-/pkg-dir-4.2.0.tgz", + "integrity": "sha512-HRDzbaKjC+AOWVXxAU/x54COGeIv9eb+6CkDSQoNTt4XyWoIJvuPsXizxu/Fr23EiekbtZwmh1IcIG/l/a10GQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "find-up": "^4.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/possible-typed-array-names": { + "version": "1.1.0", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/possible-typed-array-names/-/possible-typed-array-names-1.1.0.tgz", + "integrity": "sha512-/+5VFTchJDoVj3bhoqi6UeymcD00DAwb1nJwamzPvHEszJ4FpF6SNNbUbOS8yI56qHzdV8eK0qEfOSiodkTdxg==", + "license": "MIT", + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/preferred-pm": { + "version": "3.1.4", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/preferred-pm/-/preferred-pm-3.1.4.tgz", + "integrity": "sha512-lEHd+yEm22jXdCphDrkvIJQU66EuLojPPtvZkpKIkiD+l0DMThF/niqZKJSoU8Vl7iuvtmzyMhir9LdVy5WMnA==", + "dev": true, + "license": "MIT", + "dependencies": { + "find-up": "^5.0.0", + "find-yarn-workspace-root2": "1.2.16", + "path-exists": 
"^4.0.0", + "which-pm": "^2.2.0" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/preferred-pm/node_modules/find-up": { + "version": "5.0.0", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/find-up/-/find-up-5.0.0.tgz", + "integrity": "sha512-78/PXT1wlLLDgTzDs7sjq9hzz0vXD+zn+7wypEe4fXQxCmdmqfGsEPQxmiCSQI3ajFV91bVSsvNtrJRiW6nGng==", + "dev": true, + "license": "MIT", + "dependencies": { + "locate-path": "^6.0.0", + "path-exists": "^4.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/preferred-pm/node_modules/locate-path": { + "version": "6.0.0", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/locate-path/-/locate-path-6.0.0.tgz", + "integrity": "sha512-iPZK6eYjbxRu3uB4/WZ3EsEIMJFMqAoopl3R+zuq0UjcAm/MO6KCweDgPfP3elTztoKP3KtnVHxTn2NHBSDVUw==", + "dev": true, + "license": "MIT", + "dependencies": { + "p-locate": "^5.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/preferred-pm/node_modules/p-limit": { + "version": "3.1.0", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/p-limit/-/p-limit-3.1.0.tgz", + "integrity": "sha512-TYOanM3wGwNGsZN2cVTYPArw454xnXj5qmWF1bEoAc4+cU/ol7GVh7odevjp1FNHduHc3KZMcFduxU5Xc6uJRQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "yocto-queue": "^0.1.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/preferred-pm/node_modules/p-locate": { + "version": "5.0.0", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/p-locate/-/p-locate-5.0.0.tgz", + "integrity": "sha512-LaNjtRWUBY++zB5nE/NwcaoMylSPk+S+ZHNB1TzdbMJMny6dynpAGt7X/tl/QYq3TIeE6nxHppbo2LGymrG5Pw==", + "dev": true, + "license": "MIT", + "dependencies": { + "p-limit": "^3.0.2" + 
}, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/pretty-bytes": { + "version": "5.6.0", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/pretty-bytes/-/pretty-bytes-5.6.0.tgz", + "integrity": "sha512-FFw039TmrBqFK8ma/7OL3sDz/VytdtJr044/QUJtH0wK9lb9jLq9tJyIxUwtQJHwar2BqtiA4iCWSwo9JLkzFg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/proc-log": { + "version": "1.0.0", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/proc-log/-/proc-log-1.0.0.tgz", + "integrity": "sha512-aCk8AO51s+4JyuYGg3Q/a6gnrlDO09NpVWePtjp7xwphcoQ04x5WAfCyugcsbLooWcMJ87CLkD4+604IckEdhg==", + "dev": true, + "license": "ISC" + }, + "node_modules/process": { + "version": "0.11.10", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/process/-/process-0.11.10.tgz", + "integrity": "sha512-cdGef/drWFoydD1JsMzuFf8100nZl+GT+yacc2bEced5f9Rjk4z+WtFUTBu9PhOi9j/jfmBPu0mMEY4wIdAF8A==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 0.6.0" + } + }, + "node_modules/process-nextick-args": { + "version": "2.0.1", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/process-nextick-args/-/process-nextick-args-2.0.1.tgz", + "integrity": "sha512-3ouUOpQhtgrbOa17J7+uxOTpITYWaGP7/AhoR3+A+/1e9skrzelGi/dXzEYyvbxubEF6Wn2ypscTKiKJFFn1ag==", + "dev": true, + "license": "MIT" + }, + "node_modules/promise-all-reject-late": { + "version": "1.0.1", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/promise-all-reject-late/-/promise-all-reject-late-1.0.1.tgz", + "integrity": "sha512-vuf0Lf0lOxyQREH7GDIOUMLS7kz+gs8i6B+Yi8dC68a2sychGrHTJYghMBD6k7eUcH0H5P73EckCA48xijWqXw==", + "dev": true, + "license": "ISC", + "funding": { + "url": 
"https://github.com/sponsors/isaacs" + } + }, + "node_modules/promise-call-limit": { + "version": "1.0.2", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/promise-call-limit/-/promise-call-limit-1.0.2.tgz", + "integrity": "sha512-1vTUnfI2hzui8AEIixbdAJlFY4LFDXqQswy/2eOlThAscXCY4It8FdVuI0fMJGAB2aWGbdQf/gv0skKYXmdrHA==", + "dev": true, + "license": "ISC", + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/promise-inflight": { + "version": "1.0.1", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/promise-inflight/-/promise-inflight-1.0.1.tgz", + "integrity": "sha512-6zWPyEOFaQBJYcGMHBKTKJ3u6TBsnMFOIZSa6ce1e/ZrrsOlnHRHbabMjLiBYKp+n44X9eUI6VUPaukCXHuG4g==", + "dev": true, + "license": "ISC" + }, + "node_modules/promise-retry": { + "version": "2.0.1", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/promise-retry/-/promise-retry-2.0.1.tgz", + "integrity": "sha512-y+WKFlBR8BGXnsNlIHFGPZmyDf3DFMoLhaflAnyZgV6rG6xu+JwesTo2Q9R6XwYmtmwAFCkAk3e35jEdoeh/3g==", + "dev": true, + "license": "MIT", + "dependencies": { + "err-code": "^2.0.2", + "retry": "^0.12.0" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/queue-microtask": { + "version": "1.2.3", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/queue-microtask/-/queue-microtask-1.2.3.tgz", + "integrity": "sha512-NuaNSa6flKT5JaSYQzJok04JzTL1CA6aGhv5rfLW3PgqA+M2ChpZQnAC8h8i4ZFkBS8X5RqkDBHA7r4hej3K9A==", + "dev": true, + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ], + "license": "MIT" + }, + "node_modules/randombytes": { + "version": "2.1.0", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/randombytes/-/randombytes-2.1.0.tgz", + 
"integrity": "sha512-vYl3iOX+4CKUWuxGi9Ukhie6fsqXqS9FE2Zaic4tNFD2N2QQaXOMFbuKK4QmDHC0JO6B1Zp41J0LpT0oR68amQ==", + "dev": true, + "license": "MIT", + "peer": true, + "dependencies": { + "safe-buffer": "^5.1.0" + } + }, + "node_modules/read-cmd-shim": { + "version": "3.0.1", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/read-cmd-shim/-/read-cmd-shim-3.0.1.tgz", + "integrity": "sha512-kEmDUoYf/CDy8yZbLTmhB1X9kkjf9Q80PCNsDMb7ufrGd6zZSQA1+UyjrO+pZm5K/S4OXCWJeiIt1JA8kAsa6g==", + "dev": true, + "license": "ISC", + "engines": { + "node": "^12.13.0 || ^14.15.0 || >=16.0.0" + } + }, + "node_modules/read-package-json": { + "version": "6.0.4", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/read-package-json/-/read-package-json-6.0.4.tgz", + "integrity": "sha512-AEtWXYfopBj2z5N5PbkAOeNHRPUg5q+Nen7QLxV8M2zJq1ym6/lCz3fYNTCXe19puu2d06jfHhrP7v/S2PtMMw==", + "deprecated": "This package is no longer supported. Please use @npmcli/package-json instead.", + "dev": true, + "license": "ISC", + "dependencies": { + "glob": "^10.2.2", + "json-parse-even-better-errors": "^3.0.0", + "normalize-package-data": "^5.0.0", + "npm-normalize-package-bin": "^3.0.0" + }, + "engines": { + "node": "^14.17.0 || ^16.13.0 || >=18.0.0" + } + }, + "node_modules/read-package-json-fast": { + "version": "2.0.3", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/read-package-json-fast/-/read-package-json-fast-2.0.3.tgz", + "integrity": "sha512-W/BKtbL+dUjTuRL2vziuYhp76s5HZ9qQhd/dKfWIZveD0O40453QNyZhC0e63lqZrAQ4jiOapVoeJ7JrszenQQ==", + "dev": true, + "license": "ISC", + "dependencies": { + "json-parse-even-better-errors": "^2.3.0", + "npm-normalize-package-bin": "^1.0.1" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/read-package-json/node_modules/brace-expansion": { + "version": "2.0.1", + "resolved": 
"https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/brace-expansion/-/brace-expansion-2.0.1.tgz", + "integrity": "sha512-XnAIvQ8eM+kC6aULx6wuQiwVsnzsi9d3WxzV3FpWTGA19F621kwdbsAcFKXgKUHZWsy+mY6iL1sHTxWEFCytDA==", + "dev": true, + "license": "MIT", + "dependencies": { + "balanced-match": "^1.0.0" + } + }, + "node_modules/read-package-json/node_modules/glob": { + "version": "10.4.5", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/glob/-/glob-10.4.5.tgz", + "integrity": "sha512-7Bv8RF0k6xjo7d4A/PxYLbUCfb6c+Vpd2/mB2yRDlew7Jb5hEXiCD9ibfO7wpk8i4sevK6DFny9h7EYbM3/sHg==", + "dev": true, + "license": "ISC", + "dependencies": { + "foreground-child": "^3.1.0", + "jackspeak": "^3.1.2", + "minimatch": "^9.0.4", + "minipass": "^7.1.2", + "package-json-from-dist": "^1.0.0", + "path-scurry": "^1.11.1" + }, + "bin": { + "glob": "dist/esm/bin.mjs" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/read-package-json/node_modules/hosted-git-info": { + "version": "6.1.3", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/hosted-git-info/-/hosted-git-info-6.1.3.tgz", + "integrity": "sha512-HVJyzUrLIL1c0QmviVh5E8VGyUS7xCFPS6yydaVd1UegW+ibV/CohqTH9MkOLDp5o+rb82DMo77PTuc9F/8GKw==", + "dev": true, + "license": "ISC", + "dependencies": { + "lru-cache": "^7.5.1" + }, + "engines": { + "node": "^14.17.0 || ^16.13.0 || >=18.0.0" + } + }, + "node_modules/read-package-json/node_modules/json-parse-even-better-errors": { + "version": "3.0.2", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/json-parse-even-better-errors/-/json-parse-even-better-errors-3.0.2.tgz", + "integrity": "sha512-fi0NG4bPjCHunUJffmLd0gxssIgkNmArMvis4iNah6Owg1MCJjWhEcDLmsK6iGkJq3tHwbDkTlce70/tmXN4cQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": "^14.17.0 || ^16.13.0 || >=18.0.0" + } + }, + "node_modules/read-package-json/node_modules/lru-cache": { + "version": 
"7.18.3", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/lru-cache/-/lru-cache-7.18.3.tgz", + "integrity": "sha512-jumlc0BIUrS3qJGgIkWZsyfAM7NCWiBcCDhnd+3NNM5KbBmLTgHVfWBcg6W+rLUsIpzpERPsvwUP7CckAQSOoA==", + "dev": true, + "license": "ISC", + "engines": { + "node": ">=12" + } + }, + "node_modules/read-package-json/node_modules/minimatch": { + "version": "9.0.5", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/minimatch/-/minimatch-9.0.5.tgz", + "integrity": "sha512-G6T0ZX48xgozx7587koeX9Ys2NYy6Gmv//P89sEte9V9whIapMNF4idKxnW2QtCcLiTWlb/wfCabAtAFWhhBow==", + "dev": true, + "license": "ISC", + "dependencies": { + "brace-expansion": "^2.0.1" + }, + "engines": { + "node": ">=16 || 14 >=14.17" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/read-package-json/node_modules/minipass": { + "version": "7.1.2", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/minipass/-/minipass-7.1.2.tgz", + "integrity": "sha512-qOOzS1cBTWYF4BH8fVePDBOO9iptMnGUEZwNc/cMWnTV2nVLZ7VoNWEPHkYczZA0pdoA7dl6e7FL659nX9S2aw==", + "dev": true, + "license": "ISC", + "engines": { + "node": ">=16 || 14 >=14.17" + } + }, + "node_modules/read-package-json/node_modules/normalize-package-data": { + "version": "5.0.0", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/normalize-package-data/-/normalize-package-data-5.0.0.tgz", + "integrity": "sha512-h9iPVIfrVZ9wVYQnxFgtw1ugSvGEMOlyPWWtm8BMJhnwyEL/FLbYbTY3V3PpjI/BUK67n9PEWDu6eHzu1fB15Q==", + "dev": true, + "license": "BSD-2-Clause", + "dependencies": { + "hosted-git-info": "^6.0.0", + "is-core-module": "^2.8.1", + "semver": "^7.3.5", + "validate-npm-package-license": "^3.0.4" + }, + "engines": { + "node": "^14.17.0 || ^16.13.0 || >=18.0.0" + } + }, + "node_modules/read-package-json/node_modules/npm-normalize-package-bin": { + "version": "3.0.1", + "resolved": 
"https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/npm-normalize-package-bin/-/npm-normalize-package-bin-3.0.1.tgz", + "integrity": "sha512-dMxCf+zZ+3zeQZXKxmyuCKlIDPGuv8EF940xbkC4kQVDTtqoh6rJFO+JTKSA6/Rwi0getWmtuy4Itup0AMcaDQ==", + "dev": true, + "license": "ISC", + "engines": { + "node": "^14.17.0 || ^16.13.0 || >=18.0.0" + } + }, + "node_modules/read-pkg": { + "version": "5.2.0", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/read-pkg/-/read-pkg-5.2.0.tgz", + "integrity": "sha512-Ug69mNOpfvKDAc2Q8DRpMjjzdtrnv9HcSMX+4VsZxD1aZ6ZzrIE7rlzXBtWTyhULSMKg076AW6WR5iZpD0JiOg==", + "dev": true, + "license": "MIT", + "dependencies": { + "@types/normalize-package-data": "^2.4.0", + "normalize-package-data": "^2.5.0", + "parse-json": "^5.0.0", + "type-fest": "^0.6.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/read-pkg-up": { + "version": "7.0.1", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/read-pkg-up/-/read-pkg-up-7.0.1.tgz", + "integrity": "sha512-zK0TB7Xd6JpCLmlLmufqykGE+/TlOePD6qKClNW7hHDKFh/J7/7gCWGR7joEQEW1bKq3a3yUZSObOoWLFQ4ohg==", + "dev": true, + "license": "MIT", + "dependencies": { + "find-up": "^4.1.0", + "read-pkg": "^5.2.0", + "type-fest": "^0.8.1" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/read-pkg-up/node_modules/type-fest": { + "version": "0.8.1", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/type-fest/-/type-fest-0.8.1.tgz", + "integrity": "sha512-4dbzIzqvjtgiM5rw1k5rEHtBANKmdudhGyBEajN01fEyhaAIhsoKNy6y7+IN93IfpFtwY9iqi7kD+xwKhQsNJA==", + "dev": true, + "license": "(MIT OR CC0-1.0)", + "engines": { + "node": ">=8" + } + }, + "node_modules/read-pkg/node_modules/type-fest": { + "version": "0.6.0", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/type-fest/-/type-fest-0.6.0.tgz", + "integrity": 
"sha512-q+MB8nYR1KDLrgr4G5yemftpMC7/QLqVndBmEEdqzmNj5dcFOO4Oo8qlwZE3ULT3+Zim1F8Kq4cBnikNhlCMlg==", + "dev": true, + "license": "(MIT OR CC0-1.0)", + "engines": { + "node": ">=8" + } + }, + "node_modules/readable-stream": { + "version": "4.7.0", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/readable-stream/-/readable-stream-4.7.0.tgz", + "integrity": "sha512-oIGGmcpTLwPga8Bn6/Z75SVaH1z5dUut2ibSyAMVhmUggWpmDn2dapB0n7f8nwaSiRtepAsfJyfXIO5DCVAODg==", + "dev": true, + "license": "MIT", + "dependencies": { + "abort-controller": "^3.0.0", + "buffer": "^6.0.3", + "events": "^3.3.0", + "process": "^0.11.10", + "string_decoder": "^1.3.0" + }, + "engines": { + "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + } + }, + "node_modules/readable-stream/node_modules/buffer": { + "version": "6.0.3", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/buffer/-/buffer-6.0.3.tgz", + "integrity": "sha512-FTiCpNxtwiZZHEZbcbTIcZjERVICn9yq/pDFkTl95/AxzD1naBctN7YO68riM/gLSDY7sdrMby8hofADYuuqOA==", + "dev": true, + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ], + "license": "MIT", + "dependencies": { + "base64-js": "^1.3.1", + "ieee754": "^1.2.1" + } + }, + "node_modules/readdir-scoped-modules": { + "version": "1.1.0", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/readdir-scoped-modules/-/readdir-scoped-modules-1.1.0.tgz", + "integrity": "sha512-asaikDeqAQg7JifRsZn1NJZXo9E+VwlyCfbkZhwyISinqk5zNS6266HS5kah6P0SaQKGF6SkNnZVHUzHFYxYDw==", + "deprecated": "This functionality has been moved to @npmcli/fs", + "dev": true, + "license": "ISC", + "dependencies": { + "debuglog": "^1.0.1", + "dezalgo": "^1.0.0", + "graceful-fs": "^4.1.2", + "once": "^1.3.0" + } + }, + "node_modules/rechoir": { + "version": "0.8.0", + 
"resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/rechoir/-/rechoir-0.8.0.tgz", + "integrity": "sha512-/vxpCXddiX8NGfGO/mTafwjq4aFa/71pvamip0++IQk3zG8cbCj0fifNPrjjF1XMXUne91jL9OoxmdykoEtifQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "resolve": "^1.20.0" + }, + "engines": { + "node": ">= 10.13.0" + } + }, + "node_modules/regenerate": { + "version": "1.4.2", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/regenerate/-/regenerate-1.4.2.tgz", + "integrity": "sha512-zrceR/XhGYU/d/opr2EKO7aRHUeiBI8qjtfHqADTwZd6Szfy16la6kqD0MIUs5z5hx6AaKa+PixpPrR289+I0A==", + "dev": true, + "license": "MIT" + }, + "node_modules/regenerate-unicode-properties": { + "version": "10.2.0", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/regenerate-unicode-properties/-/regenerate-unicode-properties-10.2.0.tgz", + "integrity": "sha512-DqHn3DwbmmPVzeKj9woBadqmXxLvQoQIwu7nopMc72ztvxVmVk2SBhSnx67zuye5TP+lJsb/TBQsjLKhnDf3MA==", + "dev": true, + "license": "MIT", + "dependencies": { + "regenerate": "^1.4.2" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/regenerator-runtime": { + "version": "0.13.11", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/regenerator-runtime/-/regenerator-runtime-0.13.11.tgz", + "integrity": "sha512-kY1AZVr2Ra+t+piVaJ4gxaFaReZVH40AKNo7UCX6W+dEwBo/2oZJzqfuN1qLq1oL45o56cPaTXELwrTh8Fpggg==", + "license": "MIT" + }, + "node_modules/regenerator-transform": { + "version": "0.15.2", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/regenerator-transform/-/regenerator-transform-0.15.2.tgz", + "integrity": "sha512-hfMp2BoF0qOk3uc5V20ALGDS2ddjQaLrdl7xrGXvAIow7qeWRM2VA2HuCHkUKk9slq3VwEwLNK3DFBqDfPGYtg==", + "dev": true, + "license": "MIT", + "dependencies": { + "@babel/runtime": "^7.8.4" + } + }, + "node_modules/regexpu-core": { + "version": "6.2.0", + "resolved": 
"https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/regexpu-core/-/regexpu-core-6.2.0.tgz", + "integrity": "sha512-H66BPQMrv+V16t8xtmq+UC0CBpiTBA60V8ibS1QVReIp8T1z8hwFxqcGzm9K6lgsN7sB5edVH8a+ze6Fqm4weA==", + "dev": true, + "license": "MIT", + "dependencies": { + "regenerate": "^1.4.2", + "regenerate-unicode-properties": "^10.2.0", + "regjsgen": "^0.8.0", + "regjsparser": "^0.12.0", + "unicode-match-property-ecmascript": "^2.0.0", + "unicode-match-property-value-ecmascript": "^2.1.0" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/regjsgen": { + "version": "0.8.0", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/regjsgen/-/regjsgen-0.8.0.tgz", + "integrity": "sha512-RvwtGe3d7LvWiDQXeQw8p5asZUmfU1G/l6WbUXeHta7Y2PEIvBTwH6E2EfmYUK8pxcxEdEmaomqyp0vZZ7C+3Q==", + "dev": true, + "license": "MIT" + }, + "node_modules/regjsparser": { + "version": "0.12.0", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/regjsparser/-/regjsparser-0.12.0.tgz", + "integrity": "sha512-cnE+y8bz4NhMjISKbgeVJtqNbtf5QpjZP+Bslo+UqkIt9QPnX9q095eiRRASJG1/tz6dlNr6Z5NsBiWYokp6EQ==", + "dev": true, + "license": "BSD-2-Clause", + "dependencies": { + "jsesc": "~3.0.2" + }, + "bin": { + "regjsparser": "bin/parser" + } + }, + "node_modules/regjsparser/node_modules/jsesc": { + "version": "3.0.2", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/jsesc/-/jsesc-3.0.2.tgz", + "integrity": "sha512-xKqzzWXDttJuOcawBt4KnKHHIf5oQ/Cxax+0PWFG+DFDgHNAdi+TXECADI+RYiFUMmx8792xsMbbgXj4CwnP4g==", + "dev": true, + "license": "MIT", + "bin": { + "jsesc": "bin/jsesc" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/remove-trailing-separator": { + "version": "1.1.0", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/remove-trailing-separator/-/remove-trailing-separator-1.1.0.tgz", + "integrity": 
"sha512-/hS+Y0u3aOfIETiaiirUFwDBDzmXPvO+jAfKTitUngIPzdKc6Z0LoFjM/CK5PL4C+eKwHohlHAb6H0VFfmmUsw==", + "dev": true, + "license": "ISC" + }, + "node_modules/replace-ext": { + "version": "1.0.1", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/replace-ext/-/replace-ext-1.0.1.tgz", + "integrity": "sha512-yD5BHCe7quCgBph4rMQ+0KkIRKwWCrHDOX1p1Gp6HwjPM5kVoCdKGNhN7ydqqsX6lJEnQDKZ/tFMiEdQ1dvPEw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 0.10" + } + }, + "node_modules/require-directory": { + "version": "2.1.1", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/require-directory/-/require-directory-2.1.1.tgz", + "integrity": "sha512-fGxEI7+wsG9xrvdjsrlmL22OMTTiHRwAMroiEeMgq8gzoLC/PQr7RsRDSTLUg/bZAZtF+TVIkHc6/4RIKrui+Q==", + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/require-from-string": { + "version": "2.0.2", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/require-from-string/-/require-from-string-2.0.2.tgz", + "integrity": "sha512-Xf0nWe6RseziFMu+Ap9biiUbmplq6S9/p+7w7YXP/JBHhrUDDUhwa+vANyubuqfZWTveU//DYVGsDG7RKL/vEw==", + "dev": true, + "license": "MIT", + "peer": true, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/resolve": { + "version": "1.22.10", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/resolve/-/resolve-1.22.10.tgz", + "integrity": "sha512-NPRy+/ncIMeDlTAsuqwKIiferiawhefFJtkNSW0qZJEqMEb+qBt/77B/jGeeek+F0uOeN05CDa6HXbbIgtVX4w==", + "dev": true, + "license": "MIT", + "dependencies": { + "is-core-module": "^2.16.0", + "path-parse": "^1.0.7", + "supports-preserve-symlinks-flag": "^1.0.0" + }, + "bin": { + "resolve": "bin/resolve" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/resolve-cwd": { + "version": "3.0.0", + "resolved": 
"https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/resolve-cwd/-/resolve-cwd-3.0.0.tgz", + "integrity": "sha512-OrZaX2Mb+rJCpH/6CpSqt9xFVpN++x01XnN2ie9g6P5/3xelLAkXWVADpdz1IHD/KFfEXyE6V0U01OQ3UO2rEg==", + "dev": true, + "license": "MIT", + "dependencies": { + "resolve-from": "^5.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/resolve-from": { + "version": "5.0.0", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/resolve-from/-/resolve-from-5.0.0.tgz", + "integrity": "sha512-qYg9KP24dD5qka9J47d0aVky0N+b4fTU89LN9iDnjB5waksiC49rvMB0PrUJQGoTmH50XPiqOvAjDfaijGxYZw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/restore-cursor": { + "version": "3.1.0", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/restore-cursor/-/restore-cursor-3.1.0.tgz", + "integrity": "sha512-l+sSefzHpj5qimhFSE5a8nufZYAM3sBSVMAPtYkmC+4EH2anSGaEMXSD0izRQbu9nfyQ9y5JrVmp7E8oZrUjvA==", + "dev": true, + "license": "MIT", + "dependencies": { + "onetime": "^5.1.0", + "signal-exit": "^3.0.2" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/retry": { + "version": "0.12.0", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/retry/-/retry-0.12.0.tgz", + "integrity": "sha512-9LkiTwjUh6rT555DtE9rTX+BKByPfrMzEAtnlEtdEwr3Nkffwiihqe2bWADg+OQRjt9gl6ICdmB/ZFDCGAtSow==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 4" + } + }, + "node_modules/reusify": { + "version": "1.1.0", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/reusify/-/reusify-1.1.0.tgz", + "integrity": "sha512-g6QUff04oZpHs0eG5p83rFLhHeV00ug/Yf9nZM6fLeUrPguBTkTQOdpAWWspMh55TZfVQDPaN3NQJfbVRAxdIw==", + "dev": true, + "license": "MIT", + "engines": { + "iojs": ">=1.0.0", + "node": ">=0.10.0" + } + }, + "node_modules/rimraf": { + "version": "3.0.2", + "resolved": 
"https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/rimraf/-/rimraf-3.0.2.tgz", + "integrity": "sha512-JZkJMZkAGFFPP2YqXZXPbMlMBgsxzE8ILs4lMIX/2o0L9UBw9O/Y3o6wFw/i9YLapcUJWwqbi3kdxIPdC62TIA==", + "deprecated": "Rimraf versions prior to v4 are no longer supported", + "dev": true, + "license": "ISC", + "dependencies": { + "glob": "^7.1.3" + }, + "bin": { + "rimraf": "bin.js" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/rimraf/node_modules/glob": { + "version": "7.2.3", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/glob/-/glob-7.2.3.tgz", + "integrity": "sha512-nFR0zLpU2YCaRxwoCJvL6UvCH2JFyFVIvwTLsIf21AuHlMskA1hhTdk+LlYJtOlYt9v6dvszD2BGRqBL+iQK9Q==", + "deprecated": "Glob versions prior to v9 are no longer supported", + "dev": true, + "license": "ISC", + "dependencies": { + "fs.realpath": "^1.0.0", + "inflight": "^1.0.4", + "inherits": "2", + "minimatch": "^3.1.1", + "once": "^1.3.0", + "path-is-absolute": "^1.0.0" + }, + "engines": { + "node": "*" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/run-async": { + "version": "2.4.1", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/run-async/-/run-async-2.4.1.tgz", + "integrity": "sha512-tvVnVv01b8c1RrA6Ep7JkStj85Guv/YrMcwqYQnwjsAS2cTmmPGBBjAjpCW7RrSodNSoE2/qg9O4bceNvUuDgQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.12.0" + } + }, + "node_modules/run-parallel": { + "version": "1.2.0", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/run-parallel/-/run-parallel-1.2.0.tgz", + "integrity": "sha512-5l4VyZR86LZ/lDxZTR6jqL8AFE2S0IFLMP26AbjsLVADxHdhB/c0GUsH+y39UfCi3dzz8OlQuPmnaJOMoDHQBA==", + "dev": true, + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": 
"https://feross.org/support" + } + ], + "license": "MIT", + "dependencies": { + "queue-microtask": "^1.2.2" + } + }, + "node_modules/rxjs": { + "version": "7.8.2", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/rxjs/-/rxjs-7.8.2.tgz", + "integrity": "sha512-dhKf903U/PQZY6boNNtAGdWbG85WAbjT/1xYoZIC7FAY0yWapOBQVsVrDl58W86//e1VpMNBtRV4MaXfdMySFA==", + "dev": true, + "license": "Apache-2.0", + "dependencies": { + "tslib": "^2.1.0" + } + }, + "node_modules/safe-buffer": { + "version": "5.2.1", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/safe-buffer/-/safe-buffer-5.2.1.tgz", + "integrity": "sha512-rp3So07KcdmmKbGvgaNxQSJr7bGVSVk5S9Eq1F+ppbRo70+YeaDxkw5Dd8NPN+GD6bjnYm2VuPuCXmpuYvmCXQ==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ], + "license": "MIT" + }, + "node_modules/safe-regex-test": { + "version": "1.1.0", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/safe-regex-test/-/safe-regex-test-1.1.0.tgz", + "integrity": "sha512-x/+Cz4YrimQxQccJf5mKEbIa1NzeCRNI5Ecl/ekmlYaampdNLPalVyIcCZNNH3MvmqBugV5TMYZXv0ljslUlaw==", + "license": "MIT", + "dependencies": { + "call-bound": "^1.0.2", + "es-errors": "^1.3.0", + "is-regex": "^1.2.1" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/safer-buffer": { + "version": "2.1.2", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/safer-buffer/-/safer-buffer-2.1.2.tgz", + "integrity": "sha512-YZo3K82SD7Riyi0E1EQPojLz7kpepnSQI9IyPbHHg1XXXevb5dJI7tpyN2ADxGcQbHG7vcyRHk0cbwqcQriUtg==", + "devOptional": true, + "license": "MIT" + }, + "node_modules/schema-utils": { + "version": "4.3.2", + "resolved": 
"https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/schema-utils/-/schema-utils-4.3.2.tgz", + "integrity": "sha512-Gn/JaSk/Mt9gYubxTtSn/QCV4em9mpAPiR1rqy/Ocu19u/G9J5WWdNoUT4SiV6mFC3y6cxyFcFwdzPM3FgxGAQ==", + "dev": true, + "license": "MIT", + "peer": true, + "dependencies": { + "@types/json-schema": "^7.0.9", + "ajv": "^8.9.0", + "ajv-formats": "^2.1.1", + "ajv-keywords": "^5.1.0" + }, + "engines": { + "node": ">= 10.13.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/webpack" + } + }, + "node_modules/scoped-regex": { + "version": "2.1.0", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/scoped-regex/-/scoped-regex-2.1.0.tgz", + "integrity": "sha512-g3WxHrqSWCZHGHlSrF51VXFdjImhwvH8ZO/pryFH56Qi0cDsZfylQa/t0jCzVQFNbNvM00HfHjkDPEuarKDSWQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/seedrandom": { + "version": "3.0.5", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/seedrandom/-/seedrandom-3.0.5.tgz", + "integrity": "sha512-8OwmbklUNzwezjGInmZ+2clQmExQPvomqjL7LFqOYqtmuxRgQYqOD3mHaU+MvZn5FLUeVxVfQjwLZW/n/JFuqg==", + "license": "MIT" + }, + "node_modules/semver": { + "version": "7.7.1", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/semver/-/semver-7.7.1.tgz", + "integrity": "sha512-hlq8tAfn0m/61p4BVRcPzIGr6LKiMwo4VM6dGi6pt4qcRkmNzTcWq6eCEjEh+qXjkMDvPlOFFSGwQjoEa6gyMA==", + "dev": true, + "license": "ISC", + "bin": { + "semver": "bin/semver.js" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/serialize-javascript": { + "version": "6.0.2", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/serialize-javascript/-/serialize-javascript-6.0.2.tgz", + "integrity": "sha512-Saa1xPByTTq2gdeFZYLLo+RFE35NHZkAbqZeWNd3BpzppeVisAqpDjcp8dyf6uIvEqJRd46jemmyA4iFIeVk8g==", + "dev": true, + "license": "BSD-3-Clause", + "peer": true, + 
"dependencies": { + "randombytes": "^2.1.0" + } + }, + "node_modules/set-blocking": { + "version": "2.0.0", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/set-blocking/-/set-blocking-2.0.0.tgz", + "integrity": "sha512-KiKBS8AnWGEyLzofFfmvKwpdPzqiy16LvQfK3yv/fVH7Bj13/wl3JSR1J+rfgRE9q7xUJK4qvgS8raSOeLUehw==", + "dev": true, + "license": "ISC" + }, + "node_modules/set-function-length": { + "version": "1.2.2", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/set-function-length/-/set-function-length-1.2.2.tgz", + "integrity": "sha512-pgRc4hJ4/sNjWCSS9AmnS40x3bNMDTknHgL5UaMBTMyJnU90EgWh1Rz+MC9eFu4BuN/UwZjKQuY/1v3rM7HMfg==", + "license": "MIT", + "dependencies": { + "define-data-property": "^1.1.4", + "es-errors": "^1.3.0", + "function-bind": "^1.1.2", + "get-intrinsic": "^1.2.4", + "gopd": "^1.0.1", + "has-property-descriptors": "^1.0.2" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/shallow-clone": { + "version": "3.0.1", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/shallow-clone/-/shallow-clone-3.0.1.tgz", + "integrity": "sha512-/6KqX+GVUdqPuPPd2LxDDxzX6CAbjJehAAOKlNpqqUpAqPM6HeL8f+o3a+JsyGjn2lv0WY8UsTgUJjU9Ok55NA==", + "dev": true, + "license": "MIT", + "dependencies": { + "kind-of": "^6.0.2" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/shebang-command": { + "version": "2.0.0", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/shebang-command/-/shebang-command-2.0.0.tgz", + "integrity": "sha512-kHxr2zZpYtdmrN1qDjrrX/Z1rR1kG8Dx+gkpK1G4eXmvXswmcE1hTWBWYUzlraYw1/yZp6YuDY77YtvbN0dmDA==", + "dev": true, + "license": "MIT", + "dependencies": { + "shebang-regex": "^3.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/shebang-regex": { + "version": "3.0.0", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/shebang-regex/-/shebang-regex-3.0.0.tgz", + "integrity": 
"sha512-7++dFhtcx3353uBaq8DDR4NuxBetBzC7ZQOhmTQInHEd6bSrXdiEyzCvG07Z44UYdLShWUyXt5M/yhz8ekcb1A==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/shelljs": { + "version": "0.8.5", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/shelljs/-/shelljs-0.8.5.tgz", + "integrity": "sha512-TiwcRcrkhHvbrZbnRcFYMLl30Dfov3HKqzp5tO5b4pt6G/SezKcYhmDg15zXVBswHmctSAQKznqNW2LO5tTDow==", + "dev": true, + "license": "BSD-3-Clause", + "dependencies": { + "glob": "^7.0.0", + "interpret": "^1.0.0", + "rechoir": "^0.6.2" + }, + "bin": { + "shjs": "bin/shjs" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/shelljs/node_modules/glob": { + "version": "7.2.3", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/glob/-/glob-7.2.3.tgz", + "integrity": "sha512-nFR0zLpU2YCaRxwoCJvL6UvCH2JFyFVIvwTLsIf21AuHlMskA1hhTdk+LlYJtOlYt9v6dvszD2BGRqBL+iQK9Q==", + "deprecated": "Glob versions prior to v9 are no longer supported", + "dev": true, + "license": "ISC", + "dependencies": { + "fs.realpath": "^1.0.0", + "inflight": "^1.0.4", + "inherits": "2", + "minimatch": "^3.1.1", + "once": "^1.3.0", + "path-is-absolute": "^1.0.0" + }, + "engines": { + "node": "*" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/shelljs/node_modules/interpret": { + "version": "1.4.0", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/interpret/-/interpret-1.4.0.tgz", + "integrity": "sha512-agE4QfB2Lkp9uICn7BAqoscw4SZP9kTE2hxiFI3jBPmXJfdqiahTbUuKGsMoN2GtqL9AxhYioAcVvgsb1HvRbA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 0.10" + } + }, + "node_modules/shelljs/node_modules/rechoir": { + "version": "0.6.2", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/rechoir/-/rechoir-0.6.2.tgz", + "integrity": 
"sha512-HFM8rkZ+i3zrV+4LQjwQ0W+ez98pApMGM3HUrN04j3CqzPOzl9nmP15Y8YXNm8QHGv/eacOVEjqhmWpkRV0NAw==", + "dev": true, + "dependencies": { + "resolve": "^1.1.6" + }, + "engines": { + "node": ">= 0.10" + } + }, + "node_modules/signal-exit": { + "version": "3.0.7", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/signal-exit/-/signal-exit-3.0.7.tgz", + "integrity": "sha512-wnD2ZE+l+SPC/uoS0vXeE9L1+0wuaMqKlfz9AMUo38JsyLSBWSFcHR1Rri62LZc12vLr1gb3jl7iwQhgwpAbGQ==", + "dev": true, + "license": "ISC" + }, + "node_modules/sigstore": { + "version": "1.9.0", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/sigstore/-/sigstore-1.9.0.tgz", + "integrity": "sha512-0Zjz0oe37d08VeOtBIuB6cRriqXse2e8w+7yIy2XSXjshRKxbc2KkhXjL229jXSxEm7UbcjS76wcJDGQddVI9A==", + "dev": true, + "license": "Apache-2.0", + "dependencies": { + "@sigstore/bundle": "^1.1.0", + "@sigstore/protobuf-specs": "^0.2.0", + "@sigstore/sign": "^1.0.0", + "@sigstore/tuf": "^1.0.3", + "make-fetch-happen": "^11.0.1" + }, + "bin": { + "sigstore": "bin/sigstore.js" + }, + "engines": { + "node": "^14.17.0 || ^16.13.0 || >=18.0.0" + } + }, + "node_modules/sigstore/node_modules/@npmcli/fs": { + "version": "3.1.1", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/@npmcli/fs/-/fs-3.1.1.tgz", + "integrity": "sha512-q9CRWjpHCMIh5sVyefoD1cA7PkvILqCZsnSOEUUivORLjxCO/Irmue2DprETiNgEqktDBZaM1Bi+jrarx1XdCg==", + "dev": true, + "license": "ISC", + "dependencies": { + "semver": "^7.3.5" + }, + "engines": { + "node": "^14.17.0 || ^16.13.0 || >=18.0.0" + } + }, + "node_modules/sigstore/node_modules/@tootallnate/once": { + "version": "2.0.0", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/@tootallnate/once/-/once-2.0.0.tgz", + "integrity": "sha512-XCuKFP5PS55gnMVu3dty8KPatLqUoy/ZYzDzAGCQ8JNFCkLXzmI7vNHCR+XpbZaMWQK/vQubr7PkYq8g470J/A==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 10" + } + }, + 
"node_modules/sigstore/node_modules/brace-expansion": { + "version": "2.0.1", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/brace-expansion/-/brace-expansion-2.0.1.tgz", + "integrity": "sha512-XnAIvQ8eM+kC6aULx6wuQiwVsnzsi9d3WxzV3FpWTGA19F621kwdbsAcFKXgKUHZWsy+mY6iL1sHTxWEFCytDA==", + "dev": true, + "license": "MIT", + "dependencies": { + "balanced-match": "^1.0.0" + } + }, + "node_modules/sigstore/node_modules/cacache": { + "version": "17.1.4", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/cacache/-/cacache-17.1.4.tgz", + "integrity": "sha512-/aJwG2l3ZMJ1xNAnqbMpA40of9dj/pIH3QfiuQSqjfPJF747VR0J/bHn+/KdNnHKc6XQcWt/AfRSBft82W1d2A==", + "dev": true, + "license": "ISC", + "dependencies": { + "@npmcli/fs": "^3.1.0", + "fs-minipass": "^3.0.0", + "glob": "^10.2.2", + "lru-cache": "^7.7.1", + "minipass": "^7.0.3", + "minipass-collect": "^1.0.2", + "minipass-flush": "^1.0.5", + "minipass-pipeline": "^1.2.4", + "p-map": "^4.0.0", + "ssri": "^10.0.0", + "tar": "^6.1.11", + "unique-filename": "^3.0.0" + }, + "engines": { + "node": "^14.17.0 || ^16.13.0 || >=18.0.0" + } + }, + "node_modules/sigstore/node_modules/cacache/node_modules/minipass": { + "version": "7.1.2", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/minipass/-/minipass-7.1.2.tgz", + "integrity": "sha512-qOOzS1cBTWYF4BH8fVePDBOO9iptMnGUEZwNc/cMWnTV2nVLZ7VoNWEPHkYczZA0pdoA7dl6e7FL659nX9S2aw==", + "dev": true, + "license": "ISC", + "engines": { + "node": ">=16 || 14 >=14.17" + } + }, + "node_modules/sigstore/node_modules/fs-minipass": { + "version": "3.0.3", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/fs-minipass/-/fs-minipass-3.0.3.tgz", + "integrity": "sha512-XUBA9XClHbnJWSfBzjkm6RvPsyg3sryZt06BEQoXcF7EK/xpGaQYJgQKDJSUH5SGZ76Y7pFx1QBnXz09rU5Fbw==", + "dev": true, + "license": "ISC", + "dependencies": { + "minipass": "^7.0.3" + }, + "engines": { + "node": "^14.17.0 || ^16.13.0 
|| >=18.0.0" + } + }, + "node_modules/sigstore/node_modules/fs-minipass/node_modules/minipass": { + "version": "7.1.2", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/minipass/-/minipass-7.1.2.tgz", + "integrity": "sha512-qOOzS1cBTWYF4BH8fVePDBOO9iptMnGUEZwNc/cMWnTV2nVLZ7VoNWEPHkYczZA0pdoA7dl6e7FL659nX9S2aw==", + "dev": true, + "license": "ISC", + "engines": { + "node": ">=16 || 14 >=14.17" + } + }, + "node_modules/sigstore/node_modules/glob": { + "version": "10.4.5", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/glob/-/glob-10.4.5.tgz", + "integrity": "sha512-7Bv8RF0k6xjo7d4A/PxYLbUCfb6c+Vpd2/mB2yRDlew7Jb5hEXiCD9ibfO7wpk8i4sevK6DFny9h7EYbM3/sHg==", + "dev": true, + "license": "ISC", + "dependencies": { + "foreground-child": "^3.1.0", + "jackspeak": "^3.1.2", + "minimatch": "^9.0.4", + "minipass": "^7.1.2", + "package-json-from-dist": "^1.0.0", + "path-scurry": "^1.11.1" + }, + "bin": { + "glob": "dist/esm/bin.mjs" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/sigstore/node_modules/glob/node_modules/minipass": { + "version": "7.1.2", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/minipass/-/minipass-7.1.2.tgz", + "integrity": "sha512-qOOzS1cBTWYF4BH8fVePDBOO9iptMnGUEZwNc/cMWnTV2nVLZ7VoNWEPHkYczZA0pdoA7dl6e7FL659nX9S2aw==", + "dev": true, + "license": "ISC", + "engines": { + "node": ">=16 || 14 >=14.17" + } + }, + "node_modules/sigstore/node_modules/http-proxy-agent": { + "version": "5.0.0", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/http-proxy-agent/-/http-proxy-agent-5.0.0.tgz", + "integrity": "sha512-n2hY8YdoRE1i7r6M0w9DIw5GgZN0G25P8zLCRQ8rjXtTU3vsNFBI/vWK/UIeE6g5MUUz6avwAPXmL6Fy9D/90w==", + "dev": true, + "license": "MIT", + "dependencies": { + "@tootallnate/once": "2", + "agent-base": "6", + "debug": "4" + }, + "engines": { + "node": ">= 6" + } + }, + 
"node_modules/sigstore/node_modules/lru-cache": { + "version": "7.18.3", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/lru-cache/-/lru-cache-7.18.3.tgz", + "integrity": "sha512-jumlc0BIUrS3qJGgIkWZsyfAM7NCWiBcCDhnd+3NNM5KbBmLTgHVfWBcg6W+rLUsIpzpERPsvwUP7CckAQSOoA==", + "dev": true, + "license": "ISC", + "engines": { + "node": ">=12" + } + }, + "node_modules/sigstore/node_modules/make-fetch-happen": { + "version": "11.1.1", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/make-fetch-happen/-/make-fetch-happen-11.1.1.tgz", + "integrity": "sha512-rLWS7GCSTcEujjVBs2YqG7Y4643u8ucvCJeSRqiLYhesrDuzeuFIk37xREzAsfQaqzl8b9rNCE4m6J8tvX4Q8w==", + "dev": true, + "license": "ISC", + "dependencies": { + "agentkeepalive": "^4.2.1", + "cacache": "^17.0.0", + "http-cache-semantics": "^4.1.1", + "http-proxy-agent": "^5.0.0", + "https-proxy-agent": "^5.0.0", + "is-lambda": "^1.0.1", + "lru-cache": "^7.7.1", + "minipass": "^5.0.0", + "minipass-fetch": "^3.0.0", + "minipass-flush": "^1.0.5", + "minipass-pipeline": "^1.2.4", + "negotiator": "^0.6.3", + "promise-retry": "^2.0.1", + "socks-proxy-agent": "^7.0.0", + "ssri": "^10.0.0" + }, + "engines": { + "node": "^14.17.0 || ^16.13.0 || >=18.0.0" + } + }, + "node_modules/sigstore/node_modules/minimatch": { + "version": "9.0.5", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/minimatch/-/minimatch-9.0.5.tgz", + "integrity": "sha512-G6T0ZX48xgozx7587koeX9Ys2NYy6Gmv//P89sEte9V9whIapMNF4idKxnW2QtCcLiTWlb/wfCabAtAFWhhBow==", + "dev": true, + "license": "ISC", + "dependencies": { + "brace-expansion": "^2.0.1" + }, + "engines": { + "node": ">=16 || 14 >=14.17" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/sigstore/node_modules/minipass": { + "version": "5.0.0", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/minipass/-/minipass-5.0.0.tgz", + "integrity": 
"sha512-3FnjYuehv9k6ovOEbyOswadCDPX1piCfhV8ncmYtHOjuPwylVWsghTLo7rabjC3Rx5xD4HDx8Wm1xnMF7S5qFQ==", + "dev": true, + "license": "ISC", + "engines": { + "node": ">=8" + } + }, + "node_modules/sigstore/node_modules/minipass-fetch": { + "version": "3.0.5", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/minipass-fetch/-/minipass-fetch-3.0.5.tgz", + "integrity": "sha512-2N8elDQAtSnFV0Dk7gt15KHsS0Fyz6CbYZ360h0WTYV1Ty46li3rAXVOQj1THMNLdmrD9Vt5pBPtWtVkpwGBqg==", + "dev": true, + "license": "MIT", + "dependencies": { + "minipass": "^7.0.3", + "minipass-sized": "^1.0.3", + "minizlib": "^2.1.2" + }, + "engines": { + "node": "^14.17.0 || ^16.13.0 || >=18.0.0" + }, + "optionalDependencies": { + "encoding": "^0.1.13" + } + }, + "node_modules/sigstore/node_modules/minipass-fetch/node_modules/minipass": { + "version": "7.1.2", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/minipass/-/minipass-7.1.2.tgz", + "integrity": "sha512-qOOzS1cBTWYF4BH8fVePDBOO9iptMnGUEZwNc/cMWnTV2nVLZ7VoNWEPHkYczZA0pdoA7dl6e7FL659nX9S2aw==", + "dev": true, + "license": "ISC", + "engines": { + "node": ">=16 || 14 >=14.17" + } + }, + "node_modules/sigstore/node_modules/socks-proxy-agent": { + "version": "7.0.0", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/socks-proxy-agent/-/socks-proxy-agent-7.0.0.tgz", + "integrity": "sha512-Fgl0YPZ902wEsAyiQ+idGd1A7rSFx/ayC1CQVMw5P+EQx2V0SgpGtf6OKFhVjPflPUl9YMmEOnmfjCdMUsygww==", + "dev": true, + "license": "MIT", + "dependencies": { + "agent-base": "^6.0.2", + "debug": "^4.3.3", + "socks": "^2.6.2" + }, + "engines": { + "node": ">= 10" + } + }, + "node_modules/sigstore/node_modules/ssri": { + "version": "10.0.6", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/ssri/-/ssri-10.0.6.tgz", + "integrity": "sha512-MGrFH9Z4NP9Iyhqn16sDtBpRRNJ0Y2hNa6D65h736fVSaPCHr4DM4sWUNvVaSuC+0OBGhwsrydQwmgfg5LncqQ==", + "dev": true, + "license": "ISC", + 
"dependencies": { + "minipass": "^7.0.3" + }, + "engines": { + "node": "^14.17.0 || ^16.13.0 || >=18.0.0" + } + }, + "node_modules/sigstore/node_modules/ssri/node_modules/minipass": { + "version": "7.1.2", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/minipass/-/minipass-7.1.2.tgz", + "integrity": "sha512-qOOzS1cBTWYF4BH8fVePDBOO9iptMnGUEZwNc/cMWnTV2nVLZ7VoNWEPHkYczZA0pdoA7dl6e7FL659nX9S2aw==", + "dev": true, + "license": "ISC", + "engines": { + "node": ">=16 || 14 >=14.17" + } + }, + "node_modules/sigstore/node_modules/unique-filename": { + "version": "3.0.0", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/unique-filename/-/unique-filename-3.0.0.tgz", + "integrity": "sha512-afXhuC55wkAmZ0P18QsVE6kp8JaxrEokN2HGIoIVv2ijHQd419H0+6EigAFcIzXeMIkcIkNBpB3L/DXB3cTS/g==", + "dev": true, + "license": "ISC", + "dependencies": { + "unique-slug": "^4.0.0" + }, + "engines": { + "node": "^14.17.0 || ^16.13.0 || >=18.0.0" + } + }, + "node_modules/sigstore/node_modules/unique-slug": { + "version": "4.0.0", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/unique-slug/-/unique-slug-4.0.0.tgz", + "integrity": "sha512-WrcA6AyEfqDX5bWige/4NQfPZMtASNVxdmWR76WESYQVAACSgWcR6e9i0mofqqBxYFtL4oAxPIptY73/0YE1DQ==", + "dev": true, + "license": "ISC", + "dependencies": { + "imurmurhash": "^0.1.4" + }, + "engines": { + "node": "^14.17.0 || ^16.13.0 || >=18.0.0" + } + }, + "node_modules/slash": { + "version": "3.0.0", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/slash/-/slash-3.0.0.tgz", + "integrity": "sha512-g9Q1haeby36OSStwb4ntCGGGaKsaVSjQ68fBxoQcutl5fS1vuY18H3wSt3jFyFtrkx+Kz0V1G85A4MyAdDMi2Q==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/smart-buffer": { + "version": "4.2.0", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/smart-buffer/-/smart-buffer-4.2.0.tgz", + "integrity": 
"sha512-94hK0Hh8rPqQl2xXc3HsaBoOXKV20MToPkcXvwbISWLEs+64sBq5kFgn2kJDHb1Pry9yrP0dxrCI9RRci7RXKg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 6.0.0", + "npm": ">= 3.0.0" + } + }, + "node_modules/socks": { + "version": "2.8.4", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/socks/-/socks-2.8.4.tgz", + "integrity": "sha512-D3YaD0aRxR3mEcqnidIs7ReYJFVzWdd6fXJYUM8ixcQcJRGTka/b3saV0KflYhyVJXKhb947GndU35SxYNResQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "ip-address": "^9.0.5", + "smart-buffer": "^4.2.0" + }, + "engines": { + "node": ">= 10.0.0", + "npm": ">= 3.0.0" + } + }, + "node_modules/socks-proxy-agent": { + "version": "6.2.1", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/socks-proxy-agent/-/socks-proxy-agent-6.2.1.tgz", + "integrity": "sha512-a6KW9G+6B3nWZ1yB8G7pJwL3ggLy1uTzKAgCb7ttblwqdz9fMGJUuTy3uFzEP48FAs9FLILlmzDlE2JJhVQaXQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "agent-base": "^6.0.2", + "debug": "^4.3.3", + "socks": "^2.6.2" + }, + "engines": { + "node": ">= 10" + } + }, + "node_modules/sort-keys": { + "version": "4.2.0", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/sort-keys/-/sort-keys-4.2.0.tgz", + "integrity": "sha512-aUYIEU/UviqPgc8mHR6IW1EGxkAXpeRETYcrzg8cLAvUPZcpAlleSXHV2mY7G12GphSH6Gzv+4MMVSSkbdteHg==", + "dev": true, + "license": "MIT", + "dependencies": { + "is-plain-obj": "^2.0.0" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/source-map": { + "version": "0.6.1", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/source-map/-/source-map-0.6.1.tgz", + "integrity": "sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g==", + "dev": true, + "license": "BSD-3-Clause", + "peer": true, + "engines": { + "node": ">=0.10.0" + } + }, + 
"node_modules/source-map-support": { + "version": "0.5.21", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/source-map-support/-/source-map-support-0.5.21.tgz", + "integrity": "sha512-uBHU3L3czsIyYXKX88fdrGovxdSCoTGDRZ6SYXtSRxLZUzHg5P/66Ht6uoUlHu9EZod+inXhKo3qQgwXUT/y1w==", + "dev": true, + "license": "MIT", + "peer": true, + "dependencies": { + "buffer-from": "^1.0.0", + "source-map": "^0.6.0" + } + }, + "node_modules/spdx-correct": { + "version": "3.2.0", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/spdx-correct/-/spdx-correct-3.2.0.tgz", + "integrity": "sha512-kN9dJbvnySHULIluDHy32WHRUu3Og7B9sbY7tsFLctQkIqnMh3hErYgdMjTYuqmcXX+lK5T1lnUt3G7zNswmZA==", + "dev": true, + "license": "Apache-2.0", + "dependencies": { + "spdx-expression-parse": "^3.0.0", + "spdx-license-ids": "^3.0.0" + } + }, + "node_modules/spdx-exceptions": { + "version": "2.5.0", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/spdx-exceptions/-/spdx-exceptions-2.5.0.tgz", + "integrity": "sha512-PiU42r+xO4UbUS1buo3LPJkjlO7430Xn5SVAhdpzzsPHsjbYVflnnFdATgabnLude+Cqu25p6N+g2lw/PFsa4w==", + "dev": true, + "license": "CC-BY-3.0" + }, + "node_modules/spdx-expression-parse": { + "version": "3.0.1", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/spdx-expression-parse/-/spdx-expression-parse-3.0.1.tgz", + "integrity": "sha512-cbqHunsQWnJNE6KhVSMsMeH5H/L9EpymbzqTQ3uLwNCLZ1Q481oWaofqH7nO6V07xlXwY6PhQdQ2IedWx/ZK4Q==", + "dev": true, + "license": "MIT", + "dependencies": { + "spdx-exceptions": "^2.1.0", + "spdx-license-ids": "^3.0.0" + } + }, + "node_modules/spdx-license-ids": { + "version": "3.0.21", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/spdx-license-ids/-/spdx-license-ids-3.0.21.tgz", + "integrity": "sha512-Bvg/8F5XephndSK3JffaRqdT+gyhfqIPwDHpX80tJrF8QQRYMo8sNMeaZ2Dp5+jhwKnUmIOyFFQfHRkjJm5nXg==", + "dev": true, + "license": "CC0-1.0" + 
}, + "node_modules/sprintf-js": { + "version": "1.1.3", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/sprintf-js/-/sprintf-js-1.1.3.tgz", + "integrity": "sha512-Oo+0REFV59/rz3gfJNKQiBlwfHaSESl1pcGyABQsnnIfWOFt6JNj5gCog2U6MLZ//IGYD+nA8nI+mTShREReaA==", + "dev": true, + "license": "BSD-3-Clause" + }, + "node_modules/ssri": { + "version": "8.0.1", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/ssri/-/ssri-8.0.1.tgz", + "integrity": "sha512-97qShzy1AiyxvPNIkLWoGua7xoQzzPjQ0HAH4B0rWKo7SZ6USuPcrUiAFrws0UH8RrbWmgq3LMTObhPIHbbBeQ==", + "dev": true, + "license": "ISC", + "dependencies": { + "minipass": "^3.1.1" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/stream-browserify": { + "version": "3.0.0", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/stream-browserify/-/stream-browserify-3.0.0.tgz", + "integrity": "sha512-H73RAHsVBapbim0tU2JwwOiXUj+fikfiaoYAKHF3VJfA0pe2BCzkhAHBlLG6REzE+2WNZcxOXjK7lkso+9euLA==", + "license": "MIT", + "dependencies": { + "inherits": "~2.0.4", + "readable-stream": "^3.5.0" + } + }, + "node_modules/stream-browserify/node_modules/readable-stream": { + "version": "3.6.2", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/readable-stream/-/readable-stream-3.6.2.tgz", + "integrity": "sha512-9u/sniCrY3D5WdsERHzHE4G2YCXqoG5FTHUiCC4SIbr6XcLZBY05ya9EKjYek9O5xOAwjGq+1JdGBAS7Q9ScoA==", + "license": "MIT", + "dependencies": { + "inherits": "^2.0.3", + "string_decoder": "^1.1.1", + "util-deprecate": "^1.0.1" + }, + "engines": { + "node": ">= 6" + } + }, + "node_modules/string_decoder": { + "version": "1.3.0", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/string_decoder/-/string_decoder-1.3.0.tgz", + "integrity": "sha512-hkRX8U1WjJFd8LsDJ2yQ/wWWxaopEsABU1XfkM8A+j0+85JAGppt16cr1Whg6KIbb4okU6Mql6BOj+uup/wKeA==", + "license": "MIT", + "dependencies": { + "safe-buffer": "~5.2.0" + } + 
}, + "node_modules/string-width": { + "version": "4.2.3", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/string-width/-/string-width-4.2.3.tgz", + "integrity": "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==", + "license": "MIT", + "dependencies": { + "emoji-regex": "^8.0.0", + "is-fullwidth-code-point": "^3.0.0", + "strip-ansi": "^6.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/string-width-cjs": { + "name": "string-width", + "version": "4.2.3", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/string-width/-/string-width-4.2.3.tgz", + "integrity": "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==", + "dev": true, + "license": "MIT", + "dependencies": { + "emoji-regex": "^8.0.0", + "is-fullwidth-code-point": "^3.0.0", + "strip-ansi": "^6.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/strip-ansi": { + "version": "6.0.1", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/strip-ansi/-/strip-ansi-6.0.1.tgz", + "integrity": "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==", + "license": "MIT", + "dependencies": { + "ansi-regex": "^5.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/strip-ansi-cjs": { + "name": "strip-ansi", + "version": "6.0.1", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/strip-ansi/-/strip-ansi-6.0.1.tgz", + "integrity": "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==", + "dev": true, + "license": "MIT", + "dependencies": { + "ansi-regex": "^5.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/strip-bom": { + "version": "2.0.0", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/strip-bom/-/strip-bom-2.0.0.tgz", + "integrity": 
"sha512-kwrX1y7czp1E69n2ajbG65mIo9dqvJ+8aBQXOGVxqwvNbsXdFM6Lq37dLAY3mknUwru8CfcCbfOLL/gMo+fi3g==", + "dev": true, + "license": "MIT", + "dependencies": { + "is-utf8": "^0.2.0" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/strip-bom-buf": { + "version": "1.0.0", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/strip-bom-buf/-/strip-bom-buf-1.0.0.tgz", + "integrity": "sha512-1sUIL1jck0T1mhOLP2c696BIznzT525Lkub+n4jjMHjhjhoAQA6Ye659DxdlZBr0aLDMQoTxKIpnlqxgtwjsuQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "is-utf8": "^0.2.1" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/strip-bom-stream": { + "version": "2.0.0", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/strip-bom-stream/-/strip-bom-stream-2.0.0.tgz", + "integrity": "sha512-yH0+mD8oahBZWnY43vxs4pSinn8SMKAdml/EOGBewoe1Y0Eitd0h2Mg3ZRiXruUW6L4P+lvZiEgbh0NgUGia1w==", + "dev": true, + "license": "MIT", + "dependencies": { + "first-chunk-stream": "^2.0.0", + "strip-bom": "^2.0.0" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/strip-final-newline": { + "version": "2.0.0", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/strip-final-newline/-/strip-final-newline-2.0.0.tgz", + "integrity": "sha512-BrpvfNAE3dcvq7ll3xVumzjKjZQ5tI1sEUIKr3Uoks0XUl45St3FlatVqef9prk4jRDzhW6WZg+3bk93y6pLjA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "node_modules/supports-color": { + "version": "8.1.1", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/supports-color/-/supports-color-8.1.1.tgz", + "integrity": "sha512-MpUEN2OodtUzxvKQl72cUF7RQ5EiHsGvSsVG0ia9c5RbWGL2CI4C7EpPS8UTBIplnlzZiNuV56w+FuNxy3ty2Q==", + "dev": true, + "license": "MIT", + "peer": true, + "dependencies": { + "has-flag": "^4.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/supports-color?sponsor=1" 
+ } + }, + "node_modules/supports-preserve-symlinks-flag": { + "version": "1.0.0", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/supports-preserve-symlinks-flag/-/supports-preserve-symlinks-flag-1.0.0.tgz", + "integrity": "sha512-ot0WnXS9fgdkgIcePe6RHNk1WA8+muPa6cSjeR3V8K27q9BB1rTE3R1p7Hv0z1ZyAc8s6Vvv8DIyWf681MAt0w==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/tapable": { + "version": "2.2.1", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/tapable/-/tapable-2.2.1.tgz", + "integrity": "sha512-GNzQvQTOIP6RyTfE2Qxb8ZVlNmw0n88vp1szwWRimP02mnTsx3Wtn5qRdqY9w2XduFNUgvOwhNnQsjwCp+kqaQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "node_modules/tar": { + "version": "6.2.1", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/tar/-/tar-6.2.1.tgz", + "integrity": "sha512-DZ4yORTwrbTj/7MZYq2w+/ZFdI6OZ/f9SFHR+71gIVUZhOQPHzVCLpvRnPgyaMpfWxxk/4ONva3GQSyNIKRv6A==", + "dev": true, + "license": "ISC", + "dependencies": { + "chownr": "^2.0.0", + "fs-minipass": "^2.0.0", + "minipass": "^5.0.0", + "minizlib": "^2.1.1", + "mkdirp": "^1.0.3", + "yallist": "^4.0.0" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/tar/node_modules/minipass": { + "version": "5.0.0", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/minipass/-/minipass-5.0.0.tgz", + "integrity": "sha512-3FnjYuehv9k6ovOEbyOswadCDPX1piCfhV8ncmYtHOjuPwylVWsghTLo7rabjC3Rx5xD4HDx8Wm1xnMF7S5qFQ==", + "dev": true, + "license": "ISC", + "engines": { + "node": ">=8" + } + }, + "node_modules/terser": { + "version": "5.39.0", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/terser/-/terser-5.39.0.tgz", + "integrity": "sha512-LBAhFyLho16harJoWMg/nZsQYgTrg5jXOn2nCYjRUcZZEdE3qa2zb8QEDRUGVZBW4rlazf2fxkg8tztybTaqWw==", 
+ "dev": true, + "license": "BSD-2-Clause", + "peer": true, + "dependencies": { + "@jridgewell/source-map": "^0.3.3", + "acorn": "^8.8.2", + "commander": "^2.20.0", + "source-map-support": "~0.5.20" + }, + "bin": { + "terser": "bin/terser" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/terser-webpack-plugin": { + "version": "5.3.14", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/terser-webpack-plugin/-/terser-webpack-plugin-5.3.14.tgz", + "integrity": "sha512-vkZjpUjb6OMS7dhV+tILUW6BhpDR7P2L/aQSAv+Uwk+m8KATX9EccViHTJR2qDtACKPIYndLGCyl3FMo+r2LMw==", + "dev": true, + "license": "MIT", + "peer": true, + "dependencies": { + "@jridgewell/trace-mapping": "^0.3.25", + "jest-worker": "^27.4.5", + "schema-utils": "^4.3.0", + "serialize-javascript": "^6.0.2", + "terser": "^5.31.1" + }, + "engines": { + "node": ">= 10.13.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/webpack" + }, + "peerDependencies": { + "webpack": "^5.1.0" + }, + "peerDependenciesMeta": { + "@swc/core": { + "optional": true + }, + "esbuild": { + "optional": true + }, + "uglify-js": { + "optional": true + } + } + }, + "node_modules/text-table": { + "version": "0.2.0", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/text-table/-/text-table-0.2.0.tgz", + "integrity": "sha512-N+8UisAXDGk8PFXP4HAzVR9nbfmVJ3zYLAWiTIoqC5v5isinhr+r5uaO8+7r3BMfuNIufIsA7RdpVgacC2cSpw==", + "dev": true, + "license": "MIT" + }, + "node_modules/textextensions": { + "version": "5.16.0", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/textextensions/-/textextensions-5.16.0.tgz", + "integrity": "sha512-7D/r3s6uPZyU//MCYrX6I14nzauDwJ5CxazouuRGNuvSCihW87ufN6VLoROLCrHg6FblLuJrT6N2BVaPVzqElw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.8" + }, + "funding": { + "url": "https://bevry.me/fund" + } + }, + "node_modules/through": { + "version": "2.3.8", + "resolved": 
"https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/through/-/through-2.3.8.tgz", + "integrity": "sha512-w89qg7PI8wAdvX60bMDP+bFoD5Dvhm9oLheFp5O4a2QF0cSBGsBX4qZmadPMvVqlLJBBci+WqGGOAPvcDeNSVg==", + "dev": true, + "license": "MIT" + }, + "node_modules/tmp": { + "version": "0.0.33", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/tmp/-/tmp-0.0.33.tgz", + "integrity": "sha512-jRCJlojKnZ3addtTOjdIqoRuPEKBvNXcGYqzO6zWZX8KfKEpnGY5jfggJQ3EjKuu8D4bJRr0y+cYJFmYbImXGw==", + "dev": true, + "license": "MIT", + "dependencies": { + "os-tmpdir": "~1.0.2" + }, + "engines": { + "node": ">=0.6.0" + } + }, + "node_modules/to-regex-range": { + "version": "5.0.1", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/to-regex-range/-/to-regex-range-5.0.1.tgz", + "integrity": "sha512-65P7iz6X5yEr1cwcgvQxbbIw7Uk3gOy5dIdtZ4rDveLqhrdJP+Li/Hx6tyK0NEb+2GCyneCMJiGqrADCSNk8sQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "is-number": "^7.0.0" + }, + "engines": { + "node": ">=8.0" + } + }, + "node_modules/tr46": { + "version": "0.0.3", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/tr46/-/tr46-0.0.3.tgz", + "integrity": "sha512-N3WMsuqV66lT30CrXNbEjx4GEwlow3v6rr4mCcv6prnfwhS01rkgyFdjPNBYd9br7LpXV1+Emh01fHnq2Gdgrw==", + "license": "MIT" + }, + "node_modules/treeverse": { + "version": "1.0.4", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/treeverse/-/treeverse-1.0.4.tgz", + "integrity": "sha512-whw60l7r+8ZU8Tu/Uc2yxtc4ZTZbR/PF3u1IPNKGQ6p8EICLb3Z2lAgoqw9bqYd8IkgnsaOcLzYHFckjqNsf0g==", + "dev": true, + "license": "ISC" + }, + "node_modules/ts-loader": { + "version": "9.5.2", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/ts-loader/-/ts-loader-9.5.2.tgz", + "integrity": "sha512-Qo4piXvOTWcMGIgRiuFa6nHNm+54HbYaZCKqc9eeZCLRy3XqafQgwX2F7mofrbJG3g7EEb+lkiR+z2Lic2s3Zw==", + "dev": true, + "license": "MIT", + 
"dependencies": { + "chalk": "^4.1.0", + "enhanced-resolve": "^5.0.0", + "micromatch": "^4.0.0", + "semver": "^7.3.4", + "source-map": "^0.7.4" + }, + "engines": { + "node": ">=12.0.0" + }, + "peerDependencies": { + "typescript": "*", + "webpack": "^5.0.0" + } + }, + "node_modules/ts-loader/node_modules/source-map": { + "version": "0.7.4", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/source-map/-/source-map-0.7.4.tgz", + "integrity": "sha512-l3BikUxvPOcn5E74dZiq5BGsTb5yEwhaTSzccU6t4sDOH8NWJCstKO5QT2CvtFoK6F0saL7p9xHAqHOlCPJygA==", + "dev": true, + "license": "BSD-3-Clause", + "engines": { + "node": ">= 8" + } + }, + "node_modules/tslib": { + "version": "2.8.1", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/tslib/-/tslib-2.8.1.tgz", + "integrity": "sha512-oJFu94HQb+KVduSUQL7wnpmqnfmLsOA/nAh6b6EH0wCEoK0/mPeXU6c3wKDV83MkOuHPRHtSXKKU99IBazS/2w==", + "dev": true, + "license": "0BSD" + }, + "node_modules/tuf-js": { + "version": "1.1.7", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/tuf-js/-/tuf-js-1.1.7.tgz", + "integrity": "sha512-i3P9Kgw3ytjELUfpuKVDNBJvk4u5bXL6gskv572mcevPbSKCV3zt3djhmlEQ65yERjIbOSncy7U4cQJaB1CBCg==", + "dev": true, + "license": "MIT", + "dependencies": { + "@tufjs/models": "1.0.4", + "debug": "^4.3.4", + "make-fetch-happen": "^11.1.1" + }, + "engines": { + "node": "^14.17.0 || ^16.13.0 || >=18.0.0" + } + }, + "node_modules/tuf-js/node_modules/@npmcli/fs": { + "version": "3.1.1", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/@npmcli/fs/-/fs-3.1.1.tgz", + "integrity": "sha512-q9CRWjpHCMIh5sVyefoD1cA7PkvILqCZsnSOEUUivORLjxCO/Irmue2DprETiNgEqktDBZaM1Bi+jrarx1XdCg==", + "dev": true, + "license": "ISC", + "dependencies": { + "semver": "^7.3.5" + }, + "engines": { + "node": "^14.17.0 || ^16.13.0 || >=18.0.0" + } + }, + "node_modules/tuf-js/node_modules/@tootallnate/once": { + "version": "2.0.0", + "resolved": 
"https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/@tootallnate/once/-/once-2.0.0.tgz", + "integrity": "sha512-XCuKFP5PS55gnMVu3dty8KPatLqUoy/ZYzDzAGCQ8JNFCkLXzmI7vNHCR+XpbZaMWQK/vQubr7PkYq8g470J/A==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 10" + } + }, + "node_modules/tuf-js/node_modules/brace-expansion": { + "version": "2.0.1", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/brace-expansion/-/brace-expansion-2.0.1.tgz", + "integrity": "sha512-XnAIvQ8eM+kC6aULx6wuQiwVsnzsi9d3WxzV3FpWTGA19F621kwdbsAcFKXgKUHZWsy+mY6iL1sHTxWEFCytDA==", + "dev": true, + "license": "MIT", + "dependencies": { + "balanced-match": "^1.0.0" + } + }, + "node_modules/tuf-js/node_modules/cacache": { + "version": "17.1.4", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/cacache/-/cacache-17.1.4.tgz", + "integrity": "sha512-/aJwG2l3ZMJ1xNAnqbMpA40of9dj/pIH3QfiuQSqjfPJF747VR0J/bHn+/KdNnHKc6XQcWt/AfRSBft82W1d2A==", + "dev": true, + "license": "ISC", + "dependencies": { + "@npmcli/fs": "^3.1.0", + "fs-minipass": "^3.0.0", + "glob": "^10.2.2", + "lru-cache": "^7.7.1", + "minipass": "^7.0.3", + "minipass-collect": "^1.0.2", + "minipass-flush": "^1.0.5", + "minipass-pipeline": "^1.2.4", + "p-map": "^4.0.0", + "ssri": "^10.0.0", + "tar": "^6.1.11", + "unique-filename": "^3.0.0" + }, + "engines": { + "node": "^14.17.0 || ^16.13.0 || >=18.0.0" + } + }, + "node_modules/tuf-js/node_modules/cacache/node_modules/minipass": { + "version": "7.1.2", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/minipass/-/minipass-7.1.2.tgz", + "integrity": "sha512-qOOzS1cBTWYF4BH8fVePDBOO9iptMnGUEZwNc/cMWnTV2nVLZ7VoNWEPHkYczZA0pdoA7dl6e7FL659nX9S2aw==", + "dev": true, + "license": "ISC", + "engines": { + "node": ">=16 || 14 >=14.17" + } + }, + "node_modules/tuf-js/node_modules/fs-minipass": { + "version": "3.0.3", + "resolved": 
"https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/fs-minipass/-/fs-minipass-3.0.3.tgz", + "integrity": "sha512-XUBA9XClHbnJWSfBzjkm6RvPsyg3sryZt06BEQoXcF7EK/xpGaQYJgQKDJSUH5SGZ76Y7pFx1QBnXz09rU5Fbw==", + "dev": true, + "license": "ISC", + "dependencies": { + "minipass": "^7.0.3" + }, + "engines": { + "node": "^14.17.0 || ^16.13.0 || >=18.0.0" + } + }, + "node_modules/tuf-js/node_modules/fs-minipass/node_modules/minipass": { + "version": "7.1.2", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/minipass/-/minipass-7.1.2.tgz", + "integrity": "sha512-qOOzS1cBTWYF4BH8fVePDBOO9iptMnGUEZwNc/cMWnTV2nVLZ7VoNWEPHkYczZA0pdoA7dl6e7FL659nX9S2aw==", + "dev": true, + "license": "ISC", + "engines": { + "node": ">=16 || 14 >=14.17" + } + }, + "node_modules/tuf-js/node_modules/glob": { + "version": "10.4.5", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/glob/-/glob-10.4.5.tgz", + "integrity": "sha512-7Bv8RF0k6xjo7d4A/PxYLbUCfb6c+Vpd2/mB2yRDlew7Jb5hEXiCD9ibfO7wpk8i4sevK6DFny9h7EYbM3/sHg==", + "dev": true, + "license": "ISC", + "dependencies": { + "foreground-child": "^3.1.0", + "jackspeak": "^3.1.2", + "minimatch": "^9.0.4", + "minipass": "^7.1.2", + "package-json-from-dist": "^1.0.0", + "path-scurry": "^1.11.1" + }, + "bin": { + "glob": "dist/esm/bin.mjs" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/tuf-js/node_modules/glob/node_modules/minipass": { + "version": "7.1.2", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/minipass/-/minipass-7.1.2.tgz", + "integrity": "sha512-qOOzS1cBTWYF4BH8fVePDBOO9iptMnGUEZwNc/cMWnTV2nVLZ7VoNWEPHkYczZA0pdoA7dl6e7FL659nX9S2aw==", + "dev": true, + "license": "ISC", + "engines": { + "node": ">=16 || 14 >=14.17" + } + }, + "node_modules/tuf-js/node_modules/http-proxy-agent": { + "version": "5.0.0", + "resolved": 
"https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/http-proxy-agent/-/http-proxy-agent-5.0.0.tgz", + "integrity": "sha512-n2hY8YdoRE1i7r6M0w9DIw5GgZN0G25P8zLCRQ8rjXtTU3vsNFBI/vWK/UIeE6g5MUUz6avwAPXmL6Fy9D/90w==", + "dev": true, + "license": "MIT", + "dependencies": { + "@tootallnate/once": "2", + "agent-base": "6", + "debug": "4" + }, + "engines": { + "node": ">= 6" + } + }, + "node_modules/tuf-js/node_modules/lru-cache": { + "version": "7.18.3", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/lru-cache/-/lru-cache-7.18.3.tgz", + "integrity": "sha512-jumlc0BIUrS3qJGgIkWZsyfAM7NCWiBcCDhnd+3NNM5KbBmLTgHVfWBcg6W+rLUsIpzpERPsvwUP7CckAQSOoA==", + "dev": true, + "license": "ISC", + "engines": { + "node": ">=12" + } + }, + "node_modules/tuf-js/node_modules/make-fetch-happen": { + "version": "11.1.1", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/make-fetch-happen/-/make-fetch-happen-11.1.1.tgz", + "integrity": "sha512-rLWS7GCSTcEujjVBs2YqG7Y4643u8ucvCJeSRqiLYhesrDuzeuFIk37xREzAsfQaqzl8b9rNCE4m6J8tvX4Q8w==", + "dev": true, + "license": "ISC", + "dependencies": { + "agentkeepalive": "^4.2.1", + "cacache": "^17.0.0", + "http-cache-semantics": "^4.1.1", + "http-proxy-agent": "^5.0.0", + "https-proxy-agent": "^5.0.0", + "is-lambda": "^1.0.1", + "lru-cache": "^7.7.1", + "minipass": "^5.0.0", + "minipass-fetch": "^3.0.0", + "minipass-flush": "^1.0.5", + "minipass-pipeline": "^1.2.4", + "negotiator": "^0.6.3", + "promise-retry": "^2.0.1", + "socks-proxy-agent": "^7.0.0", + "ssri": "^10.0.0" + }, + "engines": { + "node": "^14.17.0 || ^16.13.0 || >=18.0.0" + } + }, + "node_modules/tuf-js/node_modules/minimatch": { + "version": "9.0.5", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/minimatch/-/minimatch-9.0.5.tgz", + "integrity": "sha512-G6T0ZX48xgozx7587koeX9Ys2NYy6Gmv//P89sEte9V9whIapMNF4idKxnW2QtCcLiTWlb/wfCabAtAFWhhBow==", + "dev": true, + "license": "ISC", + 
"dependencies": { + "brace-expansion": "^2.0.1" + }, + "engines": { + "node": ">=16 || 14 >=14.17" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/tuf-js/node_modules/minipass": { + "version": "5.0.0", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/minipass/-/minipass-5.0.0.tgz", + "integrity": "sha512-3FnjYuehv9k6ovOEbyOswadCDPX1piCfhV8ncmYtHOjuPwylVWsghTLo7rabjC3Rx5xD4HDx8Wm1xnMF7S5qFQ==", + "dev": true, + "license": "ISC", + "engines": { + "node": ">=8" + } + }, + "node_modules/tuf-js/node_modules/minipass-fetch": { + "version": "3.0.5", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/minipass-fetch/-/minipass-fetch-3.0.5.tgz", + "integrity": "sha512-2N8elDQAtSnFV0Dk7gt15KHsS0Fyz6CbYZ360h0WTYV1Ty46li3rAXVOQj1THMNLdmrD9Vt5pBPtWtVkpwGBqg==", + "dev": true, + "license": "MIT", + "dependencies": { + "minipass": "^7.0.3", + "minipass-sized": "^1.0.3", + "minizlib": "^2.1.2" + }, + "engines": { + "node": "^14.17.0 || ^16.13.0 || >=18.0.0" + }, + "optionalDependencies": { + "encoding": "^0.1.13" + } + }, + "node_modules/tuf-js/node_modules/minipass-fetch/node_modules/minipass": { + "version": "7.1.2", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/minipass/-/minipass-7.1.2.tgz", + "integrity": "sha512-qOOzS1cBTWYF4BH8fVePDBOO9iptMnGUEZwNc/cMWnTV2nVLZ7VoNWEPHkYczZA0pdoA7dl6e7FL659nX9S2aw==", + "dev": true, + "license": "ISC", + "engines": { + "node": ">=16 || 14 >=14.17" + } + }, + "node_modules/tuf-js/node_modules/socks-proxy-agent": { + "version": "7.0.0", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/socks-proxy-agent/-/socks-proxy-agent-7.0.0.tgz", + "integrity": "sha512-Fgl0YPZ902wEsAyiQ+idGd1A7rSFx/ayC1CQVMw5P+EQx2V0SgpGtf6OKFhVjPflPUl9YMmEOnmfjCdMUsygww==", + "dev": true, + "license": "MIT", + "dependencies": { + "agent-base": "^6.0.2", + "debug": "^4.3.3", + "socks": "^2.6.2" + }, + 
"engines": { + "node": ">= 10" + } + }, + "node_modules/tuf-js/node_modules/ssri": { + "version": "10.0.6", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/ssri/-/ssri-10.0.6.tgz", + "integrity": "sha512-MGrFH9Z4NP9Iyhqn16sDtBpRRNJ0Y2hNa6D65h736fVSaPCHr4DM4sWUNvVaSuC+0OBGhwsrydQwmgfg5LncqQ==", + "dev": true, + "license": "ISC", + "dependencies": { + "minipass": "^7.0.3" + }, + "engines": { + "node": "^14.17.0 || ^16.13.0 || >=18.0.0" + } + }, + "node_modules/tuf-js/node_modules/ssri/node_modules/minipass": { + "version": "7.1.2", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/minipass/-/minipass-7.1.2.tgz", + "integrity": "sha512-qOOzS1cBTWYF4BH8fVePDBOO9iptMnGUEZwNc/cMWnTV2nVLZ7VoNWEPHkYczZA0pdoA7dl6e7FL659nX9S2aw==", + "dev": true, + "license": "ISC", + "engines": { + "node": ">=16 || 14 >=14.17" + } + }, + "node_modules/tuf-js/node_modules/unique-filename": { + "version": "3.0.0", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/unique-filename/-/unique-filename-3.0.0.tgz", + "integrity": "sha512-afXhuC55wkAmZ0P18QsVE6kp8JaxrEokN2HGIoIVv2ijHQd419H0+6EigAFcIzXeMIkcIkNBpB3L/DXB3cTS/g==", + "dev": true, + "license": "ISC", + "dependencies": { + "unique-slug": "^4.0.0" + }, + "engines": { + "node": "^14.17.0 || ^16.13.0 || >=18.0.0" + } + }, + "node_modules/tuf-js/node_modules/unique-slug": { + "version": "4.0.0", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/unique-slug/-/unique-slug-4.0.0.tgz", + "integrity": "sha512-WrcA6AyEfqDX5bWige/4NQfPZMtASNVxdmWR76WESYQVAACSgWcR6e9i0mofqqBxYFtL4oAxPIptY73/0YE1DQ==", + "dev": true, + "license": "ISC", + "dependencies": { + "imurmurhash": "^0.1.4" + }, + "engines": { + "node": "^14.17.0 || ^16.13.0 || >=18.0.0" + } + }, + "node_modules/type-fest": { + "version": "0.21.3", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/type-fest/-/type-fest-0.21.3.tgz", + 
"integrity": "sha512-t0rzBq87m3fVcduHDUFhKmyyX+9eo6WQjZvf51Ea/M0Q7+T374Jp1aUiyUl0GKxp8M/OETVHSDvmkyPgvX+X2w==", + "dev": true, + "license": "(MIT OR CC0-1.0)", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/typescript": { + "version": "5.8.3", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/typescript/-/typescript-5.8.3.tgz", + "integrity": "sha512-p1diW6TqL9L07nNxvRMM7hMMw4c5XOo/1ibL4aAIGmSAt9slTE1Xgw5KWuof2uTOvCg9BY7ZRi+GaF+7sfgPeQ==", + "dev": true, + "license": "Apache-2.0", + "bin": { + "tsc": "bin/tsc", + "tsserver": "bin/tsserver" + }, + "engines": { + "node": ">=14.17" + } + }, + "node_modules/undici-types": { + "version": "6.21.0", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/undici-types/-/undici-types-6.21.0.tgz", + "integrity": "sha512-iwDZqg0QAGrg9Rav5H4n0M64c3mkR59cJ6wQp+7C4nI0gsmExaedaYLNO44eT4AtBBwjbTiGPMlt2Md0T9H9JQ==", + "license": "MIT" + }, + "node_modules/unicode-canonical-property-names-ecmascript": { + "version": "2.0.1", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/unicode-canonical-property-names-ecmascript/-/unicode-canonical-property-names-ecmascript-2.0.1.tgz", + "integrity": "sha512-dA8WbNeb2a6oQzAQ55YlT5vQAWGV9WXOsi3SskE3bcCdM0P4SDd+24zS/OCacdRq5BkdsRj9q3Pg6YyQoxIGqg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=4" + } + }, + "node_modules/unicode-match-property-ecmascript": { + "version": "2.0.0", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/unicode-match-property-ecmascript/-/unicode-match-property-ecmascript-2.0.0.tgz", + "integrity": "sha512-5kaZCrbp5mmbz5ulBkDkbY0SsPOjKqVS35VpL9ulMPfSl0J0Xsm+9Evphv9CoIZFwre7aJoa94AY6seMKGVN5Q==", + "dev": true, + "license": "MIT", + "dependencies": { + "unicode-canonical-property-names-ecmascript": "^2.0.0", + "unicode-property-aliases-ecmascript": "^2.0.0" 
+ }, + "engines": { + "node": ">=4" + } + }, + "node_modules/unicode-match-property-value-ecmascript": { + "version": "2.2.0", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/unicode-match-property-value-ecmascript/-/unicode-match-property-value-ecmascript-2.2.0.tgz", + "integrity": "sha512-4IehN3V/+kkr5YeSSDDQG8QLqO26XpL2XP3GQtqwlT/QYSECAwFztxVHjlbh0+gjJ3XmNLS0zDsbgs9jWKExLg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=4" + } + }, + "node_modules/unicode-property-aliases-ecmascript": { + "version": "2.1.0", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/unicode-property-aliases-ecmascript/-/unicode-property-aliases-ecmascript-2.1.0.tgz", + "integrity": "sha512-6t3foTQI9qne+OZoVQB/8x8rk2k1eVy1gRXhV3oFQ5T6R1dqQ1xtin3XqSlx3+ATBkliTaR/hHyJBm+LVPNM8w==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=4" + } + }, + "node_modules/unique-filename": { + "version": "1.1.1", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/unique-filename/-/unique-filename-1.1.1.tgz", + "integrity": "sha512-Vmp0jIp2ln35UTXuryvjzkjGdRyf9b2lTXuSYUiPmzRcl3FDtYqAwOnTJkAngD9SWhnoJzDbTKwaOrZ+STtxNQ==", + "dev": true, + "license": "ISC", + "dependencies": { + "unique-slug": "^2.0.0" + } + }, + "node_modules/unique-slug": { + "version": "2.0.2", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/unique-slug/-/unique-slug-2.0.2.tgz", + "integrity": "sha512-zoWr9ObaxALD3DOPfjPSqxt4fnZiWblxHIgeWqW8x7UqDzEtHEQLzji2cuJYQFCU6KmoJikOYAZlrTHHebjx2w==", + "dev": true, + "license": "ISC", + "dependencies": { + "imurmurhash": "^0.1.4" + } + }, + "node_modules/universal-user-agent": { + "version": "6.0.1", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/universal-user-agent/-/universal-user-agent-6.0.1.tgz", + "integrity": 
"sha512-yCzhz6FN2wU1NiiQRogkTQszlQSlpWaw8SvVegAc+bDxbzHgh1vX8uIe8OYyMH6DwH+sdTJsgMl36+mSMdRJIQ==", + "dev": true, + "license": "ISC" + }, + "node_modules/untildify": { + "version": "4.0.0", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/untildify/-/untildify-4.0.0.tgz", + "integrity": "sha512-KK8xQ1mkzZeg9inewmFVDNkg3l5LUhoq9kN6iWYB/CC9YMG8HA+c1Q8HwDe6dEX7kErrEVNVBO3fWsVq5iDgtw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/update-browserslist-db": { + "version": "1.1.3", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/update-browserslist-db/-/update-browserslist-db-1.1.3.tgz", + "integrity": "sha512-UxhIZQ+QInVdunkDAaiazvvT/+fXL5Osr0JZlJulepYu6Jd7qJtDZjlur0emRlT71EN3ScPoE7gvsuIKKNavKw==", + "dev": true, + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/browserslist" + }, + { + "type": "tidelift", + "url": "https://tidelift.com/funding/github/npm/browserslist" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "license": "MIT", + "dependencies": { + "escalade": "^3.2.0", + "picocolors": "^1.1.1" + }, + "bin": { + "update-browserslist-db": "cli.js" + }, + "peerDependencies": { + "browserslist": ">= 4.21.0" + } + }, + "node_modules/util": { + "version": "0.12.5", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/util/-/util-0.12.5.tgz", + "integrity": "sha512-kZf/K6hEIrWHI6XqOFUiiMa+79wE/D8Q+NCNAWclkyg3b4d2k7s0QGepNjiABc+aR3N1PAyHL7p6UcLY6LmrnA==", + "license": "MIT", + "dependencies": { + "inherits": "^2.0.3", + "is-arguments": "^1.0.4", + "is-generator-function": "^1.0.7", + "is-typed-array": "^1.1.3", + "which-typed-array": "^1.1.2" + } + }, + "node_modules/util-deprecate": { + "version": "1.0.2", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/util-deprecate/-/util-deprecate-1.0.2.tgz", + "integrity": 
"sha512-EPD5q1uXyFxJpCrLnCc1nHnq3gOa6DZBocAIiI2TaSCA7VCJ1UJDMagCzIkXNsUYfD1daK//LTEQ8xiIbrHtcw==", + "license": "MIT" + }, + "node_modules/validate-npm-package-license": { + "version": "3.0.4", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/validate-npm-package-license/-/validate-npm-package-license-3.0.4.tgz", + "integrity": "sha512-DpKm2Ui/xN7/HQKCtpZxoRWBhZ9Z0kqtygG8XCgNQ8ZlDnxuQmWhj566j8fN4Cu3/JmbhsDo7fcAJq4s9h27Ew==", + "dev": true, + "license": "Apache-2.0", + "dependencies": { + "spdx-correct": "^3.0.0", + "spdx-expression-parse": "^3.0.0" + } + }, + "node_modules/validate-npm-package-name": { + "version": "3.0.0", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/validate-npm-package-name/-/validate-npm-package-name-3.0.0.tgz", + "integrity": "sha512-M6w37eVCMMouJ9V/sdPGnC5H4uDr73/+xdq0FBLO3TFFX1+7wiUY6Es328NN+y43tmY+doUdN9g9J21vqB7iLw==", + "dev": true, + "license": "ISC", + "dependencies": { + "builtins": "^1.0.3" + } + }, + "node_modules/vinyl": { + "version": "2.2.1", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/vinyl/-/vinyl-2.2.1.tgz", + "integrity": "sha512-LII3bXRFBZLlezoG5FfZVcXflZgWP/4dCwKtxd5ky9+LOtM4CS3bIRQsmR1KMnMW07jpE8fqR2lcxPZ+8sJIcw==", + "dev": true, + "license": "MIT", + "dependencies": { + "clone": "^2.1.1", + "clone-buffer": "^1.0.0", + "clone-stats": "^1.0.0", + "cloneable-readable": "^1.0.0", + "remove-trailing-separator": "^1.0.1", + "replace-ext": "^1.0.0" + }, + "engines": { + "node": ">= 0.10" + } + }, + "node_modules/vinyl-file": { + "version": "3.0.0", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/vinyl-file/-/vinyl-file-3.0.0.tgz", + "integrity": "sha512-BoJDj+ca3D9xOuPEM6RWVtWQtvEPQiQYn82LvdxhLWplfQsBzBqtgK0yhCP0s1BNTi6dH9BO+dzybvyQIacifg==", + "dev": true, + "license": "MIT", + "dependencies": { + "graceful-fs": "^4.1.2", + "pify": "^2.3.0", + "strip-bom-buf": "^1.0.0", + "strip-bom-stream": 
"^2.0.0", + "vinyl": "^2.0.1" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/walk-up-path": { + "version": "1.0.0", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/walk-up-path/-/walk-up-path-1.0.0.tgz", + "integrity": "sha512-hwj/qMDUEjCU5h0xr90KGCf0tg0/LgJbmOWgrWKYlcJZM7XvquvUJZ0G/HMGr7F7OQMOUuPHWP9JpriinkAlkg==", + "dev": true, + "license": "ISC" + }, + "node_modules/watchpack": { + "version": "2.4.2", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/watchpack/-/watchpack-2.4.2.tgz", + "integrity": "sha512-TnbFSbcOCcDgjZ4piURLCbJ3nJhznVh9kw6F6iokjiFPl8ONxe9A6nMDVXDiNbrSfLILs6vB07F7wLBrwPYzJw==", + "dev": true, + "license": "MIT", + "peer": true, + "dependencies": { + "glob-to-regexp": "^0.4.1", + "graceful-fs": "^4.1.2" + }, + "engines": { + "node": ">=10.13.0" + } + }, + "node_modules/wcwidth": { + "version": "1.0.1", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/wcwidth/-/wcwidth-1.0.1.tgz", + "integrity": "sha512-XHPEwS0q6TaxcvG85+8EYkbiCux2XtWG2mkc47Ng2A77BQu9+DqIOJldST4HgPkuea7dvKSj5VgX3P1d4rW8Tg==", + "dev": true, + "license": "MIT", + "dependencies": { + "defaults": "^1.0.3" + } + }, + "node_modules/webidl-conversions": { + "version": "3.0.1", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/webidl-conversions/-/webidl-conversions-3.0.1.tgz", + "integrity": "sha512-2JAn3z8AR6rjK8Sm8orRC0h/bcl/DqL7tRPdGZ4I1CjdF+EaMLmYxBHyXuKL849eucPFhvBoxMsflfOb8kxaeQ==", + "license": "BSD-2-Clause" + }, + "node_modules/webpack": { + "version": "5.99.6", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/webpack/-/webpack-5.99.6.tgz", + "integrity": "sha512-TJOLrJ6oeccsGWPl7ujCYuc0pIq2cNsuD6GZDma8i5o5Npvcco/z+NKvZSFsP0/x6SShVb0+X2JK/JHUjKY9dQ==", + "dev": true, + "license": "MIT", + "peer": true, + "dependencies": { + "@types/eslint-scope": "^3.7.7", + "@types/estree": "^1.0.6", + 
"@webassemblyjs/ast": "^1.14.1", + "@webassemblyjs/wasm-edit": "^1.14.1", + "@webassemblyjs/wasm-parser": "^1.14.1", + "acorn": "^8.14.0", + "browserslist": "^4.24.0", + "chrome-trace-event": "^1.0.2", + "enhanced-resolve": "^5.17.1", + "es-module-lexer": "^1.2.1", + "eslint-scope": "5.1.1", + "events": "^3.2.0", + "glob-to-regexp": "^0.4.1", + "graceful-fs": "^4.2.11", + "json-parse-even-better-errors": "^2.3.1", + "loader-runner": "^4.2.0", + "mime-types": "^2.1.27", + "neo-async": "^2.6.2", + "schema-utils": "^4.3.0", + "tapable": "^2.1.1", + "terser-webpack-plugin": "^5.3.11", + "watchpack": "^2.4.1", + "webpack-sources": "^3.2.3" + }, + "bin": { + "webpack": "bin/webpack.js" + }, + "engines": { + "node": ">=10.13.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/webpack" + }, + "peerDependenciesMeta": { + "webpack-cli": { + "optional": true + } + } + }, + "node_modules/webpack-cli": { + "version": "5.1.4", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/webpack-cli/-/webpack-cli-5.1.4.tgz", + "integrity": "sha512-pIDJHIEI9LR0yxHXQ+Qh95k2EvXpWzZ5l+d+jIo+RdSm9MiHfzazIxwwni/p7+x4eJZuvG1AJwgC4TNQ7NRgsg==", + "dev": true, + "license": "MIT", + "dependencies": { + "@discoveryjs/json-ext": "^0.5.0", + "@webpack-cli/configtest": "^2.1.1", + "@webpack-cli/info": "^2.0.2", + "@webpack-cli/serve": "^2.0.5", + "colorette": "^2.0.14", + "commander": "^10.0.1", + "cross-spawn": "^7.0.3", + "envinfo": "^7.7.3", + "fastest-levenshtein": "^1.0.12", + "import-local": "^3.0.2", + "interpret": "^3.1.1", + "rechoir": "^0.8.0", + "webpack-merge": "^5.7.3" + }, + "bin": { + "webpack-cli": "bin/cli.js" + }, + "engines": { + "node": ">=14.15.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/webpack" + }, + "peerDependencies": { + "webpack": "5.x.x" + }, + "peerDependenciesMeta": { + "@webpack-cli/generators": { + "optional": true + }, + "webpack-bundle-analyzer": { + 
"optional": true + }, + "webpack-dev-server": { + "optional": true + } + } + }, + "node_modules/webpack-cli/node_modules/commander": { + "version": "10.0.1", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/commander/-/commander-10.0.1.tgz", + "integrity": "sha512-y4Mg2tXshplEbSGzx7amzPwKKOCGuoSRP/CjEdwwk0FOGlUbq6lKuoyDZTNZkmxHdJtp54hdfY/JUrdL7Xfdug==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=14" + } + }, + "node_modules/webpack-merge": { + "version": "5.10.0", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/webpack-merge/-/webpack-merge-5.10.0.tgz", + "integrity": "sha512-+4zXKdx7UnO+1jaN4l2lHVD+mFvnlZQP/6ljaJVb4SZiwIKeUnrT5l0gkT8z+n4hKpC+jpOv6O9R+gLtag7pSA==", + "dev": true, + "license": "MIT", + "dependencies": { + "clone-deep": "^4.0.1", + "flat": "^5.0.2", + "wildcard": "^2.0.0" + }, + "engines": { + "node": ">=10.0.0" + } + }, + "node_modules/webpack-sources": { + "version": "3.2.3", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/webpack-sources/-/webpack-sources-3.2.3.tgz", + "integrity": "sha512-/DyMEOrDgLKKIG0fmvtz+4dUX/3Ghozwgm6iPp8KRhvn+eQf9+Q7GWxVNMk3+uCPWfdXYC4ExGBckIXdFEfH1w==", + "dev": true, + "license": "MIT", + "peer": true, + "engines": { + "node": ">=10.13.0" + } + }, + "node_modules/whatwg-url": { + "version": "5.0.0", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/whatwg-url/-/whatwg-url-5.0.0.tgz", + "integrity": "sha512-saE57nupxk6v3HY35+jzBwYa0rKSy0XR8JSxZPwgLr7ys0IBzhGviA1/TUGJLmSVqs8pb9AnvICXEuOHLprYTw==", + "license": "MIT", + "dependencies": { + "tr46": "~0.0.3", + "webidl-conversions": "^3.0.0" + } + }, + "node_modules/which": { + "version": "2.0.2", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/which/-/which-2.0.2.tgz", + "integrity": "sha512-BLI3Tl1TW3Pvl70l3yq3Y64i+awpwXqsGBYWkkqMtnbXgrMD+yj7rhW0kuEDxzJaYXGjEW5ogapKNMEKNMjibA==", + "dev": true, 
+ "license": "ISC", + "dependencies": { + "isexe": "^2.0.0" + }, + "bin": { + "node-which": "bin/node-which" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/which-pm": { + "version": "2.2.0", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/which-pm/-/which-pm-2.2.0.tgz", + "integrity": "sha512-MOiaDbA5ZZgUjkeMWM5EkJp4loW5ZRoa5bc3/aeMox/PJelMhE6t7S/mLuiY43DBupyxH+S0U1bTui9kWUlmsw==", + "dev": true, + "license": "MIT", + "dependencies": { + "load-yaml-file": "^0.2.0", + "path-exists": "^4.0.0" + }, + "engines": { + "node": ">=8.15" + } + }, + "node_modules/which-typed-array": { + "version": "1.1.19", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/which-typed-array/-/which-typed-array-1.1.19.tgz", + "integrity": "sha512-rEvr90Bck4WZt9HHFC4DJMsjvu7x+r6bImz0/BrbWb7A2djJ8hnZMrWnHo9F8ssv0OMErasDhftrfROTyqSDrw==", + "license": "MIT", + "dependencies": { + "available-typed-arrays": "^1.0.7", + "call-bind": "^1.0.8", + "call-bound": "^1.0.4", + "for-each": "^0.3.5", + "get-proto": "^1.0.1", + "gopd": "^1.2.0", + "has-tostringtag": "^1.0.2" + }, + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/wide-align": { + "version": "1.1.5", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/wide-align/-/wide-align-1.1.5.tgz", + "integrity": "sha512-eDMORYaPNZ4sQIuuYPDHdQvf4gyCF9rEEV/yPxGfwPkRodwEgiMUUXTx/dex+Me0wxx53S+NgUHaP7y3MGlDmg==", + "dev": true, + "license": "ISC", + "dependencies": { + "string-width": "^1.0.2 || 2 || 3 || 4" + } + }, + "node_modules/wildcard": { + "version": "2.0.1", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/wildcard/-/wildcard-2.0.1.tgz", + "integrity": "sha512-CC1bOL87PIWSBhDcTrdeLo6eGT7mCFtrg0uIJtqJUFyK+eJnzl8A1niH56uu7KMa5XFrtiV+AQuHO3n7DsHnLQ==", + "dev": true, + "license": "MIT" + }, + "node_modules/wrap-ansi": { + "version": "6.2.0", 
+ "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/wrap-ansi/-/wrap-ansi-6.2.0.tgz", + "integrity": "sha512-r6lPcBGxZXlIcymEu7InxDMhdW0KDxpLgoFLcguasxCaJ/SOIZwINatK9KY/tf+ZrlywOKU0UDj3ATXUBfxJXA==", + "dev": true, + "license": "MIT", + "dependencies": { + "ansi-styles": "^4.0.0", + "string-width": "^4.1.0", + "strip-ansi": "^6.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/wrap-ansi-cjs": { + "name": "wrap-ansi", + "version": "7.0.0", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/wrap-ansi/-/wrap-ansi-7.0.0.tgz", + "integrity": "sha512-YVGIj2kamLSTxw6NsZjoBxfSwsn0ycdesmc4p+Q21c5zPuZ1pl+NfxVdxPtdHvmNVOQ6XSYG4AUtyt/Fi7D16Q==", + "dev": true, + "license": "MIT", + "dependencies": { + "ansi-styles": "^4.0.0", + "string-width": "^4.1.0", + "strip-ansi": "^6.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/wrap-ansi?sponsor=1" + } + }, + "node_modules/wrappy": { + "version": "1.0.2", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/wrappy/-/wrappy-1.0.2.tgz", + "integrity": "sha512-l4Sp/DRseor9wL6EvV2+TuQn63dMkPjZ/sp9XkghTEbV9KlPS1xUsZ3u7/IQO4wxtcFB4bgpQPRcR3QCvezPcQ==", + "dev": true, + "license": "ISC" + }, + "node_modules/write-file-atomic": { + "version": "4.0.2", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/write-file-atomic/-/write-file-atomic-4.0.2.tgz", + "integrity": "sha512-7KxauUdBmSdWnmpaGFg+ppNjKF8uNLry8LyzjauQDOVONfFLNKrKvQOxZ/VuTIcS/gge/YNahf5RIIQWTSarlg==", + "dev": true, + "license": "ISC", + "dependencies": { + "imurmurhash": "^0.1.4", + "signal-exit": "^3.0.7" + }, + "engines": { + "node": "^12.13.0 || ^14.15.0 || >=16.0.0" + } + }, + "node_modules/y18n": { + "version": "5.0.8", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/y18n/-/y18n-5.0.8.tgz", + "integrity": 
"sha512-0pfFzegeDWJHJIAmTLRP2DwHjdF5s7jo9tuztdQxAhINCdvS+3nGINqPd00AphqJR/0LhANUS6/+7SCb98YOfA==", + "license": "ISC", + "engines": { + "node": ">=10" + } + }, + "node_modules/yallist": { + "version": "4.0.0", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/yallist/-/yallist-4.0.0.tgz", + "integrity": "sha512-3wdGidZyq5PB084XLES5TpOSRA3wjXAlIWMhum2kRcv/41Sn2emQ0dycQW4uZXLejwKvg6EsvbdlVL+FYEct7A==", + "dev": true, + "license": "ISC" + }, + "node_modules/yargs": { + "version": "16.2.0", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/yargs/-/yargs-16.2.0.tgz", + "integrity": "sha512-D1mvvtDG0L5ft/jGWkLpG1+m0eQxOfaBvTNELraWj22wSVUMWxZUvYgJYcKh6jGGIkJFhH4IZPQhR4TKpc8mBw==", + "license": "MIT", + "dependencies": { + "cliui": "^7.0.2", + "escalade": "^3.1.1", + "get-caller-file": "^2.0.5", + "require-directory": "^2.1.1", + "string-width": "^4.2.0", + "y18n": "^5.0.5", + "yargs-parser": "^20.2.2" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/yargs-parser": { + "version": "20.2.9", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/yargs-parser/-/yargs-parser-20.2.9.tgz", + "integrity": "sha512-y11nGElTIV+CT3Zv9t7VKl+Q3hTQoT9a1Qzezhhl6Rp21gJ/IVTW7Z3y9EWXhuUBC2Shnf+DX0antecpAwSP8w==", + "license": "ISC", + "engines": { + "node": ">=10" + } + }, + "node_modules/yeoman-environment": { + "version": "3.19.3", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/yeoman-environment/-/yeoman-environment-3.19.3.tgz", + "integrity": "sha512-/+ODrTUHtlDPRH9qIC0JREH8+7nsRcjDl3Bxn2Xo/rvAaVvixH5275jHwg0C85g4QsF4P6M2ojfScPPAl+pLAg==", + "dev": true, + "license": "BSD-2-Clause", + "dependencies": { + "@npmcli/arborist": "^4.0.4", + "are-we-there-yet": "^2.0.0", + "arrify": "^2.0.1", + "binaryextensions": "^4.15.0", + "chalk": "^4.1.0", + "cli-table": "^0.3.1", + "commander": "7.1.0", + "dateformat": "^4.5.0", + "debug": "^4.1.1", + "diff": "^5.0.0", 
+ "error": "^10.4.0", + "escape-string-regexp": "^4.0.0", + "execa": "^5.0.0", + "find-up": "^5.0.0", + "globby": "^11.0.1", + "grouped-queue": "^2.0.0", + "inquirer": "^8.0.0", + "is-scoped": "^2.1.0", + "isbinaryfile": "^4.0.10", + "lodash": "^4.17.10", + "log-symbols": "^4.0.0", + "mem-fs": "^1.2.0 || ^2.0.0", + "mem-fs-editor": "^8.1.2 || ^9.0.0", + "minimatch": "^3.0.4", + "npmlog": "^5.0.1", + "p-queue": "^6.6.2", + "p-transform": "^1.3.0", + "pacote": "^12.0.2", + "preferred-pm": "^3.0.3", + "pretty-bytes": "^5.3.0", + "readable-stream": "^4.3.0", + "semver": "^7.1.3", + "slash": "^3.0.0", + "strip-ansi": "^6.0.0", + "text-table": "^0.2.0", + "textextensions": "^5.12.0", + "untildify": "^4.0.0" + }, + "bin": { + "yoe": "cli/index.js" + }, + "engines": { + "node": ">=12.10.0" + } + }, + "node_modules/yeoman-environment/node_modules/commander": { + "version": "7.1.0", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/commander/-/commander-7.1.0.tgz", + "integrity": "sha512-pRxBna3MJe6HKnBGsDyMv8ETbptw3axEdYHoqNh7gu5oDcew8fs0xnivZGm06Ogk8zGAJ9VX+OPEr2GXEQK4dg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 10" + } + }, + "node_modules/yeoman-environment/node_modules/find-up": { + "version": "5.0.0", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/find-up/-/find-up-5.0.0.tgz", + "integrity": "sha512-78/PXT1wlLLDgTzDs7sjq9hzz0vXD+zn+7wypEe4fXQxCmdmqfGsEPQxmiCSQI3ajFV91bVSsvNtrJRiW6nGng==", + "dev": true, + "license": "MIT", + "dependencies": { + "locate-path": "^6.0.0", + "path-exists": "^4.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/yeoman-environment/node_modules/locate-path": { + "version": "6.0.0", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/locate-path/-/locate-path-6.0.0.tgz", + "integrity": 
"sha512-iPZK6eYjbxRu3uB4/WZ3EsEIMJFMqAoopl3R+zuq0UjcAm/MO6KCweDgPfP3elTztoKP3KtnVHxTn2NHBSDVUw==", + "dev": true, + "license": "MIT", + "dependencies": { + "p-locate": "^5.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/yeoman-environment/node_modules/p-limit": { + "version": "3.1.0", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/p-limit/-/p-limit-3.1.0.tgz", + "integrity": "sha512-TYOanM3wGwNGsZN2cVTYPArw454xnXj5qmWF1bEoAc4+cU/ol7GVh7odevjp1FNHduHc3KZMcFduxU5Xc6uJRQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "yocto-queue": "^0.1.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/yeoman-environment/node_modules/p-locate": { + "version": "5.0.0", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/p-locate/-/p-locate-5.0.0.tgz", + "integrity": "sha512-LaNjtRWUBY++zB5nE/NwcaoMylSPk+S+ZHNB1TzdbMJMny6dynpAGt7X/tl/QYq3TIeE6nxHppbo2LGymrG5Pw==", + "dev": true, + "license": "MIT", + "dependencies": { + "p-limit": "^3.0.2" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/yeoman-generator": { + "version": "5.10.0", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/yeoman-generator/-/yeoman-generator-5.10.0.tgz", + "integrity": "sha512-iDUKykV7L4nDNzeYSedRmSeJ5eMYFucnKDi6KN1WNASXErgPepKqsQw55TgXPHnmpcyOh2Dd/LAZkyc+f0qaAw==", + "dev": true, + "license": "BSD-2-Clause", + "dependencies": { + "chalk": "^4.1.0", + "dargs": "^7.0.0", + "debug": "^4.1.1", + "execa": "^5.1.1", + "github-username": "^6.0.0", + "lodash": "^4.17.11", + "mem-fs-editor": "^9.0.0", + "minimist": "^1.2.5", + "pacote": "^15.2.0", + "read-pkg-up": "^7.0.1", + "run-async": "^2.0.0", + "semver": "^7.2.1", + "shelljs": "^0.8.5", + 
"sort-keys": "^4.2.0", + "text-table": "^0.2.0" + }, + "acceptDependencies": { + "yeoman-environment": "^4.0.0" + }, + "engines": { + "node": ">=12.10.0" + }, + "peerDependencies": { + "yeoman-environment": "^3.2.0" + }, + "peerDependenciesMeta": { + "yeoman-environment": { + "optional": true + } + } + }, + "node_modules/yeoman-generator/node_modules/@npmcli/fs": { + "version": "3.1.1", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/@npmcli/fs/-/fs-3.1.1.tgz", + "integrity": "sha512-q9CRWjpHCMIh5sVyefoD1cA7PkvILqCZsnSOEUUivORLjxCO/Irmue2DprETiNgEqktDBZaM1Bi+jrarx1XdCg==", + "dev": true, + "license": "ISC", + "dependencies": { + "semver": "^7.3.5" + }, + "engines": { + "node": "^14.17.0 || ^16.13.0 || >=18.0.0" + } + }, + "node_modules/yeoman-generator/node_modules/@npmcli/git": { + "version": "4.1.0", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/@npmcli/git/-/git-4.1.0.tgz", + "integrity": "sha512-9hwoB3gStVfa0N31ymBmrX+GuDGdVA/QWShZVqE0HK2Af+7QGGrCTbZia/SW0ImUTjTne7SP91qxDmtXvDHRPQ==", + "dev": true, + "license": "ISC", + "dependencies": { + "@npmcli/promise-spawn": "^6.0.0", + "lru-cache": "^7.4.4", + "npm-pick-manifest": "^8.0.0", + "proc-log": "^3.0.0", + "promise-inflight": "^1.0.1", + "promise-retry": "^2.0.1", + "semver": "^7.3.5", + "which": "^3.0.0" + }, + "engines": { + "node": "^14.17.0 || ^16.13.0 || >=18.0.0" + } + }, + "node_modules/yeoman-generator/node_modules/@npmcli/installed-package-contents": { + "version": "2.1.0", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/@npmcli/installed-package-contents/-/installed-package-contents-2.1.0.tgz", + "integrity": "sha512-c8UuGLeZpm69BryRykLuKRyKFZYJsZSCT4aVY5ds4omyZqJ172ApzgfKJ5eV/r3HgLdUYgFVe54KSFVjKoe27w==", + "dev": true, + "license": "ISC", + "dependencies": { + "npm-bundled": "^3.0.0", + "npm-normalize-package-bin": "^3.0.0" + }, + "bin": { + "installed-package-contents": "bin/index.js" + }, + 
"engines": { + "node": "^14.17.0 || ^16.13.0 || >=18.0.0" + } + }, + "node_modules/yeoman-generator/node_modules/@npmcli/move-file": { + "version": "2.0.1", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/@npmcli/move-file/-/move-file-2.0.1.tgz", + "integrity": "sha512-mJd2Z5TjYWq/ttPLLGqArdtnC74J6bOzg4rMDnN+p1xTacZ2yPRCk2y0oSWQtygLR9YVQXgOcONrwtnk3JupxQ==", + "deprecated": "This functionality has been moved to @npmcli/fs", + "dev": true, + "license": "MIT", + "dependencies": { + "mkdirp": "^1.0.4", + "rimraf": "^3.0.2" + }, + "engines": { + "node": "^12.13.0 || ^14.15.0 || >=16.0.0" + } + }, + "node_modules/yeoman-generator/node_modules/@npmcli/node-gyp": { + "version": "3.0.0", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/@npmcli/node-gyp/-/node-gyp-3.0.0.tgz", + "integrity": "sha512-gp8pRXC2oOxu0DUE1/M3bYtb1b3/DbJ5aM113+XJBgfXdussRAsX0YOrOhdd8WvnAR6auDBvJomGAkLKA5ydxA==", + "dev": true, + "license": "ISC", + "engines": { + "node": "^14.17.0 || ^16.13.0 || >=18.0.0" + } + }, + "node_modules/yeoman-generator/node_modules/@npmcli/promise-spawn": { + "version": "6.0.2", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/@npmcli/promise-spawn/-/promise-spawn-6.0.2.tgz", + "integrity": "sha512-gGq0NJkIGSwdbUt4yhdF8ZrmkGKVz9vAdVzpOfnom+V8PLSmSOVhZwbNvZZS1EYcJN5hzzKBxmmVVAInM6HQLg==", + "dev": true, + "license": "ISC", + "dependencies": { + "which": "^3.0.0" + }, + "engines": { + "node": "^14.17.0 || ^16.13.0 || >=18.0.0" + } + }, + "node_modules/yeoman-generator/node_modules/@npmcli/run-script": { + "version": "6.0.2", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/@npmcli/run-script/-/run-script-6.0.2.tgz", + "integrity": "sha512-NCcr1uQo1k5U+SYlnIrbAh3cxy+OQT1VtqiAbxdymSlptbzBb62AjH2xXgjNCoP073hoa1CfCAcwoZ8k96C4nA==", + "dev": true, + "license": "ISC", + "dependencies": { + "@npmcli/node-gyp": "^3.0.0", + "@npmcli/promise-spawn": 
"^6.0.0", + "node-gyp": "^9.0.0", + "read-package-json-fast": "^3.0.0", + "which": "^3.0.0" + }, + "engines": { + "node": "^14.17.0 || ^16.13.0 || >=18.0.0" + } + }, + "node_modules/yeoman-generator/node_modules/@tootallnate/once": { + "version": "2.0.0", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/@tootallnate/once/-/once-2.0.0.tgz", + "integrity": "sha512-XCuKFP5PS55gnMVu3dty8KPatLqUoy/ZYzDzAGCQ8JNFCkLXzmI7vNHCR+XpbZaMWQK/vQubr7PkYq8g470J/A==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 10" + } + }, + "node_modules/yeoman-generator/node_modules/are-we-there-yet": { + "version": "3.0.1", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/are-we-there-yet/-/are-we-there-yet-3.0.1.tgz", + "integrity": "sha512-QZW4EDmGwlYur0Yyf/b2uGucHQMa8aFUP7eu9ddR73vvhFyt4V0Vl3QHPcTNJ8l6qYOBdxgXdnBXQrHilfRQBg==", + "deprecated": "This package is no longer supported.", + "dev": true, + "license": "ISC", + "dependencies": { + "delegates": "^1.0.0", + "readable-stream": "^3.6.0" + }, + "engines": { + "node": "^12.13.0 || ^14.15.0 || >=16.0.0" + } + }, + "node_modules/yeoman-generator/node_modules/brace-expansion": { + "version": "2.0.1", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/brace-expansion/-/brace-expansion-2.0.1.tgz", + "integrity": "sha512-XnAIvQ8eM+kC6aULx6wuQiwVsnzsi9d3WxzV3FpWTGA19F621kwdbsAcFKXgKUHZWsy+mY6iL1sHTxWEFCytDA==", + "dev": true, + "license": "MIT", + "dependencies": { + "balanced-match": "^1.0.0" + } + }, + "node_modules/yeoman-generator/node_modules/cacache": { + "version": "17.1.4", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/cacache/-/cacache-17.1.4.tgz", + "integrity": "sha512-/aJwG2l3ZMJ1xNAnqbMpA40of9dj/pIH3QfiuQSqjfPJF747VR0J/bHn+/KdNnHKc6XQcWt/AfRSBft82W1d2A==", + "dev": true, + "license": "ISC", + "dependencies": { + "@npmcli/fs": "^3.1.0", + "fs-minipass": "^3.0.0", + "glob": "^10.2.2", + 
"lru-cache": "^7.7.1", + "minipass": "^7.0.3", + "minipass-collect": "^1.0.2", + "minipass-flush": "^1.0.5", + "minipass-pipeline": "^1.2.4", + "p-map": "^4.0.0", + "ssri": "^10.0.0", + "tar": "^6.1.11", + "unique-filename": "^3.0.0" + }, + "engines": { + "node": "^14.17.0 || ^16.13.0 || >=18.0.0" + } + }, + "node_modules/yeoman-generator/node_modules/cacache/node_modules/minipass": { + "version": "7.1.2", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/minipass/-/minipass-7.1.2.tgz", + "integrity": "sha512-qOOzS1cBTWYF4BH8fVePDBOO9iptMnGUEZwNc/cMWnTV2nVLZ7VoNWEPHkYczZA0pdoA7dl6e7FL659nX9S2aw==", + "dev": true, + "license": "ISC", + "engines": { + "node": ">=16 || 14 >=14.17" + } + }, + "node_modules/yeoman-generator/node_modules/fs-minipass": { + "version": "3.0.3", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/fs-minipass/-/fs-minipass-3.0.3.tgz", + "integrity": "sha512-XUBA9XClHbnJWSfBzjkm6RvPsyg3sryZt06BEQoXcF7EK/xpGaQYJgQKDJSUH5SGZ76Y7pFx1QBnXz09rU5Fbw==", + "dev": true, + "license": "ISC", + "dependencies": { + "minipass": "^7.0.3" + }, + "engines": { + "node": "^14.17.0 || ^16.13.0 || >=18.0.0" + } + }, + "node_modules/yeoman-generator/node_modules/fs-minipass/node_modules/minipass": { + "version": "7.1.2", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/minipass/-/minipass-7.1.2.tgz", + "integrity": "sha512-qOOzS1cBTWYF4BH8fVePDBOO9iptMnGUEZwNc/cMWnTV2nVLZ7VoNWEPHkYczZA0pdoA7dl6e7FL659nX9S2aw==", + "dev": true, + "license": "ISC", + "engines": { + "node": ">=16 || 14 >=14.17" + } + }, + "node_modules/yeoman-generator/node_modules/gauge": { + "version": "4.0.4", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/gauge/-/gauge-4.0.4.tgz", + "integrity": "sha512-f9m+BEN5jkg6a0fZjleidjN51VE1X+mPFQ2DJ0uv1V39oCLCbsGe6yjbBnp7eK7z/+GAon99a3nHuqbuuthyPg==", + "deprecated": "This package is no longer supported.", + "dev": true, + 
"license": "ISC", + "dependencies": { + "aproba": "^1.0.3 || ^2.0.0", + "color-support": "^1.1.3", + "console-control-strings": "^1.1.0", + "has-unicode": "^2.0.1", + "signal-exit": "^3.0.7", + "string-width": "^4.2.3", + "strip-ansi": "^6.0.1", + "wide-align": "^1.1.5" + }, + "engines": { + "node": "^12.13.0 || ^14.15.0 || >=16.0.0" + } + }, + "node_modules/yeoman-generator/node_modules/glob": { + "version": "10.4.5", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/glob/-/glob-10.4.5.tgz", + "integrity": "sha512-7Bv8RF0k6xjo7d4A/PxYLbUCfb6c+Vpd2/mB2yRDlew7Jb5hEXiCD9ibfO7wpk8i4sevK6DFny9h7EYbM3/sHg==", + "dev": true, + "license": "ISC", + "dependencies": { + "foreground-child": "^3.1.0", + "jackspeak": "^3.1.2", + "minimatch": "^9.0.4", + "minipass": "^7.1.2", + "package-json-from-dist": "^1.0.0", + "path-scurry": "^1.11.1" + }, + "bin": { + "glob": "dist/esm/bin.mjs" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/yeoman-generator/node_modules/glob/node_modules/minipass": { + "version": "7.1.2", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/minipass/-/minipass-7.1.2.tgz", + "integrity": "sha512-qOOzS1cBTWYF4BH8fVePDBOO9iptMnGUEZwNc/cMWnTV2nVLZ7VoNWEPHkYczZA0pdoA7dl6e7FL659nX9S2aw==", + "dev": true, + "license": "ISC", + "engines": { + "node": ">=16 || 14 >=14.17" + } + }, + "node_modules/yeoman-generator/node_modules/hosted-git-info": { + "version": "6.1.3", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/hosted-git-info/-/hosted-git-info-6.1.3.tgz", + "integrity": "sha512-HVJyzUrLIL1c0QmviVh5E8VGyUS7xCFPS6yydaVd1UegW+ibV/CohqTH9MkOLDp5o+rb82DMo77PTuc9F/8GKw==", + "dev": true, + "license": "ISC", + "dependencies": { + "lru-cache": "^7.5.1" + }, + "engines": { + "node": "^14.17.0 || ^16.13.0 || >=18.0.0" + } + }, + "node_modules/yeoman-generator/node_modules/http-proxy-agent": { + "version": "5.0.0", + "resolved": 
"https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/http-proxy-agent/-/http-proxy-agent-5.0.0.tgz", + "integrity": "sha512-n2hY8YdoRE1i7r6M0w9DIw5GgZN0G25P8zLCRQ8rjXtTU3vsNFBI/vWK/UIeE6g5MUUz6avwAPXmL6Fy9D/90w==", + "dev": true, + "license": "MIT", + "dependencies": { + "@tootallnate/once": "2", + "agent-base": "6", + "debug": "4" + }, + "engines": { + "node": ">= 6" + } + }, + "node_modules/yeoman-generator/node_modules/ignore-walk": { + "version": "6.0.5", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/ignore-walk/-/ignore-walk-6.0.5.tgz", + "integrity": "sha512-VuuG0wCnjhnylG1ABXT3dAuIpTNDs/G8jlpmwXY03fXoXy/8ZK8/T+hMzt8L4WnrLCJgdybqgPagnF/f97cg3A==", + "dev": true, + "license": "ISC", + "dependencies": { + "minimatch": "^9.0.0" + }, + "engines": { + "node": "^14.17.0 || ^16.13.0 || >=18.0.0" + } + }, + "node_modules/yeoman-generator/node_modules/json-parse-even-better-errors": { + "version": "3.0.2", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/json-parse-even-better-errors/-/json-parse-even-better-errors-3.0.2.tgz", + "integrity": "sha512-fi0NG4bPjCHunUJffmLd0gxssIgkNmArMvis4iNah6Owg1MCJjWhEcDLmsK6iGkJq3tHwbDkTlce70/tmXN4cQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": "^14.17.0 || ^16.13.0 || >=18.0.0" + } + }, + "node_modules/yeoman-generator/node_modules/lru-cache": { + "version": "7.18.3", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/lru-cache/-/lru-cache-7.18.3.tgz", + "integrity": "sha512-jumlc0BIUrS3qJGgIkWZsyfAM7NCWiBcCDhnd+3NNM5KbBmLTgHVfWBcg6W+rLUsIpzpERPsvwUP7CckAQSOoA==", + "dev": true, + "license": "ISC", + "engines": { + "node": ">=12" + } + }, + "node_modules/yeoman-generator/node_modules/make-fetch-happen": { + "version": "10.2.1", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/make-fetch-happen/-/make-fetch-happen-10.2.1.tgz", + "integrity": 
"sha512-NgOPbRiaQM10DYXvN3/hhGVI2M5MtITFryzBGxHM5p4wnFxsVCbxkrBrDsk+EZ5OB4jEOT7AjDxtdF+KVEFT7w==", + "dev": true, + "license": "ISC", + "dependencies": { + "agentkeepalive": "^4.2.1", + "cacache": "^16.1.0", + "http-cache-semantics": "^4.1.0", + "http-proxy-agent": "^5.0.0", + "https-proxy-agent": "^5.0.0", + "is-lambda": "^1.0.1", + "lru-cache": "^7.7.1", + "minipass": "^3.1.6", + "minipass-collect": "^1.0.2", + "minipass-fetch": "^2.0.3", + "minipass-flush": "^1.0.5", + "minipass-pipeline": "^1.2.4", + "negotiator": "^0.6.3", + "promise-retry": "^2.0.1", + "socks-proxy-agent": "^7.0.0", + "ssri": "^9.0.0" + }, + "engines": { + "node": "^12.13.0 || ^14.15.0 || >=16.0.0" + } + }, + "node_modules/yeoman-generator/node_modules/make-fetch-happen/node_modules/@npmcli/fs": { + "version": "2.1.2", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/@npmcli/fs/-/fs-2.1.2.tgz", + "integrity": "sha512-yOJKRvohFOaLqipNtwYB9WugyZKhC/DZC4VYPmpaCzDBrA8YpK3qHZ8/HGscMnE4GqbkLNuVcCnxkeQEdGt6LQ==", + "dev": true, + "license": "ISC", + "dependencies": { + "@gar/promisify": "^1.1.3", + "semver": "^7.3.5" + }, + "engines": { + "node": "^12.13.0 || ^14.15.0 || >=16.0.0" + } + }, + "node_modules/yeoman-generator/node_modules/make-fetch-happen/node_modules/cacache": { + "version": "16.1.3", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/cacache/-/cacache-16.1.3.tgz", + "integrity": "sha512-/+Emcj9DAXxX4cwlLmRI9c166RuL3w30zp4R7Joiv2cQTtTtA+jeuCAjH3ZlGnYS3tKENSrKhAzVVP9GVyzeYQ==", + "dev": true, + "license": "ISC", + "dependencies": { + "@npmcli/fs": "^2.1.0", + "@npmcli/move-file": "^2.0.0", + "chownr": "^2.0.0", + "fs-minipass": "^2.1.0", + "glob": "^8.0.1", + "infer-owner": "^1.0.4", + "lru-cache": "^7.7.1", + "minipass": "^3.1.6", + "minipass-collect": "^1.0.2", + "minipass-flush": "^1.0.5", + "minipass-pipeline": "^1.2.4", + "mkdirp": "^1.0.4", + "p-map": "^4.0.0", + "promise-inflight": "^1.0.1", + "rimraf": "^3.0.2", + 
"ssri": "^9.0.0", + "tar": "^6.1.11", + "unique-filename": "^2.0.0" + }, + "engines": { + "node": "^12.13.0 || ^14.15.0 || >=16.0.0" + } + }, + "node_modules/yeoman-generator/node_modules/make-fetch-happen/node_modules/fs-minipass": { + "version": "2.1.0", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/fs-minipass/-/fs-minipass-2.1.0.tgz", + "integrity": "sha512-V/JgOLFCS+R6Vcq0slCuaeWEdNC3ouDlJMNIsacH2VtALiu9mV4LPrHc5cDl8k5aw6J8jwgWWpiTo5RYhmIzvg==", + "dev": true, + "license": "ISC", + "dependencies": { + "minipass": "^3.0.0" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/yeoman-generator/node_modules/make-fetch-happen/node_modules/glob": { + "version": "8.1.0", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/glob/-/glob-8.1.0.tgz", + "integrity": "sha512-r8hpEjiQEYlF2QU0df3dS+nxxSIreXQS1qRhMJM0Q5NDdR386C7jb7Hwwod8Fgiuex+k0GFjgft18yvxm5XoCQ==", + "deprecated": "Glob versions prior to v9 are no longer supported", + "dev": true, + "license": "ISC", + "dependencies": { + "fs.realpath": "^1.0.0", + "inflight": "^1.0.4", + "inherits": "2", + "minimatch": "^5.0.1", + "once": "^1.3.0" + }, + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/yeoman-generator/node_modules/make-fetch-happen/node_modules/minimatch": { + "version": "5.1.6", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/minimatch/-/minimatch-5.1.6.tgz", + "integrity": "sha512-lKwV/1brpG6mBUFHtb7NUmtABCb2WZZmm2wNiOA5hAb8VdCS4B3dtMWyvcoViccwAW/COERjXLt0zP1zXUN26g==", + "dev": true, + "license": "ISC", + "dependencies": { + "brace-expansion": "^2.0.1" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/yeoman-generator/node_modules/make-fetch-happen/node_modules/minipass": { + "version": "3.3.6", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/minipass/-/minipass-3.3.6.tgz", + 
"integrity": "sha512-DxiNidxSEK+tHG6zOIklvNOwm3hvCrbUrdtzY74U6HKTJxvIDfOUL5W5P2Ghd3DTkhhKPYGqeNUIh5qcM4YBfw==", + "dev": true, + "license": "ISC", + "dependencies": { + "yallist": "^4.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/yeoman-generator/node_modules/make-fetch-happen/node_modules/ssri": { + "version": "9.0.1", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/ssri/-/ssri-9.0.1.tgz", + "integrity": "sha512-o57Wcn66jMQvfHG1FlYbWeZWW/dHZhJXjpIcTfXldXEk5nz5lStPo3mK0OJQfGR3RbZUlbISexbljkJzuEj/8Q==", + "dev": true, + "license": "ISC", + "dependencies": { + "minipass": "^3.1.1" + }, + "engines": { + "node": "^12.13.0 || ^14.15.0 || >=16.0.0" + } + }, + "node_modules/yeoman-generator/node_modules/make-fetch-happen/node_modules/unique-filename": { + "version": "2.0.1", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/unique-filename/-/unique-filename-2.0.1.tgz", + "integrity": "sha512-ODWHtkkdx3IAR+veKxFV+VBkUMcN+FaqzUUd7IZzt+0zhDZFPFxhlqwPF3YQvMHx1TD0tdgYl+kuPnJ8E6ql7A==", + "dev": true, + "license": "ISC", + "dependencies": { + "unique-slug": "^3.0.0" + }, + "engines": { + "node": "^12.13.0 || ^14.15.0 || >=16.0.0" + } + }, + "node_modules/yeoman-generator/node_modules/make-fetch-happen/node_modules/unique-slug": { + "version": "3.0.0", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/unique-slug/-/unique-slug-3.0.0.tgz", + "integrity": "sha512-8EyMynh679x/0gqE9fT9oilG+qEt+ibFyqjuVTsZn1+CMxH+XLlpvr2UZx4nVcCwTpx81nICr2JQFkM+HPLq4w==", + "dev": true, + "license": "ISC", + "dependencies": { + "imurmurhash": "^0.1.4" + }, + "engines": { + "node": "^12.13.0 || ^14.15.0 || >=16.0.0" + } + }, + "node_modules/yeoman-generator/node_modules/minimatch": { + "version": "9.0.5", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/minimatch/-/minimatch-9.0.5.tgz", + "integrity": 
"sha512-G6T0ZX48xgozx7587koeX9Ys2NYy6Gmv//P89sEte9V9whIapMNF4idKxnW2QtCcLiTWlb/wfCabAtAFWhhBow==", + "dev": true, + "license": "ISC", + "dependencies": { + "brace-expansion": "^2.0.1" + }, + "engines": { + "node": ">=16 || 14 >=14.17" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/yeoman-generator/node_modules/minipass": { + "version": "5.0.0", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/minipass/-/minipass-5.0.0.tgz", + "integrity": "sha512-3FnjYuehv9k6ovOEbyOswadCDPX1piCfhV8ncmYtHOjuPwylVWsghTLo7rabjC3Rx5xD4HDx8Wm1xnMF7S5qFQ==", + "dev": true, + "license": "ISC", + "engines": { + "node": ">=8" + } + }, + "node_modules/yeoman-generator/node_modules/minipass-fetch": { + "version": "2.1.2", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/minipass-fetch/-/minipass-fetch-2.1.2.tgz", + "integrity": "sha512-LT49Zi2/WMROHYoqGgdlQIZh8mLPZmOrN2NdJjMXxYe4nkN6FUyuPuOAOedNJDrx0IRGg9+4guZewtp8hE6TxA==", + "dev": true, + "license": "MIT", + "dependencies": { + "minipass": "^3.1.6", + "minipass-sized": "^1.0.3", + "minizlib": "^2.1.2" + }, + "engines": { + "node": "^12.13.0 || ^14.15.0 || >=16.0.0" + }, + "optionalDependencies": { + "encoding": "^0.1.13" + } + }, + "node_modules/yeoman-generator/node_modules/minipass-fetch/node_modules/minipass": { + "version": "3.3.6", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/minipass/-/minipass-3.3.6.tgz", + "integrity": "sha512-DxiNidxSEK+tHG6zOIklvNOwm3hvCrbUrdtzY74U6HKTJxvIDfOUL5W5P2Ghd3DTkhhKPYGqeNUIh5qcM4YBfw==", + "dev": true, + "license": "ISC", + "dependencies": { + "yallist": "^4.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/yeoman-generator/node_modules/node-gyp": { + "version": "9.4.1", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/node-gyp/-/node-gyp-9.4.1.tgz", + "integrity": 
"sha512-OQkWKbjQKbGkMf/xqI1jjy3oCTgMKJac58G2+bjZb3fza6gW2YrCSdMQYaoTb70crvE//Gngr4f0AgVHmqHvBQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "env-paths": "^2.2.0", + "exponential-backoff": "^3.1.1", + "glob": "^7.1.4", + "graceful-fs": "^4.2.6", + "make-fetch-happen": "^10.0.3", + "nopt": "^6.0.0", + "npmlog": "^6.0.0", + "rimraf": "^3.0.2", + "semver": "^7.3.5", + "tar": "^6.1.2", + "which": "^2.0.2" + }, + "bin": { + "node-gyp": "bin/node-gyp.js" + }, + "engines": { + "node": "^12.13 || ^14.13 || >=16" + } + }, + "node_modules/yeoman-generator/node_modules/node-gyp/node_modules/brace-expansion": { + "version": "1.1.11", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/brace-expansion/-/brace-expansion-1.1.11.tgz", + "integrity": "sha512-iCuPHDFgrHX7H2vEI/5xpz07zSHB00TpugqhmYtVmMO6518mCuRMoOYFldEBl0g187ufozdaHgWKcYFb61qGiA==", + "dev": true, + "license": "MIT", + "dependencies": { + "balanced-match": "^1.0.0", + "concat-map": "0.0.1" + } + }, + "node_modules/yeoman-generator/node_modules/node-gyp/node_modules/glob": { + "version": "7.2.3", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/glob/-/glob-7.2.3.tgz", + "integrity": "sha512-nFR0zLpU2YCaRxwoCJvL6UvCH2JFyFVIvwTLsIf21AuHlMskA1hhTdk+LlYJtOlYt9v6dvszD2BGRqBL+iQK9Q==", + "deprecated": "Glob versions prior to v9 are no longer supported", + "dev": true, + "license": "ISC", + "dependencies": { + "fs.realpath": "^1.0.0", + "inflight": "^1.0.4", + "inherits": "2", + "minimatch": "^3.1.1", + "once": "^1.3.0", + "path-is-absolute": "^1.0.0" + }, + "engines": { + "node": "*" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/yeoman-generator/node_modules/node-gyp/node_modules/minimatch": { + "version": "3.1.2", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/minimatch/-/minimatch-3.1.2.tgz", + "integrity": 
"sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==", + "dev": true, + "license": "ISC", + "dependencies": { + "brace-expansion": "^1.1.7" + }, + "engines": { + "node": "*" + } + }, + "node_modules/yeoman-generator/node_modules/node-gyp/node_modules/which": { + "version": "2.0.2", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/which/-/which-2.0.2.tgz", + "integrity": "sha512-BLI3Tl1TW3Pvl70l3yq3Y64i+awpwXqsGBYWkkqMtnbXgrMD+yj7rhW0kuEDxzJaYXGjEW5ogapKNMEKNMjibA==", + "dev": true, + "license": "ISC", + "dependencies": { + "isexe": "^2.0.0" + }, + "bin": { + "node-which": "bin/node-which" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/yeoman-generator/node_modules/nopt": { + "version": "6.0.0", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/nopt/-/nopt-6.0.0.tgz", + "integrity": "sha512-ZwLpbTgdhuZUnZzjd7nb1ZV+4DoiC6/sfiVKok72ym/4Tlf+DFdlHYmT2JPmcNNWV6Pi3SDf1kT+A4r9RTuT9g==", + "dev": true, + "license": "ISC", + "dependencies": { + "abbrev": "^1.0.0" + }, + "bin": { + "nopt": "bin/nopt.js" + }, + "engines": { + "node": "^12.13.0 || ^14.15.0 || >=16.0.0" + } + }, + "node_modules/yeoman-generator/node_modules/npm-bundled": { + "version": "3.0.1", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/npm-bundled/-/npm-bundled-3.0.1.tgz", + "integrity": "sha512-+AvaheE/ww1JEwRHOrn4WHNzOxGtVp+adrg2AeZS/7KuxGUYFuBta98wYpfHBbJp6Tg6j1NKSEVHNcfZzJHQwQ==", + "dev": true, + "license": "ISC", + "dependencies": { + "npm-normalize-package-bin": "^3.0.0" + }, + "engines": { + "node": "^14.17.0 || ^16.13.0 || >=18.0.0" + } + }, + "node_modules/yeoman-generator/node_modules/npm-install-checks": { + "version": "6.3.0", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/npm-install-checks/-/npm-install-checks-6.3.0.tgz", + "integrity": 
"sha512-W29RiK/xtpCGqn6f3ixfRYGk+zRyr+Ew9F2E20BfXxT5/euLdA/Nm7fO7OeTGuAmTs30cpgInyJ0cYe708YTZw==", + "dev": true, + "license": "BSD-2-Clause", + "dependencies": { + "semver": "^7.1.1" + }, + "engines": { + "node": "^14.17.0 || ^16.13.0 || >=18.0.0" + } + }, + "node_modules/yeoman-generator/node_modules/npm-normalize-package-bin": { + "version": "3.0.1", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/npm-normalize-package-bin/-/npm-normalize-package-bin-3.0.1.tgz", + "integrity": "sha512-dMxCf+zZ+3zeQZXKxmyuCKlIDPGuv8EF940xbkC4kQVDTtqoh6rJFO+JTKSA6/Rwi0getWmtuy4Itup0AMcaDQ==", + "dev": true, + "license": "ISC", + "engines": { + "node": "^14.17.0 || ^16.13.0 || >=18.0.0" + } + }, + "node_modules/yeoman-generator/node_modules/npm-package-arg": { + "version": "10.1.0", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/npm-package-arg/-/npm-package-arg-10.1.0.tgz", + "integrity": "sha512-uFyyCEmgBfZTtrKk/5xDfHp6+MdrqGotX/VoOyEEl3mBwiEE5FlBaePanazJSVMPT7vKepcjYBY2ztg9A3yPIA==", + "dev": true, + "license": "ISC", + "dependencies": { + "hosted-git-info": "^6.0.0", + "proc-log": "^3.0.0", + "semver": "^7.3.5", + "validate-npm-package-name": "^5.0.0" + }, + "engines": { + "node": "^14.17.0 || ^16.13.0 || >=18.0.0" + } + }, + "node_modules/yeoman-generator/node_modules/npm-packlist": { + "version": "7.0.4", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/npm-packlist/-/npm-packlist-7.0.4.tgz", + "integrity": "sha512-d6RGEuRrNS5/N84iglPivjaJPxhDbZmlbTwTDX2IbcRHG5bZCdtysYMhwiPvcF4GisXHGn7xsxv+GQ7T/02M5Q==", + "dev": true, + "license": "ISC", + "dependencies": { + "ignore-walk": "^6.0.0" + }, + "engines": { + "node": "^14.17.0 || ^16.13.0 || >=18.0.0" + } + }, + "node_modules/yeoman-generator/node_modules/npm-pick-manifest": { + "version": "8.0.2", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/npm-pick-manifest/-/npm-pick-manifest-8.0.2.tgz", + 
"integrity": "sha512-1dKY+86/AIiq1tkKVD3l0WI+Gd3vkknVGAggsFeBkTvbhMQ1OND/LKkYv4JtXPKUJ8bOTCyLiqEg2P6QNdK+Gg==", + "dev": true, + "license": "ISC", + "dependencies": { + "npm-install-checks": "^6.0.0", + "npm-normalize-package-bin": "^3.0.0", + "npm-package-arg": "^10.0.0", + "semver": "^7.3.5" + }, + "engines": { + "node": "^14.17.0 || ^16.13.0 || >=18.0.0" + } + }, + "node_modules/yeoman-generator/node_modules/npm-registry-fetch": { + "version": "14.0.5", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/npm-registry-fetch/-/npm-registry-fetch-14.0.5.tgz", + "integrity": "sha512-kIDMIo4aBm6xg7jOttupWZamsZRkAqMqwqqbVXnUqstY5+tapvv6bkH/qMR76jdgV+YljEUCyWx3hRYMrJiAgA==", + "dev": true, + "license": "ISC", + "dependencies": { + "make-fetch-happen": "^11.0.0", + "minipass": "^5.0.0", + "minipass-fetch": "^3.0.0", + "minipass-json-stream": "^1.0.1", + "minizlib": "^2.1.2", + "npm-package-arg": "^10.0.0", + "proc-log": "^3.0.0" + }, + "engines": { + "node": "^14.17.0 || ^16.13.0 || >=18.0.0" + } + }, + "node_modules/yeoman-generator/node_modules/npm-registry-fetch/node_modules/make-fetch-happen": { + "version": "11.1.1", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/make-fetch-happen/-/make-fetch-happen-11.1.1.tgz", + "integrity": "sha512-rLWS7GCSTcEujjVBs2YqG7Y4643u8ucvCJeSRqiLYhesrDuzeuFIk37xREzAsfQaqzl8b9rNCE4m6J8tvX4Q8w==", + "dev": true, + "license": "ISC", + "dependencies": { + "agentkeepalive": "^4.2.1", + "cacache": "^17.0.0", + "http-cache-semantics": "^4.1.1", + "http-proxy-agent": "^5.0.0", + "https-proxy-agent": "^5.0.0", + "is-lambda": "^1.0.1", + "lru-cache": "^7.7.1", + "minipass": "^5.0.0", + "minipass-fetch": "^3.0.0", + "minipass-flush": "^1.0.5", + "minipass-pipeline": "^1.2.4", + "negotiator": "^0.6.3", + "promise-retry": "^2.0.1", + "socks-proxy-agent": "^7.0.0", + "ssri": "^10.0.0" + }, + "engines": { + "node": "^14.17.0 || ^16.13.0 || >=18.0.0" + } + }, + 
"node_modules/yeoman-generator/node_modules/npm-registry-fetch/node_modules/minipass-fetch": { + "version": "3.0.5", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/minipass-fetch/-/minipass-fetch-3.0.5.tgz", + "integrity": "sha512-2N8elDQAtSnFV0Dk7gt15KHsS0Fyz6CbYZ360h0WTYV1Ty46li3rAXVOQj1THMNLdmrD9Vt5pBPtWtVkpwGBqg==", + "dev": true, + "license": "MIT", + "dependencies": { + "minipass": "^7.0.3", + "minipass-sized": "^1.0.3", + "minizlib": "^2.1.2" + }, + "engines": { + "node": "^14.17.0 || ^16.13.0 || >=18.0.0" + }, + "optionalDependencies": { + "encoding": "^0.1.13" + } + }, + "node_modules/yeoman-generator/node_modules/npm-registry-fetch/node_modules/minipass-fetch/node_modules/minipass": { + "version": "7.1.2", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/minipass/-/minipass-7.1.2.tgz", + "integrity": "sha512-qOOzS1cBTWYF4BH8fVePDBOO9iptMnGUEZwNc/cMWnTV2nVLZ7VoNWEPHkYczZA0pdoA7dl6e7FL659nX9S2aw==", + "dev": true, + "license": "ISC", + "engines": { + "node": ">=16 || 14 >=14.17" + } + }, + "node_modules/yeoman-generator/node_modules/npmlog": { + "version": "6.0.2", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/npmlog/-/npmlog-6.0.2.tgz", + "integrity": "sha512-/vBvz5Jfr9dT/aFWd0FIRf+T/Q2WBsLENygUaFUqstqsycmZAP/t5BvFJTK0viFmSUxiUKTUplWy5vt+rvKIxg==", + "deprecated": "This package is no longer supported.", + "dev": true, + "license": "ISC", + "dependencies": { + "are-we-there-yet": "^3.0.0", + "console-control-strings": "^1.1.0", + "gauge": "^4.0.3", + "set-blocking": "^2.0.0" + }, + "engines": { + "node": "^12.13.0 || ^14.15.0 || >=16.0.0" + } + }, + "node_modules/yeoman-generator/node_modules/pacote": { + "version": "15.2.0", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/pacote/-/pacote-15.2.0.tgz", + "integrity": "sha512-rJVZeIwHTUta23sIZgEIM62WYwbmGbThdbnkt81ravBplQv+HjyroqnLRNH2+sLJHcGZmLRmhPwACqhfTcOmnA==", + "dev": 
true, + "license": "ISC", + "dependencies": { + "@npmcli/git": "^4.0.0", + "@npmcli/installed-package-contents": "^2.0.1", + "@npmcli/promise-spawn": "^6.0.1", + "@npmcli/run-script": "^6.0.0", + "cacache": "^17.0.0", + "fs-minipass": "^3.0.0", + "minipass": "^5.0.0", + "npm-package-arg": "^10.0.0", + "npm-packlist": "^7.0.0", + "npm-pick-manifest": "^8.0.0", + "npm-registry-fetch": "^14.0.0", + "proc-log": "^3.0.0", + "promise-retry": "^2.0.1", + "read-package-json": "^6.0.0", + "read-package-json-fast": "^3.0.0", + "sigstore": "^1.3.0", + "ssri": "^10.0.0", + "tar": "^6.1.11" + }, + "bin": { + "pacote": "lib/bin.js" + }, + "engines": { + "node": "^14.17.0 || ^16.13.0 || >=18.0.0" + } + }, + "node_modules/yeoman-generator/node_modules/proc-log": { + "version": "3.0.0", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/proc-log/-/proc-log-3.0.0.tgz", + "integrity": "sha512-++Vn7NS4Xf9NacaU9Xq3URUuqZETPsf8L4j5/ckhaRYsfPeRyzGw+iDjFhV/Jr3uNmTvvddEJFWh5R1gRgUH8A==", + "dev": true, + "license": "ISC", + "engines": { + "node": "^14.17.0 || ^16.13.0 || >=18.0.0" + } + }, + "node_modules/yeoman-generator/node_modules/read-package-json-fast": { + "version": "3.0.2", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/read-package-json-fast/-/read-package-json-fast-3.0.2.tgz", + "integrity": "sha512-0J+Msgym3vrLOUB3hzQCuZHII0xkNGCtz/HJH9xZshwv9DbDwkw1KaE3gx/e2J5rpEY5rtOy6cyhKOPrkP7FZw==", + "dev": true, + "license": "ISC", + "dependencies": { + "json-parse-even-better-errors": "^3.0.0", + "npm-normalize-package-bin": "^3.0.0" + }, + "engines": { + "node": "^14.17.0 || ^16.13.0 || >=18.0.0" + } + }, + "node_modules/yeoman-generator/node_modules/readable-stream": { + "version": "3.6.2", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/readable-stream/-/readable-stream-3.6.2.tgz", + "integrity": 
"sha512-9u/sniCrY3D5WdsERHzHE4G2YCXqoG5FTHUiCC4SIbr6XcLZBY05ya9EKjYek9O5xOAwjGq+1JdGBAS7Q9ScoA==", + "dev": true, + "license": "MIT", + "dependencies": { + "inherits": "^2.0.3", + "string_decoder": "^1.1.1", + "util-deprecate": "^1.0.1" + }, + "engines": { + "node": ">= 6" + } + }, + "node_modules/yeoman-generator/node_modules/socks-proxy-agent": { + "version": "7.0.0", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/socks-proxy-agent/-/socks-proxy-agent-7.0.0.tgz", + "integrity": "sha512-Fgl0YPZ902wEsAyiQ+idGd1A7rSFx/ayC1CQVMw5P+EQx2V0SgpGtf6OKFhVjPflPUl9YMmEOnmfjCdMUsygww==", + "dev": true, + "license": "MIT", + "dependencies": { + "agent-base": "^6.0.2", + "debug": "^4.3.3", + "socks": "^2.6.2" + }, + "engines": { + "node": ">= 10" + } + }, + "node_modules/yeoman-generator/node_modules/ssri": { + "version": "10.0.6", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/ssri/-/ssri-10.0.6.tgz", + "integrity": "sha512-MGrFH9Z4NP9Iyhqn16sDtBpRRNJ0Y2hNa6D65h736fVSaPCHr4DM4sWUNvVaSuC+0OBGhwsrydQwmgfg5LncqQ==", + "dev": true, + "license": "ISC", + "dependencies": { + "minipass": "^7.0.3" + }, + "engines": { + "node": "^14.17.0 || ^16.13.0 || >=18.0.0" + } + }, + "node_modules/yeoman-generator/node_modules/ssri/node_modules/minipass": { + "version": "7.1.2", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/minipass/-/minipass-7.1.2.tgz", + "integrity": "sha512-qOOzS1cBTWYF4BH8fVePDBOO9iptMnGUEZwNc/cMWnTV2nVLZ7VoNWEPHkYczZA0pdoA7dl6e7FL659nX9S2aw==", + "dev": true, + "license": "ISC", + "engines": { + "node": ">=16 || 14 >=14.17" + } + }, + "node_modules/yeoman-generator/node_modules/unique-filename": { + "version": "3.0.0", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/unique-filename/-/unique-filename-3.0.0.tgz", + "integrity": "sha512-afXhuC55wkAmZ0P18QsVE6kp8JaxrEokN2HGIoIVv2ijHQd419H0+6EigAFcIzXeMIkcIkNBpB3L/DXB3cTS/g==", + "dev": true, + 
"license": "ISC", + "dependencies": { + "unique-slug": "^4.0.0" + }, + "engines": { + "node": "^14.17.0 || ^16.13.0 || >=18.0.0" + } + }, + "node_modules/yeoman-generator/node_modules/unique-slug": { + "version": "4.0.0", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/unique-slug/-/unique-slug-4.0.0.tgz", + "integrity": "sha512-WrcA6AyEfqDX5bWige/4NQfPZMtASNVxdmWR76WESYQVAACSgWcR6e9i0mofqqBxYFtL4oAxPIptY73/0YE1DQ==", + "dev": true, + "license": "ISC", + "dependencies": { + "imurmurhash": "^0.1.4" + }, + "engines": { + "node": "^14.17.0 || ^16.13.0 || >=18.0.0" + } + }, + "node_modules/yeoman-generator/node_modules/validate-npm-package-name": { + "version": "5.0.1", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/validate-npm-package-name/-/validate-npm-package-name-5.0.1.tgz", + "integrity": "sha512-OljLrQ9SQdOUqTaQxqL5dEfZWrXExyyWsozYlAWFawPVNuD83igl7uJD2RTkNMbniIYgt8l81eCJGIdQF7avLQ==", + "dev": true, + "license": "ISC", + "engines": { + "node": "^14.17.0 || ^16.13.0 || >=18.0.0" + } + }, + "node_modules/yeoman-generator/node_modules/which": { + "version": "3.0.1", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/which/-/which-3.0.1.tgz", + "integrity": "sha512-XA1b62dzQzLfaEOSQFTCOd5KFf/1VSzZo7/7TUjnya6u0vGGKzU96UQBZTAThCb2j4/xjBAyii1OhRLJEivHvg==", + "dev": true, + "license": "ISC", + "dependencies": { + "isexe": "^2.0.0" + }, + "bin": { + "node-which": "bin/which.js" + }, + "engines": { + "node": "^14.17.0 || ^16.13.0 || >=18.0.0" + } + }, + "node_modules/yocto-queue": { + "version": "0.1.0", + "resolved": "https://artifacthub-iad.oci.oraclecorp.com/api/npm/npmjs-registry/yocto-queue/-/yocto-queue-0.1.0.tgz", + "integrity": "sha512-rVksvsnNCdJ/ohGc6xgPwyN8eheCxsiLM8mxuE/t/mOVqJewPuO1miLpTHQiRgTKCLexL4MeAFVagts7HmNZ2Q==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + 
} + } +} diff --git a/graalwasm/graalwasm-tensorflow/src/main/js/package.json b/graalwasm/graalwasm-tensorflow/src/main/js/package.json new file mode 100644 index 00000000..61b48107 --- /dev/null +++ b/graalwasm/graalwasm-tensorflow/src/main/js/package.json @@ -0,0 +1,32 @@ +{ + "name": "js", + "version": "1.0.0", + "main": "index.js", + "scripts": { + "test": "echo \"Error: no test specified\" && exit 1", + "build": "webpack --mode=production --node-env=production", + "build:dev": "webpack --mode=development", + "build:prod": "webpack --mode=production --node-env=production", + "watch": "webpack --watch" + }, + "author": "", + "license": "ISC", + "description": "", + "devDependencies": { + "@babel/core": "^7.26.10", + "@babel/preset-env": "^7.26.9", + "@webpack-cli/generators": "^3.0.7", + "babel-loader": "^10.0.0", + "ts-loader": "^9.5.2", + "typescript": "^5.8.3" + }, + "dependencies": { + "@tensorflow/tfjs": "^4.22.0", + "@tensorflow/tfjs-backend-wasm": "^4.22.0", + "assert": "^2.1.0", + "browserify-zlib": "^0.2.0", + "fast-text-encoding": "^1.0.6", + "stream-browserify": "^3.0.0", + "util": "^0.12.5" + } +} diff --git a/graalwasm/graalwasm-tensorflow/src/main/js/tfjs-backend-wasm-simd.wasm b/graalwasm/graalwasm-tensorflow/src/main/js/tfjs-backend-wasm-simd.wasm new file mode 100755 index 00000000..b515dee9 Binary files /dev/null and b/graalwasm/graalwasm-tensorflow/src/main/js/tfjs-backend-wasm-simd.wasm differ diff --git a/graalwasm/graalwasm-tensorflow/src/main/js/webpack.config.js b/graalwasm/graalwasm-tensorflow/src/main/js/webpack.config.js new file mode 100644 index 00000000..b46b932f --- /dev/null +++ b/graalwasm/graalwasm-tensorflow/src/main/js/webpack.config.js @@ -0,0 +1,66 @@ +const path = require('path'); +const { EnvironmentPlugin } = require('webpack'); + +const isProduction = process.env.NODE_ENV == 'production'; + +const config = { + entry: './main.mjs', + output: { + path: process.env.BUILD_DIR + ? 
path.resolve(process.env.BUILD_DIR) + : path.resolve(__dirname, '../../../target/classes/bundle'), + filename: 'bundle.mjs', + module: true, + library: { + type: 'module', + }, + globalObject: 'globalThis' + }, + experiments: { + outputModule: true // Generate ES module sources + }, + optimization: { + usedExports: true, // Include only used exports in the bundle + minimize: false, // Disable minification + }, + resolve: { + aliasFields: [], // Disable browser alias to use the server version of the qrcode package + fallback: { // Redirect Node.js core modules to polyfills + "stream": require.resolve("stream-browserify"), + "zlib": require.resolve("browserify-zlib"), + "fs": false , + "path": false, + "crypto": false, + "os": false, + "perf_hooks": false, + "worker_threads": false + }, + }, + + plugins: [ + // Add your plugins here + // Learn more about plugins from https://webpack.js.org/configuration/plugins/ + // @ts-ignore + new EnvironmentPlugin({ + NODE_DEBUG: false, // Set process.env.NODE_DEBUG to false + }), + ], + module: { + rules: [ + { + test: /\.(js|jsx)$/i, + loader: 'babel-loader', + }, + { + test: /\.(eot|svg|ttf|woff|woff2|png|jpg|gif)$/i, + type: 'asset', + }, + + // Add your rules for custom modules here + // Learn more about loaders from https://webpack.js.org/loaders/ + ], + }, + mode: isProduction ? 
'production' : 'development', +}; + +module.exports = () => config; \ No newline at end of file diff --git a/graalwasm/graalwasm-tensorflow/src/main/resources/application.properties b/graalwasm/graalwasm-tensorflow/src/main/resources/application.properties new file mode 100644 index 00000000..bf0367b1 --- /dev/null +++ b/graalwasm/graalwasm-tensorflow/src/main/resources/application.properties @@ -0,0 +1 @@ +spring.application.name=Tensorflow diff --git a/graalwasm/graalwasm-tensorflow/src/main/resources/data.xlsx b/graalwasm/graalwasm-tensorflow/src/main/resources/data.xlsx new file mode 100644 index 00000000..f99302f2 Binary files /dev/null and b/graalwasm/graalwasm-tensorflow/src/main/resources/data.xlsx differ diff --git a/graalwasm/graalwasm-tensorflow/src/main/resources/excelize.js b/graalwasm/graalwasm-tensorflow/src/main/resources/excelize.js new file mode 100644 index 00000000..35ecc2f4 --- /dev/null +++ b/graalwasm/graalwasm-tensorflow/src/main/resources/excelize.js @@ -0,0 +1,48 @@ +function readExcel(excelFileBytes) { + let result ; + return new Promise((resolve, reject) => { + var start = Date.now(); + const go = new Go(); + global.excelize = {}; + + WebAssembly.instantiate(new Uint8Array(wasmBytes), go.importObject) + .then((result) => { + var endInit = Date.now(); + go.run(result.instance); + + const f = excelize.OpenReader(new Uint8Array(excelFileBytes)); // No fs.readFileSync + + + const ret1 = f.GetCellValue('data', 'B2'); + if (ret1.error) { + console.error(ret1.error); + reject(ret1.error); // Reject promise in case of error + } + + // Get all rows from Sheet1 + const ret2 = f.GetRows('data'); + if (ret2.error) { + console.error(ret2.error); + reject(ret2.error); // Reject promise in case of error + } else { + // Format the rows into a simple array format for return + const resultArray = ret2.result.map(row => row.map(colCell => colCell)); + console.log("Extracted data:", resultArray); + + + // Resolve the promise with the result array + + 
Polyglot.export("resultArray", resultArray); + resolve(resultArray); + } + + console.log("Excel read successfully."); + }) + .catch(err => { + console.error("Error reading Excel:", err); + reject(err); // Reject promise on error + }); + }); +} + + diff --git a/graalwasm/graalwasm-tensorflow/src/main/resources/excelize.wasm b/graalwasm/graalwasm-tensorflow/src/main/resources/excelize.wasm new file mode 100755 index 00000000..2837a0fa Binary files /dev/null and b/graalwasm/graalwasm-tensorflow/src/main/resources/excelize.wasm differ diff --git a/graalwasm/graalwasm-tensorflow/src/main/resources/excelize_m.js b/graalwasm/graalwasm-tensorflow/src/main/resources/excelize_m.js new file mode 100644 index 00000000..78ad346e --- /dev/null +++ b/graalwasm/graalwasm-tensorflow/src/main/resources/excelize_m.js @@ -0,0 +1,563 @@ +'use strict'; + +if (typeof window === 'undefined') { + global.crypto = { + getRandomValues(b) { + return nodeCrypto.randomFillSync(b); + } + }; + global.performance = { + now() { + const [sec, nsec] = process.hrtime(); + return sec * 1000 + nsec / 1000000; + } + }; +} +(() => { + const enosys = () => { + const err = new Error("not implemented"); + err.code = "ENOSYS"; + return err; + }; + + if (!globalThis.fs) { + let outputBuf = ""; + globalThis.fs = { + constants: { O_WRONLY: -1, O_RDWR: -1, O_CREAT: -1, O_TRUNC: -1, O_APPEND: -1, O_EXCL: -1 }, // unused + writeSync(fd, buf) { + outputBuf += decoder.decode(buf); + const nl = outputBuf.lastIndexOf("\n"); + if (nl != -1) { + console.log(outputBuf.substr(0, nl)); + outputBuf = outputBuf.substr(nl + 1); + } + return buf.length; + }, + write(fd, buf, offset, length, position, callback) { + if (offset !== 0 || length !== buf.length || position !== null) { + callback(enosys()); + return; + } + const n = this.writeSync(fd, buf); + callback(null, n); + }, + chmod(path, mode, callback) { callback(enosys()); }, + chown(path, uid, gid, callback) { callback(enosys()); }, + close(fd, callback) { 
callback(enosys()); }, + fchmod(fd, mode, callback) { callback(enosys()); }, + fchown(fd, uid, gid, callback) { callback(enosys()); }, + fstat(fd, callback) { callback(enosys()); }, + fsync(fd, callback) { callback(null); }, + ftruncate(fd, length, callback) { callback(enosys()); }, + lchown(path, uid, gid, callback) { callback(enosys()); }, + link(path, link, callback) { callback(enosys()); }, + lstat(path, callback) { callback(enosys()); }, + mkdir(path, perm, callback) { callback(enosys()); }, + open(path, flags, mode, callback) { callback(enosys()); }, + read(fd, buffer, offset, length, position, callback) { callback(enosys()); }, + readdir(path, callback) { callback(enosys()); }, + readlink(path, callback) { callback(enosys()); }, + rename(from, to, callback) { callback(enosys()); }, + rmdir(path, callback) { callback(enosys()); }, + stat(path, callback) { callback(enosys()); }, + symlink(path, link, callback) { callback(enosys()); }, + truncate(path, length, callback) { callback(enosys()); }, + unlink(path, callback) { callback(enosys()); }, + utimes(path, atime, mtime, callback) { callback(enosys()); }, + }; + } + + if (!globalThis.process) { + globalThis.process = { + getuid() { return -1; }, + getgid() { return -1; }, + geteuid() { return -1; }, + getegid() { return -1; }, + getgroups() { throw enosys(); }, + pid: -1, + ppid: -1, + umask() { throw enosys(); }, + cwd() { throw enosys(); }, + chdir() { throw enosys(); }, + } + } + + if (!globalThis.crypto) { + throw new Error("globalThis.crypto is not available, polyfill required (crypto.getRandomValues only)"); + } + + if (!globalThis.performance) { + throw new Error("globalThis.performance is not available, polyfill required (performance.now only)"); + } + + if (!globalThis.TextEncoder) { + throw new Error("globalThis.TextEncoder is not available, polyfill required"); + } + + if (!globalThis.TextDecoder) { + throw new Error("globalThis.TextDecoder is not available, polyfill required"); + } + + const 
encoder = new TextEncoder("utf-8"); + const decoder = new TextDecoder("utf-8"); + + globalThis.Go = class { + constructor() { + this.argv = ["js"]; + this.env = {}; + this.exit = (code) => { + if (code !== 0) { + console.warn("exit code:", code); + } + }; + this._exitPromise = new Promise((resolve) => { + this._resolveExitPromise = resolve; + }); + this._pendingEvent = null; + this._scheduledTimeouts = new Map(); + this._nextCallbackTimeoutID = 1; + + const setInt64 = (addr, v) => { + this.mem.setUint32(addr + 0, v, true); + this.mem.setUint32(addr + 4, Math.floor(v / 4294967296), true); + } + + const getInt64 = (addr) => { + const low = this.mem.getUint32(addr + 0, true); + const high = this.mem.getInt32(addr + 4, true); + return low + high * 4294967296; + } + + const loadValue = (addr) => { + const f = this.mem.getFloat64(addr, true); + if (f === 0) { + return undefined; + } + if (!isNaN(f)) { + return f; + } + + const id = this.mem.getUint32(addr, true); + return this._values[id]; + } + + const storeValue = (addr, v) => { + const nanHead = 0x7FF80000; + + if (typeof v === "number" && v !== 0) { + if (isNaN(v)) { + this.mem.setUint32(addr + 4, nanHead, true); + this.mem.setUint32(addr, 0, true); + return; + } + this.mem.setFloat64(addr, v, true); + return; + } + + if (v === undefined) { + this.mem.setFloat64(addr, 0, true); + return; + } + + let id = this._ids.get(v); + if (id === undefined) { + id = this._idPool.pop(); + if (id === undefined) { + id = this._values.length; + } + this._values[id] = v; + this._goRefCounts[id] = 0; + this._ids.set(v, id); + } + this._goRefCounts[id]++; + let typeFlag = 0; + switch (typeof v) { + case "object": + if (v !== null) { + typeFlag = 1; + } + break; + case "string": + typeFlag = 2; + break; + case "symbol": + typeFlag = 3; + break; + case "function": + typeFlag = 4; + break; + } + this.mem.setUint32(addr + 4, nanHead | typeFlag, true); + this.mem.setUint32(addr, id, true); + } + + const loadSlice = (addr) => { + const array 
= getInt64(addr + 0); + const len = getInt64(addr + 8); + return new Uint8Array(this._inst.exports.mem.buffer, array, len); + } + + const loadSliceOfValues = (addr) => { + const array = getInt64(addr + 0); + const len = getInt64(addr + 8); + const a = new Array(len); + for (let i = 0; i < len; i++) { + a[i] = loadValue(array + i * 8); + } + return a; + } + + const loadString = (addr) => { + const saddr = getInt64(addr + 0); + const len = getInt64(addr + 8); + return decoder.decode(new DataView(this._inst.exports.mem.buffer, saddr, len)); + } + + const timeOrigin = Date.now() - performance.now(); + this.importObject = { + gojs: { + // Go's SP does not change as long as no Go code is running. Some operations (e.g. calls, getters and setters) + // may synchronously trigger a Go event handler. This makes Go code get executed in the middle of the imported + // function. A goroutine can switch to a new stack if the current stack is too small (see morestack function). + // This changes the SP, thus we have to update the SP used by the imported function. 
+ + // func wasmExit(code int32) + "runtime.wasmExit": (sp) => { + sp >>>= 0; + const code = this.mem.getInt32(sp + 8, true); + this.exited = true; + delete this._inst; + delete this._values; + delete this._goRefCounts; + delete this._ids; + delete this._idPool; + this.exit(code); + }, + + // func wasmWrite(fd uintptr, p unsafe.Pointer, n int32) + "runtime.wasmWrite": (sp) => { + sp >>>= 0; + const fd = getInt64(sp + 8); + const p = getInt64(sp + 16); + const n = this.mem.getInt32(sp + 24, true); + fs.writeSync(fd, new Uint8Array(this._inst.exports.mem.buffer, p, n)); + }, + + // func resetMemoryDataView() + "runtime.resetMemoryDataView": (sp) => { + sp >>>= 0; + this.mem = new DataView(this._inst.exports.mem.buffer); + }, + + // func nanotime1() int64 + "runtime.nanotime1": (sp) => { + sp >>>= 0; + setInt64(sp + 8, (timeOrigin + performance.now()) * 1000000); + }, + + // func walltime() (sec int64, nsec int32) + "runtime.walltime": (sp) => { + sp >>>= 0; + const msec = (new Date).getTime(); + setInt64(sp + 8, msec / 1000); + this.mem.setInt32(sp + 16, (msec % 1000) * 1000000, true); + }, + + // func scheduleTimeoutEvent(delay int64) int32 + "runtime.scheduleTimeoutEvent": (sp) => { + sp >>>= 0; + const id = this._nextCallbackTimeoutID; + this._nextCallbackTimeoutID++; + this._scheduledTimeouts.set(id, setTimeout( + () => { + this._resume(); + while (this._scheduledTimeouts.has(id)) { + // for some reason Go failed to register the timeout event, log and try again + // (temporary workaround for https://github.com/golang/go/issues/28975) + console.warn("scheduleTimeoutEvent: missed timeout event"); + this._resume(); + } + }, + getInt64(sp + 8) + 1, // setTimeout has been seen to fire up to 1 millisecond early + )); + this.mem.setInt32(sp + 16, id, true); + }, + + // func clearTimeoutEvent(id int32) + "runtime.clearTimeoutEvent": (sp) => { + sp >>>= 0; + const id = this.mem.getInt32(sp + 8, true); + clearTimeout(this._scheduledTimeouts.get(id)); + 
this._scheduledTimeouts.delete(id); + }, + + // func getRandomData(r []byte) + "runtime.getRandomData": (sp) => { + sp >>>= 0; + crypto.getRandomValues(loadSlice(sp + 8)); + }, + + // func finalizeRef(v ref) + "syscall/js.finalizeRef": (sp) => { + sp >>>= 0; + const id = this.mem.getUint32(sp + 8, true); + this._goRefCounts[id]--; + if (this._goRefCounts[id] === 0) { + const v = this._values[id]; + this._values[id] = null; + this._ids.delete(v); + this._idPool.push(id); + } + }, + + // func stringVal(value string) ref + "syscall/js.stringVal": (sp) => { + sp >>>= 0; + storeValue(sp + 24, loadString(sp + 8)); + }, + + // func valueGet(v ref, p string) ref + "syscall/js.valueGet": (sp) => { + sp >>>= 0; + const result = Reflect.get(loadValue(sp + 8), loadString(sp + 16)); + sp = this._inst.exports.getsp() >>> 0; // see comment above + storeValue(sp + 32, result); + }, + + // func valueSet(v ref, p string, x ref) + "syscall/js.valueSet": (sp) => { + sp >>>= 0; + Reflect.set(loadValue(sp + 8), loadString(sp + 16), loadValue(sp + 32)); + }, + + // func valueDelete(v ref, p string) + "syscall/js.valueDelete": (sp) => { + sp >>>= 0; + Reflect.deleteProperty(loadValue(sp + 8), loadString(sp + 16)); + }, + + // func valueIndex(v ref, i int) ref + "syscall/js.valueIndex": (sp) => { + sp >>>= 0; + storeValue(sp + 24, Reflect.get(loadValue(sp + 8), getInt64(sp + 16))); + }, + + // valueSetIndex(v ref, i int, x ref) + "syscall/js.valueSetIndex": (sp) => { + sp >>>= 0; + Reflect.set(loadValue(sp + 8), getInt64(sp + 16), loadValue(sp + 24)); + }, + + // func valueCall(v ref, m string, args []ref) (ref, bool) + "syscall/js.valueCall": (sp) => { + sp >>>= 0; + try { + const v = loadValue(sp + 8); + const m = Reflect.get(v, loadString(sp + 16)); + const args = loadSliceOfValues(sp + 32); + const result = Reflect.apply(m, v, args); + sp = this._inst.exports.getsp() >>> 0; // see comment above + storeValue(sp + 56, result); + this.mem.setUint8(sp + 64, 1); + } catch (err) { + sp = 
this._inst.exports.getsp() >>> 0; // see comment above + storeValue(sp + 56, err); + this.mem.setUint8(sp + 64, 0); + } + }, + + // func valueInvoke(v ref, args []ref) (ref, bool) + "syscall/js.valueInvoke": (sp) => { + sp >>>= 0; + try { + const v = loadValue(sp + 8); + const args = loadSliceOfValues(sp + 16); + const result = Reflect.apply(v, undefined, args); + sp = this._inst.exports.getsp() >>> 0; // see comment above + storeValue(sp + 40, result); + this.mem.setUint8(sp + 48, 1); + } catch (err) { + sp = this._inst.exports.getsp() >>> 0; // see comment above + storeValue(sp + 40, err); + this.mem.setUint8(sp + 48, 0); + } + }, + + // func valueNew(v ref, args []ref) (ref, bool) + "syscall/js.valueNew": (sp) => { + sp >>>= 0; + try { + const v = loadValue(sp + 8); + const args = loadSliceOfValues(sp + 16); + const result = Reflect.construct(v, args); + sp = this._inst.exports.getsp() >>> 0; // see comment above + storeValue(sp + 40, result); + this.mem.setUint8(sp + 48, 1); + } catch (err) { + sp = this._inst.exports.getsp() >>> 0; // see comment above + storeValue(sp + 40, err); + this.mem.setUint8(sp + 48, 0); + } + }, + + // func valueLength(v ref) int + "syscall/js.valueLength": (sp) => { + sp >>>= 0; + setInt64(sp + 16, parseInt(loadValue(sp + 8).length)); + }, + + // valuePrepareString(v ref) (ref, int) + "syscall/js.valuePrepareString": (sp) => { + sp >>>= 0; + const str = encoder.encode(String(loadValue(sp + 8))); + storeValue(sp + 16, str); + setInt64(sp + 24, str.length); + }, + + // valueLoadString(v ref, b []byte) + "syscall/js.valueLoadString": (sp) => { + sp >>>= 0; + const str = loadValue(sp + 8); + loadSlice(sp + 16).set(str); + }, + + // func valueInstanceOf(v ref, t ref) bool + "syscall/js.valueInstanceOf": (sp) => { + sp >>>= 0; + this.mem.setUint8(sp + 24, (loadValue(sp + 8) instanceof loadValue(sp + 16)) ? 
1 : 0); + }, + + // func copyBytesToGo(dst []byte, src ref) (int, bool) + "syscall/js.copyBytesToGo": (sp) => { + sp >>>= 0; + const dst = loadSlice(sp + 8); + const src = loadValue(sp + 32); + if (!(src instanceof Uint8Array || src instanceof Uint8ClampedArray)) { + this.mem.setUint8(sp + 48, 0); + return; + } + const toCopy = src.subarray(0, dst.length); + dst.set(toCopy); + setInt64(sp + 40, toCopy.length); + this.mem.setUint8(sp + 48, 1); + }, + + // func copyBytesToJS(dst ref, src []byte) (int, bool) + "syscall/js.copyBytesToJS": (sp) => { + sp >>>= 0; + const dst = loadValue(sp + 8); + const src = loadSlice(sp + 16); + if (!(dst instanceof Uint8Array || dst instanceof Uint8ClampedArray)) { + this.mem.setUint8(sp + 48, 0); + return; + } + const toCopy = src.subarray(0, dst.length); + dst.set(toCopy); + setInt64(sp + 40, toCopy.length); + this.mem.setUint8(sp + 48, 1); + }, + + "debug": (value) => { + console.log(value); + }, + } + }; + } + + async run(instance) { + if (!(instance instanceof WebAssembly.Instance)) { + throw new Error("Go.run: WebAssembly.Instance expected"); + } + this._inst = instance; + this.mem = new DataView(this._inst.exports.mem.buffer); + this._values = [ // JS values that Go currently has references to, indexed by reference id + NaN, + 0, + null, + true, + false, + globalThis, + this, + ]; + this._goRefCounts = new Array(this._values.length).fill(Infinity); // number of references that Go has to a JS value, indexed by reference id + this._ids = new Map([ // mapping from JS values to reference ids + [0, 1], + [null, 2], + [true, 3], + [false, 4], + [globalThis, 5], + [this, 6], + ]); + this._idPool = []; // unused ids that have been garbage collected + this.exited = false; // whether the Go program has exited + + // Pass command line arguments and environment variables to WebAssembly by writing them to the linear memory. 
+ let offset = 4096; + + const strPtr = (str) => { + const ptr = offset; + const bytes = encoder.encode(str + "\0"); + new Uint8Array(this.mem.buffer, offset, bytes.length).set(bytes); + offset += bytes.length; + if (offset % 8 !== 0) { + offset += 8 - (offset % 8); + } + return ptr; + }; + + const argc = this.argv.length; + + const argvPtrs = []; + this.argv.forEach((arg) => { + argvPtrs.push(strPtr(arg)); + }); + argvPtrs.push(0); + + const keys = Object.keys(this.env).sort(); + keys.forEach((key) => { + argvPtrs.push(strPtr(`${key}=${this.env[key]}`)); + }); + argvPtrs.push(0); + + const argv = offset; + argvPtrs.forEach((ptr) => { + this.mem.setUint32(offset, ptr, true); + this.mem.setUint32(offset + 4, 0, true); + offset += 8; + }); + + // The linker guarantees global data starts from at least wasmMinDataAddr. + // Keep in sync with cmd/link/internal/ld/data.go:wasmMinDataAddr. + const wasmMinDataAddr = 4096 + 8192; + if (offset >= wasmMinDataAddr) { + throw new Error("total length of command line and environment variables exceeds limit"); + } + + this._inst.exports.run(argc, argv); + if (this.exited) { + this._resolveExitPromise(); + } + await this._exitPromise; + } + + _resume() { + if (this.exited) { + throw new Error("Go program has already exited"); + } + this._inst.exports.resume(); + if (this.exited) { + this._resolveExitPromise(); + } + } + + _makeFuncWrapper(id) { + const go = this; + return function () { + const event = { id: id, this: this, args: arguments }; + go._pendingEvent = event; + go._resume(); + return event.result; + }; + } + } +})(); \ No newline at end of file diff --git a/graalwasm/graalwasm-tensorflow/src/main/resources/excelize_prep.js b/graalwasm/graalwasm-tensorflow/src/main/resources/excelize_prep.js new file mode 100644 index 00000000..72af4369 --- /dev/null +++ b/graalwasm/graalwasm-tensorflow/src/main/resources/excelize_prep.js @@ -0,0 +1,28 @@ +(async () => { + global = globalThis; + const nowOffset = Date.now(); + const now = 
() => Date.now() - nowOffset; + global.process = {}; + global.nodeCrypto = {}; + global.process.hrtime = global.process.hrtime || ((previousTimestamp) => { + const baseNow = Math.floor((Date.now() - now()) * 1e-3) + const clocktime = now() * 1e-3 + let seconds = Math.floor(clocktime) + baseNow + let nanoseconds = Math.floor((clocktime % 1) * 1e9) + + if (previousTimestamp) { + seconds = seconds - previousTimestamp[0] + nanoseconds = nanoseconds - previousTimestamp[1] + if (nanoseconds < 0) { + seconds-- + nanoseconds += 1e9 + } + } + return [seconds, nanoseconds] + }); + global.nodeCrypto.randomFillSync = function(number) { + return 123; + }; +})(); + + diff --git a/graalwasm/graalwasm-tensorflow/src/main/resources/templates/index.html b/graalwasm/graalwasm-tensorflow/src/main/resources/templates/index.html new file mode 100644 index 00000000..e4c2c912 --- /dev/null +++ b/graalwasm/graalwasm-tensorflow/src/main/resources/templates/index.html @@ -0,0 +1,140 @@ + + + + House Price Prediction + + + + + +

Predict House Price

+ + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +
+

Predicted Price:

+
+ + + diff --git a/graalwasm/graalwasm-tensorflow/src/main/resources/tf.es2017.js b/graalwasm/graalwasm-tensorflow/src/main/resources/tf.es2017.js new file mode 100644 index 00000000..dd339cd8 --- /dev/null +++ b/graalwasm/graalwasm-tensorflow/src/main/resources/tf.es2017.js @@ -0,0 +1,104780 @@ +/** + * @license + * Copyright 2024 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ +(function (global, factory) { + typeof exports === 'object' && typeof module !== 'undefined' ? factory(exports) : + typeof define === 'function' && define.amd ? define(['exports'], factory) : + (global = typeof globalThis !== 'undefined' ? globalThis : global || self, factory(global.tf = global.tf || {})); +})(this, (function (exports) { 'use strict'; + + function _mergeNamespaces(n, m) { + m.forEach(function (e) { + e && typeof e !== 'string' && !Array.isArray(e) && Object.keys(e).forEach(function (k) { + if (k !== 'default' && !(k in n)) { + var d = Object.getOwnPropertyDescriptor(e, k); + Object.defineProperty(n, k, d.get ? d : { + enumerable: true, + get: function () { return e[k]; } + }); + } + }); + }); + return Object.freeze(n); + } + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + const EPSILON_FLOAT32$1 = 1e-7; + const EPSILON_FLOAT16$1 = 1e-4; + /** Convenient class for storing tensor-related data. */ + class DataStorage { + constructor(backend, dataMover) { + this.backend = backend; + this.dataMover = dataMover; + this.data = new WeakMap(); + this.dataIdsCount = 0; + } + get(dataId) { + if (!this.data.has(dataId)) { + this.dataMover.moveData(this.backend, dataId); + } + return this.data.get(dataId); + } + set(dataId, value) { + this.dataIdsCount++; + this.data.set(dataId, value); + } + has(dataId) { + return this.data.has(dataId); + } + delete(dataId) { + this.dataIdsCount--; + return this.data.delete(dataId); + } + numDataIds() { + return this.dataIdsCount; + } + } + /** + * The interface that defines the kernels that should be implemented when + * adding a new backend. New backends don't need to implement every one of the + * methods, this can be done gradually (throw an error for unimplemented + * methods). 
+ */ + class KernelBackend { + refCount(dataId) { + return notYetImplemented('refCount'); + } + incRef(dataId) { + return notYetImplemented('incRef'); + } + timerAvailable() { + return true; + } + time(f) { + return notYetImplemented('time'); + } + read(dataId) { + return notYetImplemented('read'); + } + readSync(dataId) { + return notYetImplemented('readSync'); + } + readToGPU(dataId, options) { + return notYetImplemented('readToGPU'); + } + numDataIds() { + return notYetImplemented('numDataIds'); + } + disposeData(dataId, force) { + return notYetImplemented('disposeData'); + } + write(values, shape, dtype) { + return notYetImplemented('write'); + } + move(dataId, values, shape, dtype, refCount) { + return notYetImplemented('move'); + } + createTensorFromGPUData(values, shape, dtype) { + return notYetImplemented('createTensorFromGPUData'); + } + memory() { + return notYetImplemented('memory'); + } + /** Returns the highest precision for floats in bits (e.g. 16 or 32) */ + floatPrecision() { + return notYetImplemented('floatPrecision'); + } + /** Returns the smallest representable number. */ + epsilon() { + return this.floatPrecision() === 32 ? EPSILON_FLOAT32$1 : EPSILON_FLOAT16$1; + } + dispose() { + return notYetImplemented('dispose'); + } + } + function notYetImplemented(kernelName) { + throw new Error(`'${kernelName}' not yet implemented or not found in the registry. ` + + `This kernel may not be supported by the tfjs backend you have chosen`); + } + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + /** + * Shuffles the array in-place using Fisher-Yates algorithm. + * + * ```js + * const a = [1, 2, 3, 4, 5]; + * tf.util.shuffle(a); + * console.log(a); + * ``` + * + * @param array The array to shuffle in-place. + * + * @doc {heading: 'Util', namespace: 'util'} + */ + // tslint:disable-next-line:no-any + function shuffle(array) { + let counter = array.length; + let index = 0; + // While there are elements in the array + while (counter > 0) { + // Pick a random index + index = (Math.random() * counter) | 0; + // Decrease counter by 1 + counter--; + // And swap the last element with it + swap(array, counter, index); + } + } + /** + * Shuffles two arrays in-place the same way using Fisher-Yates algorithm. + * + * ```js + * const a = [1,2,3,4,5]; + * const b = [11,22,33,44,55]; + * tf.util.shuffleCombo(a, b); + * console.log(a, b); + * ``` + * + * @param array The first array to shuffle in-place. + * @param array2 The second array to shuffle in-place with the same permutation + * as the first array. 
+ * + * @doc {heading: 'Util', namespace: 'util'} + */ + function shuffleCombo( + // tslint:disable-next-line:no-any + array, + // tslint:disable-next-line:no-any + array2) { + if (array.length !== array2.length) { + throw new Error(`Array sizes must match to be shuffled together ` + + `First array length was ${array.length}` + + `Second array length was ${array2.length}`); + } + let counter = array.length; + let index = 0; + // While there are elements in the array + while (counter > 0) { + // Pick a random index + index = (Math.random() * counter) | 0; + // Decrease counter by 1 + counter--; + // And swap the last element of each array with it + swap(array, counter, index); + swap(array2, counter, index); + } + } + /** Clamps a value to a specified range. */ + function clamp(min, x, max) { + return Math.max(min, Math.min(x, max)); + } + function nearestLargerEven(val) { + return val % 2 === 0 ? val : val + 1; + } + function swap(object, left, right) { + const temp = object[left]; + object[left] = object[right]; + object[right] = temp; + } + function sum$4(arr) { + let sum = 0; + for (let i = 0; i < arr.length; i++) { + sum += arr[i]; + } + return sum; + } + /** + * Returns a sample from a uniform [a, b) distribution. + * + * @param a The minimum support (inclusive). + * @param b The maximum support (exclusive). + * @return A pseudorandom number on the half-open interval [a,b). + */ + function randUniform(a, b) { + const r = Math.random(); + return (b * r) + (1 - r) * a; + } + /** Returns the squared Euclidean distance between two vectors. */ + function distSquared(a, b) { + let result = 0; + for (let i = 0; i < a.length; i++) { + const diff = Number(a[i]) - Number(b[i]); + result += diff * diff; + } + return result; + } + /** + * Asserts that the expression is true. Otherwise throws an error with the + * provided message. + * + * ```js + * const x = 2; + * tf.util.assert(x === 2, 'x is not 2'); + * ``` + * + * @param expr The expression to assert (as a boolean). 
+ * @param msg A function that returns the message to report when throwing an + * error. We use a function for performance reasons. + * + * @doc {heading: 'Util', namespace: 'util'} + */ + function assert$1(expr, msg) { + if (!expr) { + throw new Error(typeof msg === 'string' ? msg : msg()); + } + } + function assertShapesMatch(shapeA, shapeB, errorMessagePrefix = '') { + assert$1(arraysEqual(shapeA, shapeB), () => errorMessagePrefix + ` Shapes ${shapeA} and ${shapeB} must match`); + } + function assertNonNull(a) { + assert$1(a != null, () => `The input to the tensor constructor must be a non-null value.`); + } + /** + * Returns the size (number of elements) of the tensor given its shape. + * + * ```js + * const shape = [3, 4, 2]; + * const size = tf.util.sizeFromShape(shape); + * console.log(size); + * ``` + * + * @doc {heading: 'Util', namespace: 'util'} + */ + function sizeFromShape(shape) { + if (shape.length === 0) { + // Scalar. + return 1; + } + let size = shape[0]; + for (let i = 1; i < shape.length; i++) { + size *= shape[i]; + } + return size; + } + function isScalarShape(shape) { + return shape.length === 0; + } + function arraysEqualWithNull(n1, n2) { + if (n1 === n2) { + return true; + } + if (n1 == null || n2 == null) { + return false; + } + if (n1.length !== n2.length) { + return false; + } + for (let i = 0; i < n1.length; i++) { + if (n1[i] !== null && n2[i] !== null && n1[i] !== n2[i]) { + return false; + } + } + return true; + } + function arraysEqual(n1, n2) { + if (n1 === n2) { + return true; + } + if (n1 == null || n2 == null) { + return false; + } + if (n1.length !== n2.length) { + return false; + } + for (let i = 0; i < n1.length; i++) { + if (n1[i] !== n2[i]) { + return false; + } + } + return true; + } + function isInt(a) { + return a % 1 === 0; + } + function tanh$3(x) { + // tslint:disable-next-line:no-any + if (Math.tanh != null) { + // tslint:disable-next-line:no-any + return Math.tanh(x); + } + if (x === Infinity) { + return 1; + } + 
else if (x === -Infinity) { + return -1; + } + else { + const e2x = Math.exp(2 * x); + return (e2x - 1) / (e2x + 1); + } + } + function sizeToSquarishShape(size) { + const width = Math.ceil(Math.sqrt(size)); + return [width, Math.ceil(size / width)]; + } + /** + * Creates a new array with randomized indices to a given quantity. + * + * ```js + * const randomTen = tf.util.createShuffledIndices(10); + * console.log(randomTen); + * ``` + * + * @param number Quantity of how many shuffled indices to create. + * + * @doc {heading: 'Util', namespace: 'util'} + */ + function createShuffledIndices(n) { + const shuffledIndices = new Uint32Array(n); + for (let i = 0; i < n; ++i) { + shuffledIndices[i] = i; + } + shuffle(shuffledIndices); + return shuffledIndices; + } + function rightPad(a, size) { + if (size <= a.length) { + return a; + } + return a + ' '.repeat(size - a.length); + } + function repeatedTry(checkFn, delayFn = (counter) => 0, maxCounter, scheduleFn) { + return new Promise((resolve, reject) => { + let tryCount = 0; + const tryFn = () => { + if (checkFn()) { + resolve(); + return; + } + tryCount++; + const nextBackoff = delayFn(tryCount); + if (maxCounter != null && tryCount >= maxCounter) { + reject(); + return; + } + if (scheduleFn != null) { + scheduleFn(tryFn, nextBackoff); + } + else { + // google3 does not allow assigning another variable to setTimeout. + // Don't refactor this so scheduleFn has a default value of setTimeout. + setTimeout(tryFn, nextBackoff); + } + }; + tryFn(); + }); + } + /** + * Given the full size of the array and a shape that may contain -1 as the + * implicit dimension, returns the inferred shape where -1 is replaced. + * E.g. For shape=[2, -1, 3] and size=24, it will return [2, 4, 3]. + * + * @param shape The shape, which may contain -1 in some dimension. + * @param size The full size (number of elements) of the array. + * @return The inferred shape where -1 is replaced with the inferred size. 
+ */ + function inferFromImplicitShape(shape, size) { + let shapeProd = 1; + let implicitIdx = -1; + for (let i = 0; i < shape.length; ++i) { + if (shape[i] >= 0) { + shapeProd *= shape[i]; + } + else if (shape[i] === -1) { + if (implicitIdx !== -1) { + throw Error(`Shapes can only have 1 implicit size. ` + + `Found -1 at dim ${implicitIdx} and dim ${i}`); + } + implicitIdx = i; + } + else if (shape[i] < 0) { + throw Error(`Shapes can not be < 0. Found ${shape[i]} at dim ${i}`); + } + } + if (implicitIdx === -1) { + if (size > 0 && size !== shapeProd) { + throw Error(`Size(${size}) must match the product of shape ${shape}`); + } + return shape; + } + if (shapeProd === 0) { + throw Error(`Cannot infer the missing size in [${shape}] when ` + + `there are 0 elements`); + } + if (size % shapeProd !== 0) { + throw Error(`The implicit shape can't be a fractional number. ` + + `Got ${size} / ${shapeProd}`); + } + const newShape = shape.slice(); + newShape[implicitIdx] = size / shapeProd; + return newShape; + } + function parseAxisParam(axis, shape) { + const rank = shape.length; + // Normalize input + axis = axis == null ? shape.map((s, i) => i) : [].concat(axis); + // Check for valid range + assert$1(axis.every(ax => ax >= -rank && ax < rank), () => `All values in axis param must be in range [-${rank}, ${rank}) but ` + + `got axis ${axis}`); + // Check for only integers + assert$1(axis.every(ax => isInt(ax)), () => `All values in axis param must be integers but ` + + `got axis ${axis}`); + // Handle negative axis. + return axis.map(a => a < 0 ? rank + a : a); + } + /** Reduces the shape by removing all dimensions of shape 1. */ + function squeezeShape(shape, axis) { + const newShape = []; + const keptDims = []; + const isEmptyArray = axis != null && Array.isArray(axis) && axis.length === 0; + const axes = (axis == null || isEmptyArray) ? 
+ null : + parseAxisParam(axis, shape).sort(); + let j = 0; + for (let i = 0; i < shape.length; ++i) { + if (axes != null) { + if (axes[j] === i && shape[i] !== 1) { + throw new Error(`Can't squeeze axis ${i} since its dim '${shape[i]}' is not 1`); + } + if ((axes[j] == null || axes[j] > i) && shape[i] === 1) { + newShape.push(shape[i]); + keptDims.push(i); + } + if (axes[j] <= i) { + j++; + } + } + if (shape[i] !== 1) { + newShape.push(shape[i]); + keptDims.push(i); + } + } + return { newShape, keptDims }; + } + function getTypedArrayFromDType(dtype, size) { + return getArrayFromDType(dtype, size); + } + function getArrayFromDType(dtype, size) { + let values = null; + if (dtype == null || dtype === 'float32') { + values = new Float32Array(size); + } + else if (dtype === 'int32') { + values = new Int32Array(size); + } + else if (dtype === 'bool') { + values = new Uint8Array(size); + } + else if (dtype === 'string') { + values = new Array(size); + } + else { + throw new Error(`Unknown data type ${dtype}`); + } + return values; + } + function checkConversionForErrors(vals, dtype) { + for (let i = 0; i < vals.length; i++) { + const num = vals[i]; + if (isNaN(num) || !isFinite(num)) { + throw Error(`A tensor of type ${dtype} being uploaded contains ${num}.`); + } + } + } + /** Returns true if the dtype is valid. */ + function isValidDtype(dtype) { + return dtype === 'bool' || dtype === 'complex64' || dtype === 'float32' || + dtype === 'int32' || dtype === 'string'; + } + /** + * Returns true if the new type can't encode the old type without loss of + * precision. 
+ */ + function hasEncodingLoss(oldType, newType) { + if (newType === 'complex64') { + return false; + } + if (newType === 'float32' && oldType !== 'complex64') { + return false; + } + if (newType === 'int32' && oldType !== 'float32' && oldType !== 'complex64') { + return false; + } + if (newType === 'bool' && oldType === 'bool') { + return false; + } + return true; + } + function bytesPerElement(dtype) { + if (dtype === 'float32' || dtype === 'int32') { + return 4; + } + else if (dtype === 'complex64') { + return 8; + } + else if (dtype === 'bool') { + return 1; + } + else { + throw new Error(`Unknown dtype ${dtype}`); + } + } + /** + * Returns the approximate number of bytes allocated in the string array - 2 + * bytes per character. Computing the exact bytes for a native string in JS + * is not possible since it depends on the encoding of the html page that + * serves the website. + */ + function bytesFromStringArray(arr) { + if (arr == null) { + return 0; + } + let bytes = 0; + arr.forEach(x => bytes += x.length); + return bytes; + } + /** Returns true if the value is a string. 
*/ + function isString(value) { + return typeof value === 'string' || value instanceof String; + } + function isBoolean(value) { + return typeof value === 'boolean'; + } + function isNumber(value) { + return typeof value === 'number'; + } + function inferDtype(values) { + if (Array.isArray(values)) { + return inferDtype(values[0]); + } + if (values instanceof Float32Array) { + return 'float32'; + } + else if (values instanceof Int32Array || values instanceof Uint8Array || + values instanceof Uint8ClampedArray) { + return 'int32'; + } + else if (isNumber(values)) { + return 'float32'; + } + else if (isString(values)) { + return 'string'; + } + else if (isBoolean(values)) { + return 'bool'; + } + return 'float32'; + } + function isFunction(f) { + return !!(f && f.constructor && f.call && f.apply); + } + function nearestDivisor(size, start) { + for (let i = start; i < size; ++i) { + if (size % i === 0) { + return i; + } + } + return size; + } + function computeStrides(shape) { + const rank = shape.length; + if (rank < 2) { + return []; + } + // Last dimension has implicit stride of 1, thus having D-1 (instead of D) + // strides. + const strides = new Array(rank - 1); + strides[rank - 2] = shape[rank - 1]; + for (let i = rank - 3; i >= 0; --i) { + strides[i] = strides[i + 1] * shape[i + 1]; + } + return strides; + } + function createNestedArray(offset, shape, a, isComplex = false) { + const ret = new Array(); + if (shape.length === 1) { + const d = shape[0] * (isComplex ? 2 : 1); + for (let i = 0; i < d; i++) { + ret[i] = a[offset + i]; + } + } + else { + const d = shape[0]; + const rest = shape.slice(1); + const len = rest.reduce((acc, c) => acc * c) * (isComplex ? 2 : 1); + for (let i = 0; i < d; i++) { + ret[i] = createNestedArray(offset + i * len, rest, a, isComplex); + } + } + return ret; + } + // Provide a nested array of TypedArray in given shape. 
+ function toNestedArray(shape, a, isComplex = false) { + if (shape.length === 0) { + // Scalar type should return a single number. + return a[0]; + } + const size = shape.reduce((acc, c) => acc * c) * (isComplex ? 2 : 1); + if (size === 0) { + // A tensor with shape zero should be turned into empty list. + return []; + } + if (size !== a.length) { + throw new Error(`[${shape}] does not match the input size ${a.length}${isComplex ? ' for a complex tensor' : ''}.`); + } + return createNestedArray(0, shape, a, isComplex); + } + function convertBackendValuesAndArrayBuffer(data, dtype) { + // If is type Uint8Array[], return it directly. + if (Array.isArray(data)) { + return data; + } + if (dtype === 'float32') { + return data instanceof Float32Array ? data : new Float32Array(data); + } + else if (dtype === 'int32') { + return data instanceof Int32Array ? data : new Int32Array(data); + } + else if (dtype === 'bool' || dtype === 'string') { + return Uint8Array.from(new Int32Array(data)); + } + else { + throw new Error(`Unknown dtype ${dtype}`); + } + } + function makeOnesTypedArray(size, dtype) { + const array = makeZerosTypedArray(size, dtype); + for (let i = 0; i < array.length; i++) { + array[i] = 1; + } + return array; + } + function makeZerosTypedArray(size, dtype) { + if (dtype == null || dtype === 'float32' || dtype === 'complex64') { + return new Float32Array(size); + } + else if (dtype === 'int32') { + return new Int32Array(size); + } + else if (dtype === 'bool') { + return new Uint8Array(size); + } + else { + throw new Error(`Unknown data type ${dtype}`); + } + } + /** + * Make nested `TypedArray` filled with zeros. + * @param shape The shape information for the nested array. + * @param dtype dtype of the array element. 
+ */ + function makeZerosNestedTypedArray(shape, dtype) { + const size = shape.reduce((prev, curr) => prev * curr, 1); + if (dtype == null || dtype === 'float32') { + return toNestedArray(shape, new Float32Array(size)); + } + else if (dtype === 'int32') { + return toNestedArray(shape, new Int32Array(size)); + } + else if (dtype === 'bool') { + return toNestedArray(shape, new Uint8Array(size)); + } + else { + throw new Error(`Unknown data type ${dtype}`); + } + } + function assertNonNegativeIntegerDimensions(shape) { + shape.forEach(dimSize => { + assert$1(Number.isInteger(dimSize) && dimSize >= 0, () => `Tensor must have a shape comprised of positive integers but got ` + + `shape [${shape}].`); + }); + } + /** + * Computes flat index for a given location (multidimentionsal index) in a + * Tensor/multidimensional array. + * + * @param locs Location in the tensor. + * @param rank Rank of the tensor. + * @param strides Tensor strides. + */ + function locToIndex(locs, rank, strides) { + if (rank === 0) { + return 0; + } + else if (rank === 1) { + return locs[0]; + } + let index = locs[locs.length - 1]; + for (let i = 0; i < locs.length - 1; ++i) { + index += strides[i] * locs[i]; + } + return index; + } + /** + * Computes the location (multidimensional index) in a + * tensor/multidimentional array for a given flat index. + * + * @param index Index in flat array. + * @param rank Rank of tensor. + * @param strides Strides of tensor. + */ + function indexToLoc(index, rank, strides) { + if (rank === 0) { + return []; + } + else if (rank === 1) { + return [index]; + } + const locs = new Array(rank); + for (let i = 0; i < locs.length - 1; ++i) { + locs[i] = Math.floor(index / strides[i]); + index -= locs[i] * strides[i]; + } + locs[locs.length - 1] = index; + return locs; + } + /** + * This method asserts whether an object is a Promise instance. 
+ * @param object + */ + // tslint:disable-next-line: no-any + function isPromise(object) { + // We chose to not use 'obj instanceOf Promise' for two reasons: + // 1. It only reliably works for es6 Promise, not other Promise + // implementations. + // 2. It doesn't work with framework that uses zone.js. zone.js monkey + // patch the async calls, so it is possible the obj (patched) is + // comparing to a pre-patched Promise. + return object && object.then && typeof object.then === 'function'; + } + + /** + * @license + * Copyright 2017 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + // Expects flags from URL in the format ?tfjsflags=FLAG1:1,FLAG2:true. + const TENSORFLOWJS_FLAGS_PREFIX = 'tfjsflags'; + /** + * The environment contains evaluated flags as well as the registered platform. + * This is always used as a global singleton and can be retrieved with + * `tf.env()`. 
+ * + * @doc {heading: 'Environment'} + */ + class Environment { + // tslint:disable-next-line: no-any + constructor(global) { + this.global = global; + this.flags = {}; + this.flagRegistry = {}; + this.urlFlags = {}; + // Jasmine spies on this in 'environment_test.ts' + this.getQueryParams = getQueryParams; + this.populateURLFlags(); + } + setPlatform(platformName, platform) { + if (this.platform != null) { + if (!(env().getBool('IS_TEST') || env().getBool('PROD'))) { + console.warn(`Platform ${this.platformName} has already been set. ` + + `Overwriting the platform with ${platformName}.`); + } + } + this.platformName = platformName; + this.platform = platform; + } + registerFlag(flagName, evaluationFn, setHook) { + this.flagRegistry[flagName] = { evaluationFn, setHook }; + // Override the flag value from the URL. This has to happen here because + // the environment is initialized before flags get registered. + if (this.urlFlags[flagName] != null) { + const flagValue = this.urlFlags[flagName]; + if (!(env().getBool('IS_TEST') || env().getBool('PROD'))) { + console.warn(`Setting feature override from URL ${flagName}: ${flagValue}.`); + } + this.set(flagName, flagValue); + } + } + async getAsync(flagName) { + if (flagName in this.flags) { + return this.flags[flagName]; + } + this.flags[flagName] = await this.evaluateFlag(flagName); + return this.flags[flagName]; + } + get(flagName) { + if (flagName in this.flags) { + return this.flags[flagName]; + } + const flagValue = this.evaluateFlag(flagName); + if (isPromise(flagValue)) { + throw new Error(`Flag ${flagName} cannot be synchronously evaluated. ` + + `Please use getAsync() instead.`); + } + this.flags[flagName] = flagValue; + return this.flags[flagName]; + } + getNumber(flagName) { + return this.get(flagName); + } + getBool(flagName) { + return this.get(flagName); + } + getString(flagName) { + return this.get(flagName); + } + getFlags() { + return this.flags; + } + // For backwards compatibility. 
+ get features() { + return this.flags; + } + set(flagName, value) { + if (this.flagRegistry[flagName] == null) { + throw new Error(`Cannot set flag ${flagName} as it has not been registered.`); + } + this.flags[flagName] = value; + if (this.flagRegistry[flagName].setHook != null) { + this.flagRegistry[flagName].setHook(value); + } + } + evaluateFlag(flagName) { + if (this.flagRegistry[flagName] == null) { + throw new Error(`Cannot evaluate flag '${flagName}': no evaluation function found.`); + } + return this.flagRegistry[flagName].evaluationFn(); + } + setFlags(flags) { + this.flags = Object.assign({}, flags); + } + reset() { + this.flags = {}; + this.urlFlags = {}; + this.populateURLFlags(); + } + populateURLFlags() { + if (typeof this.global === 'undefined' || + typeof this.global.location === 'undefined' || + typeof this.global.location.search === 'undefined') { + return; + } + const urlParams = this.getQueryParams(this.global.location.search); + if (TENSORFLOWJS_FLAGS_PREFIX in urlParams) { + const keyValues = urlParams[TENSORFLOWJS_FLAGS_PREFIX].split(','); + keyValues.forEach(keyValue => { + const [key, value] = keyValue.split(':'); + this.urlFlags[key] = parseValue(key, value); + }); + } + } + } + function getQueryParams(queryString) { + const params = {}; + queryString.replace(/[?&]([^=?&]+)(?:=([^&]*))?/g, (s, ...t) => { + decodeParam(params, t[0], t[1]); + return t.join('='); + }); + return params; + } + function decodeParam(params, name, value) { + params[decodeURIComponent(name)] = decodeURIComponent(value || ''); + } + function parseValue(flagName, value) { + const lowerCaseValue = value.toLowerCase(); + if (lowerCaseValue === 'true' || lowerCaseValue === 'false') { + return lowerCaseValue === 'true'; + } + else if (`${+lowerCaseValue}` === lowerCaseValue) { + return +lowerCaseValue; + } + else { + return value; + } + } + /** + * Returns the current environment (a global singleton). 
+ * + * The environment object contains the evaluated feature values as well as the + * active platform. + * + * @doc {heading: 'Environment'} + */ + function env() { + return exports.ENV; + } + exports.ENV = null; + function setEnvironmentGlobal(environment) { + exports.ENV = environment; + } + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + // Note that the identifier globalNameSpace is scoped to this module, but will + // always resolve to the same global object regardless of how the module is + // resolved. 
+ // tslint:disable-next-line:no-any + let globalNameSpace; + // tslint:disable-next-line:no-any + function getGlobalNamespace() { + if (globalNameSpace == null) { + // tslint:disable-next-line:no-any + let ns; + if (typeof (window) !== 'undefined') { + ns = window; + } + else if (typeof (global) !== 'undefined') { + ns = global; + } + else if (typeof (process) !== 'undefined') { + ns = process; + } + else if (typeof (self) !== 'undefined') { + ns = self; + } + else { + throw new Error('Could not find a global object'); + } + globalNameSpace = ns; + } + return globalNameSpace; + } + // tslint:disable-next-line:no-any + function getGlobalMap() { + const ns = getGlobalNamespace(); + if (ns._tfGlobals == null) { + ns._tfGlobals = new Map(); + } + return ns._tfGlobals; + } + /** + * Returns a globally accessible 'singleton' object. + * + * @param key the name of the object + * @param init a function to initialize to initialize this object + * the first time it is fetched. + */ + function getGlobal(key, init) { + const globalMap = getGlobalMap(); + if (globalMap.has(key)) { + return globalMap.get(key); + } + else { + const singleton = init(); + globalMap.set(key, singleton); + return globalMap.get(key); + } + } + + const Abs = 'Abs'; + const Acos = 'Acos'; + const Acosh = 'Acosh'; + const Add$1 = 'Add'; + const AddN = 'AddN'; + const All = 'All'; + const Any = 'Any'; + const ArgMax = 'ArgMax'; + const ArgMin = 'ArgMin'; + const Asin = 'Asin'; + const Asinh = 'Asinh'; + const Atan = 'Atan'; + const Atanh = 'Atanh'; + const Atan2 = 'Atan2'; + const AvgPool = 'AvgPool'; + const AvgPoolGrad = 'AvgPoolGrad'; + const AvgPool3D = 'AvgPool3D'; + const AvgPool3DGrad = 'AvgPool3DGrad'; + const BatchMatMul = 'BatchMatMul'; + const BatchToSpaceND = 'BatchToSpaceND'; + const Bincount = 'Bincount'; + const BitwiseAnd = 'BitwiseAnd'; + const BroadcastTo = 'BroadcastTo'; + const BroadcastArgs = 'BroadcastArgs'; + const Cast = 'Cast'; + const Ceil = 'Ceil'; + const ClipByValue = 
'ClipByValue'; + const Complex = 'Complex'; + const ComplexAbs = 'ComplexAbs'; + const Concat = 'Concat'; + const Conv2D$1 = 'Conv2D'; + const Conv2DBackpropFilter = 'Conv2DBackpropFilter'; + const Conv2DBackpropInput = 'Conv2DBackpropInput'; + const Conv3D$1 = 'Conv3D'; + const Conv3DBackpropFilterV2 = 'Conv3DBackpropFilterV2'; + const Conv3DBackpropInputV2 = 'Conv3DBackpropInputV2'; + const Cos = 'Cos'; + const Cosh = 'Cosh'; + const Cumprod = 'Cumprod'; + const Cumsum = 'Cumsum'; + const CropAndResize = 'CropAndResize'; + const DenseBincount = 'DenseBincount'; + const DepthToSpace = 'DepthToSpace'; + const DepthwiseConv2dNative = 'DepthwiseConv2dNative'; + const DepthwiseConv2dNativeBackpropFilter = 'DepthwiseConv2dNativeBackpropFilter'; + const DepthwiseConv2dNativeBackpropInput = 'DepthwiseConv2dNativeBackpropInput'; + const Diag = 'Diag'; + const Dilation2D = 'Dilation2D'; + const Dilation2DBackpropInput = 'Dilation2DBackpropInput'; + const Dilation2DBackpropFilter = 'Dilation2DBackpropFilter'; + const Draw = 'Draw'; + const RealDiv = 'RealDiv'; + const Einsum = 'Einsum'; + const Elu$1 = 'Elu'; + const EluGrad = 'EluGrad'; + const Erf = 'Erf'; + const Equal = 'Equal'; + const Exp = 'Exp'; + const ExpandDims = 'ExpandDims'; + const Expm1 = 'Expm1'; + const FFT = 'FFT'; + const Fill = 'Fill'; + const FlipLeftRight = 'FlipLeftRight'; + const Floor = 'Floor'; + const FloorDiv = 'FloorDiv'; + const FusedBatchNorm = 'FusedBatchNorm'; + const GatherV2 = 'GatherV2'; + const GatherNd = 'GatherNd'; + const Greater = 'Greater'; + const GreaterEqual = 'GreaterEqual'; + const Identity$1 = 'Identity'; + const IFFT = 'IFFT'; + const Imag = 'Imag'; + const IsFinite = 'IsFinite'; + const IsInf = 'IsInf'; + const IsNan = 'IsNan'; + const LeakyRelu = 'LeakyRelu'; + const Less = 'Less'; + const LessEqual = 'LessEqual'; + const LinSpace = 'LinSpace'; + const Log = 'Log'; + const Log1p = 'Log1p'; + const LogicalAnd = 'LogicalAnd'; + const LogicalNot = 'LogicalNot'; + const 
LogicalOr = 'LogicalOr'; + const LogicalXor = 'LogicalXor'; + const LogSoftmax$1 = 'LogSoftmax'; + const LowerBound = 'LowerBound'; + const LRN = 'LRN'; + const LRNGrad = 'LRNGrad'; + const MatrixBandPart = 'MatrixBandPart'; + const Max = 'Max'; + const Maximum$1 = 'Maximum'; + const MaxPool = 'MaxPool'; + const MaxPoolGrad = 'MaxPoolGrad'; + const MaxPool3D = 'MaxPool3D'; + const MaxPool3DGrad = 'MaxPool3DGrad'; + const MaxPoolWithArgmax = 'MaxPoolWithArgmax'; + const Mean = 'Mean'; + const Min = 'Min'; + const Minimum$1 = 'Minimum'; + const MirrorPad = 'MirrorPad'; + const Mod = 'Mod'; + const Multinomial = 'Multinomial'; + const Multiply$1 = 'Multiply'; + const Neg = 'Neg'; + const NotEqual = 'NotEqual'; + const NonMaxSuppressionV3 = 'NonMaxSuppressionV3'; + const NonMaxSuppressionV4 = 'NonMaxSuppressionV4'; + const NonMaxSuppressionV5 = 'NonMaxSuppressionV5'; + const OnesLike = 'OnesLike'; + const OneHot = 'OneHot'; + const Pack = 'Pack'; + const PadV2 = 'PadV2'; + const Pool = 'Pool'; + const Pow = 'Pow'; + const Prelu = 'Prelu'; + const Prod = 'Prod'; + const RaggedGather = 'RaggedGather'; + const RaggedRange = 'RaggedRange'; + const RaggedTensorToTensor = 'RaggedTensorToTensor'; + const Range = 'Range'; + const Real = 'Real'; + const Reciprocal = 'Reciprocal'; + const Relu$1 = 'Relu'; + const Reshape$1 = 'Reshape'; + const ResizeNearestNeighbor = 'ResizeNearestNeighbor'; + const ResizeNearestNeighborGrad = 'ResizeNearestNeighborGrad'; + const ResizeBilinear = 'ResizeBilinear'; + const ResizeBilinearGrad = 'ResizeBilinearGrad'; + const Relu6$1 = 'Relu6'; + const Reverse = 'Reverse'; + const Round = 'Round'; + const Rsqrt = 'Rsqrt'; + const ScatterNd = 'ScatterNd'; + const TensorScatterUpdate = 'TensorScatterUpdate'; + const SearchSorted = 'SearchSorted'; + const Select = 'Select'; + const Selu$1 = 'Selu'; + const Slice = 'Slice'; + const Sin = 'Sin'; + const Sinh = 'Sinh'; + const Sign = 'Sign'; + const Sigmoid$1 = 'Sigmoid'; + const Softplus$1 = 'Softplus'; 
+ const Sqrt = 'Sqrt'; + const Sum = 'Sum'; + const SpaceToBatchND = 'SpaceToBatchND'; + const SplitV = 'SplitV'; + const Softmax$2 = 'Softmax'; + const SparseFillEmptyRows = 'SparseFillEmptyRows'; + const SparseReshape = 'SparseReshape'; + const SparseSegmentMean = 'SparseSegmentMean'; + const SparseSegmentSum = 'SparseSegmentSum'; + const SparseToDense = 'SparseToDense'; + const SquaredDifference = 'SquaredDifference'; + const Square = 'Square'; + const StaticRegexReplace = 'StaticRegexReplace'; + const StridedSlice = 'StridedSlice'; + const StringNGrams = 'StringNGrams'; + const StringSplit = 'StringSplit'; + const StringToHashBucketFast = 'StringToHashBucketFast'; + const Sub = 'Sub'; + const Tan = 'Tan'; + const Tanh$1 = 'Tanh'; + const Tile = 'Tile'; + const TopK = 'TopK'; + const Transform = 'Transform'; + const Transpose = 'Transpose'; + const Unique = 'Unique'; + const Unpack = 'Unpack'; + const UnsortedSegmentSum = 'UnsortedSegmentSum'; + const UpperBound = 'UpperBound'; + const ZerosLike = 'ZerosLike'; + /** + * TensorFlow.js-only kernels + */ + const Step = 'Step'; + const FromPixels = 'FromPixels'; + const RotateWithOffset = 'RotateWithOffset'; + const _FusedMatMul = '_FusedMatMul'; + const FusedConv2D = 'FusedConv2D'; + const FusedDepthwiseConv2D = 'FusedDepthwiseConv2D'; + + /** + * @license + * Copyright 2018 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ + function warn(...msg) { + if (!(env().getBool('IS_TEST') || env().getBool('PROD'))) { + console.warn(...msg); + } + } + function log$3(...msg) { + if (!(env().getBool('IS_TEST') || env().getBool('PROD'))) { + console.log(...msg); + } + } + + /** + * @license + * Copyright 2019 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + const kernelRegistry = getGlobal('kernelRegistry', () => new Map()); + const gradRegistry = getGlobal('gradRegistry', () => new Map()); + /** + * Returns the kernel function (code) associated with the provided names. + * + * @param kernelName The official name of the kernel. + * @param backendName The official name of the backend. + */ + function getKernel(kernelName, backendName) { + const key = makeKey(kernelName, backendName); + return kernelRegistry.get(key); + } + /** + * Returns the registered gradient info associated with the provided kernel. + * @param kernelName The official TF kernel name. 
+ */ + function getGradient(kernelName) { + return gradRegistry.get(kernelName); + } + function getKernelsForBackend(backendName) { + const it = kernelRegistry.entries(); + const result = []; + while (true) { + const { done, value } = it.next(); + if (done) { + break; + } + const [key, config] = value; + const [backend,] = key.split('_'); + if (backend === backendName) { + result.push(config); + } + } + return result; + } + /** + * Registers the function (forward pass) for the kernel in a global registry. + * + * @param config A config object with the following properties: + * - `kernelName` The official name of the kernel. + * - `backendName` The official name of the backend. + * - `kernelFunc` The function to run during the forward pass of the kernel. + * - `setupFunc` Optional. Gets called once, after the backend initializes. + * - `disposeFunc` Optional. Gets called once, right before the backend is + * disposed. + */ + function registerKernel(config) { + const { kernelName, backendName } = config; + const key = makeKey(kernelName, backendName); + if (kernelRegistry.has(key)) { + warn(`The kernel '${kernelName}' for backend ` + + `'${backendName}' is already registered`); + } + kernelRegistry.set(key, config); + } + /** + * Registers a gradient function for a given kernel in the global registry, + * to be used during the back-propagation of that kernel. + * + * @param config An object with the following properties: + * - `kernelName` The name of the kernel that the gradient function is for. + * - `gradFunc` The function to run during back-propagation. + */ + function registerGradient(config) { + const { kernelName } = config; + if (gradRegistry.has(kernelName)) { + // TODO (yassogba) after 3.0 assess whether we need to keep this gated + // to debug mode. + if (env().getBool('DEBUG')) { + warn(`Overriding the gradient for '${kernelName}'`); + } + } + gradRegistry.set(kernelName, config); + } + /** + * Removes the kernel function from the registry. 
+ * + * @param kernelName The official name of the kernel. + * @param backendName The official name of the backend. + * + */ + function unregisterKernel(kernelName, backendName) { + const key = makeKey(kernelName, backendName); + if (!kernelRegistry.has(key)) { + throw new Error(`The kernel '${kernelName}' for backend ` + + `'${backendName}' is not registered`); + } + kernelRegistry.delete(key); + } + /** Removes the registered gradient from the global registry. */ + function unregisterGradient(kernelName) { + if (!gradRegistry.has(kernelName)) { + throw new Error(`The gradient '${kernelName}' for backend is not registered`); + } + gradRegistry.delete(kernelName); + } + /** + * Finds kernels that have already been registered to a backend and re-registers + * them for a new backend. Useful for registering custom backends. + * @param registeredBackendName Already registered backend. + * @param newBackendName New backend. + */ + function copyRegisteredKernels(registeredBackendName, newBackendName) { + const kernels = getKernelsForBackend(registeredBackendName); + kernels.forEach(kernelConfig => { + const newKernelConfig = Object.assign({}, kernelConfig, { backendName: newBackendName }); + registerKernel(newKernelConfig); + }); + } + function makeKey(kernelName, backendName) { + return `${backendName}_${kernelName}`; + } + + /** + * @license + * Copyright 2023 Google LLC. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ + function isTypedArrayBrowser(a) { + return a instanceof Float32Array || a instanceof Int32Array || + a instanceof Uint8Array || a instanceof Uint8ClampedArray; + } + + var commonjsGlobal = typeof globalThis !== 'undefined' ? globalThis : typeof window !== 'undefined' ? window : typeof global !== 'undefined' ? global : typeof self !== 'undefined' ? self : {}; + + function getDefaultExportFromCjs (x) { + return x && x.__esModule && Object.prototype.hasOwnProperty.call(x, 'default') ? x['default'] : x; + } + + function getDefaultExportFromNamespaceIfPresent (n) { + return n && Object.prototype.hasOwnProperty.call(n, 'default') ? n['default'] : n; + } + + function getDefaultExportFromNamespaceIfNotNamed (n) { + return n && Object.prototype.hasOwnProperty.call(n, 'default') && Object.keys(n).length === 1 ? n['default'] : n; + } + + function getAugmentedNamespace(n) { + if (n.__esModule) return n; + var f = n.default; + if (typeof f == "function") { + var a = function a () { + if (this instanceof a) { + var args = [null]; + args.push.apply(args, arguments); + var Ctor = Function.bind.apply(f, args); + return new Ctor(); + } + return f.apply(this, arguments); + }; + a.prototype = f.prototype; + } else a = {}; + Object.defineProperty(a, '__esModule', {value: true}); + Object.keys(n).forEach(function (k) { + var d = Object.getOwnPropertyDescriptor(n, k); + Object.defineProperty(a, k, d.get ? 
d : { + enumerable: true, + get: function () { + return n[k]; + } + }); + }); + return a; + } + + var long = Long$1; + + /** + * wasm optimizations, to do native i64 multiplication and divide + */ + var wasm = null; + + try { + wasm = new WebAssembly.Instance(new WebAssembly.Module(new Uint8Array([ + 0, 97, 115, 109, 1, 0, 0, 0, 1, 13, 2, 96, 0, 1, 127, 96, 4, 127, 127, 127, 127, 1, 127, 3, 7, 6, 0, 1, 1, 1, 1, 1, 6, 6, 1, 127, 1, 65, 0, 11, 7, 50, 6, 3, 109, 117, 108, 0, 1, 5, 100, 105, 118, 95, 115, 0, 2, 5, 100, 105, 118, 95, 117, 0, 3, 5, 114, 101, 109, 95, 115, 0, 4, 5, 114, 101, 109, 95, 117, 0, 5, 8, 103, 101, 116, 95, 104, 105, 103, 104, 0, 0, 10, 191, 1, 6, 4, 0, 35, 0, 11, 36, 1, 1, 126, 32, 0, 173, 32, 1, 173, 66, 32, 134, 132, 32, 2, 173, 32, 3, 173, 66, 32, 134, 132, 126, 34, 4, 66, 32, 135, 167, 36, 0, 32, 4, 167, 11, 36, 1, 1, 126, 32, 0, 173, 32, 1, 173, 66, 32, 134, 132, 32, 2, 173, 32, 3, 173, 66, 32, 134, 132, 127, 34, 4, 66, 32, 135, 167, 36, 0, 32, 4, 167, 11, 36, 1, 1, 126, 32, 0, 173, 32, 1, 173, 66, 32, 134, 132, 32, 2, 173, 32, 3, 173, 66, 32, 134, 132, 128, 34, 4, 66, 32, 135, 167, 36, 0, 32, 4, 167, 11, 36, 1, 1, 126, 32, 0, 173, 32, 1, 173, 66, 32, 134, 132, 32, 2, 173, 32, 3, 173, 66, 32, 134, 132, 129, 34, 4, 66, 32, 135, 167, 36, 0, 32, 4, 167, 11, 36, 1, 1, 126, 32, 0, 173, 32, 1, 173, 66, 32, 134, 132, 32, 2, 173, 32, 3, 173, 66, 32, 134, 132, 130, 34, 4, 66, 32, 135, 167, 36, 0, 32, 4, 167, 11 + ])), {}).exports; + } catch (e) { + // no wasm support :( + } + + /** + * Constructs a 64 bit two's-complement integer, given its low and high 32 bit values as *signed* integers. + * See the from* functions below for more convenient ways of constructing Longs. + * @exports Long + * @class A Long class for representing a 64 bit two's-complement integer value. 
+ * @param {number} low The low (signed) 32 bits of the long + * @param {number} high The high (signed) 32 bits of the long + * @param {boolean=} unsigned Whether unsigned or not, defaults to signed + * @constructor + */ + function Long$1(low, high, unsigned) { + + /** + * The low 32 bits as a signed value. + * @type {number} + */ + this.low = low | 0; + + /** + * The high 32 bits as a signed value. + * @type {number} + */ + this.high = high | 0; + + /** + * Whether unsigned or not. + * @type {boolean} + */ + this.unsigned = !!unsigned; + } + + // The internal representation of a long is the two given signed, 32-bit values. + // We use 32-bit pieces because these are the size of integers on which + // Javascript performs bit-operations. For operations like addition and + // multiplication, we split each number into 16 bit pieces, which can easily be + // multiplied within Javascript's floating-point representation without overflow + // or change in sign. + // + // In the algorithms below, we frequently reduce the negative case to the + // positive case by negating the input(s) and then post-processing the result. + // Note that we must ALWAYS check specially whether those values are MIN_VALUE + // (-2^63) because -MIN_VALUE == MIN_VALUE (since 2^63 cannot be represented as + // a positive number, it overflows back into a negative). Not handling this + // case would often result in infinite recursion. + // + // Common constant values ZERO, ONE, NEG_ONE, etc. are defined below the from* + // methods on which they depend. + + /** + * An indicator used to reliably determine if an object is a Long or not. + * @type {boolean} + * @const + * @private + */ + Long$1.prototype.__isLong__; + + Object.defineProperty(Long$1.prototype, "__isLong__", { value: true }); + + /** + * @function + * @param {*} obj Object + * @returns {boolean} + * @inner + */ + function isLong(obj) { + return (obj && obj["__isLong__"]) === true; + } + + /** + * Tests if the specified object is a Long. 
+ * @function + * @param {*} obj Object + * @returns {boolean} + */ + Long$1.isLong = isLong; + + /** + * A cache of the Long representations of small integer values. + * @type {!Object} + * @inner + */ + var INT_CACHE = {}; + + /** + * A cache of the Long representations of small unsigned integer values. + * @type {!Object} + * @inner + */ + var UINT_CACHE = {}; + + /** + * @param {number} value + * @param {boolean=} unsigned + * @returns {!Long} + * @inner + */ + function fromInt(value, unsigned) { + var obj, cachedObj, cache; + if (unsigned) { + value >>>= 0; + if (cache = (0 <= value && value < 256)) { + cachedObj = UINT_CACHE[value]; + if (cachedObj) + return cachedObj; + } + obj = fromBits(value, (value | 0) < 0 ? -1 : 0, true); + if (cache) + UINT_CACHE[value] = obj; + return obj; + } else { + value |= 0; + if (cache = (-128 <= value && value < 128)) { + cachedObj = INT_CACHE[value]; + if (cachedObj) + return cachedObj; + } + obj = fromBits(value, value < 0 ? -1 : 0, false); + if (cache) + INT_CACHE[value] = obj; + return obj; + } + } + + /** + * Returns a Long representing the given 32 bit integer value. + * @function + * @param {number} value The 32 bit integer in question + * @param {boolean=} unsigned Whether unsigned or not, defaults to signed + * @returns {!Long} The corresponding Long value + */ + Long$1.fromInt = fromInt; + + /** + * @param {number} value + * @param {boolean=} unsigned + * @returns {!Long} + * @inner + */ + function fromNumber(value, unsigned) { + if (isNaN(value)) + return unsigned ? 
UZERO : ZERO; + if (unsigned) { + if (value < 0) + return UZERO; + if (value >= TWO_PWR_64_DBL) + return MAX_UNSIGNED_VALUE; + } else { + if (value <= -TWO_PWR_63_DBL) + return MIN_VALUE; + if (value + 1 >= TWO_PWR_63_DBL) + return MAX_VALUE; + } + if (value < 0) + return fromNumber(-value, unsigned).neg(); + return fromBits((value % TWO_PWR_32_DBL) | 0, (value / TWO_PWR_32_DBL) | 0, unsigned); + } + + /** + * Returns a Long representing the given value, provided that it is a finite number. Otherwise, zero is returned. + * @function + * @param {number} value The number in question + * @param {boolean=} unsigned Whether unsigned or not, defaults to signed + * @returns {!Long} The corresponding Long value + */ + Long$1.fromNumber = fromNumber; + + /** + * @param {number} lowBits + * @param {number} highBits + * @param {boolean=} unsigned + * @returns {!Long} + * @inner + */ + function fromBits(lowBits, highBits, unsigned) { + return new Long$1(lowBits, highBits, unsigned); + } + + /** + * Returns a Long representing the 64 bit integer that comes by concatenating the given low and high bits. Each is + * assumed to use 32 bits. 
+ * @function + * @param {number} lowBits The low 32 bits + * @param {number} highBits The high 32 bits + * @param {boolean=} unsigned Whether unsigned or not, defaults to signed + * @returns {!Long} The corresponding Long value + */ + Long$1.fromBits = fromBits; + + /** + * @function + * @param {number} base + * @param {number} exponent + * @returns {number} + * @inner + */ + var pow_dbl = Math.pow; // Used 4 times (4*8 to 15+4) + + /** + * @param {string} str + * @param {(boolean|number)=} unsigned + * @param {number=} radix + * @returns {!Long} + * @inner + */ + function fromString(str, unsigned, radix) { + if (str.length === 0) + throw Error('empty string'); + if (str === "NaN" || str === "Infinity" || str === "+Infinity" || str === "-Infinity") + return ZERO; + if (typeof unsigned === 'number') { + // For goog.math.long compatibility + radix = unsigned, + unsigned = false; + } else { + unsigned = !! unsigned; + } + radix = radix || 10; + if (radix < 2 || 36 < radix) + throw RangeError('radix'); + + var p; + if ((p = str.indexOf('-')) > 0) + throw Error('interior hyphen'); + else if (p === 0) { + return fromString(str.substring(1), unsigned, radix).neg(); + } + + // Do several (8) digits each time through the loop, so as to + // minimize the calls to the very expensive emulated div. + var radixToPower = fromNumber(pow_dbl(radix, 8)); + + var result = ZERO; + for (var i = 0; i < str.length; i += 8) { + var size = Math.min(8, str.length - i), + value = parseInt(str.substring(i, i + size), radix); + if (size < 8) { + var power = fromNumber(pow_dbl(radix, size)); + result = result.mul(power).add(fromNumber(value)); + } else { + result = result.mul(radixToPower); + result = result.add(fromNumber(value)); + } + } + result.unsigned = unsigned; + return result; + } + + /** + * Returns a Long representation of the given string, written using the specified radix. 
+ * @function + * @param {string} str The textual representation of the Long + * @param {(boolean|number)=} unsigned Whether unsigned or not, defaults to signed + * @param {number=} radix The radix in which the text is written (2-36), defaults to 10 + * @returns {!Long} The corresponding Long value + */ + Long$1.fromString = fromString; + + /** + * @function + * @param {!Long|number|string|!{low: number, high: number, unsigned: boolean}} val + * @param {boolean=} unsigned + * @returns {!Long} + * @inner + */ + function fromValue(val, unsigned) { + if (typeof val === 'number') + return fromNumber(val, unsigned); + if (typeof val === 'string') + return fromString(val, unsigned); + // Throws for non-objects, converts non-instanceof Long: + return fromBits(val.low, val.high, typeof unsigned === 'boolean' ? unsigned : val.unsigned); + } + + /** + * Converts the specified value to a Long using the appropriate from* function for its type. + * @function + * @param {!Long|number|string|!{low: number, high: number, unsigned: boolean}} val Value + * @param {boolean=} unsigned Whether unsigned or not, defaults to signed + * @returns {!Long} + */ + Long$1.fromValue = fromValue; + + // NOTE: the compiler should inline these constant values below and then remove these variables, so there should be + // no runtime penalty for these. 
+ + /** + * @type {number} + * @const + * @inner + */ + var TWO_PWR_16_DBL = 1 << 16; + + /** + * @type {number} + * @const + * @inner + */ + var TWO_PWR_24_DBL = 1 << 24; + + /** + * @type {number} + * @const + * @inner + */ + var TWO_PWR_32_DBL = TWO_PWR_16_DBL * TWO_PWR_16_DBL; + + /** + * @type {number} + * @const + * @inner + */ + var TWO_PWR_64_DBL = TWO_PWR_32_DBL * TWO_PWR_32_DBL; + + /** + * @type {number} + * @const + * @inner + */ + var TWO_PWR_63_DBL = TWO_PWR_64_DBL / 2; + + /** + * @type {!Long} + * @const + * @inner + */ + var TWO_PWR_24 = fromInt(TWO_PWR_24_DBL); + + /** + * @type {!Long} + * @inner + */ + var ZERO = fromInt(0); + + /** + * Signed zero. + * @type {!Long} + */ + Long$1.ZERO = ZERO; + + /** + * @type {!Long} + * @inner + */ + var UZERO = fromInt(0, true); + + /** + * Unsigned zero. + * @type {!Long} + */ + Long$1.UZERO = UZERO; + + /** + * @type {!Long} + * @inner + */ + var ONE = fromInt(1); + + /** + * Signed one. + * @type {!Long} + */ + Long$1.ONE = ONE; + + /** + * @type {!Long} + * @inner + */ + var UONE = fromInt(1, true); + + /** + * Unsigned one. + * @type {!Long} + */ + Long$1.UONE = UONE; + + /** + * @type {!Long} + * @inner + */ + var NEG_ONE = fromInt(-1); + + /** + * Signed negative one. + * @type {!Long} + */ + Long$1.NEG_ONE = NEG_ONE; + + /** + * @type {!Long} + * @inner + */ + var MAX_VALUE = fromBits(0xFFFFFFFF|0, 0x7FFFFFFF|0, false); + + /** + * Maximum signed value. + * @type {!Long} + */ + Long$1.MAX_VALUE = MAX_VALUE; + + /** + * @type {!Long} + * @inner + */ + var MAX_UNSIGNED_VALUE = fromBits(0xFFFFFFFF|0, 0xFFFFFFFF|0, true); + + /** + * Maximum unsigned value. + * @type {!Long} + */ + Long$1.MAX_UNSIGNED_VALUE = MAX_UNSIGNED_VALUE; + + /** + * @type {!Long} + * @inner + */ + var MIN_VALUE = fromBits(0, 0x80000000|0, false); + + /** + * Minimum signed value. 
+ * @type {!Long} + */ + Long$1.MIN_VALUE = MIN_VALUE; + + /** + * @alias Long.prototype + * @inner + */ + var LongPrototype = Long$1.prototype; + + /** + * Converts the Long to a 32 bit integer, assuming it is a 32 bit integer. + * @returns {number} + */ + LongPrototype.toInt = function toInt() { + return this.unsigned ? this.low >>> 0 : this.low; + }; + + /** + * Converts the Long to a the nearest floating-point representation of this value (double, 53 bit mantissa). + * @returns {number} + */ + LongPrototype.toNumber = function toNumber() { + if (this.unsigned) + return ((this.high >>> 0) * TWO_PWR_32_DBL) + (this.low >>> 0); + return this.high * TWO_PWR_32_DBL + (this.low >>> 0); + }; + + /** + * Converts the Long to a string written in the specified radix. + * @param {number=} radix Radix (2-36), defaults to 10 + * @returns {string} + * @override + * @throws {RangeError} If `radix` is out of range + */ + LongPrototype.toString = function toString(radix) { + radix = radix || 10; + if (radix < 2 || 36 < radix) + throw RangeError('radix'); + if (this.isZero()) + return '0'; + if (this.isNegative()) { // Unsigned Longs are never negative + if (this.eq(MIN_VALUE)) { + // We need to change the Long value before it can be negated, so we remove + // the bottom-most digit in this base and then recurse to do the rest. + var radixLong = fromNumber(radix), + div = this.div(radixLong), + rem1 = div.mul(radixLong).sub(this); + return div.toString(radix) + rem1.toInt().toString(radix); + } else + return '-' + this.neg().toString(radix); + } + + // Do several (6) digits each time through the loop, so as to + // minimize the calls to the very expensive emulated div. 
+ var radixToPower = fromNumber(pow_dbl(radix, 6), this.unsigned), + rem = this; + var result = ''; + while (true) { + var remDiv = rem.div(radixToPower), + intval = rem.sub(remDiv.mul(radixToPower)).toInt() >>> 0, + digits = intval.toString(radix); + rem = remDiv; + if (rem.isZero()) + return digits + result; + else { + while (digits.length < 6) + digits = '0' + digits; + result = '' + digits + result; + } + } + }; + + /** + * Gets the high 32 bits as a signed integer. + * @returns {number} Signed high bits + */ + LongPrototype.getHighBits = function getHighBits() { + return this.high; + }; + + /** + * Gets the high 32 bits as an unsigned integer. + * @returns {number} Unsigned high bits + */ + LongPrototype.getHighBitsUnsigned = function getHighBitsUnsigned() { + return this.high >>> 0; + }; + + /** + * Gets the low 32 bits as a signed integer. + * @returns {number} Signed low bits + */ + LongPrototype.getLowBits = function getLowBits() { + return this.low; + }; + + /** + * Gets the low 32 bits as an unsigned integer. + * @returns {number} Unsigned low bits + */ + LongPrototype.getLowBitsUnsigned = function getLowBitsUnsigned() { + return this.low >>> 0; + }; + + /** + * Gets the number of bits needed to represent the absolute value of this Long. + * @returns {number} + */ + LongPrototype.getNumBitsAbs = function getNumBitsAbs() { + if (this.isNegative()) // Unsigned Longs are never negative + return this.eq(MIN_VALUE) ? 64 : this.neg().getNumBitsAbs(); + var val = this.high != 0 ? this.high : this.low; + for (var bit = 31; bit > 0; bit--) + if ((val & (1 << bit)) != 0) + break; + return this.high != 0 ? bit + 33 : bit + 1; + }; + + /** + * Tests if this Long's value equals zero. + * @returns {boolean} + */ + LongPrototype.isZero = function isZero() { + return this.high === 0 && this.low === 0; + }; + + /** + * Tests if this Long's value equals zero. This is an alias of {@link Long#isZero}. 
+ * @returns {boolean} + */ + LongPrototype.eqz = LongPrototype.isZero; + + /** + * Tests if this Long's value is negative. + * @returns {boolean} + */ + LongPrototype.isNegative = function isNegative() { + return !this.unsigned && this.high < 0; + }; + + /** + * Tests if this Long's value is positive. + * @returns {boolean} + */ + LongPrototype.isPositive = function isPositive() { + return this.unsigned || this.high >= 0; + }; + + /** + * Tests if this Long's value is odd. + * @returns {boolean} + */ + LongPrototype.isOdd = function isOdd() { + return (this.low & 1) === 1; + }; + + /** + * Tests if this Long's value is even. + * @returns {boolean} + */ + LongPrototype.isEven = function isEven() { + return (this.low & 1) === 0; + }; + + /** + * Tests if this Long's value equals the specified's. + * @param {!Long|number|string} other Other value + * @returns {boolean} + */ + LongPrototype.equals = function equals(other) { + if (!isLong(other)) + other = fromValue(other); + if (this.unsigned !== other.unsigned && (this.high >>> 31) === 1 && (other.high >>> 31) === 1) + return false; + return this.high === other.high && this.low === other.low; + }; + + /** + * Tests if this Long's value equals the specified's. This is an alias of {@link Long#equals}. + * @function + * @param {!Long|number|string} other Other value + * @returns {boolean} + */ + LongPrototype.eq = LongPrototype.equals; + + /** + * Tests if this Long's value differs from the specified's. + * @param {!Long|number|string} other Other value + * @returns {boolean} + */ + LongPrototype.notEquals = function notEquals(other) { + return !this.eq(/* validates */ other); + }; + + /** + * Tests if this Long's value differs from the specified's. This is an alias of {@link Long#notEquals}. + * @function + * @param {!Long|number|string} other Other value + * @returns {boolean} + */ + LongPrototype.neq = LongPrototype.notEquals; + + /** + * Tests if this Long's value differs from the specified's. 
This is an alias of {@link Long#notEquals}. + * @function + * @param {!Long|number|string} other Other value + * @returns {boolean} + */ + LongPrototype.ne = LongPrototype.notEquals; + + /** + * Tests if this Long's value is less than the specified's. + * @param {!Long|number|string} other Other value + * @returns {boolean} + */ + LongPrototype.lessThan = function lessThan(other) { + return this.comp(/* validates */ other) < 0; + }; + + /** + * Tests if this Long's value is less than the specified's. This is an alias of {@link Long#lessThan}. + * @function + * @param {!Long|number|string} other Other value + * @returns {boolean} + */ + LongPrototype.lt = LongPrototype.lessThan; + + /** + * Tests if this Long's value is less than or equal the specified's. + * @param {!Long|number|string} other Other value + * @returns {boolean} + */ + LongPrototype.lessThanOrEqual = function lessThanOrEqual(other) { + return this.comp(/* validates */ other) <= 0; + }; + + /** + * Tests if this Long's value is less than or equal the specified's. This is an alias of {@link Long#lessThanOrEqual}. + * @function + * @param {!Long|number|string} other Other value + * @returns {boolean} + */ + LongPrototype.lte = LongPrototype.lessThanOrEqual; + + /** + * Tests if this Long's value is less than or equal the specified's. This is an alias of {@link Long#lessThanOrEqual}. + * @function + * @param {!Long|number|string} other Other value + * @returns {boolean} + */ + LongPrototype.le = LongPrototype.lessThanOrEqual; + + /** + * Tests if this Long's value is greater than the specified's. + * @param {!Long|number|string} other Other value + * @returns {boolean} + */ + LongPrototype.greaterThan = function greaterThan(other) { + return this.comp(/* validates */ other) > 0; + }; + + /** + * Tests if this Long's value is greater than the specified's. This is an alias of {@link Long#greaterThan}. 
+ * @function + * @param {!Long|number|string} other Other value + * @returns {boolean} + */ + LongPrototype.gt = LongPrototype.greaterThan; + + /** + * Tests if this Long's value is greater than or equal the specified's. + * @param {!Long|number|string} other Other value + * @returns {boolean} + */ + LongPrototype.greaterThanOrEqual = function greaterThanOrEqual(other) { + return this.comp(/* validates */ other) >= 0; + }; + + /** + * Tests if this Long's value is greater than or equal the specified's. This is an alias of {@link Long#greaterThanOrEqual}. + * @function + * @param {!Long|number|string} other Other value + * @returns {boolean} + */ + LongPrototype.gte = LongPrototype.greaterThanOrEqual; + + /** + * Tests if this Long's value is greater than or equal the specified's. This is an alias of {@link Long#greaterThanOrEqual}. + * @function + * @param {!Long|number|string} other Other value + * @returns {boolean} + */ + LongPrototype.ge = LongPrototype.greaterThanOrEqual; + + /** + * Compares this Long's value with the specified's. + * @param {!Long|number|string} other Other value + * @returns {number} 0 if they are the same, 1 if the this is greater and -1 + * if the given one is greater + */ + LongPrototype.compare = function compare(other) { + if (!isLong(other)) + other = fromValue(other); + if (this.eq(other)) + return 0; + var thisNeg = this.isNegative(), + otherNeg = other.isNegative(); + if (thisNeg && !otherNeg) + return -1; + if (!thisNeg && otherNeg) + return 1; + // At this point the sign bits are the same + if (!this.unsigned) + return this.sub(other).isNegative() ? -1 : 1; + // Both are positive if at least one is unsigned + return (other.high >>> 0) > (this.high >>> 0) || (other.high === this.high && (other.low >>> 0) > (this.low >>> 0)) ? -1 : 1; + }; + + /** + * Compares this Long's value with the specified's. This is an alias of {@link Long#compare}. 
+ * @function + * @param {!Long|number|string} other Other value + * @returns {number} 0 if they are the same, 1 if the this is greater and -1 + * if the given one is greater + */ + LongPrototype.comp = LongPrototype.compare; + + /** + * Negates this Long's value. + * @returns {!Long} Negated Long + */ + LongPrototype.negate = function negate() { + if (!this.unsigned && this.eq(MIN_VALUE)) + return MIN_VALUE; + return this.not().add(ONE); + }; + + /** + * Negates this Long's value. This is an alias of {@link Long#negate}. + * @function + * @returns {!Long} Negated Long + */ + LongPrototype.neg = LongPrototype.negate; + + /** + * Returns the sum of this and the specified Long. + * @param {!Long|number|string} addend Addend + * @returns {!Long} Sum + */ + LongPrototype.add = function add(addend) { + if (!isLong(addend)) + addend = fromValue(addend); + + // Divide each number into 4 chunks of 16 bits, and then sum the chunks. + + var a48 = this.high >>> 16; + var a32 = this.high & 0xFFFF; + var a16 = this.low >>> 16; + var a00 = this.low & 0xFFFF; + + var b48 = addend.high >>> 16; + var b32 = addend.high & 0xFFFF; + var b16 = addend.low >>> 16; + var b00 = addend.low & 0xFFFF; + + var c48 = 0, c32 = 0, c16 = 0, c00 = 0; + c00 += a00 + b00; + c16 += c00 >>> 16; + c00 &= 0xFFFF; + c16 += a16 + b16; + c32 += c16 >>> 16; + c16 &= 0xFFFF; + c32 += a32 + b32; + c48 += c32 >>> 16; + c32 &= 0xFFFF; + c48 += a48 + b48; + c48 &= 0xFFFF; + return fromBits((c16 << 16) | c00, (c48 << 16) | c32, this.unsigned); + }; + + /** + * Returns the difference of this and the specified Long. + * @param {!Long|number|string} subtrahend Subtrahend + * @returns {!Long} Difference + */ + LongPrototype.subtract = function subtract(subtrahend) { + if (!isLong(subtrahend)) + subtrahend = fromValue(subtrahend); + return this.add(subtrahend.neg()); + }; + + /** + * Returns the difference of this and the specified Long. This is an alias of {@link Long#subtract}. 
+ * @function + * @param {!Long|number|string} subtrahend Subtrahend + * @returns {!Long} Difference + */ + LongPrototype.sub = LongPrototype.subtract; + + /** + * Returns the product of this and the specified Long. + * @param {!Long|number|string} multiplier Multiplier + * @returns {!Long} Product + */ + LongPrototype.multiply = function multiply(multiplier) { + if (this.isZero()) + return ZERO; + if (!isLong(multiplier)) + multiplier = fromValue(multiplier); + + // use wasm support if present + if (wasm) { + var low = wasm.mul(this.low, + this.high, + multiplier.low, + multiplier.high); + return fromBits(low, wasm.get_high(), this.unsigned); + } + + if (multiplier.isZero()) + return ZERO; + if (this.eq(MIN_VALUE)) + return multiplier.isOdd() ? MIN_VALUE : ZERO; + if (multiplier.eq(MIN_VALUE)) + return this.isOdd() ? MIN_VALUE : ZERO; + + if (this.isNegative()) { + if (multiplier.isNegative()) + return this.neg().mul(multiplier.neg()); + else + return this.neg().mul(multiplier).neg(); + } else if (multiplier.isNegative()) + return this.mul(multiplier.neg()).neg(); + + // If both longs are small, use float multiplication + if (this.lt(TWO_PWR_24) && multiplier.lt(TWO_PWR_24)) + return fromNumber(this.toNumber() * multiplier.toNumber(), this.unsigned); + + // Divide each long into 4 chunks of 16 bits, and then add up 4x4 products. + // We can skip products that would overflow. 
+ + var a48 = this.high >>> 16; + var a32 = this.high & 0xFFFF; + var a16 = this.low >>> 16; + var a00 = this.low & 0xFFFF; + + var b48 = multiplier.high >>> 16; + var b32 = multiplier.high & 0xFFFF; + var b16 = multiplier.low >>> 16; + var b00 = multiplier.low & 0xFFFF; + + var c48 = 0, c32 = 0, c16 = 0, c00 = 0; + c00 += a00 * b00; + c16 += c00 >>> 16; + c00 &= 0xFFFF; + c16 += a16 * b00; + c32 += c16 >>> 16; + c16 &= 0xFFFF; + c16 += a00 * b16; + c32 += c16 >>> 16; + c16 &= 0xFFFF; + c32 += a32 * b00; + c48 += c32 >>> 16; + c32 &= 0xFFFF; + c32 += a16 * b16; + c48 += c32 >>> 16; + c32 &= 0xFFFF; + c32 += a00 * b32; + c48 += c32 >>> 16; + c32 &= 0xFFFF; + c48 += a48 * b00 + a32 * b16 + a16 * b32 + a00 * b48; + c48 &= 0xFFFF; + return fromBits((c16 << 16) | c00, (c48 << 16) | c32, this.unsigned); + }; + + /** + * Returns the product of this and the specified Long. This is an alias of {@link Long#multiply}. + * @function + * @param {!Long|number|string} multiplier Multiplier + * @returns {!Long} Product + */ + LongPrototype.mul = LongPrototype.multiply; + + /** + * Returns this Long divided by the specified. The result is signed if this Long is signed or + * unsigned if this Long is unsigned. + * @param {!Long|number|string} divisor Divisor + * @returns {!Long} Quotient + */ + LongPrototype.divide = function divide(divisor) { + if (!isLong(divisor)) + divisor = fromValue(divisor); + if (divisor.isZero()) + throw Error('division by zero'); + + // use wasm support if present + if (wasm) { + // guard against signed division overflow: the largest + // negative number / -1 would be 1 larger than the largest + // positive number, due to two's complement. + if (!this.unsigned && + this.high === -0x80000000 && + divisor.low === -1 && divisor.high === -1) { + // be consistent with non-wasm code path + return this; + } + var low = (this.unsigned ? 
wasm.div_u : wasm.div_s)( + this.low, + this.high, + divisor.low, + divisor.high + ); + return fromBits(low, wasm.get_high(), this.unsigned); + } + + if (this.isZero()) + return this.unsigned ? UZERO : ZERO; + var approx, rem, res; + if (!this.unsigned) { + // This section is only relevant for signed longs and is derived from the + // closure library as a whole. + if (this.eq(MIN_VALUE)) { + if (divisor.eq(ONE) || divisor.eq(NEG_ONE)) + return MIN_VALUE; // recall that -MIN_VALUE == MIN_VALUE + else if (divisor.eq(MIN_VALUE)) + return ONE; + else { + // At this point, we have |other| >= 2, so |this/other| < |MIN_VALUE|. + var halfThis = this.shr(1); + approx = halfThis.div(divisor).shl(1); + if (approx.eq(ZERO)) { + return divisor.isNegative() ? ONE : NEG_ONE; + } else { + rem = this.sub(divisor.mul(approx)); + res = approx.add(rem.div(divisor)); + return res; + } + } + } else if (divisor.eq(MIN_VALUE)) + return this.unsigned ? UZERO : ZERO; + if (this.isNegative()) { + if (divisor.isNegative()) + return this.neg().div(divisor.neg()); + return this.neg().div(divisor).neg(); + } else if (divisor.isNegative()) + return this.div(divisor.neg()).neg(); + res = ZERO; + } else { + // The algorithm below has not been made for unsigned longs. It's therefore + // required to take special care of the MSB prior to running it. + if (!divisor.unsigned) + divisor = divisor.toUnsigned(); + if (divisor.gt(this)) + return UZERO; + if (divisor.gt(this.shru(1))) // 15 >>> 1 = 7 ; with divisor = 8 ; true + return UONE; + res = UZERO; + } + + // Repeat the following until the remainder is less than other: find a + // floating-point that approximates remainder / other *from below*, add this + // into the result, and subtract it from the remainder. It is critical that + // the approximate value is less than or equal to the real value so that the + // remainder never becomes negative. + rem = this; + while (rem.gte(divisor)) { + // Approximate the result of division. 
This may be a little greater or + // smaller than the actual value. + approx = Math.max(1, Math.floor(rem.toNumber() / divisor.toNumber())); + + // We will tweak the approximate result by changing it in the 48-th digit or + // the smallest non-fractional digit, whichever is larger. + var log2 = Math.ceil(Math.log(approx) / Math.LN2), + delta = (log2 <= 48) ? 1 : pow_dbl(2, log2 - 48), + + // Decrease the approximation until it is smaller than the remainder. Note + // that if it is too large, the product overflows and is negative. + approxRes = fromNumber(approx), + approxRem = approxRes.mul(divisor); + while (approxRem.isNegative() || approxRem.gt(rem)) { + approx -= delta; + approxRes = fromNumber(approx, this.unsigned); + approxRem = approxRes.mul(divisor); + } + + // We know the answer can't be zero... and actually, zero would cause + // infinite recursion since we would make no progress. + if (approxRes.isZero()) + approxRes = ONE; + + res = res.add(approxRes); + rem = rem.sub(approxRem); + } + return res; + }; + + /** + * Returns this Long divided by the specified. This is an alias of {@link Long#divide}. + * @function + * @param {!Long|number|string} divisor Divisor + * @returns {!Long} Quotient + */ + LongPrototype.div = LongPrototype.divide; + + /** + * Returns this Long modulo the specified. + * @param {!Long|number|string} divisor Divisor + * @returns {!Long} Remainder + */ + LongPrototype.modulo = function modulo(divisor) { + if (!isLong(divisor)) + divisor = fromValue(divisor); + + // use wasm support if present + if (wasm) { + var low = (this.unsigned ? wasm.rem_u : wasm.rem_s)( + this.low, + this.high, + divisor.low, + divisor.high + ); + return fromBits(low, wasm.get_high(), this.unsigned); + } + + return this.sub(this.div(divisor).mul(divisor)); + }; + + /** + * Returns this Long modulo the specified. This is an alias of {@link Long#modulo}. 
+ * @function + * @param {!Long|number|string} divisor Divisor + * @returns {!Long} Remainder + */ + LongPrototype.mod = LongPrototype.modulo; + + /** + * Returns this Long modulo the specified. This is an alias of {@link Long#modulo}. + * @function + * @param {!Long|number|string} divisor Divisor + * @returns {!Long} Remainder + */ + LongPrototype.rem = LongPrototype.modulo; + + /** + * Returns the bitwise NOT of this Long. + * @returns {!Long} + */ + LongPrototype.not = function not() { + return fromBits(~this.low, ~this.high, this.unsigned); + }; + + /** + * Returns the bitwise AND of this Long and the specified. + * @param {!Long|number|string} other Other Long + * @returns {!Long} + */ + LongPrototype.and = function and(other) { + if (!isLong(other)) + other = fromValue(other); + return fromBits(this.low & other.low, this.high & other.high, this.unsigned); + }; + + /** + * Returns the bitwise OR of this Long and the specified. + * @param {!Long|number|string} other Other Long + * @returns {!Long} + */ + LongPrototype.or = function or(other) { + if (!isLong(other)) + other = fromValue(other); + return fromBits(this.low | other.low, this.high | other.high, this.unsigned); + }; + + /** + * Returns the bitwise XOR of this Long and the given one. + * @param {!Long|number|string} other Other Long + * @returns {!Long} + */ + LongPrototype.xor = function xor(other) { + if (!isLong(other)) + other = fromValue(other); + return fromBits(this.low ^ other.low, this.high ^ other.high, this.unsigned); + }; + + /** + * Returns this Long with bits shifted to the left by the given amount. 
+ * @param {number|!Long} numBits Number of bits + * @returns {!Long} Shifted Long + */ + LongPrototype.shiftLeft = function shiftLeft(numBits) { + if (isLong(numBits)) + numBits = numBits.toInt(); + if ((numBits &= 63) === 0) + return this; + else if (numBits < 32) + return fromBits(this.low << numBits, (this.high << numBits) | (this.low >>> (32 - numBits)), this.unsigned); + else + return fromBits(0, this.low << (numBits - 32), this.unsigned); + }; + + /** + * Returns this Long with bits shifted to the left by the given amount. This is an alias of {@link Long#shiftLeft}. + * @function + * @param {number|!Long} numBits Number of bits + * @returns {!Long} Shifted Long + */ + LongPrototype.shl = LongPrototype.shiftLeft; + + /** + * Returns this Long with bits arithmetically shifted to the right by the given amount. + * @param {number|!Long} numBits Number of bits + * @returns {!Long} Shifted Long + */ + LongPrototype.shiftRight = function shiftRight(numBits) { + if (isLong(numBits)) + numBits = numBits.toInt(); + if ((numBits &= 63) === 0) + return this; + else if (numBits < 32) + return fromBits((this.low >>> numBits) | (this.high << (32 - numBits)), this.high >> numBits, this.unsigned); + else + return fromBits(this.high >> (numBits - 32), this.high >= 0 ? 0 : -1, this.unsigned); + }; + + /** + * Returns this Long with bits arithmetically shifted to the right by the given amount. This is an alias of {@link Long#shiftRight}. + * @function + * @param {number|!Long} numBits Number of bits + * @returns {!Long} Shifted Long + */ + LongPrototype.shr = LongPrototype.shiftRight; + + /** + * Returns this Long with bits logically shifted to the right by the given amount. 
+ * @param {number|!Long} numBits Number of bits + * @returns {!Long} Shifted Long + */ + LongPrototype.shiftRightUnsigned = function shiftRightUnsigned(numBits) { + if (isLong(numBits)) + numBits = numBits.toInt(); + numBits &= 63; + if (numBits === 0) + return this; + else { + var high = this.high; + if (numBits < 32) { + var low = this.low; + return fromBits((low >>> numBits) | (high << (32 - numBits)), high >>> numBits, this.unsigned); + } else if (numBits === 32) + return fromBits(high, 0, this.unsigned); + else + return fromBits(high >>> (numBits - 32), 0, this.unsigned); + } + }; + + /** + * Returns this Long with bits logically shifted to the right by the given amount. This is an alias of {@link Long#shiftRightUnsigned}. + * @function + * @param {number|!Long} numBits Number of bits + * @returns {!Long} Shifted Long + */ + LongPrototype.shru = LongPrototype.shiftRightUnsigned; + + /** + * Returns this Long with bits logically shifted to the right by the given amount. This is an alias of {@link Long#shiftRightUnsigned}. + * @function + * @param {number|!Long} numBits Number of bits + * @returns {!Long} Shifted Long + */ + LongPrototype.shr_u = LongPrototype.shiftRightUnsigned; + + /** + * Converts this Long to signed. + * @returns {!Long} Signed long + */ + LongPrototype.toSigned = function toSigned() { + if (!this.unsigned) + return this; + return fromBits(this.low, this.high, false); + }; + + /** + * Converts this Long to unsigned. + * @returns {!Long} Unsigned long + */ + LongPrototype.toUnsigned = function toUnsigned() { + if (this.unsigned) + return this; + return fromBits(this.low, this.high, true); + }; + + /** + * Converts this Long to its byte representation. + * @param {boolean=} le Whether little or big endian, defaults to big endian + * @returns {!Array.} Byte representation + */ + LongPrototype.toBytes = function toBytes(le) { + return le ? 
this.toBytesLE() : this.toBytesBE(); + }; + + /** + * Converts this Long to its little endian byte representation. + * @returns {!Array.} Little endian byte representation + */ + LongPrototype.toBytesLE = function toBytesLE() { + var hi = this.high, + lo = this.low; + return [ + lo & 0xff, + lo >>> 8 & 0xff, + lo >>> 16 & 0xff, + lo >>> 24 , + hi & 0xff, + hi >>> 8 & 0xff, + hi >>> 16 & 0xff, + hi >>> 24 + ]; + }; + + /** + * Converts this Long to its big endian byte representation. + * @returns {!Array.} Big endian byte representation + */ + LongPrototype.toBytesBE = function toBytesBE() { + var hi = this.high, + lo = this.low; + return [ + hi >>> 24 , + hi >>> 16 & 0xff, + hi >>> 8 & 0xff, + hi & 0xff, + lo >>> 24 , + lo >>> 16 & 0xff, + lo >>> 8 & 0xff, + lo & 0xff + ]; + }; + + /** + * Creates a Long from its byte representation. + * @param {!Array.} bytes Byte representation + * @param {boolean=} unsigned Whether unsigned or not, defaults to signed + * @param {boolean=} le Whether little or big endian, defaults to big endian + * @returns {Long} The corresponding Long value + */ + Long$1.fromBytes = function fromBytes(bytes, unsigned, le) { + return le ? Long$1.fromBytesLE(bytes, unsigned) : Long$1.fromBytesBE(bytes, unsigned); + }; + + /** + * Creates a Long from its little endian byte representation. + * @param {!Array.} bytes Little endian byte representation + * @param {boolean=} unsigned Whether unsigned or not, defaults to signed + * @returns {Long} The corresponding Long value + */ + Long$1.fromBytesLE = function fromBytesLE(bytes, unsigned) { + return new Long$1( + bytes[0] | + bytes[1] << 8 | + bytes[2] << 16 | + bytes[3] << 24, + bytes[4] | + bytes[5] << 8 | + bytes[6] << 16 | + bytes[7] << 24, + unsigned + ); + }; + + /** + * Creates a Long from its big endian byte representation. 
+ * @param {!Array.} bytes Big endian byte representation + * @param {boolean=} unsigned Whether unsigned or not, defaults to signed + * @returns {Long} The corresponding Long value + */ + Long$1.fromBytesBE = function fromBytesBE(bytes, unsigned) { + return new Long$1( + bytes[4] << 24 | + bytes[5] << 16 | + bytes[6] << 8 | + bytes[7], + bytes[0] << 24 | + bytes[1] << 16 | + bytes[2] << 8 | + bytes[3], + unsigned + ); + }; + + var long$1 = /*@__PURE__*/getDefaultExportFromCjs(long); + + var LongExports = /*#__PURE__*/_mergeNamespaces({ + __proto__: null, + default: long$1 + }, [long]); + + /** + * @license + * Copyright 2021 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + // tslint:disable-next-line + const Long = + // tslint:disable-next-line + long$1 || LongExports; + function hexToLong(hex) { + return Long.fromString(hex, true, 16); + } + // Some primes between 2^63 and 2^64 for various uses. 
+ // Hex 0xc3a5c85c97cb3127 + const k0 = hexToLong('c3a5c85c97cb3127'); + // Hex 0xb492b66fbe98f273 + const k1 = hexToLong('b492b66fbe98f273'); + // Hex 0x9ae16a3b2f90404f + const k2 = hexToLong('9ae16a3b2f90404f'); + function shiftMix(val) { + return val.xor(val.shru(47)); + } + function fetch$2(s, offset, numBytes) { + const bytes = s.slice(offset, offset + numBytes); + return Long.fromBytes(Array.from(bytes), true, true); + } + function fetch64(s, offset) { + return fetch$2(s, offset, 8); + } + function fetch32(s, offset) { + return fetch$2(s, offset, 4); + } + function rotate64(val, shift) { + // Avoid shifting by 64: doing so yields an undefined result. + return shift === 0 ? val : val.shru(shift).or(val.shl(64 - shift)); + } + function hashLen16(u, v, mul = hexToLong('9ddfea08eb382d69')) { + // Murmur-inspired hashing. + let a = u.xor(v).mul(mul); + a = a.xor(a.shru(47)); + let b = v.xor(a).mul(mul); + b = b.xor(b.shru(47)); + b = b.mul(mul); + return b; + } + // Return a 16-byte hash for 48 bytes. Quick and dirty. + // Callers do best to use "random-looking" values for a and b. 
+ function weakHashLen32WithSeeds(w, x, y, z, a, b) { + a = a.add(w); + b = rotate64(b.add(a).add(z), 21); + const c = a; + a = a.add(x); + a = a.add(y); + b = b.add(rotate64(a, 44)); + return [a.add(z), b.add(c)]; + } + function weakHashLen32WithSeedsStr(s, offset, a, b) { + return weakHashLen32WithSeeds(fetch64(s, offset), fetch64(s, offset + 8), fetch64(s, offset + 16), fetch64(s, offset + 24), a, b); + } + function hashLen0to16(s, len = s.length) { + if (len >= 8) { + const mul = k2.add(len * 2); + const a = fetch64(s, 0).add(k2); + const b = fetch64(s, len - 8); + const c = rotate64(b, 37).mul(mul).add(a); + const d = rotate64(a, 25).add(b).mul(mul); + return hashLen16(c, d, mul); + } + if (len >= 4) { + const mul = k2.add(len * 2); + const a = fetch32(s, 0); + return hashLen16(a.shl(3).add(len), fetch32(s, len - 4), mul); + } + if (len > 0) { + const a = s[0]; + const b = s[len >> 1]; + const c = s[len - 1]; + const y = a + (b << 8); + const z = len + (c << 2); + return shiftMix(k2.mul(y).xor(k0.mul(z))).mul(k2); + } + return k2; + } + function hashLen17to32(s, len = s.length) { + const mul = k2.add(len * 2); + const a = fetch64(s, 0).mul(k1); + const b = fetch64(s, 8); + const c = fetch64(s, len - 8).mul(mul); + const d = fetch64(s, len - 16).mul(k2); + return hashLen16(rotate64(a.add(b), 43).add(rotate64(c, 30)).add(d), a.add(rotate64(b.add(k2), 18)).add(c), mul); + } + function hashLen33to64(s, len = s.length) { + const mul = k2.add(len * 2); + const a = fetch64(s, 0).mul(k2); + const b = fetch64(s, 8); + const c = fetch64(s, len - 8).mul(mul); + const d = fetch64(s, len - 16).mul(k2); + const y = rotate64(a.add(b), 43).add(rotate64(c, 30)).add(d); + const z = hashLen16(y, a.add(rotate64(b.add(k2), 18)).add(c), mul); + const e = fetch64(s, 16).mul(mul); + const f = fetch64(s, 24); + const g = y.add(fetch64(s, len - 32)).mul(mul); + const h = z.add(fetch64(s, len - 24)).mul(mul); + return hashLen16(rotate64(e.add(f), 43).add(rotate64(g, 30)).add(h), 
e.add(rotate64(f.add(a), 18)).add(g), mul); + } + function fingerPrint64(s, len = s.length) { + const seed = Long.fromNumber(81, true); + if (len <= 32) { + if (len <= 16) { + return hashLen0to16(s, len); + } + else { + return hashLen17to32(s, len); + } + } + else if (len <= 64) { + return hashLen33to64(s, len); + } + // For strings over 64 bytes we loop. Internal state consists of + // 56 bytes: v, w, x, y, and z. + let x = seed; + let y = seed.mul(k1).add(113); + let z = shiftMix(y.mul(k2).add(113)).mul(k2); + let v = [Long.UZERO, Long.UZERO]; + let w = [Long.UZERO, Long.UZERO]; + x = x.mul(k2).add(fetch64(s, 0)); + let offset = 0; + // Set end so that after the loop we have 1 to 64 bytes left to process. + const end = ((len - 1) >> 6) * 64; + const last64 = end + ((len - 1) & 63) - 63; + do { + x = rotate64(x.add(y).add(v[0]).add(fetch64(s, offset + 8)), 37).mul(k1); + y = rotate64(y.add(v[1]).add(fetch64(s, offset + 48)), 42).mul(k1); + x = x.xor(w[1]); + y = y.add(v[0]).add(fetch64(s, offset + 40)); + z = rotate64(z.add(w[0]), 33).mul(k1); + v = weakHashLen32WithSeedsStr(s, offset, v[1].mul(k1), x.add(w[0])); + w = weakHashLen32WithSeedsStr(s, offset + 32, z.add(w[1]), y.add(fetch64(s, offset + 16))); + [z, x] = [x, z]; + offset += 64; + } while (offset !== end); + const mul = k1.add(z.and(0xff).shl(1)); + // Point to the last 64 bytes of input. 
+ offset = last64; + w[0] = w[0].add((len - 1) & 63); + v[0] = v[0].add(w[0]); + w[0] = w[0].add(v[0]); + x = rotate64(x.add(y).add(v[0]).add(fetch64(s, offset + 8)), 37).mul(mul); + y = rotate64(y.add(v[1]).add(fetch64(s, offset + 48)), 42).mul(mul); + x = x.xor(w[1].mul(9)); + y = y.add(v[0].mul(9).add(fetch64(s, offset + 40))); + z = rotate64(z.add(w[0]), 33).mul(mul); + v = weakHashLen32WithSeedsStr(s, offset, v[1].mul(mul), x.add(w[0])); + w = weakHashLen32WithSeedsStr(s, offset + 32, z.add(w[1]), y.add(fetch64(s, offset + 16))); + [z, x] = [x, z]; + return hashLen16(hashLen16(v[0], w[0], mul).add(shiftMix(y).mul(k0)).add(z), hashLen16(v[1], w[1], mul).add(x), mul); + } + + /** + * @license + * Copyright 2017 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + /** + * Create typed array for scalar value. Used for storing in `DataStorage`. 
+ */ + function createScalarValue(value, dtype) { + if (dtype === 'string') { + return encodeString(value); + } + return toTypedArray([value], dtype); + } + function noConversionNeeded(a, dtype) { + return (a instanceof Float32Array && dtype === 'float32') || + (a instanceof Int32Array && dtype === 'int32') || + (a instanceof Uint8Array && dtype === 'bool'); + } + function toTypedArray(a, dtype) { + if (dtype === 'string') { + throw new Error('Cannot convert a string[] to a TypedArray'); + } + if (Array.isArray(a)) { + a = flatten$2(a); + } + if (env().getBool('DEBUG')) { + checkConversionForErrors(a, dtype); + } + if (noConversionNeeded(a, dtype)) { + return a; + } + if (dtype == null || dtype === 'float32' || dtype === 'complex64') { + return new Float32Array(a); + } + else if (dtype === 'int32') { + return new Int32Array(a); + } + else if (dtype === 'bool') { + const bool = new Uint8Array(a.length); + for (let i = 0; i < bool.length; ++i) { + if (Math.round(a[i]) !== 0) { + bool[i] = 1; + } + } + return bool; + } + else { + throw new Error(`Unknown data type ${dtype}`); + } + } + /** + * Returns the current high-resolution time in milliseconds relative to an + * arbitrary time in the past. It works across different platforms (node.js, + * browsers). + * + * ```js + * console.log(tf.util.now()); + * ``` + * + * @doc {heading: 'Util', namespace: 'util'} + */ + function now() { + return env().platform.now(); + } + /** + * Returns a platform-specific implementation of + * [`fetch`](https://developer.mozilla.org/en-US/docs/Web/API/Fetch_API). + * + * If `fetch` is defined on the global object (`window`, `process`, etc.), + * `tf.util.fetch` returns that function. + * + * If not, `tf.util.fetch` returns a platform-specific solution. 
+ * + * ```js + * const resource = await tf.util.fetch('https://cdn.jsdelivr.net/npm/@tensorflow/tfjs'); + * // handle response + * ``` + * + * @doc {heading: 'Util'} + */ + function fetch$1(path, requestInits) { + return env().platform.fetch(path, requestInits); + } + /** + * Encodes the provided string into bytes using the provided encoding scheme. + * + * @param s The string to encode. + * @param encoding The encoding scheme. Defaults to utf-8. + * + * @doc {heading: 'Util'} + */ + function encodeString(s, encoding = 'utf-8') { + encoding = encoding || 'utf-8'; + return env().platform.encode(s, encoding); + } + /** + * Decodes the provided bytes into a string using the provided encoding scheme. + * @param bytes The bytes to decode. + * + * @param encoding The encoding scheme. Defaults to utf-8. + * + * @doc {heading: 'Util'} + */ + function decodeString(bytes, encoding = 'utf-8') { + encoding = encoding || 'utf-8'; + return env().platform.decode(bytes, encoding); + } + function isTypedArray(a) { + // TODO(mattsoulanille): Remove this fallback in 5.0.0 + if (env().platform.isTypedArray != null) { + return env().platform.isTypedArray(a); + } + else { + return isTypedArrayBrowser(a); + } + } + // NOTE: We explicitly type out what T extends instead of any so that + // util.flatten on a nested array of number doesn't try to infer T as a + // number[][], causing us to explicitly type util.flatten(). + /** + * Flattens an arbitrarily nested array. + * + * ```js + * const a = [[1, 2], [3, 4], [5, [6, [7]]]]; + * const flat = tf.util.flatten(a); + * console.log(flat); + * ``` + * + * @param arr The nested array to flatten. + * @param result The destination array which holds the elements. + * @param skipTypedArray If true, avoids flattening the typed arrays. Defaults + * to false. 
+ * + * @doc {heading: 'Util', namespace: 'util'} + */ + function flatten$2(arr, result = [], skipTypedArray = false) { + if (result == null) { + result = []; + } + if (typeof arr === 'boolean' || typeof arr === 'number' || + typeof arr === 'string' || isPromise(arr) || arr == null || + isTypedArray(arr) && skipTypedArray) { + result.push(arr); + } + else if (Array.isArray(arr) || isTypedArray(arr)) { + for (let i = 0; i < arr.length; ++i) { + flatten$2(arr[i], result, skipTypedArray); + } + } + else { + let maxIndex = -1; + for (const key of Object.keys(arr)) { + // 0 or positive integer. + if (/^([1-9]+[0-9]*|0)$/.test(key)) { + maxIndex = Math.max(maxIndex, Number(key)); + } + } + for (let i = 0; i <= maxIndex; i++) { + // tslint:disable-next-line: no-unnecessary-type-assertion + flatten$2(arr[i], result, skipTypedArray); + } + } + return result; + } + + var util = /*#__PURE__*/Object.freeze({ + __proto__: null, + arraysEqual: arraysEqual, + arraysEqualWithNull: arraysEqualWithNull, + assert: assert$1, + assertNonNegativeIntegerDimensions: assertNonNegativeIntegerDimensions, + assertNonNull: assertNonNull, + assertShapesMatch: assertShapesMatch, + bytesFromStringArray: bytesFromStringArray, + bytesPerElement: bytesPerElement, + checkConversionForErrors: checkConversionForErrors, + clamp: clamp, + computeStrides: computeStrides, + convertBackendValuesAndArrayBuffer: convertBackendValuesAndArrayBuffer, + createScalarValue: createScalarValue, + createShuffledIndices: createShuffledIndices, + decodeString: decodeString, + distSquared: distSquared, + encodeString: encodeString, + fetch: fetch$1, + fingerPrint64: fingerPrint64, + flatten: flatten$2, + getArrayFromDType: getArrayFromDType, + getTypedArrayFromDType: getTypedArrayFromDType, + hasEncodingLoss: hasEncodingLoss, + hexToLong: hexToLong, + indexToLoc: indexToLoc, + inferDtype: inferDtype, + inferFromImplicitShape: inferFromImplicitShape, + isBoolean: isBoolean, + isFunction: isFunction, + isInt: isInt, + 
isNumber: isNumber, + isPromise: isPromise, + isScalarShape: isScalarShape, + isString: isString, + isTypedArray: isTypedArray, + isValidDtype: isValidDtype, + locToIndex: locToIndex, + makeOnesTypedArray: makeOnesTypedArray, + makeZerosNestedTypedArray: makeZerosNestedTypedArray, + makeZerosTypedArray: makeZerosTypedArray, + nearestDivisor: nearestDivisor, + nearestLargerEven: nearestLargerEven, + now: now, + parseAxisParam: parseAxisParam, + randUniform: randUniform, + repeatedTry: repeatedTry, + rightPad: rightPad, + shuffle: shuffle, + shuffleCombo: shuffleCombo, + sizeFromShape: sizeFromShape, + sizeToSquarishShape: sizeToSquarishShape, + squeezeShape: squeezeShape, + sum: sum$4, + swap: swap, + tanh: tanh$3, + toNestedArray: toNestedArray, + toTypedArray: toTypedArray + }); + + /** + * @license + * Copyright 2018 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ + class Profiler { + constructor(backendTimer, logger) { + this.backendTimer = backendTimer; + this.logger = logger; + if (logger == null) { + this.logger = new Logger(); + } + } + profileKernel(kernelName, inputs, f) { + let outputs; + const holdResultWrapperFn = () => { + outputs = f(); + }; + let timer; + const start = now(); + if (this.backendTimer.timerAvailable()) { + timer = this.backendTimer.time(holdResultWrapperFn); + } + else { + holdResultWrapperFn(); + for (const output of outputs) { + output.dataSync(); + } + timer = Promise.resolve({ kernelMs: now() - start }); + } + if (env().getBool('CHECK_COMPUTATION_FOR_ERRORS')) { + for (let i = 0; i < outputs.length; i++) { + const output = outputs[i]; + // Dangling promise here because we don't want to propagate up + // asynchronicity. + output.data().then(tensorVals => { + checkComputationForErrors(tensorVals, output.dtype, kernelName); + }); + } + } + const kernelProfile = { + kernelName, + outputs, + inputs, + timeMs: timer.then(timing => timing.kernelMs), + extraInfo: timer.then(timing => timing.getExtraProfileInfo != null ? + timing.getExtraProfileInfo() : + '') + }; + return kernelProfile; + } + logKernelProfile(kernelProfile) { + const { kernelName, outputs, timeMs, inputs, extraInfo } = kernelProfile; + outputs.forEach(result => { + Promise.all([result.data(), timeMs, extraInfo]).then(valueContainer => { + this.logger.logKernelProfile(kernelName, result, valueContainer[0], valueContainer[1], inputs, valueContainer[2]); + }); + }); + } + } + function checkComputationForErrors(vals, dtype, kernelName) { + if (dtype !== 'float32') { + // Only floating point computations will generate NaN values + return false; + } + for (let i = 0; i < vals.length; i++) { + const num = vals[i]; + if (isNaN(num) || !isFinite(num)) { + // Throwing custom exception so behavior is testable. 
+ console.warn(`Found ${num} in the result of '${kernelName}'`); + return true; + } + } + return false; + } + class Logger { + logKernelProfile(name, result, vals, timeMs, inputs, extraInfo) { + const time = typeof timeMs === 'number' ? rightPad(`${timeMs}ms`, 9) : + timeMs['error']; + const paddedName = rightPad(name, 25); + const rank = result.rank; + const size = result.size; + const shape = rightPad(result.shape.toString(), 14); + let inputShapesDescription = ''; + for (const name in inputs) { + const input = inputs[name]; + if (input != null) { + // The input might be a non-tensor (e.g HTMLImageElement), in which case + // we claim the output shape as input shape. + const inputShape = input.shape || result.shape; + const inputRank = inputShape.length; + inputShapesDescription += + `${name}: ${inputRank}D ${inputRank > 0 ? inputShape : ''} `; + } + } + console.log(`%c${paddedName}\t%c${time}\t%c${rank}D ${shape}\t%c${size}\t%c${inputShapesDescription}\t%c${extraInfo}`, 'font-weight:bold', 'color:red', 'color:blue', 'color: orange', 'color: green', 'color: steelblue'); + } + } + + /** + * @license + * Copyright 2017 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + /** + * Computes a list of TapeNodes that connect x to y, filtering everything else + * out and preserving the order of the original tape elements. 
+ * + * @param tape The tape elements to filter. + * @param xs The input Tensors. + * @param y The output Tensor. + */ + function getFilteredNodesXToY(tape, xs, y) { + // Forward pass to compute all the nodes and Tensors that are transitively a + // function of x. + const tensorsFromX = {}; + const nodesFromX = {}; + for (let i = 0; i < xs.length; i++) { + tensorsFromX[xs[i].id] = true; + } + for (let i = 0; i < tape.length; i++) { + const node = tape[i]; + const nodeInputs = node.inputs; + for (const inputName in nodeInputs) { + const input = nodeInputs[inputName]; + let anyInputFromX = false; + for (let j = 0; j < xs.length; j++) { + if (tensorsFromX[input.id]) { + node.outputs.forEach(output => tensorsFromX[output.id] = true); + anyInputFromX = true; + nodesFromX[node.id] = true; + break; + } + } + if (anyInputFromX) { + break; + } + } + } + // Backward pass to find all of the nodes and Tensors that lead to y. + const tensorsLeadToY = {}; + tensorsLeadToY[y.id] = true; + const nodesToY = {}; + for (let i = tape.length - 1; i >= 0; i--) { + const node = tape[i]; + const nodeInputs = node.inputs; + // If any of the outputs lead to y, mark all of the inputs as leading to y. + for (let j = 0; j < node.outputs.length; j++) { + if (tensorsLeadToY[node.outputs[j].id]) { + for (const inputName in nodeInputs) { + tensorsLeadToY[nodeInputs[inputName].id] = true; + nodesToY[node.id] = true; + } + break; + } + } + } + // Return the paths that come from x and lead to y. + const filteredTape = []; + for (let i = 0; i < tape.length; i++) { + const node = tape[i]; + if (nodesFromX[node.id] && nodesToY[node.id]) { + // Prune the inputs from the node that aren't a function of x. + const prunedInputs = {}; + for (const inputName in node.inputs) { + const nodeInput = node.inputs[inputName]; + if (tensorsFromX[nodeInput.id]) { + prunedInputs[inputName] = nodeInput; + } + } + // Copy the node and overwrite inputsAndArgs to the pruned version. 
+ const prunedNode = Object.assign({}, node); + prunedNode.inputs = prunedInputs; + prunedNode.outputs = node.outputs; + filteredTape.push(prunedNode); + } + } + return filteredTape; + } + /** + * Backpropagate gradients through the filtered TapeNodes. + * + * @param tensorAccumulatedGradientMap A map of Tensor to its gradient. This map + * is mutated by this method. + * @param filteredTape The filtered TapeNodes to backprop through. + */ + function backpropagateGradients(tensorAccumulatedGradientMap, filteredTape, tidy, add) { + // Walk the tape backward and keep a map of Tensor to its gradient. + for (let i = filteredTape.length - 1; i >= 0; i--) { + const node = filteredTape[i]; + const dys = []; + node.outputs.forEach(o => { + const gradTensor = tensorAccumulatedGradientMap[o.id]; + if (gradTensor != null) { + dys.push(gradTensor); + } + else { + // This particular output is not in the back-propagation subgraph, so it + // does not affect the final output, thus we put null for its dy. + dys.push(null); + } + }); + if (node.gradient == null) { + throw new Error(`Cannot compute gradient: gradient function not found ` + + `for ${node.kernelName}.`); + } + // Backprop dy through this node and accumulate gradients over the inputs. + const inputGradients = node.gradient(dys); + for (const inputName in node.inputs) { + if (!(inputName in inputGradients)) { + throw new Error(`Cannot backprop through input ${inputName}. ` + + `Available gradients found: ${Object.keys(inputGradients)}.`); + } + // Call the gradient function. + const dx = tidy(() => inputGradients[inputName]()); + if (dx.dtype !== 'float32') { + throw new Error(`Error in gradient for op ${node.kernelName}. The gradient of input ` + + `${inputName} must have 'float32' dtype, but has '${dx.dtype}'`); + } + const x = node.inputs[inputName]; + if (!arraysEqual(dx.shape, x.shape)) { + throw new Error(`Error in gradient for op ${node.kernelName}. 
The gradient of input ` + + `'${inputName}' has shape '${dx.shape}', which does not match ` + + `the shape of the input '${x.shape}'`); + } + if (tensorAccumulatedGradientMap[x.id] == null) { + tensorAccumulatedGradientMap[x.id] = dx; + } + else { + const curGradient = tensorAccumulatedGradientMap[x.id]; + tensorAccumulatedGradientMap[x.id] = add(curGradient, dx); + curGradient.dispose(); + } + } + } + } + + /** + * @license + * Copyright 2018 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + // Maximum number of values before we decide to show ellipsis. + const FORMAT_LIMIT_NUM_VALS = 20; + // Number of first and last values to show when displaying a, b,...,y, z. + const FORMAT_NUM_FIRST_LAST_VALS = 3; + // Number of significant digits to show. 
+ const FORMAT_NUM_SIG_DIGITS = 7; + function tensorToString(vals, shape, dtype, verbose) { + const strides = computeStrides(shape); + const padPerCol = computeMaxSizePerColumn(vals, shape, dtype, strides); + const rank = shape.length; + const valsLines = subTensorToString(vals, shape, dtype, strides, padPerCol); + const lines = ['Tensor']; + if (verbose) { + lines.push(` dtype: ${dtype}`); + lines.push(` rank: ${rank}`); + lines.push(` shape: [${shape}]`); + lines.push(` values:`); + } + lines.push(valsLines.map(l => ' ' + l).join('\n')); + return lines.join('\n'); + } + function computeMaxSizePerColumn(vals, shape, dtype, strides) { + const n = sizeFromShape(shape); + const numCols = strides[strides.length - 1]; + const padPerCol = new Array(numCols).fill(0); + const rank = shape.length; + const valuesOrTuples = dtype === 'complex64' ? createComplexTuples(vals) : vals; + if (rank > 1) { + for (let row = 0; row < n / numCols; row++) { + const offset = row * numCols; + for (let j = 0; j < numCols; j++) { + padPerCol[j] = Math.max(padPerCol[j], valToString(valuesOrTuples[offset + j], 0, dtype).length); + } + } + } + return padPerCol; + } + function valToString(val, pad, dtype) { + let valStr; + if (Array.isArray(val)) { + valStr = `${parseFloat(val[0].toFixed(FORMAT_NUM_SIG_DIGITS))} + ` + + `${parseFloat(val[1].toFixed(FORMAT_NUM_SIG_DIGITS))}j`; + } + else if (isString(val)) { + valStr = `'${val}'`; + } + else if (dtype === 'bool') { + valStr = boolNumToString(val); + } + else { + valStr = parseFloat(val.toFixed(FORMAT_NUM_SIG_DIGITS)).toString(); + } + return rightPad(valStr, pad); + } + function boolNumToString(v) { + return v === 0 ? 'false' : 'true'; + } + function subTensorToString(vals, shape, dtype, strides, padPerCol, isLast = true) { + const storagePerElement = dtype === 'complex64' ? 
2 : 1; + const size = shape[0]; + const rank = shape.length; + if (rank === 0) { + if (dtype === 'complex64') { + const complexTuple = createComplexTuples(vals); + return [valToString(complexTuple[0], 0, dtype)]; + } + if (dtype === 'bool') { + return [boolNumToString(vals[0])]; + } + return [vals[0].toString()]; + } + if (rank === 1) { + if (size > FORMAT_LIMIT_NUM_VALS) { + const firstValsSize = FORMAT_NUM_FIRST_LAST_VALS * storagePerElement; + let firstVals = Array.from(vals.slice(0, firstValsSize)); + let lastVals = Array.from(vals.slice((size - FORMAT_NUM_FIRST_LAST_VALS) * storagePerElement, size * storagePerElement)); + if (dtype === 'complex64') { + firstVals = createComplexTuples(firstVals); + lastVals = createComplexTuples(lastVals); + } + return [ + '[' + + firstVals.map((x, i) => valToString(x, padPerCol[i], dtype)) + .join(', ') + + ', ..., ' + + lastVals + .map((x, i) => valToString(x, padPerCol[size - FORMAT_NUM_FIRST_LAST_VALS + i], dtype)) + .join(', ') + + ']' + ]; + } + const displayVals = dtype === 'complex64' ? createComplexTuples(vals) : + Array.from(vals); + return [ + '[' + + displayVals.map((x, i) => valToString(x, padPerCol[i], dtype)) + .join(', ') + + ']' + ]; + } + // The array is rank 2 or more. 
+ const subshape = shape.slice(1); + const substrides = strides.slice(1); + const stride = strides[0] * storagePerElement; + const lines = []; + if (size > FORMAT_LIMIT_NUM_VALS) { + for (let i = 0; i < FORMAT_NUM_FIRST_LAST_VALS; i++) { + const start = i * stride; + const end = start + stride; + lines.push(...subTensorToString(vals.slice(start, end), subshape, dtype, substrides, padPerCol, false /* isLast */)); + } + lines.push('...'); + for (let i = size - FORMAT_NUM_FIRST_LAST_VALS; i < size; i++) { + const start = i * stride; + const end = start + stride; + lines.push(...subTensorToString(vals.slice(start, end), subshape, dtype, substrides, padPerCol, i === size - 1 /* isLast */)); + } + } + else { + for (let i = 0; i < size; i++) { + const start = i * stride; + const end = start + stride; + lines.push(...subTensorToString(vals.slice(start, end), subshape, dtype, substrides, padPerCol, i === size - 1 /* isLast */)); + } + } + const sep = rank === 2 ? ',' : ''; + lines[0] = '[' + (size > 0 ? lines[0] + sep : ''); + for (let i = 1; i < lines.length - 1; i++) { + lines[i] = ' ' + lines[i] + sep; + } + let newLineSep = ',\n'; + for (let i = 2; i < rank; i++) { + newLineSep += '\n'; + } + lines[lines.length - 1] = + ' ' + lines[lines.length - 1] + ']' + (isLast ? '' : newLineSep); + return lines; + } + function createComplexTuples(vals) { + const complexTuples = []; + for (let i = 0; i < vals.length; i += 2) { + complexTuples.push([vals[i], vals[i + 1]]); + } + return complexTuples; + } + + /** + * @license + * Copyright 2017 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + /** + * A mutable object, similar to `tf.Tensor`, that allows users to set values + * at locations before converting to an immutable `tf.Tensor`. + * + * See `tf.buffer` for creating a tensor buffer. + * + * @doc {heading: 'Tensors', subheading: 'Classes'} + */ + class TensorBuffer { + constructor(shape, dtype, values) { + this.dtype = dtype; + this.shape = shape.slice(); + this.size = sizeFromShape(shape); + if (values != null) { + const n = values.length; + assert$1(n === this.size, () => `Length of values '${n}' does not match the size ` + + `inferred by the shape '${this.size}'.`); + } + if (dtype === 'complex64') { + throw new Error(`complex64 dtype TensorBuffers are not supported. Please create ` + + `a TensorBuffer for the real and imaginary parts separately and ` + + `call tf.complex(real, imag).`); + } + this.values = values || getArrayFromDType(dtype, this.size); + this.strides = computeStrides(shape); + } + /** + * Sets a value in the buffer at a given location. + * + * @param value The value to set. + * @param locs The location indices. + * + * @doc {heading: 'Tensors', subheading: 'Creation'} + */ + set(value, ...locs) { + if (locs.length === 0) { + locs = [0]; + } + assert$1(locs.length === this.rank, () => `The number of provided coordinates (${locs.length}) must ` + + `match the rank (${this.rank})`); + const index = this.locToIndex(locs); + this.values[index] = value; + } + /** + * Returns the value in the buffer at the provided location. 
+ * + * @param locs The location indices. + * + * @doc {heading: 'Tensors', subheading: 'Creation'} + */ + get(...locs) { + if (locs.length === 0) { + locs = [0]; + } + let i = 0; + for (const loc of locs) { + if (loc < 0 || loc >= this.shape[i]) { + const msg = `Requested out of range element at ${locs}. ` + + ` Buffer shape=${this.shape}`; + throw new Error(msg); + } + i++; + } + let index = locs[locs.length - 1]; + for (let i = 0; i < locs.length - 1; ++i) { + index += this.strides[i] * locs[i]; + } + return this.values[index]; + } + locToIndex(locs) { + if (this.rank === 0) { + return 0; + } + else if (this.rank === 1) { + return locs[0]; + } + let index = locs[locs.length - 1]; + for (let i = 0; i < locs.length - 1; ++i) { + index += this.strides[i] * locs[i]; + } + return index; + } + indexToLoc(index) { + if (this.rank === 0) { + return []; + } + else if (this.rank === 1) { + return [index]; + } + const locs = new Array(this.shape.length); + for (let i = 0; i < locs.length - 1; ++i) { + locs[i] = Math.floor(index / this.strides[i]); + index -= locs[i] * this.strides[i]; + } + locs[locs.length - 1] = index; + return locs; + } + get rank() { + return this.shape.length; + } + /** + * Creates an immutable `tf.Tensor` object from the buffer. + * + * @doc {heading: 'Tensors', subheading: 'Creation'} + */ + toTensor() { + return trackerFn().makeTensor(this.values, this.shape, this.dtype); + } + } + // For tracking tensor creation and disposal. + let trackerFn = null; + // Used by chaining methods to call into ops. + let opHandler$1 = null; + // Used to warn about deprecated methods. + let deprecationWarningFn = null; + // This here so that we can use this method on dev branches and keep the + // functionality at master. + // tslint:disable-next-line:no-unused-expression + [deprecationWarningFn]; + /** + * An external consumer can register itself as the tensor tracker. This way + * the Tensor class can notify the tracker for every tensor created and + * disposed. 
+ */ + function setTensorTracker(fn) { + trackerFn = fn; + } + /** + * An external consumer can register itself as the op handler. This way the + * Tensor class can have chaining methods that call into ops via the op + * handler. + */ + function setOpHandler(handler) { + opHandler$1 = handler; + } + /** + * Sets the deprecation warning function to be used by this file. This way the + * Tensor class can be a leaf but still use the environment. + */ + function setDeprecationWarningFn(fn) { + deprecationWarningFn = fn; + } + /** + * A `tf.Tensor` object represents an immutable, multidimensional array of + * numbers that has a shape and a data type. + * + * For performance reasons, functions that create tensors do not necessarily + * perform a copy of the data passed to them (e.g. if the data is passed as a + * `Float32Array`), and changes to the data will change the tensor. This is not + * a feature and is not supported. To avoid this behavior, use the tensor before + * changing the input data or create a copy with `copy = tf.add(yourTensor, 0)`. + * + * See `tf.tensor` for details on how to create a `tf.Tensor`. + * + * @doc {heading: 'Tensors', subheading: 'Classes'} + */ + class Tensor { + constructor(shape, dtype, dataId, id) { + /** Whether this tensor has been globally kept. */ + this.kept = false; + this.isDisposedInternal = false; + this.shape = shape.slice(); + this.dtype = dtype || 'float32'; + this.size = sizeFromShape(shape); + this.strides = computeStrides(shape); + this.dataId = dataId; + this.id = id; + this.rankType = (this.rank < 5 ? this.rank.toString() : 'higher'); + } + get rank() { + return this.shape.length; + } + /** + * Returns a promise of `tf.TensorBuffer` that holds the underlying data. + * + * @doc {heading: 'Tensors', subheading: 'Classes'} + */ + async buffer() { + const vals = await this.data(); + return opHandler$1.buffer(this.shape, this.dtype, vals); + } + /** + * Returns a `tf.TensorBuffer` that holds the underlying data. 
+ * @doc {heading: 'Tensors', subheading: 'Classes'} + */ + bufferSync() { + return opHandler$1.buffer(this.shape, this.dtype, this.dataSync()); + } + /** + * Returns the tensor data as a nested array. The transfer of data is done + * asynchronously. + * + * @doc {heading: 'Tensors', subheading: 'Classes'} + */ + async array() { + const vals = await this.data(); + return toNestedArray(this.shape, vals, this.dtype === 'complex64'); + } + /** + * Returns the tensor data as a nested array. The transfer of data is done + * synchronously. + * + * @doc {heading: 'Tensors', subheading: 'Classes'} + */ + arraySync() { + return toNestedArray(this.shape, this.dataSync(), this.dtype === 'complex64'); + } + /** + * Asynchronously downloads the values from the `tf.Tensor`. Returns a + * promise of `TypedArray` that resolves when the computation has finished. + * + * @doc {heading: 'Tensors', subheading: 'Classes'} + */ + async data() { + this.throwIfDisposed(); + const data = trackerFn().read(this.dataId); + if (this.dtype === 'string') { + const bytes = await data; + try { + return bytes.map(b => decodeString(b)); + } + catch (_a) { + throw new Error('Failed to decode the string bytes into utf-8. ' + + 'To get the original bytes, call tensor.bytes().'); + } + } + return data; + } + /** + * Copy the tensor's data to a new GPU resource. Comparing to the `dataSync()` + * and `data()`, this method prevents data from being downloaded to CPU. + * + * For WebGL backend, the data will be stored on a densely packed texture. + * This means that the texture will use the RGBA channels to store value. + * + * For WebGPU backend, the data will be stored on a buffer. There is no + * parameter, so can not use a user-defined size to create the buffer. + * + * @param options: + * For WebGL, + * - customTexShape: Optional. If set, will use the user defined + * texture shape to create the texture. + * + * @returns For WebGL backend, a GPUData contains the new texture and + * its information. 
+ * { + * tensorRef: The tensor that is associated with this texture, + * texture: WebGLTexture, + * texShape: [number, number] // [height, width] + * } + * + * For WebGPU backend, a GPUData contains the new buffer. + * { + * tensorRef: The tensor that is associated with this buffer, + * buffer: GPUBuffer, + * } + * + * Remember to dispose the GPUData after it is used by + * `res.tensorRef.dispose()`. + * + * @doc {heading: 'Tensors', subheading: 'Classes'} + */ + dataToGPU(options) { + this.throwIfDisposed(); + return trackerFn().readToGPU(this.dataId, options); + } + /** + * Synchronously downloads the values from the `tf.Tensor`. This blocks the + * UI thread until the values are ready, which can cause performance issues. + * + * @doc {heading: 'Tensors', subheading: 'Classes'} + */ + dataSync() { + this.throwIfDisposed(); + const data = trackerFn().readSync(this.dataId); + if (this.dtype === 'string') { + try { + return data.map(b => decodeString(b)); + } + catch (_a) { + throw new Error('Failed to decode the string bytes into utf-8. ' + + 'To get the original bytes, call tensor.bytes().'); + } + } + return data; + } + /** Returns the underlying bytes of the tensor's data. */ + async bytes() { + this.throwIfDisposed(); + const data = await trackerFn().read(this.dataId); + if (this.dtype === 'string') { + return data; + } + else { + return new Uint8Array(data.buffer); + } + } + /** + * Disposes `tf.Tensor` from memory. + * + * @doc {heading: 'Tensors', subheading: 'Classes'} + */ + dispose() { + if (this.isDisposed) { + return; + } + if (this.kerasMask) { + this.kerasMask.dispose(); + } + trackerFn().disposeTensor(this); + this.isDisposedInternal = true; + } + get isDisposed() { + return this.isDisposedInternal; + } + throwIfDisposed() { + if (this.isDisposed) { + throw new Error(`Tensor is disposed.`); + } + } + /** + * Prints the `tf.Tensor`. See `tf.print` for details. 
+ * + * @param verbose Whether to print verbose information about the tensor, + * including dtype and size. + * + * @doc {heading: 'Tensors', subheading: 'Classes'} + */ + print(verbose = false) { + return opHandler$1.print(this, verbose); + } + /** + * Returns a copy of the tensor. See `tf.clone` for details. + * @doc {heading: 'Tensors', subheading: 'Classes'} + */ + clone() { + this.throwIfDisposed(); + return opHandler$1.clone(this); + } + /** + * Returns a human-readable description of the tensor. Useful for logging. + * + * @doc {heading: 'Tensors', subheading: 'Classes'} + */ + toString(verbose = false) { + const vals = this.dataSync(); + return tensorToString(vals, this.shape, this.dtype, verbose); + } + cast(dtype) { + this.throwIfDisposed(); + return opHandler$1.cast(this, dtype); + } + variable(trainable = true, name, dtype) { + this.throwIfDisposed(); + return trackerFn().makeVariable(this, trainable, name, dtype); + } + } + Object.defineProperty(Tensor, Symbol.hasInstance, { + value: (instance) => { + // Implementation note: we should use properties of the object that will be + // defined before the constructor body has finished executing (methods). + // This is because when this code is transpiled by babel, babel will call + // classCallCheck before the constructor body is run. + // See https://github.com/tensorflow/tfjs/issues/3384 for backstory. + return !!instance && instance.data != null && instance.dataSync != null && + instance.throwIfDisposed != null; + } + }); + function getGlobalTensorClass() { + // Use getGlobal so that we can augment the Tensor class across package + // boundaries because the node resolution alg may result in different modules + // being returned for this file depending on the path they are loaded from. + return getGlobal('Tensor', () => { + return Tensor; + }); + } + // Global side effect. Cache global reference to Tensor class + getGlobalTensorClass(); + /** + * A mutable `tf.Tensor`, useful for persisting state, e.g. 
for training. + * + * @doc {heading: 'Tensors', subheading: 'Classes'} + */ + class Variable extends Tensor { + constructor(initialValue, trainable, name, tensorId) { + super(initialValue.shape, initialValue.dtype, initialValue.dataId, tensorId); + this.trainable = trainable; + this.name = name; + } + /** + * Assign a new `tf.Tensor` to this variable. The new `tf.Tensor` must have + * the same shape and dtype as the old `tf.Tensor`. + * + * @param newValue New tensor to be assigned to this variable. + * + * @doc {heading: 'Tensors', subheading: 'Classes'} + */ + assign(newValue) { + if (newValue.dtype !== this.dtype) { + throw new Error(`dtype of the new value (${newValue.dtype}) and ` + + `previous value (${this.dtype}) must match`); + } + if (!arraysEqual(newValue.shape, this.shape)) { + throw new Error(`shape of the new value (${newValue.shape}) and ` + + `previous value (${this.shape}) must match`); + } + trackerFn().disposeTensor(this); + this.dataId = newValue.dataId; + trackerFn().incRef(this, null /* backend */); + } + dispose() { + trackerFn().disposeVariable(this); + this.isDisposedInternal = true; + } + } + Object.defineProperty(Variable, Symbol.hasInstance, { + value: (instance) => { + return instance instanceof Tensor && instance.assign != null && + instance.assign instanceof Function; + } + }); + + /** + * @license + * Copyright 2017 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ + exports.Rank = void 0; + (function (Rank) { + Rank["R0"] = "R0"; + Rank["R1"] = "R1"; + Rank["R2"] = "R2"; + Rank["R3"] = "R3"; + Rank["R4"] = "R4"; + Rank["R5"] = "R5"; + Rank["R6"] = "R6"; + })(exports.Rank || (exports.Rank = {})); + // Looks for upcasting types. Used, for example, in operations with mixed dtype + // inputs. + var UpcastInt32AndMap; + (function (UpcastInt32AndMap) { + UpcastInt32AndMap["float32"] = "float32"; + UpcastInt32AndMap["int32"] = "int32"; + UpcastInt32AndMap["bool"] = "int32"; + UpcastInt32AndMap["complex64"] = "complex64"; + })(UpcastInt32AndMap || (UpcastInt32AndMap = {})); + var UpcastBoolAndMap; + (function (UpcastBoolAndMap) { + UpcastBoolAndMap["float32"] = "float32"; + UpcastBoolAndMap["int32"] = "int32"; + UpcastBoolAndMap["bool"] = "bool"; + UpcastBoolAndMap["complex64"] = "complex64"; + })(UpcastBoolAndMap || (UpcastBoolAndMap = {})); + var UpcastFloat32AndMap; + (function (UpcastFloat32AndMap) { + UpcastFloat32AndMap["float32"] = "float32"; + UpcastFloat32AndMap["int32"] = "float32"; + UpcastFloat32AndMap["bool"] = "float32"; + UpcastFloat32AndMap["complex64"] = "complex64"; + })(UpcastFloat32AndMap || (UpcastFloat32AndMap = {})); + var UpcastComplex64AndMap; + (function (UpcastComplex64AndMap) { + UpcastComplex64AndMap["float32"] = "complex64"; + UpcastComplex64AndMap["int32"] = "complex64"; + UpcastComplex64AndMap["bool"] = "complex64"; + UpcastComplex64AndMap["complex64"] = "complex64"; + })(UpcastComplex64AndMap || (UpcastComplex64AndMap = {})); + const upcastTypeMap = { + 'float32': UpcastFloat32AndMap, + 'int32': UpcastInt32AndMap, + 'bool': UpcastBoolAndMap, + 'complex64': UpcastComplex64AndMap + }; + function upcastType(typeA, typeB) { + if (typeA === 'string' || typeB === 'string') { + if (typeA === 'string' && typeB === 'string') { + return 'string'; + } + throw new Error(`Can not upcast ${typeA} with ${typeB}`); + } + return 
upcastTypeMap[typeA][typeB]; + } + /** Returns the output type after summation. */ + function sumOutType(type) { + return upcastType(type, 'int32'); + } + function isWebGLData(values) { + return values != null && typeof values === 'object' && 'texture' in values && + values.texture instanceof WebGLTexture; + } + function isWebGPUData(values) { + return typeof GPUBuffer !== 'undefined' && values != null && + typeof values === 'object' && 'buffer' in values && + values.buffer instanceof GPUBuffer; + } + + /** + * @license + * Copyright 2018 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + function makeTypesMatch(a, b) { + if (a.dtype === b.dtype) { + return [a, b]; + } + const dtype = upcastType(a.dtype, b.dtype); + return [a.cast(dtype), b.cast(dtype)]; + } + function assertTypesMatch(a, b) { + assert$1(a.dtype === b.dtype, () => `The dtypes of the first(${a.dtype}) and` + + ` second(${b.dtype}) input must match`); + } + function isTensorInList(tensor, tensorList) { + return tensorList.some(x => x.id === tensor.id); + } + /** + * Extracts any `Tensor`s found within the provided object. + * + * @param container an object that may be a `Tensor` or may directly contain + * `Tensor`s, such as a `Tensor[]` or `{key: Tensor, ...}`. In general it + * is safe to pass any object here, except that `Promise`s are not + * supported. 
+ * @returns An array of `Tensors` found within the passed object. If the + * argument is simply a `Tensor', a list containing that `Tensor` is + * returned. If the object is not a `Tensor` or does not + * contain `Tensors`, an empty list is returned. + */ + function getTensorsInContainer(result) { + const list = []; + const seen = new Set(); + walkTensorContainer(result, list, seen); + return list; + } + function walkTensorContainer(container, list, seen) { + if (container == null) { + return; + } + if (container instanceof Tensor) { + list.push(container); + return; + } + if (!isIterable$1(container)) { + return; + } + // Iteration over keys works also for arrays. + const iterable = container; + for (const k in iterable) { + const val = iterable[k]; + if (!seen.has(val)) { + seen.add(val); + walkTensorContainer(val, list, seen); + } + } + } + // tslint:disable-next-line:no-any + function isIterable$1(obj) { + return Array.isArray(obj) || typeof obj === 'object'; + } + + var tensor_util = /*#__PURE__*/Object.freeze({ + __proto__: null, + assertTypesMatch: assertTypesMatch, + getTensorsInContainer: getTensorsInContainer, + isTensorInList: isTensorInList, + makeTypesMatch: makeTypesMatch + }); + + /** + * @license + * Copyright 2018 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ + function isRegisteredKernelInvocation(kernelInvocation) { + return kernelInvocation.kernelName != null; + } + class EngineState { + constructor() { + // Public since optimizers will use it. + this.registeredVariables = {}; + this.nextTapeNodeId = 0; + this.numBytes = 0; + this.numTensors = 0; + this.numStringTensors = 0; + this.numDataBuffers = 0; + // Number of nested tf.grad() statements when computing higher-order + // gradients. E.g. `1` for first-order gradients and `2` for second-order + // gradients. Used to track if the tape should be removed after a backprop. + this.gradientDepth = 0; + // Number of nested kernel calls. When kernel depth is greater than 1, we turn + // off the tape. + this.kernelDepth = 0; + this.scopeStack = []; + /** + * Keeps track of the number of data moves during a kernel execution. We + * maintain a stack since kernels can call other kernels, recursively. + */ + this.numDataMovesStack = []; + this.nextScopeId = 0; + this.tensorInfo = new WeakMap(); + this.profiling = false; + this.activeProfile = { + newBytes: 0, + newTensors: 0, + peakBytes: 0, + kernels: [], + result: null, + get kernelNames() { + return Array.from(new Set(this.kernels.map(k => k.name))); + } + }; + } + dispose() { + for (const variableName in this.registeredVariables) { + this.registeredVariables[variableName].dispose(); + } + } + } + class Engine { + constructor(ENV) { + this.ENV = ENV; + this.registry = {}; + this.registryFactory = {}; + this.pendingBackendInitId = 0; + this.state = new EngineState(); + } + async ready() { + if (this.pendingBackendInit != null) { + return this.pendingBackendInit.then(() => { }); + } + if (this.backendInstance != null) { + return; + } + const sortedBackends = this.getSortedBackends(); + for (let i = 0; i < sortedBackends.length; i++) { + const backendName = sortedBackends[i]; + const success = await 
this.initializeBackend(backendName).success; + if (success) { + await this.setBackend(backendName); + return; + } + } + throw new Error(`Could not initialize any backends, all backend initializations ` + + `failed.`); + } + get backend() { + if (this.pendingBackendInit != null) { + throw new Error(`Backend '${this.backendName}' has not yet been initialized. Make ` + + `sure to await tf.ready() or await tf.setBackend() before calling ` + + `other methods`); + } + if (this.backendInstance == null) { + const { name, asyncInit } = this.initializeBackendsAndReturnBest(); + if (asyncInit) { + throw new Error(`The highest priority backend '${name}' has not yet been ` + + `initialized. Make sure to await tf.ready() or ` + + `await tf.setBackend() before calling other methods`); + } + this.setBackend(name); + } + return this.backendInstance; + } + backendNames() { + return Object.keys(this.registryFactory); + } + findBackend(backendName) { + if (!(backendName in this.registry)) { + // If the backend hasn't been initialized but we have a registry entry for + // it, initialize it and return it. + if (backendName in this.registryFactory) { + const { asyncInit } = this.initializeBackend(backendName); + if (asyncInit) { + // Backend is not ready yet. + return null; + } + } + else { + return null; + } + } + return this.registry[backendName]; + } + findBackendFactory(backendName) { + if (!(backendName in this.registryFactory)) { + return null; + } + return this.registryFactory[backendName].factory; + } + registerBackend(backendName, factory, priority = 1) { + if (backendName in this.registryFactory) { + warn(`${backendName} backend was already registered. 
` + + `Reusing existing backend factory.`); + return false; + } + this.registryFactory[backendName] = { factory, priority }; + return true; + } + async setBackend(backendName) { + if (this.registryFactory[backendName] == null) { + throw new Error(`Backend name '${backendName}' not found in registry`); + } + this.backendName = backendName; + if (this.registry[backendName] == null) { + this.backendInstance = null; + const { success, asyncInit } = this.initializeBackend(backendName); + const result = asyncInit ? await success : success; + if (!result) { + return false; + } + } + this.backendInstance = this.registry[backendName]; + this.setupRegisteredKernels(); + // Reset the profiler. + this.profiler = new Profiler(this.backendInstance); + return true; + } + setupRegisteredKernels() { + const kernels = getKernelsForBackend(this.backendName); + kernels.forEach(kernel => { + if (kernel.setupFunc != null) { + kernel.setupFunc(this.backendInstance); + } + }); + } + disposeRegisteredKernels(backendName) { + const kernels = getKernelsForBackend(backendName); + kernels.forEach(kernel => { + if (kernel.disposeFunc != null) { + kernel.disposeFunc(this.registry[backendName]); + } + }); + } + /** + * Initializes a backend by looking up the backend name in the factory + * registry and calling the factory method. Returns a boolean representing + * whether the initialization of the backend succeeded. Throws an error if + * there is no backend in the factory registry. + */ + initializeBackend(backendName) { + const registryFactoryEntry = this.registryFactory[backendName]; + if (registryFactoryEntry == null) { + throw new Error(`Cannot initialize backend ${backendName}, no registration found.`); + } + try { + const backend = registryFactoryEntry.factory(); + /* Test if the factory returns a promise. + Done in a more liberal way than + previous 'Promise.resolve(backend)===backend' + as we needed to account for custom Promise + implementations (e.g. 
Angular) */ + if (backend && !(backend instanceof KernelBackend) && + typeof backend.then === 'function') { + const promiseId = ++this.pendingBackendInitId; + const success = backend + .then(backendInstance => { + // Outdated promise. Another backend was set in the meantime. + if (promiseId < this.pendingBackendInitId) { + return false; + } + this.registry[backendName] = backendInstance; + this.pendingBackendInit = null; + return true; + }) + .catch(err => { + // Outdated promise. Another backend was set in the meantime. + if (promiseId < this.pendingBackendInitId) { + return false; + } + this.pendingBackendInit = null; + warn(`Initialization of backend ${backendName} failed`); + warn(err.stack || err.message); + return false; + }); + this.pendingBackendInit = success; + return { success, asyncInit: true }; + } + else { + this.registry[backendName] = backend; + return { success: true, asyncInit: false }; + } + } + catch (err) { + warn(`Initialization of backend ${backendName} failed`); + warn(err.stack || err.message); + return { success: false, asyncInit: false }; + } + } + removeBackend(backendName) { + if (!(backendName in this.registryFactory)) { + throw new Error(`${backendName} backend not found in registry`); + } + if (this.backendName === backendName && this.pendingBackendInit != null) { + // There is a pending promise of the backend we want to remove. Make it + // obsolete. + this.pendingBackendInitId++; + } + if (backendName in this.registry) { + this.disposeRegisteredKernels(backendName); + this.registry[backendName].dispose(); + delete this.registry[backendName]; + } + delete this.registryFactory[backendName]; + // Unset the backend if it is active. 
+ if (this.backendName === backendName) { + this.pendingBackendInit = null; + this.backendName = null; + this.backendInstance = null; + } + } + getSortedBackends() { + if (Object.keys(this.registryFactory).length === 0) { + throw new Error('No backend found in registry.'); + } + return Object.keys(this.registryFactory).sort((a, b) => { + // Highest priority comes first. + return this.registryFactory[b].priority - + this.registryFactory[a].priority; + }); + } + initializeBackendsAndReturnBest() { + const sortedBackends = this.getSortedBackends(); + for (let i = 0; i < sortedBackends.length; i++) { + const backendName = sortedBackends[i]; + const { success, asyncInit } = this.initializeBackend(backendName); + if (asyncInit || success) { + return { name: backendName, asyncInit }; + } + } + throw new Error(`Could not initialize any backends, all backend initializations ` + + `failed.`); + } + moveData(backend, dataId) { + const info = this.state.tensorInfo.get(dataId); + const srcBackend = info.backend; + const values = this.readSync(dataId); + const refCount = srcBackend.refCount(dataId); + // Delete the tensor from the old backend and move it to the new + // backend. + srcBackend.disposeData(dataId, true); + info.backend = backend; + backend.move(dataId, values, info.shape, info.dtype, refCount); + if (this.shouldCheckForMemLeaks()) { + // Track the number of moves during a kernel execution to correctly + // detect memory leaks. + this.state.numDataMovesStack[this.state.numDataMovesStack.length - 1]++; + } + } + tidy(nameOrFn, fn) { + let name = null; + if (fn == null) { + // Called with only 1 argument. + if (typeof nameOrFn !== 'function') { + throw new Error('Please provide a function to tidy()'); + } + fn = nameOrFn; + } + else { + // Called with 2 arguments. 
+ if (typeof nameOrFn !== 'string' && !(nameOrFn instanceof String)) { + throw new Error('When calling with two arguments, the first argument ' + + 'to tidy() must be a string'); + } + if (typeof fn !== 'function') { + throw new Error('When calling with two arguments, the 2nd argument ' + + 'to tidy() must be a function'); + } + name = nameOrFn; + // TODO(nsthorat,smilkov): Do operation logging and performance + // profiling. + } + let result; + return this.scopedRun(() => this.startScope(name), () => this.endScope(result), () => { + result = fn(); + if (result instanceof Promise) { + console.error('Cannot return a Promise inside of tidy.'); + } + return result; + }); + } + scopedRun(start, end, f) { + start(); + try { + const res = f(); + end(); + return res; + } + catch (ex) { + end(); + throw ex; + } + } + nextTensorId() { + return Engine.nextTensorId++; + } + nextVariableId() { + return Engine.nextVariableId++; + } + /** + * This method is called instead of the public-facing tensor.clone() when + * saving a tensor for backwards pass. It makes sure to add the clone + * operation to the tape regardless of being called inside a kernel + * execution. + */ + clone(x) { + const y = ENGINE.runKernel(Identity$1, { x }); + const inputs = { x }; + const grad = (dy) => ({ + x: () => { + const dtype = 'float32'; + const gradInputs = { x: dy }; + const attrs = { dtype }; + return ENGINE.runKernel(Cast, gradInputs, + // tslint:disable-next-line: no-unnecessary-type-assertion + attrs); + } + }); + const saved = []; + this.addTapeNode(this.state.activeScope.name, inputs, [y], grad, saved, {}); + return y; + } + /** + * Execute a kernel with the given name and return the output tensor. + * + * @param kernelName The name of the kernel to execute. + * @param inputs A map of input names to tensors. + * @param attrs A map of attribute names to their values. An attribute is a + * primitive (non-tensor) input to the kernel. 
+ * @param inputsToSave A list of tensors, inputs to save for the backprop + * computation. + * @param outputsToSave A list of booleans, specifying which output to save + * for the backprop computation. These are booleans since the output + * tensors are not visible to the user. + */ + runKernel(kernelName, inputs, attrs) { + if (this.backendName == null) { + // backend has not been initialized yet (backend initialization is lazy + // can be deferred until an op/ kernel is run). + // The below getter has side effects that will try to initialize the + // backend and set properties like this.backendName + // tslint:disable-next-line: no-unused-expression + this.backend; + } + const hasKernel = getKernel(kernelName, this.backendName) != null; + if (!hasKernel) { + throw new Error(`Kernel '${kernelName}' not registered for backend '${this.backendName}'`); + } + return this.runKernelFunc({ kernelName, inputs, attrs }); + } + shouldCheckForMemLeaks() { + return this.ENV.getBool('IS_TEST'); + } + checkKernelForMemLeak(kernelName, numDataIdsBefore, outInfos) { + const numDataIdsAfter = this.backend.numDataIds(); + // Count the number of data ids associated with the result of the kernel. + let numOutputDataIds = 0; + outInfos.forEach(info => { + // Complex numbers allocate 3 data ids, one for 'real', one for + // 'imaginary', and one for the container that holds the former two. + numOutputDataIds += (info.dtype === 'complex64' ? 3 : 1); + }); + // Account for the number of moves during kernel execution. A "data move" + // can happen in the middle of a kernel execution, placing a new (key,value) + // pair in the data storage. Since data moves have net zero effect (we + // always remove the data from the old backend), we have to cancel them out + // when detecting memory leaks. 
+ const numMoves = this.state.numDataMovesStack[this.state.numDataMovesStack.length - 1]; + const dataIdsLeaked = numDataIdsAfter - numDataIdsBefore - numOutputDataIds - numMoves; + if (dataIdsLeaked > 0) { + throw new Error(`Backend '${this.backendName}' has an internal memory leak ` + + `(${dataIdsLeaked} data ids) after running '${kernelName}'`); + } + } + /** + * Internal helper method to execute a kernel Func + * + * Use `runKernel` to execute kernels from outside of engine. + */ + runKernelFunc(kernelParams) { + let outputs; + let saved = []; + const isTapeOn = this.isTapeOn(); + const startingBytecount = this.state.numBytes; + const startingNumTensors = this.state.numTensors; + if (this.shouldCheckForMemLeaks()) { + this.state.numDataMovesStack.push(0); + } + let kernelFunc; + if (this.backendName == null) { + // backend has not been initialized yet (backend initialization is lazy + // can be deferred until an op/ kernel is run). + // The below getter has side effects that will try to initialize the + // backend and set properties like this.backendName + // tslint:disable-next-line: no-unused-expression + this.backend; + } + let out; + const kernelOrScopeName = isRegisteredKernelInvocation(kernelParams) ? + kernelParams.kernelName : + this.state.activeScope != null ? this.state.activeScope.name : ''; + // Create the kernelFunc from either a registered kernel OR passed in + // forward/backward functions (used by custom grad). In this context a + // kernelFunc wraps a kernel implementation with some bookkeeping. + if (isRegisteredKernelInvocation(kernelParams)) { + const { kernelName, inputs, attrs } = kernelParams; + if (this.backendName == null) { + // backend has not been initialized yet (backend initialization is lazy + // can be deferred until an op/ kernel is run). 
+ // The below getter has side effects that will try to initialize the + // backend and set properties like this.backendName + // tslint:disable-next-line: no-unused-expression + this.backend; + } + const kernel = getKernel(kernelName, this.backendName); + assert$1(kernel != null, () => `Cannot find registered kernel '${kernelName}' for backend '${this.backendName}'`); + kernelFunc = () => { + const numDataIdsBefore = this.backend.numDataIds(); + out = kernel.kernelFunc({ inputs, attrs, backend: this.backend }); + const outInfos = Array.isArray(out) ? out : [out]; + if (this.shouldCheckForMemLeaks()) { + this.checkKernelForMemLeak(kernelName, numDataIdsBefore, outInfos); + } + const outTensors = outInfos.map((outInfo) => { + // todo (yassogba) remove this option (Tensor) when node backend + // methods have been modularized and they all return tensorInfo. + // TensorInfos do not have a rank attribute. + if (outInfo.rank != null) { + return outInfo; + } + return this.makeTensorFromTensorInfo(outInfo); + }); + // Save any required inputs and outputs. + // Do not save unless we are recording to the tape. Otherwise it would + // cause a mem leak since there would be no backprop for these tensors + // (which would otherwise dispose them). + if (isTapeOn) { + const tensorsToSave = this.getTensorsForGradient(kernelName, inputs, outTensors); + saved = this.saveTensorsForBackwardMode(tensorsToSave); + } + return outTensors; + }; + } + else { + const { forwardFunc } = kernelParams; + // Running a customGrad op. + const saveFunc = (tensors) => { + // Do not save unless we are recording to the tape. Otherwise it would + // cause a mem leak since we would never run backprop, which disposes + // the kept tensors. 
+ if (!isTapeOn) { + return; + } + saved = tensors.map(tensor => this.keep(this.clone(tensor))); + }; + kernelFunc = () => { + const numDataIdsBefore = this.backend.numDataIds(); + out = this.tidy(() => forwardFunc(this.backend, saveFunc)); + const outs = (Array.isArray(out) ? out : [out]); + if (this.shouldCheckForMemLeaks()) { + // Scope name is used to print a more helpful error message if needed. + this.checkKernelForMemLeak(kernelOrScopeName, numDataIdsBefore, outs); + } + return outs; + }; + } + // + // Run the kernelFunc. Optionally profiling it. + // + const { inputs, attrs } = kernelParams; + const backwardsFunc = isRegisteredKernelInvocation(kernelParams) ? + null : + kernelParams.backwardsFunc; + let kernelProfile; + this.scopedRun( + // Stop recording to a tape when running a kernel. + () => this.state.kernelDepth++, () => this.state.kernelDepth--, () => { + if (!this.ENV.getBool('DEBUG') && !this.state.profiling) { + outputs = kernelFunc(); + } + else { + kernelProfile = this.profiler.profileKernel(kernelOrScopeName, inputs, () => kernelFunc()); + if (this.ENV.getBool('DEBUG')) { + this.profiler.logKernelProfile(kernelProfile); + } + outputs = kernelProfile.outputs; + } + }); + if (isTapeOn) { + this.addTapeNode(kernelOrScopeName, inputs, outputs, backwardsFunc, saved, attrs); + } + if (this.state.profiling) { + this.state.activeProfile.kernels.push({ + name: kernelOrScopeName, + bytesAdded: this.state.numBytes - startingBytecount, + totalBytesSnapshot: this.state.numBytes, + tensorsAdded: this.state.numTensors - startingNumTensors, + totalTensorsSnapshot: this.state.numTensors, + inputShapes: Object.keys(inputs).map(key => inputs[key] != null ? inputs[key].shape : null), + outputShapes: outputs.map(item => item.shape), + kernelTimeMs: kernelProfile.timeMs, + extraInfo: kernelProfile.extraInfo + }); + } + return (Array.isArray(out) ? outputs : outputs[0]); + } + /** + * Saves tensors used in forward mode for use in backward mode. 
+ * + * @param tensors the list of tensors to save. + */ + saveTensorsForBackwardMode(tensors) { + const saved = tensors.map(tensor => this.keep(this.clone(tensor))); + return saved; + } + /** + * Returns a list of tensors to save for a given gradient calculation. + * + * @param kernelName name of kernel to look up gradient for. + * @param inputs a map of input tensors. + * @param outputs an array of output tensors from forward mode of kernel. + */ + getTensorsForGradient(kernelName, inputs, outputs) { + const gradConfig = getGradient(kernelName); + if (gradConfig != null) { + const inputsToSave = gradConfig.inputsToSave || []; + const outputsToSave = gradConfig.outputsToSave || []; + // If saveAllInputs is true, all inputs will be saved. Otherwise, inputs + // specified in inputsToSave will be saved. + let inputTensorsToSave; + if (gradConfig.saveAllInputs) { + assert$1(Array.isArray(inputs), () => 'saveAllInputs is true, expected inputs to be an array.'); + inputTensorsToSave = Object.keys(inputs).map((key) => inputs[key]); + } + else { + inputTensorsToSave = inputsToSave.map((inputName) => inputs[inputName]); + } + const outputTensorsToSave = outputs.filter((_, i) => outputsToSave[i]); + return inputTensorsToSave.concat(outputTensorsToSave); + } + // We return an empty list rather than throw an error because the kernel we + // are looking up may not actually be relevant to backproping through the + // overall function + // + // See 'does not error if irrelevant (pruned) ops are missing grads' test + // in gradients_test.ts for an example. + return []; + } + /** + * Internal method used by public APIs for tensor creation. Makes a new + * tensor with the provided shape, dtype and values. It always + * creates a new data id and writes the values to the underlying backend. 
+ */ + makeTensor(values, shape, dtype, backend) { + if (values == null) { + throw new Error('Values passed to engine.makeTensor() are null'); + } + dtype = dtype || 'float32'; + backend = backend || this.backend; + let backendVals = values; + if (dtype === 'string' && isString(values[0])) { + backendVals = values.map(d => encodeString(d)); + } + const dataId = backend.write(backendVals, shape, dtype); + const t = new Tensor(shape, dtype, dataId, this.nextTensorId()); + this.trackTensor(t, backend); + // Count bytes for string tensors. + if (dtype === 'string') { + const info = this.state.tensorInfo.get(dataId); + const newBytes = bytesFromStringArray(backendVals); + this.state.numBytes += newBytes - info.bytes; + info.bytes = newBytes; + } + return t; + } + /** + * Internal method used by backends. Makes a new tensor + * that is a wrapper around an existing data id. It doesn't create + * a new data id, only increments the ref count used in memory tracking. + * @deprecated + */ + makeTensorFromDataId(dataId, shape, dtype, backend) { + dtype = dtype || 'float32'; + const tensorInfo = { dataId, shape, dtype }; + return this.makeTensorFromTensorInfo(tensorInfo, backend); + } + /** + * Internal method used by backends. Makes a new tensor that is a wrapper + * around an existing data id in TensorInfo. It doesn't create a new data id, + * only increments the ref count used in memory tracking. 
+ */ + makeTensorFromTensorInfo(tensorInfo, backend) { + const { dataId, shape, dtype } = tensorInfo; + const t = new Tensor(shape, dtype, dataId, this.nextTensorId()); + this.trackTensor(t, backend); + return t; + } + makeVariable(initialValue, trainable = true, name, dtype) { + name = name || this.nextVariableId().toString(); + if (dtype != null && dtype !== initialValue.dtype) { + initialValue = initialValue.cast(dtype); + } + const v = new Variable(initialValue, trainable, name, this.nextTensorId()); + if (this.state.registeredVariables[v.name] != null) { + throw new Error(`Variable with name ${v.name} was already registered`); + } + this.state.registeredVariables[v.name] = v; + this.incRef(v, this.backend); + return v; + } + trackTensor(a, backend) { + this.state.numTensors++; + if (a.dtype === 'string') { + this.state.numStringTensors++; + } + // Bytes for complex numbers are counted by their components. Bytes for + // string tensors are counted when writing values. + let bytes = 0; + if (a.dtype !== 'complex64' && a.dtype !== 'string') { + bytes = a.size * bytesPerElement(a.dtype); + } + this.state.numBytes += bytes; + if (!this.state.tensorInfo.has(a.dataId)) { + this.state.numDataBuffers++; + this.state.tensorInfo.set(a.dataId, { + backend: backend || this.backend, + dtype: a.dtype, + shape: a.shape, + bytes + }); + } + if (!(a instanceof Variable)) { + this.track(a); + } + } + // Track the tensor by dataId and increase the refCount for the dataId in the + // backend. + // TODO(pyu10055): This is currently used by makeVariable method, to increase + // refCount on the backend for the dataId. It can potentially be replaced with + // Identity op indead of calling backend directly. 
+ incRef(a, backend) { + this.trackTensor(a, backend); + this.backend.incRef(a.dataId); + } + removeDataId(dataId, backend) { + if (this.state.tensorInfo.has(dataId) && + this.state.tensorInfo.get(dataId).backend === backend) { + this.state.tensorInfo.delete(dataId); + this.state.numDataBuffers--; + } + } + disposeTensor(a) { + if (!this.state.tensorInfo.has(a.dataId)) { + return; + } + const info = this.state.tensorInfo.get(a.dataId); + this.state.numTensors--; + if (a.dtype === 'string') { + this.state.numStringTensors--; + this.state.numBytes -= info.bytes; + } + // Don't count bytes for complex numbers as they are counted by their + // components. + if (a.dtype !== 'complex64' && a.dtype !== 'string') { + const bytes = a.size * bytesPerElement(a.dtype); + this.state.numBytes -= bytes; + } + // Remove the reference to dataId if backend dispose the data successfully + if (info.backend.disposeData(a.dataId)) { + this.removeDataId(a.dataId, info.backend); + } + // TODO(nsthorat): Construct an error and save the stack trace for + // debugging when in debug mode. Creating a stack trace is too expensive + // to do unconditionally. 
+ } + disposeVariables() { + for (const varName in this.state.registeredVariables) { + const v = this.state.registeredVariables[varName]; + this.disposeVariable(v); + } + } + disposeVariable(v) { + this.disposeTensor(v); + if (this.state.registeredVariables[v.name] != null) { + delete this.state.registeredVariables[v.name]; + } + } + memory() { + const info = this.backend.memory(); + info.numTensors = this.state.numTensors; + info.numDataBuffers = this.state.numDataBuffers; + info.numBytes = this.state.numBytes; + if (this.state.numStringTensors > 0) { + info.unreliable = true; + if (info.reasons == null) { + info.reasons = []; + } + info.reasons.push('Memory usage by string tensors is approximate ' + + '(2 bytes per character)'); + } + return info; + } + async profile(query) { + this.state.profiling = true; + const startBytes = this.state.numBytes; + const startNumTensors = this.state.numTensors; + this.state.activeProfile.kernels = []; + this.state.activeProfile.result = await query(); + this.state.profiling = false; + this.state.activeProfile.peakBytes = Math.max(...this.state.activeProfile.kernels.map(d => d.totalBytesSnapshot)); + this.state.activeProfile.newBytes = this.state.numBytes - startBytes; + this.state.activeProfile.newTensors = + this.state.numTensors - startNumTensors; + for (const kernel of this.state.activeProfile.kernels) { + kernel.kernelTimeMs = await kernel.kernelTimeMs; + kernel.extraInfo = await kernel.extraInfo; + } + return this.state.activeProfile; + } + isTapeOn() { + return this.state.gradientDepth > 0 && this.state.kernelDepth === 0; + } + addTapeNode(kernelName, inputs, outputs, gradientsFunc, saved, attrs) { + const tapeNode = { id: this.state.nextTapeNodeId++, kernelName, inputs, outputs, saved }; + const gradConfig = getGradient(kernelName); + if (gradConfig != null) { + gradientsFunc = gradConfig.gradFunc; + } + if (gradientsFunc != null) { + tapeNode.gradient = (dys) => { + // TODO(smilkov): To optimize back-prop, pass dys that 
are not used in + // the backprop graph to the user as null instead of zeros + dys = dys.map((dy, i) => { + if (dy == null) { + const output = outputs[i]; + const vals = makeZerosTypedArray(output.size, output.dtype); + return this.makeTensor(vals, output.shape, output.dtype); + } + return dy; + }); + // Grad functions of ops with single outputs expect a dy, while ops + // with multiple outputs expect dys (array of dy). + return gradientsFunc(dys.length > 1 ? dys : dys[0], saved, attrs); + }; + } + this.state.activeTape.push(tapeNode); + } + keep(result) { + result.kept = true; + return result; + } + startTape() { + if (this.state.gradientDepth === 0) { + this.state.activeTape = []; + } + this.state.gradientDepth++; + } + endTape() { + this.state.gradientDepth--; + } + /** + * Start a scope. Use this with endScope() to achieve the same functionality + * as scope() without the need for a function closure. + */ + startScope(name) { + const scopeInfo = { + track: [], + name: 'unnamed scope', + id: this.state.nextScopeId++ + }; + if (name) { + scopeInfo.name = name; + } + this.state.scopeStack.push(scopeInfo); + this.state.activeScope = scopeInfo; + } + /** + * End a scope. Use this with startScope() to achieve the same functionality + * as scope() without the need for a function closure. + */ + endScope(result) { + const tensorsToTrackInParent = getTensorsInContainer(result); + const tensorsToTrackInParentSet = new Set(tensorsToTrackInParent.map(t => t.id)); + // Dispose the arrays tracked in this scope. + for (let i = 0; i < this.state.activeScope.track.length; i++) { + const tensor = this.state.activeScope.track[i]; + if (!tensor.kept && !tensorsToTrackInParentSet.has(tensor.id)) { + tensor.dispose(); + } + } + const oldScope = this.state.scopeStack.pop(); + this.state.activeScope = this.state.scopeStack.length === 0 ? + null : + this.state.scopeStack[this.state.scopeStack.length - 1]; + // Track the current result in the parent scope. 
+ tensorsToTrackInParent.forEach(tensor => { + // Only track the tensor if was allocated in the inner scope and is not + // globally kept. + if (!tensor.kept && tensor.scopeId === oldScope.id) { + this.track(tensor); + } + }); + } + /** + * Returns gradients of `f` with respect to each of the `xs`. The gradients + * returned are of the same length as `xs`, but some might be null if `f` + * was not a function of that `x`. It also takes optional dy to multiply the + * gradient, which defaults to `1`. + */ + gradients(f, xs, dy, allowNoGradients = false) { + assert$1(xs.length > 0, () => 'gradients() received an empty list of xs.'); + if (dy != null && dy.dtype !== 'float32') { + throw new Error(`dy must have 'float32' dtype, but has '${dy.dtype}'`); + } + const y = this.scopedRun(() => this.startTape(), () => this.endTape(), () => this.tidy('forward', f)); + assert$1(y instanceof Tensor, () => 'The result y returned by f() must be a tensor.'); + // Filter out the nodes that don't connect x => y. + const filteredTape = getFilteredNodesXToY(this.state.activeTape, xs, y); + if (!allowNoGradients && filteredTape.length === 0 && xs.length > 0) { + throw new Error('Cannot compute gradient of y=f(x) with respect to x. Make sure ' + + 'that the f you passed encloses all operations that lead from x ' + + 'to y.'); + } + return this.tidy('backward', () => { + const accumulatedGradientMap = {}; + accumulatedGradientMap[y.id] = (dy == null) ? ones$2(y.shape) : dy; + // Backprop gradients through the filtered nodes. + backpropagateGradients(accumulatedGradientMap, filteredTape, + // Pass the tidy function to avoid circular dep with `tape.ts`. + f => this.tidy(f), + // Pass an add function to avoide a circular dep with `tape.ts`. + add$4); + const grads = xs.map(x => accumulatedGradientMap[x.id]); + if (this.state.gradientDepth === 0) { + // This means that we are not computing higher-order gradients + // and can clean up the tape. 
+ this.state.activeTape.forEach(node => { + for (const tensor of node.saved) { + tensor.dispose(); + } + }); + this.state.activeTape = null; + } + return { value: y, grads }; + }); + } + customGrad(f) { + assert$1(isFunction(f), () => 'The f passed in customGrad(f) must be a function.'); + return (...inputs) => { + assert$1(inputs.every(t => t instanceof Tensor), () => 'The args passed in customGrad(f)(x1, x2,...) must all be ' + + 'tensors'); + let res; + const inputMap = {}; + inputs.forEach((input, i) => { + inputMap[i] = input; + }); + const forwardFunc = (_, save) => { + res = f(...[...inputs, save]); + assert$1(res.value instanceof Tensor, () => 'The function f passed in customGrad(f) must return an ' + + 'object where `obj.value` is a tensor'); + assert$1(isFunction(res.gradFunc), () => 'The function f passed in customGrad(f) must return an ' + + 'object where `obj.gradFunc` is a function.'); + return res.value; + }; + const backwardsFunc = (dy, saved) => { + const gradRes = res.gradFunc(dy, saved); + const grads = Array.isArray(gradRes) ? gradRes : [gradRes]; + assert$1(grads.length === inputs.length, () => 'The function f passed in customGrad(f) must return an ' + + 'object where `obj.gradFunc` is a function that returns ' + + 'the same number of tensors as inputs passed to f(...).'); + assert$1(grads.every(t => t instanceof Tensor), () => 'The function f passed in customGrad(f) must return an ' + + 'object where `obj.gradFunc` is a function that returns ' + + 'a list of only tensors.'); + const gradMap = {}; + grads.forEach((grad, i) => { + gradMap[i] = () => grad; + }); + return gradMap; + }; + return this.runKernelFunc({ + forwardFunc, + backwardsFunc, + inputs: inputMap, + }); + }; + } + readSync(dataId) { + // Route the read to the correct backend. + const info = this.state.tensorInfo.get(dataId); + return info.backend.readSync(dataId); + } + read(dataId) { + // Route the read to the correct backend. 
+ const info = this.state.tensorInfo.get(dataId); + return info.backend.read(dataId); + } + readToGPU(dataId, options) { + // Route the read to the correct backend. + const info = this.state.tensorInfo.get(dataId); + return info.backend.readToGPU(dataId, options); + } + async time(query) { + const start = now(); + const timingInfo = await this.backend.time(query); + timingInfo.wallMs = now() - start; + return timingInfo; + } + /** + * Tracks a Tensor in the current scope to be automatically cleaned up + * when the current scope ends, and returns the value. + * + * @param result The Tensor to track in the current scope. + */ + track(result) { + if (this.state.activeScope != null) { + result.scopeId = this.state.activeScope.id; + this.state.activeScope.track.push(result); + } + return result; + } + get registeredVariables() { + return this.state.registeredVariables; + } + /** + * Resets the engine state. Removes all backends but does not remove + * registered backend factories. + */ + reset() { + // Make any pending promise obsolete. + this.pendingBackendInitId++; + this.state.dispose(); + this.ENV.reset(); + this.state = new EngineState(); + for (const backendName in this.registry) { + this.disposeRegisteredKernels(backendName); + this.registry[backendName].dispose(); + delete this.registry[backendName]; + } + this.backendName = null; + this.backendInstance = null; + this.pendingBackendInit = null; + } + } + Engine.nextTensorId = 0; + Engine.nextVariableId = 0; + function ones$2(shape) { + const values = makeOnesTypedArray(sizeFromShape(shape), 'float32'); + return ENGINE.makeTensor(values, shape, 'float32'); + } + function getOrMakeEngine() { + const ns = getGlobalNamespace(); + if (ns._tfengine == null) { + const environment = new Environment(ns); + ns._tfengine = new Engine(environment); + } + setEnvironmentGlobal(ns._tfengine.ENV); + // Tell the current tensor interface that the global engine is responsible + // for tracking. 
+ setTensorTracker(() => ns._tfengine); + return ns._tfengine; + } + const ENGINE = getOrMakeEngine(); + /** + * A implementation of the add op for use within engine and tape. + * + * This allows us to avoid a circular dependency between add.ts and engine. + * It is exported to be available in tape tests. + */ + function add$4(a, b) { + // We duplicate Add here to avoid a circular dependency with add.ts. + const inputs = { a, b }; + return ENGINE.runKernel(Add$1, inputs); + } + + /** + * @license + * Copyright 2017 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + // tslint:disable-next-line:no-any + function _isNavigatorDefined() { + return typeof navigator !== 'undefined' && navigator != null; + } + let isMobileMockValue; + function mockIsMobile(value) { + isMobileMockValue = value; + } + function isMobile(nav) { + if (isMobileMockValue !== undefined) { + return isMobileMockValue; + } + if (nav || _isNavigatorDefined()) { + if (!nav) { + nav = navigator; + } + if (nav.product === 'ReactNative') { + return true; + } + const a = nav.userAgent || nav.vendor || + // tslint:disable-next-line:no-any + (typeof window !== 'undefined' ? window.opera : ''); + // Use `navigator.userAgentData.mobile` as fallback. 
+ if (!a) { + // tslint:disable-next-line:no-any + const navAny = nav; + return navAny.userAgentData && navAny.userAgentData.mobile; + } + // tslint:disable-next-line:max-line-length + return /(android|bb\d+|meego).+mobile|avantgo|bada\/|blackberry|blazer|compal|elaine|fennec|hiptop|iemobile|ip(hone|od)|iris|kindle|lge |maemo|midp|mmp|mobile.+firefox|netfront|opera m(ob|in)i|palm( os)?|phone|p(ixi|re)\/|plucker|pocket|psp|series(4|6)0|symbian|treo|up\.(browser|link)|vodafone|wap|windows ce|xda|xiino/i + .test(a) || + // tslint:disable-next-line:max-line-length + /1207|6310|6590|3gso|4thp|50[1-6]i|770s|802s|a wa|abac|ac(er|oo|s\-)|ai(ko|rn)|al(av|ca|co)|amoi|an(ex|ny|yw)|aptu|ar(ch|go)|as(te|us)|attw|au(di|\-m|r |s )|avan|be(ck|ll|nq)|bi(lb|rd)|bl(ac|az)|br(e|v)w|bumb|bw\-(n|u)|c55\/|capi|ccwa|cdm\-|cell|chtm|cldc|cmd\-|co(mp|nd)|craw|da(it|ll|ng)|dbte|dc\-s|devi|dica|dmob|do(c|p)o|ds(12|\-d)|el(49|ai)|em(l2|ul)|er(ic|k0)|esl8|ez([4-7]0|os|wa|ze)|fetc|fly(\-|_)|g1 u|g560|gene|gf\-5|g\-mo|go(\.w|od)|gr(ad|un)|haie|hcit|hd\-(m|p|t)|hei\-|hi(pt|ta)|hp( i|ip)|hs\-c|ht(c(\-| |_|a|g|p|s|t)|tp)|hu(aw|tc)|i\-(20|go|ma)|i230|iac( |\-|\/)|ibro|idea|ig01|ikom|im1k|inno|ipaq|iris|ja(t|v)a|jbro|jemu|jigs|kddi|keji|kgt( |\/)|klon|kpt |kwc\-|kyo(c|k)|le(no|xi)|lg( g|\/(k|l|u)|50|54|\-[a-w])|libw|lynx|m1\-w|m3ga|m50\/|ma(te|ui|xo)|mc(01|21|ca)|m\-cr|me(rc|ri)|mi(o8|oa|ts)|mmef|mo(01|02|bi|de|do|t(\-| |o|v)|zz)|mt(50|p1|v )|mwbp|mywa|n10[0-2]|n20[2-3]|n30(0|2)|n50(0|2|5)|n7(0(0|1)|10)|ne((c|m)\-|on|tf|wf|wg|wt)|nok(6|i)|nzph|o2im|op(ti|wv)|oran|owg1|p800|pan(a|d|t)|pdxg|pg(13|\-([1-8]|c))|phil|pire|pl(ay|uc)|pn\-2|po(ck|rt|se)|prox|psio|pt\-g|qa\-a|qc(07|12|21|32|60|\-[2-7]|i\-)|qtek|r380|r600|raks|rim9|ro(ve|zo)|s55\/|sa(ge|ma|mm|ms|ny|va)|sc(01|h\-|oo|p\-)|sdk\/|se(c(\-|0|1)|47|mc|nd|ri)|sgh\-|shar|sie(\-|m)|sk\-0|sl(45|id)|sm(al|ar|b3|it|t5)|so(ft|ny)|sp(01|h\-|v\-|v 
)|sy(01|mb)|t2(18|50)|t6(00|10|18)|ta(gt|lk)|tcl\-|tdg\-|tel(i|m)|tim\-|t\-mo|to(pl|sh)|ts(70|m\-|m3|m5)|tx\-9|up(\.b|g1|si)|utst|v400|v750|veri|vi(rg|te)|vk(40|5[0-3]|\-v)|vm40|voda|vulc|vx(52|53|60|61|70|80|81|83|85|98)|w3c(\-| )|webc|whit|wi(g |nc|nw)|wmlb|wonu|x700|yas\-|your|zeto|zte\-/i + .test(a.substr(0, 4)); + } + return false; + } + function isBrowser() { + return (typeof window !== 'undefined' && window.document != null) || + //@ts-ignore + (typeof WorkerGlobalScope !== 'undefined'); + } + + var device_util = /*#__PURE__*/Object.freeze({ + __proto__: null, + isBrowser: isBrowser, + isMobile: isMobile, + mockIsMobile: mockIsMobile + }); + + /** + * @license + * Copyright 2019 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + const ENV$3 = env(); + /** + * This file contains environment-related flag registrations. + */ + /** Whether to enable debug mode. */ + ENV$3.registerFlag('DEBUG', () => false, debugValue => { + if (debugValue) { + console.warn('Debugging mode is ON. The output of every math call will ' + + 'be downloaded to CPU and checked for NaNs. ' + + 'This significantly impacts performance.'); + } + }); + /** Whether we are in a browser (as versus, say, node.js) environment. */ + ENV$3.registerFlag('IS_BROWSER', () => isBrowser()); + /** Whether we are in a browser (as versus, say, node.js) environment. 
*/ + ENV$3.registerFlag('IS_NODE', () => (typeof process !== 'undefined') && + (typeof process.versions !== 'undefined') && + (typeof process.versions.node !== 'undefined')); + /** Whether this browser is Chrome. */ + ENV$3.registerFlag('IS_CHROME', () => typeof navigator !== 'undefined' && navigator != null && + navigator.userAgent != null && /Chrome/.test(navigator.userAgent) && + /Google Inc/.test(navigator.vendor)); + /** Whether this browser is Safari. */ + ENV$3.registerFlag('IS_SAFARI', () => typeof navigator !== 'undefined' && navigator != null && + navigator.userAgent != null && /Safari/.test(navigator.userAgent) && + /Apple/.test(navigator.vendor)); + /** + * True when the environment is "production" where we disable safety checks + * to gain performance. + */ + ENV$3.registerFlag('PROD', () => false); + /** + * Whether to do sanity checks when inferring a shape from user-provided + * values, used when creating a new tensor. + */ + ENV$3.registerFlag('TENSORLIKE_CHECK_SHAPE_CONSISTENCY', () => ENV$3.getBool('DEBUG')); + /** Whether deprecation warnings are enabled. */ + ENV$3.registerFlag('DEPRECATION_WARNINGS_ENABLED', () => true); + /** True if running unit tests. */ + ENV$3.registerFlag('IS_TEST', () => false); + /** Whether to check computation result for errors. */ + ENV$3.registerFlag('CHECK_COMPUTATION_FOR_ERRORS', () => ENV$3.getBool('DEBUG')); + /** Whether the backend needs to wrap input to imageBitmap. */ + ENV$3.registerFlag('WRAP_TO_IMAGEBITMAP', () => false); + /** Whether to enable canvas2d willReadFrequently for GPU backends */ + ENV$3.registerFlag('CANVAS2D_WILL_READ_FREQUENTLY_FOR_GPU', () => false); + /** Whether to use setTimeoutCustom */ + ENV$3.registerFlag('USE_SETTIMEOUTCUSTOM', () => false); + + /** + * @license + * Copyright 2018 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + function inferShape(val, dtype) { + let firstElem = val; + if (isTypedArray(val)) { + return dtype === 'string' ? [] : [val.length]; + } + if (isWebGLData(val)) { + const usedChannels = val.channels || 'RGBA'; + return [val.height, val.width * usedChannels.length]; + } + else if (isWebGPUData(val)) { + return [val.buffer.size / (dtype == null ? 4 : bytesPerElement(dtype))]; + } + if (!Array.isArray(val)) { + return []; // Scalar. + } + const shape = []; + while (Array.isArray(firstElem) || + isTypedArray(firstElem) && dtype !== 'string') { + shape.push(firstElem.length); + firstElem = firstElem[0]; + } + if (Array.isArray(val) && + env().getBool('TENSORLIKE_CHECK_SHAPE_CONSISTENCY')) { + deepAssertShapeConsistency(val, shape, []); + } + return shape; + } + function deepAssertShapeConsistency(val, shape, indices) { + indices = indices || []; + if (!(Array.isArray(val)) && !isTypedArray(val)) { + assert$1(shape.length === 0, () => `Element arr[${indices.join('][')}] is a primitive, ` + + `but should be an array/TypedArray of ${shape[0]} elements`); + return; + } + assert$1(shape.length > 0, () => `Element arr[${indices.join('][')}] should be a primitive, ` + + `but is an array of ${val.length} elements`); + assert$1(val.length === shape[0], () => `Element arr[${indices.join('][')}] should have ${shape[0]} ` + + `elements, but has ${val.length} elements`); + const subShape = shape.slice(1); + for (let i = 0; i < val.length; ++i) { + 
deepAssertShapeConsistency(val[i], subShape, indices.concat(i)); + } + } + function assertDtype(expectedDtype, actualDType, argName, functionName) { + if (expectedDtype === 'string_or_numeric') { + return; + } + if (expectedDtype == null) { + throw new Error(`Expected dtype cannot be null.`); + } + if (expectedDtype !== 'numeric' && expectedDtype !== actualDType || + expectedDtype === 'numeric' && actualDType === 'string') { + throw new Error(`Argument '${argName}' passed to '${functionName}' must ` + + `be ${expectedDtype} tensor, but got ${actualDType} tensor`); + } + } + function convertToTensor(x, argName, functionName, parseAsDtype = 'numeric') { + if (x instanceof getGlobalTensorClass()) { + assertDtype(parseAsDtype, x.dtype, argName, functionName); + return x; + } + let inferredDtype = inferDtype(x); + // If the user expects a bool/int/float, use that info to update the + // inferredDtype when it is not a string. + if (inferredDtype !== 'string' && + ['bool', 'int32', 'float32'].indexOf(parseAsDtype) >= 0) { + inferredDtype = parseAsDtype; + } + assertDtype(parseAsDtype, inferredDtype, argName, functionName); + if ((x == null) || + (!isTypedArray(x) && !Array.isArray(x) && typeof x !== 'number' && + typeof x !== 'boolean' && typeof x !== 'string')) { + const type = x == null ? 'null' : x.constructor.name; + throw new Error(`Argument '${argName}' passed to '${functionName}' must be a ` + + `Tensor or TensorLike, but got '${type}'`); + } + const inferredShape = inferShape(x, inferredDtype); + if (!isTypedArray(x) && !Array.isArray(x)) { + x = [x]; + } + const skipTypedArray = true; + const values = inferredDtype !== 'string' ? 
+ toTypedArray(x, inferredDtype) : + flatten$2(x, [], skipTypedArray); + return ENGINE.makeTensor(values, inferredShape, inferredDtype); + } + function convertToTensorArray(arg, argName, functionName, parseAsDtype = 'numeric') { + if (!Array.isArray(arg)) { + throw new Error(`Argument ${argName} passed to ${functionName} must be a ` + + '`Tensor[]` or `TensorLike[]`'); + } + const tensors = arg; + return tensors.map((t, i) => convertToTensor(t, `${argName}[${i}]`, functionName, parseAsDtype)); + } + + /** + * @license + * Copyright 2018 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + const OP_SCOPE_SUFFIX = '__op'; + /** + * Used for wrapping functions that perform math operations on + * Tensors. The function will be wrapped in a named scope that cleans all + * memory usage after the function is done. + */ + function op(f) { + const keys = Object.keys(f); + if (keys.length !== 1) { + throw new Error(`Please provide an object with a single key ` + + `(operation name) mapping to a function. Got an object with ` + + `${keys.length} keys.`); + } + let opName = keys[0]; + const fn = f[opName]; + // Strip the underscore from the end of the function name. 
+ if (opName.endsWith('_')) { + opName = opName.substring(0, opName.length - 1); + } + // add an __op suffix to distinguish ops from kernels in tf.profile + opName = opName + OP_SCOPE_SUFFIX; + // tslint:disable-next-line:no-any + const f2 = (...args) => { + ENGINE.startScope(opName); + try { + const result = fn(...args); + if (isPromise(result)) { + console.error('Cannot return a Promise inside of tidy.'); + } + ENGINE.endScope(result); + return result; + } + catch (ex) { + ENGINE.endScope(null); + throw ex; + } + }; + Object.defineProperty(f2, 'name', { value: opName, configurable: true }); + // tslint:disable-next-line:no-any + return f2; + } + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + /** + * Converts two real numbers to a complex number. + * + * Given a tensor `real` representing the real part of a complex number, and a + * tensor `imag` representing the imaginary part of a complex number, this + * operation returns complex numbers elementwise of the form [r0, i0, r1, i1], + * where r represents the real part and i represents the imag part. + * + * The input tensors real and imag must have the same shape. 
+ * + * ```js + * const real = tf.tensor1d([2.25, 3.25]); + * const imag = tf.tensor1d([4.75, 5.75]); + * const complex = tf.complex(real, imag); + * + * complex.print(); + * ``` + * + * @doc {heading: 'Tensors', subheading: 'Creation'} + */ + function complex_(real, imag) { + const $real = convertToTensor(real, 'real', 'complex'); + const $imag = convertToTensor(imag, 'imag', 'complex'); + assertShapesMatch($real.shape, $imag.shape, `real and imag shapes, ${$real.shape} and ${$imag.shape}, ` + + `must match in call to tf.complex().`); + const inputs = { real: $real, imag: $imag }; + return ENGINE.runKernel(Complex, inputs); + } + const complex$2 = /* @__PURE__ */ op({ complex_ }); + + /** + * @license + * Copyright 2018 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + /** This is shared code across all tensor creation methods. */ + function makeTensor(values, shape, inferredShape, dtype) { + if (dtype == null) { + dtype = inferDtype(values); + } + else if (dtype === 'complex64') { + throw new Error(`Cannot construct a complex64 tensor directly. 
` + + `Please use tf.complex(real, imag).`); + } + if (isWebGPUData(values) || isWebGLData(values)) { + if (dtype !== 'float32' && dtype !== 'int32') { + throw new Error(`Creating tensor from GPU data only supports ` + + `'float32'|'int32' dtype, while the dtype is ${dtype}.`); + } + return ENGINE.backend.createTensorFromGPUData(values, shape || inferredShape, dtype); + } + if (!isTypedArray(values) && !Array.isArray(values) && + typeof values !== 'number' && typeof values !== 'boolean' && + typeof values !== 'string') { + throw new Error('values passed to tensor(values) must be a number/boolean/string or ' + + 'an array of numbers/booleans/strings, or a TypedArray'); + } + // Verify that the shape matches the inferred shape. + if (shape != null) { + assertNonNegativeIntegerDimensions(shape); + const providedSize = sizeFromShape(shape); + const inferredSize = sizeFromShape(inferredShape); + assert$1(providedSize === inferredSize, () => `Based on the provided shape, [${shape}], the tensor should have ` + + `${providedSize} values but has ${inferredSize}`); + for (let i = 0; i < inferredShape.length; ++i) { + const inferred = inferredShape[i]; + const flatDimsDontMatch = i === inferredShape.length - 1 ? + inferred !== sizeFromShape(shape.slice(i)) : + true; + assert$1(inferredShape[i] === shape[i] || !flatDimsDontMatch, () => `Error creating a new Tensor. Inferred shape ` + + `(${inferredShape}) does not match the provided ` + + `shape (${shape}). `); + } + } + if (!isTypedArray(values) && !Array.isArray(values)) { + values = [values]; + } + shape = shape || inferredShape; + values = dtype !== 'string' ? + toTypedArray(values, dtype) : + flatten$2(values, [], true); + return ENGINE.makeTensor(values, shape, dtype); + } + + /** + * @license + * Copyright 2018 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + /** + * Creates a `tf.Tensor` with the provided values, shape and dtype. + * + * ```js + * // Pass an array of values to create a vector. + * tf.tensor([1, 2, 3, 4]).print(); + * ``` + * + * ```js + * // Pass a nested array of values to make a matrix or a higher + * // dimensional tensor. + * tf.tensor([[1, 2], [3, 4]]).print(); + * ``` + * + * ```js + * // Pass a flat array and specify a shape yourself. + * tf.tensor([1, 2, 3, 4], [2, 2]).print(); + * ``` + * + * ```js + * // Pass a `WebGLData` object and specify a shape yourself. + * + * // This makes it possible for TF.js applications to avoid GPU / CPU sync. + * // For example, if your application includes a preprocessing step on the GPU, + * // you could upload the GPU output directly to TF.js, rather than first + * // downloading the values. 
+ * + * // Example for WebGL2: + * if (tf.findBackend('custom-webgl') == null) { + * const customCanvas = document.createElement('canvas'); + * const customBackend = new tf.MathBackendWebGL(customCanvas); + * tf.registerBackend('custom-webgl', () => customBackend); + * } + * const savedBackend = tf.getBackend(); + * await tf.setBackend('custom-webgl'); + * const gl = tf.backend().gpgpu.gl; + * const texture = gl.createTexture(); + * const tex2d = gl.TEXTURE_2D; + * const width = 2; + * const height = 2; + * + * gl.bindTexture(tex2d, texture); + * gl.texParameteri(tex2d, gl.TEXTURE_WRAP_S, gl.CLAMP_TO_EDGE); + * gl.texParameteri(tex2d, gl.TEXTURE_WRAP_T, gl.CLAMP_TO_EDGE); + * gl.texParameteri(tex2d, gl.TEXTURE_MIN_FILTER, gl.NEAREST); + * gl.texParameteri(tex2d, gl.TEXTURE_MAG_FILTER, gl.NEAREST); + * gl.texImage2D( + * tex2d, 0, gl.RGBA32F, // internalFormat + * width, height, 0, + * gl.RGBA, // textureFormat + * gl.FLOAT, // textureType + * new Float32Array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]) + * ); + * + * // Currently, the `texture` has 4 pixels: + * // Pixel0 is {R:0, G:1, B:2, A:3} + * // Pixel1 is {R:4, G:5, B:6, A:7} + * // Pixel2 is {R:8, G:9, B:10, A:11} + * // Pixel3 is {R:12, G:13, B:14, A:15} + * + * const logicalShape = [height * width * 2]; + * const a = tf.tensor({texture, height, width, channels: 'BR'}, logicalShape); + * a.print(); + * // Tensor value will be [2, 0, 6, 4, 10, 8, 14, 12], since [2, 0] is the + * // values of 'B' and 'R' channels of Pixel0, [6, 4] is the values of 'B' and + * 'R' + * // channels of Pixel1... + * + * // For postprocessing on the GPU, it's possible to retrieve the texture + * // backing any tensor by calling the tensor's `dataToGPU` method like + * // so: + * + * const tex = a.dataToGPU(); + * await tf.setBackend(savedBackend); + * ``` + * + * ```js + * // Pass a `WebGPUData` object and specify a shape yourself. + * + * // This makes it possible for TF.js applications to avoid GPU / CPU sync. 
+ * // For example, if your application includes a preprocessing step on the GPU, + * // you could upload the GPU output directly to TF.js, rather than first + * // downloading the values. Unlike WebGL, this optionally supports zero copy + * // by WebGPUData.zeroCopy. When zeroCopy is false or undefined(default), this + * // passing GPUBuffer can be destroyed after tensor is created. When zeroCopy + * // is true, this GPUBuffer is bound directly by the tensor, so do not destroy + * // this GPUBuffer until all access is done. + * + * // Example for WebGPU: + * function createGPUBufferFromData(device, data, dtype) { + * const bytesPerElement = 4; + * const sizeInBytes = data.length * bytesPerElement; + * + * const gpuWriteBuffer = device.createBuffer({ + * mappedAtCreation: true, + * size: sizeInBytes, + * usage: GPUBufferUsage.MAP_WRITE | GPUBufferUsage.COPY_SRC + * }); + * const arrayBuffer = gpuWriteBuffer.getMappedRange(); + * if (dtype === 'float32') { + * new Float32Array(arrayBuffer).set(data); + * } else if (dtype === 'int32') { + * new Int32Array(arrayBuffer).set(data); + * } else { + * throw new Error( + * `Creating tensor from GPUBuffer only supports` + + * `'float32'|'int32' dtype, while the dtype is ${dtype}.`); + * } + * gpuWriteBuffer.unmap(); + * + * const gpuReadBuffer = device.createBuffer({ + * mappedAtCreation: false, + * size: sizeInBytes, + * usage: GPUBufferUsage.COPY_DST | GPUBufferUsage.STORAGE | + * GPUBufferUsage.COPY_SRC + * }); + * + * const copyEncoder = device.createCommandEncoder(); + * copyEncoder.copyBufferToBuffer( + * gpuWriteBuffer, 0, gpuReadBuffer, 0, sizeInBytes); + * const copyCommands = copyEncoder.finish(); + * device.queue.submit([copyCommands]); + * gpuWriteBuffer.destroy(); + * return gpuReadBuffer; + * } + * + * const savedBackend = tf.getBackend(); + * await tf.setBackend('webgpu').catch( + * () => {throw new Error( + * 'Failed to use WebGPU backend. 
Please use Chrome Canary to run.')}); + * const dtype = 'float32'; + * const device = tf.backend().device; + * const aData = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]; + * const bData = [1, 2, 3, 4, 1, 2, 3, 4, 1, 2, 3, 4, 1, 2, 3, 4]; + * const expected = [2, 4, 6, 8, 6, 8, 10, 12, 10, 12, 14, 16, 14, 16, 18, 20]; + * const aBuffer = createGPUBufferFromData(device, aData, dtype); + * const shape = [aData.length]; + * // To use zeroCopy, use {buffer: aBuffer, zeroCopy: true} instead and destroy + * // aBuffer untill all access is done. + * const a = tf.tensor({buffer: aBuffer}, shape, dtype); + * const b = tf.tensor(bData, shape, dtype); + * const result = tf.add(a, b); + * result.print(); + * a.dispose(); + * b.dispose(); + * result.dispose(); + * aBuffer.destroy(); + * await tf.setBackend(savedBackend); + * ``` + * @param values The values of the tensor. Can be nested array of numbers, + * or a flat array, or a `TypedArray`(At the moment it supports Uint8Array, + * Uint8ClampedArray, Int32Array, Float32Array) data types, or a `WebGLData` + * object, or a `WebGPUData` object. If the values are strings, they will be + * encoded as utf-8 and kept as `Uint8Array[]`. If the values is a `WebGLData` + * object, the dtype could only be 'float32' or 'int32' and the object has to + * have: 1. texture, a `WebGLTexture`, the texture must share the same + * `WebGLRenderingContext` with TFJS's WebGL backend (you could create a custom + * WebGL backend from your texture's canvas) and the internal texture format + * for the input texture must be floating point or normalized integer; 2. + * height, the height of the texture; 3. width, the width of the texture; 4. + * channels, a non-empty subset of 'RGBA', indicating the values of which + * channels will be passed to the tensor, such as 'R' or 'BR' (The order of the + * channels affect the order of tensor values. ). 
(If the values passed from + * texture is less than the tensor size, zeros will be padded at the rear.). If + * the values is a `WebGPUData` object, the dtype could only be 'float32' or + * 'int32 and the object has to have: buffer, a `GPUBuffer`. The buffer must: + * 1. share the same `GPUDevice` with TFJS's WebGPU backend; 2. buffer.usage + * should at least support GPUBufferUsage.STORAGE | GPUBufferUsage.COPY_SRC; 3. + * buffer.size should not be smaller than the byte size of tensor shape. + * WebGPUData optionally supports zero copy by flag zeroCopy. When zeroCopy is + * false or undefined(default),this passing GPUBuffer can be destroyed after + * tensor is created. When zeroCopy is true, this GPUBuffer is bound directly + * by the tensor, so do not destroy this GPUBuffer until all access is done. + * @param shape The shape of the tensor. Optional. If not provided, + * it is inferred from `values`. + * @param dtype The data type. + * + * @doc {heading: 'Tensors', subheading: 'Creation'} + */ + function tensor(values, shape, dtype) { + const inferredShape = inferShape(values, dtype); + return makeTensor(values, shape, inferredShape, dtype); + } + + /** + * @license + * Copyright 2018 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + /* Type definitions for exporting and importing of models. 
*/ + /** + * A map from Tensor dtype to number of bytes per element of the Tensor. + */ + const DTYPE_VALUE_SIZE_MAP = { + 'float32': 4, + 'float16': 2, + 'int32': 4, + 'uint16': 2, + 'uint8': 1, + 'bool': 1, + 'complex64': 8 + }; + + /** + * Wraps a list of ArrayBuffers into a `slice()`-able object without allocating + * a large ArrayBuffer. + * + * Allocating large ArrayBuffers (~2GB) can be unstable on Chrome. TFJS loads + * its weights as a list of (usually) 4MB ArrayBuffers and then slices the + * weight tensors out of them. For small models, it's safe to concatenate all + * the weight buffers into a single ArrayBuffer and then slice the weight + * tensors out of it, but for large models, a different approach is needed. + */ + class CompositeArrayBuffer { + /** + * Concatenate a number of ArrayBuffers into one. + * + * @param buffers An array of ArrayBuffers to concatenate, or a single + * ArrayBuffer. + * @returns Result of concatenating `buffers` in order. + */ + static join(buffers) { + return new CompositeArrayBuffer(buffers).slice(); + } + constructor(buffers) { + this.shards = []; + this.previousShardIndex = 0; + if (buffers == null) { + return; + } + // Normalize the `buffers` input to be `ArrayBuffer[]`. + if (!(buffers instanceof Array)) { + buffers = [buffers]; + } + buffers = buffers.map((bufferOrTypedArray) => { + if (isTypedArray(bufferOrTypedArray)) { + return bufferOrTypedArray.buffer; + } + return bufferOrTypedArray; + }); + // Skip setting up shards if there are no buffers. + if (buffers.length === 0) { + return; + } + this.bufferUniformSize = buffers[0].byteLength; + let start = 0; + for (let i = 0; i < buffers.length; i++) { + const buffer = buffers[i]; + // Check that all buffers except the last one have the same length. + if (i !== buffers.length - 1 && + buffer.byteLength !== this.bufferUniformSize) { + // Unset the buffer uniform size, since the buffer sizes are not + // uniform. 
+ this.bufferUniformSize = undefined; + } + // Create the shards, including their start and end points. + const end = start + buffer.byteLength; + this.shards.push({ buffer, start, end }); + start = end; + } + // Set the byteLength + if (this.shards.length === 0) { + this.byteLength = 0; + } + this.byteLength = this.shards[this.shards.length - 1].end; + } + slice(start = 0, end = this.byteLength) { + // If there are no shards, then the CompositeArrayBuffer was initialized + // with no data. + if (this.shards.length === 0) { + return new ArrayBuffer(0); + } + // NaN is treated as zero for slicing. This matches ArrayBuffer's behavior. + start = isNaN(Number(start)) ? 0 : start; + end = isNaN(Number(end)) ? 0 : end; + // Fix the bounds to within the array. + start = Math.max(0, start); + end = Math.min(this.byteLength, end); + if (end <= start) { + return new ArrayBuffer(0); + } + const startShardIndex = this.findShardForByte(start); + if (startShardIndex === -1) { + // This should not happen since the start and end indices are always + // within 0 and the composite array's length. + throw new Error(`Could not find start shard for byte ${start}`); + } + const size = end - start; + const outputBuffer = new ArrayBuffer(size); + const outputArray = new Uint8Array(outputBuffer); + let sliced = 0; + for (let i = startShardIndex; i < this.shards.length; i++) { + const shard = this.shards[i]; + const globalStart = start + sliced; + const localStart = globalStart - shard.start; + const outputStart = sliced; + const globalEnd = Math.min(end, shard.end); + const localEnd = globalEnd - shard.start; + const outputSlice = new Uint8Array(shard.buffer, localStart, localEnd - localStart); + outputArray.set(outputSlice, outputStart); + sliced += outputSlice.length; + if (end < shard.end) { + break; + } + } + return outputBuffer; + } + /** + * Get the index of the shard that contains the byte at `byteIndex`. 
+ */ + findShardForByte(byteIndex) { + if (this.shards.length === 0 || byteIndex < 0 || + byteIndex >= this.byteLength) { + return -1; + } + // If the buffers have a uniform size, compute the shard directly. + if (this.bufferUniformSize != null) { + this.previousShardIndex = Math.floor(byteIndex / this.bufferUniformSize); + return this.previousShardIndex; + } + // If the buffers don't have a uniform size, we need to search for the + // shard. That means we need a function to check where the byteIndex lies + // relative to a given shard. + function check(shard) { + if (byteIndex < shard.start) { + return -1; + } + if (byteIndex >= shard.end) { + return 1; + } + return 0; + } + // For efficiency, try the previous shard first. + if (check(this.shards[this.previousShardIndex]) === 0) { + return this.previousShardIndex; + } + // Otherwise, use a generic search function. + // This should almost never end up being used in practice since the weight + // entries should always be in order. + const index = search(this.shards, check); + if (index === -1) { + return -1; + } + this.previousShardIndex = index; + return this.previousShardIndex; + } + } + /** + * Search for an element of a sorted array. + * + * @param sortedArray The sorted array to search + * @param compare A function to compare the current value against the searched + * value. Return 0 on a match, negative if the searched value is less than + * the value passed to the function, and positive if the searched value is + * greater than the value passed to the function. + * @returns The index of the element, or -1 if it's not in the array. 
+ */ + function search(sortedArray, compare) { + // Binary search + let min = 0; + let max = sortedArray.length; + while (min <= max) { + const middle = Math.floor((max - min) / 2) + min; + const side = compare(sortedArray[middle]); + if (side === 0) { + return middle; + } + else if (side < 0) { + max = middle; + } + else { + min = middle + 1; + } + } + return -1; + } + + /** + * @license + * Copyright 2018 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + /** + * Enables production mode which disables correctness checks in favor of + * performance. + * + * @doc {heading: 'Environment'} + */ + function enableProdMode() { + env().set('PROD', true); + } + /** + * Enables debug mode which will log information about all executed kernels: + * the elapsed time of the kernel execution, as well as the rank, shape, and + * size of the output tensor. + * + * Debug mode will significantly slow down your application as it will + * download the result of every operation to the CPU. This should not be used in + * production. Debug mode does not affect the timing information of the kernel + * execution as we do not measure download time in the kernel execution time. + * + * See also: `tf.profile`, `tf.memory`. 
+ * + * @doc {heading: 'Environment'} + */ + function enableDebugMode() { + env().set('DEBUG', true); + } + /** Globally disables deprecation warnings */ + function disableDeprecationWarnings() { + env().set('DEPRECATION_WARNINGS_ENABLED', false); + console.warn(`TensorFlow.js deprecation warnings have been disabled.`); + } + /** Warn users about deprecated functionality. */ + function deprecationWarn(msg) { + if (env().getBool('DEPRECATION_WARNINGS_ENABLED')) { + console.warn(msg + ' You can disable deprecation warnings with ' + + 'tf.disableDeprecationWarnings().'); + } + } + setDeprecationWarningFn(deprecationWarn); + /** + * Dispose all variables kept in backend engine. + * + * @doc {heading: 'Environment'} + */ + function disposeVariables() { + ENGINE.disposeVariables(); + } + /** + * It returns the global engine that keeps track of all tensors and backends. + * + * @doc {heading: 'Environment'} + */ + function engine() { + return ENGINE; + } + /** + * Returns memory info at the current time in the program. The result is an + * object with the following properties: + * + * - `numBytes`: Number of bytes allocated (undisposed) at this time. + * - `numTensors`: Number of unique tensors allocated. + * - `numDataBuffers`: Number of unique data buffers allocated + * (undisposed) at this time, which is ≤ the number of tensors + * (e.g. `a.reshape(newShape)` makes a new Tensor that shares the same + * data buffer with `a`). + * - `unreliable`: True if the memory usage is unreliable. See `reasons` when + * `unreliable` is true. + * - `reasons`: `string[]`, reasons why the memory is unreliable, present if + * `unreliable` is true. + * + * WebGL Properties: + * - `numBytesInGPU`: Number of bytes allocated (undisposed) in the GPU only at + * this time. 
+ * + * @doc {heading: 'Performance', subheading: 'Memory'} + */ + function memory() { + return ENGINE.memory(); + } + /** + * Executes the provided function `f()` and returns a promise that resolves + * with information about the function's memory use: + * - `newBytes`: the number of new bytes allocated + * - `newTensors`: the number of new tensors created + * - `peakBytes`: the peak number of bytes allocated + * - `kernels`: an array of objects for each kernel involved that reports + * their input and output shapes, number of bytes used, and number of new + * tensors created. + * - `kernelNames`: an array of unique strings with just the names of the + * kernels in the `kernels` array. + * + * ```js + * const profile = await tf.profile(() => { + * const x = tf.tensor1d([1, 2, 3]); + * let x2 = x.square(); + * x2.dispose(); + * x2 = x.square(); + * x2.dispose(); + * return x; + * }); + * + * console.log(`newBytes: ${profile.newBytes}`); + * console.log(`newTensors: ${profile.newTensors}`); + * console.log(`byte usage over all kernels: ${profile.kernels.map(k => + * k.totalBytesSnapshot)}`); + * ``` + * + * + * @doc {heading: 'Performance', subheading: 'Profile'} + */ + function profile(f) { + return ENGINE.profile(f); + } + /** + * Executes the provided function `fn` and after it is executed, cleans up all + * intermediate tensors allocated by `fn` except those returned by `fn`. + * `fn` must not return a Promise (async functions not allowed). The returned + * result can be a complex object. + * + * Using this method helps avoid memory leaks. In general, wrap calls to + * operations in `tf.tidy` for automatic memory cleanup. + * + * NOTE: Variables do *not* get cleaned up when inside a tidy(). If you want to + * dispose variables, please use `tf.disposeVariables` or call dispose() + * directly on variables. + * + * ```js + * // y = 2 ^ 2 + 1 + * const y = tf.tidy(() => { + * // a, b, and one will be cleaned up when the tidy ends. 
+ * const one = tf.scalar(1); + * const a = tf.scalar(2); + * const b = a.square(); + * + * console.log('numTensors (in tidy): ' + tf.memory().numTensors); + * + * // The value returned inside the tidy function will return + * // through the tidy, in this case to the variable y. + * return b.add(one); + * }); + * + * console.log('numTensors (outside tidy): ' + tf.memory().numTensors); + * y.print(); + * ``` + * + * @param nameOrFn The name of the closure, or the function to execute. + * If a name is provided, the 2nd argument should be the function. + * If debug mode is on, the timing and the memory usage of the function + * will be tracked and displayed on the console using the provided name. + * @param fn The function to execute. + * + * @doc {heading: 'Performance', subheading: 'Memory'} + */ + function tidy(nameOrFn, fn) { + return ENGINE.tidy(nameOrFn, fn); + } + /** + * Disposes any `tf.Tensor`s found within the provided object. + * + * @param container an object that may be a `tf.Tensor` or may directly + * contain `tf.Tensor`s, such as a `Tensor[]` or `{key: Tensor, ...}`. If + * the object is not a `tf.Tensor` or does not contain `Tensors`, nothing + * happens. In general it is safe to pass any object here, except that + * `Promise`s are not supported. + * + * @doc {heading: 'Performance', subheading: 'Memory'} + */ + function dispose(container) { + const tensors = getTensorsInContainer(container); + tensors.forEach(tensor => tensor.dispose()); + } + /** + * Keeps a `tf.Tensor` generated inside a `tf.tidy` from being disposed + * automatically. + * + * ```js + * let b; + * const y = tf.tidy(() => { + * const one = tf.scalar(1); + * const a = tf.scalar(2); + * + * // b will not be cleaned up by the tidy. a and one will be cleaned up + * // when the tidy ends. 
+ * b = tf.keep(a.square()); + * + * console.log('numTensors (in tidy): ' + tf.memory().numTensors); + * + * // The value returned inside the tidy function will return + * // through the tidy, in this case to the variable y. + * return b.add(one); + * }); + * + * console.log('numTensors (outside tidy): ' + tf.memory().numTensors); + * console.log('y:'); + * y.print(); + * console.log('b:'); + * b.print(); + * ``` + * + * @param result The tensor to keep from being disposed. + * + * @doc {heading: 'Performance', subheading: 'Memory'} + */ + function keep(result) { + return ENGINE.keep(result); + } + /** + * Executes `f()` and returns a promise that resolves with timing + * information. + * + * The result is an object with the following properties: + * + * - `wallMs`: Wall execution time. + * - `kernelMs`: Kernel execution time, ignoring data transfer. If using the + * WebGL backend and the query timer extension is not available, this will + * return an error object. + * - On `WebGL` The following additional properties exist: + * - `uploadWaitMs`: CPU blocking time on texture uploads. + * - `downloadWaitMs`: CPU blocking time on texture downloads (readPixels). + * + * ```js + * const x = tf.randomNormal([20, 20]); + * const time = await tf.time(() => x.matMul(x)); + * + * console.log(`kernelMs: ${time.kernelMs}, wallTimeMs: ${time.wallMs}`); + * ``` + * + * @param f The function to execute and time. + * + * @doc {heading: 'Performance', subheading: 'Timing'} + */ + function time(f) { + return ENGINE.time(f); + } + /** + * Sets the backend (cpu, webgl, wasm, etc) responsible for creating tensors and + * executing operations on those tensors. Returns a promise that resolves + * to a boolean if the backend initialization was successful. + * + * Note this disposes the current backend, if any, as well as any tensors + * associated with it. A new backend is initialized, even if it is of the + * same type as the previous one. 
+ * + * @param backendName The name of the backend. Currently supports + * `'webgl'|'cpu'` in the browser, `'tensorflow'` under node.js + * (requires tfjs-node), and `'wasm'` (requires tfjs-backend-wasm). + * + * @doc {heading: 'Backends'} + */ + function setBackend$1(backendName) { + return ENGINE.setBackend(backendName); + } + /** + * Returns a promise that resolves when the currently selected backend (or the + * highest priority one) has initialized. Await this promise when you are using + * a backend that has async initialization. + * + * @doc {heading: 'Backends'} + */ + function ready() { + return ENGINE.ready(); + } + /** + * Returns the current backend name (cpu, webgl, etc). The backend is + * responsible for creating tensors and executing operations on those tensors. + * + * @doc {heading: 'Backends'} + */ + function getBackend$1() { + return ENGINE.backendName; + } + /** + * Removes a backend and the registered factory. + * + * @doc {heading: 'Backends'} + */ + function removeBackend(name) { + ENGINE.removeBackend(name); + } + /** + * Finds the backend registered under the provided name. Returns null if the + * name is not in the registry, or the registration hasn't finished yet. + */ + function findBackend(name) { + return ENGINE.findBackend(name); + } + /** + * Finds the backend factory registered under the provided name. Returns a + * function that produces a new backend when called. Returns null if the name + * is not in the registry. + */ + function findBackendFactory(name) { + return ENGINE.findBackendFactory(name); + } + /** + * Registers a global backend. The registration should happen when importing + * a module file (e.g. when importing `backend_webgl.ts`), and is used for + * modular builds (e.g. custom tfjs bundle with only webgl support). + * + * @param factory The backend factory function. When called, it should + * return a backend instance, or a promise of an instance. 
+ * @param priority The priority of the backend (higher = more important). + * In case multiple backends are registered, the priority is used to find + * the best backend. Defaults to 1. + * @return False if there is already a registered backend under this name, true + * if not. + * + * @doc {heading: 'Backends'} + */ + function registerBackend(name, factory, priority = 1) { + return ENGINE.registerBackend(name, factory, priority); + } + /** + * Gets the current backend. If no backends have been initialized, this will + * attempt to initialize the best backend. Will throw an error if the highest + * priority backend has async initialization, in which case you should call + * 'await tf.ready()' before running other code. + * + * @doc {heading: 'Backends'} + */ + function backend$1() { + return ENGINE.backend; + } + /** + * Sets the global platform. + * + * @param platformName The name of this platform. + * @param platform A platform implementation. + */ + function setPlatform(platformName, platform) { + env().setPlatform(platformName, platform); + } + + /** + * @license + * Copyright 2018 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + /** Number of bytes reserved for the length of the string. (32bit integer). 
*/ + const NUM_BYTES_STRING_LENGTH = 4; + /** + * Encode a map from names to weight values as an ArrayBuffer, along with an + * `Array` of `WeightsManifestEntry` as specification of the encoded weights. + * + * This function does not perform sharding. + * + * This function is the reverse of `decodeWeights`. + * + * @param tensors A map ("dict") from names to tensors. + * @param group Group to which the weights belong (optional). + * @returns A `Promise` of + * - A flat `ArrayBuffer` with all the binary values of the `Tensor`s + * concatenated. + * - An `Array` of `WeightManifestEntry`s, carrying information including + * tensor names, `dtype`s and shapes. + * @throws Error: on unsupported tensor `dtype`. + */ + async function encodeWeights(tensors, group) { + // TODO(adarob, cais): Support quantization. + const specs = []; + const dataPromises = []; + const names = Array.isArray(tensors) ? + tensors.map(tensor => tensor.name) : + Object.keys(tensors); + for (let i = 0; i < names.length; ++i) { + const name = names[i]; + const t = Array.isArray(tensors) ? 
tensors[i].tensor : tensors[name]; + if (t.dtype !== 'float32' && t.dtype !== 'int32' && t.dtype !== 'bool' && + t.dtype !== 'string' && t.dtype !== 'complex64') { + throw new Error(`Unsupported dtype in weight '${name}': ${t.dtype}`); + } + const spec = { name, shape: t.shape, dtype: t.dtype }; + if (t.dtype === 'string') { + const utf8bytes = new Promise(async (resolve) => { + const vals = await t.bytes(); + const totalNumBytes = vals.reduce((p, c) => p + c.length, 0) + + NUM_BYTES_STRING_LENGTH * vals.length; + const bytes = new Uint8Array(totalNumBytes); + let offset = 0; + for (let i = 0; i < vals.length; i++) { + const val = vals[i]; + const bytesOfLength = new Uint8Array(new Uint32Array([val.length]).buffer); + bytes.set(bytesOfLength, offset); + offset += NUM_BYTES_STRING_LENGTH; + bytes.set(val, offset); + offset += val.length; + } + resolve(bytes); + }); + dataPromises.push(utf8bytes); + } + else { + dataPromises.push(t.data()); + } + if (group != null) { + spec.group = group; + } + specs.push(spec); + } + const tensorValues = await Promise.all(dataPromises); + return { data: concatenateTypedArrays(tensorValues), specs }; + } + /** + * Decode flat ArrayBuffer as weights. + * + * This function does not handle sharding. + * + * This function is the reverse of `encodeWeights`. + * + * @param weightData A flat ArrayBuffer or an array of ArrayBuffers carrying the + * binary values of the tensors concatenated in the order specified in + * `specs`. + * @param specs Specifications of the names, dtypes and shapes of the tensors + * whose value are encoded by `buffer`. + * @return A map from tensor name to tensor value, with the names corresponding + * to names in `specs`. + * @throws Error, if any of the tensors has unsupported dtype. + */ + function decodeWeights(weightData, specs) { + // TODO(adarob, cais): Support quantization. 
+ const compositeBuffer = new CompositeArrayBuffer(weightData); + const out = {}; + let offset = 0; + for (const spec of specs) { + const byteLength = getWeightBytelength(spec, (start, end) => { + return compositeBuffer.slice(offset + start, offset + end); + }); + out[spec.name] = decodeWeight(spec, compositeBuffer + .slice(offset, offset + byteLength)); + offset += byteLength; + } + return out; + } + function getWeightBytelength(spec, slice) { + const size = sizeFromShape(spec.shape); + let bytesPerValue; + if ('quantization' in spec) { + const quantization = spec.quantization; + bytesPerValue = DTYPE_VALUE_SIZE_MAP[quantization.dtype]; + } + else if (spec.dtype === 'string') { + // Can not statically determine string length. + let byteLength = 0; + for (let i = 0; i < size; i++) { + byteLength += NUM_BYTES_STRING_LENGTH + new Uint32Array(slice(byteLength, byteLength + NUM_BYTES_STRING_LENGTH))[0]; + } + return byteLength; + } + else { + bytesPerValue = DTYPE_VALUE_SIZE_MAP[spec.dtype]; + } + return size * bytesPerValue; + } + async function getWeightBytelengthAsync(spec, slice) { + const size = sizeFromShape(spec.shape); + let bytesPerValue; + if ('quantization' in spec) { + const quantization = spec.quantization; + bytesPerValue = DTYPE_VALUE_SIZE_MAP[quantization.dtype]; + } + else if (spec.dtype === 'string') { + // Can not statically determine string length. 
+ let byteLength = 0; + for (let i = 0; i < size; i++) { + byteLength += NUM_BYTES_STRING_LENGTH + new Uint32Array(await slice(byteLength, byteLength + NUM_BYTES_STRING_LENGTH))[0]; + } + return byteLength; + } + else { + bytesPerValue = DTYPE_VALUE_SIZE_MAP[spec.dtype]; + } + return size * bytesPerValue; + } + function decodeWeight(spec, byteBuffer) { + const name = spec.name; + const dtype = spec.dtype; + const shape = spec.shape; + const size = sizeFromShape(shape); + let values; + let offset = 0; + if ('quantization' in spec) { + const quantization = spec.quantization; + if (quantization.dtype === 'uint8' || quantization.dtype === 'uint16') { + if (!('min' in quantization && 'scale' in quantization)) { + throw new Error(`Weight ${spec.name} with quantization ${quantization.dtype} ` + + `doesn't have corresponding metadata min and scale.`); + } + } + else if (quantization.dtype === 'float16') { + if (dtype !== 'float32') { + throw new Error(`Weight ${spec.name} is quantized with ${quantization.dtype} ` + + `which only supports weights of type float32 not ${dtype}.`); + } + } + else { + throw new Error(`Weight ${spec.name} has unknown ` + + `quantization dtype ${quantization.dtype}. ` + + `Supported quantization dtypes are: ` + + `'uint8', 'uint16', and 'float16'.`); + } + const quantizationSizeFactor = DTYPE_VALUE_SIZE_MAP[quantization.dtype]; + const quantizedArray = (quantization.dtype === 'uint8') ? + new Uint8Array(byteBuffer) : + new Uint16Array(byteBuffer); + if (dtype === 'float32') { + if (quantization.dtype === 'uint8' || quantization.dtype === 'uint16') { + values = new Float32Array(quantizedArray.length); + for (let i = 0; i < quantizedArray.length; i++) { + const v = quantizedArray[i]; + values[i] = v * quantization.scale + quantization.min; + } + } + else if (quantization.dtype === 'float16') { + // TODO: This is inefficient. Make getFloat16Decoder efficient. 
+ const float16Decode = getFloat16Decoder(); + values = float16Decode(quantizedArray); + } + else { + throw new Error(`Unsupported quantization type ${quantization.dtype} ` + + `for weight type float32.`); + } + } + else if (dtype === 'int32') { + if (quantization.dtype !== 'uint8' && quantization.dtype !== 'uint16') { + throw new Error(`Unsupported quantization type ${quantization.dtype} ` + + `for weight type int32.`); + } + values = new Int32Array(quantizedArray.length); + for (let i = 0; i < quantizedArray.length; i++) { + const v = quantizedArray[i]; + values[i] = Math.round(v * quantization.scale + quantization.min); + } + } + else { + throw new Error(`Unsupported dtype in weight '${name}': ${dtype}`); + } + offset += size * quantizationSizeFactor; + } + else if (dtype === 'string') { + const size = sizeFromShape(spec.shape); + values = []; + for (let i = 0; i < size; i++) { + const byteLength = new Uint32Array(byteBuffer.slice(offset, offset + NUM_BYTES_STRING_LENGTH))[0]; + offset += NUM_BYTES_STRING_LENGTH; + const bytes = new Uint8Array(byteBuffer.slice(offset, offset + byteLength)); + values.push(bytes); + offset += byteLength; + } + } + else { + const dtypeFactor = DTYPE_VALUE_SIZE_MAP[dtype]; + if (dtype === 'float32') { + values = new Float32Array(byteBuffer); + } + else if (dtype === 'int32') { + values = new Int32Array(byteBuffer); + } + else if (dtype === 'bool') { + values = new Uint8Array(byteBuffer); + } + else if (dtype === 'complex64') { + values = new Float32Array(byteBuffer); + const real = new Float32Array(values.length / 2); + const image = new Float32Array(values.length / 2); + for (let i = 0; i < real.length; i++) { + real[i] = values[i * 2]; + image[i] = values[i * 2 + 1]; + } + const realTensor = tensor(real, shape, 'float32'); + const imageTensor = tensor(image, shape, 'float32'); + const complexTensor = complex$2(realTensor, imageTensor); + realTensor.dispose(); + imageTensor.dispose(); + return complexTensor; + } + else { + throw 
new Error(`Unsupported dtype in weight '${name}': ${dtype}`); + } + offset += size * dtypeFactor; + } + return tensor(values, shape, dtype); + } + async function readToLength(reader, initialData, length) { + let data = new Uint8Array(initialData); + while (data.byteLength < length) { + const { done, value } = await reader.read(); + if (done && value == null) { + const missing = length - data.byteLength; + throw new Error(`Reader is done but ${missing} bytes are still expected`); + } + // TODO: Don't create a new array every loop. + const newData = new Uint8Array(data.length + value.byteLength); + newData.set(data, 0); + newData.set(new Uint8Array(value), data.length); + data = newData; + } + return data.buffer; + } + async function decodeWeightsStream(weightStream, specs) { + const tensors = {}; + const reader = weightStream.getReader(); + let data = new ArrayBuffer(0); + for (const spec of specs) { + const byteLength = await getWeightBytelengthAsync(spec, async (start, end) => { + data = await readToLength(reader, data, end); + return data.slice(start, end); + }); + data = await readToLength(reader, data, byteLength); + // Slice the tensor out + const tensorData = data.slice(0, byteLength); + data = data.slice(byteLength); + const weightTensor = decodeWeight(spec, tensorData); + tensors[spec.name] = weightTensor; + // TODO(mattsoulanille): Better way to call uploadToGPU. + // TODO(mattsoulanille): Make this work for webgl too. + if (getBackend$1() === 'webgpu') { + const b = backend$1(); + if ('uploadToGPU' in b && + sizeFromShape(weightTensor.shape) >= env() + .get('WEBGPU_CPU_HANDOFF_SIZE_THRESHOLD')) { + b.uploadToGPU(weightTensor.dataId); + } + } + } + return tensors; + } + /** + * Concatenate TypedArrays into an ArrayBuffer. + */ + function concatenateTypedArrays(xs) { + // TODO(adarob, cais): Support quantization. 
+ if (xs === null) { + throw new Error(`Invalid input value: ${JSON.stringify(xs)}`); + } + let totalByteLength = 0; + // `normalizedXs` is here for this reason: a `TypedArray`'s `buffer' + // can have a different byte length from that of the `TypedArray` itself, + // for example, when the `TypedArray` is created from an offset in an + // `ArrayBuffer`. `normliazedXs` holds `TypedArray`s whose `buffer`s match + // the `TypedArray` in byte length. If an element of `xs` does not show + // this property, a new `TypedArray` that satisfy this property will be + // constructed and pushed into `normalizedXs`. + const normalizedXs = []; + xs.forEach((x) => { + totalByteLength += x.byteLength; + // tslint:disable:no-any + normalizedXs.push(x.byteLength === x.buffer.byteLength ? x : + new x.constructor(x)); + if (!(x instanceof Float32Array || x instanceof Int32Array || + x instanceof Uint8Array)) { + throw new Error(`Unsupported TypedArray subtype: ${x.constructor.name}`); + } + // tslint:enable:no-any + }); + const y = new Uint8Array(totalByteLength); + let offset = 0; + normalizedXs.forEach((x) => { + y.set(new Uint8Array(x.buffer), offset); + offset += x.byteLength; + }); + return y.buffer; + } + // Use Buffer on Node.js instead of Blob/atob/btoa + const useNodeBuffer = typeof Buffer !== 'undefined' && + (typeof Blob === 'undefined' || typeof atob === 'undefined' || + typeof btoa === 'undefined'); + /** + * Calculate the byte length of a JavaScript string. + * + * Note that a JavaScript string can contain wide characters, therefore the + * length of the string is not necessarily equal to the byte length. + * + * @param str Input string. + * @returns Byte length. + */ + function stringByteLength(str) { + if (useNodeBuffer) { + return Buffer.byteLength(str, 'utf8'); + } + return new Blob([str]).size; + } + /** + * Encode an ArrayBuffer as a base64 encoded string. + * + * @param buffer `ArrayBuffer` to be converted. + * @returns A string that base64-encodes `buffer`. 
+ */ + function arrayBufferToBase64String(buffer) { + if (useNodeBuffer) { + return Buffer.from(buffer).toString('base64'); + } + const buf = new Uint8Array(buffer); + let s = ''; + for (let i = 0, l = buf.length; i < l; i++) { + s += String.fromCharCode(buf[i]); + } + return btoa(s); + } + /** + * Decode a base64 string as an ArrayBuffer. + * + * @param str Base64 string. + * @returns Decoded `ArrayBuffer`. + */ + function base64StringToArrayBuffer(str) { + if (useNodeBuffer) { + const buf = Buffer.from(str, 'base64'); + return buf.buffer.slice(buf.byteOffset, buf.byteOffset + buf.byteLength); + } + const s = atob(str); + const buffer = new Uint8Array(s.length); + for (let i = 0; i < s.length; ++i) { + buffer.set([s.charCodeAt(i)], i); + } + return buffer.buffer; + } + /** + * Concatenate a number of ArrayBuffers into one. + * + * @param buffers An array of ArrayBuffers to concatenate, or a single + * ArrayBuffer. + * @returns Result of concatenating `buffers` in order. + * + * @deprecated Use tf.io.CompositeArrayBuffer.join() instead. + */ + function concatenateArrayBuffers(buffers) { + return CompositeArrayBuffer.join(buffers); + } + /** + * Get the basename of a path. + * + * Behaves in a way analogous to Linux's basename command. + * + * @param path + */ + function basename(path) { + const SEPARATOR = '/'; + path = path.trim(); + while (path.endsWith(SEPARATOR)) { + path = path.slice(0, path.length - 1); + } + const items = path.split(SEPARATOR); + return items[items.length - 1]; + } + /** + * Create `ModelJSON` from `ModelArtifacts`. + * + * @param artifacts Model artifacts, describing the model and its weights. + * @param manifest Weight manifest, describing where the weights of the + * `ModelArtifacts` are stored, and some metadata about them. 
+ * @returns Object representing the `model.json` file describing the model + * artifacts and weights + */ + function getModelJSONForModelArtifacts(artifacts, manifest) { + const result = { + modelTopology: artifacts.modelTopology, + format: artifacts.format, + generatedBy: artifacts.generatedBy, + convertedBy: artifacts.convertedBy, + weightsManifest: manifest + }; + if (artifacts.signature != null) { + result.signature = artifacts.signature; + } + if (artifacts.userDefinedMetadata != null) { + result.userDefinedMetadata = artifacts.userDefinedMetadata; + } + if (artifacts.modelInitializer != null) { + result.modelInitializer = artifacts.modelInitializer; + } + if (artifacts.initializerSignature != null) { + result.initializerSignature = artifacts.initializerSignature; + } + if (artifacts.trainingConfig != null) { + result.trainingConfig = artifacts.trainingConfig; + } + return result; + } + /** + * Create `ModelArtifacts` from a JSON file and weights. + * + * @param modelJSON Object containing the parsed JSON of `model.json` + * @param weightSpecs The list of WeightsManifestEntry for the model. Must be + * passed if the modelJSON has a weightsManifest. + * @param weightData An ArrayBuffer or array of ArrayBuffers of weight data for + * the model corresponding to the weights in weightSpecs. Must be passed if + * the modelJSON has a weightsManifest. + * @returns A Promise of the `ModelArtifacts`, as described by the JSON file. 
+ */ + function getModelArtifactsForJSONSync(modelJSON, weightSpecs, weightData) { + const modelArtifacts = { + modelTopology: modelJSON.modelTopology, + format: modelJSON.format, + generatedBy: modelJSON.generatedBy, + convertedBy: modelJSON.convertedBy + }; + if (modelJSON.trainingConfig != null) { + modelArtifacts.trainingConfig = modelJSON.trainingConfig; + } + if (modelJSON.weightsManifest != null) { + if (!weightSpecs) { + throw new Error('modelJSON has weightsManifest but weightSpecs is null'); + } + if (!weightData) { + throw new Error('modelJSON has weightsManifest but weightData is null'); + } + modelArtifacts.weightSpecs = weightSpecs; + modelArtifacts.weightData = weightData; + } + if (modelJSON.signature != null) { + modelArtifacts.signature = modelJSON.signature; + } + if (modelJSON.userDefinedMetadata != null) { + modelArtifacts.userDefinedMetadata = modelJSON.userDefinedMetadata; + } + if (modelJSON.modelInitializer != null) { + modelArtifacts.modelInitializer = modelJSON.modelInitializer; + } + if (modelJSON.initializerSignature != null) { + modelArtifacts.initializerSignature = modelJSON.initializerSignature; + } + return modelArtifacts; + } + /** + * Create `ModelArtifacts` from a JSON file. + * + * @param modelJSON Object containing the parsed JSON of `model.json` + * @param loadWeights Function that takes the JSON file's weights manifest, + * reads weights from the listed path(s), and returns a Promise of the + * weight manifest entries along with the weights data. + * @returns A Promise of the `ModelArtifacts`, as described by the JSON file. + */ + async function getModelArtifactsForJSON(modelJSON, loadWeights) { + let weightSpecs; + let weightData; + if (modelJSON.weightsManifest != null) { + [weightSpecs, weightData] = await loadWeights(modelJSON.weightsManifest); + } + return getModelArtifactsForJSONSync(modelJSON, weightSpecs, weightData); + } + /** + * Populate ModelArtifactsInfo fields for a model with JSON topology. 
+ * @param modelArtifacts + * @returns A ModelArtifactsInfo object. + */ + function getModelArtifactsInfoForJSON(modelArtifacts) { + if (modelArtifacts.modelTopology instanceof ArrayBuffer) { + throw new Error('Expected JSON model topology, received ArrayBuffer.'); + } + return { + dateSaved: new Date(), + modelTopologyType: 'JSON', + modelTopologyBytes: modelArtifacts.modelTopology == null ? + 0 : + stringByteLength(JSON.stringify(modelArtifacts.modelTopology)), + weightSpecsBytes: modelArtifacts.weightSpecs == null ? + 0 : + stringByteLength(JSON.stringify(modelArtifacts.weightSpecs)), + weightDataBytes: modelArtifacts.weightData == null ? + 0 : + new CompositeArrayBuffer(modelArtifacts.weightData).byteLength, + }; + } + /** + * Concatenate the weights stored in a WeightsManifestConfig into a list of + * WeightsManifestEntry + * + * @param weightsManifest The WeightsManifestConfig to extract weights from. + * @returns A list of WeightsManifestEntry of the weights in the weightsManifest + */ + function getWeightSpecs(weightsManifest) { + const weightSpecs = []; + for (const entry of weightsManifest) { + weightSpecs.push(...entry.weights); + } + return weightSpecs; + } + /** + * Computes mantisa table for casting Float16 to Float32 + * See http://www.fox-toolkit.org/ftp/fasthalffloatconversion.pdf + * + * @returns Uint32Array, 2048 mantissa lookup values. 
+ */ + function computeFloat16MantisaTable() { + const convertMantissa = (i) => { + let m = i << 13; + let e = 0; + while ((m & 0x00800000) === 0) { + e -= 0x00800000; + m <<= 1; + } + m &= ~0x00800000; + e += 0x38800000; + return m | e; + }; + const mantisaTable = new Uint32Array(2048); + mantisaTable[0] = 0; + for (let i = 1; i < 1024; i++) { + mantisaTable[i] = convertMantissa(i); + } + for (let i = 1024; i < 2048; i++) { + mantisaTable[i] = 0x38000000 + ((i - 1024) << 13); + } + return mantisaTable; + } + /** + * Computes exponent table for casting Float16 to Float32 + * See http://www.fox-toolkit.org/ftp/fasthalffloatconversion.pdf + * + * @returns Uint32Array, 64 exponent lookup values. + */ + function computeFloat16ExponentTable() { + const exponentTable = new Uint32Array(64); + exponentTable[0] = 0; + exponentTable[31] = 0x47800000; + exponentTable[32] = 0x80000000; + exponentTable[63] = 0xc7800000; + for (let i = 1; i < 31; i++) { + exponentTable[i] = i << 23; + } + for (let i = 33; i < 63; i++) { + exponentTable[i] = 0x80000000 + ((i - 32) << 23); + } + return exponentTable; + } + /** + * Computes offset table for casting Float16 to Float32 + * See http://www.fox-toolkit.org/ftp/fasthalffloatconversion.pdf + * + * @returns Uint32Array, 6d offset values. + */ + function computeFloat16OffsetTable() { + const offsetTable = new Uint32Array(64); + for (let i = 0; i < 64; i++) { + offsetTable[i] = 1024; + } + offsetTable[0] = offsetTable[32] = 0; + return offsetTable; + } + /** + * Retrieve a Float16 decoder which will decode a ByteArray of Float16 values + * to a Float32Array. + * + * @returns Function (buffer: Uint16Array) => Float32Array which decodes + * the Uint16Array of Float16 bytes to a Float32Array. 
+ */ + function getFloat16Decoder() { + // Algorithm is based off of + // http://www.fox-toolkit.org/ftp/fasthalffloatconversion.pdf + // Cache lookup tables + const mantisaTable = computeFloat16MantisaTable(); + const exponentTable = computeFloat16ExponentTable(); + const offsetTable = computeFloat16OffsetTable(); + return (quantizedArray) => { + const buffer = new ArrayBuffer(4 * quantizedArray.length); + const bufferUint32View = new Uint32Array(buffer); + for (let index = 0; index < quantizedArray.length; index++) { + const float16Bits = quantizedArray[index]; + const float32Bits = mantisaTable[offsetTable[float16Bits >> 10] + (float16Bits & 0x3ff)] + + exponentTable[float16Bits >> 10]; + bufferUint32View[index] = float32Bits; + } + return new Float32Array(buffer); + }; + } + + /** + * @license + * Copyright 2018 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + class IORouterRegistry { + constructor() { + this.saveRouters = []; + this.loadRouters = []; + } + static getInstance() { + if (IORouterRegistry.instance == null) { + IORouterRegistry.instance = new IORouterRegistry(); + } + return IORouterRegistry.instance; + } + /** + * Register a save-handler router. + * + * @param saveRouter A function that maps a URL-like string onto an instance + * of `IOHandler` with the `save` method defined or `null`. 
+ */ + static registerSaveRouter(saveRouter) { + IORouterRegistry.getInstance().saveRouters.push(saveRouter); + } + /** + * Register a load-handler router. + * + * @param loadRouter A function that maps a URL-like string onto an instance + * of `IOHandler` with the `load` method defined or `null`. + */ + static registerLoadRouter(loadRouter) { + IORouterRegistry.getInstance().loadRouters.push(loadRouter); + } + /** + * Look up IOHandler for saving, given a URL-like string. + * + * @param url + * @returns If only one match is found, an instance of IOHandler with the + * `save` method defined. If no match is found, `null`. + * @throws Error, if more than one match is found. + */ + static getSaveHandlers(url) { + return IORouterRegistry.getHandlers(url, 'save'); + } + /** + * Look up IOHandler for loading, given a URL-like string. + * + * @param url + * @param loadOptions Optional, custom load options. + * @returns All valid handlers for `url`, given the currently registered + * handler routers. + */ + static getLoadHandlers(url, loadOptions) { + return IORouterRegistry.getHandlers(url, 'load', loadOptions); + } + static getHandlers(url, handlerType, loadOptions) { + const validHandlers = []; + const routers = handlerType === 'load' ? + IORouterRegistry.getInstance().loadRouters : + IORouterRegistry.getInstance().saveRouters; + routers.forEach(router => { + const handler = router(url, loadOptions); + if (handler !== null) { + validHandlers.push(handler); + } + }); + return validHandlers; + } + } + const registerSaveRouter = (loudRouter) => IORouterRegistry.registerSaveRouter(loudRouter); + const registerLoadRouter = (loudRouter) => IORouterRegistry.registerLoadRouter(loudRouter); + const getSaveHandlers = (url) => IORouterRegistry.getSaveHandlers(url); + const getLoadHandlers = (url, loadOptions) => IORouterRegistry.getLoadHandlers(url, loadOptions); + + /** + * @license + * Copyright 2018 Google LLC. All Rights Reserved. 
+ * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + const DATABASE_NAME = 'tensorflowjs'; + const DATABASE_VERSION = 1; + // Model data and ModelArtifactsInfo (metadata) are stored in two separate + // stores for efficient access of the list of stored models and their metadata. + // 1. The object store for model data: topology, weights and weight manifests. + const MODEL_STORE_NAME = 'models_store'; + // 2. The object store for ModelArtifactsInfo, including meta-information such + // as the type of topology (JSON vs binary), byte size of the topology, byte + // size of the weights, etc. + const INFO_STORE_NAME = 'model_info_store'; + /** + * Delete the entire database for tensorflow.js, including the models store. + */ + async function deleteDatabase() { + const idbFactory = getIndexedDBFactory(); + return new Promise((resolve, reject) => { + const deleteRequest = idbFactory.deleteDatabase(DATABASE_NAME); + deleteRequest.onsuccess = () => resolve(); + deleteRequest.onerror = error => reject(error); + }); + } + function getIndexedDBFactory() { + if (!env().getBool('IS_BROWSER')) { + // TODO(cais): Add more info about what IOHandler subtypes are available. + // Maybe point to a doc page on the web and/or automatically determine + // the available IOHandlers and print them in the error message. 
+ throw new Error('Failed to obtain IndexedDB factory because the current environment' + + 'is not a web browser.'); + } + // tslint:disable-next-line:no-any + const theWindow = typeof window === 'undefined' ? self : window; + const factory = theWindow.indexedDB || theWindow.mozIndexedDB || + theWindow.webkitIndexedDB || theWindow.msIndexedDB || + theWindow.shimIndexedDB; + if (factory == null) { + throw new Error('The current browser does not appear to support IndexedDB.'); + } + return factory; + } + function setUpDatabase(openRequest) { + const db = openRequest.result; + db.createObjectStore(MODEL_STORE_NAME, { keyPath: 'modelPath' }); + db.createObjectStore(INFO_STORE_NAME, { keyPath: 'modelPath' }); + } + /** + * IOHandler subclass: Browser IndexedDB. + * + * See the doc string of `browserIndexedDB` for more details. + */ + class BrowserIndexedDB { + constructor(modelPath) { + this.indexedDB = getIndexedDBFactory(); + if (modelPath == null || !modelPath) { + throw new Error('For IndexedDB, modelPath must not be null, undefined or empty.'); + } + this.modelPath = modelPath; + } + async save(modelArtifacts) { + // TODO(cais): Support saving GraphDef models. + if (modelArtifacts.modelTopology instanceof ArrayBuffer) { + throw new Error('BrowserLocalStorage.save() does not support saving model topology ' + + 'in binary formats yet.'); + } + return this.databaseAction(this.modelPath, modelArtifacts); + } + async load() { + return this.databaseAction(this.modelPath); + } + /** + * Perform database action to put model artifacts into or read model artifacts + * from IndexedDB object store. + * + * Whether the action is put or get depends on whether `modelArtifacts` is + * specified. If it is specified, the action will be put; otherwise the action + * will be get. + * + * @param modelPath A unique string path for the model. + * @param modelArtifacts If specified, it will be the model artifacts to be + * stored in IndexedDB. 
+ * @returns A `Promise` of `SaveResult`, if the action is put, or a `Promise` + * of `ModelArtifacts`, if the action is get. + */ + databaseAction(modelPath, modelArtifacts) { + return new Promise((resolve, reject) => { + const openRequest = this.indexedDB.open(DATABASE_NAME, DATABASE_VERSION); + openRequest.onupgradeneeded = () => setUpDatabase(openRequest); + openRequest.onsuccess = () => { + const db = openRequest.result; + if (modelArtifacts == null) { + // Read model out from object store. + const modelTx = db.transaction(MODEL_STORE_NAME, 'readonly'); + const modelStore = modelTx.objectStore(MODEL_STORE_NAME); + const getRequest = modelStore.get(this.modelPath); + getRequest.onsuccess = () => { + if (getRequest.result == null) { + db.close(); + return reject(new Error(`Cannot find model with path '${this.modelPath}' ` + + `in IndexedDB.`)); + } + else { + resolve(getRequest.result.modelArtifacts); + } + }; + getRequest.onerror = error => { + db.close(); + return reject(getRequest.error); + }; + modelTx.oncomplete = () => db.close(); + } + else { + // Put model into object store. + // Concatenate all the model weights into a single ArrayBuffer. Large + // models (~1GB) have problems saving if they are not concatenated. + // TODO(mattSoulanille): Save large models to multiple indexeddb + // records. + modelArtifacts.weightData = CompositeArrayBuffer.join(modelArtifacts.weightData); + const modelArtifactsInfo = getModelArtifactsInfoForJSON(modelArtifacts); + // First, put ModelArtifactsInfo into info store. + const infoTx = db.transaction(INFO_STORE_NAME, 'readwrite'); + let infoStore = infoTx.objectStore(INFO_STORE_NAME); + let putInfoRequest; + try { + putInfoRequest = + infoStore.put({ modelPath: this.modelPath, modelArtifactsInfo }); + } + catch (error) { + return reject(error); + } + let modelTx; + putInfoRequest.onsuccess = () => { + // Second, put model data into model store. 
+ modelTx = db.transaction(MODEL_STORE_NAME, 'readwrite'); + const modelStore = modelTx.objectStore(MODEL_STORE_NAME); + let putModelRequest; + try { + putModelRequest = modelStore.put({ + modelPath: this.modelPath, + modelArtifacts, + modelArtifactsInfo + }); + } + catch (error) { + // Sometimes, the serialized value is too large to store. + return reject(error); + } + putModelRequest.onsuccess = () => resolve({ modelArtifactsInfo }); + putModelRequest.onerror = error => { + // If the put-model request fails, roll back the info entry as + // well. + infoStore = infoTx.objectStore(INFO_STORE_NAME); + const deleteInfoRequest = infoStore.delete(this.modelPath); + deleteInfoRequest.onsuccess = () => { + db.close(); + return reject(putModelRequest.error); + }; + deleteInfoRequest.onerror = error => { + db.close(); + return reject(putModelRequest.error); + }; + }; + }; + putInfoRequest.onerror = error => { + db.close(); + return reject(putInfoRequest.error); + }; + infoTx.oncomplete = () => { + if (modelTx == null) { + db.close(); + } + else { + modelTx.oncomplete = () => db.close(); + } + }; + } + }; + openRequest.onerror = error => reject(openRequest.error); + }); + } + } + BrowserIndexedDB.URL_SCHEME = 'indexeddb://'; + const indexedDBRouter = (url) => { + if (!env().getBool('IS_BROWSER')) { + return null; + } + else { + if (!Array.isArray(url) && url.startsWith(BrowserIndexedDB.URL_SCHEME)) { + return browserIndexedDB(url.slice(BrowserIndexedDB.URL_SCHEME.length)); + } + else { + return null; + } + } + }; + IORouterRegistry.registerSaveRouter(indexedDBRouter); + IORouterRegistry.registerLoadRouter(indexedDBRouter); + /** + * Creates a browser IndexedDB IOHandler for saving and loading models. 
+ * + * ```js + * const model = tf.sequential(); + * model.add( + * tf.layers.dense({units: 1, inputShape: [100], activation: 'sigmoid'})); + * + * const saveResult = await model.save('indexeddb://MyModel')); + * console.log(saveResult); + * ``` + * + * @param modelPath A unique identifier for the model to be saved. Must be a + * non-empty string. + * @returns An instance of `BrowserIndexedDB` (subclass of `IOHandler`), + * which can be used with, e.g., `tf.Model.save`. + */ + function browserIndexedDB(modelPath) { + return new BrowserIndexedDB(modelPath); + } + function maybeStripScheme$1(key) { + return key.startsWith(BrowserIndexedDB.URL_SCHEME) ? + key.slice(BrowserIndexedDB.URL_SCHEME.length) : + key; + } + class BrowserIndexedDBManager { + constructor() { + this.indexedDB = getIndexedDBFactory(); + } + async listModels() { + return new Promise((resolve, reject) => { + const openRequest = this.indexedDB.open(DATABASE_NAME, DATABASE_VERSION); + openRequest.onupgradeneeded = () => setUpDatabase(openRequest); + openRequest.onsuccess = () => { + const db = openRequest.result; + const tx = db.transaction(INFO_STORE_NAME, 'readonly'); + const store = tx.objectStore(INFO_STORE_NAME); + // tslint:disable:max-line-length + // Need to cast `store` as `any` here because TypeScript's DOM + // library does not have the `getAll()` method even though the + // method is supported in the latest version of most mainstream + // browsers: + // https://developer.mozilla.org/en-US/docs/Web/API/IDBObjectStore/getAll + // tslint:enable:max-line-length + // tslint:disable-next-line:no-any + const getAllInfoRequest = store.getAll(); + getAllInfoRequest.onsuccess = () => { + const out = {}; + for (const item of getAllInfoRequest.result) { + out[item.modelPath] = item.modelArtifactsInfo; + } + resolve(out); + }; + getAllInfoRequest.onerror = error => { + db.close(); + return reject(getAllInfoRequest.error); + }; + tx.oncomplete = () => db.close(); + }; + openRequest.onerror = error => 
reject(openRequest.error); + }); + } + async removeModel(path) { + path = maybeStripScheme$1(path); + return new Promise((resolve, reject) => { + const openRequest = this.indexedDB.open(DATABASE_NAME, DATABASE_VERSION); + openRequest.onupgradeneeded = () => setUpDatabase(openRequest); + openRequest.onsuccess = () => { + const db = openRequest.result; + const infoTx = db.transaction(INFO_STORE_NAME, 'readwrite'); + const infoStore = infoTx.objectStore(INFO_STORE_NAME); + const getInfoRequest = infoStore.get(path); + let modelTx; + getInfoRequest.onsuccess = () => { + if (getInfoRequest.result == null) { + db.close(); + return reject(new Error(`Cannot find model with path '${path}' ` + + `in IndexedDB.`)); + } + else { + // First, delete the entry in the info store. + const deleteInfoRequest = infoStore.delete(path); + const deleteModelData = () => { + // Second, delete the entry in the model store. + modelTx = db.transaction(MODEL_STORE_NAME, 'readwrite'); + const modelStore = modelTx.objectStore(MODEL_STORE_NAME); + const deleteModelRequest = modelStore.delete(path); + deleteModelRequest.onsuccess = () => resolve(getInfoRequest.result.modelArtifactsInfo); + deleteModelRequest.onerror = error => reject(getInfoRequest.error); + }; + // Proceed with deleting model data regardless of whether deletion + // of info data succeeds or not. + deleteInfoRequest.onsuccess = deleteModelData; + deleteInfoRequest.onerror = error => { + deleteModelData(); + db.close(); + return reject(getInfoRequest.error); + }; + } + }; + getInfoRequest.onerror = error => { + db.close(); + return reject(getInfoRequest.error); + }; + infoTx.oncomplete = () => { + if (modelTx == null) { + db.close(); + } + else { + modelTx.oncomplete = () => db.close(); + } + }; + }; + openRequest.onerror = error => reject(openRequest.error); + }); + } + } + + /** + * @license + * Copyright 2018 Google LLC. All Rights Reserved. 
+ * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + const PATH_SEPARATOR = '/'; + const PATH_PREFIX = 'tensorflowjs_models'; + const INFO_SUFFIX = 'info'; + const MODEL_TOPOLOGY_SUFFIX = 'model_topology'; + const WEIGHT_SPECS_SUFFIX = 'weight_specs'; + const WEIGHT_DATA_SUFFIX = 'weight_data'; + const MODEL_METADATA_SUFFIX = 'model_metadata'; + /** + * Purge all tensorflow.js-saved model artifacts from local storage. + * + * @returns Paths of the models purged. 
+ */ + function purgeLocalStorageArtifacts() { + if (!env().getBool('IS_BROWSER') || typeof window === 'undefined' || + typeof window.localStorage === 'undefined') { + throw new Error('purgeLocalStorageModels() cannot proceed because local storage is ' + + 'unavailable in the current environment.'); + } + const LS = window.localStorage; + const purgedModelPaths = []; + for (let i = 0; i < LS.length; ++i) { + const key = LS.key(i); + const prefix = PATH_PREFIX + PATH_SEPARATOR; + if (key.startsWith(prefix) && key.length > prefix.length) { + LS.removeItem(key); + const modelName = getModelPathFromKey(key); + if (purgedModelPaths.indexOf(modelName) === -1) { + purgedModelPaths.push(modelName); + } + } + } + return purgedModelPaths; + } + function getModelKeys(path) { + return { + info: [PATH_PREFIX, path, INFO_SUFFIX].join(PATH_SEPARATOR), + topology: [PATH_PREFIX, path, MODEL_TOPOLOGY_SUFFIX].join(PATH_SEPARATOR), + weightSpecs: [PATH_PREFIX, path, WEIGHT_SPECS_SUFFIX].join(PATH_SEPARATOR), + weightData: [PATH_PREFIX, path, WEIGHT_DATA_SUFFIX].join(PATH_SEPARATOR), + modelMetadata: [PATH_PREFIX, path, MODEL_METADATA_SUFFIX].join(PATH_SEPARATOR) + }; + } + function removeItems(keys) { + for (const key of Object.values(keys)) { + window.localStorage.removeItem(key); + } + } + /** + * Get model path from a local-storage key. + * + * E.g., 'tensorflowjs_models/my/model/1/info' --> 'my/model/1' + * + * @param key + */ + function getModelPathFromKey(key) { + const items = key.split(PATH_SEPARATOR); + if (items.length < 3) { + throw new Error(`Invalid key format: ${key}`); + } + return items.slice(1, items.length - 1).join(PATH_SEPARATOR); + } + function maybeStripScheme(key) { + return key.startsWith(BrowserLocalStorage.URL_SCHEME) ? + key.slice(BrowserLocalStorage.URL_SCHEME.length) : + key; + } + /** + * IOHandler subclass: Browser Local Storage. + * + * See the doc string to `browserLocalStorage` for more details. 
+ */ + class BrowserLocalStorage { + constructor(modelPath) { + if (!env().getBool('IS_BROWSER') || typeof window === 'undefined' || + typeof window.localStorage === 'undefined') { + // TODO(cais): Add more info about what IOHandler subtypes are + // available. + // Maybe point to a doc page on the web and/or automatically determine + // the available IOHandlers and print them in the error message. + throw new Error('The current environment does not support local storage.'); + } + this.LS = window.localStorage; + if (modelPath == null || !modelPath) { + throw new Error('For local storage, modelPath must not be null, undefined or empty.'); + } + this.modelPath = modelPath; + this.keys = getModelKeys(this.modelPath); + } + /** + * Save model artifacts to browser local storage. + * + * See the documentation to `browserLocalStorage` for details on the saved + * artifacts. + * + * @param modelArtifacts The model artifacts to be stored. + * @returns An instance of SaveResult. + */ + async save(modelArtifacts) { + if (modelArtifacts.modelTopology instanceof ArrayBuffer) { + throw new Error('BrowserLocalStorage.save() does not support saving model topology ' + + 'in binary formats yet.'); + } + else { + const topology = JSON.stringify(modelArtifacts.modelTopology); + const weightSpecs = JSON.stringify(modelArtifacts.weightSpecs); + const modelArtifactsInfo = getModelArtifactsInfoForJSON(modelArtifacts); + // TODO(mattsoulanille): Support saving models over 2GB that exceed + // Chrome's ArrayBuffer size limit. 
+ const weightBuffer = CompositeArrayBuffer.join(modelArtifacts.weightData); + try { + this.LS.setItem(this.keys.info, JSON.stringify(modelArtifactsInfo)); + this.LS.setItem(this.keys.topology, topology); + this.LS.setItem(this.keys.weightSpecs, weightSpecs); + this.LS.setItem(this.keys.weightData, arrayBufferToBase64String(weightBuffer)); + // Note that JSON.stringify doesn't write out keys that have undefined + // values, so for some keys, we set undefined instead of a null-ish + // value. + const metadata = { + format: modelArtifacts.format, + generatedBy: modelArtifacts.generatedBy, + convertedBy: modelArtifacts.convertedBy, + signature: modelArtifacts.signature != null ? + modelArtifacts.signature : + undefined, + userDefinedMetadata: modelArtifacts.userDefinedMetadata != null ? + modelArtifacts.userDefinedMetadata : + undefined, + modelInitializer: modelArtifacts.modelInitializer != null ? + modelArtifacts.modelInitializer : + undefined, + initializerSignature: modelArtifacts.initializerSignature != null ? + modelArtifacts.initializerSignature : + undefined, + trainingConfig: modelArtifacts.trainingConfig != null ? + modelArtifacts.trainingConfig : + undefined + }; + this.LS.setItem(this.keys.modelMetadata, JSON.stringify(metadata)); + return { modelArtifactsInfo }; + } + catch (err) { + // If saving failed, clean up all items saved so far. + removeItems(this.keys); + throw new Error(`Failed to save model '${this.modelPath}' to local storage: ` + + `size quota being exceeded is a possible cause of this failure: ` + + `modelTopologyBytes=${modelArtifactsInfo.modelTopologyBytes}, ` + + `weightSpecsBytes=${modelArtifactsInfo.weightSpecsBytes}, ` + + `weightDataBytes=${modelArtifactsInfo.weightDataBytes}.`); + } + } + } + /** + * Load a model from local storage. + * + * See the documentation to `browserLocalStorage` for details on the saved + * artifacts. + * + * @returns The loaded model (if loading succeeds). 
+ */ + async load() { + const info = JSON.parse(this.LS.getItem(this.keys.info)); + if (info == null) { + throw new Error(`In local storage, there is no model with name '${this.modelPath}'`); + } + if (info.modelTopologyType !== 'JSON') { + throw new Error('BrowserLocalStorage does not support loading non-JSON model ' + + 'topology yet.'); + } + const out = {}; + // Load topology. + const topology = JSON.parse(this.LS.getItem(this.keys.topology)); + if (topology == null) { + throw new Error(`In local storage, the topology of model '${this.modelPath}' ` + + `is missing.`); + } + out.modelTopology = topology; + // Load weight specs. + const weightSpecs = JSON.parse(this.LS.getItem(this.keys.weightSpecs)); + if (weightSpecs == null) { + throw new Error(`In local storage, the weight specs of model '${this.modelPath}' ` + + `are missing.`); + } + out.weightSpecs = weightSpecs; + // Load meta-data fields. + const metadataString = this.LS.getItem(this.keys.modelMetadata); + if (metadataString != null) { + const metadata = JSON.parse(metadataString); + out.format = metadata.format; + out.generatedBy = metadata.generatedBy; + out.convertedBy = metadata.convertedBy; + if (metadata.signature != null) { + out.signature = metadata.signature; + } + if (metadata.userDefinedMetadata != null) { + out.userDefinedMetadata = metadata.userDefinedMetadata; + } + if (metadata.modelInitializer != null) { + out.modelInitializer = metadata.modelInitializer; + } + if (metadata.initializerSignature != null) { + out.initializerSignature = metadata.initializerSignature; + } + if (metadata.trainingConfig != null) { + out.trainingConfig = metadata.trainingConfig; + } + } + // Load weight data. 
+ const weightDataBase64 = this.LS.getItem(this.keys.weightData); + if (weightDataBase64 == null) { + throw new Error(`In local storage, the binary weight values of model ` + + `'${this.modelPath}' are missing.`); + } + out.weightData = base64StringToArrayBuffer(weightDataBase64); + return out; + } + } + BrowserLocalStorage.URL_SCHEME = 'localstorage://'; + const localStorageRouter = (url) => { + if (!env().getBool('IS_BROWSER')) { + return null; + } + else { + if (!Array.isArray(url) && url.startsWith(BrowserLocalStorage.URL_SCHEME)) { + return browserLocalStorage(url.slice(BrowserLocalStorage.URL_SCHEME.length)); + } + else { + return null; + } + } + }; + IORouterRegistry.registerSaveRouter(localStorageRouter); + IORouterRegistry.registerLoadRouter(localStorageRouter); + /** + * Factory function for local storage IOHandler. + * + * This `IOHandler` supports both `save` and `load`. + * + * For each model's saved artifacts, four items are saved to local storage. + * - `${PATH_SEPARATOR}/${modelPath}/info`: Contains meta-info about the + * model, such as date saved, type of the topology, size in bytes, etc. + * - `${PATH_SEPARATOR}/${modelPath}/topology`: Model topology. For Keras- + * style models, this is a stringized JSON. + * - `${PATH_SEPARATOR}/${modelPath}/weight_specs`: Weight specs of the + * model, can be used to decode the saved binary weight values (see + * item below). + * - `${PATH_SEPARATOR}/${modelPath}/weight_data`: Concatenated binary + * weight values, stored as a base64-encoded string. + * + * Saving may throw an `Error` if the total size of the artifacts exceed the + * browser-specific quota. + * + * @param modelPath A unique identifier for the model to be saved. Must be a + * non-empty string. + * @returns An instance of `IOHandler`, which can be used with, e.g., + * `tf.Model.save`. 
+ */ + function browserLocalStorage(modelPath) { + return new BrowserLocalStorage(modelPath); + } + class BrowserLocalStorageManager { + constructor() { + assert$1(env().getBool('IS_BROWSER'), () => 'Current environment is not a web browser'); + assert$1(typeof window === 'undefined' || + typeof window.localStorage !== 'undefined', () => 'Current browser does not appear to support localStorage'); + this.LS = window.localStorage; + } + async listModels() { + const out = {}; + const prefix = PATH_PREFIX + PATH_SEPARATOR; + const suffix = PATH_SEPARATOR + INFO_SUFFIX; + for (let i = 0; i < this.LS.length; ++i) { + const key = this.LS.key(i); + if (key.startsWith(prefix) && key.endsWith(suffix)) { + const modelPath = getModelPathFromKey(key); + out[modelPath] = JSON.parse(this.LS.getItem(key)); + } + } + return out; + } + async removeModel(path) { + path = maybeStripScheme(path); + const keys = getModelKeys(path); + if (this.LS.getItem(keys.info) == null) { + throw new Error(`Cannot find model at path '${path}'`); + } + const info = JSON.parse(this.LS.getItem(keys.info)); + removeItems(keys); + return info; + } + } + + /** + * @license + * Copyright 2018 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ + const URL_SCHEME_SUFFIX = '://'; + class ModelStoreManagerRegistry { + constructor() { + this.managers = {}; + } + static getInstance() { + if (ModelStoreManagerRegistry.instance == null) { + ModelStoreManagerRegistry.instance = new ModelStoreManagerRegistry(); + } + return ModelStoreManagerRegistry.instance; + } + /** + * Register a save-handler router. + * + * @param saveRouter A function that maps a URL-like string onto an instance + * of `IOHandler` with the `save` method defined or `null`. + */ + static registerManager(scheme, manager) { + assert$1(scheme != null, () => 'scheme must not be undefined or null.'); + if (scheme.endsWith(URL_SCHEME_SUFFIX)) { + scheme = scheme.slice(0, scheme.indexOf(URL_SCHEME_SUFFIX)); + } + assert$1(scheme.length > 0, () => 'scheme must not be an empty string.'); + const registry = ModelStoreManagerRegistry.getInstance(); + assert$1(registry.managers[scheme] == null, () => `A model store manager is already registered for scheme '${scheme}'.`); + registry.managers[scheme] = manager; + } + static getManager(scheme) { + const manager = ModelStoreManagerRegistry.getInstance().managers[scheme]; + if (manager == null) { + throw new Error(`Cannot find model manager for scheme '${scheme}'`); + } + return manager; + } + static getSchemes() { + return Object.keys(ModelStoreManagerRegistry.getInstance().managers); + } + } + /** + * Helper method for parsing a URL string into a scheme and a path. + * + * @param url E.g., 'localstorage://my-model' + * @returns A dictionary with two fields: scheme and path. + * Scheme: e.g., 'localstorage' in the example above. + * Path: e.g., 'my-model' in the example above. + */ + function parseURL(url) { + if (url.indexOf(URL_SCHEME_SUFFIX) === -1) { + throw new Error(`The url string provided does not contain a scheme. 
` + + `Supported schemes are: ` + + `${ModelStoreManagerRegistry.getSchemes().join(',')}`); + } + return { + scheme: url.split(URL_SCHEME_SUFFIX)[0], + path: url.split(URL_SCHEME_SUFFIX)[1], + }; + } + async function cloneModelInternal(sourceURL, destURL, deleteSource = false) { + assert$1(sourceURL !== destURL, () => `Old path and new path are the same: '${sourceURL}'`); + const loadHandlers = IORouterRegistry.getLoadHandlers(sourceURL); + assert$1(loadHandlers.length > 0, () => `Copying failed because no load handler is found for source URL ${sourceURL}.`); + assert$1(loadHandlers.length < 2, () => `Copying failed because more than one (${loadHandlers.length}) ` + + `load handlers for source URL ${sourceURL}.`); + const loadHandler = loadHandlers[0]; + const saveHandlers = IORouterRegistry.getSaveHandlers(destURL); + assert$1(saveHandlers.length > 0, () => `Copying failed because no save handler is found for destination ` + + `URL ${destURL}.`); + assert$1(saveHandlers.length < 2, () => `Copying failed because more than one (${loadHandlers.length}) ` + + `save handlers for destination URL ${destURL}.`); + const saveHandler = saveHandlers[0]; + const sourceScheme = parseURL(sourceURL).scheme; + const sourcePath = parseURL(sourceURL).path; + const sameMedium = sourceScheme === parseURL(sourceURL).scheme; + const modelArtifacts = await loadHandler.load(); + // If moving within the same storage medium, remove the old model as soon as + // the loading is done. Without doing this, it is possible that the combined + // size of the two models will cause the cloning to fail. + if (deleteSource && sameMedium) { + await ModelStoreManagerRegistry.getManager(sourceScheme) + .removeModel(sourcePath); + } + const saveResult = await saveHandler.save(modelArtifacts); + // If moving between mediums, the deletion is done after the save succeeds. + // This guards against the case in which saving to the destination medium + // fails. 
+ if (deleteSource && !sameMedium) { + await ModelStoreManagerRegistry.getManager(sourceScheme) + .removeModel(sourcePath); + } + return saveResult.modelArtifactsInfo; + } + /** + * List all models stored in registered storage mediums. + * + * For a web browser environment, the registered mediums are Local Storage and + * IndexedDB. + * + * ```js + * // First create and save a model. + * const model = tf.sequential(); + * model.add(tf.layers.dense( + * {units: 1, inputShape: [10], activation: 'sigmoid'})); + * await model.save('localstorage://demo/management/model1'); + * + * // Then list existing models. + * console.log(JSON.stringify(await tf.io.listModels())); + * + * // Delete the model. + * await tf.io.removeModel('localstorage://demo/management/model1'); + * + * // List models again. + * console.log(JSON.stringify(await tf.io.listModels())); + * ``` + * + * @returns A `Promise` of a dictionary mapping URLs of existing models to + * their model artifacts info. URLs include medium-specific schemes, e.g., + * 'indexeddb://my/model/1'. Model artifacts info include type of the + * model's topology, byte sizes of the topology, weights, etc. + * + * @doc { + * heading: 'Models', + * subheading: 'Management', + * namespace: 'io', + * ignoreCI: true + * } + */ + async function listModels() { + const schemes = ModelStoreManagerRegistry.getSchemes(); + const out = {}; + for (const scheme of schemes) { + const schemeOut = await ModelStoreManagerRegistry.getManager(scheme).listModels(); + for (const path in schemeOut) { + const url = scheme + URL_SCHEME_SUFFIX + path; + out[url] = schemeOut[path]; + } + } + return out; + } + /** + * Remove a model specified by URL from a registered storage medium. + * + * ```js + * // First create and save a model. + * const model = tf.sequential(); + * model.add(tf.layers.dense( + * {units: 1, inputShape: [10], activation: 'sigmoid'})); + * await model.save('localstorage://demo/management/model1'); + * + * // Then list existing models. 
+ * console.log(JSON.stringify(await tf.io.listModels())); + * + * // Delete the model. + * await tf.io.removeModel('localstorage://demo/management/model1'); + * + * // List models again. + * console.log(JSON.stringify(await tf.io.listModels())); + * ``` + * + * @param url A URL to a stored model, with a scheme prefix, e.g., + * 'localstorage://my-model-1', 'indexeddb://my/model/2'. + * @returns ModelArtifactsInfo of the deleted model (if and only if deletion + * is successful). + * @throws Error if deletion fails, e.g., if no model exists at `path`. + * + * @doc { + * heading: 'Models', + * subheading: 'Management', + * namespace: 'io', + * ignoreCI: true + * } + */ + async function removeModel(url) { + const schemeAndPath = parseURL(url); + const manager = ModelStoreManagerRegistry.getManager(schemeAndPath.scheme); + return manager.removeModel(schemeAndPath.path); + } + /** + * Copy a model from one URL to another. + * + * This function supports: + * + * 1. Copying within a storage medium, e.g., + * `tf.io.copyModel('localstorage://model-1', 'localstorage://model-2')` + * 2. Copying between two storage mediums, e.g., + * `tf.io.copyModel('localstorage://model-1', 'indexeddb://model-1')` + * + * ```js + * // First create and save a model. + * const model = tf.sequential(); + * model.add(tf.layers.dense( + * {units: 1, inputShape: [10], activation: 'sigmoid'})); + * await model.save('localstorage://demo/management/model1'); + * + * // Then list existing models. + * console.log(JSON.stringify(await tf.io.listModels())); + * + * // Copy the model, from Local Storage to IndexedDB. + * await tf.io.copyModel( + * 'localstorage://demo/management/model1', + * 'indexeddb://demo/management/model1'); + * + * // List models again. + * console.log(JSON.stringify(await tf.io.listModels())); + * + * // Remove both models. 
+ * await tf.io.removeModel('localstorage://demo/management/model1'); + * await tf.io.removeModel('indexeddb://demo/management/model1'); + * ``` + * + * @param sourceURL Source URL of copying. + * @param destURL Destination URL of copying. + * @returns ModelArtifactsInfo of the copied model (if and only if copying + * is successful). + * @throws Error if copying fails, e.g., if no model exists at `sourceURL`, or + * if `oldPath` and `newPath` are identical. + * + * @doc { + * heading: 'Models', + * subheading: 'Management', + * namespace: 'io', + * ignoreCI: true + * } + */ + async function copyModel(sourceURL, destURL) { + const deleteSource = false; + return cloneModelInternal(sourceURL, destURL, deleteSource); + } + /** + * Move a model from one URL to another. + * + * This function supports: + * + * 1. Moving within a storage medium, e.g., + * `tf.io.moveModel('localstorage://model-1', 'localstorage://model-2')` + * 2. Moving between two storage mediums, e.g., + * `tf.io.moveModel('localstorage://model-1', 'indexeddb://model-1')` + * + * ```js + * // First create and save a model. + * const model = tf.sequential(); + * model.add(tf.layers.dense( + * {units: 1, inputShape: [10], activation: 'sigmoid'})); + * await model.save('localstorage://demo/management/model1'); + * + * // Then list existing models. + * console.log(JSON.stringify(await tf.io.listModels())); + * + * // Move the model, from Local Storage to IndexedDB. + * await tf.io.moveModel( + * 'localstorage://demo/management/model1', + * 'indexeddb://demo/management/model1'); + * + * // List models again. + * console.log(JSON.stringify(await tf.io.listModels())); + * + * // Remove the moved model. + * await tf.io.removeModel('indexeddb://demo/management/model1'); + * ``` + * + * @param sourceURL Source URL of moving. + * @param destURL Destination URL of moving. + * @returns ModelArtifactsInfo of the copied model (if and only if copying + * is successful). 
+ * @throws Error if moving fails, e.g., if no model exists at `sourceURL`, or + * if `oldPath` and `newPath` are identical. + * + * @doc { + * heading: 'Models', + * subheading: 'Management', + * namespace: 'io', + * ignoreCI: true + * } + */ + async function moveModel(sourceURL, destURL) { + const deleteSource = true; + return cloneModelInternal(sourceURL, destURL, deleteSource); + } + + /** + * @license + * Copyright 2019 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + class PlatformBrowser { + constructor() { + // For setTimeoutCustom + this.messageName = 'setTimeoutCustom'; + this.functionRefs = []; + this.handledMessageCount = 0; + this.hasEventListener = false; + } + fetch(path, init) { + return fetch(path, init); + } + now() { + return performance.now(); + } + encode(text, encoding) { + if (encoding !== 'utf-8' && encoding !== 'utf8') { + throw new Error(`Browser's encoder only supports utf-8, but got ${encoding}`); + } + if (this.textEncoder == null) { + this.textEncoder = new TextEncoder(); + } + return this.textEncoder.encode(text); + } + decode(bytes, encoding) { + return new TextDecoder(encoding).decode(bytes); + } + // If the setTimeout nesting level is greater than 5 and timeout is less + // than 4ms, timeout will be clamped to 4ms, which hurts the perf. 
+ // Interleaving window.postMessage and setTimeout will trick the browser and + // avoid the clamp. + setTimeoutCustom(functionRef, delay) { + if (typeof window === 'undefined' || + !env().getBool('USE_SETTIMEOUTCUSTOM')) { + setTimeout(functionRef, delay); + return; + } + this.functionRefs.push(functionRef); + setTimeout(() => { + window.postMessage({ name: this.messageName, index: this.functionRefs.length - 1 }, '*'); + }, delay); + if (!this.hasEventListener) { + this.hasEventListener = true; + window.addEventListener('message', (event) => { + if (event.source === window && event.data.name === this.messageName) { + event.stopPropagation(); + const functionRef = this.functionRefs[event.data.index]; + functionRef(); + this.handledMessageCount++; + if (this.handledMessageCount === this.functionRefs.length) { + this.functionRefs = []; + this.handledMessageCount = 0; + } + } + }, true); + } + } + isTypedArray(a) { + return isTypedArrayBrowser(a); + } + } + if (env().get('IS_BROWSER')) { + env().setPlatform('browser', new PlatformBrowser()); + // Register LocalStorage IOHandler + try { + ModelStoreManagerRegistry.registerManager(BrowserLocalStorage.URL_SCHEME, new BrowserLocalStorageManager()); + } + catch (err) { + } + // Register IndexedDB IOHandler + try { + ModelStoreManagerRegistry.registerManager(BrowserIndexedDB.URL_SCHEME, new BrowserIndexedDBManager()); + } + catch (err) { + } + } + + /** + * @license + * Copyright 2019 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + // We are wrapping this within an object so it can be stubbed by Jasmine. + const getNodeFetch = { + // tslint:disable-next-line:no-require-imports + importFetch: () => require('node-fetch') + }; + let systemFetch; + // These getters and setters are for testing so we don't export a mutable + // variable. + function resetSystemFetch() { + systemFetch = null; + } + function setSystemFetch(fetchFn) { + systemFetch = fetchFn; + } + function getSystemFetch() { + return systemFetch; + } + class PlatformNode { + constructor() { + // tslint:disable-next-line:no-require-imports + this.util = require('util'); + // According to the spec, the built-in encoder can do only UTF-8 encoding. + // https://developer.mozilla.org/en-US/docs/Web/API/TextEncoder/TextEncoder + this.textEncoder = new this.util.TextEncoder(); + } + fetch(path, requestInits) { + if (env().global.fetch != null) { + return env().global.fetch(path, requestInits); + } + if (systemFetch == null) { + systemFetch = getNodeFetch.importFetch(); + } + return systemFetch(path, requestInits); + } + now() { + const time = process.hrtime(); + return time[0] * 1000 + time[1] / 1000000; + } + encode(text, encoding) { + if (encoding !== 'utf-8' && encoding !== 'utf8') { + throw new Error(`Node built-in encoder only supports utf-8, but got ${encoding}`); + } + return this.textEncoder.encode(text); + } + decode(bytes, encoding) { + if (bytes.length === 0) { + return ''; + } + return new this.util.TextDecoder(encoding).decode(bytes); + } + isTypedArray(a) { + return this.util.types.isFloat32Array(a) + || this.util.types.isInt32Array(a) + || this.util.types.isUint8Array(a) + || this.util.types.isUint8ClampedArray(a); + } + } + if (env().get('IS_NODE') && !env().get('IS_BROWSER')) { + env().setPlatform('node', new PlatformNode()); + } + 
+ /** + * @license + * Copyright 2020 Google Inc. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + /** + * Creates an empty `tf.TensorBuffer` with the specified `shape` and `dtype`. + * + * The values are stored in CPU as `TypedArray`. Fill the buffer using + * `buffer.set()`, or by modifying directly `buffer.values`. + * + * When done, call `buffer.toTensor()` to get an immutable `tf.Tensor` with + * those values. + * + * ```js + * // Create a buffer and set values at particular indices. + * const buffer = tf.buffer([2, 2]); + * buffer.set(3, 0, 0); + * buffer.set(5, 1, 0); + * + * // Convert the buffer back to a tensor. + * buffer.toTensor().print(); + * ``` + * + * @param shape An array of integers defining the output tensor shape. + * @param dtype The dtype of the buffer. Defaults to 'float32'. + * @param values The values of the buffer as `TypedArray`. Defaults to + * zeros. + * + * @doc {heading: 'Tensors', subheading: 'Creation'} + */ + function buffer(shape, dtype = 'float32', values) { + dtype = dtype || 'float32'; + assertNonNegativeIntegerDimensions(shape); + return new TensorBuffer(shape, dtype, values); + } + + /** + * @license + * Copyright 2020 Google Inc. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + /** + * Casts a `tf.Tensor` to a new dtype. + * + * ```js + * const x = tf.tensor1d([1.5, 2.5, 3]); + * tf.cast(x, 'int32').print(); + * ``` + * @param x The input tensor to be casted. + * @param dtype The dtype to cast the input tensor to. + * + * @doc {heading: 'Tensors', subheading: 'Transformations'} + */ + function cast_(x, dtype) { + const $x = convertToTensor(x, 'x', 'cast'); + // Sanity checks. + if (!isValidDtype(dtype)) { + throw new Error(`Failed to cast to unknown dtype ${dtype}`); + } + if (dtype === 'string' && $x.dtype !== 'string' || + dtype !== 'string' && $x.dtype === 'string') { + throw new Error('Only strings can be casted to strings'); + } + const inputs = { x: $x }; + const attrs = { dtype }; + return ENGINE.runKernel(Cast, inputs, attrs); + } + const cast$3 = /* @__PURE__ */ op({ cast_ }); + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ + /** + * Creates a new tensor with the same values and shape as the specified + * tensor. + * + * ```js + * const x = tf.tensor([1, 2]); + * + * x.clone().print(); + * ``` + * + * @param x The tensor to clone. + * + * @doc {heading: 'Tensors', subheading: 'Creation'} + */ + function clone_(x) { + const $x = convertToTensor(x, 'x', 'clone', 'string_or_numeric'); + const inputs = { x: $x }; + // Note this op is called tf.identity in python. Hence the kernel name used + // here. + return ENGINE.runKernel(Identity$1, inputs); + } + const clone = /* @__PURE__ */ op({ clone_ }); + + /** + * @license + * Copyright 2020 Google Inc. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + /** + * Prints information about the `tf.Tensor` including its data. + * + * ```js + * const verbose = true; + * tf.tensor2d([1, 2, 3, 4], [2, 2]).print(verbose); + * ``` + * @param x The tensor to be printed. + * @param verbose Whether to print verbose information about the ` Tensor`, + * including dtype and size. + * + * @doc {heading: 'Tensors', subheading: 'Creation'} + */ + function print(x, verbose = false) { + console.log(x.toString(verbose)); + } + + /** + * @license + * Copyright 2020 Google Inc. All Rights Reserved. 
+ * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + getOrMakeEngine(); + const opHandler = { + buffer, + cast: cast$3, + clone, + print + }; + setOpHandler(opHandler); + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + /** + * Adds two `tf.Tensor`s element-wise, A + B. Supports broadcasting. + * + * + * ```js + * const a = tf.tensor1d([1, 2, 3, 4]); + * const b = tf.tensor1d([10, 20, 30, 40]); + * + * a.add(b).print(); // or tf.add(a, b) + * ``` + * + * ```js + * // Broadcast add a with b. + * const a = tf.scalar(5); + * const b = tf.tensor1d([10, 20, 30, 40]); + * + * a.add(b).print(); // or tf.add(a, b) + * ``` + * @param a The first `tf.Tensor` to add. + * @param b The second `tf.Tensor` to add. 
Must have the same type as `a`. + * + * @doc {heading: 'Operations', subheading: 'Arithmetic'} + */ + function add_(a, b) { + let $a = convertToTensor(a, 'a', 'add'); + let $b = convertToTensor(b, 'b', 'add'); + [$a, $b] = makeTypesMatch($a, $b); + const inputs = { a: $a, b: $b }; + return ENGINE.runKernel(Add$1, inputs); + } + const add$3 = /* @__PURE__ */ op({ add_ }); + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + /** + * Divides two `tf.Tensor`s element-wise, A / B. Supports broadcasting. + * The result is rounded with floor function. + * + * + * ```js + * const a = tf.tensor1d([1, 4, 9, 16]); + * const b = tf.tensor1d([1, 2, 3, 4]); + * + * a.floorDiv(b).print(); // or tf.div(a, b) + * ``` + * + * ```js + * // Broadcast div a with b. + * const a = tf.tensor1d([2, 4, 6, 8]); + * const b = tf.scalar(2); + * + * a.floorDiv(b).print(); // or tf.floorDiv(a, b) + * ``` + * + * @param a The first tensor as the numerator. + * @param b The second tensor as the denominator. Must have the same dtype as + * `a`. 
+ * + * @doc {heading: 'Operations', subheading: 'Arithmetic'} + */ + function floorDiv_(a, b) { + let $a = convertToTensor(a, 'a', 'floorDiv'); + let $b = convertToTensor(b, 'b', 'floorDiv'); + [$a, $b] = makeTypesMatch($a, $b); + const inputs = { a: $a, b: $b }; + return ENGINE.runKernel(FloorDiv, inputs); + } + const floorDiv$2 = /* @__PURE__ */ op({ floorDiv_ }); + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + /** + * Divides two `tf.Tensor`s element-wise, A / B. Supports broadcasting. + * + * ```js + * const a = tf.tensor1d([1, 4, 9, 16]); + * const b = tf.tensor1d([1, 2, 3, 4]); + * + * a.div(b).print(); // or tf.div(a, b) + * ``` + * + * ```js + * // Broadcast div a with b. + * const a = tf.tensor1d([2, 4, 6, 8]); + * const b = tf.scalar(2); + * + * a.div(b).print(); // or tf.div(a, b) + * ``` + * + * @param a The first tensor as the numerator. + * @param b The second tensor as the denominator. Must have the same dtype as + * `a`. 
+ * + * @doc {heading: 'Operations', subheading: 'Arithmetic'} + */ + function div_(a, b) { + let $a = convertToTensor(a, 'a', 'div'); + let $b = convertToTensor(b, 'b', 'div'); + [$a, $b] = makeTypesMatch($a, $b); + if ($a.dtype === 'int32' && $b.dtype === 'int32') { + return floorDiv$2($a, $b); + } + const inputs = { a: $a, b: $b }; + const attrs = {}; + // tslint:disable-next-line: no-unnecessary-type-assertion + return ENGINE.runKernel(RealDiv, inputs, attrs); + } + const div$1 = /* @__PURE__ */ op({ div_ }); + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + /** + * Multiplies two `tf.Tensor`s element-wise, A * B. Supports broadcasting. + * + * We also expose `tf.mulStrict` which has the same signature as this op and + * asserts that `a` and `b` are the same shape (does not broadcast). + * + * ```js + * const a = tf.tensor1d([1, 2, 3, 4]); + * const b = tf.tensor1d([2, 3, 4, 5]); + * + * a.mul(b).print(); // or tf.mul(a, b) + * ``` + * + * ```js + * // Broadcast mul a with b. + * const a = tf.tensor1d([1, 2, 3, 4]); + * const b = tf.scalar(5); + * + * a.mul(b).print(); // or tf.mul(a, b) + * ``` + * @param a The first tensor to multiply. + * @param b The second tensor to multiply. Must have the same dtype as `a`. 
+ * + * @doc {heading: 'Operations', subheading: 'Arithmetic'} + */ + function mul_(a, b) { + let $a = convertToTensor(a, 'a', 'mul'); + let $b = convertToTensor(b, 'b', 'mul'); + [$a, $b] = makeTypesMatch($a, $b); + const inputs = { a: $a, b: $b }; + return ENGINE.runKernel(Multiply$1, inputs); + } + const mul = /* @__PURE__ */ op({ mul_ }); + + /** + * @license + * Copyright 2018 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + /** + * Computes absolute value element-wise: `abs(x)` + * + * ```js + * const x = tf.tensor1d([-1, 2, -3, 4]); + * + * x.abs().print(); // or tf.abs(x) + * ``` + * @param x The input `tf.Tensor`. + * + * @doc {heading: 'Operations', subheading: 'Basic math'} + */ + function abs_(x) { + const $x = convertToTensor(x, 'x', 'abs'); + if ($x.dtype === 'complex64') { + const inputs = { x: $x }; + return ENGINE.runKernel(ComplexAbs, inputs); + } + else { + const inputs = { x: $x }; + return ENGINE.runKernel(Abs, inputs); + } + } + const abs$2 = /* @__PURE__ */ op({ abs_ }); + + /** + * @license + * Copyright 2018 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + /** + * Computes acos of the input `tf.Tensor` element-wise: `acos(x)` + * + * ```js + * const x = tf.tensor1d([0, 1, -1, .7]); + * + * x.acos().print(); // or tf.acos(x) + * ``` + * @param x The input tensor. + * @doc {heading: 'Operations', subheading: 'Basic math'} + */ + function acos_(x) { + const $x = convertToTensor(x, 'x', 'acos'); + const inputs = { x: $x }; + return ENGINE.runKernel(Acos, inputs); + } + const acos$2 = /* @__PURE__ */ op({ acos_ }); + + /** + * @license + * Copyright 2018 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + /** + * Computes the inverse hyperbolic cos of the input `tf.Tensor` element-wise: + * `acosh(x)` + * + * ```js + * const x = tf.tensor1d([10, 1, 3, 5.7]); + * + * x.acosh().print(); // or tf.acosh(x) + * ``` + * @param x The input tensor. 
+ * + * @doc {heading: 'Operations', subheading: 'Basic math'} + */ + function acosh_(x) { + const $x = convertToTensor(x, 'x', 'acosh'); + const inputs = { x: $x }; + return ENGINE.runKernel(Acosh, inputs); + } + const acosh$2 = /* @__PURE__ */ op({ acosh_ }); + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + /** + * Adds a list of `tf.Tensor`s element-wise, each with the same shape and dtype. + * + * ```js + * const a = tf.tensor1d([1, 2]); + * const b = tf.tensor1d([3, 4]); + * const c = tf.tensor1d([5, 6]); + * + * tf.addN([a, b, c]).print(); + * ``` + * @param tensors A list of tensors with the same shape and dtype. 
+ * @doc {heading: 'Operations', subheading: 'Arithmetic'} + */ + function addN_(tensors) { + assert$1(Array.isArray(tensors), () => 'The argument passed to tf.addN() must be a list of tensors'); + assert$1(tensors.length >= 1, () => `Must pass at least one tensor to tf.addN(), but got ` + + `${tensors.length}`); + const $tensors = tensors.map((t, i) => convertToTensor(t, `tensors${i}`, 'addN')); + const firstTensor = $tensors[0]; + $tensors.forEach(t => { + if (t.dtype !== firstTensor.dtype) { + throw new Error('All tensors passed to tf.addN() must have the same dtype'); + } + }); + $tensors.forEach(t => { + if (!arraysEqual(t.shape, firstTensor.shape)) { + throw new Error('All tensors passed to tf.addN() must have the same shape'); + } + }); + const inputs = $tensors; + return ENGINE.runKernel(AddN, inputs); + } + const addN$2 = /* @__PURE__ */ op({ addN_ }); + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + /** + * Computes the logical and of elements across dimensions of a `tf.Tensor`. + * + * Reduces the input along the dimensions given in `axes`. Unless `keepDims` + * is true, the rank of the `tf.Tensor` is reduced by 1 for each entry in + * `axes`. If `keepDims` is true, the reduced dimensions are retained with + * length 1. 
If `axes` has no entries, all dimensions are reduced, and a + * `tf.Tensor` with a single element is returned. + * + * ```js + * const x = tf.tensor1d([1, 1, 1], 'bool'); + * + * x.all().print(); // or tf.all(x) + * ``` + * + * ```js + * const x = tf.tensor2d([1, 1, 0, 0], [2, 2], 'bool'); + * + * const axis = 1; + * x.all(axis).print(); // or tf.all(x, axis) + * ``` + * + * @param x The input tensor. Must be of dtype bool. + * @param axis The dimension(s) to reduce. By default it reduces + * all dimensions. + * @param keepDims If true, retains reduced dimensions with size 1. + * + * @doc {heading: 'Operations', subheading: 'Reduction'} + */ + function all_(x, axis = null, keepDims = false) { + const $x = convertToTensor(x, 'x', 'all', 'bool'); + const inputs = { x: $x }; + const attrs = { axis, keepDims }; + return ENGINE.runKernel(All, inputs, attrs); + } + const all$2 = /* @__PURE__ */ op({ all_ }); + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + /** + * Computes the logical or of elements across dimensions of a `tf.Tensor`. + * + * Reduces the input along the dimensions given in `axes`. Unless `keepDims` + * is true, the rank of the `tf.Tensor` is reduced by 1 for each entry in + * `axes`. If `keepDims` is true, the reduced dimensions are retained with + * length 1. 
If `axes` has no entries, all dimensions are reduced, and a + * `tf.Tensor` with a single element is returned. + * + * ```js + * const x = tf.tensor1d([1, 1, 1], 'bool'); + * + * x.any().print(); // or tf.any(x) + * ``` + * + * ```js + * const x = tf.tensor2d([1, 1, 0, 0], [2, 2], 'bool'); + * + * const axis = 1; + * x.any(axis).print(); // or tf.any(x, axis) + * ``` + * + * @param x The input tensor. Must be of dtype bool. + * @param axis The dimension(s) to reduce. By default it reduces + * all dimensions. + * @param keepDims If true, retains reduced dimensions with size 1. + * + * @doc {heading: 'Operations', subheading: 'Reduction'} + */ + function any_(x, axis = null, keepDims = false) { + const $x = convertToTensor(x, 'x', 'any', 'bool'); + const inputs = { x: $x }; + const attrs = { axis, keepDims }; + return ENGINE.runKernel(Any, inputs, attrs); + } + // tslint:disable-next-line:variable-name + const any$2 = /* @__PURE__ */ op({ any_ }); + + /** + * @license + * Copyright 2020 Google Inc. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + /** + * Returns the indices of the maximum values along an `axis`. + * + * The result has the same shape as `input` with the dimension along `axis` + * removed. 
+ * + * ```js + * const x = tf.tensor1d([1, 2, 3]); + * + * x.argMax().print(); // or tf.argMax(x) + * ``` + * + * ```js + * const x = tf.tensor2d([1, 2, 4, 3], [2, 2]); + * + * const axis = 1; + * x.argMax(axis).print(); // or tf.argMax(x, axis) + * ``` + * + * @param x The input tensor. + * @param axis The dimension to reduce. Defaults to 0 (outer-most dimension). + * + * @doc {heading: 'Operations', subheading: 'Reduction'} + */ + function argMax_(x, axis = 0) { + const $x = convertToTensor(x, 'x', 'argMax'); + const inputs = { x: $x }; + const attrs = { axis }; + return ENGINE.runKernel(ArgMax, inputs, attrs); + } + const argMax$2 = /* @__PURE__ */ op({ argMax_ }); + + /** + * @license + * Copyright 2020 Google Inc. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + /** + * Returns the indices of the minimum values along an `axis`. + * + * The result has the same shape as `input` with the dimension along `axis` + * removed. + * + * ```js + * const x = tf.tensor1d([1, 2, 3]); + * + * x.argMin().print(); // or tf.argMin(x) + * ``` + * + * ```js + * const x = tf.tensor2d([1, 2, 4, 3], [2, 2]); + * + * const axis = 1; + * x.argMin(axis).print(); // or tf.argMin(x, axis) + * ``` + * + * @param x The input tensor. + * @param axis The dimension to reduce. Defaults to 0 (outer-most dimension). 
+ * + * @doc {heading: 'Operations', subheading: 'Reduction'} + */ + function argMin_(x, axis = 0) { + const $x = convertToTensor(x, 'x', 'argMin'); + const inputs = { x: $x }; + const attrs = { axis }; + return ENGINE.runKernel(ArgMin, inputs, attrs); + } + const argMin$2 = /* @__PURE__ */ op({ argMin_ }); + + /** + * @license + * Copyright 2018 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + /** + * Computes asin of the input `tf.Tensor` element-wise: `asin(x)` + * + * ```js + * const x = tf.tensor1d([0, 1, -1, .7]); + * + * x.asin().print(); // or tf.asin(x) + * ``` + * @param x The input tensor. + * @doc {heading: 'Operations', subheading: 'Basic math'} + */ + function asin_(x) { + const $x = convertToTensor(x, 'x', 'asin'); + const inputs = { x: $x }; + return ENGINE.runKernel(Asin, inputs); + } + const asin$2 = /* @__PURE__ */ op({ asin_ }); + + /** + * @license + * Copyright 2018 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + /** + * Computes inverse hyperbolic sin of the input `tf.Tensor` element-wise: + * `asinh(x)` + * + * ```js + * const x = tf.tensor1d([0, 1, -1, .7]); + * + * x.asinh().print(); // or tf.asinh(x) + * ``` + * @param x The input tensor. + * + * @doc {heading: 'Operations', subheading: 'Basic math'} + */ + function asinh_(x) { + const $x = convertToTensor(x, 'x', 'asinh'); + const inputs = { x: $x }; + return ENGINE.runKernel(Asinh, inputs); + } + const asinh$2 = /* @__PURE__ */ op({ asinh_ }); + + /** + * @license + * Copyright 2018 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + /** + * Computes atan of the input `tf.Tensor` element-wise: `atan(x)` + * + * ```js + * const x = tf.tensor1d([0, 1, -1, .7]); + * + * x.atan().print(); // or tf.atan(x) + * ``` + * @param x The input tensor. 
+ * + * @doc {heading: 'Operations', subheading: 'Basic math'} + */ + function atan_(x) { + const $x = convertToTensor(x, 'x', 'atan'); + const inputs = { x: $x }; + return ENGINE.runKernel(Atan, inputs); + } + const atan$2 = /* @__PURE__ */ op({ atan_ }); + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + /** + * Computes arctangent of `tf.Tensor`s a / b element-wise: `atan2(a, b)`. + * Supports broadcasting. + * + * ```js + * const a = tf.tensor1d([1.0, 1.0, -1.0, .7]); + * const b = tf.tensor1d([2.0, 13.0, 3.5, .21]); + * + * tf.atan2(a, b).print() + * ``` + * + * @param a The first tensor. + * @param b The second tensor. Must have the same dtype as `a`. + * + * @doc {heading: 'Operations', subheading: 'Basic math'} + */ + function atan2_(a, b) { + let $a = convertToTensor(a, 'a', 'atan2'); + let $b = convertToTensor(b, 'b', 'atan2'); + [$a, $b] = makeTypesMatch($a, $b); + const inputs = { a: $a, b: $b }; + return ENGINE.runKernel(Atan2, inputs); + } + const atan2$2 = /* @__PURE__ */ op({ atan2_ }); + + /** + * @license + * Copyright 2018 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + /** + * Computes inverse hyperbolic tan of the input `tf.Tensor` element-wise: + * `atanh(x)` + * + * ```js + * const x = tf.tensor1d([0, .1, -.1, .7]); + * + * x.atanh().print(); // or tf.atanh(x) + * ``` + * @param x The input tensor. + * + * @doc {heading: 'Operations', subheading: 'Basic math'} + */ + function atanh_(x) { + const $x = convertToTensor(x, 'x', 'atanh'); + const inputs = { x: $x }; + return ENGINE.runKernel(Atanh, inputs); + } + const atanh$2 = /* @__PURE__ */ op({ atanh_ }); + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + /** + * + * @param inputShape Input tensor shape is of the following dimensions: + * `[batch, height, width, inChannels]`. + * @param filterShape The filter shape is of the following dimensions: + * `[filterHeight, filterWidth, depth]`. 
+ * @param strides The strides of the sliding window for each dimension of the + * input tensor: `[strideHeight, strideWidth]`. + * If `strides` is a single number, + * then `strideHeight == strideWidth`. + * @param pad The type of padding algorithm. + * - `same` and stride 1: output will be of same size as input, + * regardless of filter size. + * - `valid`: output will be smaller than input if filter is larger + * than 1*1x1. + * - For more info, see this guide: + * [https://www.tensorflow.org/api_docs/python/tf/nn/convolution]( + * https://www.tensorflow.org/api_docs/python/tf/nn/convolution) + * @param dataFormat The data format of the input and output data. + * Defaults to 'NHWC'. + * @param dilations The dilation rates: `[dilationHeight, dilationWidth]`. + * Defaults to `[1, 1]`. If `dilations` is a single number, then + * `dilationHeight == dilationWidth`. + */ + function computeDilation2DInfo(inputShape, filterShape, strides, pad, dataFormat = 'NHWC', dilations) { + // `computerConv2DInfo` require filterShape to be in the dimension of: + // `[filterHeight, filterWidth, depth, outDepth]`, dilation2d doesn't have + // outDepth, it should have the same depth as the input. 
+ // Input shape: [batch, height, width, inChannels] + const inputChannels = inputShape[3]; + const $filterShape = [...filterShape, inputChannels]; + const $dataFormat = convertConv2DDataFormat(dataFormat); + return computeConv2DInfo(inputShape, $filterShape, strides, dilations, pad, null /* roundingMode */, null /* depthWise */, $dataFormat); + } + function computePool2DInfo(inShape, filterSize, strides, dilations, pad, roundingMode, dataFormat = 'channelsLast') { + const [filterHeight, filterWidth] = parseTupleParam(filterSize); + let filterShape; + if (dataFormat === 'channelsLast') { + filterShape = [filterHeight, filterWidth, inShape[3], inShape[3]]; + } + else if (dataFormat === 'channelsFirst') { + filterShape = [filterHeight, filterWidth, inShape[1], inShape[1]]; + } + else { + throw new Error(`Unknown dataFormat ${dataFormat}`); + } + return computeConv2DInfo(inShape, filterShape, strides, dilations, pad, roundingMode, false, dataFormat); + } + /** + * Computes the information for a forward pass of a pooling3D operation. + */ + function computePool3DInfo(inShape, filterSize, strides, dilations, pad, roundingMode, dataFormat = 'NDHWC') { + const [filterDepth, filterHeight, filterWidth] = parse3TupleParam(filterSize); + let filterShape; + let $dataFormat; + if (dataFormat === 'NDHWC') { + $dataFormat = 'channelsLast'; + filterShape = + [filterDepth, filterHeight, filterWidth, inShape[4], inShape[4]]; + } + else if (dataFormat === 'NCDHW') { + $dataFormat = 'channelsFirst'; + filterShape = + [filterDepth, filterHeight, filterWidth, inShape[1], inShape[1]]; + } + else { + throw new Error(`Unknown dataFormat ${dataFormat}`); + } + return computeConv3DInfo(inShape, filterShape, strides, dilations, pad, false, $dataFormat, roundingMode); + } + /** + * Computes the information for a forward pass of a convolution/pooling + * operation. 
+ */ + function computeConv2DInfo(inShape, filterShape, strides, dilations, pad, roundingMode, depthwise = false, dataFormat = 'channelsLast') { + let [batchSize, inHeight, inWidth, inChannels] = [-1, -1, -1, -1]; + if (dataFormat === 'channelsLast') { + [batchSize, inHeight, inWidth, inChannels] = inShape; + } + else if (dataFormat === 'channelsFirst') { + [batchSize, inChannels, inHeight, inWidth] = inShape; + } + else { + throw new Error(`Unknown dataFormat ${dataFormat}`); + } + const [filterHeight, filterWidth, , filterChannels] = filterShape; + const [strideHeight, strideWidth] = parseTupleParam(strides); + const [dilationHeight, dilationWidth] = parseTupleParam(dilations); + const effectiveFilterHeight = getEffectiveFilterSize(filterHeight, dilationHeight); + const effectiveFilterWidth = getEffectiveFilterSize(filterWidth, dilationWidth); + const { padInfo, outHeight, outWidth } = getPadAndOutInfo(pad, inHeight, inWidth, strideHeight, strideWidth, effectiveFilterHeight, effectiveFilterWidth, roundingMode, dataFormat); + const outChannels = depthwise ? filterChannels * inChannels : filterChannels; + let outShape; + if (dataFormat === 'channelsFirst') { + outShape = [batchSize, outChannels, outHeight, outWidth]; + } + else if (dataFormat === 'channelsLast') { + outShape = [batchSize, outHeight, outWidth, outChannels]; + } + return { + batchSize, + dataFormat, + inHeight, + inWidth, + inChannels, + outHeight, + outWidth, + outChannels, + padInfo, + strideHeight, + strideWidth, + filterHeight, + filterWidth, + effectiveFilterHeight, + effectiveFilterWidth, + dilationHeight, + dilationWidth, + inShape, + outShape, + filterShape + }; + } + /** + * Computes the information for a forward pass of a 3D convolution/pooling + * operation. 
+ */ + function computeConv3DInfo(inShape, filterShape, strides, dilations, pad, depthwise = false, dataFormat = 'channelsLast', roundingMode) { + let [batchSize, inDepth, inHeight, inWidth, inChannels] = [-1, -1, -1, -1, -1]; + if (dataFormat === 'channelsLast') { + [batchSize, inDepth, inHeight, inWidth, inChannels] = inShape; + } + else if (dataFormat === 'channelsFirst') { + [batchSize, inChannels, inDepth, inHeight, inWidth] = inShape; + } + else { + throw new Error(`Unknown dataFormat ${dataFormat}`); + } + const [filterDepth, filterHeight, filterWidth, , filterChannels] = filterShape; + const [strideDepth, strideHeight, strideWidth] = parse3TupleParam(strides); + const [dilationDepth, dilationHeight, dilationWidth] = parse3TupleParam(dilations); + const effectiveFilterDepth = getEffectiveFilterSize(filterDepth, dilationDepth); + const effectiveFilterHeight = getEffectiveFilterSize(filterHeight, dilationHeight); + const effectiveFilterWidth = getEffectiveFilterSize(filterWidth, dilationWidth); + const { padInfo, outDepth, outHeight, outWidth } = get3DPadAndOutInfo(pad, inDepth, inHeight, inWidth, strideDepth, strideHeight, strideWidth, effectiveFilterDepth, effectiveFilterHeight, effectiveFilterWidth, roundingMode); + const outChannels = depthwise ? 
filterChannels * inChannels : filterChannels; + let outShape; + if (dataFormat === 'channelsFirst') { + outShape = [batchSize, outChannels, outDepth, outHeight, outWidth]; + } + else if (dataFormat === 'channelsLast') { + outShape = [batchSize, outDepth, outHeight, outWidth, outChannels]; + } + return { + batchSize, + dataFormat, + inDepth, + inHeight, + inWidth, + inChannels, + outDepth, + outHeight, + outWidth, + outChannels, + padInfo, + strideDepth, + strideHeight, + strideWidth, + filterDepth, + filterHeight, + filterWidth, + effectiveFilterDepth, + effectiveFilterHeight, + effectiveFilterWidth, + dilationDepth, + dilationHeight, + dilationWidth, + inShape, + outShape, + filterShape + }; + } + function computeOutputShape2D(inShape, fieldSize, stride, zeroPad, roundingMode) { + if (zeroPad == null) { + zeroPad = computeDefaultPad(inShape, fieldSize, stride); + } + const inputRows = inShape[0]; + const inputCols = inShape[1]; + const outputRows = round$3((inputRows - fieldSize + 2 * zeroPad) / stride + 1, roundingMode); + const outputCols = round$3((inputCols - fieldSize + 2 * zeroPad) / stride + 1, roundingMode); + return [outputRows, outputCols]; + } + function computeOutputShape4D(inShape, filterShape, outChannels, strides, zeroPad, roundingMode) { + if (zeroPad == null) { + zeroPad = computeDefaultPad(inShape, filterShape[0], strides[0]); + } + const outShape = [0, 0, 0, outChannels]; + for (let index = 0; index < 3; index++) { + if (inShape[index] + 2 * zeroPad >= filterShape[index]) { + outShape[index] = round$3((inShape[index] - filterShape[index] + 2 * zeroPad) / strides[index] + + 1, roundingMode); + } + } + return outShape; + } + function computeDefaultPad(inputShape, fieldSize, stride, dilation = 1) { + const effectiveFieldSize = getEffectiveFilterSize(fieldSize, dilation); + return Math.floor((inputShape[0] * (stride - 1) - stride + effectiveFieldSize) / 2); + } + function parseTupleParam(param) { + if (typeof param === 'number') { + return [param, 
param, param]; + } + if (param.length === 2) { + return [param[0], param[1], 1]; + } + return param; + } + function parse3TupleParam(param) { + return typeof param === 'number' ? [param, param, param] : param; + } + /* See https://www.tensorflow.org/api_docs/python/tf/nn/atrous_conv2d + * Atrous convolution is equivalent to standard convolution with upsampled + * filters with effective_filter_height = + * filter_height + (filter_height - 1) * (dilation - 1) + * and effective_filter_width = + * filter_width + (filter_width - 1) * (dilation - 1), + * produced by inserting dilation - 1 zeros along consecutive elements across + * the filters' spatial dimensions. + * When there is a dilation, this converts a filter dimension to the + * effective filter dimension, so it can be used in a standard convolution. + */ + function getEffectiveFilterSize(filterSize, dilation) { + if (dilation <= 1) { + return filterSize; + } + return filterSize + (filterSize - 1) * (dilation - 1); + } + function getPadAndOutInfo(pad, inHeight, inWidth, strideHeight, strideWidth, filterHeight, filterWidth, roundingMode, dataFormat) { + let padInfo; + let outHeight; + let outWidth; + if (typeof pad === 'number') { + const padType = (pad === 0) ? 
'VALID' : 'NUMBER'; + padInfo = { top: pad, bottom: pad, left: pad, right: pad, type: padType }; + const outShape = computeOutputShape2D([inHeight, inWidth], filterHeight, strideHeight, pad, roundingMode); + outHeight = outShape[0]; + outWidth = outShape[1]; + } + else if (pad === 'same') { + outHeight = Math.ceil(inHeight / strideHeight); + outWidth = Math.ceil(inWidth / strideWidth); + const padAlongHeight = Math.max(0, (outHeight - 1) * strideHeight + filterHeight - inHeight); + const padAlongWidth = Math.max(0, (outWidth - 1) * strideWidth + filterWidth - inWidth); + const top = Math.floor(padAlongHeight / 2); + const bottom = padAlongHeight - top; + const left = Math.floor(padAlongWidth / 2); + const right = padAlongWidth - left; + padInfo = { top, bottom, left, right, type: 'SAME' }; + } + else if (pad === 'valid') { + padInfo = { top: 0, bottom: 0, left: 0, right: 0, type: 'VALID' }; + outHeight = Math.ceil((inHeight - filterHeight + 1) / strideHeight); + outWidth = Math.ceil((inWidth - filterWidth + 1) / strideWidth); + } + else if (typeof pad === 'object') { + const top = dataFormat === 'channelsLast' ? pad[1][0] : pad[2][0]; + const bottom = dataFormat === 'channelsLast' ? pad[1][1] : pad[2][1]; + const left = dataFormat === 'channelsLast' ? pad[2][0] : pad[3][0]; + const right = dataFormat === 'channelsLast' ? pad[2][1] : pad[3][1]; + const padType = (top === 0 && bottom === 0 && left === 0 && right === 0) ? 
+ 'VALID' : + 'EXPLICIT'; + padInfo = { top, bottom, left, right, type: padType }; + outHeight = round$3((inHeight - filterHeight + top + bottom) / strideHeight + 1, roundingMode); + outWidth = round$3((inWidth - filterWidth + left + right) / strideWidth + 1, roundingMode); + } + else { + throw Error(`Unknown padding parameter: ${pad}`); + } + return { padInfo, outHeight, outWidth }; + } + function get3DPadAndOutInfo(pad, inDepth, inHeight, inWidth, strideDepth, strideHeight, strideWidth, filterDepth, filterHeight, filterWidth, roundingMode) { + let padInfo; + let outDepth; + let outHeight; + let outWidth; + if (pad === 'valid') { + pad = 0; + } + if (typeof pad === 'number') { + const padType = (pad === 0) ? 'VALID' : 'NUMBER'; + padInfo = { + top: pad, + bottom: pad, + left: pad, + right: pad, + front: pad, + back: pad, + type: padType + }; + const outShape = computeOutputShape4D([inDepth, inHeight, inWidth, 1], [filterDepth, filterHeight, filterWidth], 1, [strideDepth, strideHeight, strideWidth], pad, roundingMode); + outDepth = outShape[0]; + outHeight = outShape[1]; + outWidth = outShape[2]; + } + else if (pad === 'same') { + outDepth = Math.ceil(inDepth / strideDepth); + outHeight = Math.ceil(inHeight / strideHeight); + outWidth = Math.ceil(inWidth / strideWidth); + const padAlongDepth = (outDepth - 1) * strideDepth + filterDepth - inDepth; + const padAlongHeight = (outHeight - 1) * strideHeight + filterHeight - inHeight; + const padAlongWidth = (outWidth - 1) * strideWidth + filterWidth - inWidth; + const front = Math.floor(padAlongDepth / 2); + const back = padAlongDepth - front; + const top = Math.floor(padAlongHeight / 2); + const bottom = padAlongHeight - top; + const left = Math.floor(padAlongWidth / 2); + const right = padAlongWidth - left; + padInfo = { top, bottom, left, right, front, back, type: 'SAME' }; + } + else { + throw Error(`Unknown padding parameter: ${pad}`); + } + return { padInfo, outDepth, outHeight, outWidth }; + } + /** + * Rounds a 
value depending on the rounding mode + * @param value + * @param roundingMode A string from: 'ceil', 'round', 'floor'. If none is + * provided, it will default to truncate. + */ + function round$3(value, roundingMode) { + if (!roundingMode) { + return Math.trunc(value); + } + switch (roundingMode) { + case 'round': + // used for Caffe Conv + return Math.round(value); + case 'ceil': + // used for Caffe Pool + return Math.ceil(value); + case 'floor': + return Math.floor(value); + default: + throw new Error(`Unknown roundingMode ${roundingMode}`); + } + } + function tupleValuesAreOne(param) { + const [dimA, dimB, dimC] = parseTupleParam(param); + return dimA === 1 && dimB === 1 && dimC === 1; + } + function eitherStridesOrDilationsAreOne(strides, dilations) { + return tupleValuesAreOne(strides) || tupleValuesAreOne(dilations); + } + function stridesOrDilationsArePositive(values) { + return parseTupleParam(values).every(value => value > 0); + } + /** + * Convert Conv2D dataFormat from 'NHWC'|'NCHW' to + * 'channelsLast'|'channelsFirst' + * @param dataFormat in 'NHWC'|'NCHW' mode + * @return dataFormat in 'channelsLast'|'channelsFirst' mode + * @throws unknown dataFormat + */ + function convertConv2DDataFormat(dataFormat) { + if (dataFormat === 'NHWC') { + return 'channelsLast'; + } + else if (dataFormat === 'NCHW') { + return 'channelsFirst'; + } + else { + throw new Error(`Unknown dataFormat ${dataFormat}`); + } + } + /** + * Check validity of pad when using dimRoundingMode. + * @param opDesc A string of op description + * @param pad The type of padding algorithm. + * - `same` and stride 1: output will be of same size as input, + * regardless of filter size. + * - `valid` output will be smaller than input if filter is larger + * than 1x1. 
+ * - For more info, see this guide: + * [https://www.tensorflow.org/api_docs/python/tf/nn/convolution]( + * https://www.tensorflow.org/api_docs/python/tf/nn/convolution) + * @param dimRoundingMode A string from: 'ceil', 'round', 'floor'. If none is + * provided, it will default to truncate. + * @throws unknown padding parameter + */ + function checkPadOnDimRoundingMode(opDesc, pad, dimRoundingMode) { + if (dimRoundingMode != null) { + if (typeof pad === 'string') { + throw Error(`Error in ${opDesc}: pad must be an integer when using ` + + `dimRoundingMode ${dimRoundingMode} but got pad ${pad}.`); + } + else if (typeof pad === 'number') { + assert$1(isInt(pad), () => `Error in ${opDesc}: pad must be an integer when using ` + + `dimRoundingMode ${dimRoundingMode} but got pad ${pad}.`); + } + else if (typeof pad === 'object') { + pad.forEach(p => { + p.forEach(v => { + assert$1(isInt(v), () => `Error in ${opDesc}: pad must be an integer when using ` + + `dimRoundingMode ${dimRoundingMode} but got pad ${v}.`); + }); + }); + } + else { + throw Error(`Error in ${opDesc}: Unknown padding parameter: ${pad}`); + } + } + } + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + /** + * Reshapes a `tf.Tensor` to a given shape. 
+ * + * Given an input tensor, returns a new tensor with the same values as the + * input tensor with shape `shape`. + * + * If one component of shape is the special value -1, the size of that + * dimension is computed so that the total size remains constant. In + * particular, a shape of [-1] flattens into 1-D. At most one component of + * shape can be -1. + * + * If shape is 1-D or higher, then the operation returns a tensor with shape + * shape filled with the values of tensor. In this case, the number of + * elements implied by shape must be the same as the number of elements in + * tensor. + * + * ```js + * const x = tf.tensor1d([1, 2, 3, 4]); + * x.reshape([2, 2]).print(); + * ``` + * + * @param x The input tensor to be reshaped. + * @param shape An array of integers defining the output tensor shape. + * + * @doc {heading: 'Tensors', subheading: 'Transformations'} + */ + function reshape_(x, shape) { + const $x = convertToTensor(x, 'x', 'reshape', 'string_or_numeric'); + const inputs = { x: $x }; + const attrs = { shape }; + return ENGINE.runKernel(Reshape$1, inputs, attrs); + } + const reshape$3 = /* @__PURE__ */ op({ reshape_ }); + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + /** + * Computes the 2D average pooling of an image. 
+ * + * @param x The input tensor, of rank 4 or rank 3 of shape + * `[batch, height, width, inChannels]`. If rank 3, batch of 1 is assumed. + * @param filterSize The filter size: `[filterHeight, filterWidth]`. If + * `filterSize` is a single number, then `filterHeight == filterWidth`. + * @param strides The strides of the pooling: `[strideHeight, strideWidth]`. If + * `strides` is a single number, then `strideHeight == strideWidth`. + * @param pad The type of padding algorithm: + * - `same` and stride 1: output will be of same size as input, + * regardless of filter size. + * - `valid`: output will be smaller than input if filter is larger + * than 1x1. + * - For more info, see this guide: + * [https://www.tensorflow.org/api_docs/python/tf/nn/convolution]( + * https://www.tensorflow.org/api_docs/python/tf/nn/convolution) + * @param dimRoundingMode A string from: 'ceil', 'round', 'floor'. If none is + * provided, it will default to truncate. + * + * @doc {heading: 'Operations', subheading: 'Convolution'} + */ + function avgPool_(x, filterSize, strides, pad, dimRoundingMode) { + const $x = convertToTensor(x, 'x', 'avgPool', 'float32'); + const dilations = 1; + assert$1(eitherStridesOrDilationsAreOne(strides, dilations), () => 'Error in avgPool: Either strides or dilations must be 1. 
' + + `Got strides ${strides} and dilations '${dilations}'`); + let x4D = $x; + let reshapedTo4D = false; + if ($x.rank === 3) { + reshapedTo4D = true; + x4D = reshape$3($x, [1, $x.shape[0], $x.shape[1], $x.shape[2]]); + } + assert$1(x4D.rank === 4, () => `Error in avgPool: x must be rank 4 but got rank ${x4D.rank}.`); + checkPadOnDimRoundingMode('avgPool', pad, dimRoundingMode); + const inputs = { x: x4D }; + const attrs = { filterSize, strides, pad, dimRoundingMode }; + // tslint:disable-next-line: no-unnecessary-type-assertion + let res = ENGINE.runKernel(AvgPool, inputs, attrs); + res = cast$3(res, $x.dtype); + if (reshapedTo4D) { + return reshape$3(res, [res.shape[1], res.shape[2], res.shape[3]]); + } + return res; + } + const avgPool$2 = /* @__PURE__ */ op({ avgPool_ }); + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + /** + * Computes the 3D average pooling. + * + * ```js + * const x = tf.tensor5d([1, 2, 3, 4, 5, 6, 7, 8], [1, 2, 2, 2, 1]); + * const result = tf.avgPool3d(x, 2, 1, 'valid'); + * result.print(); + * ``` + * + * @param x The input tensor, of rank 5 or rank 4 of shape + * `[batch, depth, height, width, inChannels]`. + * @param filterSize The filter size: + * `[filterDepth, filterHeight, filterWidth]`. 
+ * If `filterSize` is a single number,
+ * then `filterDepth == filterHeight == filterWidth`.
+ * @param strides The strides of the pooling:
+ * `[strideDepth, strideHeight, strideWidth]`.
+ * If `strides` is a single number,
+ * then `strideDepth == strideHeight == strideWidth`.
+ * @param pad The type of padding algorithm.
+ * - `same` and stride 1: output will be of same size as input,
+ * regardless of filter size.
+ * - `valid`: output will be smaller than input if filter is larger
+ * than 1x1x1.
+ * - For more info, see this guide:
+ * [https://www.tensorflow.org/api_docs/python/tf/nn/convolution](
+ * https://www.tensorflow.org/api_docs/python/tf/nn/convolution)
+ * @param dimRoundingMode A string from: 'ceil', 'round', 'floor'. If none is
+ * provided, it will default to truncate.
+ * @param dataFormat An optional string from: "NDHWC", "NCDHW". Defaults to
+ * "NDHWC". Specify the data format of the input and output data. With the
+ * default format "NDHWC", the data is stored in the order of: [batch,
+ * depth, height, width, channels]. Only "NDHWC" is currently supported.
+ * + * @doc {heading: 'Operations', subheading: 'Convolution'} + */ + function avgPool3d_(x, filterSize, strides, pad, dimRoundingMode, dataFormat = 'NDHWC') { + const $x = convertToTensor(x, 'x', 'avgPool3d', 'float32'); + let x5D = $x; + let reshapedTo5D = false; + if ($x.rank === 4) { + reshapedTo5D = true; + x5D = reshape$3($x, [1, $x.shape[0], $x.shape[1], $x.shape[2], $x.shape[3]]); + } + assert$1(x5D.rank === 5, () => `Error in avgPool3d: x must be rank 5 but got rank ${x5D.rank}.`); + assert$1(dataFormat === 'NDHWC', () => `Error in avgPool3d: Only NDHWC is currently supported, ` + + `but got dataFormat of ${dataFormat}`); + assert$1((typeof strides === 'number' && strides > 0) || + (Array.isArray(strides) && strides[0] > 0 && strides[1] > 0 && + strides[2] > 0), () => `Error in avgPool3d: Stride must be > 0, but got '${strides}'`); + checkPadOnDimRoundingMode('avgPool3d', pad, dimRoundingMode); + const inputs = { x: x5D }; + const attrs = { filterSize, strides, pad, dimRoundingMode, dataFormat }; + // tslint:disable-next-line: no-unnecessary-type-assertion + let res = ENGINE.runKernel(AvgPool3D, inputs, attrs); + res = cast$3(res, x5D.dtype); + if (reshapedTo5D) { + return reshape$3(res, [res.shape[1], res.shape[2], res.shape[3], res.shape[4]]); + } + return res; + } + const avgPool3d$1 = /* @__PURE__ */ op({ avgPool3d_ }); + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ + /** + * Concatenates a list of `tf.Tensor`s along a given axis. + * + * The tensors ranks and types must match, and their sizes must match in all + * dimensions except `axis`. + * + * Also available are stricter rank-specific methods that assert that + * `tensors` are of the given rank: + * - `tf.concat1d` + * - `tf.concat2d` + * - `tf.concat3d` + * - `tf.concat4d` + * + * Except `tf.concat1d` (which does not have axis param), all methods have + * same signature as this method. + * + * ```js + * const a = tf.tensor1d([1, 2]); + * const b = tf.tensor1d([3, 4]); + * a.concat(b).print(); // or a.concat(b) + * ``` + * + * ```js + * const a = tf.tensor1d([1, 2]); + * const b = tf.tensor1d([3, 4]); + * const c = tf.tensor1d([5, 6]); + * tf.concat([a, b, c]).print(); + * ``` + * + * ```js + * const a = tf.tensor2d([[1, 2], [10, 20]]); + * const b = tf.tensor2d([[3, 4], [30, 40]]); + * const axis = 1; + * tf.concat([a, b], axis).print(); + * ``` + * @param tensors A list of tensors to concatenate. + * @param axis The axis to concatenate along. Defaults to 0 (the first dim). + * + * @doc {heading: 'Tensors', subheading: 'Slicing and Joining'} + */ + function concat_(tensors, axis = 0) { + assert$1(tensors.length >= 1, () => 'Pass at least one tensor to concat'); + const $tensors = convertToTensorArray(tensors, 'tensors', 'concat', 'string_or_numeric'); + if ($tensors[0].dtype === 'complex64') { + $tensors.forEach(tensor => { + if (tensor.dtype !== 'complex64') { + throw new Error(`Cannot concatenate complex64 tensors with a tensor + with dtype ${tensor.dtype}. `); + } + }); + } + if ($tensors.length === 1) { + return clone($tensors[0]); + } + const inputs = $tensors; + const attr = { axis }; + return ENGINE.runKernel(Concat, inputs, attr); + } + const concat$2 = /* @__PURE__ */ op({ concat_ }); + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. 
+ * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + /** + * Computes the dot product of two matrices, A * B. These must be matrices. + * + * ```js + * const a = tf.tensor2d([1, 2], [1, 2]); + * const b = tf.tensor2d([1, 2, 3, 4], [2, 2]); + * + * a.matMul(b).print(); // or tf.matMul(a, b) + * ``` + * @param a First matrix in dot product operation. + * @param b Second matrix in dot product operation. + * @param transposeA If true, `a` is transposed before multiplication. + * @param transposeB If true, `b` is transposed before multiplication. + * + * @doc {heading: 'Operations', subheading: 'Matrices'} + */ + function matMul_(a, b, transposeA = false, transposeB = false) { + let $a = convertToTensor(a, 'a', 'matMul'); + let $b = convertToTensor(b, 'b', 'matMul'); + [$a, $b] = makeTypesMatch($a, $b); + const inputs = { a: $a, b: $b }; + const attrs = { transposeA, transposeB }; + return ENGINE.runKernel(BatchMatMul, inputs, attrs); + } + const matMul$1 = /* @__PURE__ */ op({ matMul_ }); + + /** + * @license + * Copyright 2018 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + /** + * Computes sigmoid element-wise, `1 / (1 + exp(-x))` + * + * ```js + * const x = tf.tensor1d([0, -1, 2, -3]); + * + * x.sigmoid().print(); // or tf.sigmoid(x) + * ``` + * @param x The input tensor. + * + * @doc {heading: 'Operations', subheading: 'Basic math'} + */ + function sigmoid_(x) { + const $x = convertToTensor(x, 'x', 'sigmoid', 'float32'); + const inputs = { x: $x }; + return ENGINE.runKernel(Sigmoid$1, inputs); + } + const sigmoid$2 = /* @__PURE__ */ op({ sigmoid_ }); + + /** + * @license + * Copyright 2018 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + /** + * Extracts a slice from a `tf.Tensor` starting at coordinates `begin` + * and is of size `size`. 
+ * + * Also available are stricter rank-specific methods with the same signature + * as this method that assert that `x` is of the given rank: + * - `tf.slice1d` + * - `tf.slice2d` + * - `tf.slice3d` + * - `tf.slice4d` + * + * ```js + * const x = tf.tensor1d([1, 2, 3, 4]); + * + * x.slice([1], [2]).print(); + * ``` + * + * ```js + * const x = tf.tensor2d([1, 2, 3, 4], [2, 2]); + * + * x.slice([1, 0], [1, 2]).print(); + * ``` + * @param x The input `tf.Tensor` to slice from. + * @param begin The coordinates to start the slice from. The length can be + * less than the rank of x - the rest of the axes will have implicit 0 as + * start. Can also be a single number, in which case it specifies the + * first axis. + * @param size The size of the slice. The length can be less than the rank of + * x - the rest of the axes will have implicit -1. A value of -1 requests + * the rest of the dimensions in the axis. Can also be a single number, + * in which case it specifies the size of the first axis. + * + * @doc {heading: 'Tensors', subheading: 'Slicing and Joining'} + */ + function slice_(x, begin, size) { + const $x = convertToTensor(x, 'x', 'slice', 'string_or_numeric'); + if ($x.rank === 0) { + throw new Error('Slicing scalar is not possible'); + } + const inputs = { x: $x }; + const attrs = { begin, size }; + return ENGINE.runKernel(Slice, inputs, attrs); + } + const slice$2 = /* @__PURE__ */ op({ slice_ }); + + /** + * @license + * Copyright 2018 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + /** + * Computes hyperbolic tangent of the input `tf.Tensor` element-wise: `tanh(x)` + * + * ```js + * const x = tf.tensor1d([0, 1, -1, 70]); + * + * x.tanh().print(); // or tf.tanh(x) + * ``` + * @param x The input tensor. + * + * @doc {heading: 'Operations', subheading: 'Basic math'} + */ + function tanh_(x) { + const $x = convertToTensor(x, 'x', 'tanh', 'float32'); + const inputs = { x: $x }; + return ENGINE.runKernel(Tanh$1, inputs); + } + const tanh$2 = /* @__PURE__ */ op({ tanh_ }); + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + /** + * Computes the next state and output of a BasicLSTMCell. + * + * Returns `[newC, newH]`. + * + * Derived from tf.contrib.rnn.BasicLSTMCell. + * + * @param forgetBias Forget bias for the cell. + * @param lstmKernel The weights for the cell. + * @param lstmBias The bias for the cell. + * @param data The input to the cell. + * @param c Previous cell state. + * @param h Previous cell output. 
+ * + * @doc {heading: 'Operations', subheading: 'RNN'} + */ + function basicLSTMCell_(forgetBias, lstmKernel, lstmBias, data, c, h) { + const $forgetBias = convertToTensor(forgetBias, 'forgetBias', 'basicLSTMCell'); + const $lstmKernel = convertToTensor(lstmKernel, 'lstmKernel', 'basicLSTMCell'); + const $lstmBias = convertToTensor(lstmBias, 'lstmBias', 'basicLSTMCell'); + const $data = convertToTensor(data, 'data', 'basicLSTMCell'); + const $c = convertToTensor(c, 'c', 'basicLSTMCell'); + const $h = convertToTensor(h, 'h', 'basicLSTMCell'); + const combined = concat$2([$data, $h], 1); + const weighted = matMul$1(combined, $lstmKernel); + const res = add$3(weighted, $lstmBias); + // i = input_gate, j = new_input, f = forget_gate, o = output_gate + const batchSize = res.shape[0]; + const sliceCols = res.shape[1] / 4; + const sliceSize = [batchSize, sliceCols]; + const i = slice$2(res, [0, 0], sliceSize); + const j = slice$2(res, [0, sliceCols], sliceSize); + const f = slice$2(res, [0, sliceCols * 2], sliceSize); + const o = slice$2(res, [0, sliceCols * 3], sliceSize); + const newC = add$3(mul(sigmoid$2(i), tanh$2(j)), mul($c, sigmoid$2(add$3($forgetBias, f)))); + const newH = mul(tanh$2(newC), sigmoid$2(o)); + return [newC, newH]; + } + const basicLSTMCell = /* @__PURE__ */ op({ basicLSTMCell_ }); + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ + /** + * This operation reshapes the "batch" dimension 0 into `M + 1` dimensions of + * shape `blockShape + [batch]`, interleaves these blocks back into the grid + * defined by the spatial dimensions `[1, ..., M]`, to obtain a result with + * the same rank as the input. The spatial dimensions of this intermediate + * result are then optionally cropped according to `crops` to produce the + * output. This is the reverse of `tf.spaceToBatchND`. See below for a precise + * description. + * + * ```js + * const x = tf.tensor4d([1, 2, 3, 4], [4, 1, 1, 1]); + * const blockShape = [2, 2]; + * const crops = [[0, 0], [0, 0]]; + * + * x.batchToSpaceND(blockShape, crops).print(); + * ``` + * + * @param x A `tf.Tensor`. N-D with `x.shape` = `[batch] + spatialShape + + * remainingShape`, where spatialShape has `M` dimensions. + * @param blockShape A 1-D array. Must have shape `[M]`, all values must + * be >= 1. + * @param crops A 2-D array. Must have shape `[M, 2]`, all values must be >= 0. + * `crops[i] = [cropStart, cropEnd]` specifies the amount to crop from input + * dimension `i + 1`, which corresponds to spatial dimension `i`. It is required + * that `cropStart[i] + cropEnd[i] <= blockShape[i] * inputShape[i + 1]` + * + * This operation is equivalent to the following steps: + * + * 1. Reshape `x` to `reshaped` of shape: `[blockShape[0], ..., + * blockShape[M-1], batch / prod(blockShape), x.shape[1], ..., + * x.shape[N-1]]` + * + * 2. Permute dimensions of `reshaped` to produce `permuted` of shape `[batch / + * prod(blockShape),x.shape[1], blockShape[0], ..., x.shape[M], + * blockShape[M-1],x.shape[M+1], ..., x.shape[N-1]]` + * + * 3. Reshape `permuted` to produce `reshapedPermuted` of shape `[batch / + * prod(blockShape),x.shape[1] * blockShape[0], ..., x.shape[M] * + * blockShape[M-1],x.shape[M+1], ..., x.shape[N-1]]` + * + * 4. 
Crop the start and end of dimensions `[1, ..., M]` of `reshapedPermuted` + * according to `crops` to produce the output of shape: `[batch / + * prod(blockShape),x.shape[1] * blockShape[0] - crops[0,0] - crops[0,1], + * ..., x.shape[M] * blockShape[M-1] - crops[M-1,0] - + * crops[M-1,1],x.shape[M+1], ..., x.shape[N-1]]` + * + * @doc {heading: 'Tensors', subheading: 'Transformations'} + */ + function batchToSpaceND_(x, blockShape, crops) { + const $x = convertToTensor(x, 'x', 'batchToSpaceND'); + const prod = blockShape.reduce((a, b) => a * b); + assert$1($x.rank >= 1 + blockShape.length, () => `input rank is ${$x.rank} but should be > than blockShape.length ${blockShape.length}`); + assert$1(crops.length === blockShape.length, () => `crops.length is ${crops.length} but should be equal to blockShape.length ${blockShape.length}`); + assert$1($x.shape[0] % prod === 0, () => `input tensor batch is ${$x.shape[0]} but is not divisible by the product of ` + + `the elements of blockShape ${blockShape.join(' * ')} === ${prod}`); + const inputs = { x: $x }; + const attrs = { blockShape, crops }; + return ENGINE.runKernel(BatchToSpaceND, inputs, attrs); + } + const batchToSpaceND$2 = /* @__PURE__ */ op({ batchToSpaceND_ }); + + function xAs4D(x) { + let x4D; + if (x.rank === 0 || x.rank === 1) { + x4D = reshape$3(x, [1, 1, 1, x.size]); + } + else if (x.rank === 2) { + x4D = reshape$3(x, [1, 1, x.shape[0], x.shape[1]]); + } + else if (x.rank === 3) { + x4D = reshape$3(x, [1, x.shape[0], x.shape[1], x.shape[2]]); + } + else { + x4D = x; + } + return x4D; + } + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + /** + * Batch normalization. + * + * As described in + * [http://arxiv.org/abs/1502.03167](http://arxiv.org/abs/1502.03167). + * + * Mean, variance, scale, and offset can be of two shapes: + * - The same shape as the input. + * - In the common case, the depth dimension is the last dimension of x, so + * the values would be a `tf.Tensor1D` of shape [depth]. + * + * Also available are stricter rank-specific methods with the same signature + * as this method that assert that parameters passed are of given rank + * - `tf.batchNorm2d` + * - `tf.batchNorm3d` + * - `tf.batchNorm4d` + * + * @param x The input Tensor. + * @param mean A mean Tensor. + * @param variance A variance Tensor. + * @param offset An offset Tensor. + * @param scale A scale Tensor. + * @param varianceEpsilon A small float number to avoid dividing by 0. 
+ * + * @doc {heading: 'Operations', subheading: 'Normalization'} + */ + function batchNorm_(x, mean, variance, offset, scale, varianceEpsilon) { + if (varianceEpsilon == null) { + varianceEpsilon = 0.001; + } + const $x = convertToTensor(x, 'x', 'batchNorm'); + const $mean = convertToTensor(mean, 'mean', 'batchNorm'); + const $variance = convertToTensor(variance, 'variance', 'batchNorm'); + let $scale; + if (scale != null) { + $scale = convertToTensor(scale, 'scale', 'batchNorm'); + } + let $offset; + if (offset != null) { + $offset = convertToTensor(offset, 'offset', 'batchNorm'); + } + assert$1($mean.rank === $variance.rank, () => 'Batch normalization gradient requires mean and variance to have ' + + 'equal ranks.'); + assert$1($offset == null || $mean.rank === $offset.rank, () => 'Batch normalization gradient requires mean and offset to have ' + + 'equal ranks.'); + assert$1($scale == null || $mean.rank === $scale.rank, () => 'Batch normalization gradient requires mean and scale to have ' + + 'equal ranks.'); + const x4D = xAs4D($x); + const inputs = { + x: x4D, + scale: $scale, + offset: $offset, + mean: $mean, + variance: $variance + }; + const attrs = { varianceEpsilon }; + // tslint:disable-next-line: no-unnecessary-type-assertion + const res = ENGINE.runKernel(FusedBatchNorm, inputs, attrs); + return reshape$3(res, $x.shape); + } + const batchNorm$2 = /* @__PURE__ */ op({ batchNorm_ }); + + /** + * Batch normalization, strictly for 2D. For the more relaxed version, see + * `tf.batchNorm`. + * + * @param x The input Tensor. + * @param mean A mean Tensor. + * @param variance A variance Tensor. + * @param offset An offset Tensor. + * @param scale A scale Tensor. + * @param varianceEpsilon A small float number to avoid dividing by 0. 
+ */ + function batchNorm2d_(x, mean, variance, offset, scale, varianceEpsilon) { + const $x = convertToTensor(x, 'x', 'batchNorm'); + const $mean = convertToTensor(mean, 'mean', 'batchNorm'); + const $variance = convertToTensor(variance, 'variance', 'batchNorm'); + let $scale; + if (scale != null) { + $scale = convertToTensor(scale, 'scale', 'batchNorm'); + } + let $offset; + if (offset != null) { + $offset = convertToTensor(offset, 'offset', 'batchNorm'); + } + assert$1($x.rank === 2, () => `Error in batchNorm2D: x must be rank 2 but got rank ` + + `${$x.rank}.`); + assert$1($mean.rank === 2 || $mean.rank === 1, () => `Error in batchNorm2D: mean must be rank 2 or rank 1 but ` + + `got rank ${$mean.rank}.`); + assert$1($variance.rank === 2 || $variance.rank === 1, () => `Error in batchNorm2D: variance must be rank 2 or rank 1 ` + + `but got rank ${$variance.rank}.`); + if ($scale != null) { + assert$1($scale.rank === 2 || $scale.rank === 1, () => `Error in batchNorm2D: scale must be rank 2 or rank 1 ` + + `but got rank ${$scale.rank}.`); + } + if ($offset != null) { + assert$1($offset.rank === 2 || $offset.rank === 1, () => `Error in batchNorm2D: offset must be rank 2 or rank 1 ` + + `but got rank ${$offset.rank}.`); + } + return batchNorm$2($x, $mean, $variance, $offset, $scale, varianceEpsilon); + } + const batchNorm2d = /* @__PURE__ */ op({ batchNorm2d_ }); + + /** + * Batch normalization, strictly for 3D. For the more relaxed version, see + * `tf.batchNorm`. + * + * @param x The input Tensor. + * @param mean A mean Tensor. + * @param variance A variance Tensor. + * @param offset An offset Tensor. + * @param scale A scale Tensor. + * @param varianceEpsilon A small float number to avoid dividing by 0. 
+ */ + function batchNorm3d_(x, mean, variance, offset, scale, varianceEpsilon) { + const $x = convertToTensor(x, 'x', 'batchNorm'); + const $mean = convertToTensor(mean, 'mean', 'batchNorm'); + const $variance = convertToTensor(variance, 'variance', 'batchNorm'); + let $scale; + if (scale != null) { + $scale = convertToTensor(scale, 'scale', 'batchNorm'); + } + let $offset; + if (offset != null) { + $offset = convertToTensor(offset, 'offset', 'batchNorm'); + } + assert$1($x.rank === 3, () => `Error in batchNorm3D: x must be rank 3 but got rank ` + + `${$x.rank}.`); + assert$1($mean.rank === 3 || $mean.rank === 1, () => `Error in batchNorm3D: mean must be rank 3 or rank 1 but ` + + `got rank ${$mean.rank}.`); + assert$1($variance.rank === 3 || $variance.rank === 1, () => `Error in batchNorm3D: variance must be rank 3 or rank 1 ` + + `but got rank ${$variance.rank}.`); + if ($scale != null) { + assert$1($scale.rank === 3 || $scale.rank === 1, () => `Error in batchNorm3D: scale must be rank 3 or rank 1 ` + + `but got rank ${$scale.rank}.`); + } + if ($offset != null) { + assert$1($offset.rank === 3 || $offset.rank === 1, () => `Error in batchNorm3D: offset must be rank 3 or rank 1 ` + + `but got rank ${$offset.rank}.`); + } + return batchNorm$2($x, $mean, $variance, $offset, $scale, varianceEpsilon); + } + const batchNorm3d = /* @__PURE__ */ op({ batchNorm3d_ }); + + /** + * Batch normalization, strictly for 4D. For the more relaxed version, see + * `tf.batchNorm`. + * + * @param x The input Tensor. + * @param mean A mean Tensor. + * @param variance A variance Tensor. + * @param offset An offset Tensor. + * @param scale A scale Tensor. + * @param varianceEpsilon A small float number to avoid dividing by 0. 
+ */ + function batchNorm4d_(x, mean, variance, offset, scale, varianceEpsilon) { + const $x = convertToTensor(x, 'x', 'batchNorm'); + const $mean = convertToTensor(mean, 'mean', 'batchNorm'); + const $variance = convertToTensor(variance, 'variance', 'batchNorm'); + let $scale; + if (scale != null) { + $scale = convertToTensor(scale, 'scale', 'batchNorm'); + } + let $offset; + if (offset != null) { + $offset = convertToTensor(offset, 'offset', 'batchNorm'); + } + assert$1($x.rank === 4, () => `Error in batchNorm4D: x must be rank 4 but got rank ` + + `${$x.rank}.`); + assert$1($mean.rank === 4 || $mean.rank === 1, () => `Error in batchNorm4D: mean must be rank 4 or rank 1 but ` + + `got rank ${$mean.rank}.`); + assert$1($variance.rank === 4 || $variance.rank === 1, () => `Error in batchNorm4D: variance must be rank 4 or rank 1 ` + + `but got rank ${$variance.rank}.`); + if ($scale != null) { + assert$1($scale.rank === 4 || $scale.rank === 1, () => `Error in batchNorm4D: scale must be rank 4 or rank 1 ` + + `but got rank ${$scale.rank}.`); + } + if ($offset != null) { + assert$1($offset.rank === 4 || $offset.rank === 1, () => `Error in batchNorm4D: offset must be rank 4 or rank 1 ` + + `but got rank ${$offset.rank}.`); + } + return batchNorm$2($x, $mean, $variance, $offset, $scale, varianceEpsilon); + } + const batchNorm4d = /* @__PURE__ */ op({ batchNorm4d_ }); + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + /** + * Outputs a vector with length `size` and the same dtype as `weights`. + * + * If `weights` are empty, then index `i` stores the number of times the value + * `i` is counted in `x`. If `weights` are non-empty, then index `i` stores the + * sum of the value in `weights` at each index where the corresponding value in + * `x` is `i`. + * + * Values in `x` outside of the range [0, size) are ignored. + * + * @param x The input int tensor, rank 1. + * @param weights The weights tensor, must have the same shape as x, or a + * length-0 Tensor, in which case it acts as all weights equal to 1. + * @param size Non-negative integer. + * + * @doc {heading: 'Operations', subheading: 'Reduction'} + */ + function bincount_(x, weights, size) { + const $x = convertToTensor(x, 'x', 'bincount'); + const $weights = convertToTensor(weights, 'weights', 'bincount'); + assert$1($x.dtype === 'int32', () => `Error in bincount: input ` + + `dtype must be int32, but got ${$x.dtype}`); + assert$1(size >= 0, () => `size must be non-negative, but got ${size}.`); + assert$1($weights.size === $x.size || $weights.size === 0, () => `Error in bincount: weights must have the same size as input or` + + `0-length, but got input shape: ${$x.shape}, weights shape: ` + + `${$weights.shape}.`); + const inputs = { x: $x, weights: $weights }; + const attrs = { size }; + return ENGINE.runKernel(Bincount, inputs, attrs); + } + const bincount$2 = /* @__PURE__ */ op({ bincount_ }); + + /** + * @license + * Copyright 2023 Google LLC. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + /** + * Bitwise `AND` operation for input tensors. + * + * Given two input tensors, returns a new tensor + * with the `AND` calculated values. + * + * The method supports int32 values + * + * + * ```js + * const x = tf.tensor1d([0, 5, 3, 14], 'int32'); + * const y = tf.tensor1d([5, 0, 7, 11], 'int32'); + * tf.bitwiseAnd(x, y).print(); + * ``` + * + * @param x The input tensor to be calculated. + * @param y The input tensor to be calculated. + * + * @doc {heading: 'Operations', subheading: 'Logical'} + */ + function bitwiseAnd_(x, y) { + const $x = convertToTensor(x, 'x', 'bitwiseAnd'); + const $y = convertToTensor(y, 'y', 'bitwiseAnd'); + if (!arraysEqual($x.shape, $y.shape)) { + throw new Error(`BitwiseAnd: Tensors must have the same shape. x: ${$x.shape}, y: ${$y.shape}`); + } + if ($x.dtype !== 'int32' || $y.dtype !== 'int32') { + throw new Error(`BitwiseAnd: Only supports 'int32' values in tensor, found type of x: ${$x.dtype} and type of y: ${$y.dtype}`); + } + const inputs = { a: $x, b: $y }; + return ENGINE.runKernel(BitwiseAnd, inputs); + } + const bitwiseAnd$2 = /* @__PURE__ */ op({ bitwiseAnd_ }); + + /** + * @license + * Copyright 2021 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + /** + * Return the shape of s0 op s1 with broadcast. + * + * compute r0, the broadcasted shape as a tensor. + * s0, s1 and r0 are all integer vectors. + * + * This function returns the shape of the result of an operation between + * two tensors of size s0 and s1 performed with broadcast. + * + * @param s0 A tensor representing a shape + * @param s1 A tensor representing a shape + * + * @doc {heading: 'Tensors', subheading: 'Transformations'} + */ + function broadcastArgs_(s0, s1) { + const shape1Input = convertToTensor(s0, 's0', 'broadcastArgs', 'int32'); + const shape2Input = convertToTensor(s1, 's1', 'broadcastArgs', 'int32'); + if (shape1Input.rank !== 1) { + throw new Error('broadcastArgs(): first input must be a vector (rank=1). ' + + `Has rank ${shape1Input.rank}`); + } + if (shape2Input.rank !== 1) { + throw new Error('broadcastArgs(): second input must be a vector (rank=1). ' + + `Has rank ${shape2Input.rank}`); + } + const inputs = { s0: shape1Input, s1: shape2Input }; + return ENGINE.runKernel(BroadcastArgs, inputs); + } + const broadcastArgs$2 = /* @__PURE__ */ op({ broadcastArgs_ }); + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + /** + * Broadcast an array to a compatible shape NumPy-style. + * + * The tensor's shape is compared to the broadcast shape from end to beginning. + * Ones are prepended to the tensor's shape until it has the same length as + * the broadcast shape. If input.shape[i]==shape[i], the (i+1)-th axis is + * already broadcast-compatible. If input.shape[i]==1 and shape[i]==N, then + * the input tensor is tiled N times along that axis (using tf.tile). + * + * @param input The tensor that is to be broadcasted. + * @param shape The input is to be broadcast to this shape. + * + * @doc {heading: 'Tensors', subheading: 'Transformations'} + */ + function broadcastTo_(x, shape) { + let input = convertToTensor(x, 'broadcastTo', 'x'); + const xShape = input.shape; + assertNonNegativeIntegerDimensions(shape); + if (shape.length < input.rank) { + throw new Error(`broadcastTo(): shape.length=${shape.length} < input.rank=${input.rank}.`); + } + if (shape.length > input.rank) { + const newShape = input.shape.slice(); + while (newShape.length < shape.length) { + newShape.unshift(1); + } + input = reshape$3(input, newShape); + } + const inputShape = input.shape; + const reps = Array.from(shape); + for (let i = shape.length - 1; i >= 0; i--) { + if (inputShape[i] === shape[i]) { + reps[i] = 1; + } + else if (input.shape[i] !== 1) { + throw new Error(`broadcastTo(): [${xShape}] cannot be broadcast to [${shape}].`); + } + } + const axes = reps.map((n, i) => n > 1 ? 
i : -1).filter(i => i >= 0); + if (axes.length === 0) { + return clone(input); + } + // TODO call broadcastTo kernel directly once backends implement broadcstTo + const inputs = { x: input }; + const attrs = { reps }; + return ENGINE.runKernel(Tile, inputs, attrs); + } + const broadcastTo = /* @__PURE__ */ op({ broadcastTo_ }); + + /** + * @license + * Copyright 2018 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + /** + * Computes ceiling of input `tf.Tensor` element-wise: `ceil(x)` + * + * ```js + * const x = tf.tensor1d([.6, 1.1, -3.3]); + * + * x.ceil().print(); // or tf.ceil(x) + * ``` + * @param x The input Tensor. + * + * @doc {heading: 'Operations', subheading: 'Basic math'} + */ + function ceil_(x) { + const $x = convertToTensor(x, 'x', 'ceil', 'float32'); + const inputs = { x: $x }; + return ENGINE.runKernel(Ceil, inputs); + } + const ceil$2 = /* @__PURE__ */ op({ ceil_ }); + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + /** + * Creates a `tf.Tensor` filled with a scalar value. + * + * ```js + * tf.fill([2, 2], 4).print(); + * ``` + * + * @param shape An array of integers defining the output tensor shape. + * @param value The scalar value to fill the tensor with. + * @param dtype The type of an element in the resulting tensor. Defaults to + * 'float32' if the given param value is a number, otherwise 'string'. + * + * @doc {heading: 'Tensors', subheading: 'Creation'} + */ + function fill$2(shape, value, dtype) { + assertNonNegativeIntegerDimensions(shape); + dtype = dtype || inferDtype(value); + const attrs = { shape, value, dtype }; + return ENGINE.runKernel(Fill, {}, attrs); + } + + /** + * @license + * Copyright 2018 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + /** + * Clips values element-wise. 
`max(min(x, clipValueMax), clipValueMin)` + * + * ```js + * const x = tf.tensor1d([-1, 2, -3, 4]); + * + * x.clipByValue(-2, 3).print(); // or tf.clipByValue(x, -2, 3) + * ``` + * @param x The input tensor. + * @param clipValueMin Lower bound of range to be clipped to. + * @param clipValueMax Upper bound of range to be clipped to. + * + * @doc {heading: 'Operations', subheading: 'Basic math'} + */ + function clipByValue_(x, clipValueMin, clipValueMax) { + const $x = convertToTensor(x, 'x', 'clipByValue'); + assert$1((clipValueMin <= clipValueMax), () => `Error in clip: min (${clipValueMin}) must be ` + + `less than or equal to max (${clipValueMax}).`); + if (clipValueMin === clipValueMax) { + return fill$2($x.shape, clipValueMin, $x.dtype); + } + const inputs = { x: $x }; + const attrs = { clipValueMin, clipValueMax }; + return ENGINE.runKernel(ClipByValue, inputs, attrs); + } + const clipByValue$2 = /* @__PURE__ */ op({ clipByValue_ }); + + /** + * Concatenates a list of`tf.Tensor1D`s along an axis. See `concat` for details. + * + * For example, if: + * A: shape(3) = |r1, g1, b1| + * B: shape(2) = |r2, g2| + * C = tf.concat1d([A, B]) == |r1, g1, b1, r2, g2| + * + * @param tensors A list of`tf.Tensor`s to concatenate. + * @return The concatenated array. + */ + function concat1d_(tensors) { + return concat$2(tensors, 0 /* axis */); + } + const concat1d = /* @__PURE__ */ op({ concat1d_ }); + + /** + * Concatenates a list of`tf.Tensor2D`s along an axis. See `concat` for details. + * + * For example, if: + * A: shape(2, 3) = | r1, g1, b1 | + * | r2, g2, b2 | + * + * B: shape(2, 3) = | r3, g3, b3 | + * | r4, g4, b4 | + * + * C = tf.concat2d([A, B], axis) + * + * if axis = 0: + * C: shape(4, 3) = | r1, g1, b1 | + * | r2, g2, b2 | + * | r3, g3, b3 | + * | r4, g4, b4 | + * + * if axis = 1: + * C = shape(2, 6) = | r1, g1, b1, r3, g3, b3 | + * | r2, g2, b2, r4, g4, b4 | + * + * + * @param tensors A list of `tf.Tensor`s to concatenate. 
+ * @param axis The axis to concatenate along. + * @return The concatenated array. + */ + function concat2d_(tensors, axis) { + return concat$2(tensors, axis); + } + const concat2d = /* @__PURE__ */ op({ concat2d_ }); + + /** + * Concatenates a list of `tf.Tensor3D`s along an axis. + * See `concat` for details. + * + * For example, if: + * A: shape(2, 1, 3) = | r1, g1, b1 | + * | r2, g2, b2 | + * + * B: shape(2, 1, 3) = | r3, g3, b3 | + * | r4, g4, b4 | + * + * C = tf.concat3d([A, B], axis) + * + * if axis = 0: + * C: shape(4, 1, 3) = | r1, g1, b1 | + * | r2, g2, b2 | + * | r3, g3, b3 | + * | r4, g4, b4 | + * + * if axis = 1: + * C: shape(2, 2, 3) = | r1, g1, b1, r3, g3, b3 | + * | r2, g2, b2, r4, g4, b4 | + * + * if axis = 2: + * C = shape(2, 1, 6) = | r1, g1, b1, r3, g3, b3 | + * | r2, g2, b2, r4, g4, b4 | + * + * @param tensors A list of`tf.Tensor`s to concatenate. + * @param axis The axis to concate along. + * @return The concatenated array. + */ + function concat3d_(tensors, axis) { + return concat$2(tensors, axis); + } + const concat3d = /* @__PURE__ */ op({ concat3d_ }); + + /** + * Concatenates a list of `tf.Tensor4D`s along an axis. + * See `concat` for details. + * + * @param tensors A list of `tf.Tensor`s to concatenate. + * @param axis The axis to concate along. + * @return The concatenated array. + */ + function concat4d_(tensors, axis) { + return concat$2(tensors, axis); + } + const concat4d = /* @__PURE__ */ op({ concat4d_ }); + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + /** + * Computes a 2D convolution over the input x. + * + * @param x The input tensor, of rank 4 or rank 3, of shape + * `[batch, height, width, inChannels]`. If rank 3, batch of 1 is + * assumed. + * @param filter The filter, rank 4, of shape + * `[filterHeight, filterWidth, inDepth, outDepth]`. + * @param strides The strides of the convolution: `[strideHeight, + * strideWidth]`. + * @param pad The type of padding algorithm. + * - `same` and stride 1: output will be of same size as input, + * regardless of filter size. + * - `valid`: output will be smaller than input if filter is larger + * than 1x1. + * - For more info, see this guide: + * [https://www.tensorflow.org/api_docs/python/tf/nn/convolution]( + * https://www.tensorflow.org/api_docs/python/tf/nn/convolution) + * @param dataFormat: An optional string from: "NHWC", "NCHW". Defaults to + * "NHWC". Specify the data format of the input and output data. With the + * default format "NHWC", the data is stored in the order of: [batch, + * height, width, channels]. + * @param dilations The dilation rates: `[dilationHeight, dilationWidth]` + * in which we sample input values across the height and width dimensions + * in atrous convolution. Defaults to `[1, 1]`. If `dilations` is a single + * number, then `dilationHeight == dilationWidth`. If it is greater than + * 1, then all values of `strides` must be 1. + * @param dimRoundingMode A string from: 'ceil', 'round', 'floor'. If none is + * provided, it will default to truncate. 
+ * + * @doc {heading: 'Operations', subheading: 'Convolution'} + */ + function conv2d_(x, filter, strides, pad, dataFormat = 'NHWC', dilations = [1, 1], dimRoundingMode) { + const $x = convertToTensor(x, 'x', 'conv2d', 'float32'); + const $filter = convertToTensor(filter, 'filter', 'conv2d', 'float32'); + let x4D = $x; + let reshapedTo4D = false; + if ($x.rank === 3) { + reshapedTo4D = true; + x4D = reshape$3($x, [1, $x.shape[0], $x.shape[1], $x.shape[2]]); + } + assert$1(x4D.rank === 4, () => `Error in conv2d: input must be rank 4, but got rank ${x4D.rank}.`); + assert$1($filter.rank === 4, () => `Error in conv2d: filter must be rank 4, but got rank ` + + `${$filter.rank}.`); + checkPadOnDimRoundingMode('conv2d', pad, dimRoundingMode); + const inDepth = dataFormat === 'NHWC' ? x4D.shape[3] : x4D.shape[1]; + assert$1(inDepth === $filter.shape[2], () => `Error in conv2d: depth of input (${inDepth}) must match ` + + `input depth for filter ${$filter.shape[2]}.`); + assert$1(eitherStridesOrDilationsAreOne(strides, dilations), () => 'Error in conv2D: Either strides or dilations must be 1. ' + + `Got strides ${strides} and dilations '${dilations}'`); + assert$1(stridesOrDilationsArePositive(dilations), () => 'Error in conv2D: Dilated rates should be larger than 0.'); + assert$1(stridesOrDilationsArePositive(strides), () => 'Error in conv2D: Strides should be larger than 0.'); + const inputs = { x: x4D, filter: $filter }; + const attrs = { strides, pad, dataFormat, dilations, dimRoundingMode }; + // tslint:disable-next-line: no-unnecessary-type-assertion + const res = ENGINE.runKernel(Conv2D$1, inputs, attrs); + if (reshapedTo4D) { + return reshape$3(res, [res.shape[1], res.shape[2], res.shape[3]]); + } + return res; + } + const conv2d$4 = /* @__PURE__ */ op({ conv2d_ }); + + /** + * Computes a 1D convolution over the input x. + * + * @param x The input tensor, of rank 3 or rank 2, of shape + * `[batch, width, inChannels]`. If rank 2, batch of 1 is assumed. 
+ * @param filter The filter, rank 3, of shape + * `[filterWidth, inDepth, outDepth]`. + * @param stride The number of entries by which the filter is moved right at + * each step. + * @param pad The type of padding algorithm. + * - `same` and stride 1: output will be of same size as input, + * regardless of filter size. + * - `valid`: output will be smaller than input if filter is larger + * than 1x1. + * - For more info, see this guide: + * [https://www.tensorflow.org/api_docs/python/tf/nn/convolution]( + * https://www.tensorflow.org/api_docs/python/tf/nn/convolution) + * @param dataFormat An optional string from "NWC", "NCW". Defaults to "NWC", + * the data is stored in the order of [batch, in_width, in_channels]. Only + * "NWC" is currently supported. + * @param dilation The dilation rate in which we sample input values in + * atrous convolution. Defaults to `1`. If it is greater than 1, then + * stride must be `1`. + * @param dimRoundingMode A string from: 'ceil', 'round', 'floor'. If none is + * provided, it will default to truncate. 
+ * + * @doc {heading: 'Operations', subheading: 'Convolution'} + */ + function conv1d_(x, filter, stride, pad, dataFormat = 'NWC', dilation = 1, dimRoundingMode) { + const $x = convertToTensor(x, 'x', 'conv1d'); + const $filter = convertToTensor(filter, 'filter', 'conv1d'); + let x3D = $x; + let reshapedTo3D = false; + if ($x.rank === 2) { + reshapedTo3D = true; + x3D = reshape$3($x, [1, $x.shape[0], $x.shape[1]]); + } + assert$1(x3D.rank === 3, () => `Error in conv1d: input must be rank 3, but got rank ${x3D.rank}.`); + assert$1($filter.rank === 3, () => `Error in conv1d: filter must be rank 3, but got rank ` + + `${$filter.rank}.`); + checkPadOnDimRoundingMode('conv1d', pad, dimRoundingMode); + assert$1(x3D.shape[2] === $filter.shape[1], () => `Error in conv1d: depth of input (${x3D.shape[2]}) must match ` + + `input depth for filter ${$filter.shape[1]}.`); + assert$1(eitherStridesOrDilationsAreOne(stride, dilation), () => 'Error in conv1D: Either stride or dilation must be 1. ' + + `Got stride ${stride} and dilation '${dilation}'`); + assert$1(stridesOrDilationsArePositive(dilation), () => 'Error in conv1D: Dilated rates should be larger than 0.'); + assert$1(stridesOrDilationsArePositive(stride), () => 'Error in conv1D: Stride should be larger than 0.'); + assert$1(dataFormat === 'NWC', () => `Error in conv1d: got dataFormat of ${dataFormat} but only NWC is currently supported.`); + const filter4D = reshape$3($filter, [1, $filter.shape[0], $filter.shape[1], $filter.shape[2]]); + const input4D = reshape$3(x3D, [x3D.shape[0], 1, x3D.shape[1], x3D.shape[2]]); + const strides = [1, stride]; + const dilations = [1, dilation]; + const conv2dDataFormat = 'NHWC'; + const res = conv2d$4(input4D, filter4D, strides, pad, conv2dDataFormat, dilations, dimRoundingMode); + if (reshapedTo3D) { + return reshape$3(res, [res.shape[2], res.shape[3]]); + } + return reshape$3(res, [res.shape[0], res.shape[2], res.shape[3]]); + } + const conv1d$2 = /* @__PURE__ */ op({ conv1d_ }); + 
+ /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + /** + * Computes the derivative of the input of a 2D convolution. + * + * @param xShape The shape of the input: [batch, height, width, inDepth]. + * If length of 3, batch of 1 is assumed. + * @param dy The derivative of the output, of rank 4 or rank 3 of shape + * `[batch, outHeight, outWidth, outDepth]`. If rank 3, batch of 1 is + * assumed. + * @param filter The filter, rank 4, of shape + * `[filterHeight, filterWidth, inDepth, outDepth]`. + * @param strides The strides of the convolution: `[strideHeight, + * strideWidth]`. + * @param pad The type of padding algorithm used: + * - `same` and stride 1: output will be of same size as input, + * regardless of filter size. + * - `valid`: output will be smaller than input if filter is larger + * than 1x1. + * @param dataFormat: An optional string from: "NHWC", "NCHW". Defaults to + * "NHWC". Specify the data format of the input and output data. With the + * default format "NHWC", the data is stored in the order of: [batch, + * height, width, channels]. + * @param dimRoundingMode A string from: 'ceil', 'round', 'floor'. If none is + * provided, it will default to truncate. 
+ */ + function conv2DBackpropInput_(xShape, dy, filter, strides, pad, dataFormat = 'NHWC', dimRoundingMode) { + assert$1(xShape.length === dy.rank, () => `Length of inShape ` + + `(${xShape.length}) and rank of dy (${dy.rank}) must match`); + let xShape4D = xShape; + let dy4D = dy; + let reshapedTo4D = false; + if (dy.rank === 3) { + reshapedTo4D = true; + dy4D = reshape$3(dy, [1, dy.shape[0], dy.shape[1], dy.shape[2]]); + xShape4D = [1, xShape[0], xShape[1], xShape[2]]; + } + assert$1(xShape4D.length === 4, () => `Error in conv2dDerInput: inShape must be length 4, but got length ` + + `${xShape4D.length}.`); + assert$1(dy4D.rank === 4, () => `Error in conv2dDerInput: dy must be rank 4, but got ` + + `rank ${dy4D.rank}`); + assert$1(filter.rank === 4, () => `Error in conv2dDerInput: filter must be rank 4, but got ` + + `rank ${filter.rank}`); + const inDepth = dataFormat === 'NHWC' ? xShape4D[3] : xShape4D[1]; + const outDepth = dataFormat === 'NHWC' ? dy4D.shape[3] : dy4D.shape[1]; + assert$1(inDepth === filter.shape[2], () => `Error in conv2dDerInput: depth of input (${inDepth}) must ` + + `match input depth for filter ${filter.shape[2]}.`); + assert$1(outDepth === filter.shape[3], () => `Error in conv2dDerInput: depth of output (${outDepth}) must ` + + `match output depth for filter ${filter.shape[3]}.`); + checkPadOnDimRoundingMode('conv2dDerInput', pad, dimRoundingMode); + const inputs = { dy: dy4D, filter }; + const attrs = { strides, pad, dataFormat, dimRoundingMode, inputShape: xShape4D }; + // tslint:disable-next-line: no-unnecessary-type-assertion + const res = ENGINE.runKernel(Conv2DBackpropInput, inputs, attrs); + if (reshapedTo4D) { + return reshape$3(res, [res.shape[1], res.shape[2], res.shape[3]]); + } + return res; + } + const conv2DBackpropInput$2 = /* @__PURE__ */ op({ conv2DBackpropInput_ }); + + /** + * Computes the transposed 2D convolution of an image, also known as a + * deconvolution. 
+ * + * @param x The input image, of rank 4 or rank 3, of shape + * `[batch, height, width, inDepth]`. If rank 3, batch of 1 is assumed. + * @param filter The filter, rank 4, of shape + * `[filterHeight, filterWidth, outDepth, inDepth]`. + * `inDepth` must match `inDepth` in `x`. + * @param outputShape Output shape, of rank 4 or rank 3: + * `[batch, height, width, outDepth]`. If rank 3, batch of 1 is assumed. + * @param strides The strides of the original convolution: + * `[strideHeight, strideWidth]`. + * @param pad The type of padding algorithm used in the non-transpose version + * of the op. + * @param dimRoundingMode A string from: 'ceil', 'round', 'floor'. If none is + * provided, it will default to truncate. + * + * @doc {heading: 'Operations', subheading: 'Convolution'} + */ + function conv2dTranspose_(x, filter, outputShape, strides, pad, dimRoundingMode) { + const $x = convertToTensor(x, 'x', 'conv2dTranspose'); + const $filter = convertToTensor(filter, 'filter', 'conv2dTranspose'); + return conv2DBackpropInput$2(outputShape, $x, $filter, strides, pad, 'NHWC', dimRoundingMode); + } + const conv2dTranspose$1 = /* @__PURE__ */ op({ conv2dTranspose_ }); + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + /** + * Computes a 3D convolution over the input x. 
+ * + * @param x The input tensor, of rank 5 or rank 4, of shape + * `[batch, depth, height, width, channels]`. If rank 4, + * batch of 1 is assumed. + * @param filter The filter, rank 5, of shape + * `[filterDepth, filterHeight, filterWidth, inChannels, outChannels]`. + * inChannels must match between input and filter. + * @param strides The strides of the convolution: `[strideDepth, strideHeight, + * strideWidth]`. + * @param pad The type of padding algorithm. + * - `same` and stride 1: output will be of same size as input, + * regardless of filter size. + * - `valid`: output will be smaller than input if filter is larger + * than 1x1. + * - For more info, see this guide: + * [https://www.tensorflow.org/api_docs/python/tf/nn/convolution]( + * https://www.tensorflow.org/api_docs/python/tf/nn/convolution) + * @param dataFormat: An optional string from: "NDHWC", "NCDHW". Defaults to + * "NDHWC". Specify the data format of the input and output data. With the + * default format "NDHWC", the data is stored in the order of: [batch, + * depth, height, width, channels]. Only "NDHWC" is currently supported. + * @param dilations The dilation rates: `[dilationDepth, dilationHeight, + * dilationWidth]` in which we sample input values across the height + * and width dimensions in atrous convolution. Defaults to `[1, 1, 1]`. + * If `dilations` is a single number, then + * `dilationDepth == dilationHeight == dilationWidth`. If it is greater + * than 1, then all values of `strides` must be 1. 
+ * + * @doc {heading: 'Operations', subheading: 'Convolution'} + */ + function conv3d_(x, filter, strides, pad, dataFormat = 'NDHWC', dilations = [1, 1, 1]) { + const $x = convertToTensor(x, 'x', 'conv3d'); + const $filter = convertToTensor(filter, 'filter', 'conv3d'); + let x5D = $x; + let reshapedTo5D = false; + if ($x.rank === 4) { + reshapedTo5D = true; + x5D = reshape$3($x, [1, $x.shape[0], $x.shape[1], $x.shape[2], $x.shape[3]]); + } + assert$1(x5D.rank === 5, () => `Error in conv3d: input must be rank 5, but got rank ${x5D.rank}.`); + assert$1($filter.rank === 5, () => `Error in conv3d: filter must be rank 5, but got rank ` + + `${$filter.rank}.`); + assert$1(x5D.shape[4] === $filter.shape[3], () => `Error in conv3d: depth of input (${x5D.shape[4]}) must match ` + + `input depth for filter ${$filter.shape[3]}.`); + assert$1(eitherStridesOrDilationsAreOne(strides, dilations), () => 'Error in conv3D: Either strides or dilations must be 1. ' + + `Got strides ${strides} and dilations '${dilations}'`); + assert$1(dataFormat === 'NDHWC', () => `Error in conv3d: got dataFormat of ${dataFormat} but only NDHWC is currently supported.`); + assert$1(stridesOrDilationsArePositive(dilations), () => 'Error in conv3D: Dilated rates should be larger than 0.'); + assert$1(stridesOrDilationsArePositive(strides), () => 'Error in conv3D: Strides should be larger than 0.'); + const inputs = { x: x5D, filter: $filter }; + const attrs = { strides, pad, dataFormat, dilations }; + // tslint:disable-next-line: no-unnecessary-type-assertion + const res = ENGINE.runKernel(Conv3D$1, inputs, attrs); + if (reshapedTo5D) { + return reshape$3(res, [res.shape[1], res.shape[2], res.shape[3], res.shape[4]]); + } + return res; + } + const conv3d$2 = /* @__PURE__ */ op({ conv3d_ }); + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + /** + * Computes the derivative of the input of a 3D convolution. + * + * @param xShape The shape of the input: [batch, depth, height, width, + * in_channels]. If length of 4, batch of 1 is assumed. + * @param dy The derivative of the output, of rank 5 or rank 4 of shape + * `[batch, outDepth, outHeight, outWidth, in_channels]`. + * If rank 4, batch of 1 is assumed. + * @param filter The filter, rank 5, of shape + * `[filterDepth, filterHeight, filterWidth, inDepth, outDepth]`. + * @param strides The strides of the convolution: `[strideDepth, strideHeight, + * strideWidth]`. + * @param pad The type of padding algorithm used: + * - `same` and stride 1: output will be of same size as input, + * regardless of filter size. + * - `valid`: output will be smaller than input if filter is larger + * than 1x1. 
+ */ + function conv3DBackpropInput_(xShape, dy, filter, strides, pad) { + assert$1(xShape.length === dy.rank, () => `Length of inShape ` + + `(${xShape.length}) and rank of dy (${dy.rank}) must match`); + let xShape5D = xShape; + let dy5D = dy; + let reshapedTo5D = false; + if (dy.rank === 4) { + reshapedTo5D = true; + dy5D = reshape$3(dy, [1, dy.shape[0], dy.shape[1], dy.shape[2], dy.shape[3]]); + xShape5D = [1, xShape[0], xShape[1], xShape[2], xShape[3]]; + } + const inDepth = xShape5D[4]; + const outDepth = dy5D.shape[4]; + assert$1(xShape5D.length === 5, () => `Error in conv3dDerInput: inShape must be length 5, but got length ` + + `${xShape5D.length}.`); + assert$1(dy5D.rank === 5, () => `Error in conv3dDerInput: dy must be rank 5, but got ` + + `rank ${dy5D.rank}`); + assert$1(filter.rank === 5, () => `Error in conv3dDerInput: filter must be rank 5, but got ` + + `rank ${filter.rank}`); + assert$1(inDepth === filter.shape[3], () => `Error in conv3dDerInput: depth of input (${inDepth}) must ` + + `match input depth for filter ${filter.shape[3]}.`); + assert$1(outDepth === filter.shape[4], () => `Error in conv3dDerInput: depth of output (${outDepth}) must ` + + `match output depth for filter ${filter.shape[4]}.`); + const inputs = { dy: dy5D, filter }; + const attrs = { pad, strides, inputShape: xShape5D }; + // tslint:disable-next-line: no-unnecessary-type-assertion + const res = ENGINE.runKernel(Conv3DBackpropInputV2, inputs, attrs); + if (reshapedTo5D) { + return reshape$3(res, [res.shape[1], res.shape[2], res.shape[3], res.shape[4]]); + } + return res; + } + const conv3DBackpropInput$1 = /* @__PURE__ */ op({ conv3DBackpropInput_ }); + + /** + * Computes the transposed 3D convolution of a volume, also known as a + * deconvolution. + * + * @param x The input image, of rank 5 or rank 4, of shape + * `[batch, depth, height, width, inDepth]`. If rank 4, batch of 1 is assumed. 
+ * @param filter The filter, rank 4, of shape + * `[depth, filterHeight, filterWidth, outDepth, inDepth]`. + * `inDepth` must match `inDepth` in `x`. + * @param outputShape Output shape, of rank 5 or rank 4: + * `[batch, depth, height, width, outDepth]`. If rank 3, batch of 1 is + * assumed. + * @param strides The strides of the original convolution: + * `[strideDepth, strideHeight, strideWidth]`. + * @param pad The type of padding algorithm used in the non-transpose version + * of the op. + * + * @doc {heading: 'Operations', subheading: 'Convolution'} + */ + function conv3dTranspose_(x, filter, outputShape, strides, pad) { + const $x = convertToTensor(x, 'x', 'conv3dTranspose'); + const $filter = convertToTensor(filter, 'filter', 'conv3dTranspose'); + return conv3DBackpropInput$1(outputShape, $x, $filter, strides, pad); + } + const conv3dTranspose$1 = /* @__PURE__ */ op({ conv3dTranspose_ }); + + /** + * @license + * Copyright 2018 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + /** + * Computes cos of the input `tf.Tensor` element-wise: `cos(x)` + * + * ```js + * const x = tf.tensor1d([0, Math.PI / 2, Math.PI * 3 / 4]); + * + * x.cos().print(); // or tf.cos(x) + * ``` + * @param x The input tensor. Must be float32 type. 
+ * + * @doc {heading: 'Operations', subheading: 'Basic math'} + */ + function cos_(x) { + const $x = convertToTensor(x, 'x', 'cos', 'float32'); + const inputs = { x: $x }; + return ENGINE.runKernel(Cos, inputs); + } + const cos$2 = /* @__PURE__ */ op({ cos_ }); + + /** + * @license + * Copyright 2018 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + /** + * Computes hyperbolic cos of the input `tf.Tensor` element-wise: `cosh(x)` + * + * ```js + * const x = tf.tensor1d([0, 1, -1, .7]); + * + * x.cosh().print(); // or tf.cosh(x) + * ``` + * @param x The input tensor. Must be float32 type. + * + * @doc {heading: 'Operations', subheading: 'Basic math'} + */ + function cosh_(x) { + const $x = convertToTensor(x, 'x', 'cosh', 'float32'); + const inputs = { x: $x }; + return ENGINE.runKernel(Cosh, inputs); + } + const cosh$2 = /* @__PURE__ */ op({ cosh_ }); + + /** + * @license + * Copyright 2022 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the 'License'); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an 'AS IS' BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + /** + * Computes the cumulative product of a `tf.Tensor` along `axis`. + * + * ```js + * const x = tf.tensor([1, 2, 3, 4]); + * x.cumprod().print(); + * ``` + * ```js + * const x = tf.tensor([[1, 2], [3, 4]]); + * x.cumprod().print(); + * ``` + * + * @param x The input tensor to cumulatively multiply. + * @param axis The axis along which to multiply. Optional. Defaults to 0. + * @param exclusive Whether to perform exclusive cumulative product. Optional. + * Defaults to false. If set to true then the product of each tensor entry + * does not include its own value, but only the values previous to it + * along the specified axis. + * @param reverse Whether to multiply in the opposite direction. Optional. + * Defaults to false. + * + * @doc {heading: 'Operations', subheading: 'Scan'} + */ + function cumprod_(x, axis = 0, exclusive = false, reverse = false) { + const $x = convertToTensor(x, 'x', 'cumprod'); + const inputs = { x: $x }; + const attrs = { axis, exclusive, reverse }; + return ENGINE.runKernel(Cumprod, inputs, attrs); + } + const cumprod$2 = /* @__PURE__ */ op({ cumprod_ }); + + /** + * @license + * Copyright 2018 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + /** + * Computes the cumulative sum of a `tf.Tensor` along `axis`. + * + * ```js + * const x = tf.tensor([1, 2, 3, 4]); + * x.cumsum().print(); + * ``` + * ```js + * const x = tf.tensor([[1, 2], [3, 4]]); + * x.cumsum().print(); + * ``` + * + * @param x The input tensor to be summed. + * @param axis The axis along which to sum. Optional. Defaults to 0. + * @param exclusive Whether to perform exclusive cumulative sum. Optional. + * Defaults to false. If set to true then the sum of each tensor entry + * does not include its own value, but only the values previous to it + * along the specified axis. + * @param reverse Whether to sum in the opposite direction. Optional. + * Defaults to false. + * + * @doc {heading: 'Operations', subheading: 'Scan'} + */ + function cumsum_(x, axis = 0, exclusive = false, reverse = false) { + const $x = convertToTensor(x, 'x', 'cumsum'); + const inputs = { x: $x }; + const attrs = { axis, exclusive, reverse }; + return ENGINE.runKernel(Cumsum, inputs, attrs); + } + const cumsum$2 = /* @__PURE__ */ op({ cumsum_ }); + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + /** + * Outputs a vector with length `size` and the same dtype as `weights`. + * + * If `weights` are empty, then index `i` stores the number of times the value + * `i` is counted in `x`. If `weights` are non-empty, then index `i` stores the + * sum of the value in `weights` at each index where the corresponding value in + * `x` is `i`. + * + * Values in `x` outside of the range [0, size) are ignored. + * + * @param x The input int tensor, rank 1 or rank 2. + * @param weights The weights tensor, must have the same shape as x, or a + * length-0 Tensor, in which case it acts as all weights equal to 1. + * @param size Non-negative integer. + * @param binaryOutput Optional. Whether the kernel should count the appearance + * or number of occurrences. Defaults to False. 
+ * + * @doc {heading: 'Operations', subheading: 'Reduction'} + */ + function denseBincount_(x, weights, size, binaryOutput = false) { + const $x = convertToTensor(x, 'x', 'denseBincount'); + const $weights = convertToTensor(weights, 'weights', 'denseBincount'); + assert$1($x.dtype === 'int32', () => `Error in denseBincount: input ` + + `dtype must be int32, but got ${$x.dtype}`); + assert$1($x.rank <= 2, () => `Error in denseBincount: input must be at most rank 2, but got ` + + `rank ${$x.rank}.`); + assert$1(size >= 0, () => `size must be non-negative, but got ${size}.`); + assert$1($weights.size === $x.size || $weights.size === 0, () => `Error in denseBincount: weights must have the same shape as x or ` + + `0-length, but got x shape: ${$x.shape}, weights shape: ` + + `${$weights.shape}.`); + const inputs = { x: $x, weights: $weights }; + const attrs = { size, binaryOutput }; + return ENGINE.runKernel(DenseBincount, inputs, attrs); + } + const denseBincount$2 = /* @__PURE__ */ op({ denseBincount_ }); + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + /** + * Rearranges data from depth into blocks of spatial data. More specifically, + * this op outputs a copy of the input tensor where values from the `depth` + * dimension are moved in spatial blocks to the `height` and `width` dimensions. 
+ * The attr `blockSize` indicates the input block size and how the data is + * moved. + * + * - Chunks of data of size `blockSize * blockSize` from depth are rearranged + * into non-overlapping blocks of size `blockSize x blockSize` + * + * - The width the output tensor is `inputWidth * blockSize`, whereas the + * height is `inputHeight * blockSize` + * + * - The Y, X coordinates within each block of the output image are determined + * by the high order component of the input channel index + * + * - The depth of the input tensor must be divisible by `blockSize * + * blockSize` + * + * The `dataFormat` attr specifies the layout of the input and output tensors + * with the following options: "NHWC": [ `batch, height, width, channels` ] + * "NCHW": [ `batch, channels, height, width` ] + * + * ```js + * const x = tf.tensor4d([1, 2, 3, 4], [1, 1, 1, 4]); + * const blockSize = 2; + * const dataFormat = "NHWC"; + * + * tf.depthToSpace(x, blockSize, dataFormat).print(); + * ``` + * + * @param x The input tensor of rank 4 + * @param blockSIze An `int` that is `>= 2`. The size of the spatial block + * @param dataFormat An optional string from: "NHWC", "NCHW". Defaults to "NHWC" + * + * @doc {heading: 'Tensors', subheading: 'Transformations'} + */ + function depthToSpace_(x, blockSize, dataFormat = 'NHWC') { + const $x = convertToTensor(x, 'x', 'depthToSpace', 'float32'); + const inputHeight = (dataFormat === 'NHWC') ? $x.shape[1] : $x.shape[2]; + const inputWidth = (dataFormat === 'NHWC') ? $x.shape[2] : $x.shape[3]; + const inputDepth = (dataFormat === 'NHWC') ? 
$x.shape[3] : $x.shape[1]; + assert$1(blockSize > 1, () => `blockSize should be > 1 for depthToSpace, but was: ${blockSize}`); + assert$1(inputHeight * blockSize >= 0, () => `Negative dimension size caused by overflow when multiplying + ${inputHeight} and ${blockSize} for depthToSpace with input shape + ${$x.shape}`); + assert$1(inputWidth * blockSize >= 0, () => `Negative dimension size caused by overflow when multiplying + ${inputWidth} and ${blockSize} for depthToSpace with input shape + ${$x.shape}`); + assert$1((inputDepth % (blockSize * blockSize) === 0), () => `Dimension size must be evenly divisible by ${blockSize * blockSize} but is ${inputDepth} for depthToSpace with input shape ${$x.shape}`); + const inputs = { x: $x }; + const attrs = { blockSize, dataFormat }; + return ENGINE.runKernel(DepthToSpace, inputs, attrs); + } + const depthToSpace$2 = /* @__PURE__ */ op({ depthToSpace_ }); + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + /** + * Depthwise 2D convolution. 
+ * + * Given a 4D `input` array and a `filter` array of shape + * `[filterHeight, filterWidth, inChannels, channelMultiplier]` containing + * `inChannels` convolutional filters of depth 1, this op applies a + * different filter to each input channel (expanding from 1 channel to + * `channelMultiplier` channels for each), then concatenates the results + * together. The output has `inChannels * channelMultiplier` channels. + * + * See + * [https://www.tensorflow.org/api_docs/python/tf/nn/depthwise_conv2d]( + * https://www.tensorflow.org/api_docs/python/tf/nn/depthwise_conv2d) + * for more details. + * + * @param x The input tensor, of rank 4 or rank 3, of shape + * `[batch, height, width, inChannels]`. If rank 3, batch of 1 is + * assumed. + * @param filter The filter tensor, rank 4, of shape + * `[filterHeight, filterWidth, inChannels, channelMultiplier]`. + * @param strides The strides of the convolution: `[strideHeight, + * strideWidth]`. If strides is a single number, then `strideHeight == + * strideWidth`. + * @param pad The type of padding algorithm. + * - `same` and stride 1: output will be of same size as input, + * regardless of filter size. + * - `valid`: output will be smaller than input if filter is larger + * than 1x1. + * - For more info, see this guide: + * [https://www.tensorflow.org/api_docs/python/tf/nn/convolution]( + * https://www.tensorflow.org/api_docs/python/tf/nn/convolution) + * @param dilations The dilation rates: `[dilationHeight, dilationWidth]` + * in which we sample input values across the height and width dimensions + * in atrous convolution. Defaults to `[1, 1]`. If `rate` is a single + * number, then `dilationHeight == dilationWidth`. If it is greater than + * 1, then all values of `strides` must be 1. + * @param dataFormat: An optional string from: "NHWC", "NCHW". Defaults to + * "NHWC". Specify the data format of the input and output data. 
With the + * default format "NHWC", the data is stored in the order of: [batch, + * height, width, channels]. Only "NHWC" is currently supported. + * @param dimRoundingMode A string from: 'ceil', 'round', 'floor'. If none is + * provided, it will default to truncate. + * + * @doc {heading: 'Operations', subheading: 'Convolution'} + */ + function depthwiseConv2d_(x, filter, strides, pad, dataFormat = 'NHWC', dilations = [1, 1], dimRoundingMode) { + const $x = convertToTensor(x, 'x', 'depthwiseConv2d', 'float32'); + const $filter = convertToTensor(filter, 'filter', 'depthwiseConv2d', 'float32'); + let x4D = $x; + let reshapedTo4D = false; + if ($x.rank === 3) { + reshapedTo4D = true; + x4D = reshape$3($x, [1, $x.shape[0], $x.shape[1], $x.shape[2]]); + } + assert$1(x4D.rank === 4, () => `Error in depthwiseConv2d: input must be rank 4, but got ` + + `rank ${x4D.rank}.`); + assert$1($filter.rank === 4, () => `Error in depthwiseConv2d: filter must be rank 4, but got rank ` + + `${$filter.rank}.`); + const inChannels = dataFormat === 'NHWC' ? x4D.shape[3] : x4D.shape[1]; + assert$1(inChannels === $filter.shape[2], () => `Error in depthwiseConv2d: number of input channels ` + + `(${inChannels}) must match the inChannels dimension in ` + + `filter ${$filter.shape[2]}.`); + checkPadOnDimRoundingMode('depthwiseConv2d', pad, dimRoundingMode); + const inputs = { x: x4D, filter: $filter }; + const attrs = { strides, pad, dataFormat, dilations, dimRoundingMode }; + // tslint:disable-next-line: no-unnecessary-type-assertion + const res = ENGINE.runKernel(DepthwiseConv2dNative, inputs, attrs); + if (reshapedTo4D) { + return reshape$3(res, [res.shape[1], res.shape[2], res.shape[3]]); + } + return res; + } + const depthwiseConv2d$3 = /* @__PURE__ */ op({ depthwiseConv2d_ }); + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + /** + * Returns a diagonal tensor with given diagonal values. + * + * Given a diagonal, this operation returns a tensor with the diagonal and + * everything else padded with zeros. + * + * Assume the input has dimensions `[D1,..., Dk]`, then the output is a tensor + * of rank 2k with dimensions `[D1,..., Dk, D1,..., Dk]` + * + * ```js + * const x = tf.tensor1d([1, 2, 3, 4]); + * + * tf.diag(x).print() + * ``` + * ```js + * const x = tf.tensor2d([1, 2, 3, 4, 5, 6, 7, 8], [4, 2]) + * + * tf.diag(x).print() + * ``` + * @param x The input tensor. + * + * @doc {heading: 'Tensors', subheading: 'Creation'} + */ + function diag_(x) { + const $x = convertToTensor(x, 'x', 'diag'); + const inputs = { x: $x }; + return ENGINE.runKernel(Diag, inputs); + } + const diag$2 = /* @__PURE__ */ op({ diag_ }); + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ + /** + * Computes the grayscale dilation over the input `x`. + * + * @param x The input tensor, rank 3 or rank 4 of shape + * `[batch, height, width, depth]`. If rank 3, batch of 1 is assumed. + * @param filter The filter tensor, rank 3, of shape + * `[filterHeight, filterWidth, depth]`. + * @param strides The strides of the sliding window for each dimension of the + * input tensor: `[strideHeight, strideWidth]`. + * If `strides` is a single number, + * then `strideHeight == strideWidth`. + * @param pad The type of padding algorithm. + * - `same` and stride 1: output will be of same size as input, + * regardless of filter size. + * - `valid`: output will be smaller than input if filter is larger + * than 1*1x1. + * - For more info, see this guide: + * [https://www.tensorflow.org/api_docs/python/tf/nn/convolution]( + * https://www.tensorflow.org/api_docs/python/tf/nn/convolution) + * @param dataFormat Specify the data format of the input and output data. + * Defaults to 'NHWC'. Only 'NHWC' is currently supported. With the + * default format "NHWC", the data is stored in the order of: [batch, + * height, width, channels]. + * @param dilations The dilation rates: `[dilationHeight, dilationWidth]` + * in which we sample input values across the height and width dimensions + * for atrous morphological dilation. Defaults to `[1, 1]`. If `dilations` + * is a single number, then `dilationHeight == dilationWidth`. If it is + * greater than 1, then all values of `strides` must be 1. 
+ * + * @doc {heading: 'Operations', subheading: 'Convolution'} + */ + function dilation2d_(x, filter, strides, pad, dilations = [1, 1], dataFormat = 'NHWC') { + const $x = convertToTensor(x, 'x', 'dilation2d'); + const $filter = convertToTensor(filter, 'filter', 'dilation2d'); + assert$1($x.rank === 3 || $x.rank === 4, () => `Error in dilation2d: input must be rank 3 or 4, but got rank ` + + `${$x.rank}.`); + assert$1($filter.rank === 3, () => `Error in dilation2d: filter must be rank 3, but got rank ` + + `${$filter.rank}.`); + assert$1(dataFormat === 'NHWC', () => `Error in dilation2d: Only NHWC is currently supported, ` + + `but got dataFormat of ${dataFormat}`); + let x4D = $x; + let reshapedTo4D = false; + if ($x.rank === 3) { + x4D = reshape$3($x, [1, $x.shape[0], $x.shape[1], $x.shape[2]]); + reshapedTo4D = true; + } + assert$1(x4D.shape[3] === $filter.shape[2], () => `Error in dilation2d: input and filter must have the same depth: ${x4D.shape[3]} vs ${$filter.shape[2]}`); + const inputs = { x: x4D, filter: $filter }; + const attrs = { strides, pad, dilations }; + // tslint:disable-next-line: no-unnecessary-type-assertion + const res = ENGINE.runKernel(Dilation2D, inputs, attrs); + if (reshapedTo4D) { + return reshape$3(res, [res.shape[1], res.shape[2], res.shape[3]]); + } + return res; + } + const dilation2d = /* @__PURE__ */ op({ dilation2d_ }); + + /** + * @license + * Copyright 2017 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + /** + * Returns the dimensions in the input shape that are broadcasted to + * produce the provided output shape. + * + * The returned dimensions are 0-indexed and sorted. An example: + * inShape = [4, 1, 3] + * outShape = [5, 4, 3, 3] + * result = [1]. Dimension 1 (2nd dimension of input) gets broadcasted 1 => 3. + */ + function getBroadcastDims$1(inShape, outShape) { + const inRank = inShape.length; + const dims = []; + for (let i = 0; i < inRank; i++) { + const dim = inRank - 1 - i; + const a = inShape[dim] || 1; + const b = outShape[outShape.length - 1 - i] || 1; + if (b > 1 && a === 1) { + dims.unshift(dim); + } + } + return dims; + } + /** + * Returns the axes in the output space that should be reduced to produce + * the input space. + */ + function getReductionAxes(inShape, outShape) { + const result = []; + for (let i = 0; i < outShape.length; i++) { + const inDim = inShape[inShape.length - i - 1]; + const outAxis = outShape.length - i - 1; + const outDim = outShape[outAxis]; + if (inDim == null || (inDim === 1 && outDim > 1)) { + result.unshift(outAxis); + } + } + return result; + } + function assertAndGetBroadcastShape(shapeA, shapeB) { + const l = Math.max(shapeA.length, shapeB.length); + const result = new Array(l); + for (let i = 0; i < l; i++) { + let a = shapeA[shapeA.length - i - 1]; + if (a == null) { + a = 1; + } + let b = shapeB[shapeB.length - i - 1]; + if (b == null) { + b = 1; + } + if (a === 1) { + result[l - i - 1] = b; + } + else if (b === 1) { + result[l - i - 1] = a; + } + else if (a !== b) { + const errMsg = `Operands could not be broadcast together with shapes ` + + `${shapeA} and ${shapeB}.`; + throw Error(errMsg); + } + else { + result[l - i - 1] = a; + } + } + return result; + } + + var broadcast_util = /*#__PURE__*/Object.freeze({ + 
__proto__: null, + assertAndGetBroadcastShape: assertAndGetBroadcastShape, + getBroadcastDims: getBroadcastDims$1, + getReductionAxes: getReductionAxes + }); + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + /** + * Returns the truth value of (a == b) element-wise. Supports broadcasting. + * + * ```js + * const a = tf.tensor1d([1, 2, 3]); + * const b = tf.tensor1d([2, 2, 2]); + * + * a.equal(b).print(); + * ``` + * + * @param a The first input tensor. + * @param b The second input tensor. Must have the same dtype as `a`. + * + * @doc {heading: 'Operations', subheading: 'Logical'} + */ + function equal_(a, b) { + let $a = convertToTensor(a, 'a', 'equal', 'string_or_numeric'); + let $b = convertToTensor(b, 'b', 'equal', 'string_or_numeric'); + [$a, $b] = makeTypesMatch($a, $b); + assertAndGetBroadcastShape($a.shape, $b.shape); + const inputs = { a: $a, b: $b }; + return ENGINE.runKernel(Equal, inputs); + } + const equal$2 = /* @__PURE__ */ op({ equal_ }); + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + /** + * Returns the elements, either `a` or `b` depending on the `condition`. + * + * If the condition is true, select from `a`, otherwise select from `b`. + * + * ```js + * const cond = tf.tensor1d([false, false, true], 'bool'); + * const a = tf.tensor1d([1 , 2, 3]); + * const b = tf.tensor1d([-1, -2, -3]); + * + * a.where(cond, b).print(); + * ``` + * + * @param condition The input condition. Must be of dtype bool. + * @param a If `condition` is rank 1, `a` may have a higher rank but + * its first dimension must match the size of `condition`. + * @param b A tensor with the same dtype as `a` and with shape that is + * compatible with `a`. + * @return A tensor with same dtype as `a` and `b`, and shape that is + * broadcastable from `a` and `b`. + * + * @doc {heading: 'Operations', subheading: 'Logical'} + */ + function where_(condition, a, b) { + const $a = convertToTensor(a, 'a', 'where'); + const $b = convertToTensor(b, 'b', 'where'); + const $condition = convertToTensor(condition, 'condition', 'where', 'bool'); + // TODO: move this logic to forward function when the broadcastTo op is + // implemented in WASM. + // Find the broadcastable shape for $condition, $a, and $b. 
+ const broadcastShape = assertAndGetBroadcastShape(assertAndGetBroadcastShape($condition.shape, $a.shape), $b.shape); + const $broadcastedCondition = broadcastTo($condition, broadcastShape); + const $broadcastedA = broadcastTo($a, broadcastShape); + const $broadcastedB = broadcastTo($b, broadcastShape); + const inputs = { + condition: $broadcastedCondition, + t: $broadcastedA, + e: $broadcastedB + }; + return ENGINE.runKernel(Select, inputs); + } + const where = /* @__PURE__ */ op({ where_ }); + + /** + * @license + * Copyright 2018 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + /** + * Creates a `tf.Tensor` with all elements set to 0 with the same shape as the + * given tensor. + * + * ```js + * const x = tf.tensor([1, 2]); + * tf.zerosLike(x).print(); + * ``` + * + * @param x The tensor of required shape. + * + * @doc {heading: 'Tensors', subheading: 'Creation'} + */ + function zerosLike_(x) { + const $x = convertToTensor(x, 'x', 'zerosLike'); + const inputs = { x: $x }; + return ENGINE.runKernel(ZerosLike, inputs); + } + const zerosLike$3 = /* @__PURE__ */ op({ zerosLike_ }); + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + /** + * Divides two `tf.Tensor`s element-wise, A / B. Supports broadcasting. Return 0 + * if denominator is 0. + * + * + * ```js + * const a = tf.tensor1d([1, 4, 9, 16]); + * const b = tf.tensor1d([1, 2, 3, 4]); + * const c = tf.tensor1d([0, 0, 0, 0]); + * + * a.divNoNan(b).print(); // or tf.divNoNan(a, b) + * a.divNoNan(c).print(); // or tf.divNoNan(a, c) + * ``` + * + * ```js + * // Broadcast div a with b. + * const a = tf.tensor1d([2, 4, 6, 8]); + * const b = tf.scalar(2); + * const c = tf.scalar(0); + * + * a.divNoNan(b).print(); // or tf.divNoNan(a, b) + * a.divNoNan(c).print(); // or tf.divNoNan(a, c) + * ``` + * + * @param a The first tensor as the numerator. + * @param b The second tensor as the denominator. Must have the same dtype as + * `a`. + * + * @doc {heading: 'Operations', subheading: 'Arithmetic'} + */ + function divNoNan_(a, b) { + // TODO: Make this into its own kernel. + let $a = convertToTensor(a, 'a', 'div'); + let $b = convertToTensor(b, 'b', 'div'); + [$a, $b] = makeTypesMatch($a, $b); + const divResult = div$1($a, $b); + const zeros = zerosLike$3(divResult); + const bEqualsZero = equal$2($b, zeros); + return where(bEqualsZero, zeros, divResult); + } + const divNoNan = /* @__PURE__ */ op({ divNoNan_ }); + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + /** + * Computes the dot product of two matrices and/or vectors, `t1` and `t2`. + * + * ```js + * const a = tf.tensor1d([1, 2]); + * const b = tf.tensor2d([[1, 2], [3, 4]]); + * const c = tf.tensor2d([[1, 2, 3], [4, 5, 6]]); + * + * a.dot(b).print(); // or tf.dot(a, b) + * b.dot(a).print(); + * b.dot(c).print(); + * ``` + * @param t1 The first tensor in the dot operation. + * @param t2 The second tensor in the dot operation. + * + * @doc {heading: 'Operations', subheading: 'Matrices'} + */ + function dot_(t1, t2) { + const $t1 = convertToTensor(t1, 't1', 'dot'); + const $t2 = convertToTensor(t2, 't2', 'dot'); + assert$1(($t1.rank === 1 || $t1.rank === 2) && ($t2.rank === 1 || $t2.rank === 2), () => `Error in dot: inputs must all be rank 1 or 2, but got ranks ` + + `${$t1.rank} and ${$t2.rank}.`); + const t1Inner = ($t1.rank === 1 ? $t1.size : $t1.shape[1]); + const t2Inner = ($t2.rank === 1 ? 
$t2.size : $t2.shape[0]); + assert$1(t1Inner === t2Inner, () => `Error in dot: inner dimensions of inputs must match, but got ` + + `${t1Inner} and ${t2Inner}.`); + if ($t1.rank === 1 && $t2.rank === 1) { + const t12D = reshape$3($t1, [1, -1]); + const t22D = reshape$3($t2, [-1, 1]); + const t1t2 = matMul$1(t12D, t22D); + return reshape$3(t1t2, []); + } + else if ($t1.rank === 1 && $t2.rank === 2) { + const t12D = reshape$3($t1, [1, -1]); + const t22D = reshape$3($t2, [$t2.shape[0], $t2.shape[1]]); + const t1t2 = matMul$1(t12D, t22D); + return reshape$3(t1t2, [t1t2.size]); + } + else if ($t1.rank === 2 && $t2.rank === 1) { + const t22D = reshape$3($t2, [-1, 1]); + const t1t2 = matMul$1($t1, t22D); + return reshape$3(t1t2, [t1t2.size]); + } + else { + const t22D = reshape$3($t2, [$t2.shape[0], $t2.shape[1]]); + const t1t2 = matMul$1($t1, t22D); + return t1t2; + } + } + const dot$2 = /* @__PURE__ */ op({ dot_ }); + + /** + * @license + * Copyright 2021 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + /** + * Tensor contraction over specified indices and outer product. + * + * `einsum` allows defining Tensors by defining their element-wise computation. + * This computation is based on + * [Einstein summation](https://en.wikipedia.org/wiki/Einstein_notation). 
+ * + * Some special cases include: + * + * Matrix multiplication: + * ```js + * const x = tf.tensor2d([[1, 2, 3], [4, 5, 6]]); + * const y = tf.tensor2d([[0, 1], [2, 3], [4, 5]]); + * x.print(); + * y.print(); + * tf.einsum('ij,jk->ik', x, y).print(); + * ``` + * + * Dot product: + * ```js + * const x = tf.tensor1d([1, 2, 3]); + * const y = tf.tensor1d([0, 1, 2]); + * x.print(); + * y.print(); + * tf.einsum('i,i->', x, y).print(); + * ``` + * + * Batch dot product: + * ```js + * const x = tf.tensor2d([[1, 2, 3], [4, 5, 6]]); + * const y = tf.tensor2d([[0, 1, 2], [3, 4, 5]]); + * x.print(); + * y.print(); + * tf.einsum('bi,bi->b', x, y).print(); + * ``` + * + * Outer prouduct: + * ```js + * const x = tf.tensor1d([1, 3, 5]); + * const y = tf.tensor1d([2, 4, 6]); + * x.print(); + * y.print(); + * tf.einsum('i,j->ij', x, y).print(); + * ``` + * + * Matrix transpose: + * ```js + * const x = tf.tensor2d([[1, 2], [3, 4]]); + * x.print(); + * tf.einsum('ij->ji', x).print(); + * ``` + * + * Batch matrix transpose: + * ```js + * const x = tf.tensor3d([[[1, 2], [3, 4]], [[-1, -2], [-3, -4]]]); + * x.print(); + * tf.einsum('bij->bji', x).print(); + * ``` + * + * Limitations: + * + * This implementation of einsum has the following limitations: + * + * - Does not support >2 input tensors. + * - Does not support duplicate axes for any given input tensor. E.g., equation + * 'ii->' is not supported. + * - The `...` notation is not supported. + * + * @param equation a string describing the contraction, in the same format as + * [numpy.einsum](https://numpy.org/doc/stable/reference/generated/numpy.einsum.html). + * @param tensors the input(s) to contract (each one a Tensor), whose shapes + * should be consistent with equation. + * @returns The output tensor. 
+ * + * @doc {heading: 'Tensors', subheading: 'Matrices'} + */ + function einsum_(equation, ...tensors) { + const $tensors = tensors.map((t, i) => convertToTensor(t, `tensors${i}`, 'einsum')); + const attrs = { equation }; + return ENGINE.runKernel(Einsum, $tensors, attrs); + } + const einsum$2 = /* @__PURE__ */ op({ einsum_ }); + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + /** + * Computes exponential linear element-wise: `x > 0 ? x : (e ^ x) - 1`. + * + * ```js + * const x = tf.tensor1d([-1, 1, -3, 2]); + * + * x.elu().print(); // or tf.elu(x) + * ``` + * @param x The input tensor. + * + * @doc {heading: 'Operations', subheading: 'Basic math'} + */ + function elu_(x) { + const $x = convertToTensor(x, 'x', 'elu', 'float32'); + const inputs = { x: $x }; + return ENGINE.runKernel(Elu$1, inputs); + } + const elu$4 = /* @__PURE__ */ op({ elu_ }); + + /** + * @license + * Copyright 2023 Google LLC. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + /** + * Checks the input tensor mathes the given shape. + * + * Given an input tensor, returns a new tensor with the same values as the + * input tensor with shape `shape`. + * + * The method supports the null value in tensor. It will still check the shapes, + * and null is a placeholder. + * + * + * ```js + * const x = tf.tensor1d([1, 2, 3, 4]); + * const y = tf.tensor1d([1, null, 3, 4]); + * const z = tf.tensor2d([1, 2, 3, 4], [2,2]); + * tf.ensureShape(x, [4]).print(); + * tf.ensureShape(y, [4]).print(); + * tf.ensureShape(z, [null, 2]).print(); + * ``` + * + * @param x The input tensor to be ensured. + * @param shape A TensorShape representing the shape of this tensor, an array + * or null. + * + * @doc {heading: 'Tensors', subheading: 'Transformations'} + */ + function ensureShape_(x, shape) { + const $x = convertToTensor(x, 'x', 'ensureShape', 'string_or_numeric'); + if (!arraysEqualWithNull($x.shape, shape)) { + throw new Error(`EnsureShape: Shape of tensor ${$x.shape} is not compatible with expected shape ${shape}`); + } + return x; + } + const ensureShape = /* @__PURE__ */ op({ ensureShape_ }); + + /** + * @license + * Copyright 2018 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + /** + * Computes Gauss error function of the input `tf.Tensor` element-wise: + * `erf(x)` + * + * ```js + * const x = tf.tensor1d([0, .1, -.1, .7]); + * + * x.erf().print(); // or tf.erf(x); + * ``` + * @param x The input tensor. + * + * @doc {heading: 'Operations', subheading: 'Basic math'} + */ + function erf_(x) { + let $x = convertToTensor(x, 'x', 'erf'); + assert$1($x.dtype === 'int32' || $x.dtype === 'float32', () => 'Input dtype must be `int32` or `float32`.'); + if ($x.dtype === 'int32') { + $x = cast$3($x, 'float32'); + } + const inputs = { x: $x }; + return ENGINE.runKernel(Erf, inputs); + } + const erf$2 = /* @__PURE__ */ op({ erf_ }); + + /** + * @license + * Copyright 2017 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + /** + * Returns true if the axis specifies the inner most dimensions of the + * array. 
+ */ + function axesAreInnerMostDims(axes, rank) { + for (let i = 0; i < axes.length; ++i) { + if (axes[axes.length - i - 1] !== rank - 1 - i) { + return false; + } + } + return true; + } + function combineLocations(outputLoc, reduceLoc, axes) { + const rank = outputLoc.length + reduceLoc.length; + const loc = []; + let outIdx = 0; + let reduceIdx = 0; + for (let dim = 0; dim < rank; dim++) { + if (axes.indexOf(dim) === -1) { + loc.push(outputLoc[outIdx++]); + } + else { + loc.push(reduceLoc[reduceIdx++]); + } + } + return loc; + } + function computeOutAndReduceShapes(aShape, axes) { + const outShape = []; + const rank = aShape.length; + for (let dim = 0; dim < rank; dim++) { + if (axes.indexOf(dim) === -1) { + outShape.push(aShape[dim]); + } + } + const reduceShape = axes.map(dim => aShape[dim]); + return [outShape, reduceShape]; + } + function expandShapeToKeepDim(shape, axes) { + const reduceSubShape = axes.map(x => 1); + return combineLocations(shape, reduceSubShape, axes); + } + function assertAxesAreInnerMostDims(msg, axes, rank) { + assert$1(axesAreInnerMostDims(axes, rank), () => `${msg} supports only inner-most axes for now. ` + + `Got axes ${axes} and rank-${rank} input.`); + } + /** + * Returns the axes permutation to be used with `tf.transpose`, if such + * permutation is necessary. Otherwise it returns null. This method is used by + * operations that operate only on inner-most axes. + */ + function getAxesPermutation(axes, rank) { + if (axesAreInnerMostDims(axes, rank)) { + return null; + } + const result = []; + for (let i = 0; i < rank; ++i) { + if (axes.indexOf(i) === -1) { + result.push(i); + } + } + axes.forEach(axis => result.push(axis)); + return result; + } + /** Returns the axes permutation that undoes the original permutation. 
*/ + function getUndoAxesPermutation(axes) { + return axes.map((axis, i) => [i, axis]) + .sort((a, b) => a[1] - b[1]) + .map(x => x[0]); + } + function getInnerMostAxes(numAxes, rank) { + const res = []; + for (let i = rank - numAxes; i < rank; ++i) { + res.push(i); + } + return res; + } + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + /** + * Computes the maximum of elements across dimensions of a `tf.Tensor`. + * + * Reduces the input along the dimensions given in `axes`. Unless `keepDims` + * is true, the rank of the `tf.Tensor` is reduced by 1 for each entry in + * `axes`. If `keepDims` is true, the reduced dimensions are retained with + * length 1. If `axes` has no entries, all dimensions are reduced, and a + * `tf.Tensor` with a single element is returned. + * + * ```js + * const x = tf.tensor1d([1, 2, 3]); + * + * x.max().print(); // or tf.max(x) + * ``` + * + * ```js + * const x = tf.tensor2d([1, 2, 3, 4], [2, 2]); + * + * const axis = 1; + * x.max(axis).print(); // or tf.max(x, axis) + * ``` + * + * @param x The input tensor. + * @param axis The dimension(s) to reduce. By default it reduces + * all dimensions. + * @param keepDims If true, retains reduced dimensions with size 1. 
+ * + * @doc {heading: 'Operations', subheading: 'Reduction'} + */ + function max_(x, axis = null, keepDims = false) { + const $x = convertToTensor(x, 'x', 'max'); + const inputs = { x: $x }; + const attrs = { reductionIndices: axis, keepDims }; + return ENGINE.runKernel(Max, inputs, attrs); + } + const max$3 = /* @__PURE__ */ op({ max_ }); + + /** + * @license + * Copyright 2020 Google Inc. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + /** + * Computes the minimum value from the input. + * + * Reduces the input along the dimensions given in `axes`. Unless `keepDims` + * is true, the rank of the array is reduced by 1 for each entry in `axes`. + * If `keepDims` is true, the reduced dimensions are retained with length 1. + * If `axes` has no entries, all dimensions are reduced, and an array with a + * single element is returned. + * + * ```js + * const x = tf.tensor1d([1, 2, 3]); + * + * x.min().print(); // or tf.min(x) + * ``` + * + * ```js + * const x = tf.tensor2d([1, 2, 3, 4], [2, 2]); + * + * const axis = 1; + * x.min(axis).print(); // or tf.min(x, axis) + * ``` + * + * @param x The input Tensor. + * @param axis The dimension(s) to reduce. By default it reduces + * all dimensions. + * @param keepDims If true, retains reduced dimensions with size 1. 
+ * + * @doc {heading: 'Operations', subheading: 'Reduction'} + */ + function min_(x, axis = null, keepDims = false) { + const $x = convertToTensor(x, 'x', 'min'); + const inputs = { x: $x }; + const attrs = { axis, keepDims }; + // tslint:disable-next-line: no-unnecessary-type-assertion + return ENGINE.runKernel(Min, inputs, attrs); + } + const min$3 = /* @__PURE__ */ op({ min_ }); + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + /** + * Computes the power of one `tf.Tensor` to another. Supports broadcasting. + * + * Given a `tf.Tensor` x and a `tf.Tensor` y, this operation computes x^y for + * corresponding elements in x and y. The result's dtype will be the upcasted + * type of the `base` and `exp` dtypes. + * + * ```js + * const a = tf.tensor([[2, 3], [4, 5]]) + * const b = tf.tensor([[1, 2], [3, 0]]).toInt(); + * + * a.pow(b).print(); // or tf.pow(a, b) + * ``` + * + * ```js + * const a = tf.tensor([[1, 2], [3, 4]]) + * const b = tf.tensor(2).toInt(); + * + * a.pow(b).print(); // or tf.pow(a, b) + * ``` + * We also expose `powStrict` which has the same signature as this op and + * asserts that `base` and `exp` are the same shape (does not broadcast). + * + * @param base The base `tf.Tensor` to pow element-wise. + * @param exp The exponent `tf.Tensor` to pow element-wise. 
+ * + * @doc {heading: 'Operations', subheading: 'Arithmetic'} + */ + function pow_(base, exp) { + let $base = convertToTensor(base, 'base', 'pow'); + let $exp = convertToTensor(exp, 'exp', 'pow'); + [$base, $exp] = makeTypesMatch($base, $exp); + const inputs = { a: $base, b: $exp }; + return ENGINE.runKernel(Pow, inputs); + } + const pow$3 = /* @__PURE__ */ op({ pow_ }); + + /** + * @license + * Copyright 2018 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + /** + * Creates rank-0 `tf.Tensor` (scalar) with the provided value and dtype. + * + * The same functionality can be achieved with `tf.tensor`, but in general + * we recommend using `tf.scalar` as it makes the code more readable. + * + * ```js + * tf.scalar(3.14).print(); + * ``` + * + * @param value The value of the scalar. + * @param dtype The data type. 
+ * + * @doc {heading: 'Tensors', subheading: 'Creation'} + */ + function scalar(value, dtype) { + if (((isTypedArray(value) && dtype !== 'string') || Array.isArray(value)) && + dtype !== 'complex64') { + throw new Error('Error creating a new Scalar: value must be a primitive ' + + '(number|boolean|string)'); + } + if (dtype === 'string' && isTypedArray(value) && + !(value instanceof Uint8Array)) { + throw new Error('When making a scalar from encoded string, ' + + 'the value must be `Uint8Array`.'); + } + const shape = []; + const inferredShape = []; + return makeTensor(value, shape, inferredShape, dtype); + } + + /** + * @license + * Copyright 2018 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + /** + * Computes square root of the input `tf.Tensor` element-wise: `y = sqrt(x)` + * + * ```js + * const x = tf.tensor1d([1, 2, 4, -1]); + * + * x.sqrt().print(); // or tf.sqrt(x) + * ``` + * @param x The input tensor. + * + * @doc {heading: 'Operations', subheading: 'Basic math'} + */ + function sqrt_(x) { + const $x = convertToTensor(x, 'x', 'sqrt', 'float32'); + const inputs = { x: $x }; + return ENGINE.runKernel(Sqrt, inputs); + } + const sqrt$2 = /* @__PURE__ */ op({ sqrt_ }); + + /** + * @license + * Copyright 2019 Google LLC. All Rights Reserved. 
+ * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + /** + * Computes square of `x` element-wise: `x ^ 2` + * + * ```js + * const x = tf.tensor1d([1, 2, Math.sqrt(2), -1]); + * + * x.square().print(); // or tf.square(x) + * ``` + * @param x The input Tensor. + * + * @doc {heading: 'Operations', subheading: 'Basic math'} + */ + function square_(x) { + const $x = convertToTensor(x, 'x', 'square'); + const attrs = {}; + return ENGINE.runKernel('Square', { x: $x }, attrs); + } + const square$2 = /* @__PURE__ */ op({ square_ }); + + /** + * @license + * Copyright 2018 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + /** + * Computes the sum of elements across dimensions of a `tf.Tensor`. + * + * Reduces the input along the dimensions given in `axes`. 
Unless `keepDims` + * is true, the rank of the `tf.Tensor` is reduced by 1 for each entry in + * `axes`. If `keepDims` is true, the reduced dimensions are retained with + * length 1. If axes has no entries, all dimensions are reduced, and a + * `tf.Tensor` with a single element is returned. + * + * ```js + * const x = tf.tensor1d([1, 2, 3]); + * + * x.sum().print(); // or tf.sum(x) + * ``` + * + * ```js + * const x = tf.tensor2d([1, 2, 3, 4], [2, 2]); + * + * const axis = 1; + * x.sum(axis).print(); // or tf.sum(x, axis) + * ``` + * + * @param x The input tensor to compute the sum over. If the dtype is `bool` + * it will be converted to `int32` and the output dtype will be `int32`. + * @param axis The dimension(s) to reduce. By default it reduces + * all dimensions. + * @param keepDims If true, retains reduced dimensions with size 1. + * + * @doc {heading: 'Operations', subheading: 'Reduction'} + */ + function sum_(x, axis = null, keepDims = false) { + let $x = convertToTensor(x, 'x', 'sum'); + if ($x.dtype === 'bool') { + $x = cast$3($x, 'int32'); + } + const inputs = { x: $x }; + const attrs = { axis, keepDims }; + return ENGINE.runKernel(Sum, inputs, attrs); + } + const sum$3 = /* @__PURE__ */ op({ sum_ }); + + /** + * @license + * Copyright 2018 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ + /** + * Computes the norm of scalar, vectors, and matrices. + * This function can compute several different vector norms (the 1-norm, the + * Euclidean or 2-norm, the inf-norm, and in general the p-norm for p > 0) + * and matrix norms (Frobenius, 1-norm, and inf-norm). + * + * ```js + * const x = tf.tensor1d([1, 2, 3, 4]); + * + * x.norm().print(); // or tf.norm(x) + * ``` + * + * @param x The input array. + * @param ord Optional. Order of the norm. Supported norm types are + * following: + * + * | ord | norm for matrices | norm for vectors + * |------------|---------------------------|--------------------- + * |'euclidean' |Frobenius norm |2-norm + * |'fro' |Frobenius norm | + * |Infinity |max(sum(abs(x), axis=1)) |max(abs(x)) + * |-Infinity |min(sum(abs(x), axis=1)) |min(abs(x)) + * |1 |max(sum(abs(x), axis=0)) |sum(abs(x)) + * |2 | |sum(abs(x)^2)^(1/2) + * + * @param axis Optional. If axis is null (the default), the input is + * considered a vector and a single vector norm is computed over the entire + * set of values in the Tensor, i.e. norm(x, ord) is equivalent + * to norm(x.reshape([-1]), ord). If axis is an integer, the input + * is considered a batch of vectors, and axis determines the axis in x + * over which to compute vector norms. If axis is a 2-tuple of integer it is + * considered a batch of matrices and axis determines the axes in NDArray + * over which to compute a matrix norm. + * @param keepDims Optional. If true, the norm has the same dimensionality + * as the input. 
+ * + * @doc {heading: 'Operations', subheading: 'Matrices'} + */ + function norm_(x, ord = 'euclidean', axis = null, keepDims = false) { + x = convertToTensor(x, 'x', 'norm'); + const norm = normImpl(x, ord, axis); + let keepDimsShape = norm.shape; + if (keepDims) { + const axes = parseAxisParam(axis, x.shape); + keepDimsShape = expandShapeToKeepDim(norm.shape, axes); + } + return reshape$3(norm, keepDimsShape); + } + function normImpl(x, p, axis = null) { + if (x.rank === 0) { + return abs$2(x); + } + // consider vector when no axis is specified + if (x.rank !== 1 && axis === null) { + return normImpl(reshape$3(x, [-1]), p, axis); + } + // vector + if (x.rank === 1 || typeof axis === 'number' || + Array.isArray(axis) && axis.length === 1) { + if (p === 1) { + return sum$3(abs$2(x), axis); + } + if (p === Infinity) { + return max$3(abs$2(x), axis); + } + if (p === -Infinity) { + return min$3(abs$2(x), axis); + } + if (p === 'euclidean' || p === 2) { + // norm(x, 2) = sum(abs(xi) ^ 2) ^ 1/2 + return sqrt$2(sum$3(pow$3(abs$2(x), scalar(2, 'int32')), axis)); + } + throw new Error(`Error in norm: invalid ord value: ${p}`); + } + // matrix (assumption axis[0] < axis[1]) + if (Array.isArray(axis) && axis.length === 2) { + if (p === 1) { + return max$3(sum$3(abs$2(x), axis[0]), axis[1] - 1); + } + if (p === Infinity) { + return max$3(sum$3(abs$2(x), axis[1]), axis[0]); + } + if (p === -Infinity) { + return min$3(sum$3(abs$2(x), axis[1]), axis[0]); + } + if (p === 'fro' || p === 'euclidean') { + // norm(x) = sqrt(sum(pow(x, 2))) + return sqrt$2(sum$3(square$2(x), axis)); + } + throw new Error(`Error in norm: invalid ord value: ${p}`); + } + throw new Error(`Error in norm: invalid axis: ${axis}`); + } + const norm = /* @__PURE__ */ op({ norm_ }); + + /** + * @license + * Copyright 2022 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + /** + * Computes the Euclidean norm of scalar, vectors, and matrices. + * + * ```js + * const x = tf.tensor1d([1, 2, 3, 4]); + * + * x.euclideanNorm().print(); // or tf.euclideanNorm(x) + * ``` + * + * @param x The input array. + * @param axis Optional. If axis is null (the default), the input is + * considered a vector and a single vector norm is computed over the entire + * set of values in the Tensor, i.e. euclideanNorm(x) is equivalent + * to euclideanNorm(x.reshape([-1])). If axis is an integer, the input + * is considered a batch of vectors, and axis determines the axis in x + * over which to compute vector norms. If axis is a 2-tuple of integer it is + * considered a batch of matrices and axis determines the axes in NDArray + * over which to compute a matrix norm. + * @param keepDims Optional. If true, the norm has the same dimensionality + * as the input. + * + * @doc {heading: 'Operations', subheading: 'Matrices'} + */ + function euclideanNorm_(x, axis = null, keepDims = false) { + return norm(x, 'euclidean', axis, keepDims); + } + const euclideanNorm = /* @__PURE__ */ op({ euclideanNorm_ }); + + /** + * @license + * Copyright 2018 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + /** + * Computes exponential of the input `tf.Tensor` element-wise. `e ^ x` + * + * ```js + * const x = tf.tensor1d([1, 2, -3]); + * + * x.exp().print(); // or tf.exp(x) + * ``` + * @param x The input tensor. + * + * @doc {heading: 'Operations', subheading: 'Basic math'} + */ + function exp_(x) { + const $x = convertToTensor(x, 'x', 'exp'); + const inputs = { x: $x }; + return ENGINE.runKernel(Exp, inputs); + } + const exp$2 = /* @__PURE__ */ op({ exp_ }); + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + /** + * Returns a `tf.Tensor` that has expanded rank, by inserting a dimension + * into the tensor's shape. + * + * ```js + * const x = tf.tensor1d([1, 2, 3, 4]); + * const axis = 1; + * x.expandDims(axis).print(); + * ``` + * + * @param x The input tensor whose dimensions are to be expanded. 
+ * @param axis The dimension index at which to insert shape of `1`. Defaults + * to 0 (the first dimension). + * + * @doc {heading: 'Tensors', subheading: 'Transformations'} + */ + function expandDims_(x, axis = 0) { + const $x = convertToTensor(x, 'x', 'expandDims', 'string_or_numeric'); + assert$1(axis <= $x.rank, () => 'Axis must be <= rank of the tensor'); + const inputs = { input: $x }; + const attrs = { dim: axis }; + return ENGINE.runKernel(ExpandDims, inputs, attrs); + } + const expandDims$3 = /* @__PURE__ */ op({ expandDims_ }); + + /** + * @license + * Copyright 2018 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + /** + * Computes exponential of the input `tf.Tensor` minus one element-wise. + * `e ^ x - 1` + * + * ```js + * const x = tf.tensor1d([1, 2, -3]); + * + * x.expm1().print(); // or tf.expm1(x) + * ``` + * @param x The input tensor. + * + * @doc {heading: 'Operations', subheading: 'Basic math'} + */ + function expm1_(x) { + const $x = convertToTensor(x, 'x', 'expm1'); + const inputs = { x: $x }; + return ENGINE.runKernel(Expm1, inputs); + } + const expm1$2 = /* @__PURE__ */ op({ expm1_ }); + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + /** + * Construct a tensor by repeating it the number of times given by reps. + * + * This operation creates a new tensor by replicating `input` `reps` + * times. The output tensor's `i`th dimension has `input.shape[i] * + * reps[i]` elements, and the values of `input` are replicated + * `reps[i]` times along the `i`th dimension. For example, tiling + * `[a, b, c, d]` by `[2]` produces `[a, b, c, d, a, b, c, d]`. + * + * ```js + * const a = tf.tensor1d([1, 2]); + * + * a.tile([2]).print(); // or tf.tile(a, [2]) + * ``` + * + * ```js + * const a = tf.tensor2d([1, 2, 3, 4], [2, 2]); + * + * a.tile([1, 2]).print(); // or tf.tile(a, [1,2]) + * ``` + * @param x The tensor to tile. + * @param reps Determines the number of replications per dimension. + * + * @doc {heading: 'Tensors', subheading: 'Slicing and Joining'} + */ + function tile_(x, reps) { + const $x = convertToTensor(x, 'x', 'tile', 'string_or_numeric'); + assert$1($x.rank === reps.length, () => `Error in transpose: rank of input ${$x.rank} ` + + `must match length of reps ${reps}.`); + const inputs = { x: $x }; + const attrs = { reps }; + return ENGINE.runKernel(Tile, inputs, attrs); + } + const tile$3 = /* @__PURE__ */ op({ tile_ }); + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + /** + * Create an identity matrix. + * + * @param numRows Number of rows. + * @param numColumns Number of columns. Defaults to `numRows`. + * @param batchShape If provided, will add the batch shape to the beginning + * of the shape of the returned `tf.Tensor` by repeating the identity + * matrix. + * @param dtype Data type. + * @returns Identity matrix of the specified size and data type, possibly + * with batch repetition if `batchShape` is specified. + * + * @doc {heading: 'Tensors', subheading: 'Creation'} + */ + function eye_(numRows, numColumns, batchShape, dtype = 'float32') { + if (numColumns == null) { + numColumns = numRows; + } + const buff = buffer([numRows, numColumns], dtype); + const n = numRows <= numColumns ? 
numRows : numColumns; + for (let i = 0; i < n; ++i) { + buff.set(1, i, i); + } + const out = reshape$3(buff.toTensor(), [numRows, numColumns]); + if (batchShape == null) { + return out; + } + else { + if (batchShape.length === 1) { + return tile$3(expandDims$3(out, 0), [batchShape[0], 1, 1]); + } + else if (batchShape.length === 2) { + // tslint:disable-next-line:no-unnecessary-type-assertion + return tile$3(expandDims$3(expandDims$3(out, 0), 0), [batchShape[0], batchShape[1], 1, 1]); + } + else if (batchShape.length === 3) { + // tslint:disable-next-line:no-unnecessary-type-assertion + return tile$3(expandDims$3(expandDims$3(expandDims$3(out, 0), 0), 0), [ + batchShape[0], batchShape[1], batchShape[2], 1, 1 + ]); + } + else { + throw new Error(`eye() currently supports only 1D and 2D ` + + // tslint:disable-next-line:no-any + `batchShapes, but received ${batchShape.length}D.`); + } + } + } + const eye = /* @__PURE__ */ op({ eye_ }); + + /** + * @license + * Copyright 2018 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + /** + * Computes floor of input `tf.Tensor` element-wise: `floor(x)`. + * + * ```js + * const x = tf.tensor1d([.6, 1.1, -3.3]); + * + * x.floor().print(); // or tf.floor(x) + * ``` + * @param x The input tensor. 
+ * + * @doc {heading: 'Operations', subheading: 'Basic math'} + */ + function floor_(x) { + const $x = convertToTensor(x, 'x', 'floor', 'float32'); + const inputs = { x: $x }; + return ENGINE.runKernel(Floor, inputs); + } + const floor$2 = /* @__PURE__ */ op({ floor_ }); + + /** + * @license + * Copyright 2018 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + /** + * Gather slices from tensor `x`'s axis `axis` according to `indices`. + * + * ```js + * const x = tf.tensor1d([1, 2, 3, 4]); + * const indices = tf.tensor1d([1, 3, 3], 'int32'); + * + * x.gather(indices).print(); + * ``` + * + * ```js + * const x = tf.tensor2d([1, 2, 3, 4], [2, 2]); + * const indices = tf.tensor1d([1, 1, 0], 'int32'); + * + * x.gather(indices).print(); + * ``` + * @param x The input tensor whose slices are to be gathered. + * @param indices The indices of the values to extract. + * @param axis The axis over which to select values. Defaults to 0. + * @param batchDims Optional. The number of batch dimensions. It must be less + * than or equal to rank(indices). Defaults to 0. 
+ * The output tensor will have shape of + * `x.shape[:axis] + indices.shape[batchDims:] + x.shape[axis + 1:]` + * + * @doc {heading: 'Tensors', subheading: 'Slicing and Joining'} + */ + function gather_(x, indices, axis = 0, batchDims = 0) { + const $x = convertToTensor(x, 'x', 'gather'); + const $indices = convertToTensor(indices, 'indices', 'gather', 'int32'); + const inputs = { x: $x, indices: $indices }; + const attrs = { axis, batchDims }; + return ENGINE.runKernel(GatherV2, inputs, attrs); + } + const gather$1 = /* @__PURE__ */ op({ gather_ }); + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + /** + * Returns the truth value of (a > b) element-wise. Supports broadcasting. + * + * ```js + * const a = tf.tensor1d([1, 2, 3]); + * const b = tf.tensor1d([2, 2, 2]); + * + * a.greater(b).print(); + * ``` + * + * @param a The first input tensor. + * @param b The second input tensor. Must have the same dtype as `a`. 
+ * + * @doc {heading: 'Operations', subheading: 'Logical'} + */ + function greater_(a, b) { + let $a = convertToTensor(a, 'a', 'greater', 'string_or_numeric'); + let $b = convertToTensor(b, 'b', 'greater', 'string_or_numeric'); + [$a, $b] = makeTypesMatch($a, $b); + assertAndGetBroadcastShape($a.shape, $b.shape); + const inputs = { a: $a, b: $b }; + return ENGINE.runKernel(Greater, inputs); + } + const greater$3 = /* @__PURE__ */ op({ greater_ }); + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + /** + * Returns the truth value of (a >= b) element-wise. Supports broadcasting. + * + * ```js + * const a = tf.tensor1d([1, 2, 3]); + * const b = tf.tensor1d([2, 2, 2]); + * + * a.greaterEqual(b).print(); + * ``` + * + * @param a The first input tensor. + * @param b The second input tensor. Must have the same dtype as `a`. 
+ * + * @doc {heading: 'Operations', subheading: 'Logical'} + */ + function greaterEqual_(a, b) { + let $a = convertToTensor(a, 'a', 'greaterEqual', 'string_or_numeric'); + let $b = convertToTensor(b, 'b', 'greaterEqual', 'string_or_numeric'); + [$a, $b] = makeTypesMatch($a, $b); + assertAndGetBroadcastShape($a.shape, $b.shape); + const inputs = { a: $a, b: $b }; + return ENGINE.runKernel(GreaterEqual, inputs); + } + const greaterEqual$2 = /* @__PURE__ */ op({ greaterEqual_ }); + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + /** + * Returns the imaginary part of a complex (or real) tensor. + * + * Given a tensor input, this operation returns a tensor of type float that is + * the imaginary part of each element in input considered as a complex number. + * If input is real, a tensor of all zeros is returned. + * + * ```js + * const x = tf.complex([-2.25, 3.25], [4.75, 5.75]); + * tf.imag(x).print(); + * ``` + * + * @doc {heading: 'Tensors', subheading: 'Creation'} + */ + function imag_(input) { + const $input = convertToTensor(input, 'input', 'imag'); + const inputs = { input: $input }; + return ENGINE.runKernel(Imag, inputs); + } + const imag$2 = /* @__PURE__ */ op({ imag_ }); + + /** + * @license + * Copyright 2018 Google LLC. All Rights Reserved. 
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 * =============================================================================
 */
/**
 * Returns which elements of x are finite.
 *
 * ```js
 * const x = tf.tensor1d([NaN, Infinity, -Infinity, 0, 1]);
 *
 * x.isFinite().print(); // or tf.isFinite(x)
 * ```
 * @param x The input Tensor.
 *
 * @doc {heading: 'Operations', subheading: 'Basic math'}
 */
function isFinite_(x) {
    const $x = convertToTensor(x, 'x', 'isFinite');
    const inputs = { x: $x };
    return ENGINE.runKernel(IsFinite, inputs);
}
const isFinite$3 = /* @__PURE__ */ op({ isFinite_ });

/**
 * @license
 * Copyright 2018 Google LLC. All Rights Reserved.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 * =============================================================================
 */
/**
 * Returns which elements of x are Infinity or -Infinity.
+ * + * ```js + * const x = tf.tensor1d([NaN, Infinity, -Infinity, 0, 1]); + * + * x.isInf().print(); // or tf.isNaN(x) + * ``` + * @param x The input Tensor. + * + * @doc {heading: 'Operations', subheading: 'Basic math'} + */ + function isInf_(x) { + const $x = convertToTensor(x, 'x', 'isInf'); + const inputs = { x: $x }; + return ENGINE.runKernel(IsInf, inputs); + } + const isInf$2 = /* @__PURE__ */ op({ isInf_ }); + + /** + * @license + * Copyright 2018 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + /** + * Returns which elements of x are NaN. + * + * ```js + * const x = tf.tensor1d([NaN, Infinity, -Infinity, 0, 1]); + * + * x.isNaN().print(); // or tf.isNaN(x) + * ``` + * @param x The input Tensor. + * + * @doc {heading: 'Operations', subheading: 'Basic math'} + */ + function isNaN_(x) { + const $x = convertToTensor(x, 'x', 'isNaN'); + const inputs = { x: $x }; + return ENGINE.runKernel(IsNan, inputs); + } + const isNaN$3 = /* @__PURE__ */ op({ isNaN_ }); + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + /** + * Computes leaky rectified linear element-wise. + * + * See + * [http://web.stanford.edu/~awni/papers/relu_hybrid_icml2013_final.pdf]( + * http://web.stanford.edu/~awni/papers/relu_hybrid_icml2013_final.pdf) + * + * ```js + * const x = tf.tensor1d([-1, 2, -3, 4]); + * + * x.leakyRelu(0.1).print(); // or tf.leakyRelu(x, 0.1) + * ``` + * @param x The input tensor. + * @param alpha The scaling factor for negative values, defaults to 0.2. + * + * @doc {heading: 'Operations', subheading: 'Basic math'} + */ + function leakyRelu_(x, alpha = 0.2) { + const $x = convertToTensor(x, 'x', 'leakyRelu'); + const inputs = { x: $x }; + const attrs = { alpha }; + return ENGINE.runKernel(LeakyRelu, inputs, attrs); + } + const leakyRelu$2 = /* @__PURE__ */ op({ leakyRelu_ }); + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ + /** + * Returns the truth value of (a < b) element-wise. Supports broadcasting. + * + * ```js + * const a = tf.tensor1d([1, 2, 3]); + * const b = tf.tensor1d([2, 2, 2]); + * + * a.less(b).print(); + * ``` + * @param a The first input tensor. + * @param b The second input tensor. Must have the same dtype as `a`. + * + * @doc {heading: 'Operations', subheading: 'Logical'} + */ + function less_(a, b) { + let $a = convertToTensor(a, 'a', 'less', 'string_or_numeric'); + let $b = convertToTensor(b, 'b', 'less', 'string_or_numeric'); + [$a, $b] = makeTypesMatch($a, $b); + assertAndGetBroadcastShape($a.shape, $b.shape); + const inputs = { a: $a, b: $b }; + return ENGINE.runKernel(Less, inputs); + } + const less$3 = /* @__PURE__ */ op({ less_ }); + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + /** + * Returns the truth value of (a <= b) element-wise. Supports broadcasting. + * + * ```js + * const a = tf.tensor1d([1, 2, 3]); + * const b = tf.tensor1d([2, 2, 2]); + * + * a.lessEqual(b).print(); + * ``` + * + * @param a The first input tensor. + * @param b The second input tensor. Must have the same dtype as `a`. 
+ * + * @doc {heading: 'Operations', subheading: 'Logical'} + */ + function lessEqual_(a, b) { + let $a = convertToTensor(a, 'a', 'lessEqual', 'string_or_numeric'); + let $b = convertToTensor(b, 'b', 'lessEqual', 'string_or_numeric'); + [$a, $b] = makeTypesMatch($a, $b); + assertAndGetBroadcastShape($a.shape, $b.shape); + const inputs = { a: $a, b: $b }; + return ENGINE.runKernel(LessEqual, inputs); + } + const lessEqual$2 = /* @__PURE__ */ op({ lessEqual_ }); + + /** + * @license + * Copyright 2018 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + /** + * Return an evenly spaced sequence of numbers over the given interval. + * + * ```js + * tf.linspace(0, 9, 10).print(); + * ``` + * @param start The start value of the sequence. + * @param stop The end value of the sequence. + * @param num The number of values to generate. + * + * @doc {heading: 'Tensors', subheading: 'Creation'} + */ + function linspace(start, stop, num) { + if (num <= 0) { + throw new Error('The number of values should be positive.'); + } + const attrs = { start, stop, num }; + return ENGINE.runKernel(LinSpace, {}, attrs); + } + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + /** + * Normalizes the activation of a local neighborhood across or within + * channels. + * + * @param x The input tensor. The 4-D input tensor is treated as a 3-D array + * of 1D vectors (along the last dimension), and each vector is + * normalized independently. + * @param depthRadius The number of adjacent channels in the 1D normalization + * window. + * @param bias A constant bias term for the basis. + * @param alpha A scale factor, usually positive. + * @param beta An exponent. 
+ * + * @doc {heading: 'Operations', subheading: 'Normalization'} + */ + function localResponseNormalization_(x, depthRadius = 5, bias = 1, alpha = 1, beta = 0.5) { + const $x = convertToTensor(x, 'x', 'localResponseNormalization'); + assert$1($x.rank === 4 || $x.rank === 3, () => `Error in localResponseNormalization: x must be rank 3 or 4 but got + rank ${$x.rank}.`); + assert$1(isInt(depthRadius), () => `Error in localResponseNormalization: depthRadius must be an ` + + `integer but got depthRadius ${depthRadius}.`); + let x4D = $x; + let reshapedTo4D = false; + if ($x.rank === 3) { + reshapedTo4D = true; + x4D = reshape$3($x, [1, $x.shape[0], $x.shape[1], $x.shape[2]]); + } + const inputs = { x: x4D }; + const attrs = { depthRadius, bias, alpha, beta }; + // tslint:disable-next-line: no-unnecessary-type-assertion + const res = ENGINE.runKernel(LRN, inputs, attrs); + if (reshapedTo4D) { + return reshape$3(res, [res.shape[1], res.shape[2], res.shape[3]]); + } + else { + return res; + } + } + const localResponseNormalization = /* @__PURE__ */ op({ localResponseNormalization_ }); + + /** + * @license + * Copyright 2018 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ + /** + * Computes natural logarithm of the input `tf.Tensor` element-wise: `ln(x)` + * + * ```js + * const x = tf.tensor1d([1, 2, Math.E]); + * + * x.log().print(); // or tf.log(x) + * ``` + * @param x The input tensor. + * + * @doc {heading: 'Operations', subheading: 'Basic math'} + */ + function log_(x) { + const $x = convertToTensor(x, 'x', 'log', 'float32'); + const inputs = { x: $x }; + return ENGINE.runKernel(Log, inputs); + } + const log$2 = /* @__PURE__ */ op({ log_ }); + + /** + * @license + * Copyright 2018 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + /** + * Computes natural logarithm of the input `tf.Tensor` plus one + * element-wise: `ln(1 + x)` + * + * ```js + * const x = tf.tensor1d([1, 2, Math.E - 1]); + * + * x.log1p().print(); // or tf.log1p(x) + * ``` + * @param x The input tensor. + * + * @doc {heading: 'Operations', subheading: 'Basic math'} + */ + function log1p_(x) { + const $x = convertToTensor(x, 'x', 'log1p'); + const inputs = { x: $x }; + return ENGINE.runKernel(Log1p, inputs); + } + const log1p$2 = /* @__PURE__ */ op({ log1p_ }); + + /** + * @license + * Copyright 2018 Google LLC. All Rights Reserved. 
+ * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + /** + * Provided `f(x)`, returns another function `g(x, dy?)`, which gives the + * gradient of `f(x)` with respect to `x`. + * + * If `dy` is provided, the gradient of `f(x).mul(dy).sum()` with respect to + * `x` is computed instead. `f(x)` must take a single tensor `x` and return a + * single tensor `y`. If `f()` takes multiple inputs, use `tf.grads` instead. + * + * ```js + * // f(x) = x ^ 2 + * const f = x => x.square(); + * // f'(x) = 2x + * const g = tf.grad(f); + * + * const x = tf.tensor1d([2, 3]); + * g(x).print(); + * ``` + * + * ```js + * // f(x) = x ^ 3 + * const f = x => x.pow(tf.scalar(3, 'int32')); + * // f'(x) = 3x ^ 2 + * const g = tf.grad(f); + * // f''(x) = 6x + * const gg = tf.grad(g); + * + * const x = tf.tensor1d([2, 3]); + * gg(x).print(); + * ``` + * + * @param f The function f(x), to compute gradient for. + * + * @doc {heading: 'Training', subheading: 'Gradients'} + */ + function grad(f) { + assert$1(isFunction(f), () => 'The f passed in grad(f) must be a function'); + return (x, dy) => { + // x can be of any dtype, thus null as the last argument. + const $x = convertToTensor(x, 'x', 'tf.grad', 'string_or_numeric'); + const $dy = (dy != null) ? 
convertToTensor(dy, 'dy', 'tf.grad') : null; + return ENGINE.tidy(() => { + const { value, grads } = ENGINE.gradients(() => f($x), [$x], $dy); + if ($dy != null) { + assertShapesMatch(value.shape, $dy.shape, 'The shape of dy passed in grad(f)(x, dy) must match the shape ' + + 'returned by f(x)'); + } + checkGrads(grads); + return grads[0]; + }); + }; + } + /** + * Provided `f(x1, x2,...)`, returns another function `g([x1, x2,...], dy?)`, + * which gives an array of gradients of `f()` with respect to each input + * [`x1`,`x2`,...]. + * + * If `dy` is passed when calling `g()`, the gradient of + * `f(x1,...).mul(dy).sum()` with respect to each input is computed instead. + * The provided `f` must take one or more tensors and return a single tensor + * `y`. If `f()` takes a single input, we recommend using `tf.grad` instead. + * + * ```js + * // f(a, b) = a * b + * const f = (a, b) => a.mul(b); + * // df / da = b, df / db = a + * const g = tf.grads(f); + * + * const a = tf.tensor1d([2, 3]); + * const b = tf.tensor1d([-2, -3]); + * const [da, db] = g([a, b]); + * console.log('da'); + * da.print(); + * console.log('db'); + * db.print(); + * ``` + * + * @param f The function `f(x1, x2,...)` to compute gradients for. + * + * @doc {heading: 'Training', subheading: 'Gradients'} + */ + function grads(f) { + assert$1(isFunction(f), () => 'The f passed in grads(f) must be a function'); + return (args, dy) => { + assert$1(Array.isArray(args), () => 'The args passed in grads(f)(args) must be an array ' + + 'of `Tensor`s or `TensorLike`s'); + // args can be of any dtype, thus null as the last argument. + const $args = convertToTensorArray(args, 'args', 'tf.grads', 'string_or_numeric'); + const $dy = (dy != null) ? 
convertToTensor(dy, 'dy', 'tf.grads') : null; + return ENGINE.tidy(() => { + const { value, grads } = ENGINE.gradients(() => f(...$args), $args, $dy); + if ($dy != null) { + assertShapesMatch(value.shape, $dy.shape, 'The shape of dy passed in grads(f)([x1,...], dy) must ' + + 'match the shape returned by f([x1,...])'); + } + checkGrads(grads); + return grads; + }); + }; + } + /** + * Like `tf.grad`, but also returns the value of `f()`. Useful when `f()` + * returns a metric you want to show. + * + * The result is a rich object with the following properties: + * - grad: The gradient of `f(x)` w.r.t. `x` (result of `tf.grad`). + * - value: The value returned by `f(x)`. + * + * ```js + * // f(x) = x ^ 2 + * const f = x => x.square(); + * // f'(x) = 2x + * const g = tf.valueAndGrad(f); + * + * const x = tf.tensor1d([2, 3]); + * const {value, grad} = g(x); + * + * console.log('value'); + * value.print(); + * console.log('grad'); + * grad.print(); + * ``` + * + * @doc {heading: 'Training', subheading: 'Gradients'} + */ + function valueAndGrad(f) { + assert$1(isFunction(f), () => 'The f passed in valueAndGrad(f) must be a function'); + return (x, dy) => { + assert$1(x instanceof Tensor, () => 'The x passed in valueAndGrad(f)(x) must be a tensor'); + assert$1(dy == null || dy instanceof Tensor, () => 'The dy passed in valueAndGrad(f)(x, dy) must be a tensor'); + const { grads, value } = ENGINE.gradients(() => f(x), [x], dy); + checkGrads(grads); + return { grad: grads[0], value }; + }; + } + /** + * Like `tf.grads`, but returns also the value of `f()`. Useful when `f()` + * returns a metric you want to show. + * + * The result is a rich object with the following properties: + * - grads: The gradients of `f()` w.r.t. each input (result of `tf.grads`). + * - value: The value returned by `f(x)`. 
+ * + * ```js + * // f(a, b) = a * b + * const f = (a, b) => a.mul(b); + * // df/da = b, df/db = a + * const g = tf.valueAndGrads(f); + * + * const a = tf.tensor1d([2, 3]); + * const b = tf.tensor1d([-2, -3]); + * const {value, grads} = g([a, b]); + * + * const [da, db] = grads; + * + * console.log('value'); + * value.print(); + * + * console.log('da'); + * da.print(); + * console.log('db'); + * db.print(); + * ``` + * + * @doc {heading: 'Training', subheading: 'Gradients'} + */ + function valueAndGrads(f) { + assert$1(isFunction(f), () => 'The f passed in valueAndGrads(f) must be a function'); + return (args, dy) => { + assert$1(Array.isArray(args) && args.every(arg => arg instanceof Tensor), () => 'The args passed in valueAndGrads(f)(args) must be array of ' + + 'tensors'); + assert$1(dy == null || dy instanceof Tensor, () => 'The dy passed in valueAndGrads(f)(args, dy) must be a tensor'); + const res = ENGINE.gradients(() => f(...args), args, dy); + if (dy != null) { + assertShapesMatch(res.value.shape, dy.shape, 'The shape of dy passed in valueAndGrads(f)([x1,...], dy) must ' + + 'match the shape returned by f([x1,...])'); + } + checkGrads(res.grads); + return res; + }; + } + /** + * Computes and returns the gradient of f(x) with respect to the list of + * trainable variables provided by `varList`. If no list is provided, it + * defaults to all trainable variables. + * + * ```js + * const a = tf.variable(tf.tensor1d([3, 4])); + * const b = tf.variable(tf.tensor1d([5, 6])); + * const x = tf.tensor1d([1, 2]); + * + * // f(a, b) = a * x ^ 2 + b * x + * const f = () => a.mul(x.square()).add(b.mul(x)).sum(); + * // df/da = x ^ 2, df/db = x + * const {value, grads} = tf.variableGrads(f); + * + * Object.keys(grads).forEach(varName => grads[varName].print()); + * ``` + * + * @param f The function to execute. f() should return a scalar. + * @param varList The list of variables to compute the gradients with respect + * to. Defaults to all trainable variables. 
+ * @returns An object with the following keys and values: + * - `value`: The value of the function `f`. + * - `grads`: A map from the names of the variables to the gradients. + * If the `varList` argument is provided explicitly and contains a subset of + * non-trainable variables, this map in the return value will contain keys + * that map the names of the non-trainable variables to `null`. + * + * @doc {heading: 'Training', subheading: 'Gradients'} + */ + function variableGrads(f, varList) { + assert$1(isFunction(f), () => 'The f passed in variableGrads(f) must be a function'); + assert$1(varList == null || + Array.isArray(varList) && varList.every(v => v instanceof Variable), () => 'The varList passed in variableGrads(f, varList) must be an array ' + + 'of variables'); + const specifiedVarList = varList != null; + if (!specifiedVarList) { + // Get all of the trainable variables. + varList = []; + for (const varName in ENGINE.registeredVariables) { + varList.push(ENGINE.registeredVariables[varName]); + } + } + const specifiedNonTrainable = specifiedVarList ? varList.filter(variable => !variable.trainable) : null; + // Prune non-trainable variables. + const originalVarCount = varList.length; + varList = varList.filter(variable => variable.trainable); + assert$1(varList.length > 0, () => `variableGrads() expects at least one of the input variables to ` + + `be trainable, but none of the ${originalVarCount} variables is ` + + `trainable.`); + const allowNoGradients = true; + const { value, grads } = ENGINE.gradients(f, varList, null, allowNoGradients); + assert$1(grads.some(g => g != null), () => 'Cannot find a connection between any variable and the result of ' + + 'the loss function y=f(x). 
Please make sure the operations that ' + + 'use variables are inside the function f passed to minimize().'); + assert$1(value.rank === 0, () => `The f passed in variableGrads(f) must return a scalar, but it ` + + `returned a rank-${value.rank} tensor`); + const namedGrads = {}; + varList.forEach((v, i) => { + if (grads[i] != null) { + namedGrads[v.name] = grads[i]; + } + }); + if (specifiedNonTrainable != null) { + // If varList is explicitly provided and contains non-trainable values, + // add them to the returned gradients with `null` values. + specifiedNonTrainable.forEach(v => namedGrads[v.name] = null); + } + return { value, grads: namedGrads }; + } + /** + * Overrides the gradient computation of a function `f`. + * + * Takes a function + * `f(...inputs, save) => {value: Tensor, gradFunc: (dy, saved) => Tensor[]}` + * and returns another function `g(...inputs)` which takes the same inputs as + * `f`. When called, `g` returns `f().value`. In backward mode, custom gradients + * with respect to each input of `f` are computed using `f().gradFunc`. + * + * The `save` function passed to `f` should be used for saving tensors needed + * in the gradient. And the `saved` passed to the `gradFunc` is a + * `NamedTensorMap`, which contains those saved tensors. + * + * ```js + * const customOp = tf.customGrad((x, save) => { + * // Save x to make sure it's available later for the gradient. + * save([x]); + * // Override gradient of our custom x ^ 2 op to be dy * abs(x); + * return { + * value: x.square(), + * // Note `saved.x` which points to the `x` we saved earlier. 
+ * gradFunc: (dy, saved) => [dy.mul(saved[0].abs())] + * }; + * }); + * + * const x = tf.tensor1d([-1, -2, 3]); + * const dx = tf.grad(x => customOp(x)); + * + * console.log(`f(x):`); + * customOp(x).print(); + * console.log(`f'(x):`); + * dx(x).print(); + * ``` + * + * @param f The function to evaluate in forward mode, which should return + * `{value: Tensor, gradFunc: (dy, saved) => Tensor[]}`, where `gradFunc` + * returns the custom gradients of `f` with respect to its inputs. + * + * @doc {heading: 'Training', subheading: 'Gradients'} + */ + function customGrad(f) { + return ENGINE.customGrad(f); + } + function checkGrads(grads) { + const numNullGradients = grads.filter(g => g == null).length; + if (numNullGradients > 0) { + throw new Error(`Cannot compute gradient of y=f(x) with respect to x. Make sure that + the f you passed encloses all operations that lead from x to y.`); + } + } + + /** + * @license + * Copyright 2018 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + /** + * Computes `-1 * x` element-wise. + * + * ```js + * const x = tf.tensor2d([1, 2, -2, 0], [2, 2]); + * + * x.neg().print(); // or tf.neg(x) + * ``` + * + * @param x The input tensor. 
+ * + * @doc {heading: 'Operations', subheading: 'Basic math'} + */ + function neg_(x) { + const $x = convertToTensor(x, 'x', 'neg'); + const inputs = { x: $x }; + return ENGINE.runKernel(Neg, inputs); + } + const neg$2 = /* @__PURE__ */ op({ neg_ }); + + /** + * @license + * Copyright 2018 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + /** + * Computes softplus of the input `tf.Tensor` element-wise: `log(exp(x) + 1)` + * + * ```js + * const x = tf.tensor1d([0, 1, -1, .7]); + * + * x.softplus().print(); // or tf.softplus(x) + * ``` + * @param x The input tensor. + * + * @doc {heading: 'Operations', subheading: 'Basic math'} + */ + function softplus_(x) { + const $x = convertToTensor(x, 'x', 'softplus'); + const inputs = { x: $x }; + return ENGINE.runKernel(Softplus$1, inputs); + } + const softplus$2 = /* @__PURE__ */ op({ softplus_ }); + + /** + * @license + * Copyright 2018 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + /** + * Computes log sigmoid of the input `tf.Tensor` element-wise: + * `logSigmoid(x)`. For numerical stability, we use `-tf.softplus(-x)`. + * + * ```js + * const x = tf.tensor1d([0, 1, -1, .7]); + * + * x.logSigmoid().print(); // or tf.logSigmoid(x) + * ``` + * @param x The input tensor. + * + * @doc {heading: 'Operations', subheading: 'Basic math'} + */ + function logSigmoid_(x) { + const $x = convertToTensor(x, 'x', 'logSigmoid'); + // Use a custom gradient to maintain previous implementation. + // There is no LogSigmoid kernel in TF so we can't use engine.runKernel + // directly + const customOp = customGrad((x) => { + // TODO(yassogba) we can remove the chained softplus call here only + // after backends have modualrized softplus at which point we can call + // engine runKernel(..., Sotfplus, ...) directly. + const value = neg$2(softplus$2(neg$2(x))); + const gradFunc = (dy) => { + const derX = mul(dy, sigmoid$2(neg$2(x))); + return derX; + }; + return { value, gradFunc }; + }); + return customOp($x); + } + const logSigmoid = /* @__PURE__ */ op({ logSigmoid_ }); + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + /** + * Subtracts two `tf.Tensor`s element-wise, A - B. Supports broadcasting. + * + * ```js + * const a = tf.tensor1d([10, 20, 30, 40]); + * const b = tf.tensor1d([1, 2, 3, 4]); + * + * a.sub(b).print(); // or tf.sub(a, b) + * ``` + * + * ```js + * // Broadcast subtract a with b. + * const a = tf.tensor1d([10, 20, 30, 40]); + * const b = tf.scalar(5); + * + * a.sub(b).print(); // or tf.sub(a, b) + * ``` + * @param a The first `tf.Tensor` to subtract from. + * @param b The second `tf.Tensor` to be subtracted. Must have the same dtype as + * `a`. + * + * @doc {heading: 'Operations', subheading: 'Arithmetic'} + */ + function sub_(a, b) { + let $a = convertToTensor(a, 'a', 'sub'); + let $b = convertToTensor(b, 'b', 'sub'); + [$a, $b] = makeTypesMatch($a, $b); + const inputs = { a: $a, b: $b }; + return ENGINE.runKernel(Sub, inputs); + } + const sub$2 = /* @__PURE__ */ op({ sub_ }); + + /** + * @license + * Copyright 2020 Google Inc. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + /** + * Computes the log softmax. + * + * ```js + * const a = tf.tensor1d([1, 2, 3]); + * + * a.logSoftmax().print(); // or tf.logSoftmax(a) + * ``` + * + * ```js + * const a = tf.tensor2d([2, 4, 6, 1, 2, 3], [2, 3]); + * + * a.logSoftmax().print(); // or tf.logSoftmax(a) + * ``` + * + * @param logits The logits array. + * @param axis The dimension softmax would be performed on. Defaults to `-1` + * which indicates the last dimension. + * + * @doc {heading: 'Operations', subheading: 'Normalization'} + */ + function logSoftmax_(logits, axis = -1) { + const $logits = convertToTensor(logits, 'logits', 'logSoftmax'); + if (axis === -1) { + axis = $logits.rank - 1; + } + if (axis !== $logits.rank - 1) { + throw Error('Log Softmax along a non-last dimension is not yet supported. ' + + `Logits was rank ${$logits.rank} and axis was ${axis}`); + } + // const forward: ForwardFunc = (backend, save) => { + // const keepDims = true; + // const xMax = max(logits, axis, true); + // const shifted = sub(logits, xMax); + // const value = + // sub(cast(shifted, 'float32'), log(sum(exp(shifted), axis, + // keepDims))); + // save([value]); + // return value; + // }; + // Use a custom gradient for numerical stability. 
+ const customOp = customGrad((logits, save) => { + const keepDims = true; + const xMax = max$3(logits, axis, true); + const shifted = sub$2(logits, xMax); + const value = sub$2(cast$3(shifted, 'float32'), log$2(sum$3(exp$2(shifted), axis, keepDims))); + save([value]); + const gradFunc = (dy, saved) => { + const [value] = saved; + const keepDims = true; + const softmax = exp$2(value); + return sub$2(dy, mul(sum$3(dy, axis, keepDims), softmax)); + }; + return { value, gradFunc }; + }); + return customOp($logits); + // TODO Use Engine.runKernel when CPU/WebGL/WASM backends implement this. + // const inputs: LogSoftmaxInputs = {logits: $logits}; + // const attrs: LogSoftmaxAttrs = {axis}; + // return ENGINE.runKernel( + // LogSoftmax, inputs as unknown as NamedTensorMap, + // attrs as unknown as NamedAttrMap); + } + const logSoftmax = /* @__PURE__ */ op({ logSoftmax_ }); + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + /** + * Computes the log(sum(exp(elements across the reduction dimensions))). + * + * Reduces the input along the dimensions given in `axis`. Unless `keepDims` + * is true, the rank of the array is reduced by 1 for each entry in `axis`. + * If `keepDims` is true, the reduced dimensions are retained with length 1. 
+ * If `axis` has no entries, all dimensions are reduced, and an array with a + * single element is returned. + * + * ```js + * const x = tf.tensor1d([1, 2, 3]); + * + * x.logSumExp().print(); // or tf.logSumExp(x) + * ``` + * + * ```js + * const x = tf.tensor2d([1, 2, 3, 4], [2, 2]); + * + * const axis = 1; + * x.logSumExp(axis).print(); // or tf.logSumExp(a, axis) + * ``` + * @param x The input tensor. + * @param axis The dimension(s) to reduce. If null (the default), + * reduces all dimensions. + * @param keepDims If true, retains reduced dimensions with length + * of 1. Defaults to false. + * + * @doc {heading: 'Operations', subheading: 'Reduction'} + */ + function logSumExp_(x, axis = null, keepDims = false) { + const $x = convertToTensor(x, 'x', 'logSumExp'); + const axes = parseAxisParam(axis, $x.shape); + const xMax = max$3($x, axes, true /* keepDims */); + const a = sub$2($x, xMax); + const b = exp$2(a); + const c = sum$3(b, axes); + const d = log$2(c); + const res = add$3(reshape$3(xMax, d.shape), d); + if (keepDims) { + const newShape = expandShapeToKeepDim(res.shape, axes); + return reshape$3(res, newShape); + } + return res; + } + const logSumExp = /* @__PURE__ */ op({ logSumExp_ }); + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + /** + * Returns the truth value of `a AND b` element-wise. 
Supports broadcasting. + * + * ```js + * const a = tf.tensor1d([false, false, true, true], 'bool'); + * const b = tf.tensor1d([false, true, false, true], 'bool'); + * + * a.logicalAnd(b).print(); + * ``` + * + * @param a The first input tensor. Must be of dtype bool. + * @param b The second input tensor. Must be of dtype bool. + * + * @doc {heading: 'Operations', subheading: 'Logical'} + */ + function logicalAnd_(a, b) { + const $a = convertToTensor(a, 'a', 'logicalAnd', 'bool'); + const $b = convertToTensor(b, 'b', 'logicalAnd', 'bool'); + assertAndGetBroadcastShape($a.shape, $b.shape); + const inputs = { a: $a, b: $b }; + return ENGINE.runKernel(LogicalAnd, inputs); + } + const logicalAnd$2 = /* @__PURE__ */ op({ logicalAnd_ }); + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + /** + * Returns the truth value of `NOT x` element-wise. + * + * ```js + * const a = tf.tensor1d([false, true], 'bool'); + * + * a.logicalNot().print(); + * ``` + * + * @param x The input tensor. Must be of dtype 'bool'. 
+ * + * @doc {heading: 'Operations', subheading: 'Logical'} + */ + function logicalNot_(x) { + const $x = convertToTensor(x, 'x', 'logicalNot', 'bool'); + const inputs = { x: $x }; + return ENGINE.runKernel(LogicalNot, inputs); + } + const logicalNot$2 = /* @__PURE__ */ op({ logicalNot_ }); + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + /** + * Returns the truth value of `a OR b` element-wise. Supports broadcasting. + * + * ```js + * const a = tf.tensor1d([false, false, true, true], 'bool'); + * const b = tf.tensor1d([false, true, false, true], 'bool'); + * + * a.logicalOr(b).print(); + * ``` + * @param a The first input tensor. Must be of dtype bool. + * @param b The second input tensor. Must be of dtype bool. + * + * @doc {heading: 'Operations', subheading: 'Logical'} + */ + function logicalOr_(a, b) { + const $a = convertToTensor(a, 'a', 'logicalOr', 'bool'); + const $b = convertToTensor(b, 'b', 'logicalOr', 'bool'); + assertAndGetBroadcastShape($a.shape, $b.shape); + const inputs = { a: $a, b: $b }; + return ENGINE.runKernel(LogicalOr, inputs); + } + const logicalOr$2 = /* @__PURE__ */ op({ logicalOr_ }); + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. 
+ * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + /** + * Returns the truth value of `a XOR b` element-wise. Supports broadcasting. + * + * ```js + * const a = tf.tensor1d([false, false, true, true], 'bool'); + * const b = tf.tensor1d([false, true, false, true], 'bool'); + * + * a.logicalXor(b).print(); + * ``` + * + * @param a The first input tensor. Must be of dtype bool. + * @param b The second input tensor. Must be of dtype bool. + * + * @doc {heading: 'Operations', subheading: 'Logical'} + */ + function logicalXor_(a, b) { + const $a = convertToTensor(a, 'a', 'logicalXor', 'bool'); + const $b = convertToTensor(b, 'b', 'logicalXor', 'bool'); + assertAndGetBroadcastShape($a.shape, $b.shape); + // x ^ y = (x | y) & ~(x & y) + return logicalAnd$2(logicalOr$2(a, b), logicalNot$2(logicalAnd$2(a, b))); + } + const logicalXor = /* @__PURE__ */ op({ logicalXor_ }); + + /** + * @license + * Copyright 2022 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + const INT32_MAX$1 = 2147483648; + /** + * Searches for where a value would go in a sorted sequence. + * + * This is not a method for checking containment (like javascript in). + * + * The typical use case for this operation is "binning", "bucketing", or + * "discretizing". The values are assigned to bucket-indices based on the edges + * listed in 'sortedSequence'. This operation returns the bucket-index for each + * value. + * + * The side argument controls which index is returned if a value lands exactly + * on an edge. + * + * The axis is not settable for this operation. It always operates on the + * innermost dimension (axis=-1). The operation will accept any number of outer + * dimensions. + * + * Note: This operation assumes that 'sortedSequence' is sorted along the + * innermost axis, maybe using 'sort(..., axis=-1)'. If the sequence is not + * sorted no error is raised and the content of the returned tensor is not well + * defined. 
+ * + * ```js + * const edges = tf.tensor1d([-1, 3.3, 9.1, 10.0]); + * let values = tf.tensor1d([0.0, 4.1, 12.0]); + * const result1 = tf.searchSorted(edges, values, 'left'); + * result1.print(); // [1, 2, 4] + * + * const seq = tf.tensor1d([0, 3, 9, 10, 10]); + * values = tf.tensor1d([0, 4, 10]); + * const result2 = tf.searchSorted(seq, values, 'left'); + * result2.print(); // [0, 2, 3] + * const result3 = tf.searchSorted(seq, values, 'right'); + * result3.print(); // [1, 2, 5] + * + * const sortedSequence = tf.tensor2d([[0., 3., 8., 9., 10.], + * [1., 2., 3., 4., 5.]]); + * values = tf.tensor2d([[9.8, 2.1, 4.3], + * [0.1, 6.6, 4.5, ]]); + * const result4 = tf.searchSorted(sortedSequence, values, 'left'); + * result4.print(); // [[4, 1, 2], [0, 5, 4]] + * ``` + * @param sortedSequence: N-D. Sorted sequence. + * @param values: N-D. Search values. + * @param side: 'left'|'right'. Defaults to 'left'. 'left' corresponds to lower + * bound and 'right' to upper bound. + * @return An N-D int32 tensor the size of values containing the result of + * applying either lower bound or upper bound (depending on side) to each + * value. The result is not a global index to the entire Tensor, but the + * index in the last dimension. 
+ * @doc {heading: 'Operations', subheading: 'Evaluation'} + */ + function searchSorted_(sortedSequence, values, side = 'left') { + const $sortedSequence = convertToTensor(sortedSequence, 'sortedSequence', 'searchSorted'); + const $values = convertToTensor(values, 'values', 'searchSorted'); + const sequenceSize = $sortedSequence.shape[$sortedSequence.shape.length - 1]; + const valuesSize = $values.shape[$values.shape.length - 1]; + const $sortedSequence2D = reshape$3($sortedSequence, [-1, sequenceSize]); + const $values2D = reshape$3($values, [-1, valuesSize]); + if ($sortedSequence2D.rank < 2) { + throw new Error(`Sorted input argument must be at least 2-dimensional`); + } + if ($sortedSequence2D.shape[0] !== $values2D.shape[0]) { + throw new Error(`Leading dimension of 'sortedSequence' and 'values' must match.`); + } + if (sizeFromShape($values2D.shape) >= INT32_MAX$1) { + throw new Error(`values tensor size must less than ${INT32_MAX$1}`); + } + if ($sortedSequence2D.shape[1] >= INT32_MAX$1) { + throw new Error(`trailing dim_size must less than ${INT32_MAX$1} for int32 output type, was ${$sortedSequence2D.shape[1]}`); + } + const inputs = { + sortedSequence: $sortedSequence2D, + values: $values2D, + }; + const attrs = { side }; + return ENGINE.runKernel(SearchSorted, inputs, attrs); + } + const searchSorted$2 = /* @__PURE__ */ op({ searchSorted_ }); + + /** + * @license + * Copyright 2022 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + /** + * Searches for where a value would go in a sorted sequence. + * + * This is not a method for checking containment (like javascript in). + * + * The typical use case for this operation is "binning", "bucketing", or + * "discretizing". The values are assigned to bucket-indices based on the edges + * listed in 'sortedSequence'. This operation returns the bucket-index for each + * value. + * + * The index returned corresponds to the first edge greater than or equal to the + * value. + * + * The axis is not settable for this operation. It always operates on the + * innermost dimension (axis=-1). The operation will accept any number of outer + * dimensions. + * + * Note: This operation assumes that 'lowerBound' is sorted along the + * innermost axis, maybe using 'sort(..., axis=-1)'. If the sequence is not + * sorted no error is raised and the content of the returned tensor is not well + * defined. + * + * ```js + * const edges = tf.tensor1d([-1, 3.3, 9.1, 10.0]); + * let values = tf.tensor1d([0.0, 4.1, 12.0]); + * const result1 = tf.lowerBound(edges, values); + * result1.print(); // [1, 2, 4] + * + * const seq = tf.tensor1d([0, 3, 9, 10, 10]); + * values = tf.tensor1d([0, 4, 10]); + * const result2 = tf.lowerBound(seq, values); + * result2.print(); // [0, 2, 3] + * + * const sortedSequence = tf.tensor2d([[0., 3., 8., 9., 10.], + * [1., 2., 3., 4., 5.]]); + * values = tf.tensor2d([[9.8, 2.1, 4.3], + * [0.1, 6.6, 4.5, ]]); + * const result3 = tf.lowerBound(sortedSequence, values); + * result3.print(); // [[4, 1, 2], [0, 5, 4]] + * ``` + * @param sortedSequence: N-D. Sorted sequence. + * @param values: N-D. Search values. + * @return An N-D int32 tensor the size of values containing the result of + * applying lower bound to each value. 
The result is not a global index to + * the entire Tensor, but the index in the last dimension. + * @doc {heading: 'Operations', subheading: 'Evaluation'} + */ + function lowerBound$1(sortedSequence, values) { + return searchSorted$2(sortedSequence, values, 'left'); + } + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + /** + * Computes the 2D max pooling of an image. + * + * @param x The input tensor, of rank 4 or rank 3 of shape + * `[batch, height, width, inChannels]`. If rank 3, batch of 1 is assumed. + * @param filterSize The filter size: `[filterHeight, filterWidth]`. If + * `filterSize` is a single number, then `filterHeight == filterWidth`. + * @param strides The strides of the pooling: `[strideHeight, strideWidth]`. If + * `strides` is a single number, then `strideHeight == strideWidth`. + * @param dilations The dilation rates: `[dilationHeight, dilationWidth]` + * in which we sample input values across the height and width dimensions + * in dilated pooling. Defaults to `[1, 1]`. If `dilations` is a single + * number, then `dilationHeight == dilationWidth`. If it is greater than + * 1, then all values of `strides` must be 1. + * @param pad The type of padding algorithm. + * - `same` and stride 1: output will be of same size as input, + * regardless of filter size. 
+ * - `valid`: output will be smaller than input if filter is larger + * than 1x1. + * - For more info, see this guide: + * [https://www.tensorflow.org/api_docs/python/tf/nn/convolution]( + * https://www.tensorflow.org/api_docs/python/tf/nn/convolution) + * @param dimRoundingMode A string from: 'ceil', 'round', 'floor'. If none is + * provided, it will default to truncate. + */ + function maxPool_(x, filterSize, strides, pad, dimRoundingMode) { + const $x = convertToTensor(x, 'x', 'maxPool'); + const dilations = 1; + let x4D = $x; + let reshapedTo4D = false; + if ($x.rank === 3) { + reshapedTo4D = true; + x4D = reshape$3($x, [1, $x.shape[0], $x.shape[1], $x.shape[2]]); + } + assert$1(x4D.rank === 4, () => `Error in maxPool: input must be rank 4 but got rank ${x4D.rank}.`); + assert$1(eitherStridesOrDilationsAreOne(strides, dilations), () => 'Error in maxPool: Either strides or dilations must be 1. ' + + `Got strides ${strides} and dilations '${dilations}'`); + checkPadOnDimRoundingMode('maxPool', pad, dimRoundingMode); + const inputs = { x: x4D }; + const attrs = { filterSize, strides, pad, dimRoundingMode }; + // tslint:disable-next-line: no-unnecessary-type-assertion + const res = ENGINE.runKernel(MaxPool, inputs, attrs); + if (reshapedTo4D) { + return reshape$3(res, [res.shape[1], res.shape[2], res.shape[3]]); + } + return res; + } + const maxPool$2 = /* @__PURE__ */ op({ maxPool_ }); + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + /** + * Computes the 3D max pooling. + * + * ```js + * const x = tf.tensor5d([1, 2, 3, 4, 5, 6, 7, 8], [1, 2, 2, 2, 1]); + * const result = tf.maxPool3d(x, 2, 1, 'valid'); + * result.print(); + * ``` + * + * @param x The input tensor, of rank 5 or rank 4 of shape + * `[batch, depth, height, width, inChannels]`. + * @param filterSize The filter size: + * `[filterDepth, filterHeight, filterWidth]`. + * If `filterSize` is a single number, + * then `filterDepth == filterHeight == filterWidth`. + * @param strides The strides of the pooling: + * `[strideDepth, strideHeight, strideWidth]`. + * If `strides` is a single number, + * then `strideDepth == strideHeight == strideWidth`. + * @param pad The type of padding algorithm. + * - `same` and stride 1: output will be of same size as input, + * regardless of filter size. + * - `valid`: output will be smaller than input if filter is larger + * than 1*1x1. + * - For more info, see this guide: + * [https://www.tensorflow.org/api_docs/python/tf/nn/convolution]( + * https://www.tensorflow.org/api_docs/python/tf/nn/convolution) + * @param dimRoundingMode A string from: 'ceil', 'round', 'floor'. If none is + * provided, it will default to truncate. + * @param dataFormat An optional string from: "NDHWC", "NCDHW". Defaults to + * "NDHWC". Specify the data format of the input and output data. With the + * default format "NDHWC", the data is stored in the order of: [batch, + * depth, height, width, channels]. Only "NDHWC" is currently supported. 
+ * @doc {heading: 'Operations', subheading: 'Convolution'} + */ + function maxPool3d_(x, filterSize = [1, 1, 1], strides, pad, dimRoundingMode, dataFormat = 'NDHWC') { + const $x = convertToTensor(x, 'x', 'maxPool3d'); + let x5D = $x; + let reshapedTo5D = false; + if ($x.rank === 4) { + reshapedTo5D = true; + x5D = reshape$3($x, [1, $x.shape[0], $x.shape[1], $x.shape[2], $x.shape[3]]); + } + assert$1(x5D.rank === 5, () => `Error in maxPool3d: x must be rank 5 but got rank ${x5D.rank}.`); + assert$1(dataFormat === 'NDHWC', () => `Error in maxPool3d: Only NDHWC is currently supported, ` + + `but got dataFormat of ${dataFormat}`); + checkPadOnDimRoundingMode('maxPool3d', pad, dimRoundingMode); + const inputs = { x: x5D }; + const attrs = { filterSize, strides, pad, dimRoundingMode, dataFormat }; + // tslint:disable-next-line: no-unnecessary-type-assertion + const res = ENGINE.runKernel(MaxPool3D, inputs, attrs); + if (reshapedTo5D) { + return reshape$3(res, [res.shape[1], res.shape[2], res.shape[3], res.shape[4]]); + } + return res; + } + const maxPool3d$1 = /* @__PURE__ */ op({ maxPool3d_ }); + + /** + * @license + * Copyright 2018 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + /** + * Computes the 2D max pooling of an image with Argmax index. 
+ * The indices in argmax are flattened, so that a maximum value at position `[b, + * y, x, c]` becomes flattened index: `(y * width + x) * channels + c` if + * include_batch_in_index is False; `((b * height + y) * width + x) * channels + * +c` if include_batch_in_index is True. + * + * The indices returned are always in `[0, height) x [0, width)` before + * flattening. + * + * @param x The input tensor, of rank 4 or rank 3 of shape + * `[batch, height, width, inChannels]`. If rank 3, batch of 1 is assumed. + * @param filterSize The filter size: `[filterHeight, filterWidth]`. If + * `filterSize` is a single number, then `filterHeight == filterWidth`. + * @param strides The strides of the pooling: `[strideHeight, strideWidth]`. If + * `strides` is a single number, then `strideHeight == strideWidth`. + * @param dataFormat An optional string from: "NDHWC", "NCDHW". Defaults to + * "NDHWC". Specify the data format of the input and output data. With the + * default format "NDHWC", the data is stored in the order of: [batch, + * depth, height, width, channels]. Only "NDHWC" is currently supported. + * @param pad The type of padding algorithm. + * - `same` and stride 1: output will be of same size as input, + * regardless of filter size. + * - `valid`: output will be smaller than input if filter is larger + * than 1x1. + * - For more info, see this guide: + * [https://www.tensorflow.org/api_docs/python/tf/nn/convolution]( + * https://www.tensorflow.org/api_docs/python/tf/nn/convolution) + * @param includeBatchIndex Defaults to False. Whether to include batch + * dimension in flattened index of argmax. 
+ * + * @doc {heading: 'Operations', subheading: 'Convolution'} + */ + function maxPoolWithArgmax_(x, filterSize, strides, pad, includeBatchInIndex = false) { + const $x = convertToTensor(x, 'x', 'maxPoolWithArgmax'); + const inputs = { x: $x }; + const attrs = { filterSize, strides, pad, includeBatchInIndex }; + // tslint:disable-next-line: no-unnecessary-type-assertion + const result = ENGINE.runKernel(MaxPoolWithArgmax, inputs, attrs); + return { result: result[0], indexes: result[1] }; + } + const maxPoolWithArgmax = /* @__PURE__ */ op({ maxPoolWithArgmax_ }); + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + /** + * Returns the max of a and b (`a > b ? a : b`) element-wise. + * Supports broadcasting. + * + * We also expose `tf.maximumStrict` which has the same signature as this op and + * asserts that `a` and `b` are the same shape (does not broadcast). + * + * ```js + * const a = tf.tensor1d([1, 4, 3, 16]); + * const b = tf.tensor1d([1, 2, 9, 4]); + * + * a.maximum(b).print(); // or tf.maximum(a, b) + * ``` + * + * ```js + * // Broadcast maximum a with b. + * const a = tf.tensor1d([2, 4, 6, 8]); + * const b = tf.scalar(5); + * + * a.maximum(b).print(); // or tf.maximum(a, b) + * ``` + * + * @param a The first tensor. + * @param b The second tensor. Must have the same type as `a`. 
+ * + * @doc {heading: 'Operations', subheading: 'Arithmetic'} + */ + function maximum_(a, b) { + let $a = convertToTensor(a, 'a', 'maximum'); + let $b = convertToTensor(b, 'b', 'maximum'); + [$a, $b] = makeTypesMatch($a, $b); + if ($a.dtype === 'bool') { + $a = cast$3($a, 'int32'); + $b = cast$3($b, 'int32'); + } + assertAndGetBroadcastShape($a.shape, $b.shape); + const inputs = { a: $a, b: $b }; + return ENGINE.runKernel(Maximum$1, inputs); + } + const maximum$4 = /* @__PURE__ */ op({ maximum_ }); + + /** + * @license + * Copyright 2020 Google Inc. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + /** + * Computes the mean of elements across dimensions of a `tf.Tensor`. + * + * Reduces `x` along the dimensions given in `axis`. Unless `keepDims` is + * true, the rank of the `tf.Tensor` is reduced by 1 for each entry in `axis`. + * If `keepDims` is true, the reduced dimensions are retained with length 1. + * If `axis` has no entries, all dimensions are reduced, and a `tf.Tensor` with + * a single element is returned. + * + * ```js + * const x = tf.tensor1d([1, 2, 3]); + * + * x.mean().print(); // or tf.mean(a) + * ``` + * + * ```js + * const x = tf.tensor2d([1, 2, 3, 4], [2, 2]); + * + * const axis = 1; + * x.mean(axis).print(); // or tf.mean(x, axis) + * ``` + * + * @param x The input tensor. + * @param axis The dimension(s) to reduce. 
By default it reduces + * all dimensions. + * @param keepDims If true, retains reduced dimensions with size 1. + * + * @doc {heading: 'Operations', subheading: 'Reduction'} + */ + function mean_(x, axis = null, keepDims = false) { + const $x = convertToTensor(x, 'x', 'mean'); + const inputs = { x: $x }; + const attrs = { axis, keepDims }; + return ENGINE.runKernel(Mean, inputs, attrs); + } + const mean$3 = /* @__PURE__ */ op({ mean_ }); + + /** + * @license + * Copyright 2018 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + /** + * Creates a `tf.Tensor` with all elements set to 0. + * + * ```js + * tf.zeros([2, 2]).print(); + * ``` + * + * @param shape An array of integers defining the output tensor shape. + * @param dtype The type of an element in the resulting tensor. Can + * be 'float32', 'int32' or 'bool'. Defaults to 'float'. + * + * @doc {heading: 'Tensors', subheading: 'Creation'} + */ + function zeros$2(shape, dtype = 'float32') { + assertNonNegativeIntegerDimensions(shape); + if (dtype === 'complex64') { + const real = zeros$2(shape, 'float32'); + const imag = zeros$2(shape, 'float32'); + return complex$2(real, imag); + } + const values = makeZerosTypedArray(sizeFromShape(shape), dtype); + return ENGINE.makeTensor(values, shape, dtype); + } + + /** + * @license + * Copyright 2018 Google LLC. All Rights Reserved. 
+ * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + /** + * Creates a `tf.Tensor` with all elements set to 1. + * + * ```js + * tf.ones([2, 2]).print(); + * ``` + * + * @param shape An array of integers defining the output tensor shape. + * @param dtype The type of an element in the resulting tensor. Defaults to + * 'float'. + * + * @doc {heading: 'Tensors', subheading: 'Creation'} + */ + function ones$1(shape, dtype = 'float32') { + assertNonNegativeIntegerDimensions(shape); + if (dtype === 'complex64') { + const real = ones$1(shape, 'float32'); + const imag = zeros$2(shape, 'float32'); + return complex$2(real, imag); + } + const values = makeOnesTypedArray(sizeFromShape(shape), dtype); + return ENGINE.makeTensor(values, shape, dtype); + } + + /** + * @license + * Copyright 2021 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ + /** + * Broadcasts parameters for evaluation on an N-D grid. + * + * Given N one-dimensional coordinate arrays `*args`, returns a list `outputs` + * of N-D coordinate arrays for evaluating expressions on an N-D grid. + * + * Notes: + * `meshgrid` supports cartesian ('xy') and matrix ('ij') indexing conventions. + * When the `indexing` argument is set to 'xy' (the default), the broadcasting + * instructions for the first two dimensions are swapped. + * Examples: + * Calling `const [X, Y] = meshgrid(x, y)` with the tensors + * + * ```javascript + * const x = [1, 2, 3]; + * const y = [4, 5, 6]; + * const [X, Y] = tf.meshgrid(x, y); + * // X = [[1, 2, 3], + * // [1, 2, 3], + * // [1, 2, 3]] + * // Y = [[4, 4, 4], + * // [5, 5, 5], + * // [6, 6, 6]] + * ``` + * + * @param x Tensor with rank geq 1. + * @param y Tensor with rank geq 1. + * @param indexing + * + * @doc {heading: 'Operations', subheading: 'Slicing and Joining'} + */ + function meshgrid(x, y, { indexing = 'xy' } = {}) { + if (indexing !== 'xy' && indexing !== 'ij') { + throw new TypeError(`${indexing} is not a valid third argument to meshgrid`); + } + if (x === undefined) { + return []; + } + let $x = convertToTensor(x, 'x', 'meshgrid', x instanceof Tensor ? x.dtype : 'float32'); + if (y === undefined) { + return [$x]; + } + let $y = convertToTensor(y, 'y', 'meshgrid', y instanceof Tensor ? y.dtype : 'float32'); + const w = sizeFromShape($x.shape); + const h = sizeFromShape($y.shape); + if (indexing === 'xy') { + $x = reshape$3($x, [1, -1]); + $y = reshape$3($y, [-1, 1]); + return [ + matMul$1(ones$1([h, 1], $x.dtype), $x), + matMul$1($y, ones$1([1, w], $y.dtype)), + ]; + } + $x = reshape$3($x, [-1, 1]); + $y = reshape$3($y, [1, -1]); + return [ + matMul$1($x, ones$1([1, h], $x.dtype)), + matMul$1(ones$1([w, 1], $y.dtype), $y), + ]; + } + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. 
+ * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + /** + * Returns the min of a and b (`a < b ? a : b`) element-wise. + * Supports broadcasting. + * + * We also expose `minimumStrict` which has the same signature as this op and + * asserts that `a` and `b` are the same shape (does not broadcast). + * + * ```js + * const a = tf.tensor1d([1, 4, 3, 16]); + * const b = tf.tensor1d([1, 2, 9, 4]); + * + * a.minimum(b).print(); // or tf.minimum(a, b) + * ``` + * + * ```js + * // Broadcast minimum a with b. + * const a = tf.tensor1d([2, 4, 6, 8]); + * const b = tf.scalar(5); + * + * a.minimum(b).print(); // or tf.minimum(a, b) + * ``` + * + * @param a The first tensor. + * @param b The second tensor. Must have the same type as `a`. + * + * @doc {heading: 'Operations', subheading: 'Arithmetic'} + */ + function minimum_(a, b) { + let $a = convertToTensor(a, 'a', 'minimum'); + let $b = convertToTensor(b, 'b', 'minimum'); + [$a, $b] = makeTypesMatch($a, $b); + if ($a.dtype === 'bool') { + $a = cast$3($a, 'int32'); + $b = cast$3($b, 'int32'); + } + assertAndGetBroadcastShape($a.shape, $b.shape); + const inputs = { a: $a, b: $b }; + return ENGINE.runKernel(Minimum$1, inputs); + } + const minimum$4 = /* @__PURE__ */ op({ minimum_ }); + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. 
+ * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + /** + * Pads a `tf.Tensor` using mirror padding. + * + * This operation implements the `REFLECT` and `SYMMETRIC` modes of pad. + * + * ```js + * const x = tf.range(0, 9).reshape([1, 1, 3, 3]); + * x.mirrorPad([[0, 0], [0, 0], [2, 2], [2, 2]], 'reflect').print(); + * ``` + * @param x The tensor to pad. + * @param paddings An array of length `R` (the rank of the tensor), where + * each element is a length-2 tuple of ints `[padBefore, padAfter]`, + * specifying how much to pad along each dimension of the tensor. + * In "reflect" mode, the padded regions do not include the borders, + * while in "symmetric" mode the padded regions do include the borders. + * For example, if the input is `[1, 2, 3]` and paddings is `[0, 2]`, + * then the output is `[1, 2, 3, 2, 1]` in "reflect" mode, and + * `[1, 2, 3, 3, 2]` in "symmetric" mode. + * If `mode` is "reflect" then both `paddings[D, 0]` and `paddings[D, 1]` + * must be no greater than `x.shape[D] - 1`. If mode is "symmetric" + * then both `paddings[D, 0]` and `paddings[D, 1]` must be no greater than + * `x.shape[D]` + * @param mode String to specify padding mode. Can be `'reflect' | 'symmetric'` + */ + /** @doc {heading: 'Tensors', subheading: 'Transformations'} */ + function mirrorPad_(x, paddings, mode) { + assert$1(mode === 'reflect' || mode === 'symmetric', () => `Invalid mode. 
Mode must be either reflect or symmetric. ` + + `Got ${mode}.`); + const $x = convertToTensor(x, 'x', 'mirrorPad'); + if ($x.rank === 0) { + throw new Error('mirrorPad(scalar) is not defined. ' + + 'Pass non-scalar to mirrorPad'); + } + assert$1(paddings.length === $x.rank, () => `Padding doesn't match input. Must be ${$x.rank}. ` + + `Got ${paddings.length}.`); + const shapeOffset = mode === 'reflect' ? 1 : 0; + for (let i = 0; i < $x.rank; i++) { + assert$1(paddings[i].length === 2, () => `Invalid number of paddings. Must be length of 2 each.`); + assert$1(paddings[i][0] >= 0 && paddings[i][0] <= $x.shape[i] - shapeOffset && + paddings[i][1] >= 0 && paddings[i][1] <= $x.shape[i] - shapeOffset, () => `Padding in dimension ${i} cannot be greater than or equal ` + + `to ${$x.shape[i] - shapeOffset} or less than 0 for input of ` + + `shape ${$x.shape}`); + } + const attrs = { paddings, mode }; + const inputs = { x: $x }; + return ENGINE.runKernel(MirrorPad, inputs, attrs); + } + const mirrorPad$1 = /* @__PURE__ */ op({ mirrorPad_ }); + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + /** + * Returns the mod of a and b element-wise. + * `floor(x / y) * y + mod(x, y) = x` + * Supports broadcasting. 
+ * + * We also expose `tf.modStrict` which has the same signature as this op and + * asserts that `a` and `b` are the same shape (does not broadcast). + * + * ```js + * const a = tf.tensor1d([1, 4, 3, 16]); + * const b = tf.tensor1d([1, 2, 9, 4]); + * + * a.mod(b).print(); // or tf.mod(a, b) + * ``` + * + * ```js + * // Broadcast a mod b. + * const a = tf.tensor1d([2, 4, 6, 8]); + * const b = tf.scalar(5); + * + * a.mod(b).print(); // or tf.mod(a, b) + * ``` + * + * @param a The first tensor. + * @param b The second tensor. Must have the same type as `a`. + * + * @doc {heading: 'Operations', subheading: 'Arithmetic'} + */ + function mod_(a, b) { + let $a = convertToTensor(a, 'a', 'mod'); + let $b = convertToTensor(b, 'b', 'mod'); + [$a, $b] = makeTypesMatch($a, $b); + const inputs = { a: $a, b: $b }; + return ENGINE.runKernel(Mod, inputs); + } + const mod$2 = /* @__PURE__ */ op({ mod_ }); + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + /** + * Calculates the mean and variance of `x`. The mean and variance are + * calculated by aggregating the contents of `x` across `axes`. If `x` is + * 1-D and `axes = [0]` this is just the mean and variance of a vector. + * + * @param x The input tensor. + * @param axis The dimension(s) along with to compute mean and + * variance. 
By default it reduces all dimensions. + * @param keepDims If true, the moments have the same dimensionality as the + * input. + * @return An object with two keys: `mean` and `variance`. + * + * @doc {heading: 'Operations', subheading: 'Normalization'} + */ + function moments_(x, axis = null, keepDims = false) { + x = convertToTensor(x, 'x', 'moments'); + const axes = parseAxisParam(axis, x.shape); + const xMean = mean$3(x, axes, keepDims); + let keepDimsShape = xMean.shape; + if (!keepDims) { + keepDimsShape = expandShapeToKeepDim(xMean.shape, axes); + } + const devSquared = square$2(sub$2(cast$3(x, 'float32'), reshape$3(xMean, keepDimsShape))); + const variance = mean$3(devSquared, axes, keepDims); + return { mean: xMean, variance }; + } + const moments = /* @__PURE__ */ op({ moments_ }); + + /** + * Computes the next states and outputs of a stack of LSTMCells. + * + * Each cell output is used as input to the next cell. + * + * Returns `[cellState, cellOutput]`. + * + * Derived from tf.contrib.rn.MultiRNNCell. + * + * @param lstmCells Array of LSTMCell functions. + * @param data The input to the cell. + * @param c Array of previous cell states. + * @param h Array of previous cell outputs. 
+ * + * @doc {heading: 'Operations', subheading: 'RNN'} + */ + function multiRNNCell_(lstmCells, data, c, h) { + const $data = convertToTensor(data, 'data', 'multiRNNCell'); + const $c = convertToTensorArray(c, 'c', 'multiRNNCell'); + const $h = convertToTensorArray(h, 'h', 'multiRNNCell'); + let input = $data; + const newStates = []; + for (let i = 0; i < lstmCells.length; i++) { + const output = lstmCells[i](input, $c[i], $h[i]); + newStates.push(output[0]); + newStates.push(output[1]); + input = output[1]; + } + const newC = []; + const newH = []; + for (let i = 0; i < newStates.length; i += 2) { + newC.push(newStates[i]); + newH.push(newStates[i + 1]); + } + return [newC, newH]; + } + const multiRNNCell = /* @__PURE__ */ op({ multiRNNCell_ }); + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + /** + * Creates a `tf.Tensor` with values drawn from a multinomial distribution. + * + * ```js + * const probs = tf.tensor([.75, .25]); + * tf.multinomial(probs, 3).print(); + * ``` + * + * @param logits 1D array with unnormalized log-probabilities, or + * 2D array of shape `[batchSize, numOutcomes]`. See the `normalized` + * parameter. + * @param numSamples Number of samples to draw for each row slice. + * @param seed The seed number. 
+ * @param normalized Whether the provided `logits` are normalized true + * probabilities (sum to 1). Defaults to false. + * @return 1D array of shape `[numSamples]`, or 2D array of shape + * `[batchSize, numSamples]`, depending on the rank of the input. + * + * @doc {heading: 'Tensors', subheading: 'Random'} + */ + function multinomial_(logits, numSamples, seed, normalized = false) { + const $logits = convertToTensor(logits, 'logits', 'multinomial'); + const numOutcomes = $logits.size; + const origRank = $logits.rank; + if (numOutcomes < 2) { + throw new Error(`Error in multinomial: you need at least 2 outcomes, but got ` + + `${numOutcomes}.`); + } + if (origRank > 2) { + throw new Error(`Rank of probabilities must be 1 or 2, but is ${origRank}`); + } + // TODO(lina128): Investigate correct seed behavior. The code seems not allow + // setting see to 0. + seed = seed || Math.random(); + // The kernel only accepts (and returns) rank 2 tensors. + const logits2D = origRank === 1 ? reshape$3($logits, [1, -1]) : $logits; + const inputs = { logits: logits2D }; + const attrs = { numSamples, seed, normalized }; + // tslint:disable-next-line: no-unnecessary-type-assertion + const res = ENGINE.runKernel(Multinomial, inputs, attrs); + // tslint:disable-next-line:no-unnecessary-type-assertion + return origRank === 1 ? reshape$3(res, [res.size]) : res; + } + const multinomial$2 = /* @__PURE__ */ op({ multinomial_ }); + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + /** + * Returns the truth value of (a != b) element-wise. Supports broadcasting. + * + * ```js + * const a = tf.tensor1d([1, 2, 3]); + * const b = tf.tensor1d([0, 2, 3]); + * + * a.notEqual(b).print(); + * ``` + * @param a The first input tensor. + * @param b The second input tensor. Must have the same dtype as `a`. + * + * @doc {heading: 'Operations', subheading: 'Logical'} + */ + function notEqual_(a, b) { + let $a = convertToTensor(a, 'a', 'notEqual', 'string_or_numeric'); + let $b = convertToTensor(b, 'b', 'notEqual', 'string_or_numeric'); + [$a, $b] = makeTypesMatch($a, $b); + assertAndGetBroadcastShape($a.shape, $b.shape); + const inputs = { a: $a, b: $b }; + return ENGINE.runKernel(NotEqual, inputs); + } + const notEqual$2 = /* @__PURE__ */ op({ notEqual_ }); + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + /** + * Creates a one-hot `tf.Tensor`. The locations represented by `indices` take + * value `onValue` (defaults to 1), while all other locations take value + * `offValue` (defaults to 0). If `indices` is rank `R`, the output has rank + * `R+1` with the last axis of size `depth`. 
+ * `indices` used to encode prediction class must start from 0. For example, + * if you have 3 classes of data, class 1 should be encoded as 0, class 2 + * should be 1, and class 3 should be 2. + * + * ```js + * tf.oneHot(tf.tensor1d([0, 1], 'int32'), 3).print(); + * ``` + * + * @param indices `tf.Tensor` of indices with dtype `int32`. Indices must + * start from 0. + * @param depth The depth of the one hot dimension. + * @param onValue A number used to fill in the output when the index matches + * the location. + * @param offValue A number used to fill in the output when the index does + * not match the location. + * @param dtype The dtype of the output tensor, default to 'int32'. + * + * @doc {heading: 'Tensors', subheading: 'Creation'} + */ + function oneHot_(indices, depth, onValue = 1, offValue = 0, dtype = 'int32') { + if (depth < 2) { + throw new Error(`Error in oneHot: depth must be >=2, but it is ${depth}`); + } + const $indices = convertToTensor(indices, 'indices', 'oneHot', 'int32'); + const inputs = { indices: $indices }; + const attrs = { dtype, depth, onValue, offValue }; + return ENGINE.runKernel(OneHot, inputs, attrs); + } + const oneHot$3 = /* @__PURE__ */ op({ oneHot_ }); + + /** + * @license + * Copyright 2018 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ + /** + * Creates a `tf.Tensor` with all elements set to 1 with the same shape as the + * given tensor. + * + * ```js + * const x = tf.tensor([1, 2]); + * tf.onesLike(x).print(); + * ``` + * @param x A tensor. + * + * @doc {heading: 'Tensors', subheading: 'Creation'} + */ + function onesLike_(x) { + const $x = convertToTensor(x, 'x', 'onesLike'); + const inputs = { x: $x }; + return ENGINE.runKernel(OnesLike, inputs); + } + const onesLike$3 = /* @__PURE__ */ op({ onesLike_ }); + + /** + * Computes the outer product of two vectors, `v1` and `v2`. + * + * ```js + * const a = tf.tensor1d([1, 2, 3]); + * const b = tf.tensor1d([3, 4, 5]); + * + * tf.outerProduct(a, b).print(); + * ``` + * @param v1 The first vector in the outer product operation. + * @param v2 The second vector in the outer product operation. + * + * @doc {heading: 'Operations', subheading: 'Matrices'} + */ + function outerProduct_(v1, v2) { + const $v1 = convertToTensor(v1, 'v1', 'outerProduct'); + const $v2 = convertToTensor(v2, 'v2', 'outerProduct'); + assert$1($v1.rank === 1 && $v2.rank === 1, () => `Error in outerProduct: inputs must be rank 1, but got ranks ` + + `${$v1.rank} and ${$v2.rank}.`); + const v12D = reshape$3($v1, [-1, 1]); + const v22D = reshape$3($v2, [1, -1]); + return matMul$1(v12D, v22D); + } + const outerProduct = /* @__PURE__ */ op({ outerProduct_ }); + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + /** + * Pads a `tf.Tensor` with a given value and paddings. + * + * This operation implements `CONSTANT` mode. For `REFLECT` and `SYMMETRIC`, + * refer to `tf.mirrorPad`. + * + * Also available are stricter rank-specific methods with the same signature + * as this method that assert that `paddings` is of given length. + * - `tf.pad1d` + * - `tf.pad2d` + * - `tf.pad3d` + * - `tf.pad4d` + * + * ```js + * const x = tf.tensor1d([1, 2, 3, 4]); + * x.pad([[1, 2]]).print(); + * ``` + * @param x The tensor to pad. + * @param paddings An array of length `R` (the rank of the tensor), where + * each element is a length-2 tuple of ints `[padBefore, padAfter]`, + * specifying how much to pad along each dimension of the tensor. + * @param constantValue The pad value to use. Defaults to 0. + * + * @doc {heading: 'Tensors', subheading: 'Transformations'} + */ + function pad_(x, paddings, constantValue = 0) { + const $x = convertToTensor(x, 'x', 'pad'); + if ($x.rank === 0) { + throw new Error('pad(scalar) is not defined. Pass non-scalar to pad'); + } + const attrs = { paddings, constantValue }; + const inputs = { x: $x }; + return ENGINE.runKernel(PadV2, inputs, attrs); + } + const pad = /* @__PURE__ */ op({ pad_ }); + + /** + * Pads a `tf.Tensor1D` with a given value and paddings. See `pad` for details. + */ + function pad1d_(x, paddings, constantValue = 0) { + assert$1(paddings.length === 2, () => 'Invalid number of paddings. Must be length of 2.'); + return pad(x, [paddings], constantValue); + } + const pad1d = /* @__PURE__ */ op({ pad1d_ }); + + /** + * Pads a `tf.Tensor2D` with a given value and paddings. See `pad` for details. 
+ */ + function pad2d_(x, paddings, constantValue = 0) { + assert$1(paddings.length === 2 && paddings[0].length === 2 && + paddings[1].length === 2, () => 'Invalid number of paddings. Must be length of 2 each.'); + return pad(x, paddings, constantValue); + } + const pad2d = /* @__PURE__ */ op({ pad2d_ }); + + /** + * Pads a `tf.Tensor3D` with a given value and paddings. See `pad` for details. + */ + function pad3d_(x, paddings, constantValue = 0) { + assert$1(paddings.length === 3 && paddings[0].length === 2 && + paddings[1].length === 2 && paddings[2].length === 2, () => 'Invalid number of paddings. Must be length of 2 each.'); + return pad(x, paddings, constantValue); + } + const pad3d = /* @__PURE__ */ op({ pad3d_ }); + + /** + * Pads a `tf.Tensor4D` with a given value and paddings. See `pad` for details. + */ + function pad4d_(x, paddings, constantValue = 0) { + assert$1(paddings.length === 4 && paddings[0].length === 2 && + paddings[1].length === 2 && paddings[2].length === 2 && + paddings[3].length === 2, () => 'Invalid number of paddings. Must be length of 2 each.'); + return pad(x, paddings, constantValue); + } + const pad4d = /* @__PURE__ */ op({ pad4d_ }); + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ + /** + * This operation divides "spatial" dimensions `[1, ..., M]` of the input into + * a grid of blocks of shape `blockShape`, and interleaves these blocks with + * the "batch" dimension (0) such that in the output, the spatial + * dimensions `[1, ..., M]` correspond to the position within the grid, + * and the batch dimension combines both the position within a spatial block + * and the original batch position. Prior to division into blocks, + * the spatial dimensions of the input are optionally zero padded + * according to `paddings`. See below for a precise description. + * + * ```js + * const x = tf.tensor4d([1, 2, 3, 4], [1, 2, 2, 1]); + * const blockShape = [2, 2]; + * const paddings = [[0, 0], [0, 0]]; + * + * x.spaceToBatchND(blockShape, paddings).print(); + * ``` + * + * @param x A `tf.Tensor`. N-D with `x.shape` = `[batch] + spatialShape + + * remainingShape`, where spatialShape has `M` dimensions. + * @param blockShape A 1-D array. Must have shape `[M]`, all values must + * be >= 1. + * @param paddings A 2-D array. Must have shape `[M, 2]`, all values must be >= + * 0. `paddings[i] = [padStart, padEnd]` specifies the amount to zero-pad + * from input dimension `i + 1`, which corresponds to spatial dimension `i`. It + * is required that + * `(inputShape[i + 1] + padStart + padEnd) % blockShape[i] === 0` + * + * This operation is equivalent to the following steps: + * + * 1. Zero-pad the start and end of dimensions `[1, ..., M]` of the input + * according to `paddings` to produce `padded` of shape paddedShape. + * + * 2. Reshape `padded` to `reshapedPadded` of shape: + * `[batch] + [paddedShape[1] / blockShape[0], blockShape[0], ..., + * paddedShape[M] / blockShape[M-1], blockShape[M-1]] + remainingShape` + * + * 3. 
Permute dimensions of `reshapedPadded` to produce `permutedReshapedPadded` + * of shape: `blockShape + [batch] + [paddedShape[1] / blockShape[0], ..., + * paddedShape[M] / blockShape[M-1]] + remainingShape` + * + * 4. Reshape `permutedReshapedPadded` to flatten `blockShape` into the + * batch dimension, producing an output tensor of shape: + * `[batch * prod(blockShape)] + [paddedShape[1] / blockShape[0], ..., + * paddedShape[M] / blockShape[M-1]] + remainingShape` + * + * @doc {heading: 'Tensors', subheading: 'Transformations'} + */ + function spaceToBatchND_(x, blockShape, paddings) { + const $x = convertToTensor(x, 'x', 'spaceToBatchND'); + assert$1($x.rank >= 1 + blockShape.length, () => `input rank ${$x.rank} should be > than [blockShape] ${blockShape.length}`); + assert$1(paddings.length === blockShape.length, () => `paddings.shape[0] ${paddings.length} must be equal to [blockShape] ${blockShape.length}`); + assert$1($x.shape.reduce((a, b, i) => { + if (i > 0 && i <= blockShape.length) { + return a && + ((b + paddings[i - 1][0] + paddings[i - 1][1]) % + blockShape[i - 1] === + 0); + } + return a; + }, true), () => `input spatial dimensions ${$x.shape.slice(1)} with paddings ${paddings.toString()} must be divisible by blockShapes ${blockShape.toString()}`); + const inputs = { x: $x }; + const attrs = { blockShape, paddings }; + return ENGINE.runKernel(SpaceToBatchND, inputs, attrs); + } + const spaceToBatchND$2 = /* @__PURE__ */ op({ spaceToBatchND_ }); + + /** + * @license + * Copyright 2018 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + /** + * Performs an N-D pooling operation + * + * @param input The input tensor, of rank 4 or rank 3 of shape + * `[batch, height, width, inChannels]`. If rank 3, batch of 1 is assumed. + * @param windowShape The filter size: `[filterHeight, filterWidth]`. If + * `filterSize` is a single number, then `filterHeight == filterWidth`. + * @param poolingType The type of pooling, either 'max' or 'avg'. + * @param pad The type of padding algorithm: + * - `same` and stride 1: output will be of same size as input, + * regardless of filter size. + * - `valid`: output will be smaller than input if filter is larger + * than 1x1. + * - For more info, see this guide: + * [https://www.tensorflow.org/api_guides/python/nn#Convolution]( + * https://www.tensorflow.org/api_guides/python/nn#Convolution) + * @param dilations The dilation rates: `[dilationHeight, dilationWidth]` + * in which we sample input values across the height and width dimensions + * in dilated pooling. Defaults to `[1, 1]`. If `dilationRate` is a single + * number, then `dilationHeight == dilationWidth`. If it is greater than + * 1, then all values of `strides` must be 1. + * @param strides The strides of the pooling: `[strideHeight, strideWidth]`. If + * `strides` is a single number, then `strideHeight == strideWidth`. + * @param dimRoundingMode A string from: 'ceil', 'round', 'floor'. If none is + * provided, it will default to truncate. 
+ * + * @doc {heading: 'Operations', subheading: 'Convolution'} + */ + function pool_(input, windowShape, poolingType, pad, dilations, strides, dimRoundingMode) { + if (dilations == null) { + dilations = [1, 1]; + } + if (strides == null) { + strides = 1; + } + if (pad === 0) { + pad = 'valid'; + } + const $x = convertToTensor(input, 'x', 'maxPool'); + let x4D = $x; + let reshapedTo4D = false; + if ($x.rank === 3) { + reshapedTo4D = true; + x4D = reshape$3($x, [1, $x.shape[0], $x.shape[1], $x.shape[2]]); + } + assert$1(eitherStridesOrDilationsAreOne(strides, dilations), () => 'Error in pool: Either strides or dilations must be 1. ' + + `Got strides ${strides} and dilations '${dilations}'`); + const convInfo = computePool2DInfo(x4D.shape, windowShape, strides, dilations, pad); + const dilation = [convInfo.dilationHeight, convInfo.dilationWidth]; + // The following implementation does batchToSpace(pool(spaceToBatch(x))) + // whenever dilation > 1 since the TF kernels do not support dilation > 1. + // tslint:disable-next-line:max-line-length + // https://github.com/tensorflow/tensorflow/blob/50f6bb67dc98c9b74630b6047aae7a4f8a40fd02/tensorflow/python/ops/nn_ops.py#L1037 + let basePadding; + if (pad === 'same') { + basePadding = withSpaceToBatchBasePaddings([convInfo.filterHeight, convInfo.filterWidth], dilation); + } + else { + basePadding = [[0, 0], [0, 0]]; + } + const isDilationOne = dilation[0] === 1 && dilation[1] === 1; + const [adjustedPadding, adjustedCrops] = requiredSpaceToBatchPaddings([convInfo.inHeight, convInfo.inWidth], dilation, basePadding); + const convertedPad = isDilationOne ? pad : 'valid'; + const convertedX = isDilationOne ? x4D : spaceToBatchND$2(x4D, dilation, adjustedPadding); + const forwardOp = poolingType === 'avg' ? + () => avgPool$2(convertedX, windowShape, strides, convertedPad, dimRoundingMode) : + () => maxPool$2(convertedX, windowShape, strides, convertedPad, dimRoundingMode); + const y = forwardOp(); + const res = isDilationOne ? 
y : batchToSpaceND$2(y, dilation, adjustedCrops); + if (reshapedTo4D) { + return reshape$3(res, [res.shape[1], res.shape[2], res.shape[3]]); + } + return res; + } + // Helper function to compute crops and paddings for pool with dilation > 1. + // tslint:disable-next-line:max-line-length + // https://github.com/tensorflow/tensorflow/blob/50f6bb67dc98c9b74630b6047aae7a4f8a40fd02/tensorflow/python/ops/array_ops.py#L2184 + function requiredSpaceToBatchPaddings(inputShape, blockShape, basePadding) { + const padStart = basePadding.map(b => b[0]); + const origPadEnd = basePadding.map(b => b[1]); + const fullInputShape = inputShape.concat(padStart, origPadEnd); + const padEndExtra = blockShape.map((b, i) => (b - fullInputShape[i] % b) % b); + const padEnd = origPadEnd.map((s, i) => s + padEndExtra[i]); + const paddings = blockShape.map((_, i) => [padStart[i], padEnd[i]]); + const crops = blockShape.map((_, i) => [0, padEndExtra[i]]); + return [paddings, crops]; + } + // Helper function to compute base paddings for pool with dilation > 1. + // tslint:disable-next-line:max-line-length + // https://github.com/tensorflow/tensorflow/blob/50f6bb67dc98c9b74630b6047aae7a4f8a40fd02/tensorflow/python/ops/nn_ops.py#L524 + function withSpaceToBatchBasePaddings(filterShape, dilation) { + // Spatial dimensions of the filters and the upsampled filters in which we + // introduce (rate - 1) zeros between consecutive filter values. + const dilatedFilterShape = filterShape.map((s, i) => { + return s + (s - 1) * (dilation[i] - 1); + }); + const padExtraShape = dilatedFilterShape.map(s => s - 1); + // When padding is odd, we pad more at end, following the same + // convention as conv2d. 
+ const padExtraStart = padExtraShape.map(s => Math.floor(s / 2)); + const padExtraEnd = padExtraShape.map((s, i) => s - padExtraStart[i]); + return padExtraShape.map((_, i) => { + return [padExtraStart[i], padExtraEnd[i]]; + }); + } + const pool$1 = /* @__PURE__ */ op({ pool_ }); + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + /** + * Computes leaky rectified linear element-wise with parametric alphas. + * + * `x < 0 ? alpha * x : f(x) = x` + * + * ```js + * const x = tf.tensor1d([-1, 2, -3, 4]); + * const alpha = tf.scalar(0.1); + * + * x.prelu(alpha).print(); // or tf.prelu(x, alpha) + * ``` + * @param x The input tensor. + * @param alpha Scaling factor for negative values. + * + * @doc {heading: 'Operations', subheading: 'Basic math'} + */ + function prelu_(x, alpha) { + const $x = convertToTensor(x, 'x', 'prelu'); + const $alpha = convertToTensor(alpha, 'alpha', 'prelu'); + const inputs = { x: $x, alpha: $alpha }; + return ENGINE.runKernel(Prelu, inputs); + } + const prelu$3 = /* @__PURE__ */ op({ prelu_ }); + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + /** + * Computes the product of elements across dimensions of a `tf.Tensor`. + * + * Reduces the input along the dimensions given in `axes`. Unless `keepDims` + * is true, the rank of the `tf.Tensor` is reduced by 1 for each entry in + * `axes`. If `keepDims` is true, the reduced dimensions are retained with + * length 1. If `axes` has no entries, all dimensions are reduced, and a + * `tf.Tensor` with a single element is returned. + * + * ```js + * const x = tf.tensor1d([1, 2, 3]); + * + * x.prod().print(); // or tf.prod(x) + * ``` + * + * ```js + * const x = tf.tensor2d([1, 2, 3, 4], [2, 2]); + * + * const axis = 1; + * x.prod(axis).print(); // or tf.prod(x, axis) + * ``` + * + * @param x The input tensor to compute the product over. If the dtype is `bool` + * it will be converted to `int32` and the output dtype will be `int32`. + * @param axis The dimension(s) to reduce. By default it reduces + * all dimensions. + * @param keepDims If true, retains reduced dimensions with size 1. + * + * @doc {heading: 'Operations', subheading: 'Reduction'} + */ + function prod_(x, axis = null, keepDims = false) { + let $x = convertToTensor(x, 'x', 'prod'); + if ($x.dtype === 'bool') { + // bool is not an allowed type for the underlying kernel. 
+ $x = cast$3($x, 'int32'); + } + const inputs = { x: $x }; + const attrs = { axis, keepDims }; + return ENGINE.runKernel(Prod, inputs, attrs); + } + const prod$2 = /* @__PURE__ */ op({ prod_ }); + + /** + * @license + * Copyright 2022 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + function raggedGather_(paramsNestedSplits, paramsDenseValues, indices, outputRaggedRank) { + const $paramsNestedSplits = paramsNestedSplits.map((t, i) => convertToTensor(t, `tensors${i}`, 'raggedGather', 'int32')); + const $paramsDenseValues = convertToTensor(paramsDenseValues, 'paramsDenseValues', 'raggedGather'); + const $indices = convertToTensor(indices, 'indices', 'raggedGather', 'int32'); + const inputs = { + paramsNestedSplits: $paramsNestedSplits, + paramsDenseValues: $paramsDenseValues, + indices: $indices, + }; + const attrs = { outputRaggedRank }; + const result = ENGINE.runKernel(RaggedGather, inputs, attrs); + return { + outputNestedSplits: result.slice(0, result.length - 1), + outputDenseValues: result[result.length - 1], + }; + } + const raggedGather$2 = /* @__PURE__ */ op({ raggedGather_ }); + + /** + * @license + * Copyright 2022 Google LLC. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + /** + * Returns a RaggedTensor result composed from rtDenseValues and rtNestedSplits, + * such that result[i] = [starts[i], starts[i] + deltas[i], ..., limits[i]]). + * + * @param starts: A Tensor. Must be one of the following types: + * 'float32', 'int32'. The starts of each range. + * @param limits: A Tensor. Must have the same type as starts. The limits of + * each range. + * @param deltas: A Tensor. Must have the same type as starts. The deltas of + * each range. + * @return A map with the following properties: + * - rtNestedSplits: A Tensor of type 'int32'. + * - rtDenseValues: A Tensor. Has the same type as starts. + */ + function raggedRange_(starts, limits, deltas) { + const $starts = convertToTensor(starts, 'starts', 'raggedRange'); + const $limits = convertToTensor(limits, 'limits', 'raggedRange', $starts.dtype); + const $deltas = convertToTensor(deltas, 'deltas', 'raggedRange', $starts.dtype); + const inputs = { + starts: $starts, + limits: $limits, + deltas: $deltas, + }; + const result = ENGINE.runKernel(RaggedRange, inputs); + return { + rtNestedSplits: result[0], + rtDenseValues: result[1], + }; + } + const raggedRange$2 = /* @__PURE__ */ op({ raggedRange_ }); + + /** + * @license + * Copyright 2022 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + /** + * Create a dense tensor from a ragged tensor, possibly altering its shape. + * + * The raggedTensorToTensor op creates a dense tensor from am array of row + * partition tensors, a value vector, and default values. If the shape is + * unspecified, the minimal shape required to contain all the elements in the + * ragged tensor (the natural shape) will be used. If some dimensions are left + * unspecified, then the size of the natural shape is used in that dimension. + * + * The defaultValue will be broadcast to the output shape. After that, the + * values from the ragged tensor overwrite the default values. Note that the + * defaultValue must have less dimensions than the value. + * + * The row partition tensors are in the order of the dimensions. At present, the + * types can be: "ROW_SPLITS": the row_splits tensor from the ragged tensor. + * "VALUE_ROWIDS": the value_rowids tensor from the ragged tensor. + * "FIRST_DIM_SIZE": if value_rowids is used for the first dimension, then it + * is preceded by "FIRST_DIM_SIZE". + * ``` + * @param shape: A Tensor. Must be one of the following types: 'int32'. The + * desired shape of the output tensor. If left unspecified (empty), the + * minimal shape required to contain all the elements in the ragged tensor + * (the natural shape) will be used. If some dimensions are left + * unspecified, then the size of the natural shape is used in that + * dimension. 
+ * + * Note that dense dimensions cannot be modified by the shape argument. + * Trying to change the size of a dense dimension will cause the op to fail. + * Examples: natural shape: [4, 5, 6] shape: -1 output shape: [4, 5, 6] + * + * natural shape: [4, 5, 6] shape: [3, -1, 2] output shape: [3, 5, 2] + * + * natural shape: [4, 5, 6] shape: [3, 7, 2] output shape: [3, 7, 2] + * @param values: A Tensor. A 1D tensor representing the values of the ragged + * tensor. + * @param defaultValue: A Tensor. Must have the same type as values. The + * defaultValue when the shape is larger than the ragged tensor. The + * defaultValue is broadcast until it is the shape of the output tensor, + * and then overwritten by values in the ragged tensor. The default value + * must be compatible with this broadcast operation, and must have fewer + * dimensions than the value tensor. + * @param rowPartitionTensors: A list of at least 1 Tensor objects with the same + * type in: 'int32'. + * @param rowPartitionTypes: A list of strings. The types of the row partition + * tensors. At present, these can be: + * "ROW_SPLITS": the row_splits tensor from the ragged tensor. + * "VALUE_ROWIDS": the value_rowids tensor from the ragged tensor. + * "FIRST_DIM_SIZE": if value_rowids is used for the first dimension, then + * it is preceded by "FIRST_DIM_SIZE". The tensors are in the order of + * the dimensions. + * @return A Tensor. Has the same type as values. 
+ * @doc {heading: 'Operations', subheading: 'Ragged'} + */ + function raggedTensorToTensor_(shape, values, defaultValue, rowPartitionTensors, rowPartitionTypes) { + const $shape = convertToTensor(shape, 'shape', 'raggedTensorToTensor', 'int32'); + const $values = convertToTensor(values, 'values', 'raggedTensorToTensor'); + const $defaultValue = convertToTensor(defaultValue, 'defaultValue', 'raggedTensorToTensor', $values.dtype); + const $rowPartitionTensors = rowPartitionTensors.map((t, i) => convertToTensor(t, `tensors${i}`, 'raggedTensorToTensor', 'int32')); + const inputs = { + shape: $shape, + values: $values, + defaultValue: $defaultValue, + rowPartitionTensors: $rowPartitionTensors + }; + const attrs = { rowPartitionTypes }; + return ENGINE.runKernel(RaggedTensorToTensor, inputs, attrs); + } + const raggedTensorToTensor$2 = /* @__PURE__ */ op({ raggedTensorToTensor_ }); + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + /** + * Creates a `tf.Tensor` with values sampled from a random number generator + * function defined by the user. + * + * @param shape An array of integers defining the output tensor shape. + * @param randFunction A random number generator function which is called + * for each element in the output tensor. + * @param dtype The data type of the output tensor. Defaults to 'float32'. 
+ * + * @doc {heading: 'Tensors', subheading: 'Random'} + */ + function rand_(shape, randFunction, dtype) { + assertNonNegativeIntegerDimensions(shape); + const size = sizeFromShape(shape); + let values = null; + if (dtype == null || dtype === 'float32') { + values = new Float32Array(size); + } + else if (dtype === 'int32') { + values = new Int32Array(size); + } + else if (dtype === 'bool') { + values = new Uint8Array(size); + } + else { + throw new Error(`Unknown data type ${dtype}`); + } + for (let i = 0; i < size; i++) { + values[i] = randFunction(); + } + return ENGINE.makeTensor(values, shape, dtype); + } + const rand = /* @__PURE__ */ op({ rand_ }); + + var alea$3 = {exports: {}}; + + var alea$1 = alea$3.exports; + + (function (module) { + // A port of an algorithm by Johannes Baagøe , 2010 + // http://baagoe.com/en/RandomMusings/javascript/ + // https://github.com/nquinlan/better-random-numbers-for-javascript-mirror + // Original work is under MIT license - + + // Copyright (C) 2010 by Johannes Baagøe + // + // Permission is hereby granted, free of charge, to any person obtaining a copy + // of this software and associated documentation files (the "Software"), to deal + // in the Software without restriction, including without limitation the rights + // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + // copies of the Software, and to permit persons to whom the Software is + // furnished to do so, subject to the following conditions: + // + // The above copyright notice and this permission notice shall be included in + // all copies or substantial portions of the Software. + // + // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE + // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + // THE SOFTWARE. + + + + (function(global, module, define) { + + function Alea(seed) { + var me = this, mash = Mash(); + + me.next = function() { + var t = 2091639 * me.s0 + me.c * 2.3283064365386963e-10; // 2^-32 + me.s0 = me.s1; + me.s1 = me.s2; + return me.s2 = t - (me.c = t | 0); + }; + + // Apply the seeding algorithm from Baagoe. + me.c = 1; + me.s0 = mash(' '); + me.s1 = mash(' '); + me.s2 = mash(' '); + me.s0 -= mash(seed); + if (me.s0 < 0) { me.s0 += 1; } + me.s1 -= mash(seed); + if (me.s1 < 0) { me.s1 += 1; } + me.s2 -= mash(seed); + if (me.s2 < 0) { me.s2 += 1; } + mash = null; + } + + function copy(f, t) { + t.c = f.c; + t.s0 = f.s0; + t.s1 = f.s1; + t.s2 = f.s2; + return t; + } + + function impl(seed, opts) { + var xg = new Alea(seed), + state = opts && opts.state, + prng = xg.next; + prng.int32 = function() { return (xg.next() * 0x100000000) | 0; }; + prng.double = function() { + return prng() + (prng() * 0x200000 | 0) * 1.1102230246251565e-16; // 2^-53 + }; + prng.quick = prng; + if (state) { + if (typeof(state) == 'object') copy(state, xg); + prng.state = function() { return copy(xg, {}); }; + } + return prng; + } + + function Mash() { + var n = 0xefc8249d; + + var mash = function(data) { + data = String(data); + for (var i = 0; i < data.length; i++) { + n += data.charCodeAt(i); + var h = 0.02519603282416938 * n; + n = h >>> 0; + h -= n; + h *= n; + n = h >>> 0; + h -= n; + n += h * 0x100000000; // 2^32 + } + return (n >>> 0) * 2.3283064365386963e-10; // 2^-32 + }; + + return mash; + } + + + if (module && module.exports) { + module.exports = impl; + } else if (define && define.amd) { + define(function() { return impl; }); + } else { + this.alea = impl; + } + + })( + commonjsGlobal, + 
('object') == 'object' && module, // present in node.js + (typeof undefined) == 'function' && undefined // present with an AMD loader + ); + } (alea$3)); + + var aleaExports = alea$3.exports; + var alea$2 = /*@__PURE__*/getDefaultExportFromCjs(aleaExports); + + var xor128$3 = {exports: {}}; + + var xor128$1 = xor128$3.exports; + + (function (module) { + // A Javascript implementaion of the "xor128" prng algorithm by + // George Marsaglia. See http://www.jstatsoft.org/v08/i14/paper + + (function(global, module, define) { + + function XorGen(seed) { + var me = this, strseed = ''; + + me.x = 0; + me.y = 0; + me.z = 0; + me.w = 0; + + // Set up generator function. + me.next = function() { + var t = me.x ^ (me.x << 11); + me.x = me.y; + me.y = me.z; + me.z = me.w; + return me.w ^= (me.w >>> 19) ^ t ^ (t >>> 8); + }; + + if (seed === (seed | 0)) { + // Integer seed. + me.x = seed; + } else { + // String seed. + strseed += seed; + } + + // Mix in string seed, then discard an initial batch of 64 values. 
+ for (var k = 0; k < strseed.length + 64; k++) { + me.x ^= strseed.charCodeAt(k) | 0; + me.next(); + } + } + + function copy(f, t) { + t.x = f.x; + t.y = f.y; + t.z = f.z; + t.w = f.w; + return t; + } + + function impl(seed, opts) { + var xg = new XorGen(seed), + state = opts && opts.state, + prng = function() { return (xg.next() >>> 0) / 0x100000000; }; + prng.double = function() { + do { + var top = xg.next() >>> 11, + bot = (xg.next() >>> 0) / 0x100000000, + result = (top + bot) / (1 << 21); + } while (result === 0); + return result; + }; + prng.int32 = xg.next; + prng.quick = prng; + if (state) { + if (typeof(state) == 'object') copy(state, xg); + prng.state = function() { return copy(xg, {}); }; + } + return prng; + } + + if (module && module.exports) { + module.exports = impl; + } else if (define && define.amd) { + define(function() { return impl; }); + } else { + this.xor128 = impl; + } + + })( + commonjsGlobal, + ('object') == 'object' && module, // present in node.js + (typeof undefined) == 'function' && undefined // present with an AMD loader + ); + } (xor128$3)); + + var xor128Exports = xor128$3.exports; + var xor128$2 = /*@__PURE__*/getDefaultExportFromCjs(xor128Exports); + + var xorwow$3 = {exports: {}}; + + var xorwow$1 = xorwow$3.exports; + + (function (module) { + // A Javascript implementaion of the "xorwow" prng algorithm by + // George Marsaglia. See http://www.jstatsoft.org/v08/i14/paper + + (function(global, module, define) { + + function XorGen(seed) { + var me = this, strseed = ''; + + // Set up generator function. + me.next = function() { + var t = (me.x ^ (me.x >>> 2)); + me.x = me.y; me.y = me.z; me.z = me.w; me.w = me.v; + return (me.d = (me.d + 362437 | 0)) + + (me.v = (me.v ^ (me.v << 4)) ^ (t ^ (t << 1))) | 0; + }; + + me.x = 0; + me.y = 0; + me.z = 0; + me.w = 0; + me.v = 0; + + if (seed === (seed | 0)) { + // Integer seed. + me.x = seed; + } else { + // String seed. 
+ strseed += seed; + } + + // Mix in string seed, then discard an initial batch of 64 values. + for (var k = 0; k < strseed.length + 64; k++) { + me.x ^= strseed.charCodeAt(k) | 0; + if (k == strseed.length) { + me.d = me.x << 10 ^ me.x >>> 4; + } + me.next(); + } + } + + function copy(f, t) { + t.x = f.x; + t.y = f.y; + t.z = f.z; + t.w = f.w; + t.v = f.v; + t.d = f.d; + return t; + } + + function impl(seed, opts) { + var xg = new XorGen(seed), + state = opts && opts.state, + prng = function() { return (xg.next() >>> 0) / 0x100000000; }; + prng.double = function() { + do { + var top = xg.next() >>> 11, + bot = (xg.next() >>> 0) / 0x100000000, + result = (top + bot) / (1 << 21); + } while (result === 0); + return result; + }; + prng.int32 = xg.next; + prng.quick = prng; + if (state) { + if (typeof(state) == 'object') copy(state, xg); + prng.state = function() { return copy(xg, {}); }; + } + return prng; + } + + if (module && module.exports) { + module.exports = impl; + } else if (define && define.amd) { + define(function() { return impl; }); + } else { + this.xorwow = impl; + } + + })( + commonjsGlobal, + ('object') == 'object' && module, // present in node.js + (typeof undefined) == 'function' && undefined // present with an AMD loader + ); + } (xorwow$3)); + + var xorwowExports = xorwow$3.exports; + var xorwow$2 = /*@__PURE__*/getDefaultExportFromCjs(xorwowExports); + + var xorshift7$3 = {exports: {}}; + + var xorshift7$1 = xorshift7$3.exports; + + (function (module) { + // A Javascript implementaion of the "xorshift7" algorithm by + // François Panneton and Pierre L'ecuyer: + // "On the Xorgshift Random Number Generators" + // http://saluc.engr.uconn.edu/refs/crypto/rng/panneton05onthexorshift.pdf + + (function(global, module, define) { + + function XorGen(seed) { + var me = this; + + // Set up generator function. + me.next = function() { + // Update xor generator. 
+ var X = me.x, i = me.i, t, v, w; + t = X[i]; t ^= (t >>> 7); v = t ^ (t << 24); + t = X[(i + 1) & 7]; v ^= t ^ (t >>> 10); + t = X[(i + 3) & 7]; v ^= t ^ (t >>> 3); + t = X[(i + 4) & 7]; v ^= t ^ (t << 7); + t = X[(i + 7) & 7]; t = t ^ (t << 13); v ^= t ^ (t << 9); + X[i] = v; + me.i = (i + 1) & 7; + return v; + }; + + function init(me, seed) { + var j, w, X = []; + + if (seed === (seed | 0)) { + // Seed state array using a 32-bit integer. + w = X[0] = seed; + } else { + // Seed state using a string. + seed = '' + seed; + for (j = 0; j < seed.length; ++j) { + X[j & 7] = (X[j & 7] << 15) ^ + (seed.charCodeAt(j) + X[(j + 1) & 7] << 13); + } + } + // Enforce an array length of 8, not all zeroes. + while (X.length < 8) X.push(0); + for (j = 0; j < 8 && X[j] === 0; ++j); + if (j == 8) w = X[7] = -1; else w = X[j]; + + me.x = X; + me.i = 0; + + // Discard an initial 256 values. + for (j = 256; j > 0; --j) { + me.next(); + } + } + + init(me, seed); + } + + function copy(f, t) { + t.x = f.x.slice(); + t.i = f.i; + return t; + } + + function impl(seed, opts) { + if (seed == null) seed = +(new Date); + var xg = new XorGen(seed), + state = opts && opts.state, + prng = function() { return (xg.next() >>> 0) / 0x100000000; }; + prng.double = function() { + do { + var top = xg.next() >>> 11, + bot = (xg.next() >>> 0) / 0x100000000, + result = (top + bot) / (1 << 21); + } while (result === 0); + return result; + }; + prng.int32 = xg.next; + prng.quick = prng; + if (state) { + if (state.x) copy(state, xg); + prng.state = function() { return copy(xg, {}); }; + } + return prng; + } + + if (module && module.exports) { + module.exports = impl; + } else if (define && define.amd) { + define(function() { return impl; }); + } else { + this.xorshift7 = impl; + } + + })( + commonjsGlobal, + ('object') == 'object' && module, // present in node.js + (typeof undefined) == 'function' && undefined // present with an AMD loader + ); + } (xorshift7$3)); + + var xorshift7Exports = 
xorshift7$3.exports; + var xorshift7$2 = /*@__PURE__*/getDefaultExportFromCjs(xorshift7Exports); + + var xor4096$3 = {exports: {}}; + + var xor4096$1 = xor4096$3.exports; + + (function (module) { + // A Javascript implementaion of Richard Brent's Xorgens xor4096 algorithm. + // + // This fast non-cryptographic random number generator is designed for + // use in Monte-Carlo algorithms. It combines a long-period xorshift + // generator with a Weyl generator, and it passes all common batteries + // of stasticial tests for randomness while consuming only a few nanoseconds + // for each prng generated. For background on the generator, see Brent's + // paper: "Some long-period random number generators using shifts and xors." + // http://arxiv.org/pdf/1004.3115v1.pdf + // + // Usage: + // + // var xor4096 = require('xor4096'); + // random = xor4096(1); // Seed with int32 or string. + // assert.equal(random(), 0.1520436450538547); // (0, 1) range, 53 bits. + // assert.equal(random.int32(), 1806534897); // signed int32, 32 bits. + // + // For nonzero numeric keys, this impelementation provides a sequence + // identical to that by Brent's xorgens 3 implementaion in C. This + // implementation also provides for initalizing the generator with + // string seeds, or for saving and restoring the state of the generator. + // + // On Chrome, this prng benchmarks about 2.1 times slower than + // Javascript's built-in Math.random(). + + (function(global, module, define) { + + function XorGen(seed) { + var me = this; + + // Set up generator function. + me.next = function() { + var w = me.w, + X = me.X, i = me.i, t, v; + // Update Weyl generator. + me.w = w = (w + 0x61c88647) | 0; + // Update xor generator. + v = X[(i + 34) & 127]; + t = X[i = ((i + 1) & 127)]; + v ^= v << 13; + t ^= t << 17; + v ^= v >>> 15; + t ^= t >>> 12; + // Update Xor generator array state. + v = X[i] = v ^ t; + me.i = i; + // Result is the combination. 
+ return (v + (w ^ (w >>> 16))) | 0; + }; + + function init(me, seed) { + var t, v, i, j, w, X = [], limit = 128; + if (seed === (seed | 0)) { + // Numeric seeds initialize v, which is used to generates X. + v = seed; + seed = null; + } else { + // String seeds are mixed into v and X one character at a time. + seed = seed + '\0'; + v = 0; + limit = Math.max(limit, seed.length); + } + // Initialize circular array and weyl value. + for (i = 0, j = -32; j < limit; ++j) { + // Put the unicode characters into the array, and shuffle them. + if (seed) v ^= seed.charCodeAt((j + 32) % seed.length); + // After 32 shuffles, take v as the starting w value. + if (j === 0) w = v; + v ^= v << 10; + v ^= v >>> 15; + v ^= v << 4; + v ^= v >>> 13; + if (j >= 0) { + w = (w + 0x61c88647) | 0; // Weyl. + t = (X[j & 127] ^= (v + w)); // Combine xor and weyl to init array. + i = (0 == t) ? i + 1 : 0; // Count zeroes. + } + } + // We have detected all zeroes; make the key nonzero. + if (i >= 128) { + X[(seed && seed.length || 0) & 127] = -1; + } + // Run the generator 512 times to further mix the state before using it. + // Factoring this as a function slows the main generator, so it is just + // unrolled here. The weyl generator is not advanced while warming up. + i = 127; + for (j = 4 * 128; j > 0; --j) { + v = X[(i + 34) & 127]; + t = X[i = ((i + 1) & 127)]; + v ^= v << 13; + t ^= t << 17; + v ^= v >>> 15; + t ^= t >>> 12; + X[i] = v ^ t; + } + // Storing state as object members is faster than using closure variables. 
+ me.w = w; + me.X = X; + me.i = i; + } + + init(me, seed); + } + + function copy(f, t) { + t.i = f.i; + t.w = f.w; + t.X = f.X.slice(); + return t; + }; + + function impl(seed, opts) { + if (seed == null) seed = +(new Date); + var xg = new XorGen(seed), + state = opts && opts.state, + prng = function() { return (xg.next() >>> 0) / 0x100000000; }; + prng.double = function() { + do { + var top = xg.next() >>> 11, + bot = (xg.next() >>> 0) / 0x100000000, + result = (top + bot) / (1 << 21); + } while (result === 0); + return result; + }; + prng.int32 = xg.next; + prng.quick = prng; + if (state) { + if (state.X) copy(state, xg); + prng.state = function() { return copy(xg, {}); }; + } + return prng; + } + + if (module && module.exports) { + module.exports = impl; + } else if (define && define.amd) { + define(function() { return impl; }); + } else { + this.xor4096 = impl; + } + + })( + commonjsGlobal, // window object or global + ('object') == 'object' && module, // present in node.js + (typeof undefined) == 'function' && undefined // present with an AMD loader + ); + } (xor4096$3)); + + var xor4096Exports = xor4096$3.exports; + var xor4096$2 = /*@__PURE__*/getDefaultExportFromCjs(xor4096Exports); + + var tychei$3 = {exports: {}}; + + var tychei$1 = tychei$3.exports; + + (function (module) { + // A Javascript implementaion of the "Tyche-i" prng algorithm by + // Samuel Neves and Filipe Araujo. + // See https://eden.dei.uc.pt/~sneves/pubs/2011-snfa2.pdf + + (function(global, module, define) { + + function XorGen(seed) { + var me = this, strseed = ''; + + // Set up generator function. 
+ me.next = function() { + var b = me.b, c = me.c, d = me.d, a = me.a; + b = (b << 25) ^ (b >>> 7) ^ c; + c = (c - d) | 0; + d = (d << 24) ^ (d >>> 8) ^ a; + a = (a - b) | 0; + me.b = b = (b << 20) ^ (b >>> 12) ^ c; + me.c = c = (c - d) | 0; + me.d = (d << 16) ^ (c >>> 16) ^ a; + return me.a = (a - b) | 0; + }; + + /* The following is non-inverted tyche, which has better internal + * bit diffusion, but which is about 25% slower than tyche-i in JS. + me.next = function() { + var a = me.a, b = me.b, c = me.c, d = me.d; + a = (me.a + me.b | 0) >>> 0; + d = me.d ^ a; d = d << 16 ^ d >>> 16; + c = me.c + d | 0; + b = me.b ^ c; b = b << 12 ^ d >>> 20; + me.a = a = a + b | 0; + d = d ^ a; me.d = d = d << 8 ^ d >>> 24; + me.c = c = c + d | 0; + b = b ^ c; + return me.b = (b << 7 ^ b >>> 25); + } + */ + + me.a = 0; + me.b = 0; + me.c = 2654435769 | 0; + me.d = 1367130551; + + if (seed === Math.floor(seed)) { + // Integer seed. + me.a = (seed / 0x100000000) | 0; + me.b = seed | 0; + } else { + // String seed. + strseed += seed; + } + + // Mix in string seed, then discard an initial batch of 64 values. 
+ for (var k = 0; k < strseed.length + 20; k++) { + me.b ^= strseed.charCodeAt(k) | 0; + me.next(); + } + } + + function copy(f, t) { + t.a = f.a; + t.b = f.b; + t.c = f.c; + t.d = f.d; + return t; + }; + + function impl(seed, opts) { + var xg = new XorGen(seed), + state = opts && opts.state, + prng = function() { return (xg.next() >>> 0) / 0x100000000; }; + prng.double = function() { + do { + var top = xg.next() >>> 11, + bot = (xg.next() >>> 0) / 0x100000000, + result = (top + bot) / (1 << 21); + } while (result === 0); + return result; + }; + prng.int32 = xg.next; + prng.quick = prng; + if (state) { + if (typeof(state) == 'object') copy(state, xg); + prng.state = function() { return copy(xg, {}); }; + } + return prng; + } + + if (module && module.exports) { + module.exports = impl; + } else if (define && define.amd) { + define(function() { return impl; }); + } else { + this.tychei = impl; + } + + })( + commonjsGlobal, + ('object') == 'object' && module, // present in node.js + (typeof undefined) == 'function' && undefined // present with an AMD loader + ); + } (tychei$3)); + + var tycheiExports = tychei$3.exports; + var tychei$2 = /*@__PURE__*/getDefaultExportFromCjs(tycheiExports); + + var seedrandom$3 = {exports: {}}; + + /* + Copyright 2019 David Bau. + + Permission is hereby granted, free of charge, to any person obtaining + a copy of this software and associated documentation files (the + "Software"), to deal in the Software without restriction, including + without limitation the rights to use, copy, modify, merge, publish, + distribute, sublicense, and/or sell copies of the Software, and to + permit persons to whom the Software is furnished to do so, subject to + the following conditions: + + The above copyright notice and this permission notice shall be + included in all copies or substantial portions of the Software. 
+ + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY + CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, + TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE + SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + + */ + var seedrandom$1 = seedrandom$3.exports; + + (function (module) { + (function (global, pool, math) { + // + // The following constants are related to IEEE 754 limits. + // + + var width = 256, // each RC4 output is 0 <= x < 256 + chunks = 6, // at least six RC4 outputs for each double + digits = 52, // there are 52 significant digits in a double + rngname = 'random', // rngname: name for Math.random and Math.seedrandom + startdenom = math.pow(width, chunks), + significance = math.pow(2, digits), + overflow = significance * 2, + mask = width - 1, + nodecrypto; // node.js crypto module, initialized at the bottom. + + // + // seedrandom() + // This is the seedrandom function described above. + // + function seedrandom(seed, options, callback) { + var key = []; + options = (options == true) ? { entropy: true } : (options || {}); + + // Flatten the seed string or build one from local entropy if needed. + var shortseed = mixkey(flatten( + options.entropy ? [seed, tostring(pool)] : + (seed == null) ? autoseed() : seed, 3), key); + + // Use the seed to initialize an ARC4 generator. + var arc4 = new ARC4(key); + + // This function returns a random double in [0, 1) that contains + // randomness in every bit of the mantissa of the IEEE 754 value. + var prng = function() { + var n = arc4.g(chunks), // Start with a numerator n < 2 ^ 48 + d = startdenom, // and denominator d = 2 ^ 48. + x = 0; // and no 'extra last byte'. 
+ while (n < significance) { // Fill up all significant digits by + n = (n + x) * width; // shifting numerator and + d *= width; // denominator and generating a + x = arc4.g(1); // new least-significant-byte. + } + while (n >= overflow) { // To avoid rounding up, before adding + n /= 2; // last byte, shift everything + d /= 2; // right using integer math until + x >>>= 1; // we have exactly the desired bits. + } + return (n + x) / d; // Form the number within [0, 1). + }; + + prng.int32 = function() { return arc4.g(4) | 0; }; + prng.quick = function() { return arc4.g(4) / 0x100000000; }; + prng.double = prng; + + // Mix the randomness into accumulated entropy. + mixkey(tostring(arc4.S), pool); + + // Calling convention: what to return as a function of prng, seed, is_math. + return (options.pass || callback || + function(prng, seed, is_math_call, state) { + if (state) { + // Load the arc4 state from the given state if it has an S array. + if (state.S) { copy(state, arc4); } + // Only provide the .state method if requested via options.state. + prng.state = function() { return copy(arc4, {}); }; + } + + // If called as a method of Math (Math.seedrandom()), mutate + // Math.random because that is how seedrandom.js has worked since v1.0. + if (is_math_call) { math[rngname] = prng; return seed; } + + // Otherwise, it is a newer calling convention, so return the + // prng directly. + else return prng; + })( + prng, + shortseed, + 'global' in options ? options.global : (this == math), + options.state); + } + + // + // ARC4 + // + // An ARC4 implementation. The constructor takes a key in the form of + // an array of at most (width) integers that should be 0 <= x < (width). + // + // The g(count) method returns a pseudorandom integer that concatenates + // the next (count) outputs from ARC4. Its return value is a number x + // that is in the range 0 <= x < (width ^ count). 
+ // + function ARC4(key) { + var t, keylen = key.length, + me = this, i = 0, j = me.i = me.j = 0, s = me.S = []; + + // The empty key [] is treated as [0]. + if (!keylen) { key = [keylen++]; } + + // Set up S using the standard key scheduling algorithm. + while (i < width) { + s[i] = i++; + } + for (i = 0; i < width; i++) { + s[i] = s[j = mask & (j + key[i % keylen] + (t = s[i]))]; + s[j] = t; + } + + // The "g" method returns the next (count) outputs as one number. + (me.g = function(count) { + // Using instance members instead of closure state nearly doubles speed. + var t, r = 0, + i = me.i, j = me.j, s = me.S; + while (count--) { + t = s[i = mask & (i + 1)]; + r = r * width + s[mask & ((s[i] = s[j = mask & (j + t)]) + (s[j] = t))]; + } + me.i = i; me.j = j; + return r; + // For robust unpredictability, the function call below automatically + // discards an initial batch of values. This is called RC4-drop[256]. + // See http://google.com/search?q=rsa+fluhrer+response&btnI + })(width); + } + + // + // copy() + // Copies internal state of ARC4 to or from a plain object. + // + function copy(f, t) { + t.i = f.i; + t.j = f.j; + t.S = f.S.slice(); + return t; + }; + + // + // flatten() + // Converts an object tree to nested arrays of strings. + // + function flatten(obj, depth) { + var result = [], typ = (typeof obj), prop; + if (depth && typ == 'object') { + for (prop in obj) { + try { result.push(flatten(obj[prop], depth - 1)); } catch (e) {} + } + } + return (result.length ? result : typ == 'string' ? obj : obj + '\0'); + } + + // + // mixkey() + // Mixes a string seed into a key that is an array of integers, and + // returns a shortened string seed that is equivalent to the result key. 
+ // + function mixkey(seed, key) { + var stringseed = seed + '', smear, j = 0; + while (j < stringseed.length) { + key[mask & j] = + mask & ((smear ^= key[mask & j] * 19) + stringseed.charCodeAt(j++)); + } + return tostring(key); + } + + // + // autoseed() + // Returns an object for autoseeding, using window.crypto and Node crypto + // module if available. + // + function autoseed() { + try { + var out; + if (nodecrypto && (out = nodecrypto.randomBytes)) { + // The use of 'out' to remember randomBytes makes tight minified code. + out = out(width); + } else { + out = new Uint8Array(width); + (global.crypto || global.msCrypto).getRandomValues(out); + } + return tostring(out); + } catch (e) { + var browser = global.navigator, + plugins = browser && browser.plugins; + return [+new Date, global, plugins, global.screen, tostring(pool)]; + } + } + + // + // tostring() + // Converts an array of charcodes to a string + // + function tostring(a) { + return String.fromCharCode.apply(0, a); + } + + // + // When seedrandom.js is loaded, we immediately mix a few bits + // from the built-in RNG into the entropy pool. Because we do + // not want to interfere with deterministic PRNG state later, + // seedrandom will not call math.random on its own again after + // initialization. + // + mixkey(math.random(), pool); + + // + // Nodejs and AMD support: export the implementation as a module using + // either convention. + // + if (('object') == 'object' && module.exports) { + module.exports = seedrandom; + // When in node.js, try using crypto package for autoseeding. + try { + nodecrypto = require('crypto'); + } catch (ex) {} + } else if ((typeof undefined) == 'function' && undefined.amd) { + undefined(function() { return seedrandom; }); + } else { + // When included as a plain script, set up Math.seedrandom global. + math['seed' + rngname] = seedrandom; + } + + + // End anonymous scope, and pass initial values. 
+ })( + // global: `self` in browsers (including strict mode and web workers), + // otherwise `this` in Node and other environments + (typeof self !== 'undefined') ? self : commonjsGlobal, + [], // pool: entropy pool starts empty + Math // math: package containing random, pow, and seedrandom + ); + } (seedrandom$3)); + + var seedrandomExports = seedrandom$3.exports; + var seedrandom$2 = /*@__PURE__*/getDefaultExportFromCjs(seedrandomExports); + + // A library of seedable RNGs implemented in Javascript. + // + // Usage: + // + // var seedrandom = require('seedrandom'); + // var random = seedrandom(1); // or any seed. + // var x = random(); // 0 <= x < 1. Every bit is random. + // var x = random.quick(); // 0 <= x < 1. 32 bits of randomness. + + // alea, a 53-bit multiply-with-carry generator by Johannes Baagøe. + // Period: ~2^116 + // Reported to pass all BigCrush tests. + var alea = aleaExports; + + // xor128, a pure xor-shift generator by George Marsaglia. + // Period: 2^128-1. + // Reported to fail: MatrixRank and LinearComp. + var xor128 = xor128Exports; + + // xorwow, George Marsaglia's 160-bit xor-shift combined plus weyl. + // Period: 2^192-2^32 + // Reported to fail: CollisionOver, SimpPoker, and LinearComp. + var xorwow = xorwowExports; + + // xorshift7, by François Panneton and Pierre L'ecuyer, takes + // a different approach: it adds robustness by allowing more shifts + // than Marsaglia's original three. It is a 7-shift generator + // with 256 bits, that passes BigCrush with no systmatic failures. + // Period 2^256-1. + // No systematic BigCrush failures reported. + var xorshift7 = xorshift7Exports; + + // xor4096, by Richard Brent, is a 4096-bit xor-shift with a + // very long period that also adds a Weyl generator. It also passes + // BigCrush with no systematic failures. Its long period may + // be useful if you have many generators and need to avoid + // collisions. + // Period: 2^4128-2^32. + // No systematic BigCrush failures reported. 
+ var xor4096 = xor4096Exports; + + // Tyche-i, by Samuel Neves and Filipe Araujo, is a bit-shifting random + // number generator derived from ChaCha, a modern stream cipher. + // https://eden.dei.uc.pt/~sneves/pubs/2011-snfa2.pdf + // Period: ~2^127 + // No systematic BigCrush failures reported. + var tychei = tycheiExports; + + // The original ARC4-based prng included in this library. + // Period: ~2^1600 + var sr = seedrandomExports; + + sr.alea = alea; + sr.xor128 = xor128; + sr.xorwow = xorwow; + sr.xorshift7 = xorshift7; + sr.xor4096 = xor4096; + sr.tychei = tychei; + + var seedrandom = sr; + + var index$1 = /*@__PURE__*/getDefaultExportFromCjs(seedrandom); + + /** + * @license + * Copyright 2017 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + const TEST_EPSILON_FLOAT32 = 1e-3; + const TEST_EPSILON_FLOAT16 = 1e-1; + function expectArraysClose(actual, expected, epsilon) { + if (epsilon == null) { + epsilon = testEpsilon(); + } + return expectArraysPredicate(actual, expected, (a, b) => areClose(a, b, epsilon)); + } + function testEpsilon() { + return ENGINE.backend.floatPrecision() === 32 ? 
TEST_EPSILON_FLOAT32 : + TEST_EPSILON_FLOAT16; + } + function expectArraysPredicate(actual, expected, predicate) { + let checkClassType = true; + if (isTypedArray(actual) || isTypedArray(expected)) { + checkClassType = false; + } + if (isTypedArray(actual) && isTypedArray(expected)) { + checkClassType = true; + } + if (checkClassType) { + const aType = actual.constructor.name; + const bType = expected.constructor.name; + if (aType !== bType) { + throw new Error(`Arrays are of different type. Actual: ${aType}. ` + + `Expected: ${bType}`); + } + } + if (Array.isArray(actual) && Array.isArray(expected)) { + const actualShape = inferShape(actual); + const expectedShape = inferShape(expected); + if (!arraysEqual(actualShape, expectedShape)) { + throw new Error(`Arrays have different shapes. ` + + `Actual: [${actualShape}]. Expected: [${expectedShape}]`); + } + } + const actualFlat = isTypedArray(actual) ? actual : flatten$2(actual); + const expectedFlat = isTypedArray(expected) ? + expected : + flatten$2(expected); + if (actualFlat.length !== expectedFlat.length) { + throw new Error(`Arrays have different lengths actual: ${actualFlat.length} vs ` + + `expected: ${expectedFlat.length}.\n` + + `Actual: ${actualFlat}.\n` + + `Expected: ${expectedFlat}.`); + } + for (let i = 0; i < expectedFlat.length; ++i) { + const a = actualFlat[i]; + const e = expectedFlat[i]; + if (!predicate(a, e)) { + throw new Error(`Arrays differ: actual[${i}] = ${a}, expected[${i}] = ${e}.\n` + + `Actual: ${actualFlat}.\n` + + `Expected: ${expectedFlat}.`); + } + } + if (typeof expect !== 'undefined') { + expect().nothing(); + } + } + function expectPromiseToFail(fn, done) { + fn().then(() => done.fail(), () => done()); + if (typeof expect !== 'undefined') { + expect().nothing(); + } + } + function expectArraysEqual(actual, expected) { + const exp = typeof expected === 'string' || typeof expected === 'number' || + typeof expected === 'boolean' ? 
+ [expected] : + expected; + if (isString(actual) || isString(actual[0]) || + isString(expected) || isString(expected[0])) { + // tslint:disable-next-line: triple-equals + return expectArraysPredicate(actual, exp, (a, b) => a == b); + } + return expectArraysPredicate(actual, expected, (a, b) => areClose(a, b, 0)); + } + function expectNumbersClose(a, e, epsilon) { + if (epsilon == null) { + epsilon = testEpsilon(); + } + if (!areClose(a, e, epsilon)) { + throw new Error(`Numbers differ: actual === ${a}, expected === ${e}`); + } + if (typeof expect !== 'undefined') { + expect().nothing(); + } + } + function areClose(a, e, epsilon) { + if (!isFinite(a) && !isFinite(e)) { + return true; + } + if (isNaN(a) || isNaN(e) || Math.abs(a - e) > epsilon) { + return false; + } + return true; + } + function expectValuesInRange(actual, low, high) { + for (let i = 0; i < actual.length; i++) { + if (actual[i] < low || actual[i] > high) { + throw new Error(`Value out of range:${actual[i]} low: ${low}, high: ${high}`); + } + } + } + function expectArrayBuffersEqual(actual, expected) { + // Safari does not like comparing ArrayBuffers directly. Wrapping in + // a Float32Array solves this issue. + const actualArray = new Float32Array(actual); + const expectedArray = new Float32Array(expected); + if (actualArray.length !== expectedArray.length) { + throw new Error('Expected ArrayBuffer to be of length ' + + `${expectedArray.length}, but it was ${actualArray.length}`); + } + for (let i = 0; i < expectedArray.length; i++) { + if (actualArray[i] !== expectedArray[i]) { + throw new Error(`Expected ArrayBuffer value at ${i} to be ` + + `${expectedArray[i]} but got ${actualArray[i]} instead`); + } + } + } + /** Encodes strings into utf-8 bytes. 
*/ + function encodeStrings(a) { + for (let i = 0; i < a.length; i++) { + const val = a[i]; + if (Array.isArray(val)) { + encodeStrings(val); + } + else { + a[i] = encodeString(val); + } + } + return a; + } + /** Creates an HTMLVideoElement with autoplay-friendly default settings. */ + function createVideoElement(source) { + const video = document.createElement('video'); + if ('playsInline' in video) { + // tslint:disable-next-line:no-any + video.playsInline = true; + } + video.muted = true; + video.loop = true; + video.style.position = 'fixed'; + video.style.left = '0px'; + video.style.top = '0px'; + video.preload = 'auto'; + video.appendChild(source); + return new Promise(resolve => { + video.addEventListener('loadeddata', _ => resolve(video)); + video.load(); + }); + } + async function play(video) { + await video.play(); + if ('requestVideoFrameCallback' in video) { + await new Promise(resolve => { + // tslint:disable-next-line:no-any + video.requestVideoFrameCallback(resolve); + }); + } + } + + var test_util = /*#__PURE__*/Object.freeze({ + __proto__: null, + TEST_EPSILON_FLOAT16: TEST_EPSILON_FLOAT16, + createVideoElement: createVideoElement, + encodeStrings: encodeStrings, + expectArrayBuffersEqual: expectArrayBuffersEqual, + expectArraysClose: expectArraysClose, + expectArraysEqual: expectArraysEqual, + expectNumbersClose: expectNumbersClose, + expectPromiseToFail: expectPromiseToFail, + expectValuesInRange: expectValuesInRange, + play: play, + testEpsilon: testEpsilon + }); + + /** + * @license + * Copyright 2018 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + // https://en.wikipedia.org/wiki/Marsaglia_polar_method + class MPRandGauss { + constructor(mean, stdDeviation, dtype, truncated, seed) { + this.mean = mean; + this.stdDev = stdDeviation; + this.dtype = dtype; + this.nextVal = NaN; + this.truncated = truncated; + if (this.truncated) { + this.upper = this.mean + this.stdDev * 2; + this.lower = this.mean - this.stdDev * 2; + } + const seedValue = seed ? seed : Math.random(); + this.random = seedrandom.alea(seedValue.toString()); + } + /** Returns next sample from a Gaussian distribution. */ + nextValue() { + if (!isNaN(this.nextVal)) { + const value = this.nextVal; + this.nextVal = NaN; + return value; + } + let resultX, resultY; + let isValid = false; + while (!isValid) { + let v1, v2, s; + do { + v1 = 2 * this.random() - 1; + v2 = 2 * this.random() - 1; + s = v1 * v1 + v2 * v2; + } while (s >= 1 || s === 0); + const mul = Math.sqrt(-2.0 * Math.log(s) / s); + resultX = this.mean + this.stdDev * v1 * mul; + resultY = this.mean + this.stdDev * v2 * mul; + if (!this.truncated || this.isValidTruncated(resultX)) { + isValid = true; + } + } + if (!this.truncated || this.isValidTruncated(resultY)) { + this.nextVal = this.convertValue(resultY); + } + return this.convertValue(resultX); + } + /** Handles proper rounding for non-floating-point numbers. 
*/ + convertValue(value) { + if (this.dtype == null || this.dtype === 'float32') { + return value; + } + return Math.round(value); + } + /** Returns true if less than 2-standard-deviations from the mean. */ + isValidTruncated(value) { + return value <= this.upper && value >= this.lower; + } + } + // Marsaglia, George, and Wai Wan Tsang. 2000. "A Simple Method for Generating + // Gamma Variables." + class RandGamma { + constructor(alpha, beta, dtype, seed) { + this.alpha = alpha; + this.beta = 1 / beta; // convert rate to scale parameter + this.dtype = dtype; + const seedValue = seed ? seed : Math.random(); + this.randu = seedrandom.alea(seedValue.toString()); + this.randn = new MPRandGauss(0, 1, dtype, false, this.randu()); + if (alpha < 1) { + this.d = alpha + (2 / 3); + } + else { + this.d = alpha - (1 / 3); + } + this.c = 1 / Math.sqrt(9 * this.d); + } + /** Returns next sample from a gamma distribution. */ + nextValue() { + let x2, v0, v1, x, u, v; + while (true) { + do { + x = this.randn.nextValue(); + v = 1 + (this.c * x); + } while (v <= 0); + v *= v * v; + x2 = x * x; + v0 = 1 - (0.331 * x2 * x2); + v1 = (0.5 * x2) + (this.d * (1 - v + Math.log(v))); + u = this.randu(); + if (u < v0 || Math.log(u) < v1) { + break; + } + } + v = (1 / this.beta) * this.d * v; + if (this.alpha < 1) { + v *= Math.pow(this.randu(), 1 / this.alpha); + } + return this.convertValue(v); + } + /** Handles proper rounding for non-floating-point numbers. */ + convertValue(value) { + if (this.dtype === 'float32') { + return value; + } + return Math.round(value); + } + } + class UniformRandom { + constructor(min = 0, max = 1, dtype, seed) { + /** Handles proper rounding for non floating point numbers. 
*/ + this.canReturnFloat = () => (this.dtype == null || this.dtype === 'float32'); + this.min = min; + this.range = max - min; + this.dtype = dtype; + if (seed == null) { + seed = Math.random(); + } + if (typeof seed === 'number') { + seed = seed.toString(); + } + if (!this.canReturnFloat() && this.range <= 1) { + throw new Error(`The difference between ${min} - ${max} <= 1 and dtype is not float`); + } + this.random = seedrandom.alea(seed); + } + convertValue(value) { + if (this.canReturnFloat()) { + return value; + } + return Math.round(value); + } + nextValue() { + return this.convertValue(this.min + this.range * this.random()); + } + } + function jarqueBeraNormalityTest(values) { + // https://en.wikipedia.org/wiki/Jarque%E2%80%93Bera_test + const n = values.length; + const s = skewness(values); + const k = kurtosis(values); + const jb = n / 6 * (Math.pow(s, 2) + 0.25 * Math.pow(k - 3, 2)); + // JB test requires 2-degress of freedom from Chi-Square @ 0.95: + // http://www.itl.nist.gov/div898/handbook/eda/section3/eda3674.htm + const CHI_SQUARE_2DEG = 5.991; + if (jb > CHI_SQUARE_2DEG) { + throw new Error(`Invalid p-value for JB: ${jb}`); + } + } + function expectArrayInMeanStdRange(actual, expectedMean, expectedStdDev, epsilon) { + if (epsilon == null) { + epsilon = testEpsilon(); + } + const actualMean = mean$2(actual); + expectNumbersClose(actualMean, expectedMean, epsilon); + expectNumbersClose(standardDeviation(actual, actualMean), expectedStdDev, epsilon); + } + function mean$2(values) { + let sum = 0; + for (let i = 0; i < values.length; i++) { + sum += values[i]; + } + return sum / values.length; + } + function standardDeviation(values, mean) { + let squareDiffSum = 0; + for (let i = 0; i < values.length; i++) { + const diff = values[i] - mean; + squareDiffSum += diff * diff; + } + return Math.sqrt(squareDiffSum / values.length); + } + function kurtosis(values) { + // https://en.wikipedia.org/wiki/Kurtosis + const valuesMean = mean$2(values); + const n = 
values.length; + let sum2 = 0; + let sum4 = 0; + for (let i = 0; i < n; i++) { + const v = values[i] - valuesMean; + sum2 += Math.pow(v, 2); + sum4 += Math.pow(v, 4); + } + return (1 / n) * sum4 / Math.pow((1 / n) * sum2, 2); + } + function skewness(values) { + // https://en.wikipedia.org/wiki/Skewness + const valuesMean = mean$2(values); + const n = values.length; + let sum2 = 0; + let sum3 = 0; + for (let i = 0; i < n; i++) { + const v = values[i] - valuesMean; + sum2 += Math.pow(v, 2); + sum3 += Math.pow(v, 3); + } + return (1 / n) * sum3 / Math.pow((1 / (n - 1)) * sum2, 3 / 2); + } + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + /** + * Creates a `tf.Tensor` with values sampled from a gamma distribution. + * + * ```js + * tf.randomGamma([2, 2], 1).print(); + * ``` + * + * @param shape An array of integers defining the output tensor shape. + * @param alpha The shape parameter of the gamma distribution. + * @param beta The inverse scale parameter of the gamma distribution. Defaults + * to 1. + * @param dtype The data type of the output. Defaults to float32. + * @param seed The seed for the random number generator. 
+ * + * @doc {heading: 'Tensors', subheading: 'Random'} + */ + function randomGamma_(shape, alpha, beta = 1, dtype = 'float32', seed) { + assertNonNegativeIntegerDimensions(shape); + if (beta == null) { + beta = 1; + } + if (dtype == null) { + dtype = 'float32'; + } + if (dtype !== 'float32' && dtype !== 'int32') { + throw new Error(`Unsupported data type ${dtype}`); + } + const rgamma = new RandGamma(alpha, beta, dtype, seed); + const res = buffer(shape, dtype); + for (let i = 0; i < res.values.length; i++) { + res.values[i] = rgamma.nextValue(); + } + return res.toTensor(); + } + const randomGamma = /* @__PURE__ */ op({ randomGamma_ }); + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + /** + * Creates a `tf.Tensor` with values sampled from a normal distribution. + * + * ```js + * tf.randomNormal([2, 2]).print(); + * ``` + * + * @param shape An array of integers defining the output tensor shape. + * @param mean The mean of the normal distribution. + * @param stdDev The standard deviation of the normal distribution. + * @param dtype The data type of the output. + * @param seed The seed for the random number generator. 
+ * + * @doc {heading: 'Tensors', subheading: 'Random'} + */ + function randomNormal_(shape, mean = 0, stdDev = 1, dtype, seed) { + assertNonNegativeIntegerDimensions(shape); + if (dtype != null && dtype === 'bool') { + throw new Error(`Unsupported data type ${dtype}`); + } + const randGauss = new MPRandGauss(mean, stdDev, dtype, false /* truncated */, seed); + const res = buffer(shape, dtype); + for (let i = 0; i < res.values.length; i++) { + res.values[i] = randGauss.nextValue(); + } + return res.toTensor(); + } + const randomNormal$2 = /* @__PURE__ */ op({ randomNormal_ }); + + /** + * @license + * Copyright 2022 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + /** + * Creates a `tf.Tensor` with values sampled from a normal distribution. + * + * The generated values will have mean 0 and standard deviation 1. + * + * ```js + * tf.randomStandardNormal([2, 2]).print(); + * ``` + * + * @param shape An array of integers defining the output tensor shape. + * @param dtype The data type of the output. + * @param seed The seed for the random number generator. 
+ * + * @doc {heading: 'Tensors', subheading: 'Random'} + */ + function randomStandardNormal_(shape, dtype, seed) { + if (dtype != null && dtype === 'bool') { + throw new Error(`Unsupported data type ${dtype}`); + } + return randomNormal$2(shape, 0, 1, dtype, seed); + } + const randomStandardNormal = /* @__PURE__ */ op({ randomStandardNormal_ }); + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + /** + * Creates a `tf.Tensor` with values sampled from a uniform distribution. + * + * The generated values follow a uniform distribution in the range [minval, + * maxval). The lower bound minval is included in the range, while the upper + * bound maxval is excluded. + * + * ```js + * tf.randomUniform([2, 2]).print(); + * ``` + * + * @param shape An array of integers defining the output tensor shape. + * @param minval The lower bound on the range of random values to generate. + * Defaults to 0. + * @param maxval The upper bound on the range of random values to generate. + * Defaults to 1. + * @param dtype The data type of the output tensor. Defaults to 'float32'. + * @param seed An optional int. Defaults to 0. If seed is set to be non-zero, + * the random number generator is seeded by the given seed. Otherwise, it is + * seeded by a random seed. 
+ * + * @doc {heading: 'Tensors', subheading: 'Random'} + */ + function randomUniform_(shape, minval = 0, maxval = 1, dtype = 'float32', seed) { + assertNonNegativeIntegerDimensions(shape); + const res = buffer(shape, dtype); + const random = new UniformRandom(minval, maxval, null, seed); + for (let i = 0; i < res.values.length; i++) { + res.values[i] = random.nextValue(); + } + return res.toTensor(); + } + const randomUniform$1 = /* @__PURE__ */ op({ randomUniform_ }); + + /** + * @license + * Copyright 2023 Google LLC. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + /** + * Creates a `tf.Tensor` with integers sampled from a uniform distribution. + * + * The generated values are uniform integers in the range [minval, maxval). The + * lower bound minval is included in the range, while the upper bound maxval is + * excluded. + * + * ```js + * tf.randomUniformInt([2, 2], 0, 10).print(); + * ``` + * + * @param shape An array of integers defining the output tensor shape. + * @param minval Inclusive lower bound on the generated integers. + * @param maxval Exclusive upper bound on the generated integers. + * @param seed An optional int. Defaults to 0. If seed is set to be non-zero, + * the random number generator is seeded by the given seed. Otherwise, it is + * seeded by a random seed. 
+ * + * @doc {heading: 'Tensors', subheading: 'Random'} + */ + function randomUniformInt_(shape, minval, maxval, seed) { + // TODO(mattsoulanille): Handle optional seed2 input. + return randomUniform$1(shape, minval, maxval, 'int32', seed); + } + const randomUniformInt = /* @__PURE__ */ op({ randomUniformInt_ }); + + /** + * @license + * Copyright 2018 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + /** + * Creates a new `tf.Tensor1D` filled with the numbers in the range provided. + * + * The tensor is a half-open interval meaning it includes start, but + * excludes stop. Decrementing ranges and negative step values are also + * supported. + * + * + * ```js + * tf.range(0, 9, 2).print(); + * ``` + * + * @param start An integer start value + * @param stop An integer stop value + * @param step An integer increment (will default to 1 or -1) + * @param dtype The data type of the output tensor. Defaults to 'float32'. + * + * @doc {heading: 'Tensors', subheading: 'Creation'} + */ + function range$3(start, stop, step = 1, dtype = 'float32') { + if (step === 0) { + throw new Error('Cannot have a step of zero'); + } + const attrs = { start, stop, step, dtype }; + return ENGINE.runKernel(Range, {} /* inputs */, attrs); + } + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. 
+ * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + /** + * Returns the real part of a complex (or real) tensor. + * + * Given a tensor input, this operation returns a tensor of type float that is + * the real part of each element in input considered as a complex number. + * + * If the input is real, it simply makes a clone. + * + * ```js + * const x = tf.complex([-2.25, 3.25], [4.75, 5.75]); + * tf.real(x).print(); + * ``` + * + * @doc {heading: 'Tensors', subheading: 'Creation'} + */ + function real_(input) { + const $input = convertToTensor(input, 'input', 'real'); + const inputs = { input: $input }; + return ENGINE.runKernel(Real, inputs); + } + const real$2 = /* @__PURE__ */ op({ real_ }); + + /** + * @license + * Copyright 2018 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ + /** + * Computes reciprocal of x element-wise: `1 / x` + * + * ```js + * const x = tf.tensor1d([0, 1, 2]); + * + * x.reciprocal().print(); // or tf.reciprocal(x) + * ``` + * @param x The input tensor. + * + * @doc {heading: 'Operations', subheading: 'Basic math'} + */ + function reciprocal_(x) { + const $x = convertToTensor(x, 'x', 'reciprocal'); + const inputs = { x: $x }; + return ENGINE.runKernel(Reciprocal, inputs); + } + const reciprocal$2 = /* @__PURE__ */ op({ reciprocal_ }); + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + /** + * Computes rectified linear element-wise: `max(x, 0)`. + * + * ```js + * const x = tf.tensor1d([-1, 2, -3, 4]); + * + * x.relu().print(); // or tf.relu(x) + * ``` + * @param x The input tensor. If the dtype is `bool`, the output dtype will be + * `int32`. + * + * @doc {heading: 'Operations', subheading: 'Basic math'} + */ + function relu_(x) { + const $x = convertToTensor(x, 'x', 'relu'); + const inputs = { x: $x }; + return ENGINE.runKernel(Relu$1, inputs); + } + const relu$2 = /* @__PURE__ */ op({ relu_ }); + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. 
+ * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + /** + * Computes rectified linear 6 element-wise: `min(max(x, 0), 6)`. + * + * ```js + * const x = tf.tensor1d([-1, 2, -3, 8]); + * + * x.relu6().print(); // or tf.relu6(x) + * ``` + * @param x The input tensor. If the dtype is `bool`, the output dtype will be + * `int32`. + * + * @doc {heading: 'Operations', subheading: 'Basic math'} + */ + function relu6_(x) { + const $x = convertToTensor(x, 'x', 'relu6'); + const inputs = { x: $x }; + return ENGINE.runKernel(Relu6$1, inputs); + } + const relu6$2 = /* @__PURE__ */ op({ relu6_ }); + + /** + * @license + * Copyright 2018 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + /** + * Reverses a `tf.Tensor` along a specified axis. 
+ * + * Also available are stricter rank-specific methods that assert that `x` is + * of the given rank: + * - `tf.reverse1d` + * - `tf.reverse2d` + * - `tf.reverse3d` + * - `tf.reverse4d` + * + * Except `tf.reverse1d` (which does not have axis param), all methods have + * same signature as this method. + * + * ```js + * const x = tf.tensor1d([1, 2, 3, 4]); + * + * x.reverse().print(); + * ``` + * + * ```js + * const x = tf.tensor2d([1, 2, 3, 4], [2, 2]); + * + * const axis = 1; + * x.reverse(axis).print(); + * ``` + * @param x The input tensor to be reversed. + * @param axis The set of dimensions to reverse. Must be in the + * range [-rank(x), rank(x)). Defaults to all axes. + * + * @doc {heading: 'Tensors', subheading: 'Slicing and Joining'} + */ + function reverse_(x, axis) { + const $x = convertToTensor(x, 'x', 'reverse'); + const inputs = { x: $x }; + const attrs = { dims: axis }; + return ENGINE.runKernel(Reverse, inputs, attrs); + } + const reverse$2 = /* @__PURE__ */ op({ reverse_ }); + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + /** + * Reverses a `tf.Tensor1D`. + * + * @param x The input tensor. 
+ */ + function reverse1d_(x) { + const $x = convertToTensor(x, 'x', 'reverse'); + assert$1($x.rank === 1, () => `Error in reverse1D: x must be rank 1 but got rank ${$x.rank}.`); + return reverse$2($x, 0); + } + const reverse1d = /* @__PURE__ */ op({ reverse1d_ }); + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + /** + * Reverses a `tf.Tensor2D` along a specified axis. + * + * @param x The input tensor. + * @param axis The set of dimensions to reverse. Must be in the + * range [-rank(x), rank(x)). Defaults to all axes. + */ + function reverse2d_(x, axis) { + const $x = convertToTensor(x, 'x', 'reverse'); + assert$1($x.rank === 2, () => `Error in reverse2D: x must be rank 2 but got rank ${$x.rank}.`); + return reverse$2($x, axis); + } + const reverse2d = /* @__PURE__ */ op({ reverse2d_ }); + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + /** + * Reverses a `tf.Tensor3D` along a specified axis. + * + * @param x The input tensor. + * @param axis The set of dimensions to reverse. Must be in the + * range [-rank(x), rank(x)). Defaults to all axes. + */ + function reverse3d_(x, axis) { + const $x = convertToTensor(x, 'x', 'reverse'); + assert$1($x.rank === 3, () => `Error in reverse3D: x must be rank 3 but got rank ${$x.rank}.`); + return reverse$2($x, axis); + } + const reverse3d = /* @__PURE__ */ op({ reverse3d_ }); + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + /** + * Reverses a `tf.Tensor4D` along a specified axis. + * + * @param x The input tensor. + * @param axis The set of dimensions to reverse. Must be in the + * range [-rank(x), rank(x)). Defaults to all axes. 
+ */ + function reverse4d_(x, axis) { + const $x = convertToTensor(x, 'x', 'reverse'); + assert$1($x.rank === 4, () => `Error in reverse4D: x must be rank 4 but got rank ${$x.rank}.`); + return reverse$2($x, axis); + } + const reverse4d = /* @__PURE__ */ op({ reverse4d_ }); + + /** + * @license + * Copyright 2018 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + /** + * Computes round of input `tf.Tensor` element-wise: `round(x)`. + * It implements banker's rounding. + * + * ```js + * const x = tf.tensor1d([.6, 1.1, -3.3]); + * + * x.round().print(); // or tf.round(x) + * ``` + * @param x The input tensor. + * + * @doc {heading: 'Operations', subheading: 'Basic math'} + */ + function round_(x) { + const $x = convertToTensor(x, 'x', 'round'); + const inputs = { x: $x }; + return ENGINE.runKernel(Round, inputs); + } + const round$2 = /* @__PURE__ */ op({ round_ }); + + /** + * @license + * Copyright 2018 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + /** + * Computes reciprocal of square root of the input `tf.Tensor` element-wise: + * `y = 1 / sqrt(x)` + * + * ```js + * const x = tf.tensor1d([1, 2, 4, -1]); + * + * x.rsqrt().print(); // or tf.rsqrt(x) + * ``` + * @param x The input tensor. + * + * @doc {heading: 'Operations', subheading: 'Basic math'} + */ + function rsqrt_(x) { + const $x = convertToTensor(x, 'x', 'rsqrt', 'float32'); + const inputs = { x: $x }; + return ENGINE.runKernel(Rsqrt, inputs); + } + const rsqrt$2 = /* @__PURE__ */ op({ rsqrt_ }); + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + /** + * Computes scaled exponential linear element-wise. + * + * `x < 0 ? 
scale * alpha * (exp(x) - 1) : scale * x` + * + * ```js + * const x = tf.tensor1d([-1, 2, -3, 4]); + * + * x.selu().print(); // or tf.selu(x) + * ``` + * @param x The input tensor. + * + * @doc {heading: 'Operations', subheading: 'Basic math'} + */ + function selu_(x) { + const $x = convertToTensor(x, 'x', 'selu'); + const inputs = { x: $x }; + return ENGINE.runKernel(Selu$1, inputs); + } + const selu$2 = /* @__PURE__ */ op({ selu_ }); + + /** + * 2-D convolution with separable filters. + * + * Performs a depthwise convolution that acts separately on channels followed + * by a pointwise convolution that mixes channels. Note that this is + * separability between dimensions [1, 2] and 3, not spatial separability + * between dimensions 1 and 2. + * + * See + * [https://www.tensorflow.org/api_docs/python/tf/nn/separable_conv2d]( + * https://www.tensorflow.org/api_docs/python/tf/nn/separable_conv2d) + * for more details. + * + * @param x The input tensor, of rank 4 or rank 3, of shape + * `[batch, height, width, inChannels]`. If rank 3, batch of 1 is + * assumed. + * @param depthwiseFilter The depthwise filter tensor, rank 4, of shape + * `[filterHeight, filterWidth, inChannels, channelMultiplier]`. This is + * the filter used in the first step. + * @param pointwiseFilter The pointwise filter tensor, rank 4, of shape + * `[1, 1, inChannels * channelMultiplier, outChannels]`. This is + * the filter used in the second step. + * @param strides The strides of the convolution: `[strideHeight, + * strideWidth]`. If strides is a single number, then `strideHeight == + * strideWidth`. + * @param pad The type of padding algorithm. + * - `same` and stride 1: output will be of same size as input, + * regardless of filter size. + * - `valid`: output will be smaller than input if filter is larger + * than 1x1. 
+ * - For more info, see this guide: + * [https://www.tensorflow.org/api_docs/python/tf/nn/convolution]( + * https://www.tensorflow.org/api_docs/python/tf/nn/convolution) + * @param dilations The dilation rates: `[dilationHeight, dilationWidth]` + * in which we sample input values across the height and width dimensions + * in atrous convolution. Defaults to `[1, 1]`. If `rate` is a single + * number, then `dilationHeight == dilationWidth`. If it is greater than + * 1, then all values of `strides` must be 1. + * @param dataFormat: An optional string from: "NHWC", "NCHW". Defaults to + * "NHWC". Specify the data format of the input and output data. With the + * default format "NHWC", the data is stored in the order of: [batch, + * height, width, channels]. Only "NHWC" is currently supported. + * + * @doc {heading: 'Operations', subheading: 'Convolution'} + */ + function separableConv2d_(x, depthwiseFilter, pointwiseFilter, strides, pad, dilation = [1, 1], dataFormat = 'NHWC') { + const $x = convertToTensor(x, 'x', 'separableConv2d'); + const $depthwiseFilter = convertToTensor(depthwiseFilter, 'depthwiseFilter', 'separableConv2d'); + const $pointwiseFilter = convertToTensor(pointwiseFilter, 'pointwiseFilter', 'separableConv2d'); + let x4D = $x; + let reshapedTo4D = false; + if ($x.rank === 3) { + reshapedTo4D = true; + x4D = reshape$3($x, [1, $x.shape[0], $x.shape[1], $x.shape[2]]); + } + if (dataFormat === 'NCHW') { + throw new Error('separableConv2d currently does not support dataFormat NCHW; only ' + + 'NHWC is supported'); + } + assert$1(x4D.rank === 4, () => `Error in separableConv2d: input must be rank 4, but got ` + + `rank ${x4D.rank}.`); + assert$1($depthwiseFilter.rank === 4, () => `Error in separableConv2d: depthwise filter must be rank 4, but ` + + `got rank ${$depthwiseFilter.rank}.`); + assert$1($pointwiseFilter.rank === 4, () => `Error in separableConv2d: pointwise filter must be rank 4, but ` + + `got rank ${$depthwiseFilter.rank}.`); + 
assert$1($pointwiseFilter.shape[0] === 1, () => `Error in separableConv2d: the first dimension of pointwise filter ` + + ` must be 1, but got ${$pointwiseFilter.shape[0]}.`); + assert$1($pointwiseFilter.shape[1] === 1, () => `Error in separableConv2d: the second dimension of pointwise ` + + `filter must be 1, but got ${$pointwiseFilter.shape[1]}.`); + const inChannels = $depthwiseFilter.shape[2]; + const channelMultiplier = $depthwiseFilter.shape[3]; + assert$1($pointwiseFilter.shape[2] === inChannels * channelMultiplier, () => `Error in separableConv2d: the third dimension of pointwise filter ` + + `must be ${inChannels * channelMultiplier}, ` + + `but got ${$pointwiseFilter.shape[2]}.`); + const depthwise = depthwiseConv2d$3(x4D, $depthwiseFilter, strides, pad, dataFormat, dilation); + const pointwiseStride = 1; + const res = conv2d$4(depthwise, $pointwiseFilter, pointwiseStride, 'valid', dataFormat); + if (reshapedTo4D) { + return reshape$3(res, [res.shape[1], res.shape[2], res.shape[3]]); + } + return res; + } + const separableConv2d$1 = /* @__PURE__ */ op({ separableConv2d_ }); + + /** + * @license + * Copyright 2020 Google Inc. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + /** + * Computes the difference between two lists of numbers. 
+ * + * Given a Tensor `x` and a Tensor `y`, this operation returns a Tensor `out` + * that represents all values that are in `x` but not in `y`. The returned + * Tensor `out` is sorted in the same order that the numbers appear in `x` + * (duplicates are preserved). This operation also returns a Tensor indices that + * represents the position of each out element in `x`. In other words: + * + * `out[i] = x[idx[i]] for i in [0, 1, ..., out.length - 1]` + * + * ```js + * const x = [1, 2, 3, 4, 5, 6]; + * const y = [1, 3, 5]; + * + * const [out, indices] = await tf.setdiff1dAsync(x, y); + * out.print(); // [2, 4, 6] + * indices.print(); // [1, 3, 5] + * ``` + * + * @param x 1-D Tensor. Values to keep. + * @param y 1-D Tensor. Must have the same type as x. Values to exclude in the + * output. + * @returns Promise of Tensor tuple [out, indices]. + * out: Tensor with the same type as x. + * indices: A Tensor of type int32. + * + * @doc {heading: 'Tensors', subheading: 'Transformations'} + */ + async function setdiff1dAsync_(x, y) { + const $x = convertToTensor(x, 'x', 'setdiff1d'); + const $y = convertToTensor(y, 'y', 'setdiff1d'); + assert$1($x.dtype === $y.dtype, () => `x and y should have the same dtype, but got x (${$x.dtype}) and y (${$y.dtype}).`); + assert$1($x.rank === 1, () => `x should be 1D tensor, but got x (${$x.shape}).`); + assert$1($y.rank === 1, () => `y should be 1D tensor, but got y (${$y.shape}).`); + const xVals = await $x.data(); + const yVals = await $y.data(); + const ySet = new Set(yVals); + let outputSize = 0; + for (let i = 0; i < xVals.length; i++) { + if (!ySet.has(xVals[i])) { + outputSize++; + } + } + const buffer = new TensorBuffer([outputSize], $x.dtype); + const indices = new TensorBuffer([outputSize], 'int32'); + for (let i = 0, p = 0; i < xVals.length; i++) { + if (!ySet.has(xVals[i])) { + buffer.values[p] = xVals[i]; + indices.values[p] = i; + p++; + } + } + return [buffer.toTensor(), indices.toTensor()]; + } + const setdiff1dAsync = 
setdiff1dAsync_; + + /** + * @license + * Copyright 2018 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + /** + * Returns an element-wise indication of the sign of a number. + * + * ```js + * const x = tf.tensor1d([.6, 1.1, -3.3, NaN, 0]); + * + * x.sign().print(); // or tf.sign(x) + * ``` + * @param x The input Tensor. + * + * @doc {heading: 'Operations', subheading: 'Basic math'} + */ + function sign_(x) { + const $x = convertToTensor(x, 'x', 'sign'); + const inputs = { x: $x }; + return ENGINE.runKernel(Sign, inputs); + } + const sign$3 = /* @__PURE__ */ op({ sign_ }); + + /** + * @license + * Copyright 2018 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ + /** + * Computes sin of the input Tensor element-wise: `sin(x)` + * + * ```js + * const x = tf.tensor1d([0, Math.PI / 2, Math.PI * 3 / 4]); + * + * x.sin().print(); // or tf.sin(x) + * ``` + * @param x The input tensor. + * + * @doc {heading: 'Operations', subheading: 'Basic math'} + */ + function sin_(x) { + const $x = convertToTensor(x, 'x', 'sin', 'float32'); + const inputs = { x: $x }; + return ENGINE.runKernel(Sin, inputs); + } + const sin$2 = /* @__PURE__ */ op({ sin_ }); + + /** + * @license + * Copyright 2018 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + /** + * Computes hyperbolic sin of the input `tf.Tensor` element-wise: `sinh(x)` + * + * ```js + * const x = tf.tensor1d([0, 1, -1, .7]); + * + * x.sinh().print(); // or tf.sinh(x) + * ``` + * @param x The input tensor. + * + * @doc {heading: 'Operations', subheading: 'Basic math'} + */ + function sinh_(x) { + const $x = convertToTensor(x, 'x', 'sinh'); + const inputs = { x: $x }; + return ENGINE.runKernel(Sinh, inputs); + } + const sinh$2 = /* @__PURE__ */ op({ sinh_ }); + + /** + * @license + * Copyright 2018 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + /** + * Extracts a 1D slice from 1D array starting at coordinates `begin` and is + * of length `size`. See `slice` for details. + */ + function slice1d_(x, begin, size) { + const $x = convertToTensor(x, 'x', 'slice1d'); + assert$1($x.rank === 1, () => `slice1d expects a rank-1 tensor, but got a rank-${$x.rank} tensor`); + return slice$2($x, [begin], [size]); + } + const slice1d = /* @__PURE__ */ op({ slice1d_ }); + + /** + * @license + * Copyright 2018 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + /** + * Extracts a 2D slice from a 2D array starting at coordinates `begin` and + * is of size `size`. See `slice` for details. 
+ */ + function slice2d_(x, begin, size) { + const $x = convertToTensor(x, 'x', 'slice2d'); + assert$1($x.rank === 2, () => `slice2d expects a rank-2 tensor, but got a rank-${$x.rank} tensor`); + return slice$2($x, begin, size); + } + const slice2d = /* @__PURE__ */ op({ slice2d_ }); + + /** + * @license + * Copyright 2018 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + /** + * Extracts a 3D slice from a 3D array starting at coordinates `begin` and + * is of size `size`. See `slice` for details. + */ + function slice3d_(x, begin, size) { + const $x = convertToTensor(x, 'x', 'slice3d'); + assert$1($x.rank === 3, () => `slice3d expects a rank-3 tensor, but got a rank-${$x.rank} tensor`); + return slice$2($x, begin, size); + } + const slice3d = /* @__PURE__ */ op({ slice3d_ }); + + /** + * @license + * Copyright 2018 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + /** + * Extracts a 4D slice from a 4D array starting at coordinates `begin` and + * is of size `size`. See `slice` for details. + */ + function slice4d_(x, begin, size) { + const $x = convertToTensor(x, 'x', 'slice4d'); + assert$1($x.rank === 4, () => `slice4d expects a rank-4 tensor, but got a rank-${$x.rank} tensor`); + return slice$2($x, begin, size); + } + const slice4d = /* @__PURE__ */ op({ slice4d_ }); + + /** + * @license + * Copyright 2018 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + /** + * Computes the softmax normalized vector given the logits. + * + * ```js + * const a = tf.tensor1d([1, 2, 3]); + * + * a.softmax().print(); // or tf.softmax(a) + * ``` + * + * ```js + * const a = tf.tensor2d([2, 4, 6, 1, 2, 3], [2, 3]); + * + * a.softmax().print(); // or tf.softmax(a) + * ``` + * + * @param logits The logits array. + * @param dim The dimension softmax would be performed on. Defaults to `-1` + * which indicates the last dimension. 
+ * + * @doc {heading: 'Operations', subheading: 'Normalization'} + */ + function softmax_(logits, dim = -1) { + const $logits = convertToTensor(logits, 'logits', 'softmax', 'float32'); + if (dim === -1) { + dim = $logits.rank - 1; + } + if (dim !== $logits.rank - 1) { + throw Error('Softmax along a non-last dimension is not yet supported. ' + + `Logits was rank ${$logits.rank} and dim was ${dim}`); + } + const inputs = { logits: $logits }; + const attrs = { dim }; + return ENGINE.runKernel(Softmax$2, inputs, attrs); + } + const softmax$3 = /* @__PURE__ */ op({ softmax_ }); + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + /** + * Fast Fourier transform. + * + * Computes the 1-dimensional discrete Fourier transform over the inner-most + * dimension of input. + * + * ```js + * const real = tf.tensor1d([1, 2, 3]); + * const imag = tf.tensor1d([1, 2, 3]); + * const x = tf.complex(real, imag); + * + * x.fft().print(); // tf.spectral.fft(x).print(); + * ``` + * @param input The complex input to compute an fft over. 
+ * + * @doc {heading: 'Operations', subheading: 'Spectral', namespace: 'spectral'} + */ + function fft_(input) { + assert$1(input.dtype === 'complex64', () => `The dtype for tf.spectral.fft() must be complex64 ` + + `but got ${input.dtype}.`); + const inputs = { input }; + return ENGINE.runKernel(FFT, inputs); + } + const fft$2 = /* @__PURE__ */ op({ fft_ }); + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + /** + * Inverse fast Fourier transform. + * + * Computes the inverse 1-dimensional discrete Fourier transform over the + * inner-most dimension of input. + * + * ```js + * const real = tf.tensor1d([1, 2, 3]); + * const imag = tf.tensor1d([1, 2, 3]); + * const x = tf.complex(real, imag); + * + * x.ifft().print(); // tf.spectral.ifft(x).print(); + * ``` + * @param input The complex input to compute an ifft over. + * + * @doc {heading: 'Operations', subheading: 'Spectral', namespace: 'spectral'} + */ + function ifft_(input) { + assert$1(input.dtype === 'complex64', () => `The dtype for tf.spectral.ifft() must be complex64 ` + + `but got ${input.dtype}.`); + const inputs = { input }; + return ENGINE.runKernel(IFFT, inputs); + } + const ifft$2 = /* @__PURE__ */ op({ ifft_ }); + + /** + * @license + * Copyright 2018 Google LLC. All Rights Reserved. 
+ * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + /** + * Inversed real value input fast Fourier transform. + * + * Computes the 1-dimensional inversed discrete Fourier transform over the + * inner-most dimension of the real input. + * + * ```js + * const real = tf.tensor1d([1, 2, 3]); + * const imag = tf.tensor1d([0, 0, 0]); + * const x = tf.complex(real, imag); + * + * x.irfft().print(); + * ``` + * @param input The real value input to compute an irfft over. 
+ * + * @doc {heading: 'Operations', subheading: 'Spectral', namespace: 'spectral'} + */ + function irfft_(input) { + const innerDimensionSize = input.shape[input.shape.length - 1]; + const batch = input.size / innerDimensionSize; + let ret; + if (innerDimensionSize <= 2) { + const complexInput = reshape$3(input, [batch, innerDimensionSize]); + ret = ifft$2(complexInput); + } + else { + // The length of unique components of the DFT of a real-valued signal + // is 2 * (input_len - 1) + const outputShape = [batch, 2 * (innerDimensionSize - 1)]; + const realInput = reshape$3(real$2(input), [batch, innerDimensionSize]); + const imagInput = reshape$3(imag$2(input), [batch, innerDimensionSize]); + const realConjugate = reverse$2(slice$2(realInput, [0, 1], [batch, innerDimensionSize - 2]), 1); + const imagConjugate = mul(reverse$2(slice$2(imagInput, [0, 1], [batch, innerDimensionSize - 2]), 1), scalar(-1)); + const r = concat$2([realInput, realConjugate], 1); + const i = concat$2([imagInput, imagConjugate], 1); + const complexInput = reshape$3(complex$2(r, i), [outputShape[0], outputShape[1]]); + ret = ifft$2(complexInput); + } + ret = real$2(ret); + // reshape the result if the input is 3D tensor. + if (input.rank === 3 && input.shape[0] !== 0) { + const temp = ret; + const batch = input.shape[0]; + ret = reshape$3(ret, [batch, ret.shape[0] / batch, ret.shape[1]]); + temp.dispose(); + } + return ret; + } + const irfft = /* @__PURE__ */ op({ irfft_ }); + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + /** + * Splits a `tf.Tensor` into sub tensors. + * + * If `numOrSizeSplits` is a number, splits `x` along dimension `axis` + * into `numOrSizeSplits` smaller tensors. + * Requires that `numOrSizeSplits` evenly divides `x.shape[axis]`. + * + * If `numOrSizeSplits` is a number array, splits `x` into + * `numOrSizeSplits.length` pieces. The shape of the `i`-th piece has the + * same size as `x` except along dimension `axis` where the size is + * `numOrSizeSplits[i]`. + * + * ```js + * const x = tf.tensor2d([1, 2, 3, 4, 5, 6, 7, 8], [2, 4]); + * const [a, b] = tf.split(x, 2, 1); + * a.print(); + * b.print(); + * + * const [c, d, e] = tf.split(x, [1, 2, 1], 1); + * c.print(); + * d.print(); + * e.print(); + * ``` + * + * @param x The input tensor to split. + * @param numOrSizeSplits Either an integer indicating the number of + * splits along the axis or an array of integers containing the sizes of + * each output tensor along the axis. If a number then it must evenly divide + * `x.shape[axis]`; otherwise the sum of sizes must match `x.shape[axis]`. + * Can contain one -1 indicating that dimension is to be inferred. + * @param axis The dimension along which to split. Defaults to 0 (the first + * dim). + * + * @doc {heading: 'Tensors', subheading: 'Slicing and Joining'} + */ + function split_(x, numOrSizeSplits, axis = 0) { + const $x = convertToTensor(x, 'x', 'split'); + const inputs = { x: $x }; + const attr = { numOrSizeSplits, axis }; + return ENGINE.runKernel(SplitV, inputs, attr); + } + const split$3 = /* @__PURE__ */ op({ split_ }); + + /** + * @license + * Copyright 2018 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + /** + * Real value input fast Fourier transform. + * + * Computes the 1-dimensional discrete Fourier transform over the + * inner-most dimension of the real input. + * + * ```js + * const real = tf.tensor1d([1, 2, 3]); + * + * real.rfft().print(); + * ``` + * @param input The real value input to compute an rfft over. + * + * @doc {heading: 'Operations', subheading: 'Spectral', namespace: 'spectral'} + */ + function rfft_(input, fftLength) { + assert$1(input.dtype === 'float32', () => `The dtype for rfft() must be real value but got ${input.dtype}`); + let innerDimensionSize = input.shape[input.shape.length - 1]; + const batch = input.size / innerDimensionSize; + let adjustedInput; + if (fftLength != null && fftLength < innerDimensionSize) { + // Need to crop + const begin = input.shape.map(v => 0); + const size = input.shape.map(v => v); + size[input.shape.length - 1] = fftLength; + adjustedInput = slice$2(input, begin, size); + innerDimensionSize = fftLength; + } + else if (fftLength != null && fftLength > innerDimensionSize) { + // Need to pad with zeros + const zerosShape = input.shape.map(v => v); + zerosShape[input.shape.length - 1] = fftLength - innerDimensionSize; + adjustedInput = concat$2([input, zeros$2(zerosShape)], input.shape.length - 1); + innerDimensionSize = fftLength; + } + else { + adjustedInput = input; + } + // Complement the input with zero imaginary numbers. 
+ const zerosInput = zerosLike$3(adjustedInput); + const complexInput = reshape$3(complex$2(adjustedInput, zerosInput), [batch, innerDimensionSize]); + const ret = fft$2(complexInput); + // Exclude complex conjugations. These conjugations are put symmetrically. + const half = Math.floor(innerDimensionSize / 2) + 1; + const realValues = real$2(ret); + const imagValues = imag$2(ret); + const realComplexConjugate = split$3(realValues, [half, innerDimensionSize - half], realValues.shape.length - 1); + const imagComplexConjugate = split$3(imagValues, [half, innerDimensionSize - half], imagValues.shape.length - 1); + const outputShape = adjustedInput.shape.slice(); + outputShape[adjustedInput.shape.length - 1] = half; + return reshape$3(complex$2(realComplexConjugate[0], imagComplexConjugate[0]), outputShape); + } + const rfft = /* @__PURE__ */ op({ rfft_ }); + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + /** + * Returns (a - b) * (a - b) element-wise. + * Supports broadcasting. + * + * ```js + * const a = tf.tensor1d([1, 4, 3, 16]); + * const b = tf.tensor1d([1, 2, 9, 4]); + * + * a.squaredDifference(b).print(); // or tf.squaredDifference(a, b) + * ``` + * + * ```js + * // Broadcast squared difference a with b. 
+ * const a = tf.tensor1d([2, 4, 6, 8]); + * const b = tf.scalar(5); + * + * a.squaredDifference(b).print(); // or tf.squaredDifference(a, b) + * ``` + * + * @param a The first tensor. + * @param b The second tensor. Must have the same type as `a`. + * + * @doc {heading: 'Operations', subheading: 'Arithmetic'} + */ + function squaredDifference_(a, b) { + let $a = convertToTensor(a, 'a', 'squaredDifference'); + let $b = convertToTensor(b, 'b', 'squaredDifference'); + [$a, $b] = makeTypesMatch($a, $b); + assertAndGetBroadcastShape($a.shape, $b.shape); + const inputs = { a: $a, b: $b }; + const attrs = {}; + return ENGINE.runKernel(SquaredDifference, inputs, attrs); + } + const squaredDifference$2 = /* @__PURE__ */ op({ squaredDifference_ }); + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + /** + * Removes dimensions of size 1 from the shape of a `tf.Tensor`. + * + * ```js + * const x = tf.tensor([1, 2, 3, 4], [1, 1, 4]); + * x.squeeze().print(); + * ``` + * + * @param x The input tensor to be squeezed. + * @param axis An optional list of numbers. If specified, only + * squeezes the dimensions listed. The dimension index starts at 0. It + * is an error to squeeze a dimension that is not 1. 
+ * + * @doc {heading: 'Tensors', subheading: 'Transformations'} + */ + function squeeze_(x, axis) { + const $x = convertToTensor(x, 'x', 'squeeze', 'string_or_numeric'); + return reshape$3($x, squeezeShape($x.shape, axis).newShape); + } + const squeeze = /* @__PURE__ */ op({ squeeze_ }); + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + /** + * Stacks a list of rank-`R` `tf.Tensor`s into one rank-`(R+1)` `tf.Tensor`. + * + * ```js + * const a = tf.tensor1d([1, 2]); + * const b = tf.tensor1d([3, 4]); + * const c = tf.tensor1d([5, 6]); + * tf.stack([a, b, c]).print(); + * ``` + * + * @param tensors A list of tensor objects with the same shape and dtype. + * @param axis The axis to stack along. Defaults to 0 (the first dim). 
+ * + * @doc {heading: 'Tensors', subheading: 'Slicing and Joining'} + */ + function stack_(tensors, axis = 0) { + const $tensors = convertToTensorArray(tensors, 'tensors', 'stack', 'string_or_numeric'); + assert$1($tensors.length >= 1, () => 'Pass at least one tensor to tf.stack'); + if ($tensors.length > 0) { + assert$1(axis <= $tensors[0].rank, () => 'Axis must be <= rank of the tensor'); + } + const inputs = $tensors; + const attrs = { axis }; + return ENGINE.runKernel(Pack, inputs, attrs); + } + const stack = /* @__PURE__ */ op({ stack_ }); + + /** + * @license + * Copyright 2018 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + /** + * Computes step of the input `tf.Tensor` element-wise: `x > 0 ? 1 : alpha` + * + * ```js + * const x = tf.tensor1d([0, 2, -1, -3]); + * + * x.step(.5).print(); // or tf.step(x, .5) + * ``` + * @param x The input tensor. + * @param alpha The gradient when input is negative. Defaults to 0. + * + * @doc {heading: 'Operations', subheading: 'Basic math'} + */ + function step_(x, alpha = 0.0) { + const $x = convertToTensor(x, 'x', 'step'); + const inputs = { x: $x }; + const attrs = { alpha }; + return ENGINE.runKernel(Step, inputs, attrs); + } + const step$2 = /* @__PURE__ */ op({ step_ }); + + /** + * @license + * Copyright 2018 Google LLC. All Rights Reserved. 
+ * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + /** + * Extracts a strided slice of a tensor. + * + * Roughly speaking, this op extracts a slice of size (end-begin)/stride from + * the given input tensor (x). Starting at the location specified by begin the + * slice continues by adding stride to the index until all dimensions are not + * less than end. Note that a stride can be negative, which causes a reverse + * slice. + * + * ```js + * const t = tf.tensor3d([1, 1, 1 ,2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5, 6, 6, 6], + * [3, 2, 3]); + * t.stridedSlice([1, 0, 0], [2, 1, 3], [1, 1, 1]).print() // [[[3, 3, 3]]] + * t.stridedSlice([1, 0, 0], [2, 2, 3], [1, 1, 1]).print() // [[[3, 3, 3], + * // [4, 4, 4]]] + * t.stridedSlice([1, -1, 0], [2, -3, 3], [1, -1, 1]).print() // [[[4, 4, 4], + * // [3, 3, 3]]] + * ``` + * + * @param x The tensor to stride slice. + * @param begin The coordinates to start the slice from. + * @param end: The coordinates to end the slice at. + * @param strides: The size of the slice. + * @param beginMask: If the ith bit of beginMask is set, begin[i] is ignored + * and the fullest possible range in that dimension is used instead. + * @param endMask: If the ith bit of endMask is set, end[i] is ignored + * and the fullest possible range in that dimension is used instead. 
+ * @param shrinkAxisMask: a bitmask where bit i implies that + * the ith specification should shrink the dimensionality. begin and end must + * imply a slice of size 1 in the dimension. + * + * @doc {heading: 'Operations', subheading: 'Slicing and Joining'} + */ + function stridedSlice_(x, begin, end, strides, beginMask = 0, endMask = 0, ellipsisMask = 0, newAxisMask = 0, shrinkAxisMask = 0) { + const $x = convertToTensor(x, 'x', 'stridedSlice', 'string_or_numeric'); + const inputs = { x: $x }; + const attrs = { + begin, + end, + strides, + beginMask, + endMask, + ellipsisMask, + newAxisMask, + shrinkAxisMask + }; + return ENGINE.runKernel(StridedSlice, inputs, attrs); + } + const stridedSlice$2 = /* @__PURE__ */ op({ stridedSlice_ }); + + /** + * @license + * Copyright 2018 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + /** + * Computes tan of the input `tf.Tensor` element-wise, `tan(x)` + * + * ```js + * const x = tf.tensor1d([0, Math.PI / 2, Math.PI * 3 / 4]); + * + * x.tan().print(); // or tf.tan(x) + * ``` + * @param x The input tensor. 
+ * + * @doc {heading: 'Operations', subheading: 'Basic math'} + */ + function tan_(x) { + const $x = convertToTensor(x, 'x', 'tan', 'float32'); + const inputs = { x: $x }; + return ENGINE.runKernel(Tan, inputs); + } + const tan$2 = /* @__PURE__ */ op({ tan_ }); + + /** + * @license + * Copyright 2018 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + /** + * Creates rank-1 `tf.Tensor` with the provided values, shape and dtype. + * + * The same functionality can be achieved with `tf.tensor`, but in general + * we recommend using `tf.tensor1d` as it makes the code more readable. + * + * ```js + * tf.tensor1d([1, 2, 3]).print(); + * ``` + * + * @param values The values of the tensor. Can be array of numbers, + * or a `TypedArray`. + * @param dtype The data type. + * + * @doc {heading: 'Tensors', subheading: 'Creation'} + */ + function tensor1d(values, dtype) { + assertNonNull(values); + const inferredShape = inferShape(values, dtype); + if (inferredShape.length !== 1) { + throw new Error('tensor1d() requires values to be a flat/TypedArray'); + } + const shape = null; + return makeTensor(values, shape, inferredShape, dtype); + } + + /** + * @license + * Copyright 2018 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + /** + * Creates rank-2 `tf.Tensor` with the provided values, shape and dtype. + * + * The same functionality can be achieved with `tf.tensor`, but in general + * we recommend using `tf.tensor2d` as it makes the code more readable. + * + * ```js + * // Pass a nested array. + * tf.tensor2d([[1, 2], [3, 4]]).print(); + * ``` + * ```js + * // Pass a flat array and specify a shape. + * tf.tensor2d([1, 2, 3, 4], [2, 2]).print(); + * ``` + * + * @param values The values of the tensor. Can be nested array of numbers, + * or a flat array, or a `TypedArray`. + * @param shape The shape of the tensor. If not provided, it is inferred from + * `values`. + * @param dtype The data type. + * + * @doc {heading: 'Tensors', subheading: 'Creation'} + */ + function tensor2d(values, shape, dtype) { + assertNonNull(values); + if (shape != null && shape.length !== 2) { + throw new Error('tensor2d() requires shape to have two numbers'); + } + const inferredShape = inferShape(values, dtype); + if (inferredShape.length !== 2 && inferredShape.length !== 1) { + throw new Error('tensor2d() requires values to be number[][] or flat/TypedArray'); + } + if (inferredShape.length === 1 && shape == null) { + throw new Error('tensor2d() requires shape to be provided when `values` ' + + 'are a flat/TypedArray'); + } + return makeTensor(values, shape, inferredShape, dtype); + } + + /** + * @license + * Copyright 2018 Google LLC. All Rights Reserved. 
+ * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + /** + * Creates rank-3 `tf.Tensor` with the provided values, shape and dtype. + * + * The same functionality can be achieved with `tf.tensor`, but in general + * we recommend using `tf.tensor3d` as it makes the code more readable. + * + * ```js + * // Pass a nested array. + * tf.tensor3d([[[1], [2]], [[3], [4]]]).print(); + * ``` + * ```js + * // Pass a flat array and specify a shape. + * tf.tensor3d([1, 2, 3, 4], [2, 2, 1]).print(); + * ``` + * + * @param values The values of the tensor. Can be nested array of numbers, + * or a flat array, or a `TypedArray`. + * @param shape The shape of the tensor. If not provided, it is inferred from + * `values`. + * @param dtype The data type. 
+ * + * @doc {heading: 'Tensors', subheading: 'Creation'} + */ + function tensor3d(values, shape, dtype) { + assertNonNull(values); + if (shape != null && shape.length !== 3) { + throw new Error('tensor3d() requires shape to have three numbers'); + } + const inferredShape = inferShape(values, dtype); + if (inferredShape.length !== 3 && inferredShape.length !== 1) { + throw new Error('tensor3d() requires values to be number[][][] or flat/TypedArray'); + } + if (inferredShape.length === 1 && shape == null) { + throw new Error('tensor3d() requires shape to be provided when `values` ' + + 'are a flat array'); + } + return makeTensor(values, shape, inferredShape, dtype); + } + + /** + * @license + * Copyright 2018 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + /** + * Creates rank-4 `tf.Tensor` with the provided values, shape and dtype. + * + * The same functionality can be achieved with `tf.tensor`, but in general + * we recommend using `tf.tensor4d` as it makes the code more readable. + * + * ```js + * // Pass a nested array. + * tf.tensor4d([[[[1], [2]], [[3], [4]]]]).print(); + * ``` + * ```js + * // Pass a flat array and specify a shape. + * tf.tensor4d([1, 2, 3, 4], [1, 2, 2, 1]).print(); + * ``` + * + * @param values The values of the tensor. Can be nested array of numbers, + * or a flat array, or a `TypedArray`. 
+ * @param shape The shape of the tensor. Optional. If not provided, + * it is inferred from `values`. + * @param dtype The data type. + * + * @doc {heading: 'Tensors', subheading: 'Creation'} + */ + function tensor4d(values, shape, dtype) { + assertNonNull(values); + if (shape != null && shape.length !== 4) { + throw new Error('tensor4d() requires shape to have four numbers'); + } + const inferredShape = inferShape(values, dtype); + if (inferredShape.length !== 4 && inferredShape.length !== 1) { + throw new Error('tensor4d() requires values to be number[][][][] or flat/TypedArray'); + } + if (inferredShape.length === 1 && shape == null) { + throw new Error('tensor4d() requires shape to be provided when `values` ' + + 'are a flat array'); + } + return makeTensor(values, shape, inferredShape, dtype); + } + + /** + * @license + * Copyright 2018 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + /** + * Creates rank-5 `tf.Tensor` with the provided values, shape and dtype. + * + * The same functionality can be achieved with `tf.tensor`, but in general + * we recommend using `tf.tensor5d` as it makes the code more readable. + * + * ```js + * // Pass a nested array. + * tf.tensor5d([[[[[1],[2]],[[3],[4]]],[[[5],[6]],[[7],[8]]]]]).print(); + * ``` + * ```js + * // Pass a flat array and specify a shape. 
+ * tf.tensor5d([1, 2, 3, 4, 5, 6, 7, 8], [1, 2, 2, 2, 1]).print(); + * ``` + * + * @param values The values of the tensor. Can be nested array of numbers, + * or a flat array, or a `TypedArray`. + * @param shape The shape of the tensor. Optional. If not provided, + * it is inferred from `values`. + * @param dtype The data type. + * + * @doc {heading: 'Tensors', subheading: 'Creation'} + */ + function tensor5d(values, shape, dtype) { + assertNonNull(values); + if (shape != null && shape.length !== 5) { + throw new Error('tensor5d() requires shape to have five numbers'); + } + const inferredShape = inferShape(values, dtype); + if (inferredShape.length !== 5 && inferredShape.length !== 1) { + throw new Error('tensor5d() requires values to be ' + + 'number[][][][][] or flat/TypedArray'); + } + if (inferredShape.length === 1 && shape == null) { + throw new Error('tensor5d() requires shape to be provided when `values` ' + + 'are a flat array'); + } + return makeTensor(values, shape, inferredShape, dtype); + } + + /** + * @license + * Copyright 2018 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + /** + * Creates rank-6 `tf.Tensor` with the provided values, shape and dtype. + * + * The same functionality can be achieved with `tf.tensor`, but in general + * we recommend using `tf.tensor6d` as it makes the code more readable. 
+ * + * ```js + * // Pass a nested array. + * tf.tensor6d([[[[[[1],[2]],[[3],[4]]],[[[5],[6]],[[7],[8]]]]]]).print(); + * ``` + * ```js + * // Pass a flat array and specify a shape. + * tf.tensor6d([1, 2, 3, 4, 5, 6, 7, 8], [1, 1, 2, 2, 2, 1]).print(); + * ``` + * + * @param values The values of the tensor. Can be nested array of numbers, + * or a flat array, or a `TypedArray`. + * @param shape The shape of the tensor. Optional. If not provided, + * it is inferred from `values`. + * @param dtype The data type. + * + * @doc {heading: 'Tensors', subheading: 'Creation'} + */ + function tensor6d(values, shape, dtype) { + assertNonNull(values); + if (shape != null && shape.length !== 6) { + throw new Error('tensor6d() requires shape to have six numbers'); + } + const inferredShape = inferShape(values, dtype); + if (inferredShape.length !== 6 && inferredShape.length !== 1) { + throw new Error('tensor6d() requires values to be number[][][][][][] or ' + + 'flat/TypedArray'); + } + if (inferredShape.length === 1 && shape == null) { + throw new Error('tensor6d() requires shape to be provided when `values` ' + + 'are a flat array'); + } + shape = shape || + inferredShape; + return makeTensor(values, shape, inferredShape, dtype); + } + + /** + * Check whether updates.shape = indices.shape[:batchDim] + + * shape[sliceDim:] + * + * @param x The input tensor. + */ + function validateUpdateShape(shape, indices, updates) { + const sliceDim = (indices.rank > 1) ? indices.shape[indices.rank - 1] : 1; + const batchDim = (indices.rank > 1) ? indices.rank - 1 : 1; + const shapeError = 'Must have updates.shape = indices.shape[:batchDim] + ' + + `shape[sliceDim:], got updates.shape: ${updates.shape}` + + `, indices.shape: ${indices.shape}, shape: ${shape}` + + `, sliceDim: ${sliceDim}, and batchDim: ${batchDim}.`; + if (updates.rank < batchDim) { + throw new Error(shapeError + ` update.rank < ${batchDim}. 
`); + } + if (shape.length < sliceDim + (updates.rank - batchDim)) { + throw new Error(shapeError + + ` Output shape length < ${sliceDim + (updates.rank - batchDim)}`); + } + if (updates.rank !== batchDim + shape.length - sliceDim) { + throw new Error(shapeError + ` update.rank != ${batchDim + shape.length - sliceDim}`); + } + for (let d = 0; d < batchDim; ++d) { + if (updates.shape[d] !== indices.shape[d]) { + throw new Error(shapeError + + ` updates.shape[${d}] (${updates.shape[d]}) != indices.shape[${d}] (${indices.shape[d]}).`); + } + } + for (let d = 0; d < updates.rank - batchDim; ++d) { + if (updates.shape[d + batchDim] !== shape[d + sliceDim]) { + throw new Error(shapeError + + ` updates.shape[${d + batchDim}] (${updates.shape[d + batchDim]}) != shape[${d + batchDim}] (${shape[d + batchDim]})`); + } + } + } + /** + * Validate scatter nd inputs. + * + * @param update The tensor contains the update values. + * @param indices The tensor contains the indices for the update values. + * @param shape The shape of the output tensor. + */ + function validateInput$1(updates, indices, shape) { + if (indices.rank < 1) { + throw new Error('tf.scatterND() expects the indices to be rank 1 or higher,' + + ` but the rank was ${indices.rank}.`); + } + if (updates.rank < 1) { + throw new Error('tf.scatterND() expects the updates to be rank 1 or higher,' + + ` but the rank was ${updates.rank}.`); + } + if (indices.dtype !== 'int32') { + throw new Error(`The dtype of 'indices' should be int32, but got dtype: ${indices.dtype}`); + } + if (shape.length < 1) { + throw new Error(`Output rank must be greater or equal to 1, but got shape: ${shape}`); + } + if (shape.length === 0) { + if (indices.size === 0) { + throw new Error(`Indices specified for empty output. indices shape: ${indices.shape}`); + } + if (updates.size === 0) { + throw new Error(`Updates specified for empty output. 
updates shape: ${updates.shape}`); + } + } + validateUpdateShape(shape, indices, updates); + } + /** + * Calculate the shape information for the output. + * + * @param update The tensor contains the update values. + * @param indices The tensor contains the indices for the update values. + * @param shape The shape of the output tensor. + * + * @returns ScatterShapeInfo + */ + function calculateShapes(updates, indices, shape) { + // Calculate the number of dimensions in indices + const indicesRank = indices.shape.length; + const sliceRank = (indicesRank > 1) ? indices.shape[indicesRank - 1] : 1; + // Calculate the number of elements that make up each slice of our updated + // tensor. This allows us to work with flattened tensors and copy over whole + // slices at a time. + const totalNd = shape.length; + let sliceSize = 1; + for (let i = sliceRank; i < totalNd; ++i) { + sliceSize *= shape[i]; + } + const safeSliceDim = (sliceRank < 1) ? 1 : sliceRank; + const numUpdates = sizeFromShape(indices.shape) / safeSliceDim; + const strides = [...computeStrides(shape.slice(0, sliceRank)), 1]; + const outputSize = sizeFromShape(shape); + return { sliceRank, numUpdates, sliceSize, strides, outputSize }; + } + + var scatter_nd_util = /*#__PURE__*/Object.freeze({ + __proto__: null, + calculateShapes: calculateShapes, + validateInput: validateInput$1, + validateUpdateShape: validateUpdateShape + }); + + /** + * @license + * Copyright 2022 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + /** + * Creates a new tensor by applying sparse updates to individual + * values or slices to the passed in tensor according to + * indices. This operator is the similar to scatterNd op, except that the + * udpates are scattered on an existing tensor (as opposed to a zero-tensor). + * + * If indices contains duplicates, then we pick the last update for the index. + * + * If an out of bound index is found on CPU, an error is returned. + * + * Warning: There are some GPU specific semantics for this operation. + * - If an out of bound index is found, the index is ignored. + * - The order in which updates are applied is nondeterministic, so the output + * will be nondeterministic if indices contains duplicates. + * ```js + * const shape = [8]; + * const tensor = tf.ones(shape); + * const indices = tf.tensor2d([4, 3, 1, 7], [4, 1], 'int32'); + * const updates = tf.tensor1d([9, 10, 11, 12]); + * + * tf.tensorScatterUpdate(tensor, indices, updates).print(); + * //[1, 11, 1, 10, 9, 1, 1, 12] + * ``` + * + * @param tensor A Tensor. Tensor to copy/update. + * @param indices The tensor contains the indices into the output tensor, must + * have at least 2 axes: (num_updates, index_depth). + * @param updates The tensor contains the value for the indices. 
+ * + * @doc {heading: 'Operations', subheading: 'Slicing and Joining'} + */ + function tensorScatterUpdate_(tensor, indices, updates) { + const $tensor = convertToTensor(tensor, 'tensor', 'tensorScatterupdate'); + const $indices = convertToTensor(indices, 'indices', 'tensorScatterupdate', 'int32'); + const $updates = convertToTensor(updates, 'updates', 'tensorScatterupdate'); + validateInput$1($updates, $indices, $tensor.shape); + if ($tensor.dtype !== $updates.dtype) { + throw new Error(`tensor and updates must have the same dtype, instead they are ${$tensor.dtype} and ${$updates.dtype}.`); + } + const inputs = { + tensor: $tensor, + indices: $indices, + updates: $updates + }; + const attrs = {}; + // tslint:disable-next-line: no-unnecessary-type-assertion + return ENGINE.runKernel(TensorScatterUpdate, inputs, attrs); + } + const tensorScatterUpdate$2 = op({ tensorScatterUpdate_ }); + + /** + * @license + * Copyright 2018 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + /** + * Finds the values and indices of the `k` largest entries along the last + * dimension. + * + * If the input is a vector (rank=1), finds the k largest entries in the vector + * and outputs their values and indices as vectors. Thus values[j] is the j-th + * largest entry in input, and its index is indices[j]. 
+ * For higher rank inputs, computes the top k entries along the last dimension. + * + * If two elements are equal, the lower-index element appears first. + * + * ```js + * const a = tf.tensor2d([[1, 5], [4, 3]]); + * const {values, indices} = tf.topk(a); + * values.print(); + * indices.print(); + * ``` + * @param x 1-D or higher `tf.Tensor` with last dimension being at least `k`. + * @param k Number of top elements to look for along the last dimension. + * @param sorted If true, the resulting `k` elements will be sorted by the + * values in descending order. + * + * @doc {heading: 'Operations', subheading: 'Evaluation'} + */ + function topk_(x, k = 1, sorted = true) { + const $x = convertToTensor(x, 'x', 'topk'); + if ($x.rank === 0) { + throw new Error('topk() expects the input to be of rank 1 or higher'); + } + const lastDim = $x.shape[$x.shape.length - 1]; + if (k < 0) { + throw new Error(`'k' passed to topk() must be >= 0 but got ${k}`); + } + if (k > lastDim) { + throw new Error(`'k' passed to topk() must be <= the last dimension (${lastDim}) ` + + `but got ${k}`); + } + const inputs = { x: $x }; + const attrs = { k, sorted }; + const [values, indices] = ENGINE.runKernel(TopK, inputs, attrs); + return { values, indices }; + } + const topk = /* @__PURE__ */ op({ topk_ }); + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ + /** + * Creates a `tf.Tensor` with values sampled from a truncated normal + * distribution. + * + * ```js + * tf.truncatedNormal([2, 2]).print(); + * ``` + * + * The generated values follow a normal distribution with specified mean and + * standard deviation, except that values whose magnitude is more than 2 + * standard deviations from the mean are dropped and re-picked. + * + * @param shape An array of integers defining the output tensor shape. + * @param mean The mean of the normal distribution. + * @param stdDev The standard deviation of the normal distribution. + * @param dtype The data type of the output tensor. + * @param seed The seed for the random number generator. + * + * @doc {heading: 'Tensors', subheading: 'Creation'} + */ + function truncatedNormal_(shape, mean = 0, stdDev = 1, dtype, seed) { + assertNonNegativeIntegerDimensions(shape); + if (dtype != null && dtype === 'bool') { + throw new Error(`Unsupported data type $ { dtype }`); + } + const randGauss = new MPRandGauss(mean, stdDev, dtype, true /* truncated */, seed); + const res = buffer(shape, dtype); + for (let i = 0; i < res.values.length; i++) { + res.values[i] = randGauss.nextValue(); + } + return res.toTensor(); + } + const truncatedNormal$1 = /* @__PURE__ */ op({ truncatedNormal_ }); + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + /** + * Finds unique elements along an axis of a tensor. + * + * It returns a tensor `values` containing all of the unique elements along the + * `axis` of the given tensor `x` in the same order that they occur along the + * `axis` in `x`; `x` does not need to be sorted. It also returns a tensor + * `indices` the same size as the number of the elements in `x` along the `axis` + * dimension. It contains the index in the unique output `values`. + * + * ```js + * // A 1-D tensor + * const a = tf.tensor1d([1, 1, 2, 4, 4, 4, 7, 8, 8]); + * const {values, indices} = tf.unique(a); + * values.print(); // [1, 2, 4, 7, 8,] + * indices.print(); // [0, 0, 1, 2, 2, 2, 3, 4, 4] + * ``` + * + * ```js + * // A 2-D tensor with axis=0 + * // + * // 'a' is: [[1, 0, 0], + * // [1, 0, 0], + * // [2, 0, 0]] + * const a = tf.tensor2d([[1, 0, 0], [1, 0, 0], [2, 0, 0]]); + * const {values, indices} = tf.unique(a, 0) + * values.print(); // [[1, 0, 0], + * // [2, 0, 0]] + * indices.print(); // [0, 0, 1] + * ``` + * + * ```js + * // A 2-D tensor with axis=1 + * // + * // 'a' is: [[1, 0, 0], + * // [1, 0, 0], + * // [2, 0, 0]] + * const a = tf.tensor2d([[1, 0, 0], [1, 0, 0], [2, 0, 0]]); + * const {values, indices} = tf.unique(a, 1) + * values.print(); // [[1, 0], + * // [1, 0], + * // [2, 0]] + * indices.print(); // [0, 1, 1] + * ``` + * @param x A tensor (int32, string, bool). + * @param axis The axis of the tensor to find the unique elements. 
+ * @returns [uniqueElements, indices] (see above for details) + * + * @doc {heading: 'Operations', subheading: 'Evaluation'} + */ + function unique_(x, axis = 0) { + const $x = convertToTensor(x, 'x', 'unique', 'string_or_numeric'); + assert$1($x.rank > 0, () => 'The input tensor must be at least 1D'); + const inputs = { x: $x }; + const attrs = { axis }; + const [values, indices] = ENGINE.runKernel(Unique, inputs, attrs); + return { values, indices }; + } + const unique$3 = /* @__PURE__ */ op({ unique_ }); + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + /** + * Computes the sum along segments of a `tf.Tensor`. + * + * ```js + * const x = tf.tensor1d([1, 2, 3, 4]); + * const segmentIds = tf.tensor1d([1, 2, 0, 1], 'int32'); + * const numSegments = 3; + * + * x.unsortedSegmentSum(segmentIds, numSegments).print() + * //or tf.unsortedSegmentSum(x, segmentIds, numSegments) + * ``` + * @param x The `tf.Tensor` that will be summed along its segments. + * @param segmentIds A `tf.Tensor1D` whose rank is equal to the rank of `x`'s + * dimension along the `axis`. Maps each element of `x` to a segment. + * @param numSegments The number of distinct `segmentIds`. 
+ * + * @doc {heading: 'Operations', subheading: 'Segment'} + */ + function unsortedSegmentSum_(x, segmentIds, numSegments) { + const $x = convertToTensor(x, 'x', 'unsortedSegmentSum'); + const $segmentIds = convertToTensor(segmentIds, 'segmentIds', 'unsortedSegmentSum', 'int32'); + assert$1(isInt(numSegments), () => 'numSegments must be of dtype int'); + const inputs = { x: $x, segmentIds: $segmentIds }; + const attrs = { numSegments }; + return ENGINE.runKernel(UnsortedSegmentSum, inputs, attrs); + } + const unsortedSegmentSum$2 = /* @__PURE__ */ op({ unsortedSegmentSum_ }); + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + /** + * Unstacks a `tf.Tensor` of rank-`R` into a list of rank-`(R-1)` `tf.Tensor`s. + * + * ```js + * const a = tf.tensor2d([1, 2, 3, 4], [2, 2]); + * + * tf.unstack(a).forEach(tensor => tensor.print()); + * ``` + * + * @param x A tensor object. + * @param axis The axis to unstack along. Defaults to 0 (the first dim). 
+ * + * @doc {heading: 'Tensors', subheading: 'Slicing and Joining'} + */ + function unstack_(x, axis = 0) { + const $x = convertToTensor(x, 'x', 'unstack', 'string_or_numeric'); + assert$1(axis >= -$x.shape.length && axis < $x.shape.length, () => `Axis = ${axis} is not in [-${$x.shape.length}, ${$x.shape.length})`); + const inputs = { value: $x }; + const attrs = { axis }; + return ENGINE.runKernel(Unpack, inputs, attrs); + } + const unstack = /* @__PURE__ */ op({ unstack_ }); + + /** + * @license + * Copyright 2022 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + /** + * Searches for where a value would go in a sorted sequence. + * + * This is not a method for checking containment (like javascript in). + * + * The typical use case for this operation is "binning", "bucketing", or + * "discretizing". The values are assigned to bucket-indices based on the edges + * listed in 'sortedSequence'. This operation returns the bucket-index for each + * value. + * + * The index returned corresponds to the first edge greater than the value. + * + * The axis is not settable for this operation. It always operates on the + * innermost dimension (axis=-1). The operation will accept any number of outer + * dimensions. + * + * Note: This operation assumes that 'upperBound' is sorted along the + * innermost axis, maybe using 'sort(..., axis=-1)'. 
If the sequence is not + * sorted no error is raised and the content of the returned tensor is not well + * defined. + * + * ```js + * const seq = tf.tensor1d([0, 3, 9, 10, 10]); + * const values = tf.tensor1d([0, 4, 10]); + * const result = tf.upperBound(seq, values); + * result.print(); // [1, 2, 5] + * ``` + * @param sortedSequence: N-D. Sorted sequence. + * @param values: N-D. Search values. + * @return An N-D int32 tensor the size of values containing the result of + * applying upper bound to each value. The result is not a global index to + * the entire Tensor, but the index in the last dimension. + * @doc {heading: 'Operations', subheading: 'Evaluation'} + */ + function upperBound$1(sortedSequence, values) { + return searchSorted$2(sortedSequence, values, 'right'); + } + + /** + * @license + * Copyright 2018 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + /** + * Creates a new variable with the provided initial value. + * ```js + * const x = tf.variable(tf.tensor([1, 2, 3])); + * x.assign(tf.tensor([4, 5, 6])); + * + * x.print(); + * ``` + * + * @param initialValue Initial value for the tensor. + * @param trainable If true, optimizers are allowed to update it. + * @param name Name of the variable. Defaults to a unique id. + * @param dtype If set, initialValue will be converted to the given type. 
+ * + * @doc {heading: 'Tensors', subheading: 'Creation'} + */ + function variable$1(initialValue, trainable = true, name, dtype) { + return ENGINE.makeVariable(initialValue, trainable, name, dtype); + } + + /** + * @license + * Copyright 2018 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + function whereImpl$2(condShape, condVals) { + const indices = []; + for (let i = 0; i < condVals.length; i++) { + if (condVals[i]) { + indices.push(i); + } + } + const inBuffer = buffer(condShape, 'int32'); + const out = buffer([indices.length, condShape.length], 'int32'); + for (let i = 0; i < indices.length; i++) { + const loc = inBuffer.indexToLoc(indices[i]); + const offset = i * condShape.length; + out.values.set(loc, offset); + } + return out.toTensor(); + } + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + /** + * Returns the coordinates of true elements of condition. + * + * The coordinates are returned in a 2-D tensor where the first dimension (rows) + * represents the number of true elements, and the second dimension (columns) + * represents the coordinates of the true elements. Keep in mind, the shape of + * the output tensor can vary depending on how many true values there are in + * input. Indices are output in row-major order. The resulting tensor has the + * shape `[numTrueElems, condition.rank]`. + * + * This is analogous to calling the python `tf.where(cond)` without an x or y. + * + * ```js + * const cond = tf.tensor1d([false, false, true], 'bool'); + * const result = await tf.whereAsync(cond); + * result.print(); + * ``` + * + * @doc {heading: 'Operations', subheading: 'Logical'} + */ + async function whereAsync_(condition) { + const $condition = convertToTensor(condition, 'condition', 'whereAsync', 'bool'); + const vals = await $condition.data(); + const res = whereImpl$2($condition.shape, vals); + if (condition !== $condition) { + $condition.dispose(); + } + return res; + } + const whereAsync = whereAsync_; + + /** + * @license + * Copyright 2018 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ + /** + * Apply boolean mask to tensor. + * + * ```js + * const tensor = tf.tensor2d([1, 2, 3, 4, 5, 6], [3, 2]); + * const mask = tf.tensor1d([1, 0, 1], 'bool'); + * const result = await tf.booleanMaskAsync(tensor, mask); + * result.print(); + * ``` + * + * @param tensor N-D tensor. + * @param mask K-D boolean tensor, K <= N and K must be known statically. + * @param axis A 0-D int Tensor representing the axis in tensor to mask from. + * By default, axis is 0 which will mask from the first dimension. + * Otherwise K + axis <= N. + * + * @doc {heading: 'Tensors', subheading: 'Slicing and Joining'} + */ + async function booleanMaskAsync_(tensor, mask, axis) { + const $tensor = convertToTensor(tensor, 'tensor', 'boolMask'); + const $mask = convertToTensor(mask, 'mask', 'boolMask', 'bool'); + const axisFrom = axis == null ? 0 : axis; + const maskDim = $mask.rank; + const tensorShape = $tensor.shape; + assert$1(maskDim > 0, () => 'mask cannot be scalar'); + assertShapesMatch(tensorShape.slice(axisFrom, axisFrom + maskDim), $mask.shape, `mask's shape must match the first K dimensions of tensor's shape,`); + let leadingSize = 1; + for (let i = axisFrom; i < axisFrom + maskDim; i++) { + leadingSize *= tensorShape[i]; + } + const targetTensorShape = tensorShape.slice(0, axisFrom) + .concat([leadingSize], tensorShape.slice(axisFrom + maskDim)); + const reshapedTensor = reshape$3($tensor, targetTensorShape); + const reshapedMask = reshape$3($mask, [-1]); + const positivePositions = await whereAsync(reshapedMask); + const indices = squeeze(positivePositions, [1]); + const res = gather$1(reshapedTensor, indices, axisFrom); + // Ensure no memory leak. 
+ if (tensor !== $tensor) { + $tensor.dispose(); + } + if (mask !== $mask) { + $mask.dispose(); + } + indices.dispose(); + reshapedTensor.dispose(); + reshapedMask.dispose(); + positivePositions.dispose(); + return res; + } + const booleanMaskAsync = booleanMaskAsync_; + + /** + * @license + * Copyright 2018 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + /** + * Transposes the `tf.Tensor`. Permutes the dimensions according to `perm`. + * + * The returned `tf.Tensor`'s dimension `i` will correspond to the input + * dimension `perm[i]`. If `perm` is not given, it is set to `[n-1...0]`, + * where `n` is the rank of the input `tf.Tensor`. Hence by default, this + * operation performs a regular matrix transpose on 2-D input `tf.Tensor`s. + * + * ```js + * const a = tf.tensor2d([1, 2, 3, 4, 5, 6], [2, 3]); + * + * a.transpose().print(); // or tf.transpose(a) + * ``` + * + * @param x The tensor to transpose. + * @param perm The permutation of the dimensions of a. + * @param conjugate Will conjugate complex input if true. 
+ * + * @doc {heading: 'Operations', subheading: 'Matrices'} + */ + function transpose_(x, perm, conjugate) { + const $x = convertToTensor(x, 'x', 'transpose'); + if (perm == null) { + perm = $x.shape.map((s, i) => i).reverse(); + } + assert$1($x.rank === perm.length, () => `Error in transpose: rank of input ${$x.rank} ` + + `must match length of perm ${perm}.`); + perm.forEach(axis => { + assert$1(axis >= 0 && axis < $x.rank, () => `All entries in 'perm' must be between 0 and ${$x.rank - 1}` + + ` but got ${perm}`); + }); + if ($x.rank <= 1) { + return $x.clone(); + } + const inputs = { x: $x }; + const attrs = { perm }; + if ($x.dtype === 'complex64') { + return tidy(() => { + let $real = real$2($x); + let $imag = imag$2($x); + $real = ENGINE.runKernel(Transpose, { x: $real }, attrs); + $imag = ENGINE.runKernel(Transpose, { x: $imag }, attrs); + if (conjugate) { + $imag = neg$2($imag); + } + return complex$2($real, $imag); + }); + } + return ENGINE.runKernel(Transpose, inputs, attrs); + } + const transpose$2 = /* @__PURE__ */ op({ transpose_ }); + + /** + * @license + * Copyright 2018 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + /** + * Compute the moving average of a variable. 
+ * + * Without zeroDebias, the moving average operation is defined by: + * `v += delta` + * where + * `delta = (1 - decay) * (x - v)` + * + * With zeroDebias (default), the `delta` term is scaled to debias the + * effect of the (assumed) zero-initialization of `v`. + * `delta /= (1 - decay ^ step)` + * + * For more details on the zero-debiasing algorithm, see: + * https://arxiv.org/abs/1412.6980 + * + * Note that this function is completely stateless and does not keep track of + * step count. The step count needs to be maintained by the caller and passed + * in as `step`. + * + * @param v The current moving average value. + * @param x New input value, must have the same shape and dtype as `v`. + * @param decay The decay factor. Typical values are 0.95 and 0.99. + * @param step Step count. + * @param zeroDebias: Whether zeroDebias is to be performed (default: `true`). + * @returns The new moving average value. + * + * @doc {heading: 'Operations', subheading: 'Moving Average'} + */ + function movingAverage_(v, x, decay, step, zeroDebias = true) { + const $v = convertToTensor(v, 'v', 'movingAverage'); + const $x = convertToTensor(x, 'x', 'movingAverage'); + const $decay = convertToTensor(decay, 'decay', 'movingAverage'); + assertTypesMatch($v, $x); + assert$1(arraysEqual($v.shape, $x.shape), () => 'Shape mismatch in v and x'); + const one = scalar(1); + const oneMinusDecay = sub$2(one, $decay); + let update = mul(sub$2($x, $v), oneMinusDecay); + if (zeroDebias) { + assert$1(step != null, () => 'When using zeroDebias: true, step is required.'); + const $step = convertToTensor(step, 'step', 'movingAverage'); + update = div$1(update, sub$2(one, pow$3($decay, $step))); + } + return add$3($v, update); + } + const movingAverage = /* @__PURE__ */ op({ movingAverage_ }); + + /** + * @license + * Copyright 2018 Google LLC. All Rights Reserved. 
+ * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + /** + * Creates a new tensor by applying sparse updates to individual + * values or slices within a zero tensor of the given shape tensor according to + * indices. This operator is the inverse of the `tf.gatherND` operator which + * extracts values or slices from a given tensor. + * + * ```js + * const indices = tf.tensor2d([4, 3, 1, 7], [4, 1], 'int32'); + * const updates = tf.tensor1d([9, 10, 11, 12]); + * const shape = [8]; + * tf.scatterND(indices, updates, shape).print() //[0, 11, 0, 10, 9, 0, 0, 12] + * ``` + * + * @param indices The tensor contains the indices into the output tensor. + * @param updates The tensor contains the value for the indices. + * @param shape: The shape of the output tensor. 
+ * + * @doc {heading: 'Operations', subheading: 'Slicing and Joining'} + */ + function scatterND_(indices, updates, shape) { + assertNonNegativeIntegerDimensions(shape); + const $indices = convertToTensor(indices, 'indices', 'scatterND', 'int32'); + const $updates = convertToTensor(updates, 'updates', 'scatterND'); + validateInput$1($updates, $indices, shape); + const inputs = { indices: $indices, updates: $updates }; + const attrs = { shape }; + // tslint:disable-next-line: no-unnecessary-type-assertion + return ENGINE.runKernel(ScatterNd, inputs, attrs); + } + const scatterND = /* @__PURE__ */ op({ scatterND_ }); + + /** + * Validate sparseToDense inputs. + * + * @param sparseIndices A 0-D, 1-D, or 2-D Tensor of type int32. + * sparseIndices[i] contains the complete index where sparseValues[i] will be + * placed. + * @param sparseValues A 0-D or 1-D Tensor. Values + * corresponding to each row of sparseIndices, or a scalar value to be used for + * all sparse indices. + * @param outputShape number[]. Shape of the dense output tensor. + * @param validateIndices boolean. indice validation is not supported, error + * will be thrown if it is set. + */ + function validateInput(sparseIndices, sparseValues, outputShape, defaultValues) { + if (sparseIndices.dtype !== 'int32') { + throw new Error('tf.sparseToDense() expects the indices to be int32 type,' + + ` but the dtype was ${sparseIndices.dtype}.`); + } + if (sparseIndices.rank > 2) { + throw new Error('sparseIndices should be a scalar, vector, or matrix,' + + ` but got shape ${sparseIndices.shape}.`); + } + const numElems = sparseIndices.rank > 0 ? sparseIndices.shape[0] : 1; + const numDims = sparseIndices.rank > 1 ? 
sparseIndices.shape[1] : 1; + if (outputShape.length !== numDims) { + throw new Error('outputShape has incorrect number of elements:,' + + ` ${outputShape.length}, should be: ${numDims}.`); + } + const numValues = sparseValues.size; + if (!(sparseValues.rank === 0 || + sparseValues.rank === 1 && numValues === numElems)) { + throw new Error('sparseValues has incorrect shape ' + + `${sparseValues.shape}, should be [] or [${numElems}]`); + } + if (sparseValues.dtype !== defaultValues.dtype) { + throw new Error('sparseValues.dtype must match defaultValues.dtype'); + } + } + + /** + * @license + * Copyright 2018 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + /** + * Converts a sparse representation into a dense tensor. + * + * Builds an array dense with shape outputShape such that: + * + * // If sparseIndices is scalar + * dense[i] = (i == sparseIndices ? sparseValues : defaultValue) + * + * // If sparseIndices is a vector, then for each i + * dense[sparseIndices[i]] = sparseValues[i] + * + * // If sparseIndices is an n by d matrix, then for each i in [0, n) + * dense[sparseIndices[i][0], ..., sparseIndices[i][d-1]] = sparseValues[i] + * All other values in dense are set to defaultValue. If sparseValues is a + * scalar, all sparse indices are set to this single value. 
+ * + * If indices are repeated the final value is summed over all values for those + * indices. + * + * ```js + * const indices = tf.tensor1d([4, 5, 6, 1, 2, 3], 'int32'); + * const values = tf.tensor1d([10, 11, 12, 13, 14, 15], 'float32'); + * const shape = [8]; + * tf.sparseToDense(indices, values, shape).print(); + * ``` + * + * @param sparseIndices A 0-D, 1-D, or 2-D Tensor of type int32. + * sparseIndices[i] contains the complete index where sparseValues[i] will be + * placed. + * @param sparseValues A 0-D or 1-D Tensor. Values + * corresponding to each row of sparseIndices, or a scalar value to be used for + * all sparse indices. + * @param outputShape Shape of the dense output tensor. The type is inferred. + * @param defaultValue Scalar. Value to set for indices not specified in + * sparseIndices. Defaults to zero. + * + * @doc {heading: 'Operations', subheading: 'Normalization'} + */ + function sparseToDense_(sparseIndices, sparseValues, outputShape, defaultValue = 0) { + assertNonNegativeIntegerDimensions(outputShape); + const $sparseIndices = convertToTensor(sparseIndices, 'sparseIndices', 'sparseToDense', 'int32'); + const $sparseValues = convertToTensor(sparseValues, 'sparseValues', 'sparseToDense', 'string_or_numeric'); + const $defaultValue = convertToTensor(defaultValue, 'defaultValue', 'sparseToDense', $sparseValues.dtype); + validateInput($sparseIndices, $sparseValues, outputShape, $defaultValue); + const inputs = { + sparseIndices: $sparseIndices, + sparseValues: $sparseValues, + defaultValue: $defaultValue + }; + const attrs = { outputShape }; + return ENGINE.runKernel(SparseToDense, inputs, attrs); + } + const sparseToDense$2 = /* @__PURE__ */ op({ sparseToDense_ }); + + /** + * @license + * Copyright 2018 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + /** + * Gather slices from input tensor into a Tensor with shape specified by + * `indices`. + * + * `indices` is a K-dimensional integer tensor, best thought of as a + * (K-1)-dimensional tensor of indices into input, where each element defines a + * slice of input: + * output[\\(i_0, ..., i_{K-2}\\)] = input[indices[\\(i_0, ..., i_{K-2}\\)]] + * + * Whereas in `tf.gather`, `indices` defines slices into the first dimension of + * input, in `tf.gatherND`, `indices` defines slices into the first N dimensions + * of input, where N = indices.shape[-1]. + * + * The last dimension of indices can be at most the rank of input: + * indices.shape[-1] <= input.rank + * + * The last dimension of `indices` corresponds to elements + * (if indices.shape[-1] == input.rank) or slices + * (if indices.shape[-1] < input.rank) along dimension indices.shape[-1] of + * input. + * The output tensor has shape + * indices.shape[:-1] + input.shape[indices.shape[-1]:] + * + * Note that on CPU, if an out of bound index is found, an error is returned. On + * GPU, if an out of bound index is found, a 0 is stored in the corresponding + * output value. + * + * ```js + * const indices = tf.tensor2d([0, 1, 1, 0], [2,2], 'int32'); + * const input = tf.tensor2d([9, 10, 11, 12], [2, 2]); + * tf.gatherND(input, indices).print() // [10, 11] + * ``` + * + * @param x The tensor from which to gather values. + * @param indices Index tensor, must be of type int32. 
+ * + * @doc {heading: 'Operations', subheading: 'Slicing and Joining'} + */ + function gatherND_(x, indices) { + const $indices = convertToTensor(indices, 'indices', 'gatherND', 'int32'); + const $x = convertToTensor(x, 'x', 'gatherND', 'string_or_numeric'); + const inputs = { params: $x, indices: $indices }; + return ENGINE.runKernel(GatherNd, inputs); + } + const gatherND = /* @__PURE__ */ op({ gatherND_ }); + + /** + * @license + * Copyright 2019 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + /** + * Normalize noise shape based on provided tensor and noise shape. + * + * @param x Tensor. + * @param noiseShape The shape for the randomly generated keep/drop flags, as + * an array of numbers. Optional. + * @returns Normalized noise shape. + */ + function getNoiseShape(x, noiseShape) { + if (noiseShape == null) { + return x.shape.slice(); + } + if (arraysEqual(x.shape, noiseShape)) { + return noiseShape; + } + if (x.shape.length === noiseShape.length) { + const newDimension = []; + for (let i = 0; i < x.shape.length; i++) { + if (noiseShape[i] == null && x.shape[i] != null) { + newDimension.push(x.shape[i]); + } + else { + newDimension.push(noiseShape[i]); + } + } + return newDimension; + } + return noiseShape; + } + + /** + * @license + * Copyright 2018 Google LLC. All Rights Reserved. 
+ * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + /** + * Computes dropout. + * + * ```js + * const x = tf.tensor1d([1, 2, 2, 1]); + * const rate = 0.75; + * const output = tf.dropout(x, rate); + * output.print(); + * ``` + * + * @param x A floating point Tensor or TensorLike. + * @param rate A float in the range [0, 1). The probability that each element + * of x is discarded. + * @param noiseShape An array of numbers of type int32, representing the + * shape for randomly generated keep/drop flags. If the noiseShape has null + * value, it will be automatically replaced with the x's relative dimension + * size. Optional. + * @param seed Used to create random seeds. Optional. + * @returns A Tensor of the same shape of x. + * + * @doc {heading: 'Operations', subheading: 'Dropout'} + */ + function dropout_(x, rate, noiseShape, seed) { + const $x = convertToTensor(x, 'x', 'dropout'); + assert$1($x.dtype === 'float32', () => `x has to be a floating point tensor since it's going to be ` + + `scaled, but got a ${$x.dtype} tensor instead.`); + assert$1(rate >= 0 && rate < 1, () => `rate must be a float in the range [0, 1), but got ${rate}.`); + if (rate === 0) { + return x instanceof Tensor ? 
$x.clone() : $x; + } + const $noiseShape = getNoiseShape($x, noiseShape); + const keepProb = 1 - rate; + const multiplier = div$1(floor$2(add$3(randomUniform$1($noiseShape, 0, 1, 'float32', seed), keepProb)), keepProb); + return mul($x, multiplier); + } + const dropout$2 = /* @__PURE__ */ op({ dropout_ }); + + /** + * @license + * Copyright 2019 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + function enclosingPowerOfTwo(value) { + // Return 2**N for integer N such that 2**N >= value. + return Math.floor(Math.pow(2, Math.ceil(Math.log(value) / Math.log(2.0)))); + } + function cosineWindow(windowLength, a, b) { + const even = 1 - windowLength % 2; + const newValues = new Float32Array(windowLength); + for (let i = 0; i < windowLength; ++i) { + const cosArg = (2.0 * Math.PI * i) / (windowLength + even - 1); + newValues[i] = a - b * Math.cos(cosArg); + } + return tensor1d(newValues, 'float32'); + } + + /** + * @license + * Copyright 2019 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + /** + * Returns whether the targets are in the top K predictions. + * + * ```js + * const predictions = tf.tensor2d([[20, 10, 40, 30], [30, 50, -20, 10]]); + * const targets = tf.tensor1d([2, 0]); + * const precision = await tf.inTopKAsync(predictions, targets); + * precision.print(); + * ``` + * @param predictions 2-D or higher `tf.Tensor` with last dimension being + * at least `k`. + * @param targets 1-D or higher `tf.Tensor`. + * @param k Optional Number of top elements to look at for computing precision, + * default to 1. 
+ * + * @doc {heading: 'Operations', subheading: 'Evaluation'} + */ + async function inTopKAsync_(predictions, targets, k = 1) { + const $predictions = convertToTensor(predictions, 'predictions', 'inTopK'); + const $targets = convertToTensor(targets, 'targets', 'inTopK'); + assert$1($predictions.rank > 1, () => 'inTopK() expects the predictions to be of rank 2 or higher, ' + + `but got ${$predictions.rank}`); + assert$1($predictions.rank - 1 === $targets.rank, () => `predictions rank should be 1 larger than ` + + `targets rank, but got predictions rank ` + + `${$predictions.rank} and targets rank ${$targets.rank}`); + assertShapesMatch($predictions.shape.slice(0, $predictions.shape.length - 1), $targets.shape, `predictions's shape should be align with the targets' shape, ` + + 'except the last dimension.'); + const lastDim = $predictions.shape[$predictions.shape.length - 1]; + assert$1(k > 0 && k <= lastDim, () => `'k' passed to inTopK() must be > 0 && <= the predictions last ` + + `dimension (${lastDim}), but got ${k}`); + const predictionsVals = await $predictions.data(); + const targetsVals = await $targets.data(); + // Reshape predictionsVals into a 2d tensor [batch, lastDim] + // and look up topK along lastDim. + const [batch, size] = [predictionsVals.length / lastDim, lastDim]; + const precision = getTypedArrayFromDType('bool', batch); + for (let b = 0; b < batch; b++) { + const offset = b * size; + const vals = predictionsVals.subarray(offset, offset + size); + const valAndInd = []; + for (let i = 0; i < vals.length; i++) { + valAndInd.push({ value: vals[i], index: i }); + } + valAndInd.sort((a, b) => b.value - a.value); + precision[b] = 0; + for (let i = 0; i < k; i++) { + if (valAndInd[i].index === targetsVals[b]) { + precision[b] = 1; + break; + } + } + } + if (predictions !== $predictions) { + $predictions.dispose(); + } + if (targets !== $targets) { + $targets.dispose(); + } + // Output precision has the same shape as targets. 
+ return tensor(precision, $targets.shape, 'bool'); + } + const inTopKAsync = inTopKAsync_; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + /** + * Computes the derivative of the filter of a 2D convolution. + * + * @param x The input tensor, of rank 4 or rank 3 of shape + * [batch, height, width, inChannels]. If rank 3, batch of 1 is assumed. + * @param dy The dy image, of rank 4 or rank 3, of shape + * [batch, height, width, outDepth]. If rank 3, batch of 1 is assumed. + * @param filterShape The shape of the filter, length 4, + * [filterHeight, filterWidth, inDepth, outDepth]. + * @param strides The strides of the convolution: [strideHeight, + * strideWidth]. + * @param pad A string from: 'same', 'valid'. The type of padding algorithm + * used in the forward prop of the op. + * @param dataFormat: An optional string from: "NHWC", "NCHW". Defaults to + * "NHWC". Specify the data format of the input and output data. With the + * default format "NHWC", the data is stored in the order of: [batch, + * height, width, channels]. + * @param dimRoundingMode A string from: 'ceil', 'round', 'floor'. If none is + * provided, it will default to truncate. 
+ */ + function conv2DBackpropFilter_(x, dy, filterShape, strides, pad, dataFormat = 'NHWC', dimRoundingMode) { + let x4D = x; + if (x.rank === 3) { + x4D = reshape$3(x, [1, x.shape[0], x.shape[1], x.shape[2]]); + } + let dy4D = dy; + if (dy4D.rank === 3) { + dy4D = reshape$3(dy, [1, dy.shape[0], dy.shape[1], dy.shape[2]]); + } + assert$1(x4D.rank === 4, () => `Error in conv2dDerFilter: input must be rank 4, but got shape ` + + `${x4D.shape}.`); + assert$1(dy4D.rank === 4, () => `Error in conv2dDerFilter: dy must be rank 4, but got shape ` + + `${dy4D.shape}.`); + assert$1(filterShape.length === 4, () => `Error in conv2dDerFilter: filterShape must be length 4, but got ` + + `${filterShape}.`); + const inDepth = dataFormat === 'NHWC' ? x4D.shape[3] : x4D.shape[1]; + const outDepth = dataFormat === 'NHWC' ? dy4D.shape[3] : dy4D.shape[1]; + assert$1(inDepth === filterShape[2], () => `Error in conv2dDerFilter: depth of input ${inDepth}) must ` + + `match input depth in filter (${filterShape[2]}.`); + assert$1(outDepth === filterShape[3], () => `Error in conv2dDerFilter: depth of dy (${outDepth}) must ` + + `match output depth for filter (${filterShape[3]}).`); + checkPadOnDimRoundingMode('conv2dDerFilter', pad, dimRoundingMode); + const inputs = { x: x4D, dy: dy4D }; + const attrs = { strides, pad, dataFormat, dimRoundingMode, filterShape }; + // tslint:disable-next-line: no-unnecessary-type-assertion + return ENGINE.runKernel(Conv2DBackpropFilter, inputs, attrs); + } + const conv2DBackpropFilter$2 = /* @__PURE__ */ op({ conv2DBackpropFilter_ }); + + /** + * @license + * Copyright 2019 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + // Returns gradient for fused activation. + function getFusedDyActivation(dy, y, activation) { + if (activation == null || activation === 'linear') { + return dy; + } + if (activation === 'relu') { + return mul(dy, step$2(y)); + } + throw new Error(`Cannot compute gradient for fused activation ${activation}.`); + } + // Returns gradient for fused bias. + function getFusedBiasGradient(bias, dyActivation) { + let res = dyActivation; + const reduceAxes = getReductionAxes(bias.shape, dyActivation.shape); + if (reduceAxes.length > 0) { + res = sum$3(res, reduceAxes); + } + return reshape$3(res, bias.shape); + } + function applyActivation$1(x, activation, preluActivationWeights, leakyreluAlpha) { + if (activation === 'linear') { + return x; + } + else if (activation === 'relu') { + return relu$2(x); + } + else if (activation === 'elu') { + return elu$4(x); + } + else if (activation === 'relu6') { + return relu6$2(x); + } + else if (activation === 'prelu') { + return prelu$3(x, preluActivationWeights); + } + else if (activation === 'leakyrelu') { + return leakyRelu$2(x, leakyreluAlpha); + } + else if (activation === 'sigmoid') { + return sigmoid$2(x); + } + throw new Error(`Unknown fused activation ${activation}.`); + } + // Whether we should call fused ops. + const shouldFuse = (gradientDepth, activation) => { + const gradientMode = gradientDepth > 0; + return !gradientMode || activation === 'linear'; + }; + + /** + * @license + * Copyright 2019 Google LLC. 
All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + /** + * Computes a 2D convolution over the input x, optionally fused with adding a + * bias and applying an activation. + * + * ```js + * const inputDepth = 2; + * const inShape = [2, 2, 2, inputDepth]; + * const outputDepth = 2; + * const fSize = 1; + * const pad = 0; + * const strides = 1; + * + * const x = tf.tensor4d( [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, + * 16], inShape); + * const w = tf.tensor4d([-1, 1, -2, 0.5], [fSize, fSize, inputDepth, + * outputDepth]); + * + * tf.fused.conv2d({ x, filter: w, strides, pad, dataFormat: 'NHWC', + * dilations: [1, 1], bias: tf.scalar(5), activation: 'relu' }).print(); + * ``` + * + * @param obj An object with the following properties: + * @param x The input tensor, of rank 4 or rank 3, of shape + * `[batch, height, width, inChannels]`. If rank 3, batch of 1 is + * assumed. + * @param filter The filter, rank 4, of shape + * `[filterHeight, filterWidth, inDepth, outDepth]`. + * @param strides The strides of the convolution: `[strideHeight, + * strideWidth]`. + * @param pad The type of padding algorithm. + * - `same` and stride 1: output will be of same size as input, + * regardless of filter size. + * - `valid` output will be smaller than input if filter is larger + * than 1x1. 
+ * - For more info, see this guide: + * [https://www.tensorflow.org/api_docs/python/tf/nn/convolution]( + * https://www.tensorflow.org/api_docs/python/tf/nn/convolution) + * @param dataFormat An optional string from: "NHWC", "NCHW". Defaults to + * "NHWC". Specify the data format of the input and output data. With the + * default format "NHWC", the data is stored in the order of: [batch, + * height, width, channels]. Only "NHWC" is currently supported. + * @param dilations The dilation rates: `[dilationHeight, dilationWidth]` + * in which we sample input values across the height and width dimensions + * in atrous convolution. Defaults to `[1, 1]`. If `dilations` is a single + * number, then `dilationHeight == dilationWidth`. If it is greater than + * 1, then all values of `strides` must be 1. + * @param dimRoundingMode A string from: 'ceil', 'round', 'floor'. If none is + * provided, it will default to truncate. + * @param bias Tensor to be added to the result. + * @param activation Name of activation kernel (defaults to `linear`) to be + * applied + * after biasAdd. + * @param preluActivationWeights Tensor of prelu weights to be applied as part + * of a `prelu` activation, typically the same shape as `x`. + * @param leakyreluAlpha Optional. Alpha to be applied as part of a `leakyrelu` + * activation. + */ + function fusedConv2d_({ x, filter, strides, pad, dataFormat = 'NHWC', dilations = [1, 1], dimRoundingMode, bias, activation = 'linear', preluActivationWeights, leakyreluAlpha }) { + activation = activation || 'linear'; + if (shouldFuse(ENGINE.state.gradientDepth, activation) === false) { + // TODO: Transpose bias and preluActivationWeights properly for NCHW + // format before computation. 
+ assert$1(dataFormat === 'NHWC', () => `Error in fused conv2d: got dataFormat of ${dataFormat} but ` + + `only NHWC is currently supported for the case of gradient depth ` + + `is 0 and the activation is not linear.`); + let result = conv2d$4(x, filter, strides, pad, dataFormat, dilations, dimRoundingMode); + if (bias != null) { + result = add$3(result, bias); + } + return applyActivation$1(result, activation, preluActivationWeights, leakyreluAlpha); + } + const $x = convertToTensor(x, 'x', 'conv2d', 'float32'); + const $filter = convertToTensor(filter, 'filter', 'conv2d', 'float32'); + let x4D = $x; + let reshapedTo4D = false; + if ($x.rank === 3) { + reshapedTo4D = true; + x4D = reshape$3($x, [1, $x.shape[0], $x.shape[1], $x.shape[2]]); + } + assert$1(x4D.rank === 4, () => `Error in fused conv2d: input must be rank 4, but got rank ` + + `${x4D.rank}.`); + assert$1($filter.rank === 4, () => `Error in fused conv2d: filter must be rank 4, but got rank ` + + `${$filter.rank}.`); + checkPadOnDimRoundingMode('fused conv2d', pad, dimRoundingMode); + const inputChannels = dataFormat === 'NHWC' ? x4D.shape[3] : x4D.shape[1]; + assert$1($filter.shape[2] === inputChannels, () => `Error in conv2d: depth of input (${inputChannels}) must match ` + + `input depth for filter ${$filter.shape[2]}.`); + assert$1(eitherStridesOrDilationsAreOne(strides, dilations), () => 'Error in conv2D: Either strides or dilations must be 1. ' + + `Got strides ${strides} and dilations '${dilations}'`); + const convInfo = computeConv2DInfo(x4D.shape, $filter.shape, strides, dilations, pad, dimRoundingMode); + let $bias; + if (bias != null) { + $bias = convertToTensor(bias, 'bias', 'fused conv2d'); + [$bias] = makeTypesMatch($bias, $x); + // According to TensorFlow, the bias is supposed be a 1-D tensor or a + // scalar. + // + // 3-D or 4-D bias is not disabled for NHWC format, because they are + // currently being used in some cases. 
For examplem in our code base, + // https://github.com/tensorflow/tfjs/blob/b53bd47e880367ae57493f0ea628abaf08db2d5d/tfjs-core/src/ops/fused/fused_conv2d_test.ts#L1972. + if (dataFormat === 'NHWC') { + assertAndGetBroadcastShape(convInfo.outShape, $bias.shape); + } + else { + assert$1($bias.shape.length <= 1, () => `Error in fused conv2d: only supports scalar or 1-D Tensor ` + + `bias for NCHW format but got the bias of ` + + `rank-${$bias.shape.length}.`); + assert$1($bias.shape.length === 0 || $bias.shape[0] === convInfo.outChannels || + $bias.shape[0] === 1, () => `Error in fused conv2d: bias shape (${$bias.shape}) is not ` + + `compatible with the number of output channels ` + + `(${convInfo.outChannels})`); + } + } + let $preluActivationWeights; + if (preluActivationWeights != null) { + // PReLU's activation weights could be a scalar, a 1-D tensor or a 3-D + // tensor. + const alphaShape = preluActivationWeights.shape; + assert$1(alphaShape.length <= 1 || alphaShape.length === 3, () => `Error in fused conv2d: only supports scalar, 1-D Tensor or ` + + `3-D Tensor PReLU activation weights but got a tensor of ` + + `rank-${alphaShape.length}.`); + if (alphaShape.length === 1) { + // Whether the data format is NCHW or NHWC, the 1-D PReLU activation + // weights tensor should be aligned with the output channels of conv2d + // result. + assert$1(alphaShape[0] === 1 || alphaShape[0] === convInfo.outChannels, () => `Error in fused conv2d: PReLU activation weights ` + + `(${alphaShape}) is not compatible with the number of output ` + + `channels (${convInfo.outChannels}).`); + } + else if (alphaShape.length === 3) { + // Whether the data format is NCHW or NHWC, the PReLU activation weights + // tensor should has the compatible shape with the result of conv2d. 
+ try { + assertAndGetBroadcastShape(alphaShape, convInfo.outShape); + } + catch (e) { + const errMsg = `Error in fused conv2d: PReLU activation weights (${alphaShape}) ` + + `is not compatible with the output shape of the conv2d ` + + `(${convInfo.outShape}).`; + throw Error(errMsg); + } + } + $preluActivationWeights = convertToTensor(preluActivationWeights, 'prelu weights', 'fused conv2d'); + } + const grad = (dy, saved) => { + assert$1(dataFormat === 'NHWC', () => `Error in gradient of fused conv2D: got dataFormat of ${dataFormat} but only NHWC is currently supported.`); + const [$filter, x4D, y, $bias] = saved; + const dyActivation = getFusedDyActivation(dy, y, activation); + assert$1(tupleValuesAreOne(dilations), () => 'Error in gradient of fused conv2D: ' + + `dilation rates greater than 1 ` + + `are not yet supported in gradients. Got dilations '${dilations}'`); + const xDer = conv2DBackpropInput$2(x4D.shape, dyActivation, $filter, strides, pad); + const filterDer = conv2DBackpropFilter$2(x4D, dyActivation, $filter.shape, strides, pad); + const der = [xDer, filterDer]; + if ($bias != null) { + const biasDer = getFusedBiasGradient($bias, dyActivation); + der.push(biasDer); + } + return der; + }; + const inputs = { + x: x4D, + filter: $filter, + bias: $bias, + preluActivationWeights: $preluActivationWeights + }; + const attrs = { + strides, + pad, + dataFormat, + dilations, + dimRoundingMode, + activation, + leakyreluAlpha + }; + // Depending on the the params passed in we will have different number of + // inputs and thus a a different number of elements in the gradient. 
+ if (bias == null) { + const customOp = customGrad((x4D, filter, save) => { + let res = + // tslint:disable-next-line: no-unnecessary-type-assertion + ENGINE.runKernel(FusedConv2D, inputs, attrs); + save([filter, x4D, res]); + if (reshapedTo4D) { + // tslint:disable-next-line: no-unnecessary-type-assertion + res = reshape$3(res, [res.shape[1], res.shape[2], res.shape[3]]); + } + return { value: res, gradFunc: grad }; + }); + return customOp(x4D, $filter); + } + else { + const customOpWithBias = customGrad((x4D, filter, bias, save) => { + let res = ENGINE.runKernel(FusedConv2D, inputs, attrs); + save([filter, x4D, res, bias]); + if (reshapedTo4D) { + // tslint:disable-next-line: no-unnecessary-type-assertion + res = reshape$3(res, [res.shape[1], res.shape[2], res.shape[3]]); + } + return { value: res, gradFunc: grad }; + }); + return customOpWithBias(x4D, $filter, $bias); + } + } + const conv2d$3 = /* @__PURE__ */ op({ fusedConv2d_ }); + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ + function depthwiseConv2dNativeBackpropFilter_(x, dy, filterShape, strides, pad, dilations = [1, 1], dimRoundingMode) { + let x4D = x; + if (x.rank === 3) { + x4D = reshape$3(x, [1, x.shape[0], x.shape[1], x.shape[2]]); + } + let dy4D = dy; + if (dy4D.rank === 3) { + dy4D = reshape$3(dy, [1, dy.shape[0], dy.shape[1], dy.shape[2]]); + } + const inputs = { x: x4D, dy: dy4D }; + const attrs = { strides, pad, dimRoundingMode, dilations, filterShape }; + // tslint:disable-next-line: no-unnecessary-type-assertion + return ENGINE.runKernel(DepthwiseConv2dNativeBackpropFilter, inputs, attrs); + } + const depthwiseConv2dNativeBackpropFilter$2 = op({ depthwiseConv2dNativeBackpropFilter_ }); + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ + function depthwiseConv2dNativeBackpropInput_(xShape, dy, filter, strides, pad, dilations = [1, 1], dimRoundingMode) { + let dy4D = dy; + let reshapedTo4D = false; + if (dy.rank === 3) { + reshapedTo4D = true; + dy4D = reshape$3(dy, [1, dy.shape[0], dy.shape[1], dy.shape[2]]); + } + const inputs = { dy: dy4D, filter }; + const attrs = { strides, pad, dimRoundingMode, dilations, inputShape: xShape }; + const res = + // tslint:disable-next-line: no-unnecessary-type-assertion + ENGINE.runKernel(DepthwiseConv2dNativeBackpropInput, inputs, attrs); + if (reshapedTo4D) { + return reshape$3(res, [res.shape[1], res.shape[2], res.shape[3]]); + } + return res; + } + const depthwiseConv2dNativeBackpropInput$2 = op({ depthwiseConv2dNativeBackpropInput_ }); + + /** + * @license + * Copyright 2019 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + /** + * Computes depthwise 2D convolution, optionally fused with adding a + * bias and applying an activation. 
+ * + * Given a 4D `input` array and a `filter` array of shape + * `[filterHeight, filterWidth, inChannels, channelMultiplier]` containing + * `inChannels` convolutional filters of depth 1, this op applies a + * different filter to each input channel (expanding from 1 channel to + * `channelMultiplier` channels for each), then concatenates the results + * together. The output has `inChannels * channelMultiplier` channels. + * + * See + * [https://www.tensorflow.org/api_docs/python/tf/nn/depthwise_conv2d]( + * https://www.tensorflow.org/api_docs/python/tf/nn/depthwise_conv2d) + * for more details. + * + * @param obj An object with the following properties: + * @param x The input tensor, of rank 4 or rank 3, of shape + * `[batch, height, width, inChannels]`. If rank 3, batch of 1 is + * assumed. + * @param filter The filter tensor, rank 4, of shape + * `[filterHeight, filterWidth, inChannels, channelMultiplier]`. + * @param strides The strides of the convolution: `[strideHeight, + * strideWidth]`. If strides is a single number, then `strideHeight == + * strideWidth`. + * @param pad The type of padding algorithm. + * - `same` and stride 1: output will be of same size as input, + * regardless of filter size. + * - `valid`: output will be smaller than input if filter is larger + * than 1x1. + * - For more info, see this guide: + * [https://www.tensorflow.org/api_docs/python/tf/nn/convolution]( + * https://www.tensorflow.org/api_docs/python/tf/nn/convolution) + * @param dilations The dilation rates: `[dilationHeight, dilationWidth]` + * in which we sample input values across the height and width dimensions + * in atrous convolution. Defaults to `[1, 1]`. If `rate` is a single + * number, then `dilationHeight == dilationWidth`. If it is greater than + * 1, then all values of `strides` must be 1. + * @param dataFormat: An optional string from: "NHWC", "NCHW". Defaults to + * "NHWC". Specify the data format of the input and output data. 
With the + * default format "NHWC", the data is stored in the order of: [batch, + * height, width, channels]. Only "NHWC" is currently supported. + * @param dimRoundingMode A string from: 'ceil', 'round', 'floor'. If none is + * provided, it will default to truncate. + * @param bias Tensor to be added to the result. + * @param activation Name of activation kernel (defaults to `linear`). + * @param preluActivationWeights Tensor of prelu weights to be applied as part + * of a `prelu` activation, typically the same shape as `x`. + * @param leakyreluAlpha Optional. Alpha to be applied as part of a `leakyrelu` + * activation. + */ + function fusedDepthwiseConv2d_({ x, filter, strides, pad, dataFormat = 'NHWC', dilations = [1, 1], dimRoundingMode, bias, activation = 'linear', preluActivationWeights, leakyreluAlpha }) { + if (shouldFuse(ENGINE.state.gradientDepth, activation) === false) { + let result = depthwiseConv2d$3(x, filter, strides, pad, dataFormat, dilations, dimRoundingMode); + if (bias != null) { + result = add$3(result, bias); + } + return applyActivation$1(result, activation, preluActivationWeights, leakyreluAlpha); + } + const $x = convertToTensor(x, 'x', 'depthwiseConv2d', 'float32'); + const $filter = convertToTensor(filter, 'filter', 'depthwiseConv2d', 'float32'); + let x4D = $x; + let reshapedTo4D = false; + if ($x.rank === 3) { + reshapedTo4D = true; + x4D = reshape$3($x, [1, $x.shape[0], $x.shape[1], $x.shape[2]]); + } + assert$1(x4D.rank === 4, () => `Error in fused depthwiseConv2d: input must be rank 4, but got ` + + `rank ${x4D.rank}.`); + assert$1($filter.rank === 4, () => `Error in fused depthwiseConv2d: filter must be rank 4, ` + + `but got rank ${$filter.rank}.`); + assert$1(x4D.shape[3] === $filter.shape[2], () => `Error in fused depthwiseConv2d: number of input channels ` + + `(${x4D.shape[3]}) must match the inChannels dimension in ` + + `filter ${$filter.shape[2]}.`); + if (dilations == null) { + dilations = [1, 1]; + } + 
assert$1(eitherStridesOrDilationsAreOne(strides, dilations), () => 'Error in fused depthwiseConv2d: Either strides or dilations must ' + + `be 1. Got strides ${strides} and dilations '${dilations}'`); + checkPadOnDimRoundingMode('fused depthwiseConv2d', pad, dimRoundingMode); + const convInfo = computeConv2DInfo(x4D.shape, $filter.shape, strides, dilations, pad, dimRoundingMode, true /* depthwise */); + let $bias; + if (bias != null) { + $bias = convertToTensor(bias, 'bias', 'fused conv2d'); + [$bias] = makeTypesMatch($bias, $x); + assertAndGetBroadcastShape(convInfo.outShape, $bias.shape); + } + let $preluActivationWeights; + if (preluActivationWeights != null) { + $preluActivationWeights = convertToTensor(preluActivationWeights, 'prelu weights', 'fused depthwiseConv2d'); + } + const grad = (dy, saved) => { + assert$1(tupleValuesAreOne(dilations), () => 'Error in gradient of fused depthwiseConv2d: dilation rates ' + + `greater than 1 are not yet supported. Got dilations ` + + `'${dilations}'`); + const [$filter, x4D, y, bias] = saved; + const dyActivation = getFusedDyActivation(dy, y, activation); + const xDer = depthwiseConv2dNativeBackpropInput$2(x4D.shape, dyActivation, $filter, strides, pad, dilations, dimRoundingMode); + const filterDer = depthwiseConv2dNativeBackpropFilter$2(x4D, dyActivation, $filter.shape, strides, pad, dilations, dimRoundingMode); + if (bias != null) { + const biasDer = getFusedBiasGradient($bias, dyActivation); + return [xDer, filterDer, biasDer]; + } + return [xDer, filterDer]; + }; + const inputs = { + x: x4D, + filter: $filter, + bias: $bias, + preluActivationWeights: $preluActivationWeights + }; + const attrs = { + strides, + pad, + dataFormat, + dilations, + dimRoundingMode, + activation, + leakyreluAlpha + }; + // Depending on the the params passed in we will have different number of + // inputs and thus a a different number of elements in the gradient. 
+ if (bias == null) { + const customOp = customGrad((x4D, filter, save) => { + // tslint:disable-next-line: no-unnecessary-type-assertion + let res = ENGINE.runKernel(FusedDepthwiseConv2D, inputs, attrs); + save([filter, x4D, res]); + if (reshapedTo4D) { + // tslint:disable-next-line: no-unnecessary-type-assertion + res = reshape$3(res, [res.shape[1], res.shape[2], res.shape[3]]); + } + return { value: res, gradFunc: grad }; + }); + return customOp(x4D, $filter); + } + else { + const customOpWithBias = customGrad((x4D, filter, bias, save) => { + // tslint:disable-next-line: no-unnecessary-type-assertion + let res = ENGINE.runKernel(FusedDepthwiseConv2D, inputs, attrs); + save([filter, x4D, res, bias]); + if (reshapedTo4D) { + // tslint:disable-next-line: no-unnecessary-type-assertion + res = reshape$3(res, [res.shape[1], res.shape[2], res.shape[3]]); + } + return { value: res, gradFunc: grad }; + }); + return customOpWithBias(x4D, $filter, $bias); + } + } + const depthwiseConv2d$2 = /* @__PURE__ */ op({ fusedDepthwiseConv2d_ }); + + /** + * @license + * Copyright 2019 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + /** + * Computes the dot product of two matrices with optional activation and bias. 
+ * + * ```js + * const a = tf.tensor2d([-1, -2], [1, 2]); + * const b = tf.tensor2d([1, 2, 3, 4], [2, 2]); + * const bias = tf.tensor2d([1, 2], [1, 2]); + * + * tf.fused.matMul({a, b, bias, activation: 'relu'}).print(); + * ``` + * + * @param obj An object with the following properties: + * - `a` First matrix in dot product operation. + * - `b` Second matrix in dot product operation. + * - `transposeA` If true, `a` is transposed before multiplication. + * - `transposeB` If true, `b` is transposed before multiplication. + * - `bias` Matrix to be added to the result. + * - `activation` Name of activation kernel (defaults to `linear`). + * - `preluActivationWeights` Tensor of prelu weights. + * - `leakyreluAlpha` Alpha of leakyrelu. + */ + function fusedMatMul_({ a, b, transposeA = false, transposeB = false, bias, activation = 'linear', preluActivationWeights, leakyreluAlpha = 0.2, }) { + if (shouldFuse(ENGINE.state.gradientDepth, activation) === false) { + let result = matMul$1(a, b, transposeA, transposeB); + if (bias != null) { + result = add$3(result, bias); + } + return applyActivation$1(result, activation, preluActivationWeights, leakyreluAlpha); + } + let $a = convertToTensor(a, 'a', 'fused matMul'); + let $b = convertToTensor(b, 'b', 'fused matMul'); + [$a, $b] = makeTypesMatch($a, $b); + const innerShapeA = transposeA ? $a.shape[$a.rank - 2] : $a.shape[$a.rank - 1]; + const innerShapeB = transposeB ? $b.shape[$b.rank - 1] : $b.shape[$b.rank - 2]; + const outerShapeA = transposeA ? $a.shape[$a.rank - 1] : $a.shape[$a.rank - 2]; + const outerShapeB = transposeB ? 
$b.shape[$b.rank - 2] : $b.shape[$b.rank - 1]; + const outerDimsA = $a.shape.slice(0, -2); + const outerDimsB = $b.shape.slice(0, -2); + const batchDimA = sizeFromShape(outerDimsA); + const batchDimB = sizeFromShape(outerDimsB); + assert$1(innerShapeA === innerShapeB, () => `Error in fused matMul: inner shapes (${innerShapeA}) and (` + + `${innerShapeB}) of Tensors with shapes ${$a.shape} and ` + + `${$b.shape} and transposeA=${transposeA}` + + ` and transposeB=${transposeB} must match.`); + const outShapeOuterDims = assertAndGetBroadcastShape($a.shape.slice(0, -2), $b.shape.slice(0, -2)); + const outShape = outShapeOuterDims.concat([outerShapeA, outerShapeB]); + const a3D = transposeA ? + reshape$3($a, [batchDimA, innerShapeA, outerShapeA]) : + reshape$3($a, [batchDimA, outerShapeA, innerShapeA]); + const b3D = transposeB ? + reshape$3($b, [batchDimB, outerShapeB, innerShapeB]) : + reshape$3($b, [batchDimB, innerShapeB, outerShapeB]); + let $bias; + if (bias != null) { + $bias = convertToTensor(bias, 'bias', 'fused matMul'); + [$bias] = makeTypesMatch($bias, $a); + assertAndGetBroadcastShape(outShape, $bias.shape); + } + let $preluActivationWeights; + if (preluActivationWeights != null) { + $preluActivationWeights = convertToTensor(preluActivationWeights, 'prelu weights', 'fused matMul'); + } + const grad = (dy, saved) => { + const [a3D, b3D, y, $bias] = saved; + // we reshape dy because the result of the forward is not + // necessarily going to be a 3d tensor due to a reshape done at the end of + // the customOp. 
+ const dyActivation = getFusedDyActivation(reshape$3(dy, y.shape), y, activation); + let aDer; + let bDer; + if (!transposeA && !transposeB) { + aDer = matMul$1(dyActivation, b3D, false, true); + bDer = matMul$1(a3D, dyActivation, true, false); + } + else if (!transposeA && transposeB) { + aDer = matMul$1(dyActivation, b3D, false, false); + bDer = matMul$1(dyActivation, a3D, true, false); + } + else if (transposeA && !transposeB) { + aDer = matMul$1(b3D, dyActivation, false, true); + bDer = matMul$1(a3D, dyActivation, false, false); + } + else { + aDer = matMul$1(b3D, dyActivation, true, true); + bDer = matMul$1(dyActivation, a3D, true, true); + } + if (bias != null) { + const biasDer = getFusedBiasGradient($bias, dyActivation); + return [aDer, bDer, biasDer]; + } + else { + return [aDer, bDer]; + } + }; + const inputs = { + a: a3D, + b: b3D, + bias: $bias, + preluActivationWeights: $preluActivationWeights + }; + const attrs = { transposeA, transposeB, activation, leakyreluAlpha }; + // Depending on the the params passed in we will have different number of + // inputs and thus a a different number of elements in the gradient. + if (bias == null) { + const customOp = customGrad((a3D, b3D, save) => { + const res = + // tslint:disable-next-line: no-unnecessary-type-assertion + ENGINE.runKernel(_FusedMatMul, inputs, attrs); + save([a3D, b3D, res]); + return { value: reshape$3(res, outShape), gradFunc: grad }; + }); + return customOp(a3D, b3D); + } + else { + const customOpWithBias = customGrad((a3D, b3D, $bias, save) => { + const res = + // tslint:disable-next-line: no-unnecessary-type-assertion + ENGINE.runKernel(_FusedMatMul, inputs, attrs); + save([a3D, b3D, res, $bias]); + return { value: reshape$3(res, outShape), gradFunc: grad }; + }); + return customOpWithBias(a3D, b3D, $bias); + } + } + const matMul = /* @__PURE__ */ op({ fusedMatMul_ }); + + /** + * @license + * Copyright 2019 Google LLC. All Rights Reserved. 
+ * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + + var fused_ops = /*#__PURE__*/Object.freeze({ + __proto__: null, + conv2d: conv2d$3, + depthwiseConv2d: depthwiseConv2d$2, + matMul: matMul + }); + + /** + * @license + * Copyright 2019 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + /** + * Generate a hamming window. 
+ * + * See: https://en.wikipedia.org/wiki/Window_function#Hann_and_Hamming_windows + * + * ```js + * tf.signal.hammingWindow(10).print(); + * ``` + * @param The length of window + * + * @doc {heading: 'Operations', subheading: 'Signal', namespace: 'signal'} + */ + function hammingWindow_(windowLength) { + return cosineWindow(windowLength, 0.54, 0.46); + } + const hammingWindow = /* @__PURE__ */ op({ hammingWindow_ }); + + /** + * @license + * Copyright 2019 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + /** + * Generate a Hann window. + * + * See: https://en.wikipedia.org/wiki/Window_function#Hann_and_Hamming_windows + * + * ```js + * tf.signal.hannWindow(10).print(); + * ``` + * @param The length of window + * + * @doc {heading: 'Operations', subheading: 'Signal', namespace: 'signal'} + */ + function hannWindow_(windowLength) { + return cosineWindow(windowLength, 0.5, 0.5); + } + const hannWindow = /* @__PURE__ */ op({ hannWindow_ }); + + /** + * @license + * Copyright 2019 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + /** + * Expands input into frames of frameLength. + * Slides a window size with frameStep. + * + * ```js + * tf.signal.frame([1, 2, 3], 2, 1).print(); + * ``` + * @param signal The input tensor to be expanded + * @param frameLength Length of each frame + * @param frameStep The frame hop size in samples. + * @param padEnd Whether to pad the end of signal with padValue. + * @param padValue A number to use where the input signal does + * not exist when padEnd is True. + * + * @doc {heading: 'Operations', subheading: 'Signal', namespace: 'signal'} + */ + function frame_(signal, frameLength, frameStep, padEnd = false, padValue = 0) { + let start = 0; + const output = []; + while (start + frameLength <= signal.size) { + output.push(slice$2(signal, start, frameLength)); + start += frameStep; + } + if (padEnd) { + while (start < signal.size) { + const padLen = (start + frameLength) - signal.size; + const pad = concat$2([ + slice$2(signal, start, frameLength - padLen), fill$2([padLen], padValue) + ]); + output.push(pad); + start += frameStep; + } + } + if (output.length === 0) { + return tensor2d([], [0, frameLength]); + } + return reshape$3(concat$2(output), [output.length, frameLength]); + } + const frame = /* @__PURE__ */ op({ frame_ }); + + /** + * @license + * Copyright 2019 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + /** + * Computes the Short-time Fourier Transform of signals + * See: https://en.wikipedia.org/wiki/Short-time_Fourier_transform + * + * ```js + * const input = tf.tensor1d([1, 1, 1, 1, 1]) + * tf.signal.stft(input, 3, 1).print(); + * ``` + * @param signal 1-dimensional real value tensor. + * @param frameLength The window length of samples. + * @param frameStep The number of samples to step. + * @param fftLength The size of the FFT to apply. + * @param windowFn A callable that takes a window length and returns 1-d tensor. + * + * @doc {heading: 'Operations', subheading: 'Signal', namespace: 'signal'} + */ + function stft_(signal, frameLength, frameStep, fftLength, windowFn = hannWindow) { + if (fftLength == null) { + fftLength = enclosingPowerOfTwo(frameLength); + } + const framedSignal = frame(signal, frameLength, frameStep); + const windowedSignal = mul(framedSignal, windowFn(frameLength)); + return rfft(windowedSignal, fftLength); + } + const stft = /* @__PURE__ */ op({ stft_ }); + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + /** + * Extracts crops from the input image tensor and resizes them using bilinear + * sampling or nearest neighbor sampling (possibly with aspect ratio change) + * to a common output size specified by cropSize. + * + * @param image 4d tensor of shape `[batch,imageHeight,imageWidth, depth]`, + * where imageHeight and imageWidth must be positive, specifying the + * batch of images from which to take crops + * @param boxes 2d float32 tensor of shape `[numBoxes, 4]`. Each entry is + * `[y1, x1, y2, x2]`, where `(y1, x1)` and `(y2, x2)` are the normalized + * coordinates of the box in the `boxInd[i]`th image in the batch + * @param boxInd 1d int32 tensor of shape `[numBoxes]` with values in range + * `[0, batch)` that specifies the image that the `i`-th box refers to. + * @param cropSize 1d int32 tensor of 2 elements `[cropHeigh, cropWidth]` + * specifying the size to which all crops are resized to. + * @param method Optional string from `'bilinear' | 'nearest'`, + * defaults to bilinear, which specifies the sampling method for resizing + * @param extrapolationValue A threshold for deciding when to remove boxes based + * on score. Defaults to 0. 
+ * @return A 4D tensor of the shape `[numBoxes,cropHeight,cropWidth,depth]` + * + * @doc {heading: 'Operations', subheading: 'Images', namespace: 'image'} + */ + function cropAndResize_(image, boxes, boxInd, cropSize, method = 'bilinear', extrapolationValue = 0) { + const $image = convertToTensor(image, 'image', 'cropAndResize'); + const $boxes = convertToTensor(boxes, 'boxes', 'cropAndResize', 'float32'); + const $boxInd = convertToTensor(boxInd, 'boxInd', 'cropAndResize', 'int32'); + const numBoxes = $boxes.shape[0]; + assert$1($image.rank === 4, () => 'Error in cropAndResize: image must be rank 4,' + + `but got rank ${$image.rank}.`); + assert$1($boxes.rank === 2 && $boxes.shape[1] === 4, () => `Error in cropAndResize: boxes must be have size [${numBoxes},4] ` + + `but had shape ${$boxes.shape}.`); + assert$1($boxInd.rank === 1 && $boxInd.shape[0] === numBoxes, () => `Error in cropAndResize: boxInd must be have size [${numBoxes}] ` + + `but had shape ${$boxes.shape}.`); + assert$1(cropSize.length === 2, () => `Error in cropAndResize: cropSize must be of length 2, but got ` + + `length ${cropSize.length}.`); + assert$1(cropSize[0] >= 1 && cropSize[1] >= 1, () => `cropSize must be atleast [1,1], but was ${cropSize}`); + assert$1(method === 'bilinear' || method === 'nearest', () => `method must be bilinear or nearest, but was ${method}`); + const inputs = { image: $image, boxes: $boxes, boxInd: $boxInd }; + const attrs = { method, extrapolationValue, cropSize }; + const res = ENGINE.runKernel(CropAndResize, inputs, attrs); + return res; + } + const cropAndResize$3 = /* @__PURE__ */ op({ cropAndResize_ }); + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + /** + * Flips the image left to right. Currently available in the CPU, WebGL, and + * WASM backends. + * + * @param image 4d tensor of shape `[batch, imageHeight, imageWidth, depth]`. + */ + /** @doc {heading: 'Operations', subheading: 'Images', namespace: 'image'} */ + function flipLeftRight_(image) { + const $image = convertToTensor(image, 'image', 'flipLeftRight', 'float32'); + assert$1($image.rank === 4, () => 'Error in flipLeftRight: image must be rank 4,' + + `but got rank ${$image.rank}.`); + const inputs = { image: $image }; + const res = ENGINE.runKernel(FlipLeftRight, inputs, {}); + return res; + } + const flipLeftRight = /* @__PURE__ */ op({ flipLeftRight_ }); + + /** + * @license + * Copyright 2021 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + /** + * Converts images from grayscale to RGB format. 
+ * + * @param image A grayscale tensor to convert. The `image`'s last dimension must + * be size 1 with at least a two-dimensional shape. + * + * @doc {heading: 'Operations', subheading: 'Images', namespace: 'image'} + */ + function grayscaleToRGB_(image) { + const $image = convertToTensor(image, 'image', 'grayscaleToRGB'); + const lastDimsIdx = $image.rank - 1; + const lastDims = $image.shape[lastDimsIdx]; + assert$1($image.rank >= 2, () => 'Error in grayscaleToRGB: images must be at least rank 2, ' + + `but got rank ${$image.rank}.`); + assert$1(lastDims === 1, () => 'Error in grayscaleToRGB: last dimension of a grayscale image ' + + `should be size 1, but got size ${lastDims}.`); + const reps = new Array($image.rank); + reps.fill(1, 0, lastDimsIdx); + reps[lastDimsIdx] = 3; + return tile$3($image, reps); + } + const grayscaleToRGB = /* @__PURE__ */ op({ grayscaleToRGB_ }); + + /** + * @license + * Copyright 2023 Google LLC. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + /** + * Converts images from RGB format to grayscale. + * + * @param image A RGB tensor to convert. The `image`'s last dimension must + * be size 3 with at least a two-dimensional shape. 
+ * + * @doc {heading: 'Operations', subheading: 'Images', namespace: 'image'} + */ + function rgbToGrayscale_(image) { + const $image = convertToTensor(image, 'image', 'RGBToGrayscale'); + const lastDimsIdx = $image.rank - 1; + const lastDims = $image.shape[lastDimsIdx]; + assert$1($image.rank >= 2, () => 'Error in RGBToGrayscale: images must be at least rank 2, ' + + `but got rank ${$image.rank}.`); + assert$1(lastDims === 3, () => 'Error in RGBToGrayscale: last dimension of an RGB image ' + + `should be size 3, but got size ${lastDims}.`); + // Remember original dtype so we can convert back if needed + const origDtype = $image.dtype; + const fltImage = cast$3($image, 'float32'); + const rgbWeights = tensor1d([0.2989, 0.5870, 0.1140]); + let grayFloat; + switch ($image.rank) { + case 2: + grayFloat = einsum$2('ij,j->i', fltImage, rgbWeights); + break; + case 3: + grayFloat = einsum$2('ijk,k->ij', fltImage, rgbWeights); + break; + case 4: + grayFloat = einsum$2('ijkl,l->ijk', fltImage, rgbWeights); + break; + case 5: + grayFloat = einsum$2('ijklm,m->ijkl', fltImage, rgbWeights); + break; + case 6: + grayFloat = einsum$2('ijklmn,n->ijklm', fltImage, rgbWeights); + break; + default: + throw new Error('Not a valid tensor rank.'); + } + grayFloat = expandDims$3(grayFloat, -1); + return cast$3(grayFloat, origDtype); + } + const rgbToGrayscale = /* @__PURE__ */ op({ rgbToGrayscale_ }); + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + /** + * Rotates the input image tensor counter-clockwise with an optional offset + * center of rotation. Currently available in the CPU, WebGL, and WASM backends. + * + * @param image 4d tensor of shape `[batch, imageHeight, imageWidth, depth]`. + * @param radians The amount of rotation. + * @param fillValue The value to fill in the empty space leftover + * after rotation. Can be either a single grayscale value (0-255), or an + * array of three numbers `[red, green, blue]` specifying the red, green, + * and blue channels. Defaults to `0` (black). + * @param center The center of rotation. Can be either a single value (0-1), or + * an array of two numbers `[centerX, centerY]`. Defaults to `0.5` (rotates + * the image around its center). + * + * @doc {heading: 'Operations', subheading: 'Images', namespace: 'image'} + */ + function rotateWithOffset_(image, radians, fillValue = 0, center = 0.5) { + const $image = convertToTensor(image, 'image', 'rotateWithOffset', 'float32'); + assert$1($image.rank === 4, () => 'Error in rotateWithOffset: image must be rank 4,' + + `but got rank ${$image.rank}.`); + const inputs = { image: $image }; + const attrs = { radians, fillValue, center }; + const res = ENGINE.runKernel(RotateWithOffset, inputs, attrs); + return res; + } + const rotateWithOffset = /* @__PURE__ */ op({ rotateWithOffset_ }); + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + function nonMaxSuppSanityCheck(boxes, scores, maxOutputSize, iouThreshold, scoreThreshold, softNmsSigma) { + if (iouThreshold == null) { + iouThreshold = 0.5; + } + if (scoreThreshold == null) { + scoreThreshold = Number.NEGATIVE_INFINITY; + } + if (softNmsSigma == null) { + softNmsSigma = 0.0; + } + const numBoxes = boxes.shape[0]; + maxOutputSize = Math.min(maxOutputSize, numBoxes); + assert$1(0 <= iouThreshold && iouThreshold <= 1, () => `iouThreshold must be in [0, 1], but was '${iouThreshold}'`); + assert$1(boxes.rank === 2, () => `boxes must be a 2D tensor, but was of rank '${boxes.rank}'`); + assert$1(boxes.shape[1] === 4, () => `boxes must have 4 columns, but 2nd dimension was ${boxes.shape[1]}`); + assert$1(scores.rank === 1, () => 'scores must be a 1D tensor'); + assert$1(scores.shape[0] === numBoxes, () => `scores has incompatible shape with boxes. Expected ${numBoxes}, ` + + `but was ${scores.shape[0]}`); + assert$1(0 <= softNmsSigma && softNmsSigma <= 1, () => `softNmsSigma must be in [0, 1], but was '${softNmsSigma}'`); + return { maxOutputSize, iouThreshold, scoreThreshold, softNmsSigma }; + } + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + /** + * Performs non maximum suppression of bounding boxes based on + * iou (intersection over union). + * + * @param boxes a 2d tensor of shape `[numBoxes, 4]`. Each entry is + * `[y1, x1, y2, x2]`, where `(y1, x1)` and `(y2, x2)` are the corners of + * the bounding box. + * @param scores a 1d tensor providing the box scores of shape `[numBoxes]`. + * @param maxOutputSize The maximum number of boxes to be selected. + * @param iouThreshold A float representing the threshold for deciding whether + * boxes overlap too much with respect to IOU. Must be between [0, 1]. + * Defaults to 0.5 (50% box overlap). + * @param scoreThreshold A threshold for deciding when to remove boxes based + * on score. Defaults to -inf, which means any score is accepted. + * @return A 1D tensor with the selected box indices. 
+ * + * @doc {heading: 'Operations', subheading: 'Images', namespace: 'image'} + */ + function nonMaxSuppression_(boxes, scores, maxOutputSize, iouThreshold = 0.5, scoreThreshold = Number.NEGATIVE_INFINITY) { + const $boxes = convertToTensor(boxes, 'boxes', 'nonMaxSuppression', 'float32'); + const $scores = convertToTensor(scores, 'scores', 'nonMaxSuppression', 'float32'); + const inputs = nonMaxSuppSanityCheck($boxes, $scores, maxOutputSize, iouThreshold, scoreThreshold); + maxOutputSize = inputs.maxOutputSize; + iouThreshold = inputs.iouThreshold; + scoreThreshold = inputs.scoreThreshold; + const attrs = { maxOutputSize, iouThreshold, scoreThreshold }; + return ENGINE.runKernel(NonMaxSuppressionV3, { boxes: $boxes, scores: $scores }, attrs); + } + const nonMaxSuppression = /* @__PURE__ */ op({ nonMaxSuppression_ }); + + /** + * @license + * Copyright 2019 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + /** + * Inserts a value into a sorted array. This method allows duplicate, meaning it + * allows inserting duplicate value, in which case, the element will be inserted + * at the lowest index of the value. + * @param arr The array to modify. + * @param element The element to insert. + * @param comparator Optional. 
If no comparator is specified, elements are + * compared using array_util.defaultComparator, which is suitable for Strings + * and Numbers in ascending arrays. If the array contains multiple instances of + * the target value, the left-most instance will be returned. To provide a + * comparator, it should take 2 arguments to compare and return a negative, + * zero, or a positive number. + */ + function binaryInsert(arr, element, comparator) { + const index = binarySearch(arr, element, comparator); + const insertionPoint = index < 0 ? -(index + 1) : index; + arr.splice(insertionPoint, 0, element); + } + /** + * Searches the array for the target using binary search, returns the index + * of the found element, or position to insert if element not found. If no + * comparator is specified, elements are compared using array_ + * util.defaultComparator, which is suitable for Strings and Numbers in + * ascending arrays. If the array contains multiple instances of the target + * value, the left-most instance will be returned. + * @param arr The array to be searched in. + * @param target The target to be searched for. + * @param comparator Should take 2 arguments to compare and return a negative, + * zero, or a positive number. + * @return Lowest index of the target value if found, otherwise the insertion + * point where the target should be inserted, in the form of + * (-insertionPoint - 1). + */ + function binarySearch(arr, target, comparator) { + return binarySearch_(arr, target, comparator || defaultComparator); + } + /** + * Compares its two arguments for order. + * @param a The first element to be compared. + * @param b The second element to be compared. + * @return A negative number, zero, or a positive number as the first + * argument is less than, equal to, or greater than the second. + */ + function defaultComparator(a, b) { + return a > b ? 1 : a < b ? 
-1 : 0; + } + function binarySearch_(arr, target, comparator) { + let left = 0; + let right = arr.length; + let middle = 0; + let found = false; + while (left < right) { + middle = left + ((right - left) >>> 1); + const compareResult = comparator(target, arr[middle]); + if (compareResult > 0) { + left = middle + 1; + } + else { + right = middle; + // If compareResult is 0, the value is found. We record it is found, + // and then keep looking because there may be duplicate. + found = !compareResult; + } + } + return found ? left : -left - 1; + } + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ + function nonMaxSuppressionV3Impl$2(boxes, scores, maxOutputSize, iouThreshold, scoreThreshold) { + return nonMaxSuppressionImpl_(boxes, scores, maxOutputSize, iouThreshold, scoreThreshold, 0 /* softNmsSigma */); + } + function nonMaxSuppressionV4Impl$2(boxes, scores, maxOutputSize, iouThreshold, scoreThreshold, padToMaxOutputSize) { + return nonMaxSuppressionImpl_(boxes, scores, maxOutputSize, iouThreshold, scoreThreshold, 0 /* softNmsSigma */, false /* returnScoresTensor */, padToMaxOutputSize /* padToMaxOutputSize */, true + /* returnValidOutputs */ ); + } + function nonMaxSuppressionV5Impl$2(boxes, scores, maxOutputSize, iouThreshold, scoreThreshold, softNmsSigma) { + return nonMaxSuppressionImpl_(boxes, scores, maxOutputSize, iouThreshold, scoreThreshold, softNmsSigma, true /* returnScoresTensor */); + } + function nonMaxSuppressionImpl_(boxes, scores, maxOutputSize, iouThreshold, scoreThreshold, softNmsSigma, returnScoresTensor = false, padToMaxOutputSize = false, returnValidOutputs = false) { + // The list is sorted in ascending order, so that we can always pop the + // candidate with the largest score in O(1) time. + const candidates = []; + for (let i = 0; i < scores.length; i++) { + if (scores[i] > scoreThreshold) { + candidates.push({ score: scores[i], boxIndex: i, suppressBeginIndex: 0 }); + } + } + candidates.sort(ascendingComparator); + // If softNmsSigma is 0, the outcome of this algorithm is exactly same as + // before. + const scale = softNmsSigma > 0 ? 
(-0.5 / softNmsSigma) : 0.0; + const selectedIndices = []; + const selectedScores = []; + while (selectedIndices.length < maxOutputSize && candidates.length > 0) { + const candidate = candidates.pop(); + const { score: originalScore, boxIndex, suppressBeginIndex } = candidate; + if (originalScore < scoreThreshold) { + break; + } + // Overlapping boxes are likely to have similar scores, therefore we + // iterate through the previously selected boxes backwards in order to + // see if candidate's score should be suppressed. We use + // suppressBeginIndex to track and ensure a candidate can be suppressed + // by a selected box no more than once. Also, if the overlap exceeds + // iouThreshold, we simply ignore the candidate. + let ignoreCandidate = false; + for (let j = selectedIndices.length - 1; j >= suppressBeginIndex; --j) { + const iou = intersectionOverUnion(boxes, boxIndex, selectedIndices[j]); + if (iou >= iouThreshold) { + ignoreCandidate = true; + break; + } + candidate.score = + candidate.score * suppressWeight(iouThreshold, scale, iou); + if (candidate.score <= scoreThreshold) { + break; + } + } + // At this point, if `candidate.score` has not dropped below + // `scoreThreshold`, then we know that we went through all of the + // previous selections and can safely update `suppressBeginIndex` to the + // end of the selected array. Then we can re-insert the candidate with + // the updated score and suppressBeginIndex back in the candidate list. + // If on the other hand, `candidate.score` has dropped below the score + // threshold, we will not add it back to the candidates list. + candidate.suppressBeginIndex = selectedIndices.length; + if (!ignoreCandidate) { + // Candidate has passed all the tests, and is not suppressed, so + // select the candidate. 
+ if (candidate.score === originalScore) { + selectedIndices.push(boxIndex); + selectedScores.push(candidate.score); + } + else if (candidate.score > scoreThreshold) { + // Candidate's score is suppressed but is still high enough to be + // considered, so add back to the candidates list. + binaryInsert(candidates, candidate, ascendingComparator); + } + } + } + // NonMaxSuppressionV4 feature: padding output to maxOutputSize. + const validOutputs = selectedIndices.length; + const elemsToPad = maxOutputSize - validOutputs; + if (padToMaxOutputSize && elemsToPad > 0) { + selectedIndices.push(...new Array(elemsToPad).fill(0)); + selectedScores.push(...new Array(elemsToPad).fill(0.0)); + } + const result = { selectedIndices }; + if (returnScoresTensor) { + result['selectedScores'] = selectedScores; + } + if (returnValidOutputs) { + result['validOutputs'] = validOutputs; + } + return result; + } + function intersectionOverUnion(boxes, i, j) { + const iCoord = boxes.subarray(i * 4, i * 4 + 4); + const jCoord = boxes.subarray(j * 4, j * 4 + 4); + const yminI = Math.min(iCoord[0], iCoord[2]); + const xminI = Math.min(iCoord[1], iCoord[3]); + const ymaxI = Math.max(iCoord[0], iCoord[2]); + const xmaxI = Math.max(iCoord[1], iCoord[3]); + const yminJ = Math.min(jCoord[0], jCoord[2]); + const xminJ = Math.min(jCoord[1], jCoord[3]); + const ymaxJ = Math.max(jCoord[0], jCoord[2]); + const xmaxJ = Math.max(jCoord[1], jCoord[3]); + const areaI = (ymaxI - yminI) * (xmaxI - xminI); + const areaJ = (ymaxJ - yminJ) * (xmaxJ - xminJ); + if (areaI <= 0 || areaJ <= 0) { + return 0.0; + } + const intersectionYmin = Math.max(yminI, yminJ); + const intersectionXmin = Math.max(xminI, xminJ); + const intersectionYmax = Math.min(ymaxI, ymaxJ); + const intersectionXmax = Math.min(xmaxI, xmaxJ); + const intersectionArea = Math.max(intersectionYmax - intersectionYmin, 0.0) * + Math.max(intersectionXmax - intersectionXmin, 0.0); + return intersectionArea / (areaI + areaJ - intersectionArea); + } + 
// A Gaussian penalty function, this method always returns values in [0, 1]. + // The weight is a function of similarity, the more overlap two boxes are, the + // smaller the weight is,meaning highly overlapping boxes will be significantly + // penalized. On the other hand, a non-overlapping box will not be penalized. + function suppressWeight(iouThreshold, scale, iou) { + const weight = Math.exp(scale * iou * iou); + return iou <= iouThreshold ? weight : 0.0; + } + function ascendingComparator(c1, c2) { + // For objects with same scores, we make the object with the larger index go + // first. In an array that pops from the end, this means that the object with + // the smaller index will be popped first. This ensures the same output as + // the TensorFlow python version. + return (c1.score - c2.score) || + ((c1.score === c2.score) && (c2.boxIndex - c1.boxIndex)); + } + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + /** + * Performs non maximum suppression of bounding boxes based on + * iou (intersection over union). + * + * This is the async version of `nonMaxSuppression` + * + * @param boxes a 2d tensor of shape `[numBoxes, 4]`. Each entry is + * `[y1, x1, y2, x2]`, where `(y1, x1)` and `(y2, x2)` are the corners of + * the bounding box. 
+ * @param scores a 1d tensor providing the box scores of shape `[numBoxes]`. + * @param maxOutputSize The maximum number of boxes to be selected. + * @param iouThreshold A float representing the threshold for deciding whether + * boxes overlap too much with respect to IOU. Must be between [0, 1]. + * Defaults to 0.5 (50% box overlap). + * @param scoreThreshold A threshold for deciding when to remove boxes based + * on score. Defaults to -inf, which means any score is accepted. + * @return A 1D tensor with the selected box indices. + * + * @doc {heading: 'Operations', subheading: 'Images', namespace: 'image'} + */ + async function nonMaxSuppressionAsync_(boxes, scores, maxOutputSize, iouThreshold = 0.5, scoreThreshold = Number.NEGATIVE_INFINITY) { + const $boxes = convertToTensor(boxes, 'boxes', 'nonMaxSuppressionAsync'); + const $scores = convertToTensor(scores, 'scores', 'nonMaxSuppressionAsync'); + const inputs = nonMaxSuppSanityCheck($boxes, $scores, maxOutputSize, iouThreshold, scoreThreshold); + maxOutputSize = inputs.maxOutputSize; + iouThreshold = inputs.iouThreshold; + scoreThreshold = inputs.scoreThreshold; + const boxesAndScores = await Promise.all([$boxes.data(), $scores.data()]); + const boxesVals = boxesAndScores[0]; + const scoresVals = boxesAndScores[1]; + // We call a cpu based impl directly with the typedarray data here rather + // than a kernel because all kernels are synchronous (and thus cannot await + // .data()). + const { selectedIndices } = nonMaxSuppressionV3Impl$2(boxesVals, scoresVals, maxOutputSize, iouThreshold, scoreThreshold); + if ($boxes !== boxes) { + $boxes.dispose(); + } + if ($scores !== scores) { + $scores.dispose(); + } + return tensor1d(selectedIndices, 'int32'); + } + const nonMaxSuppressionAsync = nonMaxSuppressionAsync_; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. 
+ * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + /** + * Performs non maximum suppression of bounding boxes based on + * iou (intersection over union). + * + * This op also supports a Soft-NMS mode (cf. + * Bodla et al, https://arxiv.org/abs/1704.04503) where boxes reduce the score + * of other overlapping boxes, therefore favoring different regions of the image + * with high scores. To enable this Soft-NMS mode, set the `softNmsSigma` + * parameter to be larger than 0. + * + * @param boxes a 2d tensor of shape `[numBoxes, 4]`. Each entry is + * `[y1, x1, y2, x2]`, where `(y1, x1)` and `(y2, x2)` are the corners of + * the bounding box. + * @param scores a 1d tensor providing the box scores of shape `[numBoxes]`. + * @param maxOutputSize The maximum number of boxes to be selected. + * @param iouThreshold A float representing the threshold for deciding whether + * boxes overlap too much with respect to IOU. Must be between [0, 1]. + * Defaults to 0.5 (50% box overlap). + * @param scoreThreshold A threshold for deciding when to remove boxes based + * on score. Defaults to -inf, which means any score is accepted. + * @param softNmsSigma A float representing the sigma parameter for Soft NMS. + * When sigma is 0, it falls back to nonMaxSuppression. + * @return A map with the following properties: + * - selectedIndices: A 1D tensor with the selected box indices. 
+ * - selectedScores: A 1D tensor with the corresponding scores for each + * selected box. + * + * @doc {heading: 'Operations', subheading: 'Images', namespace: 'image'} + */ + function nonMaxSuppressionWithScore_(boxes, scores, maxOutputSize, iouThreshold = 0.5, scoreThreshold = Number.NEGATIVE_INFINITY, softNmsSigma = 0.0) { + const $boxes = convertToTensor(boxes, 'boxes', 'nonMaxSuppression'); + const $scores = convertToTensor(scores, 'scores', 'nonMaxSuppression'); + const params = nonMaxSuppSanityCheck($boxes, $scores, maxOutputSize, iouThreshold, scoreThreshold, softNmsSigma); + maxOutputSize = params.maxOutputSize; + iouThreshold = params.iouThreshold; + scoreThreshold = params.scoreThreshold; + softNmsSigma = params.softNmsSigma; + const inputs = { boxes: $boxes, scores: $scores }; + const attrs = { maxOutputSize, iouThreshold, scoreThreshold, softNmsSigma }; + // tslint:disable-next-line: no-unnecessary-type-assertion + const result = ENGINE.runKernel(NonMaxSuppressionV5, inputs, attrs); + return { selectedIndices: result[0], selectedScores: result[1] }; + } + const nonMaxSuppressionWithScore = /* @__PURE__ */ op({ nonMaxSuppressionWithScore_ }); + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ + /** + * Asynchronously performs non maximum suppression of bounding boxes based on + * iou (intersection over union). + * + * This op also supports a Soft-NMS mode (cf. + * Bodla et al, https://arxiv.org/abs/1704.04503) where boxes reduce the score + * of other overlapping boxes, therefore favoring different regions of the image + * with high scores. To enable this Soft-NMS mode, set the `softNmsSigma` + * parameter to be larger than 0. + * + * @param boxes a 2d tensor of shape `[numBoxes, 4]`. Each entry is + * `[y1, x1, y2, x2]`, where `(y1, x1)` and `(y2, x2)` are the corners of + * the bounding box. + * @param scores a 1d tensor providing the box scores of shape `[numBoxes]`. + * @param maxOutputSize The maximum number of boxes to be selected. + * @param iouThreshold A float representing the threshold for deciding whether + * boxes overlap too much with respect to IOU. Must be between [0, 1]. + * Defaults to 0.5 (50% box overlap). + * @param scoreThreshold A threshold for deciding when to remove boxes based + * on score. Defaults to -inf, which means any score is accepted. + * @param softNmsSigma A float representing the sigma parameter for Soft NMS. + * When sigma is 0, it falls back to nonMaxSuppression. + * @return A map with the following properties: + * - selectedIndices: A 1D tensor with the selected box indices. + * - selectedScores: A 1D tensor with the corresponding scores for each + * selected box. 
+ * + * @doc {heading: 'Operations', subheading: 'Images', namespace: 'image'} + */ + async function nonMaxSuppressionWithScoreAsync_(boxes, scores, maxOutputSize, iouThreshold = 0.5, scoreThreshold = Number.NEGATIVE_INFINITY, softNmsSigma = 0.0) { + const $boxes = convertToTensor(boxes, 'boxes', 'nonMaxSuppressionAsync'); + const $scores = convertToTensor(scores, 'scores', 'nonMaxSuppressionAsync'); + const params = nonMaxSuppSanityCheck($boxes, $scores, maxOutputSize, iouThreshold, scoreThreshold, softNmsSigma); + maxOutputSize = params.maxOutputSize; + iouThreshold = params.iouThreshold; + scoreThreshold = params.scoreThreshold; + softNmsSigma = params.softNmsSigma; + const boxesAndScores = await Promise.all([$boxes.data(), $scores.data()]); + const boxesVals = boxesAndScores[0]; + const scoresVals = boxesAndScores[1]; + // We call a cpu based impl directly with the typedarray data here rather + // than a kernel because all kernels are synchronous (and thus cannot await + // .data()). + const { selectedIndices, selectedScores } = nonMaxSuppressionV5Impl$2(boxesVals, scoresVals, maxOutputSize, iouThreshold, scoreThreshold, softNmsSigma); + if ($boxes !== boxes) { + $boxes.dispose(); + } + if ($scores !== scores) { + $scores.dispose(); + } + return { + selectedIndices: tensor1d(selectedIndices, 'int32'), + selectedScores: tensor1d(selectedScores) + }; + } + const nonMaxSuppressionWithScoreAsync = nonMaxSuppressionWithScoreAsync_; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + /** + * Asynchronously performs non maximum suppression of bounding boxes based on + * iou (intersection over union), with an option to pad results. + * + * @param boxes a 2d tensor of shape `[numBoxes, 4]`. Each entry is + * `[y1, x1, y2, x2]`, where `(y1, x1)` and `(y2, x2)` are the corners of + * the bounding box. + * @param scores a 1d tensor providing the box scores of shape `[numBoxes]`. + * @param maxOutputSize The maximum number of boxes to be selected. + * @param iouThreshold A float representing the threshold for deciding whether + * boxes overlap too much with respect to IOU. Must be between [0, 1]. + * Defaults to 0.5 (50% box overlap). + * @param scoreThreshold A threshold for deciding when to remove boxes based + * on score. Defaults to -inf, which means any score is accepted. + * @param padToMaxOutputSize Defaults to false. If true, size of output + * `selectedIndices` is padded to maxOutputSize. + * @return A map with the following properties: + * - selectedIndices: A 1D tensor with the selected box indices. + * - validOutputs: A scalar denoting how many elements in `selectedIndices` + * are valid. Valid elements occur first, then padding. 
+ * + * @doc {heading: 'Operations', subheading: 'Images', namespace: 'image'} + */ + function nonMaxSuppressionPadded_(boxes, scores, maxOutputSize, iouThreshold = 0.5, scoreThreshold = Number.NEGATIVE_INFINITY, padToMaxOutputSize = false) { + const $boxes = convertToTensor(boxes, 'boxes', 'nonMaxSuppression'); + const $scores = convertToTensor(scores, 'scores', 'nonMaxSuppression'); + const params = nonMaxSuppSanityCheck($boxes, $scores, maxOutputSize, iouThreshold, scoreThreshold, null /* softNmsSigma */); + const $maxOutputSize = params.maxOutputSize; + const $iouThreshold = params.iouThreshold; + const $scoreThreshold = params.scoreThreshold; + const inputs = { boxes: $boxes, scores: $scores }; + const attrs = { + maxOutputSize: $maxOutputSize, + iouThreshold: $iouThreshold, + scoreThreshold: $scoreThreshold, + padToMaxOutputSize + }; + // tslint:disable-next-line: no-unnecessary-type-assertion + const result = ENGINE.runKernel(NonMaxSuppressionV4, inputs, attrs); + return { selectedIndices: result[0], validOutputs: result[1] }; + } + const nonMaxSuppressionPadded = /* @__PURE__ */ op({ nonMaxSuppressionPadded_ }); + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ + /** + * Asynchronously performs non maximum suppression of bounding boxes based on + * iou (intersection over union), with an option to pad results. + * + * @param boxes a 2d tensor of shape `[numBoxes, 4]`. Each entry is + * `[y1, x1, y2, x2]`, where `(y1, x1)` and `(y2, x2)` are the corners of + * the bounding box. + * @param scores a 1d tensor providing the box scores of shape `[numBoxes]`. + * @param maxOutputSize The maximum number of boxes to be selected. + * @param iouThreshold A float representing the threshold for deciding whether + * boxes overlap too much with respect to IOU. Must be between [0, 1]. + * Defaults to 0.5 (50% box overlap). + * @param scoreThreshold A threshold for deciding when to remove boxes based + * on score. Defaults to -inf, which means any score is accepted. + * @param padToMaxOutputSize Defaults to false. If true, size of output + * `selectedIndices` is padded to maxOutputSize. + * @return A map with the following properties: + * - selectedIndices: A 1D tensor with the selected box indices. + * - validOutputs: A scalar denoting how many elements in `selectedIndices` + * are valid. Valid elements occur first, then padding. 
+ * + * @doc {heading: 'Operations', subheading: 'Images', namespace: 'image'} + */ + async function nonMaxSuppressionPaddedAsync_(boxes, scores, maxOutputSize, iouThreshold = 0.5, scoreThreshold = Number.NEGATIVE_INFINITY, padToMaxOutputSize = false) { + const $boxes = convertToTensor(boxes, 'boxes', 'nonMaxSuppressionAsync'); + const $scores = convertToTensor(scores, 'scores', 'nonMaxSuppressionAsync'); + const params = nonMaxSuppSanityCheck($boxes, $scores, maxOutputSize, iouThreshold, scoreThreshold, null /* softNmsSigma */); + const $maxOutputSize = params.maxOutputSize; + const $iouThreshold = params.iouThreshold; + const $scoreThreshold = params.scoreThreshold; + const [boxesVals, scoresVals] = await Promise.all([$boxes.data(), $scores.data()]); + // We call a cpu based impl directly with the typedarray data here rather + // than a kernel because all kernels are synchronous (and thus cannot await + // .data()). + const { selectedIndices, validOutputs } = nonMaxSuppressionV4Impl$2(boxesVals, scoresVals, $maxOutputSize, $iouThreshold, $scoreThreshold, padToMaxOutputSize); + if ($boxes !== boxes) { + $boxes.dispose(); + } + if ($scores !== scores) { + $scores.dispose(); + } + return { + selectedIndices: tensor1d(selectedIndices, 'int32'), + validOutputs: scalar(validOutputs, 'int32') + }; + } + const nonMaxSuppressionPaddedAsync = nonMaxSuppressionPaddedAsync_; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + /** + * Bilinear resize a single 3D image or a batch of 3D images to a new shape. + * + * @param images The images, of rank 4 or rank 3, of shape + * `[batch, height, width, inChannels]`. If rank 3, batch of 1 is assumed. + * @param size The new shape `[newHeight, newWidth]` to resize the + * images to. Each channel is resized individually. + * @param alignCorners Defaults to `false`. If true, rescale + * input by `(new_height - 1) / (height - 1)`, which exactly aligns the 4 + * corners of images and resized images. If false, rescale by + * `new_height / height`. Treat similarly the width dimension. + * @param halfPixelCenters Defaults to `false`. Whether to assume pixel centers + * are at 0.5, which would make the floating point coordinates of the top + * left pixel 0.5, 0.5. + * + * @doc {heading: 'Operations', subheading: 'Images', namespace: 'image'} + */ + function resizeBilinear_(images, size, alignCorners = false, halfPixelCenters = false) { + const $images = convertToTensor(images, 'images', 'resizeBilinear'); + assert$1($images.rank === 3 || $images.rank === 4, () => `Error in resizeBilinear: x must be rank 3 or 4, but got ` + + `rank ${$images.rank}.`); + assert$1(size.length === 2, () => `Error in resizeBilinear: new shape must 2D, but got shape ` + + `${size}.`); + assert$1(halfPixelCenters === false || alignCorners === false, () => `Error in resizeBilinear: If halfPixelCenters is true, ` + + `alignCorners must be false.`); + let batchImages = $images; + let reshapedTo4D = false; + if ($images.rank === 3) { + reshapedTo4D = true; + batchImages = reshape$3($images, [1, $images.shape[0], $images.shape[1], $images.shape[2]]); + } + const [] = size; + const inputs = { images: batchImages }; + const attrs = { alignCorners, halfPixelCenters, size }; + // 
tslint:disable-next-line: no-unnecessary-type-assertion + const res = ENGINE.runKernel(ResizeBilinear, inputs, attrs); + if (reshapedTo4D) { + return reshape$3(res, [res.shape[1], res.shape[2], res.shape[3]]); + } + return res; + } + const resizeBilinear$3 = /* @__PURE__ */ op({ resizeBilinear_ }); + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + /** + * NearestNeighbor resize a batch of 3D images to a new shape. + * + * @param images The images, of rank 4 or rank 3, of shape + * `[batch, height, width, inChannels]`. If rank 3, batch of 1 is assumed. + * @param size The new shape `[newHeight, newWidth]` to resize the + * images to. Each channel is resized individually. + * @param alignCorners Defaults to False. If true, rescale + * input by `(new_height - 1) / (height - 1)`, which exactly aligns the 4 + * corners of images and resized images. If false, rescale by + * `new_height / height`. Treat similarly the width dimension. + * @param halfPixelCenters Defaults to `false`. Whether to assume pixels are of + * half the actual dimensions, and yield more accurate resizes. This flag + * would also make the floating point coordinates of the top left pixel + * 0.5, 0.5. 
+ * + * @doc {heading: 'Operations', subheading: 'Images', namespace: 'image'} + */ + function resizeNearestNeighbor_(images, size, alignCorners = false, halfPixelCenters = false) { + const $images = convertToTensor(images, 'images', 'resizeNearestNeighbor'); + assert$1($images.rank === 3 || $images.rank === 4, () => `Error in resizeNearestNeighbor: x must be rank 3 or 4, but got ` + + `rank ${$images.rank}.`); + assert$1(size.length === 2, () => `Error in resizeNearestNeighbor: new shape must 2D, but got shape ` + + `${size}.`); + assert$1($images.dtype === 'float32' || $images.dtype === 'int32', () => '`images` must have `int32` or `float32` as dtype'); + assert$1(halfPixelCenters === false || alignCorners === false, () => `Error in resizeNearestNeighbor: If halfPixelCenters is true, ` + + `alignCorners must be false.`); + let batchImages = $images; + let reshapedTo4D = false; + if ($images.rank === 3) { + reshapedTo4D = true; + batchImages = reshape$3($images, [1, $images.shape[0], $images.shape[1], $images.shape[2]]); + } + const [] = size; + const inputs = { images: batchImages }; + const attrs = { alignCorners, halfPixelCenters, size }; + // tslint:disable-next-line: no-unnecessary-type-assertion + const res = ENGINE.runKernel(ResizeNearestNeighbor, inputs, attrs); + if (reshapedTo4D) { + return reshape$3(res, [res.shape[1], res.shape[2], res.shape[3]]); + } + return res; + } + const resizeNearestNeighbor$2 = /* @__PURE__ */ op({ resizeNearestNeighbor_ }); + + /** + * @license + * Copyright 2021 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + /** + * Performs image binarization with corresponding threshold + * (depends on the method)value, which creates a binary image from a grayscale. + * @param image 3d tensor of shape [imageHeight,imageWidth, depth], + * where imageHeight and imageWidth must be positive.The image color + * range should be [0, 255]. + * @param method Optional string from `'binary' | 'otsu'` + * which specifies the method for thresholding. Defaults to 'binary'. + * @param inverted Optional boolean whichspecifies + * if colours should be inverted. Defaults to false. + * @param threshValue Optional number which defines threshold value from 0 to 1. + * Defaults to 0.5. + * @return A 3d tensor of shape [imageHeight,imageWidth, depth], which + * contains binarized image. + */ + function threshold_(image, method = 'binary', inverted = false, threshValue = 0.5) { + const $image = convertToTensor(image, 'image', 'threshold'); + /* 0.2989, 0.5870, 0.1140 are represent luma coefficients in CCIR601. 
+ Reference for converting between RGB and grayscale: https://en.wikipedia.org/wiki/Luma_%28video%29 */ + const RED_INTENCITY_COEF = 0.2989; + const GREEN_INTENCITY_COEF = 0.5870; + const BLUE_INTENCITY_COEF = 0.1140; + const totalPixelsInImage = $image.shape[0] * $image.shape[1]; + let $threshold = mul(tensor1d([threshValue]), 255); + let r, g, b, grayscale; + assert$1($image.rank === 3, () => 'Error in threshold: image must be rank 3,' + + `but got rank ${$image.rank}.`); + assert$1($image.shape[2] === 3 || $image.shape[2] === 1, () => 'Error in threshold: ' + + 'image color channel must be equal to 3 or 1' + + `but got ${$image.shape[2]}.`); + assert$1($image.dtype === 'int32' || $image.dtype === 'float32', () => 'Error in dtype: image dtype must be int32 or float32,' + + `but got dtype ${$image.dtype}.`); + assert$1(method === 'otsu' || method === 'binary', () => `Method must be binary or otsu, but was ${method}`); + if ($image.shape[2] === 3) { + [r, g, b] = split$3($image, [1, 1, 1], -1); + const $r = mul(r, RED_INTENCITY_COEF); + const $g = mul(g, GREEN_INTENCITY_COEF); + const $b = mul(b, BLUE_INTENCITY_COEF); + grayscale = add$3(add$3($r, $g), $b); + } + else { + grayscale = image; + } + if (method === 'otsu') { + const $histogram = bincount$2(cast$3(round$2(grayscale), 'int32'), tensor([]), 256); + $threshold = otsu($histogram, totalPixelsInImage); + } + const invCondition = inverted ? 
+ lessEqual$2(grayscale, $threshold) : greater$3(grayscale, $threshold); + const result = cast$3(mul(invCondition, 255), 'int32'); + return result; + } + function otsu(histogram, total) { + let bestThresh = tensor1d([-1]); + let bestInBetVar = tensor1d([0]); + let cInBetVar = tensor1d([0]); + let classFirst, classSecond, meanFirst, meanSec, weightForeground, weightBack; + for (let index = 0; index < histogram.size - 1; index++) { + classFirst = slice$2(histogram, 0, index + 1); + classSecond = slice$2(histogram, index + 1); + weightForeground = div$1(sum$3(classFirst), total); + weightBack = div$1(sum$3(classSecond), total); + const meanFirstDivA = sum$3(mul(classFirst, range$3(0, classFirst.size))); + meanFirst = div$1(meanFirstDivA, sum$3(classFirst)); + const meanSecFill = fill$2(classSecond.shape, classFirst.size); + const meanSecAdd = add$3(range$3(0, classSecond.size), meanSecFill); + const meanSecMul = mul(classSecond, (meanSecAdd)); + meanSec = div$1(sum$3(meanSecMul), sum$3(classSecond)); + const cInBetVarSubA = sub$2(meanFirst, meanSec); + const cInBetVarSubB = sub$2(meanFirst, meanSec); + const cInBetVarMul = mul(weightForeground, weightBack); + cInBetVar = mul(mul(cInBetVarMul, cInBetVarSubA), cInBetVarSubB); + const condition = greater$3(cInBetVar, bestInBetVar); + bestInBetVar = where(condition, cInBetVar, bestInBetVar); + bestThresh = where(condition, tensor1d([index]), bestThresh); + } + return bestThresh; + } + const threshold$1 = /* @__PURE__ */ op({ threshold_ }); + + /** + * @license + * Copyright 2021 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + /** + * Applies the given transform(s) to the image(s). + * + * @param image 4d tensor of shape `[batch, imageHeight, imageWidth, depth]`. + * @param transforms Projective transform matrix/matrices. A tensor1d of length + * 8 or tensor of size N x 8. If one row of transforms is [a0, a1, a2, b0, + * b1, b2, c0, c1], then it maps the output point (x, y) to a transformed + * input point (x', y') = ((a0 x + a1 y + a2) / k, (b0 x + b1 y + b2) / k), + * where k = c0 x + c1 y + 1. The transforms are inverted compared to the + * transform mapping input points to output points. + * @param interpolation Interpolation mode. + * Supported values: 'nearest', 'bilinear'. Default to 'nearest'. + * @param fillMode Points outside the boundaries of the input are filled + * according to the given mode, one of 'constant', 'reflect', 'wrap', + * 'nearest'. Default to 'constant'. + * 'reflect': (d c b a | a b c d | d c b a ) The input is extended by + * reflecting about the edge of the last pixel. + * 'constant': (k k k k | a b c d | k k k k) The input is extended by + * filling all values beyond the edge with the same constant value k. + * 'wrap': (a b c d | a b c d | a b c d) The input is extended by + * wrapping around to the opposite edge. + * 'nearest': (a a a a | a b c d | d d d d) The input is extended by + * the nearest pixel. + * @param fillValue A float represents the value to be filled outside the + * boundaries when fillMode is 'constant'. 
+ * @param Output dimension after the transform, [height, width]. If undefined, + * output is the same size as input image. + * + * @doc {heading: 'Operations', subheading: 'Images', namespace: 'image'} + */ + function transform_(image, transforms, interpolation = 'nearest', fillMode = 'constant', fillValue = 0, outputShape) { + const $image = convertToTensor(image, 'image', 'transform', 'float32'); + const $transforms = convertToTensor(transforms, 'transforms', 'transform', 'float32'); + assert$1($image.rank === 4, () => 'Error in transform: image must be rank 4,' + + `but got rank ${$image.rank}.`); + assert$1($transforms.rank === 2 && + ($transforms.shape[0] === $image.shape[0] || + $transforms.shape[0] === 1) && + $transforms.shape[1] === 8, () => `Error in transform: Input transform should be batch x 8 or 1 x 8`); + assert$1(outputShape == null || outputShape.length === 2, () => 'Error in transform: outputShape must be [height, width] or null, ' + + `but got ${outputShape}.`); + const inputs = { image: $image, transforms: $transforms }; + const attrs = { interpolation, fillMode, fillValue, outputShape }; + return ENGINE.runKernel(Transform, inputs, attrs); + } + const transform$2 = /* @__PURE__ */ op({ transform_ }); + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ + /** + * Copy a tensor setting everything outside a central band in each innermost + * matrix to zero. + * + * The band part is computed as follows: Assume input has `k` dimensions + * `[I, J, K, ..., M, N]`, then the output is a tensor with the same shape where + * `band[i, j, k, ..., m, n] = in_band(m, n) * input[i, j, k, ..., m, n]`. + * The indicator function + * `in_band(m, n) = (num_lower < 0 || (m-n) <= num_lower)` + * `&& (num_upper < 0 || (n-m) <= num_upper)` + * + * ```js + * const x = tf.tensor2d([[ 0, 1, 2, 3], + * [-1, 0, 1, 2], + * [-2, -1, 0, 1], + * [-3, -2, -1, 0]]); + * let y = tf.linalg.bandPart(x, 1, -1); + * y.print(); // [[ 0, 1, 2, 3], + * // [-1, 0, 1, 2], + * // [ 0, -1, 0, 1], + * // [ 0, 0 , -1, 0]] + * let z = tf.linalg.bandPart(x, 2, 1); + * z.print(); // [[ 0, 1, 0, 0], + * // [-1, 0, 1, 0], + * // [-2, -1, 0, 1], + * // [ 0, -2, -1, 0]] + * ``` + * + * @param x Rank `k` tensor + * @param numLower Number of subdiagonals to keep. + * If negative, keep entire lower triangle. + * @param numUpper Number of subdiagonals to keep. + * If negative, keep entire upper triangle. + * @returns Rank `k` tensor of the same shape as input. + * The extracted banded tensor. + * + * @doc {heading:'Operations', subheading:'Linear Algebra', namespace:'linalg'} + */ + function bandPart_(a, numLower, numUpper) { + const $a = convertToTensor(a, 'a', 'bandPart'); + assert$1($a.rank >= 2, () => `bandPart(): Rank must be at least 2, got ${$a.rank}.`); + const shape = $a.shape; + const [M, N] = $a.shape.slice(-2); + let $numLower; + let $numUpper; + if (typeof numLower === 'number') { + assert$1(numLower % 1 === 0, () => `bandPart(): numLower must be an integer, got ${numLower}.`); + assert$1(numLower <= M, () => `bandPart(): numLower (${numLower})` + + ` must not be greater than the number of rows (${M}).`); + $numLower = + convertToTensor(numLower < 0 ? 
M : numLower, 'numLower', 'bandPart'); + } + else { + assert$1(numLower.dtype === 'int32', () => `bandPart(): numLower's dtype must be an int32.`); + // If numLower is a Scalar, checking `numLower <= M` could hurt performance, + // but minimum(numLower, M) could avoid unexpected results. + $numLower = where(less$3(numLower, 0), M, minimum$4(numLower, M)); + } + if (typeof numUpper === 'number') { + assert$1(numUpper % 1 === 0, () => `bandPart(): numUpper must be an integer, got ${numUpper}.`); + assert$1(numUpper <= N, () => `bandPart(): numUpper (${numUpper})` + + ` must not be greater than the number of columns (${N}).`); + $numUpper = + convertToTensor(numUpper < 0 ? N : numUpper, 'numUpper', 'bandPart'); + } + else { + assert$1(numUpper.dtype === 'int32', () => `bandPart(): numUpper's dtype must be an int32.`); + $numUpper = where(less$3(numUpper, 0), N, minimum$4(numUpper, N)); + } + const i = reshape$3(range$3(0, M, 1, 'int32'), [-1, 1]); + const j = range$3(0, N, 1, 'int32'); + const ij = sub$2(i, j); + const inBand = logicalAnd$2(lessEqual$2(ij, $numLower), greaterEqual$2(ij, neg$2($numUpper))); + const zero = zeros$2([M, N], $a.dtype); + return reshape$3(stack(unstack(reshape$3($a, [-1, M, N])) + .map(mat => where(inBand, mat, zero))), shape); + } + const bandPart = /* @__PURE__ */ op({ bandPart_ }); + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ + /** + * Gram-Schmidt orthogonalization. + * + * ```js + * const x = tf.tensor2d([[1, 2], [3, 4]]); + * let y = tf.linalg.gramSchmidt(x); + * y.print(); + * console.log('Orthogonalized:'); + * y.dot(y.transpose()).print(); // should be nearly the identity matrix. + * console.log('First row direction maintained:'); + * const data = await y.array(); + * console.log(data[0][1] / data[0][0]); // should be nearly 2. + * ``` + * + * @param xs The vectors to be orthogonalized, in one of the two following + * formats: + * - An Array of `tf.Tensor1D`. + * - A `tf.Tensor2D`, i.e., a matrix, in which case the vectors are the rows + * of `xs`. + * In each case, all the vectors must have the same length and the length + * must be greater than or equal to the number of vectors. + * @returns The orthogonalized and normalized vectors or matrix. + * Orthogonalization means that the vectors or the rows of the matrix + * are orthogonal (zero inner products). Normalization means that each + * vector or each row of the matrix has an L2 norm that equals `1`. + * + * @doc {heading:'Operations', subheading:'Linear Algebra', namespace:'linalg'} + */ + function gramSchmidt_(xs) { + let inputIsTensor2D; + if (Array.isArray(xs)) { + inputIsTensor2D = false; + assert$1(xs != null && xs.length > 0, () => 'Gram-Schmidt process: input must not be null, undefined, or ' + + 'empty'); + const dim = xs[0].shape[0]; + for (let i = 1; i < xs.length; ++i) { + assert$1(xs[i].shape[0] === dim, () => 'Gram-Schmidt: Non-unique lengths found in the input vectors: ' + + `(${xs[i].shape[0]} vs. 
${dim})`); + } + } + else { + inputIsTensor2D = true; + xs = split$3(xs, xs.shape[0], 0).map(x => squeeze(x, [0])); + } + assert$1(xs.length <= xs[0].shape[0], () => `Gram-Schmidt: Number of vectors (${xs.length}) exceeds ` + + `number of dimensions (${xs[0].shape[0]}).`); + const ys = []; + const xs1d = xs; + for (let i = 0; i < xs.length; ++i) { + ys.push(ENGINE.tidy(() => { + let x = xs1d[i]; + if (i > 0) { + for (let j = 0; j < i; ++j) { + const proj = mul(sum$3(mul(ys[j], x)), ys[j]); + x = sub$2(x, proj); + } + } + return div$1(x, norm(x, 'euclidean')); + })); + } + if (inputIsTensor2D) { + return stack(ys, 0); + } + else { + return ys; + } + } + const gramSchmidt = /* @__PURE__ */ op({ gramSchmidt_ }); + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + /** + * Compute QR decomposition of m-by-n matrix using Householder transformation. + * + * Implementation based on + * [http://www.cs.cornell.edu/~bindel/class/cs6210-f09/lec18.pdf] + * (http://www.cs.cornell.edu/~bindel/class/cs6210-f09/lec18.pdf) + * + * ```js + * const a = tf.tensor2d([[1, 2], [3, 4]]); + * let [q, r] = tf.linalg.qr(a); + * console.log('Q'); + * q.print(); + * console.log('R'); + * r.print(); + * console.log('Orthogonalized'); + * q.dot(q.transpose()).print() // should be nearly the identity matrix. 
+ * console.log('Reconstructed'); + * q.dot(r).print(); // should be nearly [[1, 2], [3, 4]]; + * ``` + * + * @param x The `tf.Tensor` to be QR-decomposed. Must have rank >= 2. Suppose + * it has the shape `[..., M, N]`. + * @param fullMatrices An optional boolean parameter. Defaults to `false`. + * If `true`, compute full-sized `Q`. If `false` (the default), + * compute only the leading N columns of `Q` and `R`. + * @returns An `Array` of two `tf.Tensor`s: `[Q, R]`. `Q` is a unitary matrix, + * i.e., its columns all have unit norm and are mutually orthogonal. + * If `M >= N`, + * If `fullMatrices` is `false` (default), + * - `Q` has a shape of `[..., M, N]`, + * - `R` has a shape of `[..., N, N]`. + * If `fullMatrices` is `true` (default), + * - `Q` has a shape of `[..., M, M]`, + * - `R` has a shape of `[..., M, N]`. + * If `M < N`, + * - `Q` has a shape of `[..., M, M]`, + * - `R` has a shape of `[..., M, N]`. + * @throws If the rank of `x` is less than 2. + * + * @doc {heading:'Operations', + * subheading:'Linear Algebra', + * namespace:'linalg'} + */ + function qr_(x, fullMatrices = false) { + assert$1(x.rank >= 2, () => `qr() requires input tensor to have a rank >= 2, but got rank ${x.rank}`); + if (x.rank === 2) { + return qr2d(x, fullMatrices); + } + else { + // Rank > 2. + // TODO(cais): Below we split the input into individual 2D tensors, + // perform QR decomposition on them and then stack the results back + // together. We should explore whether this can be parallelized. 
+ const outerDimsProd = x.shape.slice(0, x.shape.length - 2) + .reduce((value, prev) => value * prev); + const x2ds = unstack(reshape$3(x, [ + outerDimsProd, x.shape[x.shape.length - 2], + x.shape[x.shape.length - 1] + ]), 0); + const q2ds = []; + const r2ds = []; + x2ds.forEach(x2d => { + const [q2d, r2d] = qr2d(x2d, fullMatrices); + q2ds.push(q2d); + r2ds.push(r2d); + }); + const q = reshape$3(stack(q2ds, 0), x.shape); + const r = reshape$3(stack(r2ds, 0), x.shape); + return [q, r]; + } + } + function qr2d(x, fullMatrices = false) { + return ENGINE.tidy(() => { + assert$1(x.shape.length === 2, () => `qr2d() requires a 2D Tensor, but got a ${x.shape.length}D Tensor.`); + const m = x.shape[0]; + const n = x.shape[1]; + let q = eye(m); // Orthogonal transform so far. + let r = clone(x); // Transformed matrix so far. + const one2D = tensor2d([[1]], [1, 1]); + let w = clone(one2D); + const iters = m >= n ? n : m; + for (let j = 0; j < iters; ++j) { + // This tidy within the for-loop ensures we clean up temporary + // tensors as soon as they are no longer needed. + const rTemp = r; + const wTemp = w; + const qTemp = q; + [w, r, q] = ENGINE.tidy(() => { + // Find H = I - tau * w * w', to put zeros below R(j, j). + const rjEnd1 = slice$2(r, [j, j], [m - j, 1]); + const normX = norm(rjEnd1); + const rjj = slice$2(r, [j, j], [1, 1]); + // The sign() function returns 0 on 0, which causes division by zero. + const s = where(greater$3(rjj, 0), tensor2d([[-1]]), tensor2d([[1]])); + const u1 = sub$2(rjj, mul(s, normX)); + const wPre = div$1(rjEnd1, u1); + if (wPre.shape[0] === 1) { + w = clone(one2D); + } + else { + w = concat$2([ + one2D, + slice$2(wPre, [1, 0], [wPre.shape[0] - 1, wPre.shape[1]]) + ], 0); + } + const tau = neg$2(div$1(matMul$1(s, u1), normX)); + // -- R := HR, Q := QH. 
+ const rjEndAll = slice$2(r, [j, 0], [m - j, n]); + const tauTimesW = mul(tau, w); + const wT = transpose$2(w); + if (j === 0) { + r = sub$2(rjEndAll, matMul$1(tauTimesW, matMul$1(wT, rjEndAll))); + } + else { + const rTimesTau = sub$2(rjEndAll, matMul$1(tauTimesW, matMul$1(wT, rjEndAll))); + r = concat$2([slice$2(r, [0, 0], [j, n]), rTimesTau], 0); + } + const tawTimesWT = transpose$2(tauTimesW); + const qAllJEnd = slice$2(q, [0, j], [m, q.shape[1] - j]); + if (j === 0) { + q = sub$2(qAllJEnd, matMul$1(matMul$1(qAllJEnd, w), tawTimesWT)); + } + else { + const qTimesTau = sub$2(qAllJEnd, matMul$1(matMul$1(qAllJEnd, w), tawTimesWT)); + q = concat$2([slice$2(q, [0, 0], [m, j]), qTimesTau], 1); + } + return [w, r, q]; + }); + dispose([rTemp, wTemp, qTemp]); + } + if (!fullMatrices && m > n) { + q = slice$2(q, [0, 0], [m, n]); + r = slice$2(r, [0, 0], [n, n]); + } + return [q, r]; + }); + } + const qr = /* @__PURE__ */ op({ qr_ }); + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ + exports.Reduction = void 0; + (function (Reduction) { + Reduction[Reduction["NONE"] = 0] = "NONE"; + Reduction[Reduction["MEAN"] = 1] = "MEAN"; + Reduction[Reduction["SUM"] = 2] = "SUM"; + Reduction[Reduction["SUM_BY_NONZERO_WEIGHTS"] = 3] = "SUM_BY_NONZERO_WEIGHTS"; + })(exports.Reduction || (exports.Reduction = {})); + + /** + * Computes the weighted loss between two tensors. + * + * @param losses Tensor of shape `[batch_size, d1, ..., dN]`. + * @param weights Tensor whose rank is either 0, or the same rank as + * `losses`, and must be broadcastable to `losses` (i.e., all + * dimensions must be either `1`, or the same as the corresponding + * `losses` dimension). + * + * @doc {heading: 'Training', subheading: 'Losses', namespace: 'losses'} + */ + function computeWeightedLoss_(losses, weights, reduction = exports.Reduction.SUM_BY_NONZERO_WEIGHTS) { + const $losses = convertToTensor(losses, 'losses', 'computeWeightedLoss'); + let $weights = null; + if (weights != null) { + $weights = convertToTensor(weights, 'weights', 'computeWeightedLoss'); + } + const weightedLoss = ($weights == null) ? $losses : mul($losses, $weights); + if (reduction === exports.Reduction.NONE) { + return weightedLoss; + } + if (reduction === exports.Reduction.SUM) { + return sum$3(weightedLoss); + } + if (reduction === exports.Reduction.MEAN) { + if ($weights == null) { + return mean$3(weightedLoss); + } + else { + const broadcastFactor = $losses.size / $weights.size; + const result = div$1(sum$3(weightedLoss), sum$3($weights)); + return broadcastFactor > 1 ? 
div$1(result, scalar(broadcastFactor)) : + result; + } + } + if (reduction === exports.Reduction.SUM_BY_NONZERO_WEIGHTS) { + if ($weights == null) { + return div$1(sum$3(weightedLoss), scalar($losses.size)); + } + else { + const broadcastedWeights = mul($weights, ones$1($losses.shape)); + const numNonZeros = cast$3(sum$3(notEqual$2(broadcastedWeights, scalar(0))), 'float32'); + return div$1(sum$3(weightedLoss), numNonZeros); + } + } + throw Error(`Unknown reduction: ${reduction}`); + } + const computeWeightedLoss$1 = /* @__PURE__ */ op({ computeWeightedLoss_ }); + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + /** + * Computes the absolute difference loss between two tensors. + * + * @param labels The ground truth output tensor, same dimensions as + * 'predictions'. + * @param predictions The predicted outputs. + * @param weights Tensor whose rank is either 0, or the same rank as + * `labels`, and must be broadcastable to `labels` (i.e., all dimensions + * must be either `1`, or the same as the corresponding `losses` + * dimension). + * @param reduction Type of reduction to apply to loss. 
Should be of type + * `Reduction` + * + * @doc {heading: 'Training', subheading: 'Losses', namespace: 'losses'} + */ + function absoluteDifference_(labels, predictions, weights, reduction = exports.Reduction.SUM_BY_NONZERO_WEIGHTS) { + const $labels = convertToTensor(labels, 'labels', 'absoluteDifference'); + const $predictions = convertToTensor(predictions, 'predictions', 'absoluteDifference'); + let $weights = null; + if (weights != null) { + $weights = convertToTensor(weights, 'weights', 'absoluteDifference'); + } + assertShapesMatch($labels.shape, $predictions.shape, 'Error in absoluteDifference: '); + const losses = abs$2(sub$2($labels, $predictions)); + return computeWeightedLoss$1(losses, $weights, reduction); + } + const absoluteDifference = /* @__PURE__ */ op({ absoluteDifference_ }); + + /** + * Computes the cosine distance loss between two tensors. + * + * @param labels The ground truth output tensor, same dimensions as + * 'predictions'. + * @param predictions The predicted outputs. + * @param axis The dimension along which the cosine distance is computed. + * @param weights Tensor whose rank is either 0, or the same rank as + * `labels`, and must be broadcastable to `labels` (i.e., all dimensions + * must be either `1`, or the same as the corresponding `losses` + * dimension). + * @param reduction Type of reduction to apply to loss. 
Should be of type + * `Reduction` + * + * @doc {heading: 'Training', subheading: 'Losses', namespace: 'losses'} + */ + function cosineDistance_(labels, predictions, axis, weights, reduction = exports.Reduction.SUM_BY_NONZERO_WEIGHTS) { + const $labels = convertToTensor(labels, 'labels', 'cosineDistance'); + const $predictions = convertToTensor(predictions, 'predictions', 'cosineDistance'); + let $weights = null; + if (weights != null) { + $weights = convertToTensor(weights, 'weights', 'cosineDistance'); + } + assertShapesMatch($labels.shape, $predictions.shape, 'Error in cosineDistance: '); + const one = scalar(1); + const losses = sub$2(one, sum$3(mul($labels, $predictions), axis, true)); + return computeWeightedLoss$1(losses, $weights, reduction); + } + const cosineDistance = /* @__PURE__ */ op({ cosineDistance_ }); + + /** + * Computes the Hinge loss between two tensors. + * + * @param labels The ground truth output tensor, same dimensions as + * 'predictions'. + * @param predictions The predicted outputs. + * @param weights Tensor whose rank is either 0, or the same rank as + * `labels`, and must be broadcastable to `labels` (i.e., all dimensions + * must be either `1`, or the same as the corresponding `losses` + * dimension). + * @param reduction Type of reduction to apply to loss. 
Should be of type + * `Reduction` + * + * @doc {heading: 'Training', subheading: 'Losses', namespace: 'losses'} + */ + function hingeLoss_(labels, predictions, weights, reduction = exports.Reduction.SUM_BY_NONZERO_WEIGHTS) { + let $labels = convertToTensor(labels, 'labels', 'hingeLoss'); + const $predictions = convertToTensor(predictions, 'predictions', 'hingeLoss'); + let $weights = null; + if (weights != null) { + $weights = convertToTensor(weights, 'weights', 'hingeLoss'); + } + assertShapesMatch($labels.shape, $predictions.shape, 'Error in hingeLoss: '); + const one = scalar(1); + // Convert binary labels to (-1, 1) + $labels = sub$2(mul(scalar(2), $labels), one); + const losses = relu$2(sub$2(one, mul($labels, $predictions))); + return computeWeightedLoss$1(losses, $weights, reduction); + } + const hingeLoss = /* @__PURE__ */ op({ hingeLoss_ }); + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + /** + * Computes the Huber loss between two tensors. + * + * @param labels The ground truth output tensor, same dimensions as + * 'predictions'. + * @param predictions The predicted outputs. 
+ * @param weights Tensor whose rank is either 0, or the same rank as + * `labels`, and must be broadcastable to `labels` (i.e., all dimensions + * must be either `1`, or the same as the corresponding `losses` + * dimension). + * @param delta Point where Huber loss changes from quadratic to linear. + * @param reduction Type of reduction to apply to loss. Should be of type + * `Reduction`. + * + * @doc {heading: 'Training', subheading: 'Losses', namespace: 'losses'} + */ + function huberLoss_(labels, predictions, weights, delta = 1.0, reduction = exports.Reduction.SUM_BY_NONZERO_WEIGHTS) { + const $labels = convertToTensor(labels, 'labels', 'huberLoss'); + const $predictions = convertToTensor(predictions, 'predictions', 'huberLoss'); + let $weights = null; + if (weights != null) { + $weights = convertToTensor(weights, 'weights', 'huberLoss'); + } + assertShapesMatch($labels.shape, $predictions.shape, 'Error in huberLoss: '); + const deltaScalar = scalar(delta); + const error = abs$2(sub$2($predictions, $labels)); + const quadratic = minimum$4(error, deltaScalar); + const linear = sub$2(error, quadratic); + const losses = add$3(mul(scalar(0.5), square$2(quadratic)), mul(deltaScalar, linear)); + return computeWeightedLoss$1(losses, $weights, reduction); + } + const huberLoss = /* @__PURE__ */ op({ huberLoss_ }); + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ + /** + * Computes the log loss between two tensors. + * + * @param labels The ground truth output tensor, same dimensions as + * 'predictions'. + * @param predictions The predicted outputs. + * @param weights Tensor whose rank is either 0, or the same rank as + * `labels`, and must be broadcastable to `labels` (i.e., all dimensions + * must be either `1`, or the same as the corresponding `losses` + * dimension). + * @param epsilon A small increment to avoid taking log of zero + * @param reduction Type of reduction to apply to loss. Should be of type + * `Reduction` + * + * @doc {heading: 'Training', subheading: 'Losses', namespace: 'losses'} + */ + function logLoss_(labels, predictions, weights, epsilon = 1e-7, reduction = exports.Reduction.SUM_BY_NONZERO_WEIGHTS) { + const $labels = convertToTensor(labels, 'labels', 'logLoss'); + const $predictions = convertToTensor(predictions, 'predictions', 'logLoss'); + let $weights = null; + if (weights != null) { + $weights = convertToTensor(weights, 'weights', 'logLoss'); + } + assertShapesMatch($labels.shape, $predictions.shape, 'Error in logLoss: '); + const one = scalar(1); + const epsilonScalar = scalar(epsilon); + const l1 = neg$2(mul($labels, log$2(add$3($predictions, epsilonScalar)))); + const l2 = mul(sub$2(one, $labels), log$2(add$3(sub$2(one, $predictions), epsilonScalar))); + const losses = sub$2(l1, l2); + return computeWeightedLoss$1(losses, $weights, reduction); + } + const logLoss = /* @__PURE__ */ op({ logLoss_ }); + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + /** + * Computes the mean squared error between two tensors. + * + * @param labels The ground truth output tensor, same dimensions as + * 'predictions'. + * @param predictions The predicted outputs. + * @param weights Tensor whose rank is either 0, or the same rank as + * `labels`, and must be broadcastable to `labels` (i.e., all dimensions + * must be either `1`, or the same as the corresponding `losses` + * dimension). + * @param reduction Type of reduction to apply to loss. Should be of type + * `Reduction` + * + * @doc {heading: 'Training', subheading: 'Losses', namespace: 'losses'} + */ + function meanSquaredError_(labels, predictions, weights, reduction = exports.Reduction.SUM_BY_NONZERO_WEIGHTS) { + const $labels = convertToTensor(labels, 'labels', 'meanSquaredError'); + const $predictions = convertToTensor(predictions, 'predictions', 'meanSquaredError'); + let $weights = null; + if (weights != null) { + $weights = convertToTensor(weights, 'weights', 'meanSquaredError'); + } + assertShapesMatch($labels.shape, $predictions.shape, 'Error in meanSquaredError: '); + const losses = squaredDifference$2($labels, $predictions); + return computeWeightedLoss$1(losses, $weights, reduction); + } + const meanSquaredError$2 = /* @__PURE__ */ op({ meanSquaredError_ }); + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. 
+ * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + function sigmoidCrossEntropyWithLogits_(labels, logits) { + const $labels = convertToTensor(labels, 'labels', 'sigmoidCrossEntropyWithLogits'); + const $logits = convertToTensor(logits, 'logits', 'sigmoidCrossEntropyWithLogits'); + assertShapesMatch($labels.shape, $logits.shape, 'Error in sigmoidCrossEntropyWithLogits: '); + /** + * Implementation Details: + * + * For brevity, let `x = logits`, `z = labels`. 
The logistic loss is + * z * -log(sigmoid(x)) + (1 - z) * -log(1 - sigmoid(x)) + * = z * -log(1 / (1 + exp(-x))) + (1 - z) * -log(exp(-x) / (1 + exp(-x))) + * = z * log(1 + exp(-x)) + (1 - z) * (-log(exp(-x)) + log(1 + exp(-x))) + * = z * log(1 + exp(-x)) + (1 - z) * (x + log(1 + exp(-x)) + * = (1 - z) * x + log(1 + exp(-x)) + * = x - x * z + log(1 + exp(-x)) + * + * For x < 0, to avoid overflow in exp(-x), we reformulate the above + * x - x * z + log(1 + exp(-x)) + * = log(exp(x)) - x * z + log(1 + exp(-x)) + * = - x * z + log(1 + exp(x)) + * + * Hence, to ensure stability and avoid overflow, the implementation uses + * this equivalent formulation: + * max(x, 0) - x * z + log(1 + exp(-abs(x))) + */ + const maxOutput = relu$2($logits); + const outputXTarget = mul($logits, $labels); + const sigmoidOutput = log1p$2(exp$2(neg$2(abs$2($logits)))); + return add$3(sub$2(maxOutput, outputXTarget), sigmoidOutput); + } + /** + * Computes the sigmoid cross entropy loss between two tensors. + * + * If labelSmoothing is nonzero, smooth the labels towards 1/2: + * + * newMulticlassLabels = multiclassLabels * (1 - labelSmoothing) + * + 0.5 * labelSmoothing + * + * @param multiClassLabels The ground truth output tensor of shape + * [batch_size, num_classes], same dimensions as 'predictions'. + * @param logits The predicted outputs. + * @param weights Tensor whose rank is either 0, or the same rank as + * `labels`, and must be broadcastable to `labels` (i.e., all dimensions + * must be either `1`, or the same as the corresponding `losses` + * dimension). + * @param labelSmoothing If greater than 0, then smooth the labels. + * @param reduction Type of reduction to apply to loss. 
Should be of type + * `Reduction` + * + * @doc { heading: 'Training', subheading: 'Losses', namespace: 'losses' } + */ + function sigmoidCrossEntropy_(multiClassLabels, logits, weights, labelSmoothing = 0, reduction = exports.Reduction.SUM_BY_NONZERO_WEIGHTS) { + let $multiClassLabels = convertToTensor(multiClassLabels, 'multiClassLabels', 'sigmoidCrossEntropy'); + const $logits = convertToTensor(logits, 'logits', 'sigmoidCrossEntropy'); + let $weights = null; + if (weights != null) { + $weights = convertToTensor(weights, 'weights', 'sigmoidCrossEntropy'); + } + assertShapesMatch($multiClassLabels.shape, $logits.shape, 'Error in sigmoidCrossEntropy: '); + if (labelSmoothing > 0) { + const labelSmoothingScalar = scalar(labelSmoothing); + const one = scalar(1); + const half = scalar(0.5); + $multiClassLabels = + add$3(mul($multiClassLabels, sub$2(one, labelSmoothingScalar)), mul(half, labelSmoothingScalar)); + } + const losses = sigmoidCrossEntropyWithLogits_($multiClassLabels, $logits); + return computeWeightedLoss$1(losses, $weights, reduction); + } + const sigmoidCrossEntropy = /* @__PURE__ */ op({ sigmoidCrossEntropy_ }); + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + /** + * Computes softmax cross entropy between logits and labels. 
+ * + * Measures the probability error in discrete classification tasks in which + * the classes are mutually exclusive (each entry is in exactly one class). + * For example, each CIFAR-10 image is labeled with one and only one label: an + * image can be a dog or a truck, but not both. + * + * `NOTE`: While the classes are mutually exclusive, their probabilities need + * not be. All that is required is that each row of labels is a valid + * probability distribution. If they are not, the computation of the gradient + * will be incorrect. + * + * `WARNING`: This op expects unscaled logits, since it performs a softmax on + * logits internally for efficiency. Do not call this op with the output of + * softmax, as it will produce incorrect results. + * + * logits and labels must have the same shape, e.g. [batch_size, num_classes] + * and the same dtype. + * @param labels The labels array. + * @param logits The logits array. + * @param dim The dimension softmax would be performed on. Defaults to `-1` + * which indicates the last dimension. + */ + function softmaxCrossEntropyWithLogits_(labels, logits, dim = -1) { + if (dim === -1) { + dim = logits.rank - 1; + } + if (dim !== logits.rank - 1) { + throw Error(`Softmax cross entropy along a non-last dimension is not yet ` + + `supported. Labels / logits was rank ${logits.rank} ` + + `and dim was ${dim}`); + } + // Use a custom gradient for numerical stability. + const customOp = customGrad((labels, logits, save) => { + // Reference: + // 1. http://cs231n.github.io/linear-classify/#softmax + // 2. 
https://blog.feedly.com/tricks-of-the-trade-logsumexp/ + const keepDims = true; + const lse = logSumExp(logits, [dim], keepDims); + const logResult = sub$2(cast$3(logits, 'float32'), lse); + save([labels, logResult]); + const costVector = neg$2(mul(logResult, labels)); + const value = sum$3(costVector, [dim]); + const gradFunc = (dy, saved) => { + const [labels, logResult] = saved; + const dyShape = expandShapeToKeepDim(dy.shape, [dim]); + return [ + mul(reshape$3(dy, dyShape), sub$2(cast$3(labels, 'float32'), exp$2(logResult))), + mul(reshape$3(dy, dyShape), sub$2(exp$2(logResult), cast$3(labels, 'float32'))), + ]; + }; + return { value, gradFunc }; + }); + return customOp(labels, logits); + } + /** + * Computes the softmax cross entropy loss between two tensors. + * + * If labelSmoothing is nonzero, smooth the labels towards 1/2: + * + * newOnehotLabels = onehotLabels * (1 - labelSmoothing) + * + labelSmoothing / numClasses + * + * @param onehotLabels One hot encoded labels + * [batch_size, num_classes], same dimensions as 'predictions'. + * @param logits The predicted outputs. + * @param weights Tensor whose rank is either 0, or 1, and must be + * broadcastable to `loss` of shape [batch_size] + * @param labelSmoothing If greater than 0, then smooth the labels. + * @param reduction Type of reduction to apply to loss. 
Should be of type + * `Reduction` + * + * @doc { heading: 'Training', subheading: 'Losses', namespace: 'losses' } + */ + function softmaxCrossEntropy_(onehotLabels, logits, weights, labelSmoothing = 0, reduction = exports.Reduction.SUM_BY_NONZERO_WEIGHTS) { + let $onehotLabels = convertToTensor(onehotLabels, 'onehotLabels', 'softmaxCrossEntropy'); + const $logits = convertToTensor(logits, 'logits', 'softmaxCrossEntropy'); + let $weights = null; + if (weights != null) { + $weights = convertToTensor(weights, 'weights', 'softmaxCrossEntropy'); + } + assertShapesMatch($onehotLabels.shape, $logits.shape, 'Error in softmaxCrossEntropy: '); + if (labelSmoothing > 0) { + const labelSmoothingScalar = scalar(labelSmoothing); + const one = scalar(1); + const numClasses = scalar($onehotLabels.shape[1]); + $onehotLabels = + add$3(mul($onehotLabels, sub$2(one, labelSmoothingScalar)), div$1(labelSmoothingScalar, numClasses)); + } + const losses = softmaxCrossEntropyWithLogits_($onehotLabels, $logits); + return computeWeightedLoss$1(losses, $weights, reduction); + } + const softmaxCrossEntropy = /* @__PURE__ */ op({ softmaxCrossEntropy_ }); + + /** + * @license + * Copyright 2021 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + /** + * The input SparseTensor is represented via the map of inputs {`indices`, + * `values`, `denseShape`}. 
The output SparseTensor has the same `denseShape` + * but with indices `outputIndices` and values `outputValues`. This op inserts a + * single entry for every row that doesn't have any values. The index is created + * as `[row, 0, ..., 0]` and the inserted value is `defaultValue`. + * + * For example, suppose `spInput` has shape [5, 6] and non-empty values: + * [0, 1]: a + * [0, 3]: b + * [2, 0]: c + * [3, 1]: d + * + * Rows 1 and 4 are empty, so the output will be of shape [5, 6] with values: + * [0, 1]: a + * [0, 3]: b + * [1, 0]: `defaultValue` + * [2, 0]: c + * [3, 1]: d + * [4, 0]: `defaultValue` + * + * The output SparseTensor will be in row-major order and will have the same + * shape as the input. + * + * This op also returns an indicator vector shaped [dense_shape[0]] such that + * emptyRowIndicator[i] = True iff row i was an empty row. + * + * And a reverse index map vector shaped [indices.shape[0]] that is used during + * backpropagation, reverseIndexMap[i] = outi s.t. indices[i, j] == + * outputIndices[outi, j] for all j + * + * ```js + * const result = tf.sparse.sparseFillEmptyRows( + * [[0, 0], [1, 0], [1, 3], [1, 4], [3, 2], [3, 3]], + * [0, 10, 13, 14, 32, 33], [5, 6], -1); + * console.log(result); + * result['outputIndices'].print(); // [[0, 0], [1, 0], [1, 3], [1, 4], + * // [2, 0], [3, 2], [3, 3], [4, 0]] + * result['outputValues'].print(); // [0, 10, 13, 14,-1, 32, 33, -1] + * result['emptyRowIndicator'].print(); // [false, false, true, false, true] + * result['reverseIndexMap'].print(); // [0, 1, 2, 3, 5, 6] + * ``` + * @param indices: 2-D. The indices of the sparse tensor. + * @param values: 1-D. The values of the sparse tensor. + * @param denseShape: 1-D. The shape of the sparse tensor. + * @param defaultValue: 0-D. Default value to insert into location [row, 0, ..., + * 0] for rows missing from the input sparse tensor. + * @return A map with the following properties: + * - outputIndices + * - outputValues: 1-D. 
The values of the filled sparse tensor. + * - emptyRowIndicator: 1-D. Whether the dense row was missing in the input + * sparse tensor. + * - reverseIndexMap: 1-D. A map from the input indices to the output + * indices. + * @doc {heading: 'Operations', subheading: 'Sparse'} + */ + function sparseFillEmptyRows_(indices, values, denseShape, defaultValue) { + const $indices = convertToTensor(indices, 'indices', 'sparseFillEmptyRows', 'int32'); + const $values = convertToTensor(values, 'values', 'sparseFillEmptyRows'); + const $denseShape = convertToTensor(denseShape, 'denseShape', 'sparseFillEmptyRows', 'int32'); + const $defaultValue = convertToTensor(defaultValue, 'defaultValue', 'sparseFillEmptyRows', $values.dtype); + if ($indices.rank !== 2) { + throw new Error(`Indices should be Tensor2D but received shape + ${$indices.shape}`); + } + if ($values.rank !== 1) { + throw new Error(`Values should be Tensor1D but received shape ${$values.shape}`); + } + if ($denseShape.rank !== 1) { + throw new Error(`Dense shape should be Tensor1D but received shape ${$denseShape.shape}`); + } + if ($defaultValue.rank !== 0) { + throw new Error(`Default value should be a scalar but received shape ${$defaultValue.shape}`); + } + const inputs = { + indices: $indices, + values: $values, + denseShape: $denseShape, + defaultValue: $defaultValue + }; + const result = ENGINE.runKernel(SparseFillEmptyRows, inputs); + return { + outputIndices: result[0], + outputValues: result[1], + emptyRowIndicator: result[2], + reverseIndexMap: result[3] + }; + } + const sparseFillEmptyRows$2 = /* @__PURE__ */ op({ sparseFillEmptyRows_ }); + + /** + * @license + * Copyright 2021 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + /** + * This operation has the same semantics as reshape on the represented dense + * tensor. The `inputIndices` are recomputed based on the requested `newShape`. + * If one component of `newShape` is the special value -1, the size of that + * dimension is computed so that the total dense size remains constant. At most + * one component of `newShape` can be -1. The number of dense elements implied + * by `newShape` must be the same as the number of dense elements originally + * implied by `inputShape`. Reshaping does not affect the order of values in the + * SparseTensor. If the input tensor has rank R_in and N non-empty values, and + * `newShape` has length R_out, then `inputIndices` has shape [N, R_in], + * `inputShape` has length R_in, `outputIndices` has shape [N, R_out], and + * `outputShape` has length R_out. + * + * ```js + * const result = tf.sparse.sparseReshape( + * [[0, 0, 0], [0, 0, 1], [0, 1, 0], [1, 0, 0], [1, 2, 3]], + * [2, 3, 6], [9, -1]); + * console.log(result); + * result['outputIndices'].print(); //[[0, 0], [0, 1], [1, 2], [4, 2], [8, 1]] + * result['outputShape'].print(); // [9, 4] + * ``` + * @param inputIndices: 2-D. N x R_in matrix with the indices of non-empty + * values in a SparseTensor. + * @param inputShape: 1-D. R_in Tensor1D with the input SparseTensor's dense + * shape. + * @param newShape: 1-D. R_out Tensor1D with the requested new dense shape. + * @return A map with the following properties: + * - outputIndices: 2-D. 
N x R_out matrix with the updated indices of + * non-empty values in the output SparseTensor. + * - outputShape: 1-D. R_out vector with the full dense shape of the output + * SparseTensor. This is the same as newShape but with any -1 dimensions + * filled in. + * @doc {heading: 'Operations', subheading: 'Sparse'} + */ + function sparseReshape_(inputIndices, inputShape, newShape) { + const $inputIndices = convertToTensor(inputIndices, 'inputIndices', 'sparseReshape', 'int32'); + const $inputShape = convertToTensor(inputShape, 'inputShape', 'sparseReshape', 'int32'); + const $newShape = convertToTensor(newShape, 'newShape', 'sparseReshape', 'int32'); + if ($inputIndices.rank !== 2) { + throw new Error(`Input indices should be Tensor2D but received shape + ${$inputIndices.shape}`); + } + if ($inputShape.rank !== 1) { + throw new Error(`Input shape should be Tensor1D but received shape ${$inputShape.shape}`); + } + if ($newShape.rank !== 1) { + throw new Error(`New shape should be Tensor1D but received shape ${$newShape.shape}`); + } + const inputs = { + inputIndices: $inputIndices, + inputShape: $inputShape, + newShape: $newShape + }; + const result = ENGINE.runKernel(SparseReshape, inputs); + return { outputIndices: result[0], outputShape: result[1] }; + } + const sparseReshape$2 = /* @__PURE__ */ op({ sparseReshape_ }); + + /** + * @license + * Copyright 2021 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ + /** + * Computes the mean along sparse segments of a tensor. + * + * ```js + * const c = tf.tensor2d([[1,2,3,4], [-1,-2,-3,-4], [6,7,8,9]]); + * // Select two rows, one segment. + * const result1 = tf.sparse.sparseSegmentMean(c, + * tf.tensor1d([0, 1], 'int32'), + * tf.tensor1d([0, 0], 'int32')); + * result1.print(); // [[0, 0, 0, 0]] + * + * // Select two rows, two segments. + * const result2 = tf.sparse.sparseSegmentMean(c, + * tf.tensor1d([0, 1], 'int32'), + * tf.tensor1d([0, 1], 'int32')); + * result2.print(); // [[1, 2, 3, 4], [-1, -2, -3, -4]] + * + * // Select all rows, two segments. + * const result3 = tf.sparse.sparseSegmentMean(c, + * tf.tensor1d([0, 1, 2], 'int32'), + * tf.tensor1d([0, 1, 1], 'int32')); + * result3.print(); // [[1.0, 2.0, 3.0, 4.0], [2.5, 2.5, 2.5, 2.5]] + * ``` + * @param data: A Tensor of at least one dimension with data that will be + * assembled in the output. + * @param indices: A 1-D Tensor with indices into data. Has same rank as + * segmentIds. + * @param segmentIds: A 1-D Tensor with indices into the output Tensor. Values + * should be sorted and can be repeated. + * @return Has same shape as data, except for dimension 0 which has equal to + * the number of segments. 
+ * + * @doc {heading: 'Operations', subheading: 'Sparse'} + */ + function sparseSegmentMean_(data, indices, segmentIds) { + const $data = convertToTensor(data, 'data', 'sparseSegmentMean'); + const $indices = convertToTensor(indices, 'indices', 'sparseSegmentMean', 'int32'); + const $segmentIds = convertToTensor(segmentIds, 'segmentIds', 'sparseSegmentMean', 'int32'); + if ($data.rank < 1) { + throw new Error(`Data should be at least 1 dimensional but received scalar`); + } + if ($indices.rank !== 1) { + throw new Error(`Indices should be Tensor1D but received shape + ${$indices.shape}`); + } + if ($segmentIds.rank !== 1) { + throw new Error(`Segment ids should be Tensor1D but received shape + ${$segmentIds.shape}`); + } + const inputs = { + data: $data, + indices: $indices, + segmentIds: $segmentIds + }; + return ENGINE.runKernel(SparseSegmentMean, inputs); + } + const sparseSegmentMean$2 = /* @__PURE__ */ op({ sparseSegmentMean_ }); + + /** + * @license + * Copyright 2021 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + /** + * Computes the sum along sparse segments of a tensor. + * + * ```js + * const c = tf.tensor2d([[1,2,3,4], [-1,-2,-3,-4], [5,6,7,8]]); + * // Select two rows, one segment. 
+ * const result1 = tf.sparse.sparseSegmentSum(c, + * tf.tensor1d([0, 1], 'int32'), + * tf.tensor1d([0, 0], 'int32')); + * result1.print(); // [[0, 0, 0, 0]] + * + * // Select two rows, two segments. + * const result2 = tf.sparse.sparseSegmentSum(c, + * tf.tensor1d([0, 1], 'int32'), + * tf.tensor1d([0, 1], 'int32')); + * result2.print(); // [[1, 2, 3, 4], [-1, -2, -3, -4]] + * + * // Select all rows, two segments. + * const result3 = tf.sparse.sparseSegmentSum(c, + * tf.tensor1d([0, 1, 2], 'int32'), + * tf.tensor1d([0, 0, 1], 'int32')); + * result3.print(); // [[0, 0, 0, 0], [5, 6, 7, 8]] + * ``` + * @param data: A Tensor of at least one dimension with data that will be + * assembled in the output. + * @param indices: A 1-D Tensor with indices into data. Has same rank as + * segmentIds. + * @param segmentIds: A 1-D Tensor with indices into the output Tensor. Values + * should be sorted and can be repeated. + * @return Has same shape as data, except for dimension 0 which has equal to + * the number of segments. 
+ * + * @doc {heading: 'Operations', subheading: 'Sparse'} + */ + function sparseSegmentSum_(data, indices, segmentIds) { + const $data = convertToTensor(data, 'data', 'sparseSegmentSum'); + const $indices = convertToTensor(indices, 'indices', 'sparseSegmentSum', 'int32'); + const $segmentIds = convertToTensor(segmentIds, 'segmentIds', 'sparseSegmentSum', 'int32'); + if ($data.rank < 1) { + throw new Error(`Data should be at least 1 dimensional but received scalar`); + } + if ($indices.rank !== 1) { + throw new Error(`Indices should be Tensor1D but received shape + ${$indices.shape}`); + } + if ($segmentIds.rank !== 1) { + throw new Error(`Segment ids should be Tensor1D but received shape + ${$segmentIds.shape}`); + } + const inputs = { + data: $data, + indices: $indices, + segmentIds: $segmentIds + }; + return ENGINE.runKernel(SparseSegmentSum, inputs); + } + const sparseSegmentSum$2 = /* @__PURE__ */ op({ sparseSegmentSum_ }); + + /** + * @license + * Copyright 2021 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + /** + * Creates ngrams from ragged string data. + * + * This op accepts a ragged tensor with 1 ragged dimension containing only + * strings and outputs a ragged tensor with 1 ragged dimension containing ngrams + * of that string, joined along the innermost axis. 
+ * + * ```js + * const result = tf.string.stringNGrams( + * ['a', 'b', 'c', 'd'], tf.tensor1d([0, 2, 4], 'int32'), + * '|', [1, 2], 'LP', 'RP', -1, false); + * result['nGrams'].print(); // ['a', 'b', 'LP|a', 'a|b', 'b|RP', + * // 'c', 'd', 'LP|c', 'c|d', 'd|RP'] + * result['nGramsSplits'].print(); // [0, 5, 10] + * ``` + * @param data: The values tensor of the ragged string tensor to make ngrams out + * of. Must be a 1D string tensor. + * @param dataSplits: The splits tensor of the ragged string tensor to make + * ngrams out of. + * @param separator: The string to append between elements of the token. Use "" + * for no separator. + * @param nGramWidths: The sizes of the ngrams to create. + * @param leftPad: The string to use to pad the left side of the ngram sequence. + * Only used if pad_width !== 0. + * @param rightPad: The string to use to pad the right side of the ngram + * sequence. Only used if pad_width !== 0. + * @param padWidth: The number of padding elements to add to each side of each + * sequence. Note that padding will never be greater than `nGramWidths`-1 + * regardless of this value. If `padWidth`=-1, then add max(`nGramWidths`)-1 + * elements. + * @param preserveShortSequences: If true, then ensure that at least one ngram + * is generated for each input sequence. In particular, if an input sequence + * is shorter than min(ngramWidth) + 2*padWidth, then generate a single + * ngram containing the entire sequence. If false, then no ngrams are + * generated for these short input sequences. + * @return A map with the following properties: + * - nGrams: The values tensor of the output ngrams ragged tensor. + * - nGramsSplits: The splits tensor of the output ngrams ragged tensor. 
+ * + * @doc {heading: 'Operations', subheading: 'String'} + */ + function stringNGrams_(data, dataSplits, separator, nGramWidths, leftPad, rightPad, padWidth, preserveShortSequences) { + const $data = convertToTensor(data, 'data', 'stringNGrams', 'string'); + if ($data.dtype !== 'string') { + throw new Error('Data must be of datatype string'); + } + if ($data.shape.length !== 1) { + throw new Error(`Data must be a vector, saw: ${$data.shape}`); + } + const $dataSplits = convertToTensor(dataSplits, 'dataSplits', 'stringNGrams'); + if ($dataSplits.dtype !== 'int32') { + throw new Error('Data splits must be of datatype int32'); + } + const attrs = { + separator, + nGramWidths, + leftPad, + rightPad, + padWidth, + preserveShortSequences + }; + const inputs = { data: $data, dataSplits: $dataSplits }; + const result = ENGINE.runKernel(StringNGrams, inputs, attrs); + return { nGrams: result[0], nGramsSplits: result[1] }; + } + const stringNGrams$2 = /* @__PURE__ */ op({ stringNGrams_ }); + + /** + * @license + * Copyright 2021 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + /** + * Split elements of `input` based on `delimiter` into a SparseTensor . + * + * Let N be the size of source (typically N will be the batch size). Split each + * element of `input` based on `delimiter` and return a SparseTensor containing + * the splitted tokens. 
Empty tokens are ignored if `skipEmpty` is set to True. + * + * `delimiter` can be empty, or a string of split characters. If `delimiter` is + * an empty string, each element of `input` is split into individual + * character strings. Otherwise every character of `delimiter` is a potential + * split point. + * + * ```js + * const result = tf.string.stringSplit(['hello world', 'a b c'], ' '); + * result['indices'].print(); // [[0, 0], [0, 1], [1, 0], [1, 1], [1, 2]] + * result['values'].print(); // ['hello', 'world', 'a', 'b', 'c'] + * result['shape'].print(); // [2, 3] + * ``` + * @param input: 1-D. Strings to split. + * @param delimiter: 0-D. Delimiter characters, or empty string. + * @param skipEmpty: Optional. If true, skip the empty strings from the result. + * Defaults to true. + * @return A map with the following properties: + * - indices: A dense matrix of int32 representing the indices of the sparse + * tensor. + * - values: A vector of strings corresponding to the splited values. + * - shape: a length-2 vector of int32 representing the shape of the sparse + * tensor, where the first value is N and the second value is the maximum number + * of tokens in a single input entry. 
+ * + * @doc {heading: 'Operations', subheading: 'String'} + */ + function stringSplit_(input, delimiter, skipEmpty = true) { + const $input = convertToTensor(input, 'input', 'stringSplit', 'string'); + const $delimiter = convertToTensor(delimiter, 'delimiter', 'stringSplit', 'string'); + if ($input.rank !== 1) { + throw new Error(`Input should be Tensor1D but received shape ${$input.shape}`); + } + if ($delimiter.rank !== 0) { + throw new Error(`Delimiter should be a scalar but received shape ${$delimiter.shape}`); + } + const attrs = { skipEmpty }; + const inputs = { input: $input, delimiter: $delimiter }; + const result = ENGINE.runKernel(StringSplit, inputs, attrs); + return { indices: result[0], values: result[1], shape: result[2] }; + } + const stringSplit$2 = /* @__PURE__ */ op({ stringSplit_ }); + + /** + * @license + * Copyright 2021 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + /** + * Converts each string in the input Tensor to its hash mod by a number of + * buckets. + * + * The hash function is deterministic on the content of the string within the + * process and will never change. However, it is not suitable for cryptography. + * This function may be used when CPU time is scarce and inputs are trusted or + * unimportant. There is a risk of adversaries constructing inputs that all hash + * to the same bucket. 
+ * + * ```js + * const result = tf.string.stringToHashBucketFast( + * ['Hello', 'TensorFlow', '2.x'], 3); + * result.print(); // [0, 2, 2] + * ``` + * @param input: The strings to assign a hash bucket. + * @param numBuckets: The number of buckets. + * @return A Tensor of the same shape as the input tensor. + * + * @doc {heading: 'Operations', subheading: 'String'} + */ + function stringToHashBucketFast_(input, numBuckets) { + const $input = convertToTensor(input, 'input', 'stringToHashBucketFast', 'string'); + const attrs = { numBuckets }; + if (numBuckets <= 0) { + throw new Error(`Number of buckets must be at least 1`); + } + const inputs = { input: $input }; + return ENGINE.runKernel(StringToHashBucketFast, inputs, attrs); + } + const stringToHashBucketFast$2 = /* @__PURE__ */ op({ stringToHashBucketFast_ }); + + /** + * @license + * Copyright 2023 Google LLC. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + /** + * Replace the match of a `pattern` in `input` with `rewrite`. + * + * ```js + * const result = tf.string.staticRegexReplace( + * ['format this spacing better'], ' +', ' '); + * result.print(); // ['format this spacing better'] + * ``` + * @param input: A Tensor of type string. The text to be processed. + * @param pattern: A string. The regular expression to match the input. + * @param rewrite: A string. The rewrite to be applied to the matched + * expression. 
+ * @param replaceGlobal: An optional bool. Defaults to True. If True, the + * replacement is global, otherwise the replacement is done only on the + * first match. + * @return A Tensor of type string. + * + * @doc {heading: 'Operations', subheading: 'String'} + */ + function staticRegexReplace_(input, pattern, rewrite, replaceGlobal = true) { + const $input = convertToTensor(input, 'input', 'staticRegexReplace', 'string'); + const attrs = { pattern, rewrite, replaceGlobal }; + return ENGINE.runKernel(StaticRegexReplace, { x: $input }, attrs); + } + const staticRegexReplace$2 = /* @__PURE__ */ op({ staticRegexReplace_ }); + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ + const spectral$1 = { + fft: fft$2, + ifft: ifft$2, + rfft, + irfft + }; + const signal = { + hammingWindow, + hannWindow, + frame, + stft, + }; + const image$1 = { + flipLeftRight, + grayscaleToRGB, + resizeNearestNeighbor: resizeNearestNeighbor$2, + resizeBilinear: resizeBilinear$3, + rgbToGrayscale, + rotateWithOffset, + cropAndResize: cropAndResize$3, + nonMaxSuppression, + nonMaxSuppressionAsync, + nonMaxSuppressionWithScore, + nonMaxSuppressionWithScoreAsync, + nonMaxSuppressionPadded, + nonMaxSuppressionPaddedAsync, + threshold: threshold$1, + transform: transform$2 + }; + const linalg = { + bandPart, + gramSchmidt, + qr + }; + const losses = { + absoluteDifference, + computeWeightedLoss: computeWeightedLoss$1, + cosineDistance, + hingeLoss, + huberLoss, + logLoss, + meanSquaredError: meanSquaredError$2, + sigmoidCrossEntropy, + softmaxCrossEntropy + }; + const sparse$1 = { + sparseFillEmptyRows: sparseFillEmptyRows$2, + sparseReshape: sparseReshape$2, + sparseSegmentMean: sparseSegmentMean$2, + sparseSegmentSum: sparseSegmentSum$2 + }; + // tslint:disable-next-line:variable-name + const string$1 = { + stringNGrams: stringNGrams$2, + stringSplit: stringSplit$2, + stringToHashBucketFast: stringToHashBucketFast$2, + staticRegexReplace: staticRegexReplace$2, + }; + + /** + * @license + * Copyright 2018 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + /** + * Maps to mapping between the custom object and its name. + * + * After registering a custom class, these two maps will add key-value pairs + * for the class object and the registered name. + * + * Therefore we can get the relative registered name by calling + * getRegisteredName() function. + * + * For example: + * GLOBAL_CUSTOM_OBJECT: {key=registeredName: value=corresponding + * CustomObjectClass} + * + * GLOBAL_CUSTOM_NAMES: {key=CustomObjectClass: value=corresponding + * registeredName} + * + */ + const GLOBAL_CUSTOM_OBJECT = new Map(); + const GLOBAL_CUSTOM_NAMES = new Map(); + /** + * Serializable defines the serialization contract. + * + * TFJS requires serializable classes to return their className when asked + * to avoid issues with minification. + */ + class Serializable { + /** + * Return the class name for this class to use in serialization contexts. + * + * Generally speaking this will be the same thing that constructor.name + * would have returned. However, the class name needs to be robust + * against minification for serialization/deserialization to work properly. + * + * There's also places such as initializers.VarianceScaling, where + * implementation details between different languages led to different + * class hierarchies and a non-leaf node is used for serialization purposes. + */ + getClassName() { + return this.constructor + .className; + } + /** + * Creates an instance of T from a ConfigDict. + * + * This works for most descendants of serializable. A few need to + * provide special handling. + * @param cls A Constructor for the class to instantiate. + * @param config The Configuration for the object. 
+ */ + /** @nocollapse */ + static fromConfig(cls, config) { + return new cls(config); + } + } + /** + * Maps string keys to class constructors. + * + * Used during (de)serialization from the cross-language JSON format, which + * requires the class name in the serialization format matches the class + * names as used in Python, should it exist. + */ + class SerializationMap { + constructor() { + this.classNameMap = {}; + } + /** + * Returns the singleton instance of the map. + */ + static getMap() { + if (SerializationMap.instance == null) { + SerializationMap.instance = new SerializationMap(); + } + return SerializationMap.instance; + } + /** + * Registers the class as serializable. + */ + static register(cls) { + SerializationMap.getMap().classNameMap[cls.className] = + [cls, cls.fromConfig]; + } + } + /** + * Register a class with the serialization map of TensorFlow.js. + * + * This is often used for registering custom Layers, so they can be + * serialized and deserialized. + * + * Example 1. Register the class without package name and specified name. + * + * ```js + * class MyCustomLayer extends tf.layers.Layer { + * static className = 'MyCustomLayer'; + * + * constructor(config) { + * super(config); + * } + * } + * tf.serialization.registerClass(MyCustomLayer); + * console.log(tf.serialization.GLOBALCUSTOMOBJECT.get("Custom>MyCustomLayer")); + * console.log(tf.serialization.GLOBALCUSTOMNAMES.get(MyCustomLayer)); + * ``` + * + * Example 2. Register the class with package name: "Package" and specified + * name: "MyLayer". + * ```js + * class MyCustomLayer extends tf.layers.Layer { + * static className = 'MyCustomLayer'; + * + * constructor(config) { + * super(config); + * } + * } + * tf.serialization.registerClass(MyCustomLayer, "Package", "MyLayer"); + * console.log(tf.serialization.GLOBALCUSTOMOBJECT.get("Package>MyLayer")); + * console.log(tf.serialization.GLOBALCUSTOMNAMES.get(MyCustomLayer)); + * ``` + * + * Example 3. 
Register the class with specified name: "MyLayer". + * ```js + * class MyCustomLayer extends tf.layers.Layer { + * static className = 'MyCustomLayer'; + * + * constructor(config) { + * super(config); + * } + * } + * tf.serialization.registerClass(MyCustomLayer, undefined, "MyLayer"); + * console.log(tf.serialization.GLOBALCUSTOMOBJECT.get("Custom>MyLayer")); + * console.log(tf.serialization.GLOBALCUSTOMNAMES.get(MyCustomLayer)); + * ``` + * + * Example 4. Register the class with specified package name: "Package". + * ```js + * class MyCustomLayer extends tf.layers.Layer { + * static className = 'MyCustomLayer'; + * + * constructor(config) { + * super(config); + * } + * } + * tf.serialization.registerClass(MyCustomLayer, "Package"); + * console.log(tf.serialization.GLOBALCUSTOMOBJECT + * .get("Package>MyCustomLayer")); + * console.log(tf.serialization.GLOBALCUSTOMNAMES + * .get(MyCustomLayer)); + * ``` + * + * @param cls The class to be registered. It must have a public static member + * called `className` defined and the value must be a non-empty string. + * @param pkg The package name that this class belongs to. This used to define + * the key in GlobalCustomObject. If not defined, it defaults to `Custom`. + * @param name The name that user specified. It defaults to the actual name of + * the class as specified by its static `className` property. 
+ * @doc {heading: 'Models', subheading: 'Serialization', ignoreCI: true} + */ + function registerClass(cls, pkg, name) { + assert$1(cls.className != null, () => `Class being registered does not have the static className ` + + `property defined.`); + assert$1(typeof cls.className === 'string', () => `className is required to be a string, but got type ` + + typeof cls.className); + assert$1(cls.className.length > 0, () => `Class being registered has an empty-string as its className, ` + + `which is disallowed.`); + if (typeof pkg === 'undefined') { + pkg = 'Custom'; + } + if (typeof name === 'undefined') { + name = cls.className; + } + const className = name; + const registerName = pkg + '>' + className; + SerializationMap.register(cls); + GLOBAL_CUSTOM_OBJECT.set(registerName, cls); + GLOBAL_CUSTOM_NAMES.set(cls, registerName); + return cls; + } + /** + * Get the registered name of a class. If the class has not been registered, + * return the class name. + * + * @param cls The class we want to get register name for. It must have a public + * static member called `className` defined. + * @returns registered name or class name. + */ + function getRegisteredName(cls) { + if (GLOBAL_CUSTOM_NAMES.has(cls)) { + return GLOBAL_CUSTOM_NAMES.get(cls); + } + else { + return cls.className; + } + } + + var serialization = /*#__PURE__*/Object.freeze({ + __proto__: null, + Serializable: Serializable, + SerializationMap: SerializationMap, + getRegisteredName: getRegisteredName, + registerClass: registerClass + }); + + /** + * @license + * Copyright 2018 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + /** @doc {heading: 'Training', subheading: 'Classes', namespace: 'train'} */ + class Optimizer extends Serializable { + /** + * Executes `f()` and minimizes the scalar output of `f()` by computing + * gradients of y with respect to the list of trainable variables provided by + * `varList`. If no list is provided, it defaults to all trainable variables. + * + * @param f The function to execute and whose output to minimize. + * @param returnCost Whether to return the scalar cost value produced by + * executing `f()`. + * @param varList An optional list of variables to update. If specified, only + * the trainable variables in varList will be updated by minimize. Defaults to + * all trainable variables. + * + * @doc {heading: 'Training', subheading: 'Optimizers'} + */ + minimize(f, returnCost = false, varList) { + const { value, grads } = this.computeGradients(f, varList); + if (varList != null) { + const gradArray = varList.map(v => ({ name: v.name, tensor: grads[v.name] })); + this.applyGradients(gradArray); + } + else { + this.applyGradients(grads); + } + // Dispose gradients. + dispose(grads); + if (returnCost) { + return value; + } + else { + value.dispose(); + return null; + } + } + /** + * The number of iterations that this optimizer instance has been invoked for. 
+ */ + get iterations() { + if (this.iterations_ == null) { + this.iterations_ = 0; + } + return this.iterations_; + } + incrementIterations() { + this.iterations_ = this.iterations + 1; + } + /** + * Executes f() and computes the gradient of the scalar output of f() with + * respect to the list of trainable variables provided by `varList`. If no + * list is provided, it defaults to all trainable variables. + * + * @param f The function to execute and whose output to use for computing + * gradients with respect to variables. + * @param varList An optional list of variables to compute gradients with + * respect to. If specified, only the trainable variables in varList will have + * gradients computed with respect to. Defaults to all trainable variables. + * + * @doc {heading: 'Training', subheading: 'Optimizers'} + */ + computeGradients(f, varList) { + return variableGrads(f, varList); + } + /** + * Dispose the variables (if any) owned by this optimizer instance. + */ + dispose() { + if (this.iterations_ != null) { + dispose(this.iterations_); + } + } + async saveIterations() { + if (this.iterations_ == null) { + this.iterations_ = 0; + } + return { + name: 'iter', + // TODO(cais): Use 'int64' type when available. + tensor: scalar(this.iterations_, 'int32') + }; + } + async getWeights() { + throw new Error('getWeights() is not implemented for this optimizer yet.'); + } + async setWeights(weightValues) { + throw new Error(`setWeights() is not implemented for this optimizer class ` + + `${this.getClassName()}`); + } + /** + * Extract the first element of the weight values and set it + * as the iterations counter variable of this instance of optimizer. + * + * @param weightValues + * @returns Weight values with the first element consumed and excluded. 
+ */ + async extractIterations(weightValues) { + this.iterations_ = (await weightValues[0].tensor.data())[0]; + return weightValues.slice(1); + } + } + Object.defineProperty(Optimizer, Symbol.hasInstance, { + value: (instance) => { + return instance.minimize != null && instance.computeGradients != null && + instance.applyGradients != null; + } + }); + + /** + * @license + * Copyright 2018 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + /** @doclink Optimizer */ + class AdadeltaOptimizer extends Optimizer { + /** @nocollapse */ + static get className() { + // Name matters for Python compatibility. + // This is a getter instead of a property because when it's a property, it + // prevents the entire class from being tree-shaken. + return 'Adadelta'; + } + constructor(learningRate, rho, epsilon = null) { + super(); + this.learningRate = learningRate; + this.rho = rho; + this.epsilon = epsilon; + this.accumulatedGrads = []; + this.accumulatedUpdates = []; + if (epsilon == null) { + this.epsilon = ENGINE.backend.epsilon(); + } + } + applyGradients(variableGradients) { + const variableNames = Array.isArray(variableGradients) ? 
+ variableGradients.map(item => item.name) : + Object.keys(variableGradients); + variableNames.forEach((name, i) => { + const value = ENGINE.registeredVariables[name]; + const trainable = false; + if (this.accumulatedGrads[i] == null) { + this.accumulatedGrads[i] = { + originalName: `${name}/accum_grad`, + variable: tidy(() => zerosLike$3(value).variable(trainable)) + }; + } + if (this.accumulatedUpdates[i] == null) { + this.accumulatedUpdates[i] = { + originalName: `${name}/accum_var`, + variable: tidy(() => zerosLike$3(value).variable(trainable)) + }; + } + const gradient = Array.isArray(variableGradients) ? + variableGradients[i].tensor : + variableGradients[name]; + if (gradient == null) { + return; + } + const accumulatedGrad = this.accumulatedGrads[i].variable; + const accumulatedUpdate = this.accumulatedUpdates[i].variable; + tidy(() => { + const newAccumulatedGrad = add$3(mul(accumulatedGrad, this.rho), mul(square$2(gradient), 1 - this.rho)); + const updates = mul(div$1(sqrt$2(add$3(accumulatedUpdate, this.epsilon)), sqrt$2(add$3(accumulatedGrad, this.epsilon))), gradient); + const newAccumulatedUpdate = add$3(mul(accumulatedUpdate, this.rho), mul(square$2(updates), 1 - this.rho)); + accumulatedGrad.assign(newAccumulatedGrad); + accumulatedUpdate.assign(newAccumulatedUpdate); + const newValue = add$3(mul(updates, -this.learningRate), value); + value.assign(newValue); + }); + }); + this.incrementIterations(); + } + dispose() { + if (this.accumulatedUpdates != null) { + dispose(this.accumulatedGrads.map(v => v.variable)); + dispose(this.accumulatedUpdates.map(v => v.variable)); + } + } + async getWeights() { + // Order matters for Python compatibility. 
+ const variables = [...this.accumulatedGrads, ...this.accumulatedUpdates]; + return [await this.saveIterations()].concat(variables.map(v => ({ name: v.originalName, tensor: v.variable }))); + } + async setWeights(weightValues) { + weightValues = await this.extractIterations(weightValues); + const variableCount = weightValues.length / 2; + const trainable = false; + this.accumulatedGrads = + weightValues.slice(0, variableCount).map(v => ({ + originalName: v.name, + variable: v.tensor.variable(trainable) + })); + this.accumulatedUpdates = + weightValues.slice(variableCount, variableCount * 2) + .map(v => ({ + originalName: v.name, + variable: v.tensor.variable(trainable) + })); + } + getConfig() { + return { + 'learningRate': this.learningRate, + 'rho': this.rho, + 'epsilon': this.epsilon + }; + } + /** @nocollapse */ + static fromConfig(cls, config) { + return new cls(config['learningRate'], config['rho'], config['epsilon']); + } + } + + /** + * @license + * Copyright 2018 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + /** @doclink Optimizer */ + class AdagradOptimizer extends Optimizer { + /** @nocollapse */ + static get className() { + // Name matters for Python compatibility. + // This is a getter instead of a property because when it's a property, it + // prevents the entire class from being tree-shaken. 
+ return 'Adagrad'; + } + constructor(learningRate, initialAccumulatorValue = 0.1) { + super(); + this.learningRate = learningRate; + this.initialAccumulatorValue = initialAccumulatorValue; + this.accumulatedGrads = []; + } + applyGradients(variableGradients) { + const variableNames = Array.isArray(variableGradients) ? + variableGradients.map(item => item.name) : + Object.keys(variableGradients); + variableNames.forEach((name, i) => { + const value = ENGINE.registeredVariables[name]; + if (this.accumulatedGrads[i] == null) { + const trainable = false; + this.accumulatedGrads[i] = { + originalName: `${name}/accumulator`, + variable: tidy(() => fill$2(value.shape, this.initialAccumulatorValue) + .variable(trainable)) + }; + } + const gradient = Array.isArray(variableGradients) ? + variableGradients[i].tensor : + variableGradients[name]; + if (gradient == null) { + return; + } + const accumulatedGrad = this.accumulatedGrads[i].variable; + tidy(() => { + const newAccumulatedGrad = add$3(accumulatedGrad, square$2(gradient)); + accumulatedGrad.assign(newAccumulatedGrad); + const newValue = add$3(mul(div$1(gradient, sqrt$2(add$3(newAccumulatedGrad, ENGINE.backend.epsilon()))), -this.learningRate), value); + value.assign(newValue); + }); + }); + this.incrementIterations(); + } + dispose() { + if (this.accumulatedGrads != null) { + dispose(this.accumulatedGrads.map(v => v.variable)); + } + } + async getWeights() { + // Order matters for Python compatibility. 
+ return [await this.saveIterations()].concat(this.accumulatedGrads.map(v => ({ name: v.originalName, tensor: v.variable }))); + } + async setWeights(weightValues) { + weightValues = await this.extractIterations(weightValues); + const trainable = false; + this.accumulatedGrads = weightValues.map(v => ({ originalName: v.name, variable: v.tensor.variable(trainable) })); + } + getConfig() { + return { + 'learningRate': this.learningRate, + 'initialAccumulatorValue': this.initialAccumulatorValue, + }; + } + /** @nocollapse */ + static fromConfig(cls, config) { + return new cls(config['learningRate'], config['initialAccumulatorValue']); + } + } + + /** + * @license + * Copyright 2018 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + class AdamOptimizer extends Optimizer { + /** @nocollapse */ + static get className() { + // Name matters for Python compatibility. + // This is a getter instead of a property because when it's a property, it + // prevents the entire class from being tree-shaken. + return 'Adam'; + } + constructor(learningRate, beta1, beta2, epsilon = null) { + super(); + this.learningRate = learningRate; + this.beta1 = beta1; + this.beta2 = beta2; + this.epsilon = epsilon; + this.accumulatedFirstMoment = []; + this.accumulatedSecondMoment = []; + tidy(() => { + // accB* will be updated by batch. 
+ this.accBeta1 = scalar(beta1).variable(); + this.accBeta2 = scalar(beta2).variable(); + }); + if (epsilon == null) { + this.epsilon = ENGINE.backend.epsilon(); + } + } + applyGradients(variableGradients) { + const varNames = Array.isArray(variableGradients) ? + variableGradients.map(v => v.name) : + Object.keys(variableGradients); + tidy(() => { + const oneMinusAccBeta1 = sub$2(1, this.accBeta1); + const oneMinusAccBeta2 = sub$2(1, this.accBeta2); + varNames.forEach((name, i) => { + const value = ENGINE.registeredVariables[name]; + const trainable = false; + if (this.accumulatedFirstMoment[i] == null) { + this.accumulatedFirstMoment[i] = { + originalName: `${name}/m`, + variable: tidy(() => zerosLike$3(value).variable(trainable)) + }; + } + if (this.accumulatedSecondMoment[i] == null) { + this.accumulatedSecondMoment[i] = { + originalName: `${name}/v`, + variable: tidy(() => zerosLike$3(value).variable(trainable)) + }; + } + const gradient = Array.isArray(variableGradients) ? + variableGradients[i].tensor : + variableGradients[name]; + if (gradient == null) { + return; + } + const firstMoment = this.accumulatedFirstMoment[i].variable; + const secondMoment = this.accumulatedSecondMoment[i].variable; + const newFirstMoment = add$3(mul(firstMoment, this.beta1), mul(gradient, 1 - this.beta1)); + const newSecondMoment = add$3(mul(secondMoment, this.beta2), mul(square$2(gradient), 1 - this.beta2)); + const biasCorrectedFirstMoment = div$1(newFirstMoment, oneMinusAccBeta1); + const biasCorrectedSecondMoment = div$1(newSecondMoment, oneMinusAccBeta2); + firstMoment.assign(newFirstMoment); + secondMoment.assign(newSecondMoment); + const newValue = add$3(mul(div$1(biasCorrectedFirstMoment, add$3(sqrt$2(biasCorrectedSecondMoment), this.epsilon)), -this.learningRate), value); + value.assign(newValue); + }); + this.accBeta1.assign(mul(this.accBeta1, this.beta1)); + this.accBeta2.assign(mul(this.accBeta2, this.beta2)); + }); + this.incrementIterations(); + } + dispose() { + 
this.accBeta1.dispose(); + this.accBeta2.dispose(); + if (this.accumulatedFirstMoment != null) { + dispose(this.accumulatedFirstMoment.map(v => v.variable)); + } + if (this.accumulatedSecondMoment != null) { + dispose(this.accumulatedSecondMoment.map(v => v.variable)); + } + } + async getWeights() { + // Order matters for Python compatibility. + const variables = [...this.accumulatedFirstMoment, ...this.accumulatedSecondMoment]; + return [await this.saveIterations()].concat(variables.map(v => ({ name: v.originalName, tensor: v.variable }))); + } + async setWeights(weightValues) { + weightValues = await this.extractIterations(weightValues); + tidy(() => { + this.accBeta1.assign(pow$3(this.beta1, this.iterations_ + 1)); + this.accBeta2.assign(pow$3(this.beta2, this.iterations_ + 1)); + }); + const variableCount = weightValues.length / 2; + const trainable = false; + this.accumulatedFirstMoment = + weightValues.slice(0, variableCount).map(v => ({ + originalName: v.name, + variable: v.tensor.variable(trainable) + })); + this.accumulatedSecondMoment = + weightValues.slice(variableCount, variableCount * 2) + .map(v => ({ + originalName: v.name, + variable: v.tensor.variable(trainable) + })); + } + getConfig() { + return { + 'learningRate': this.learningRate, + 'beta1': this.beta1, + 'beta2': this.beta2, + 'epsilon': this.epsilon, + }; + } + /** @nocollapse */ + static fromConfig(cls, config) { + return new cls(config['learningRate'], config['beta1'], config['beta2'], config['epsilon']); + } + } + + /** + * @license + * Copyright 2018 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + class AdamaxOptimizer extends Optimizer { + /** @nocollapse */ + static get className() { + // Name matters for Python compatibility. + // This is a getter instead of a property because when it's a property, it + // prevents the entire class from being tree-shaken. + return 'Adamax'; + } + constructor(learningRate, beta1, beta2, epsilon = null, decay = 0.0) { + super(); + this.learningRate = learningRate; + this.beta1 = beta1; + this.beta2 = beta2; + this.epsilon = epsilon; + this.decay = decay; + this.accumulatedFirstMoment = []; + this.accumulatedWeightedInfNorm = []; + tidy(() => { + this.iteration = scalar(0).variable(); + this.accBeta1 = scalar(beta1).variable(); + }); + if (epsilon == null) { + this.epsilon = ENGINE.backend.epsilon(); + } + } + applyGradients(variableGradients) { + const variableNames = Array.isArray(variableGradients) ? 
+ variableGradients.map(item => item.name) : + Object.keys(variableGradients); + tidy(() => { + const oneMinusAccBeta1 = sub$2(1, this.accBeta1); + const lr = div$1(-this.learningRate, add$3(mul(this.iteration, this.decay), 1)); + variableNames.forEach((name, i) => { + const value = ENGINE.registeredVariables[name]; + const trainable = false; + if (this.accumulatedFirstMoment[i] == null) { + this.accumulatedFirstMoment[i] = { + originalName: `${name}/m`, + variable: zerosLike$3(value).variable(trainable) + }; + } + if (this.accumulatedWeightedInfNorm[i] == null) { + this.accumulatedWeightedInfNorm[i] = { + originalName: `${name}/v`, + variable: zerosLike$3(value).variable(trainable) + }; + } + const gradient = Array.isArray(variableGradients) ? + variableGradients[i].tensor : + variableGradients[name]; + if (gradient == null) { + return; + } + const firstMoment = this.accumulatedFirstMoment[i].variable; + const weightedInfNorm = this.accumulatedWeightedInfNorm[i].variable; + const newFirstMoment = add$3(mul(firstMoment, this.beta1), mul(gradient, 1 - this.beta1)); + const ut0 = mul(weightedInfNorm, this.beta2); + const ut1 = abs$2(gradient); + const newWeightedInfNorm = maximum$4(ut0, ut1); + firstMoment.assign(newFirstMoment); + weightedInfNorm.assign(newWeightedInfNorm); + const newValue = add$3(mul(div$1(lr, oneMinusAccBeta1), div$1(newFirstMoment, add$3(newWeightedInfNorm, this.epsilon))), value); + value.assign(newValue); + }); + this.iteration.assign(add$3(this.iteration, 1)); + this.accBeta1.assign(mul(this.accBeta1, this.beta1)); + }); + this.incrementIterations(); + } + dispose() { + this.accBeta1.dispose(); + this.iteration.dispose(); + if (this.accumulatedFirstMoment != null) { + dispose(this.accumulatedFirstMoment.map(v => v.variable)); + } + if (this.accumulatedWeightedInfNorm != null) { + dispose(this.accumulatedWeightedInfNorm.map(v => v.variable)); + } + } + async getWeights() { + throw new Error('getWeights() is not implemented for Adamax yet.'); + 
} + async setWeights(weightValues) { + throw new Error('setWeights() is not implemented for Adamax yet.'); + } + getConfig() { + return { + 'learningRate': this.learningRate, + 'beta1': this.beta1, + 'beta2': this.beta2, + 'epsilon': this.epsilon, + 'decay': this.decay + }; + } + /** @nocollapse */ + static fromConfig(cls, config) { + return new cls(config['learningRate'], config['beta1'], config['beta2'], config['epsilon'], config['decay']); + } + } + + /** + * @license + * Copyright 2018 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + /** @doclink Optimizer */ + class SGDOptimizer extends Optimizer { + /** @nocollapse */ + static get className() { + // Name matters for Python compatibility. + // This is a getter instead of a property because when it's a property, it + // prevents the entire class from being tree-shaken. + return 'SGD'; + } + constructor(learningRate) { + super(); + this.learningRate = learningRate; + this.setLearningRate(learningRate); + } + applyGradients(variableGradients) { + const varNames = Array.isArray(variableGradients) ? + variableGradients.map(v => v.name) : + Object.keys(variableGradients); + varNames.forEach((name, i) => { + const gradient = Array.isArray(variableGradients) ? 
+ variableGradients[i].tensor : + variableGradients[name]; + if (gradient == null) { + return; + } + const value = ENGINE.registeredVariables[name]; + tidy(() => { + const newValue = add$3(mul(this.c, gradient), value); + value.assign(newValue); + }); + }); + this.incrementIterations(); + } + /** + * Sets the learning rate of the optimizer. + */ + setLearningRate(learningRate) { + this.learningRate = learningRate; + if (this.c != null) { + this.c.dispose(); + } + this.c = keep(scalar(-learningRate)); + } + dispose() { + this.c.dispose(); + } + async getWeights() { + return [await this.saveIterations()]; + } + async setWeights(weightValues) { + weightValues = await this.extractIterations(weightValues); + if (weightValues.length !== 0) { + throw new Error('SGD optimizer does not have settable weights.'); + } + } + getConfig() { + return { 'learningRate': this.learningRate }; + } + /** @nocollapse */ + static fromConfig(cls, config) { + return new cls(config['learningRate']); + } + } + + /** + * @license + * Copyright 2018 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + /** @doclink Optimizer */ + class MomentumOptimizer extends SGDOptimizer { + /** @nocollapse */ + // Name matters for Python compatibility. + static get className() { + // Name matters for Python compatibility. 
+ // This is a getter instead of a property because when it's a property, it + // prevents the entire class from being tree-shaken. + return 'Momentum'; + } + constructor(learningRate, momentum, useNesterov = false) { + super(learningRate); + this.learningRate = learningRate; + this.momentum = momentum; + this.useNesterov = useNesterov; + this.accumulations = []; + this.m = scalar(this.momentum); + } + applyGradients(variableGradients) { + const variableNames = Array.isArray(variableGradients) ? + variableGradients.map(item => item.name) : + Object.keys(variableGradients); + variableNames.forEach((name, i) => { + const value = ENGINE.registeredVariables[name]; + if (this.accumulations[i] == null) { + const trainable = false; + this.accumulations[i] = { + originalName: `${name}/momentum`, + variable: tidy(() => zerosLike$3(value).variable(trainable)) + }; + } + const accumulation = this.accumulations[i].variable; + const gradient = Array.isArray(variableGradients) ? + variableGradients[i].tensor : + variableGradients[name]; + if (gradient == null) { + return; + } + tidy(() => { + let newValue; + const newAccumulation = add$3(mul(this.m, accumulation), gradient); + if (this.useNesterov) { + newValue = add$3(mul(this.c, add$3(gradient, mul(newAccumulation, this.m))), value); + } + else { + newValue = add$3(mul(this.c, newAccumulation), value); + } + accumulation.assign(newAccumulation); + value.assign(newValue); + }); + }); + this.incrementIterations(); + } + dispose() { + this.m.dispose(); + if (this.accumulations != null) { + dispose(this.accumulations.map(v => v.variable)); + } + } + /** + * Sets the momentum of the optimizer. + * + * @param momentum + */ + setMomentum(momentum) { + this.momentum = momentum; + } + async getWeights() { + // Order matters for Python compatibility. 
+ return [await this.saveIterations()].concat(this.accumulations.map(v => ({ name: v.originalName, tensor: v.variable }))); + } + async setWeights(weightValues) { + weightValues = await this.extractIterations(weightValues); + const trainable = false; + this.accumulations = weightValues.map(v => ({ originalName: v.name, variable: v.tensor.variable(trainable) })); + } + getConfig() { + return { + 'learningRate': this.learningRate, + 'momentum': this.momentum, + 'useNesterov': this.useNesterov + }; + } + /** @nocollapse */ + static fromConfig(cls, config) { + return new cls(config['learningRate'], config['momentum'], config['useNesterov']); + } + } + + /** + * @license + * Copyright 2018 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + /** @doclink Optimizer */ + class RMSPropOptimizer extends Optimizer { + /** @nocollapse */ + static get className() { + // Name matters for Python compatibility. + // This is a getter instead of a property because when it's a property, it + // prevents the entire class from being tree-shaken. 
+ return 'RMSProp'; + } + constructor(learningRate, decay = 0.9, momentum = 0.0, epsilon = null, centered = false) { + super(); + this.learningRate = learningRate; + this.decay = decay; + this.momentum = momentum; + this.epsilon = epsilon; + this.accumulatedMeanSquares = []; + this.accumulatedMoments = []; + this.accumulatedMeanGrads = []; + this.centered = centered; + if (epsilon == null) { + this.epsilon = ENGINE.backend.epsilon(); + } + if (learningRate == null) { + throw new Error(`learningRate for RMSPropOptimizer must be defined.`); + } + } + applyGradients(variableGradients) { + const variableNames = Array.isArray(variableGradients) ? + variableGradients.map(item => item.name) : + Object.keys(variableGradients); + variableNames.forEach((name, i) => { + const value = ENGINE.registeredVariables[name]; + const trainable = false; + if (this.accumulatedMeanSquares[i] == null) { + this.accumulatedMeanSquares[i] = { + originalName: `${name}/rms`, + variable: tidy(() => zerosLike$3(value).variable(trainable)) + }; + } + if (this.accumulatedMoments[i] == null) { + this.accumulatedMoments[i] = { + originalName: `${name}/momentum`, + variable: tidy(() => zerosLike$3(value).variable(trainable)) + }; + } + if (this.accumulatedMeanGrads[i] == null && this.centered) { + this.accumulatedMeanGrads[i] = { + originalName: `${name}/mg`, + variable: tidy(() => zerosLike$3(value).variable(trainable)) + }; + } + const gradient = Array.isArray(variableGradients) ? 
+ variableGradients[i].tensor : + variableGradients[name]; + if (gradient == null) { + return; + } + const accumulatedMeanSquare = this.accumulatedMeanSquares[i].variable; + const accumulatedMoments = this.accumulatedMoments[i].variable; + tidy(() => { + const newAccumulatedMeanSquare = add$3(mul(accumulatedMeanSquare, this.decay), mul(square$2(gradient), 1 - this.decay)); + if (this.centered) { + const accumulatedMeanGrad = this.accumulatedMeanGrads[i].variable; + // Centered gradient + const newAccumulatedMeanGrad = add$3(mul(accumulatedMeanGrad, this.decay), mul(gradient, 1 - this.decay)); + const gradContribution = div$1(mul(gradient, this.learningRate), sqrt$2(sub$2(newAccumulatedMeanSquare, add$3(square$2(newAccumulatedMeanGrad), this.epsilon)))); + const newAccumulatedMoments = add$3(mul(accumulatedMoments, this.momentum), gradContribution); + accumulatedMeanSquare.assign(newAccumulatedMeanSquare); + accumulatedMeanGrad.assign(newAccumulatedMeanGrad); + accumulatedMoments.assign(newAccumulatedMoments); + const newValue = sub$2(value, newAccumulatedMoments); + value.assign(newValue); + } + else { + // Plain gradient + const newAccumulatedMeanSquare = add$3(mul(accumulatedMeanSquare, this.decay), mul(square$2(gradient), 1 - this.decay)); + const newAccumulatedMoments = add$3(mul(accumulatedMoments, this.momentum), div$1(mul(gradient, this.learningRate), sqrt$2(add$3(newAccumulatedMeanSquare, this.epsilon)))); + accumulatedMeanSquare.assign(newAccumulatedMeanSquare); + accumulatedMoments.assign(newAccumulatedMoments); + const newValue = sub$2(value, newAccumulatedMoments); + value.assign(newValue); + } + }); + }); + this.incrementIterations(); + } + dispose() { + if (this.accumulatedMeanSquares != null) { + dispose(this.accumulatedMeanSquares.map(v => v.variable)); + } + if (this.accumulatedMeanGrads != null && this.centered) { + dispose(this.accumulatedMeanGrads.map(v => v.variable)); + } + if (this.accumulatedMoments != null) { + 
dispose(this.accumulatedMoments.map(v => v.variable)); + } + } + async getWeights() { + // Order matters for Python compatibility. + const variables = [...this.accumulatedMeanSquares, ...this.accumulatedMoments]; + if (this.centered) { + variables.push(...this.accumulatedMeanGrads); + } + return [await this.saveIterations()].concat(variables.map(v => ({ name: v.originalName, tensor: v.variable }))); + } + async setWeights(weightValues) { + weightValues = await this.extractIterations(weightValues); + const variableCount = this.centered ? weightValues.length / 3 : weightValues.length / 2; + const trainable = false; + this.accumulatedMeanSquares = + weightValues.slice(0, variableCount).map(v => ({ + originalName: v.name, + variable: v.tensor.variable(trainable) + })); + this.accumulatedMoments = + weightValues.slice(variableCount, variableCount * 2) + .map(v => ({ + originalName: v.name, + variable: v.tensor.variable(trainable) + })); + if (this.centered) { + this.accumulatedMeanGrads = + weightValues.slice(variableCount * 2, variableCount * 3) + .map(v => ({ + originalName: v.name, + variable: v.tensor.variable(trainable) + })); + } + } + getConfig() { + return { + 'learningRate': this.learningRate, + 'decay': this.decay, + 'momentum': this.momentum, + 'epsilon': this.epsilon, + 'centered': this.centered + }; + } + /** @nocollapse */ + static fromConfig(cls, config) { + return new cls(config['learningRate'], config['decay'], config['momentum'], config['epsilon'], config['centered']); + } + } + + /** + * @license + * Copyright 2022 Google LLC. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + const OPTIMIZERS = [ + AdadeltaOptimizer, + AdagradOptimizer, + AdamOptimizer, + AdamaxOptimizer, + MomentumOptimizer, + RMSPropOptimizer, + SGDOptimizer, + ]; + function registerOptimizers() { + for (const optimizer of OPTIMIZERS) { + registerClass(optimizer); + } + } + + /** + * @license + * Copyright 2018 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + const DEFAULT_FILE_NAME_PREFIX = 'model'; + const DEFAULT_JSON_EXTENSION_NAME = '.json'; + const DEFAULT_WEIGHT_DATA_EXTENSION_NAME = '.weights.bin'; + function defer(f) { + return new Promise(resolve => setTimeout(resolve)).then(f); + } + class BrowserDownloads { + constructor(fileNamePrefix) { + if (!env().getBool('IS_BROWSER')) { + // TODO(cais): Provide info on what IOHandlers are available under the + // current environment. 
+ throw new Error('browserDownloads() cannot proceed because the current environment ' + + 'is not a browser.'); + } + if (fileNamePrefix.startsWith(BrowserDownloads.URL_SCHEME)) { + fileNamePrefix = fileNamePrefix.slice(BrowserDownloads.URL_SCHEME.length); + } + if (fileNamePrefix == null || fileNamePrefix.length === 0) { + fileNamePrefix = DEFAULT_FILE_NAME_PREFIX; + } + this.modelJsonFileName = fileNamePrefix + DEFAULT_JSON_EXTENSION_NAME; + this.weightDataFileName = + fileNamePrefix + DEFAULT_WEIGHT_DATA_EXTENSION_NAME; + } + async save(modelArtifacts) { + if (typeof (document) === 'undefined') { + throw new Error('Browser downloads are not supported in ' + + 'this environment since `document` is not present'); + } + // TODO(mattsoulanille): Support saving models over 2GB that exceed + // Chrome's ArrayBuffer size limit. + const weightBuffer = CompositeArrayBuffer.join(modelArtifacts.weightData); + const weightsURL = window.URL.createObjectURL(new Blob([weightBuffer], { type: 'application/octet-stream' })); + if (modelArtifacts.modelTopology instanceof ArrayBuffer) { + throw new Error('BrowserDownloads.save() does not support saving model topology ' + + 'in binary formats yet.'); + } + else { + const weightsManifest = [{ + paths: ['./' + this.weightDataFileName], + weights: modelArtifacts.weightSpecs + }]; + const modelJSON = getModelJSONForModelArtifacts(modelArtifacts, weightsManifest); + const modelJsonURL = window.URL.createObjectURL(new Blob([JSON.stringify(modelJSON)], { type: 'application/json' })); + // If anchor elements are not provided, create them without attaching them + // to parents, so that the downloaded file names can be controlled. + const jsonAnchor = this.modelJsonAnchor == null ? + document.createElement('a') : + this.modelJsonAnchor; + jsonAnchor.download = this.modelJsonFileName; + jsonAnchor.href = modelJsonURL; + // Trigger downloads by evoking a click event on the download anchors. 
+ // When multiple downloads are started synchronously, Firefox will only + // save the last one. + await defer(() => jsonAnchor.dispatchEvent(new MouseEvent('click'))); + if (modelArtifacts.weightData != null) { + const weightDataAnchor = this.weightDataAnchor == null ? + document.createElement('a') : + this.weightDataAnchor; + weightDataAnchor.download = this.weightDataFileName; + weightDataAnchor.href = weightsURL; + await defer(() => weightDataAnchor.dispatchEvent(new MouseEvent('click'))); + } + return { modelArtifactsInfo: getModelArtifactsInfoForJSON(modelArtifacts) }; + } + } + } + BrowserDownloads.URL_SCHEME = 'downloads://'; + class BrowserFiles { + constructor(files) { + if (files == null || files.length < 1) { + throw new Error(`When calling browserFiles, at least 1 file is required, ` + + `but received ${files}`); + } + this.jsonFile = files[0]; + this.weightsFiles = files.slice(1); + } + async load() { + return new Promise((resolve, reject) => { + const jsonReader = new FileReader(); + jsonReader.onload = (event) => { + // tslint:disable-next-line:no-any + const modelJSON = JSON.parse(event.target.result); + const modelTopology = modelJSON.modelTopology; + if (modelTopology == null) { + reject(new Error(`modelTopology field is missing from file ${this.jsonFile.name}`)); + return; + } + const weightsManifest = modelJSON.weightsManifest; + if (weightsManifest == null) { + reject(new Error(`weightManifest field is missing from file ${this.jsonFile.name}`)); + return; + } + if (this.weightsFiles.length === 0) { + resolve({ modelTopology }); + return; + } + const modelArtifactsPromise = getModelArtifactsForJSON(modelJSON, (weightsManifest) => this.loadWeights(weightsManifest)); + resolve(modelArtifactsPromise); + }; + jsonReader.onerror = error => reject(`Failed to read model topology and weights manifest JSON ` + + `from file '${this.jsonFile.name}'. 
BrowserFiles supports loading ` + + `Keras-style tf.Model artifacts only.`); + jsonReader.readAsText(this.jsonFile); + }); + } + loadWeights(weightsManifest) { + const weightSpecs = []; + const paths = []; + for (const entry of weightsManifest) { + weightSpecs.push(...entry.weights); + paths.push(...entry.paths); + } + const pathToFile = this.checkManifestAndWeightFiles(weightsManifest); + const promises = paths.map(path => this.loadWeightsFile(path, pathToFile[path])); + return Promise.all(promises).then(buffers => [weightSpecs, buffers]); + } + loadWeightsFile(path, file) { + return new Promise((resolve, reject) => { + const weightFileReader = new FileReader(); + weightFileReader.onload = (event) => { + // tslint:disable-next-line:no-any + const weightData = event.target.result; + resolve(weightData); + }; + weightFileReader.onerror = error => reject(`Failed to weights data from file of path '${path}'.`); + weightFileReader.readAsArrayBuffer(file); + }); + } + /** + * Check the compatibility between weights manifest and weight files. 
+ */ + checkManifestAndWeightFiles(manifest) { + const basenames = []; + const fileNames = this.weightsFiles.map(file => basename(file.name)); + const pathToFile = {}; + for (const group of manifest) { + group.paths.forEach(path => { + const pathBasename = basename(path); + if (basenames.indexOf(pathBasename) !== -1) { + throw new Error(`Duplicate file basename found in weights manifest: ` + + `'${pathBasename}'`); + } + basenames.push(pathBasename); + if (fileNames.indexOf(pathBasename) === -1) { + throw new Error(`Weight file with basename '${pathBasename}' is not provided.`); + } + else { + pathToFile[path] = this.weightsFiles[fileNames.indexOf(pathBasename)]; + } + }); + } + if (basenames.length !== this.weightsFiles.length) { + throw new Error(`Mismatch in the number of files in weights manifest ` + + `(${basenames.length}) and the number of weight files provided ` + + `(${this.weightsFiles.length}).`); + } + return pathToFile; + } + } + const browserDownloadsRouter = (url) => { + if (!env().getBool('IS_BROWSER')) { + return null; + } + else { + if (!Array.isArray(url) && url.startsWith(BrowserDownloads.URL_SCHEME)) { + return browserDownloads(url.slice(BrowserDownloads.URL_SCHEME.length)); + } + else { + return null; + } + } + }; + IORouterRegistry.registerSaveRouter(browserDownloadsRouter); + /** + * Creates an IOHandler that triggers file downloads from the browser. + * + * The returned `IOHandler` instance can be used as model exporting methods such + * as `tf.Model.save` and supports only saving. + * + * ```js + * const model = tf.sequential(); + * model.add(tf.layers.dense( + * {units: 1, inputShape: [10], activation: 'sigmoid'})); + * const saveResult = await model.save('downloads://mymodel'); + * // This will trigger downloading of two files: + * // 'mymodel.json' and 'mymodel.weights.bin'. + * console.log(saveResult); + * ``` + * + * @param fileNamePrefix Prefix name of the files to be downloaded. 
For use with + * `tf.Model`, `fileNamePrefix` should follow either of the following two + * formats: + * 1. `null` or `undefined`, in which case the default file + * names will be used: + * - 'model.json' for the JSON file containing the model topology and + * weights manifest. + * - 'model.weights.bin' for the binary file containing the binary weight + * values. + * 2. A single string or an Array of a single string, as the file name prefix. + * For example, if `'foo'` is provided, the downloaded JSON + * file and binary weights file will be named 'foo.json' and + * 'foo.weights.bin', respectively. + * @param config Additional configuration for triggering downloads. + * @returns An instance of `BrowserDownloads` `IOHandler`. + * + * @doc { + * heading: 'Models', + * subheading: 'Loading', + * namespace: 'io', + * ignoreCI: true + * } + */ + function browserDownloads(fileNamePrefix = 'model') { + return new BrowserDownloads(fileNamePrefix); + } + /** + * Creates an IOHandler that loads model artifacts from user-selected files. + * + * This method can be used for loading from files such as user-selected files + * in the browser. + * When used in conjunction with `tf.loadLayersModel`, an instance of + * `tf.LayersModel` (Keras-style) can be constructed from the loaded artifacts. + * + * ```js + * // Note: This code snippet won't run properly without the actual file input + * // elements in the HTML DOM. + * + * // Suppose there are two HTML file input (``) + * // elements. + * const uploadJSONInput = document.getElementById('upload-json'); + * const uploadWeightsInput = document.getElementById('upload-weights'); + * const model = await tf.loadLayersModel(tf.io.browserFiles( + * [uploadJSONInput.files[0], uploadWeightsInput.files[0]])); + * ``` + * + * @param files `File`s to load from. 
Currently, this function supports only + * loading from files that contain Keras-style models (i.e., `tf.Model`s), for + * which an `Array` of `File`s is expected (in that order): + * - A JSON file containing the model topology and weight manifest. + * - Optionally, one or more binary files containing the binary weights. + * These files must have names that match the paths in the `weightsManifest` + * contained by the aforementioned JSON file, or errors will be thrown + * during loading. These weights files have the same format as the ones + * generated by `tensorflowjs_converter` that comes with the `tensorflowjs` + * Python PIP package. If no weights files are provided, only the model + * topology will be loaded from the JSON file above. + * @returns An instance of `Files` `IOHandler`. + * + * @doc { + * heading: 'Models', + * subheading: 'Loading', + * namespace: 'io', + * ignoreCI: true + * } + */ + function browserFiles(files) { + return new BrowserFiles(files); + } + + /** + * @license + * Copyright 2019 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + /** + * Monitor Promise.all progress, fire onProgress callback function. + * + * @param promises Promise list going to be monitored + * @param onProgress Callback function. Fired when a promise resolved. + * @param startFraction Optional fraction start. Default to 0. 
+ * @param endFraction Optional fraction end. Default to 1. + */ + function monitorPromisesProgress(promises, onProgress, startFraction, endFraction) { + checkPromises(promises); + startFraction = startFraction == null ? 0 : startFraction; + endFraction = endFraction == null ? 1 : endFraction; + checkFraction(startFraction, endFraction); + let resolvedPromise = 0; + const registerMonitor = (promise) => { + promise.then(value => { + const fraction = startFraction + + ++resolvedPromise / promises.length * (endFraction - startFraction); + // pass fraction as parameter to callback function. + onProgress(fraction); + return value; + }); + return promise; + }; + function checkPromises(promises) { + assert$1(promises != null && Array.isArray(promises) && promises.length > 0, () => 'promises must be a none empty array'); + } + function checkFraction(startFraction, endFraction) { + assert$1(startFraction >= 0 && startFraction <= 1, () => `Progress fraction must be in range [0, 1], but ` + + `got startFraction ${startFraction}`); + assert$1(endFraction >= 0 && endFraction <= 1, () => `Progress fraction must be in range [0, 1], but ` + + `got endFraction ${endFraction}`); + assert$1(endFraction >= startFraction, () => `startFraction must be no more than endFraction, but ` + + `got startFraction ${startFraction} and endFraction ` + + `${endFraction}`); + } + return Promise.all(promises.map(registerMonitor)); + } + + /** + * @license + * Copyright 2018 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + /** + * Reads binary weights data from a number of URLs. + * + * @param fetchURLs URLs to send the HTTP requests at, using `fetch` calls. + * @param requestOptions RequestInit (options) for the HTTP requests. + * @param fetchFunc Optional overriding value for the `window.fetch` function. + * @param onProgress Optional, progress callback function, fired periodically + * before the load is completed. + * @returns A `Promise` of an Array of `ArrayBuffer`. The Array has the same + * length as `fetchURLs`. + */ + async function loadWeightsAsArrayBuffer(fetchURLs, loadOptions) { + if (loadOptions == null) { + loadOptions = {}; + } + const fetchFunc = loadOptions.fetchFunc == null ? env().platform.fetch : + loadOptions.fetchFunc; + // Create the requests for all of the weights in parallel. + const requests = fetchURLs.map(fetchURL => fetchFunc(fetchURL, loadOptions.requestInit, { isBinary: true })); + const fetchStartFraction = 0; + const fetchEndFraction = 0.5; + const responses = loadOptions.onProgress == null ? + await Promise.all(requests) : + await monitorPromisesProgress(requests, loadOptions.onProgress, fetchStartFraction, fetchEndFraction); + const bufferPromises = responses.map(response => response.arrayBuffer()); + const bufferStartFraction = 0.5; + const bufferEndFraction = 1; + const buffers = loadOptions.onProgress == null ? + await Promise.all(bufferPromises) : + await monitorPromisesProgress(bufferPromises, loadOptions.onProgress, bufferStartFraction, bufferEndFraction); + return buffers; + } + function streamWeights(fetchURLs, loadOptions) { + var _a; + const fetchFunc = loadOptions.fetchFunc == null ? env().platform.fetch : + loadOptions.fetchFunc; + let fetchIndex = 0; + let chunkReader; + (_a = loadOptions.onProgress) === null || _a === void 0 ? 
void 0 : _a.call(loadOptions, 0); + return new ReadableStream({ + pull: async (controller) => { + var _a; + while (fetchIndex < fetchURLs.length) { + if (!chunkReader) { + const body = (await fetchFunc(fetchURLs[fetchIndex], loadOptions.requestInit, { isBinary: true })).body; + chunkReader = body.getReader(); + } + const { done, value } = await chunkReader.read(); + if (done) { + fetchIndex++; + chunkReader = undefined; + (_a = loadOptions.onProgress) === null || _a === void 0 ? void 0 : _a.call(loadOptions, fetchIndex / fetchURLs.length); + continue; + } + controller.enqueue(value); + return; + } + controller.close(); + }, + }); + } + /** + * Reads a weights manifest JSON configuration, fetches the weights and + * returns them as `Tensor`s. + * + * @param manifest The weights manifest JSON. + * @param filePathPrefix The path prefix for filenames given in the manifest. + * Defaults to the empty string. + * @param weightNames The names of the weights to be fetched. + */ + async function loadWeights(manifest, filePathPrefix = '', weightNames, requestInit) { + // TODO(nsthorat): Groups are currently fetched atomically. If you need a + // single weight from a group, the whole group will be fetched. At a future + // date, we should support fetching only the individual shards within a + // group that are needed to reconstruct the requested weight. + // TODO(cais): Use `decodeWeights` for implementation. + const fetchWeights = (fetchUrls) => loadWeightsAsArrayBuffer(fetchUrls, { requestInit }); + const loadWeights = weightsLoaderFactory(fetchWeights); + return loadWeights(manifest, filePathPrefix, weightNames); + } + /** + * Creates a function, which reads a weights manifest JSON configuration, + * fetches the weight files using the specified function and returns them as + * `Tensor`s. 
+ * + * ```js + * // example for creating a nodejs weight loader, which reads the weight files + * // from disk using fs.readFileSync + * + * import * as fs from 'fs' + * + * const fetchWeightsFromDisk = (filePaths: string[]) => + * filePaths.map(filePath => fs.readFileSync(filePath).buffer) + * + * const loadWeights = tf.io.weightsLoaderFactory(fetchWeightsFromDisk) + * + * const manifest = JSON.parse( + * fs.readFileSync('./my_model-weights_manifest').toString() + * ) + * const weightMap = await loadWeights(manifest, './') + * ``` + * @param fetchWeightsFunction The function used for fetching the weight files. + * @returns Weight loading function. + */ + function weightsLoaderFactory(fetchWeightsFunction) { + return async (manifest, filePathPrefix = '', weightNames) => { + // Collect all the groups, weights, and their relative offsets to be + // fetched. + const groupIndicesToFetchMap = manifest.map(() => false); + const groupWeightsToFetch = {}; + const weightsFound = weightNames != null ? weightNames.map(() => false) : []; + const allManifestWeightNames = []; + manifest.forEach((manifestGroupConfig, groupIndex) => { + let groupOffset = 0; + manifestGroupConfig.weights.forEach(weightsEntry => { + const rawDtype = ('quantization' in weightsEntry) ? 
+ weightsEntry.quantization.dtype : + weightsEntry.dtype; + const weightsBytes = DTYPE_VALUE_SIZE_MAP[rawDtype] * + sizeFromShape(weightsEntry.shape); + const enqueueWeightsForFetchingFn = () => { + groupIndicesToFetchMap[groupIndex] = true; + if (groupWeightsToFetch[groupIndex] == null) { + groupWeightsToFetch[groupIndex] = []; + } + groupWeightsToFetch[groupIndex].push({ + manifestEntry: weightsEntry, + groupOffset, + sizeBytes: weightsBytes + }); + }; + if (weightNames != null) { + weightNames.forEach((weightName, weightIndex) => { + if (weightName === weightsEntry.name) { + enqueueWeightsForFetchingFn(); + weightsFound[weightIndex] = true; + } + }); + } + else { + enqueueWeightsForFetchingFn(); + } + allManifestWeightNames.push(weightsEntry.name); + groupOffset += weightsBytes; + }); + }); + if (!weightsFound.every(found => found)) { + const weightsNotFound = weightNames.filter((_, i) => !weightsFound[i]); + throw new Error(`Could not find weights in manifest with names: ` + + `${weightsNotFound.join(', ')}. \n` + + `Manifest JSON has weights with names: ` + + `${allManifestWeightNames.join(', ')}.`); + } + // Convert the one-hot boolean groupId => shouldFetch map to a list of group + // IDs. + const groupIndicesToFetch = groupIndicesToFetchMap.reduce((accumulator, shouldFetch, i) => { + if (shouldFetch) { + accumulator.push(i); + } + return accumulator; + }, []); + const fetchUrls = []; + groupIndicesToFetch.forEach(i => { + manifest[i].paths.forEach(filepath => { + const fetchUrl = filePathPrefix + + (!filePathPrefix.endsWith('/') ? 
'/' : '') + filepath; + fetchUrls.push(fetchUrl); + }); + }); + const buffers = await fetchWeightsFunction(fetchUrls); + const weightsTensorMap = {}; + let bufferIndexOffset = 0; + groupIndicesToFetch.forEach(i => { + const numBuffers = manifest[i].paths.length; + const weightsBuffer = new CompositeArrayBuffer(buffers.slice(bufferIndexOffset, bufferIndexOffset + numBuffers)); + const weightsEntries = groupWeightsToFetch[i]; + weightsEntries.forEach(weightsEntry => { + const byteBuffer = weightsBuffer.slice(weightsEntry.groupOffset, weightsEntry.groupOffset + weightsEntry.sizeBytes); + const nameToTensorMap = decodeWeights(byteBuffer, [weightsEntry.manifestEntry]); + for (const name in nameToTensorMap) { + weightsTensorMap[name] = nameToTensorMap[name]; + } + }); + bufferIndexOffset += numBuffers; + }); + return weightsTensorMap; + }; + } + + /** + * @license + * Copyright 2018 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ + const OCTET_STREAM_MIME_TYPE = 'application/octet-stream'; + const JSON_TYPE = 'application/json'; + class HTTPRequest { + constructor(path, loadOptions) { + this.DEFAULT_METHOD = 'POST'; + if (loadOptions == null) { + loadOptions = {}; + } + this.weightPathPrefix = loadOptions.weightPathPrefix; + this.weightUrlConverter = loadOptions.weightUrlConverter; + if (loadOptions.fetchFunc != null) { + assert$1(typeof loadOptions.fetchFunc === 'function', () => 'Must pass a function that matches the signature of ' + + '`fetch` (see ' + + 'https://developer.mozilla.org/en-US/docs/Web/API/Fetch_API)'); + this.fetch = loadOptions.fetchFunc; + } + else { + this.fetch = env().platform.fetch; + } + assert$1(path != null && path.length > 0, () => 'URL path for http must not be null, undefined or ' + + 'empty.'); + if (Array.isArray(path)) { + assert$1(path.length === 2, () => 'URL paths for http must have a length of 2, ' + + `(actual length is ${path.length}).`); + } + this.path = path; + if (loadOptions.requestInit != null && + loadOptions.requestInit.body != null) { + throw new Error('requestInit is expected to have no pre-existing body, but has one.'); + } + this.requestInit = loadOptions.requestInit || {}; + this.loadOptions = loadOptions; + } + async save(modelArtifacts) { + if (modelArtifacts.modelTopology instanceof ArrayBuffer) { + throw new Error('BrowserHTTPRequest.save() does not support saving model topology ' + + 'in binary formats yet.'); + } + const init = Object.assign({ method: this.DEFAULT_METHOD }, this.requestInit); + init.body = new FormData(); + const weightsManifest = [{ + paths: ['./model.weights.bin'], + weights: modelArtifacts.weightSpecs, + }]; + const modelTopologyAndWeightManifest = getModelJSONForModelArtifacts(modelArtifacts, weightsManifest); + init.body.append('model.json', new Blob([JSON.stringify(modelTopologyAndWeightManifest)], { type: JSON_TYPE }), 
'model.json'); + if (modelArtifacts.weightData != null) { + // TODO(mattsoulanille): Support saving models over 2GB that exceed + // Chrome's ArrayBuffer size limit. + const weightBuffer = CompositeArrayBuffer.join(modelArtifacts.weightData); + init.body.append('model.weights.bin', new Blob([weightBuffer], { type: OCTET_STREAM_MIME_TYPE }), 'model.weights.bin'); + } + const response = await this.fetch(this.path, init); + if (response.ok) { + return { + modelArtifactsInfo: getModelArtifactsInfoForJSON(modelArtifacts), + responses: [response], + }; + } + else { + throw new Error(`BrowserHTTPRequest.save() failed due to HTTP response status ` + + `${response.status}.`); + } + } + async loadModelJSON() { + const modelConfigRequest = await this.fetch(this.path, this.requestInit); + if (!modelConfigRequest.ok) { + throw new Error(`Request to ${this.path} failed with status code ` + + `${modelConfigRequest.status}. Please verify this URL points to ` + + `the model JSON of the model to load.`); + } + let modelJSON; + try { + modelJSON = await modelConfigRequest.json(); + } + catch (e) { + let message = `Failed to parse model JSON of response from ${this.path}.`; + // TODO(nsthorat): Remove this after some time when we're comfortable that + // .pb files are mostly gone. + if (this.path.endsWith('.pb')) { + message += ' Your path contains a .pb file extension. ' + + 'Support for .pb models have been removed in TensorFlow.js 1.0 ' + + 'in favor of .json models. You can re-convert your Python ' + + 'TensorFlow model using the TensorFlow.js 1.0 conversion scripts ' + + 'or you can convert your.pb models with the \'pb2json\'' + + 'NPM script in the tensorflow/tfjs-converter repository.'; + } + else { + message += ' Please make sure the server is serving valid ' + + 'JSON for this request.'; + } + throw new Error(message); + } + // We do not allow both modelTopology and weightsManifest to be missing. 
+ const modelTopology = modelJSON.modelTopology; + const weightsManifest = modelJSON.weightsManifest; + if (modelTopology == null && weightsManifest == null) { + throw new Error(`The JSON from HTTP path ${this.path} contains neither model ` + + `topology or manifest for weights.`); + } + return modelJSON; + } + /** + * Load model artifacts via HTTP request(s). + * + * See the documentation to `tf.io.http` for details on the saved + * artifacts. + * + * @returns The loaded model artifacts (if loading succeeds). + */ + async load() { + if (this.loadOptions.streamWeights) { + return this.loadStream(); + } + const modelJSON = await this.loadModelJSON(); + return getModelArtifactsForJSON(modelJSON, (weightsManifest) => this.loadWeights(weightsManifest)); + } + async loadStream() { + const modelJSON = await this.loadModelJSON(); + const fetchURLs = await this.getWeightUrls(modelJSON.weightsManifest); + const weightSpecs = getWeightSpecs(modelJSON.weightsManifest); + const stream = () => streamWeights(fetchURLs, this.loadOptions); + return Object.assign(Object.assign({}, modelJSON), { weightSpecs, getWeightStream: stream }); + } + async getWeightUrls(weightsManifest) { + const weightPath = Array.isArray(this.path) ? 
this.path[1] : this.path; + const [prefix, suffix] = parseUrl(weightPath); + const pathPrefix = this.weightPathPrefix || prefix; + const fetchURLs = []; + const urlPromises = []; + for (const weightsGroup of weightsManifest) { + for (const path of weightsGroup.paths) { + if (this.weightUrlConverter != null) { + urlPromises.push(this.weightUrlConverter(path)); + } + else { + fetchURLs.push(pathPrefix + path + suffix); + } + } + } + if (this.weightUrlConverter) { + fetchURLs.push(...await Promise.all(urlPromises)); + } + return fetchURLs; + } + async loadWeights(weightsManifest) { + const fetchURLs = await this.getWeightUrls(weightsManifest); + const weightSpecs = getWeightSpecs(weightsManifest); + const buffers = await loadWeightsAsArrayBuffer(fetchURLs, this.loadOptions); + return [weightSpecs, buffers]; + } + } + HTTPRequest.URL_SCHEME_REGEX = /^https?:\/\//; + /** + * Extract the prefix and suffix of the url, where the prefix is the path before + * the last file, and suffix is the search params after the last file. + * ``` + * const url = 'http://tfhub.dev/model/1/tensorflowjs_model.pb?tfjs-format=file' + * [prefix, suffix] = parseUrl(url) + * // prefix = 'http://tfhub.dev/model/1/' + * // suffix = '?tfjs-format=file' + * ``` + * @param url the model url to be parsed. + */ + function parseUrl(url) { + const lastSlash = url.lastIndexOf('/'); + const lastSearchParam = url.lastIndexOf('?'); + const prefix = url.substring(0, lastSlash); + const suffix = lastSearchParam > lastSlash ? url.substring(lastSearchParam) : ''; + return [prefix + '/', suffix]; + } + function isHTTPScheme(url) { + return url.match(HTTPRequest.URL_SCHEME_REGEX) != null; + } + const httpRouter = (url, loadOptions) => { + if (typeof fetch === 'undefined' && + (loadOptions == null || loadOptions.fetchFunc == null)) { + // `http` uses `fetch` or `node-fetch`, if one wants to use it in + // an environment that is not the browser or node they have to setup a + // global fetch polyfill. 
+ return null; + } + else { + let isHTTP = true; + if (Array.isArray(url)) { + isHTTP = url.every(urlItem => isHTTPScheme(urlItem)); + } + else { + isHTTP = isHTTPScheme(url); + } + if (isHTTP) { + return http(url, loadOptions); + } + } + return null; + }; + IORouterRegistry.registerSaveRouter(httpRouter); + IORouterRegistry.registerLoadRouter(httpRouter); + /** + * Creates an IOHandler subtype that sends model artifacts to HTTP server. + * + * An HTTP request of the `multipart/form-data` mime type will be sent to the + * `path` URL. The form data includes artifacts that represent the topology + * and/or weights of the model. In the case of Keras-style `tf.Model`, two + * blobs (files) exist in form-data: + * - A JSON file consisting of `modelTopology` and `weightsManifest`. + * - A binary weights file consisting of the concatenated weight values. + * These files are in the same format as the one generated by + * [tfjs_converter](https://js.tensorflow.org/tutorials/import-keras.html). + * + * The following code snippet exemplifies the client-side code that uses this + * function: + * + * ```js + * const model = tf.sequential(); + * model.add( + * tf.layers.dense({units: 1, inputShape: [100], activation: 'sigmoid'})); + * + * const saveResult = await model.save(tf.io.http( + * 'http://model-server:5000/upload', {requestInit: {method: 'PUT'}})); + * console.log(saveResult); + * ``` + * + * If the default `POST` method is to be used, without any custom parameters + * such as headers, you can simply pass an HTTP or HTTPS URL to `model.save`: + * + * ```js + * const saveResult = await model.save('http://model-server:5000/upload'); + * ``` + * + * The following GitHub Gist + * https://gist.github.com/dsmilkov/1b6046fd6132d7408d5257b0976f7864 + * implements a server based on [flask](https://github.com/pallets/flask) that + * can receive the request. 
Upon receiving the model artifacts via the request, + * this particular server reconstitutes instances of [Keras + * Models](https://keras.io/models/model/) in memory. + * + * + * @param path A URL path to the model. + * Can be an absolute HTTP path (e.g., + * 'http://localhost:8000/model-upload)') or a relative path (e.g., + * './model-upload'). + * @param requestInit Request configurations to be used when sending + * HTTP request to server using `fetch`. It can contain fields such as + * `method`, `credentials`, `headers`, `mode`, etc. See + * https://developer.mozilla.org/en-US/docs/Web/API/Request/Request + * for more information. `requestInit` must not have a body, because the + * body will be set by TensorFlow.js. File blobs representing the model + * topology (filename: 'model.json') and the weights of the model (filename: + * 'model.weights.bin') will be appended to the body. If `requestInit` has a + * `body`, an Error will be thrown. + * @param loadOptions Optional configuration for the loading. It includes the + * following fields: + * - weightPathPrefix Optional, this specifies the path prefix for weight + * files, by default this is calculated from the path param. + * - fetchFunc Optional, custom `fetch` function. E.g., in Node.js, + * the `fetch` from node-fetch can be used here. + * - onProgress Optional, progress callback function, fired periodically + * before the load is completed. + * @returns An instance of `IOHandler`. + * + * @doc { + * heading: 'Models', + * subheading: 'Loading', + * namespace: 'io', + * ignoreCI: true + * } + */ + function http(path, loadOptions) { + return new HTTPRequest(path, loadOptions); + } + /** + * Deprecated. Use `tf.io.http`. + * @param path + * @param loadOptions + */ + function browserHTTPRequest(path, loadOptions) { + return http(path, loadOptions); + } + + /** + * @license + * Copyright 2018 Google LLC. All Rights Reserved. 
+ * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + class PassthroughLoader { + constructor(modelArtifacts) { + this.modelArtifacts = modelArtifacts; + } + load() { + return this.modelArtifacts; + } + } + class PassthroughSaver { + constructor(saveHandler) { + this.saveHandler = saveHandler; + } + save(modelArtifacts) { + return this.saveHandler(modelArtifacts); + } + } + class PassthroughAsync { + constructor(handler) { + if (handler.load) { + this.load = () => Promise.resolve(handler.load()); + } + if (handler.save) { + this.save = (modelArtifacts) => Promise.resolve(handler.save(modelArtifacts)); + } + } + } + /** + * Creates an IOHandler that loads model artifacts from memory. + * + * When used in conjunction with `tf.loadLayersModel`, an instance of + * `tf.LayersModel` (Keras-style) can be constructed from the loaded artifacts. + * + * ```js + * const model = await tf.loadLayersModel(tf.io.fromMemory( + * modelTopology, weightSpecs, weightData)); + * ``` + * + * @param modelArtifacts a object containing model topology (i.e., parsed from + * the JSON format). + * @param weightSpecs An array of `WeightsManifestEntry` objects describing the + * names, shapes, types, and quantization of the weight data. Optional. + * @param weightData A single `ArrayBuffer` containing the weight data, + * concatenated in the order described by the weightSpecs. Optional. 
+ * @param trainingConfig Model training configuration. Optional. + * + * @returns A passthrough `IOHandler` that simply loads the provided data. + */ + function fromMemory(modelArtifacts, weightSpecs, weightData, trainingConfig) { + const args = arguments; + return new PassthroughAsync(fromMemorySync(...args)); + } + /** + * Creates an IOHandler that loads model artifacts from memory. + * + * When used in conjunction with `tf.loadLayersModel`, an instance of + * `tf.LayersModel` (Keras-style) can be constructed from the loaded artifacts. + * + * ```js + * const model = await tf.loadLayersModel(tf.io.fromMemory( + * modelTopology, weightSpecs, weightData)); + * ``` + * + * @param modelArtifacts a object containing model topology (i.e., parsed from + * the JSON format). + * @param weightSpecs An array of `WeightsManifestEntry` objects describing the + * names, shapes, types, and quantization of the weight data. Optional. + * @param weightData A single `ArrayBuffer` containing the weight data, + * concatenated in the order described by the weightSpecs. Optional. + * @param trainingConfig Model training configuration. Optional. + * + * @returns A passthrough `IOHandlerSync` that simply loads the provided data. + */ + function fromMemorySync(modelArtifacts, weightSpecs, weightData, trainingConfig) { + if (arguments.length === 1) { + const isModelArtifacts = modelArtifacts.modelTopology != null || + modelArtifacts.weightSpecs != null; + if (isModelArtifacts) { + return new PassthroughLoader(modelArtifacts); + } + else { + // Legacy support: with only modelTopology. + // TODO(cais): Remove this deprecated API. + console.warn('Please call tf.io.fromMemory() with only one argument. ' + + 'The argument should be of type ModelArtifacts. ' + + 'The multi-argument signature of tf.io.fromMemory() has been ' + + 'deprecated and will be removed in a future release.'); + return new PassthroughLoader({ modelTopology: modelArtifacts }); + } + } + else { + // Legacy support. 
+ // TODO(cais): Remove this deprecated API. + console.warn('Please call tf.io.fromMemory() with only one argument. ' + + 'The argument should be of type ModelArtifacts. ' + + 'The multi-argument signature of tf.io.fromMemory() has been ' + + 'deprecated and will be removed in a future release.'); + return new PassthroughLoader({ + modelTopology: modelArtifacts, + weightSpecs, + weightData, + trainingConfig + }); + } + } + /** + * Creates an IOHandler that passes saved model artifacts to a callback. + * + * ```js + * function handleSave(artifacts) { + * // ... do something with the artifacts ... + * return {modelArtifactsInfo: {...}, ...}; + * } + * + * const saveResult = model.save(tf.io.withSaveHandler(handleSave)); + * ``` + * + * @param saveHandler A function that accepts a `ModelArtifacts` and returns a + * promise that resolves to a `SaveResult`. + */ + function withSaveHandler(saveHandler) { + return new PassthroughSaver(saveHandler); + } + /** + * Creates an IOHandlerSync that passes saved model artifacts to a callback. + * + * ```js + * function handleSave(artifacts) { + * // ... do something with the artifacts ... + * return {modelArtifactsInfo: {...}, ...}; + * } + * + * const saveResult = model.save(tf.io.withSaveHandler(handleSave)); + * ``` + * + * @param saveHandler A function that accepts a `ModelArtifacts` and returns a + * `SaveResult`. + */ + function withSaveHandlerSync(saveHandler) { + return new PassthroughSaver(saveHandler); + } + + /** + * @license + * Copyright 2018 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + + var io = /*#__PURE__*/Object.freeze({ + __proto__: null, + CompositeArrayBuffer: CompositeArrayBuffer, + browserFiles: browserFiles, + browserHTTPRequest: browserHTTPRequest, + concatenateArrayBuffers: concatenateArrayBuffers, + copyModel: copyModel, + decodeWeights: decodeWeights, + decodeWeightsStream: decodeWeightsStream, + encodeWeights: encodeWeights, + fromMemory: fromMemory, + fromMemorySync: fromMemorySync, + getLoadHandlers: getLoadHandlers, + getModelArtifactsForJSON: getModelArtifactsForJSON, + getModelArtifactsForJSONSync: getModelArtifactsForJSONSync, + getModelArtifactsInfoForJSON: getModelArtifactsInfoForJSON, + getSaveHandlers: getSaveHandlers, + getWeightSpecs: getWeightSpecs, + http: http, + isHTTPScheme: isHTTPScheme, + listModels: listModels, + loadWeights: loadWeights, + moveModel: moveModel, + registerLoadRouter: registerLoadRouter, + registerSaveRouter: registerSaveRouter, + removeModel: removeModel, + weightsLoaderFactory: weightsLoaderFactory, + withSaveHandler: withSaveHandler, + withSaveHandlerSync: withSaveHandlerSync + }); + + /** + * @license + * Copyright 2018 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + /** + * Computes the confusion matrix from true labels and predicted labels. + * + * ```js + * const labels = tf.tensor1d([0, 1, 2, 1, 0], 'int32'); + * const predictions = tf.tensor1d([0, 2, 2, 1, 0], 'int32'); + * const numClasses = 3; + * const out = tf.math.confusionMatrix(labels, predictions, numClasses); + * out.print(); + * // Expected output matrix: + * // [[2, 0, 0], + * // [0, 1, 1], + * // [0, 0, 1]] + * ``` + * + * @param labels The target labels, assumed to be 0-based integers + * for the classes. The shape is `[numExamples]`, where + * `numExamples` is the number of examples included. + * @param predictions The predicted classes, assumed to be + * 0-based integers for the classes. Must have the same shape as `labels`. + * @param numClasses Number of all classes, as an integer. + * Its value must be larger than the largest element in `labels` and + * `predictions`. + * @returns The confusion matrix as a int32-type 2D tensor. The value at + * row `r` and column `c` is the number of times examples of actual class + * `r` were predicted as class `c`. 
+ * + * @doc {heading: 'Operations', subheading: 'Evaluation'} + */ + function confusionMatrix_(labels, predictions, numClasses) { + const $labels = convertToTensor(labels, 'labels', 'confusionMatrix'); + const $predictions = convertToTensor(predictions, 'predictions', 'confusionMatrix'); + assert$1(numClasses == null || numClasses > 0 && Number.isInteger(numClasses), () => `If provided, numClasses must be a positive integer, ` + + `but got ${numClasses}`); + assert$1($labels.rank === 1, () => `Expected the rank of labels to be 1, but got ${$labels.rank}`); + assert$1($predictions.rank === 1, () => `Expected the rank of predictions to be 1, ` + + `but got ${$predictions.rank}`); + assert$1($labels.shape[0] === $predictions.shape[0], () => `Mismatch in the number of examples: ` + + `${$labels.shape[0]} vs. ${$predictions.shape[0]}. ` + + `Labels and predictions should have the same number of elements.`); + assert$1(numClasses > 0 && Number.isInteger(numClasses), () => `numClasses is required to be a positive integer, but got ` + + `${numClasses}`); + // TODO(cais): In the future, if oneHot supports tensors inputs for + // `numClasses`, `confusionMatrix` can make `numClasses` optional. + const oneHotLabels = oneHot$3(cast$3($labels, 'int32'), numClasses); + const oneHotPredictions = oneHot$3(cast$3($predictions, 'int32'), numClasses); + const oneHotLabelsT = transpose$2(oneHotLabels); + const product = matMul$1(oneHotLabelsT, oneHotPredictions); + return cast$3(product, 'int32'); + } + const confusionMatrix = /* @__PURE__ */ op({ confusionMatrix_ }); + + /** + * @license + * Copyright 2018 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + + var math = /*#__PURE__*/Object.freeze({ + __proto__: null, + confusionMatrix: confusionMatrix + }); + + /** + * @license + * Copyright 2019 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + let fromPixels2DContext$1; + let hasToPixelsWarned = false; + /** + * Creates a `tf.Tensor` from an image. + * + * ```js + * const image = new ImageData(1, 1); + * image.data[0] = 100; + * image.data[1] = 150; + * image.data[2] = 200; + * image.data[3] = 255; + * + * tf.browser.fromPixels(image).print(); + * ``` + * + * @param pixels The input image to construct the tensor from. The + * supported image types are all 4-channel. You can also pass in an image + * object with following attributes: + * `{data: Uint8Array; width: number; height: number}` + * @param numChannels The number of channels of the output tensor. 
A + * numChannels value less than 4 allows you to ignore channels. Defaults to + * 3 (ignores alpha channel of input image). + * + * @returns A Tensor3D with the shape `[height, width, numChannels]`. + * + * Note: fromPixels can be lossy in some cases, same image may result in + * slightly different tensor values, if rendered by different rendering + * engines. This means that results from different browsers, or even same + * browser with CPU and GPU rendering engines can be different. See discussion + * in details: + * https://github.com/tensorflow/tfjs/issues/5482 + * + * @doc {heading: 'Browser', namespace: 'browser', ignoreCI: true} + */ + function fromPixels_(pixels, numChannels = 3) { + // Sanity checks. + if (numChannels > 4) { + throw new Error('Cannot construct Tensor with more than 4 channels from pixels.'); + } + if (pixels == null) { + throw new Error('pixels passed to tf.browser.fromPixels() can not be null'); + } + let isPixelData = false; + let isImageData = false; + let isVideo = false; + let isImage = false; + let isCanvasLike = false; + let isImageBitmap = false; + if (pixels.data instanceof Uint8Array) { + isPixelData = true; + } + else if (typeof (ImageData) !== 'undefined' && pixels instanceof ImageData) { + isImageData = true; + } + else if (typeof (HTMLVideoElement) !== 'undefined' && + pixels instanceof HTMLVideoElement) { + isVideo = true; + } + else if (typeof (HTMLImageElement) !== 'undefined' && + pixels instanceof HTMLImageElement) { + isImage = true; + // tslint:disable-next-line: no-any + } + else if (pixels.getContext != null) { + isCanvasLike = true; + } + else if (typeof (ImageBitmap) !== 'undefined' && pixels instanceof ImageBitmap) { + isImageBitmap = true; + } + else { + throw new Error('pixels passed to tf.browser.fromPixels() must be either an ' + + `HTMLVideoElement, HTMLImageElement, HTMLCanvasElement, ImageData ` + + `in browser, or OffscreenCanvas, ImageData in webworker` + + ` or {data: Uint32Array, width: number, height: 
number}, ` + + `but was ${pixels.constructor.name}`); + } + // If the current backend has 'FromPixels' registered, it has a more + // efficient way of handling pixel uploads, so we call that. + const kernel = getKernel(FromPixels, ENGINE.backendName); + if (kernel != null) { + const inputs = { pixels }; + const attrs = { numChannels }; + return ENGINE.runKernel(FromPixels, inputs, attrs); + } + const [width, height] = isVideo ? + [ + pixels.videoWidth, + pixels.videoHeight + ] : + [pixels.width, pixels.height]; + let vals; + if (isCanvasLike) { + vals = + // tslint:disable-next-line:no-any + pixels.getContext('2d').getImageData(0, 0, width, height).data; + } + else if (isImageData || isPixelData) { + vals = pixels.data; + } + else if (isImage || isVideo || isImageBitmap) { + if (fromPixels2DContext$1 == null) { + if (typeof document === 'undefined') { + if (typeof OffscreenCanvas !== 'undefined' && + typeof OffscreenCanvasRenderingContext2D !== 'undefined') { + // @ts-ignore + fromPixels2DContext$1 = new OffscreenCanvas(1, 1).getContext('2d'); + } + else { + throw new Error('Cannot parse input in current context. 
' + + 'Reason: OffscreenCanvas Context2D rendering is not supported.'); + } + } + else { + fromPixels2DContext$1 = document.createElement('canvas').getContext('2d', { willReadFrequently: true }); + } + } + fromPixels2DContext$1.canvas.width = width; + fromPixels2DContext$1.canvas.height = height; + fromPixels2DContext$1.drawImage(pixels, 0, 0, width, height); + vals = fromPixels2DContext$1.getImageData(0, 0, width, height).data; + } + let values; + if (numChannels === 4) { + values = new Int32Array(vals); + } + else { + const numPixels = width * height; + values = new Int32Array(numPixels * numChannels); + for (let i = 0; i < numPixels; i++) { + for (let channel = 0; channel < numChannels; ++channel) { + values[i * numChannels + channel] = vals[i * 4 + channel]; + } + } + } + const outShape = [height, width, numChannels]; + return tensor3d(values, outShape, 'int32'); + } + // Helper functions for |fromPixelsAsync| to check whether the input can + // be wrapped into imageBitmap. + function isPixelData(pixels) { + return (pixels != null) && (pixels.data instanceof Uint8Array); + } + function isImageBitmapFullySupported() { + return typeof window !== 'undefined' && + typeof (ImageBitmap) !== 'undefined' && + window.hasOwnProperty('createImageBitmap'); + } + function isNonEmptyPixels(pixels) { + return pixels != null && pixels.width !== 0 && pixels.height !== 0; + } + function canWrapPixelsToImageBitmap(pixels) { + return isImageBitmapFullySupported() && !(pixels instanceof ImageBitmap) && + isNonEmptyPixels(pixels) && !isPixelData(pixels); + } + /** + * Creates a `tf.Tensor` from an image in async way. + * + * ```js + * const image = new ImageData(1, 1); + * image.data[0] = 100; + * image.data[1] = 150; + * image.data[2] = 200; + * image.data[3] = 255; + * + * (await tf.browser.fromPixelsAsync(image)).print(); + * ``` + * This API is the async version of fromPixels. 
The API will first + * check |WRAP_TO_IMAGEBITMAP| flag, and try to wrap the input to + * imageBitmap if the flag is set to true. + * + * @param pixels The input image to construct the tensor from. The + * supported image types are all 4-channel. You can also pass in an image + * object with following attributes: + * `{data: Uint8Array; width: number; height: number}` + * @param numChannels The number of channels of the output tensor. A + * numChannels value less than 4 allows you to ignore channels. Defaults to + * 3 (ignores alpha channel of input image). + * + * @doc {heading: 'Browser', namespace: 'browser', ignoreCI: true} + */ + async function fromPixelsAsync(pixels, numChannels = 3) { + let inputs = null; + // Check whether the backend needs to wrap |pixels| to imageBitmap and + // whether |pixels| can be wrapped to imageBitmap. + if (env().getBool('WRAP_TO_IMAGEBITMAP') && + canWrapPixelsToImageBitmap(pixels)) { + // Force the imageBitmap creation to not do any premultiply alpha + // ops. + let imageBitmap; + try { + // wrap in try-catch block, because createImageBitmap may not work + // properly in some browsers, e.g. + // https://bugzilla.mozilla.org/show_bug.cgi?id=1335594 + // tslint:disable-next-line: no-any + imageBitmap = await createImageBitmap(pixels, { premultiplyAlpha: 'none' }); + } + catch (e) { + imageBitmap = null; + } + // createImageBitmap will clip the source size. + // In some cases, the input will have larger size than its content. + // E.g. new Image(10, 10) but with 1 x 1 content. Using + // createImageBitmap will clip the size from 10 x 10 to 1 x 1, which + // is not correct. We should avoid wrapping such resouce to + // imageBitmap. 
+ if (imageBitmap != null && imageBitmap.width === pixels.width && + imageBitmap.height === pixels.height) { + inputs = imageBitmap; + } + else { + inputs = pixels; + } + } + else { + inputs = pixels; + } + return fromPixels_(inputs, numChannels); + } + function validateImgTensor(img) { + if (img.rank !== 2 && img.rank !== 3) { + throw new Error(`toPixels only supports rank 2 or 3 tensors, got rank ${img.rank}.`); + } + const depth = img.rank === 2 ? 1 : img.shape[2]; + if (depth > 4 || depth === 2) { + throw new Error(`toPixels only supports depth of size ` + + `1, 3 or 4 but got ${depth}`); + } + if (img.dtype !== 'float32' && img.dtype !== 'int32') { + throw new Error(`Unsupported type for toPixels: ${img.dtype}.` + + ` Please use float32 or int32 tensors.`); + } + } + function validateImageOptions(imageOptions) { + const alpha = (imageOptions === null || imageOptions === void 0 ? void 0 : imageOptions.alpha) || 1; + if (alpha > 1 || alpha < 0) { + throw new Error(`Alpha value ${alpha} is suppoed to be in range [0 - 1].`); + } + } + /** + * Draws a `tf.Tensor` of pixel values to a byte array or optionally a + * canvas. + * + * When the dtype of the input is 'float32', we assume values in the range + * [0-1]. Otherwise, when input is 'int32', we assume values in the range + * [0-255]. + * + * Returns a promise that resolves when the canvas has been drawn to. + * + * @param img A rank-2 tensor with shape `[height, width]`, or a rank-3 tensor + * of shape `[height, width, numChannels]`. If rank-2, draws grayscale. If + * rank-3, must have depth of 1, 3 or 4. When depth of 1, draws + * grayscale. When depth of 3, we draw with the first three components of + * the depth dimension corresponding to r, g, b and alpha = 1. When depth of + * 4, all four components of the depth dimension correspond to r, g, b, a. + * @param canvas The canvas to draw to. 
+ * + * @doc {heading: 'Browser', namespace: 'browser'} + */ + async function toPixels(img, canvas) { + let $img = convertToTensor(img, 'img', 'toPixels'); + if (!(img instanceof Tensor)) { + // Assume int32 if user passed a native array. + const originalImgTensor = $img; + $img = cast$3(originalImgTensor, 'int32'); + originalImgTensor.dispose(); + } + validateImgTensor($img); + const [height, width] = $img.shape.slice(0, 2); + const depth = $img.rank === 2 ? 1 : $img.shape[2]; + const data = await $img.data(); + const multiplier = $img.dtype === 'float32' ? 255 : 1; + const bytes = new Uint8ClampedArray(width * height * 4); + for (let i = 0; i < height * width; ++i) { + const rgba = [0, 0, 0, 255]; + for (let d = 0; d < depth; d++) { + const value = data[i * depth + d]; + if ($img.dtype === 'float32') { + if (value < 0 || value > 1) { + throw new Error(`Tensor values for a float32 Tensor must be in the ` + + `range [0 - 1] but encountered ${value}.`); + } + } + else if ($img.dtype === 'int32') { + if (value < 0 || value > 255) { + throw new Error(`Tensor values for a int32 Tensor must be in the ` + + `range [0 - 255] but encountered ${value}.`); + } + } + if (depth === 1) { + rgba[0] = value * multiplier; + rgba[1] = value * multiplier; + rgba[2] = value * multiplier; + } + else { + rgba[d] = value * multiplier; + } + } + const j = i * 4; + bytes[j + 0] = Math.round(rgba[0]); + bytes[j + 1] = Math.round(rgba[1]); + bytes[j + 2] = Math.round(rgba[2]); + bytes[j + 3] = Math.round(rgba[3]); + } + if (canvas != null) { + if (!hasToPixelsWarned) { + const kernel = getKernel(Draw, ENGINE.backendName); + if (kernel != null) { + console.warn('tf.browser.toPixels is not efficient to draw tensor on canvas. 
' + + 'Please try tf.browser.draw instead.'); + hasToPixelsWarned = true; + } + } + canvas.width = width; + canvas.height = height; + const ctx = canvas.getContext('2d'); + const imageData = new ImageData(bytes, width, height); + ctx.putImageData(imageData, 0, 0); + } + if ($img !== img) { + $img.dispose(); + } + return bytes; + } + /** + * Draws a `tf.Tensor` to a canvas. + * + * When the dtype of the input is 'float32', we assume values in the range + * [0-1]. Otherwise, when input is 'int32', we assume values in the range + * [0-255]. + * + * @param image The tensor to draw on the canvas. Must match one of + * these shapes: + * - Rank-2 with shape `[height, width`]: Drawn as grayscale. + * - Rank-3 with shape `[height, width, 1]`: Drawn as grayscale. + * - Rank-3 with shape `[height, width, 3]`: Drawn as RGB with alpha set in + * `imageOptions` (defaults to 1, which is opaque). + * - Rank-3 with shape `[height, width, 4]`: Drawn as RGBA. + * @param canvas The canvas to draw to. + * @param options The configuration arguments for image to be drawn and the + * canvas to draw to. + * + * @doc {heading: 'Browser', namespace: 'browser'} + */ + function draw$1(image, canvas, options) { + let $img = convertToTensor(image, 'img', 'draw'); + if (!(image instanceof Tensor)) { + // Assume int32 if user passed a native array. + const originalImgTensor = $img; + $img = cast$3(originalImgTensor, 'int32'); + originalImgTensor.dispose(); + } + validateImgTensor($img); + validateImageOptions(options === null || options === void 0 ? void 0 : options.imageOptions); + const inputs = { image: $img }; + const attrs = { canvas, options }; + ENGINE.runKernel(Draw, inputs, attrs); + } + const fromPixels$1 = /* @__PURE__ */ op({ fromPixels_ }); + + var browser = /*#__PURE__*/Object.freeze({ + __proto__: null, + draw: draw$1, + fromPixels: fromPixels$1, + fromPixelsAsync: fromPixelsAsync, + toPixels: toPixels + }); + + /** + * Validate gather nd inputs. 
+ * + * @param tensor The tensor contains the source values. + * @param indices The tensor contains the indices to slice the source. + * + * @returns [resultShape, numUpdates, sliceSize, strides] + */ + function prepareAndValidate(tensor, indices) { + const tensorRank = tensor.shape.length; + const indicesRank = indices.shape.length; + if (tensorRank < 1) { + throw new Error('tf.gatherND() expects the input to be rank 1 or higher,' + + ` but the rank was ${tensorRank}.`); + } + if (indicesRank < 1) { + throw new Error('tf.gatherND() expects the indices to be rank 1 or higher,' + + ` but the rank was ${indicesRank}.`); + } + if (indices.dtype !== 'int32') { + throw new Error('tf.gatherND() expects the indices to be int32 type,' + + ` but the dtype was ${indices.dtype}.`); + } + if (indices.shape[indicesRank - 1] > tensorRank) { + throw new Error('index innermost dimension length must be <= tensor rank; saw: ' + + `${indices.shape[indicesRank - 1]} vs. ${tensorRank}`); + } + if (sizeFromShape(tensor.shape) === 0) { + throw new Error('Requested more than 0 entries, but input is empty.' 
+ + ` Input shape: ${tensor.shape}.`); + } + const indicesShape = indices.shape; + const sliceRank = indicesShape[indicesShape.length - 1]; + // The result shape is + // indices.shape[:-1] + params.shape[indices.shape[-1]:] + let nResult = 1; + for (let i = 0; i < indicesShape.length - 1; ++i) { + nResult *= indicesShape[i]; + } + const inputShape = tensor.shape; + const resultShape = indicesShape.slice(); + resultShape.pop(); + let sliceSize = 1; + for (let i = sliceRank; i < tensorRank; ++i) { + sliceSize *= inputShape[i]; + resultShape.push(inputShape[i]); + } + const strides = [...computeStrides(tensor.shape).map(stride => stride / sliceSize), + 1].slice(0, sliceRank); + return [resultShape, nResult, sliceSize, strides]; + } + + var gather_nd_util = /*#__PURE__*/Object.freeze({ + __proto__: null, + prepareAndValidate: prepareAndValidate + }); + + /** + * @license + * Copyright 2021 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ + const NEW_AXIS = -2; + const SHRINK_AXIS = -1; + function assertParamsValid(input, begin, size) { + const inputRank = input.shape.length; + assert$1(inputRank === begin.length, () => `Error in slice${inputRank}D: Length of begin ${begin} must ` + + `match the rank of the array (${inputRank}).`); + assert$1(inputRank === size.length, () => `Error in slice${inputRank}D: Length of size ${size} must ` + + `match the rank of the array (${inputRank}).`); + for (let i = 0; i < inputRank; ++i) { + assert$1(begin[i] + size[i] <= input.shape[i], () => `Error in slice${inputRank}D: begin[${i}] + size[${i}] ` + + `(${begin[i] + size[i]}) would overflow input.shape[${i}] (${input.shape[i]})`); + } + } + /** Converts a binary mask to an array of axes. Used in stridedSlice(). */ + function maskToAxes(mask) { + const axes = []; + let axis = 0; + while (mask > 0) { + if (mask & 1) { + axes.push(axis); + } + mask /= 2; + axis++; + } + return axes; + } + /** Computes the output shape given the strided slice params. */ + function computeOutShape$2(begin, end, strides) { + const size = []; + for (let axis = 0; axis < begin.length; axis++) { + size[axis] = Math.ceil((end[axis] - begin[axis]) / strides[axis]); + } + return size; + } + // Creates full selection at the elided dimensions. If the dimension matches + // the ellipsis mask, override the current stride value. Otherwise, insert. 
+ function stridesWithElidedDims(strides, ellipsisInsertionIndex, numElidedAxes, inputShape) { + const newStrides = [...strides]; + for (let i = newStrides.length; i < inputShape.length; i++) { + newStrides.push(1); + } + for (let i = 0; i < numElidedAxes; i++) { + if (i === 0) { + newStrides[ellipsisInsertionIndex] = 1; + } + else { + newStrides.splice(ellipsisInsertionIndex, 0 /* num elements to delete */, 1 /* element to add */); + newStrides.pop(); + } + } + return newStrides; + } + function unnormalizeAxis(ellipsisInsertionIndex, numElidedAxes, normalizedAxis) { + if (normalizedAxis <= ellipsisInsertionIndex) { + return normalizedAxis; + } + return normalizedAxis - (numElidedAxes - 1); + } + function getElidedAxes(numElidedAxes, ellipsisInsertionIndex) { + const elidedAxes = []; + for (let i = 0; i < numElidedAxes; i++) { + elidedAxes.push(ellipsisInsertionIndex + i); + } + return elidedAxes; + } + // Normalize the start, end and strides. + function getNormalizedAxes(inputShape, ellipsisAxes, numInterpolatedAxes, begin, end, strides, beginMask, endMask, ellipsisMask) { + const inputRank = inputShape.length; + let normalizedBegin = new Array(inputRank), normalizedEnd = new Array(inputRank), normalizedStrides = new Array(inputRank); + if (ellipsisAxes.length && numInterpolatedAxes > 0) { + const fullIndex = ellipsisAxes[0]; + // The ellipsis applies to the masked index as well as any dimensions + // that are interpolated. 
+ const numElidedAxes = numInterpolatedAxes + 1; + normalizedBegin = startIndicesWithElidedDims(beginMask, fullIndex, numElidedAxes, begin, inputShape); + normalizedEnd = stopIndicesWithElidedDims(endMask, fullIndex, numElidedAxes, end, inputShape); + normalizedStrides = + stridesWithElidedDims(strides, fullIndex, numElidedAxes, inputShape); + } + else { + for (let axis = 0; axis < inputRank; axis++) { + normalizedBegin[axis] = startForAxis(beginMask, begin, strides, inputShape, axis, ellipsisMask); + normalizedEnd[axis] = + stopForAxis(endMask, end, strides, inputShape, axis, ellipsisMask); + normalizedStrides[axis] = stridesForAxis(strides, axis, ellipsisMask); + } + } + return { + begin: normalizedBegin, + end: normalizedEnd, + strides: normalizedStrides + }; + } + // Creates full selection at the elided dimensions. If the dimension matches + // the ellipsis mask, override the current start value. Otherwise, insert. + function startIndicesWithElidedDims(beginMask, ellipsisInsertionIndex, numElidedAxes, originalBegin, inputShape) { + const newIndices = [...inputShape]; + const elidedAxes = getElidedAxes(numElidedAxes, ellipsisInsertionIndex); + for (let axis = 0; axis < newIndices.length; axis++) { + if (elidedAxes.indexOf(axis) > -1) { + newIndices[axis] = 0; + } + else { + const originalAxis = unnormalizeAxis(ellipsisInsertionIndex, numElidedAxes, axis); + let originalValue = originalBegin[originalAxis]; + if (beginMask & 1 << originalAxis) { + originalValue = 0; + } + newIndices[axis] = originalValue; + } + } + return newIndices; + } + // Creates full selection at the elided dimensions. If the dimension matches + // the ellipsis mask, override the current stop value. Otherwise, insert. 
+ function stopIndicesWithElidedDims(endMask, ellipsisInsertionIndex, numElidedAxes, originalEnd, inputShape) { + const newIndices = [...inputShape]; + const elidedAxes = getElidedAxes(numElidedAxes, ellipsisInsertionIndex); + for (let axis = 0; axis < newIndices.length; axis++) { + if (elidedAxes.indexOf(axis) > -1) { + newIndices[axis] = Number.MAX_SAFE_INTEGER; + } + else { + const originalAxis = unnormalizeAxis(ellipsisInsertionIndex, numElidedAxes, axis); + let originalValue = originalEnd[originalAxis]; + if (endMask & 1 << originalAxis) { + originalValue = Number.MAX_SAFE_INTEGER; + } + newIndices[axis] = originalValue; + } + } + for (let i = 0; i < newIndices.length; i++) { + // Handle negative indices + const axisSize = inputShape[i]; + if (newIndices[i] < 0) { + newIndices[i] += axisSize; + } + newIndices[i] = clamp(0, newIndices[i], inputShape[i]); + } + return newIndices; + } + function stridesForAxis(strides, axis, ellipsisMask) { + let stride = strides[axis]; + if (ellipsisMask & (1 << axis) || stride == null) { + stride = 1; + } + return stride; + } + function startForAxis(beginMask, startIndices, strides, inputShape, axis, ellipsisMask) { + // Begin with the specified index + let start = startIndices[axis]; + const stride = strides[axis] || 1; + // Check the axis bit from right of masked axes, or the begin index is not set + // for the axis. + if (beginMask & 1 << axis || ellipsisMask & 1 << axis || start == null) { + if (stride > 0) { + // Forward iteration - use the first element. These values will get + // clamped below (Note: We could have set them to 0 and axis_size-1, but + // use lowest() and max() to maintain symmetry with StopForAxis()) + start = Number.MIN_SAFE_INTEGER; + } + else { + // Backward iteration - use the last element. 
+ start = Number.MAX_SAFE_INTEGER; + } + } + // Handle negative indices + const axisSize = inputShape[axis]; + if (start < 0) { + start += axisSize; + } + // Clamping + start = clamp(0, start, axisSize - 1); + return start; + } + function stopForAxis(endMask, stopIndices, strides, inputShape, axis, ellipsisMask) { + // Begin with the specified index + let stop = stopIndices[axis]; + const stride = strides[axis] || 1; + // Check the axis bit from right of masked axes, or if the stop index is not + // set for this axis. + if (endMask & (1 << axis) || ellipsisMask & (1 << axis) || stop == null) { + if (stride > 0) { + // Forward iteration - use the last element. These values will get + // clamped below + stop = Number.MAX_SAFE_INTEGER; + } + else { + // Backward iteration - use the first element. + stop = Number.MIN_SAFE_INTEGER; + } + } + // Handle negative indices + const axisSize = inputShape[axis]; + if (stop < 0) { + stop += axisSize; + } + // Clamping + // Because the end index points one past the last element, we need slightly + // different clamping ranges depending on the direction. + if (stride > 0) { + // Forward iteration + stop = clamp(0, stop, axisSize); + } + else { + // Backward iteration + stop = clamp(-1, stop, axisSize - 1); + } + return stop; + } + /** + * Returns true if the slice occupies a continous set of elements in the + * 'flat' space. + */ + function isSliceContinous(shape, begin, size) { + // Index of the first axis that has size > 1. + let firstNonOneAxis = size.length; + for (let i = 0; i < size.length; i++) { + if (size[i] > 1) { + firstNonOneAxis = i; + break; + } + } + for (let i = firstNonOneAxis + 1; i < size.length; i++) { + if (begin[i] > 0 || size[i] !== shape[i]) { + return false; + } + } + return true; + } + function computeFlatOffset(begin, strides) { + let flatOffset = begin.length > 0 ? 
begin[begin.length - 1] : 1; + for (let i = 0; i < begin.length - 1; i++) { + flatOffset += begin[i] * strides[i]; + } + return flatOffset; + } + function parseSliceParams(x, begin, size) { + // The following logic allows for more ergonomic calls. + let begin_; + const xRank = x.shape.length; + if (typeof begin === 'number') { + begin_ = [begin, ...new Array(xRank - 1).fill(0)]; + } + else if (begin.length < xRank) { + begin_ = begin.concat(new Array(xRank - begin.length).fill(0)); + } + else { + begin_ = begin.slice(); + } + begin_.forEach(d => { + assert$1(d !== -1, () => 'slice() does not support negative begin indexing.'); + }); + let size_; + if (size == null) { + size_ = new Array(xRank).fill(-1); + } + else if (typeof size === 'number') { + size_ = [size, ...new Array(xRank - 1).fill(-1)]; + } + else if (size.length < xRank) { + size_ = size.concat(new Array(xRank - size.length).fill(-1)); + } + else { + size_ = size; + } + size_ = size_.map((d, i) => { + if (d >= 0) { + return d; + } + else { + assert$1(d === -1, () => `Negative size values should be exactly -1 but got ` + + `${d} for the slice() size at index ${i}.`); + return x.shape[i] - begin_[i]; + } + }); + return [begin_, size_]; + } + // Convert the slicing specification from a sparse representation to a dense + // representation. This means that all ellipses and newaxis are expanded out. + function sliceInfo(xShape, begin, end, strides, beginMask, endMask, ellipsisMask, newAxisMask, shrinkAxisMask) { + let stridesNonNull; + if (strides == null) { + stridesNonNull = new Array(begin.length); + stridesNonNull.fill(1); + } + else { + stridesNonNull = strides; + } + // Only one non-zero bit is allowed in ellipsisMask, which means ellipsisMask + // is a power of 2. Use bit compares to ensure ellipsisMask is 0 or a power + // of 2. When i is a power of 2, i & (i - 1) is always 0. 
+ // Also ref: + // https://stackoverflow.com/questions/600293/how-to-check-if-a-number-is-a-power-of-2 + if (ellipsisMask != null && (ellipsisMask & (ellipsisMask - 1)) !== 0) { + throw new Error('Multiple ellipses in slice is not allowed.'); + } + // Step 1: Account for ellipsis and new axis. + // Check for ellipsis and count how many non-newaxis there are after. + let ellipsisSeen = false; + const sparseSpec = { + dims: stridesNonNull.length, + numAddAxisAfterEllipsis: 0, + begin: begin.slice(), + end: end.slice(), + strides: stridesNonNull.slice(), + beginMask, + endMask, + ellipsisMask, + newAxisMask, + shrinkAxisMask + }; + for (let i = 0; i < sparseSpec.dims; i++) { + if (ellipsisSeen && ((1 << i) & newAxisMask) !== 0) { + sparseSpec.numAddAxisAfterEllipsis++; + } + if ((1 << i) & ellipsisMask) { + ellipsisSeen = true; + } + } + // If no ellipsis insert one at the end. + if (!ellipsisSeen) { + sparseSpec.ellipsisMask |= (1 << sparseSpec.dims); + sparseSpec.dims++; // this effects loop iteration below + } + // Step 2: Make a sparse spec into a full index spec. + // + // The sparse spec deos not correspond to the number of dimensions. + // Make a dense spec that cooresponds to the number of dimensions. + // + // For example suppose foo[...,3:] on foo.shape = [2, 2, 3] then we need to + // produce the missing beginMask for the first two dimensions i.e. from + // beginMaskSpec = 0, endMaskSpec = 2, we achieve beginMask = 6 (110), + // endMask = 7 (111). + const denseSpec = { + dims: xShape.length, + beginMask: 0, + endMask: 0, + beginValid: false, + endValid: false + }; + buildDenseSpec(sparseSpec, denseSpec); + // Step 3: Make implicit ranges (non-zero beginMasks and endMasks) explicit + // and bounds check. 
+ let isIdentity = true; + let sliceDim0 = true; + let isSimpleSlice = true; + const processingShape = []; + const finalShape = []; + for (let i = 0; i < xShape.length; ++i) { + if (denseSpec.strides[i] === 0) { + throw Error(`strides[${i}] must be non-zero`); + } + const shrinkI = !!(denseSpec.shrinkAxisMask & (1 << i)); + const dimI = xShape[i]; + if (dimI === -1) { + processingShape.push(shrinkI ? 1 : -1); + continue; + } + const masks = [denseSpec.beginMask & (1 << i), denseSpec.endMask & (1 << i)]; + const validRange = [ + denseSpec.strides[i] > 0 ? 0 : -1, + denseSpec.strides[i] > 0 ? dimI : dimI - 1 + ]; + if (shrinkI && denseSpec.strides[i] <= 0) { + throw Error('only stride 1 allowed on non-range indexing.'); + } + isSimpleSlice = isSimpleSlice && (denseSpec.strides[i] === 1); + const beginAndEndMasked = !!((denseSpec.beginMask & (1 << i)) && (denseSpec.endMask & (1 << i))); + if (denseSpec.beginValid && denseSpec.endValid) { + if (shrinkI) { + // If we are shrinking, the end index is now possibly incorrect. In + // particular foo[-1] produces sparseBegin = -1, sparseEnd = 0. + // and canonical puts these to n-1 and 0, which implies a degenerate + // interval. Fortunately, it is now safe to re-create end as begin + 1. + const xFwd = denseSpec.begin[i] < 0 ? 
dimI + denseSpec.begin[i] : + denseSpec.begin[i]; + denseSpec.begin[i] = xFwd; + denseSpec.end[i] = denseSpec.begin[i] + 1; + if (xFwd < 0 || xFwd >= dimI) { + throw Error(`slice index ${denseSpec.begin[i]} of dimension ${i} out of bounds.`); + } + } + else { + denseSpec.begin[i] = canonical(denseSpec.begin[i], 0, denseSpec.strides[i], dimI, masks, validRange); + denseSpec.end[i] = canonical(denseSpec.end[i], 1, denseSpec.strides[i], dimI, masks, validRange); + } + // Update optimization values + const takeAllInDimension = denseSpec.strides[i] === 1 && + denseSpec.begin[i] === 0 && denseSpec.end[i] === dimI; + isIdentity = isIdentity && takeAllInDimension; + sliceDim0 = sliceDim0 && + ((i === 0 && denseSpec.strides[i] === 1) || takeAllInDimension); + } + else { + isIdentity = + isIdentity && ((denseSpec.strides[i] === 1) && beginAndEndMasked); + sliceDim0 = sliceDim0 && + ((i === 0 && denseSpec.strides[i] === 1) || beginAndEndMasked); + } + // Compute the processing shape (the intermediate Eigen will produce) + let intervalLength; + let knownInterval = false; + if (denseSpec.beginValid && denseSpec.endValid) { + intervalLength = denseSpec.end[i] - denseSpec.begin[i]; + knownInterval = true; + } + else if (shrinkI) { + // The dimension is still known as 1 for the processingShape, but will be + // discarded for the final shape. + intervalLength = 1; + knownInterval = true; + } + else if (beginAndEndMasked) { + // Even if we don't have values for begin or end, we do know that this + // dimension covers the whole interval. If we have shape information for + // this dimension, that tells us the interval length. 
+ if (dimI >= 0) { + if (denseSpec.strides[i] < 0) { + intervalLength = -dimI; + } + else { + intervalLength = dimI; + } + knownInterval = true; + } + } + if (knownInterval) { + let sizeI; + // Hold zero if the interval is degenerate, otherwise account for + // remainder + if (intervalLength === 0 || + ((intervalLength < 0) !== (denseSpec.strides[i] < 0))) { + sizeI = 0; + } + else { + sizeI = Math.trunc(intervalLength / denseSpec.strides[i]) + + (intervalLength % denseSpec.strides[i] !== 0 ? 1 : 0); + } + processingShape.push(sizeI); + } + else { + processingShape.push(-1); + } + } + // Step 4: Compute the final shape + // + // newAxis will increase dimension by 1 (with a one-size dimension) + // slices like foo[3, ...] will reduce dimension by 1. + // This cannot be done earlier, because it depends on Step 3. + for (let denseDim = 0; denseDim < denseSpec.finalShapeGatherIndices.length; ++denseDim) { + const gatherIndex = denseSpec.finalShapeGatherIndices[denseDim]; + if (gatherIndex >= 0) { + finalShape.push(processingShape[gatherIndex]); + } + else if (gatherIndex === NEW_AXIS) { + finalShape.push(1); + } + } + const finalShapeSparse = finalShape.filter((dim, i) => denseSpec.finalShapeGatherIndices[i] !== NEW_AXIS); + return { + finalShapeSparse, + finalShape, + isIdentity, + sliceDim0, + isSimpleSlice, + begin: denseSpec.begin, + end: denseSpec.end, + strides: denseSpec.strides + }; + } + function buildDenseSpec(sparse, dense) { + dense.beginMask = 0; + dense.endMask = 0; + dense.shrinkAxisMask = 0; + let fullIndex = 0; + dense.beginValid = sparse.begin != null; + dense.endValid = sparse.end != null; + dense.begin = new Array(dense.dims); + dense.end = new Array(dense.dims); + dense.strides = new Array(dense.dims); + dense.finalShapeGatherIndices = []; + dense.finalShapeGatherIndicesSparse = []; + dense.inputShapeGatherIndicesSparse = new Array(dense.dims); + for (let i = 0; i < sparse.dims; i++) { + if ((1 << i) & sparse.ellipsisMask) { + // Only the bit that 
has ellipsis will fall in this condition. + // Expand the ellipsis into the appropriate indices + // Note: this only works because we guaranteed one ellipsis. + const nextIndex = Math.min(dense.dims - (sparse.dims - i) + 1 + sparse.numAddAxisAfterEllipsis, dense.dims); + for (; fullIndex < nextIndex; fullIndex++) { + // newAxis aren't real axis so you have to skip. + dense.begin[fullIndex] = 0; + dense.end[fullIndex] = 0; + dense.strides[fullIndex] = 1; + dense.beginMask |= (1 << fullIndex); + dense.endMask |= (1 << fullIndex); + dense.finalShapeGatherIndices.push(fullIndex); + dense.finalShapeGatherIndicesSparse.push(-1); + dense.inputShapeGatherIndicesSparse[fullIndex] = i; + } + } + else if ((1 << i) & sparse.newAxisMask) { + // Only the bit that has newAxis will fall in this condition. + dense.finalShapeGatherIndices.push(NEW_AXIS); + dense.finalShapeGatherIndicesSparse.push(-1); + } + else { + if (fullIndex === dense.begin.length) { + throw Error(`Index out of range using input dim ${fullIndex}; input ` + + `has only ${dense.dims} dims, ${dense.begin.length}.`); + } + // Gather slicing spec into appropriate index. + if (sparse.begin != null) { + dense.begin[fullIndex] = sparse.begin[i]; + } + if (sparse.end != null) { + dense.end[fullIndex] = sparse.end[i]; + } + dense.strides[fullIndex] = sparse.strides[i]; + if (sparse.beginMask & (1 << i)) { + dense.beginMask |= (1 << fullIndex); + } + if (sparse.endMask & (1 << i)) { + dense.endMask |= (1 << fullIndex); + } + // If shrink, record where to get the dimensionality from (i.e. newAxis) + // creates a fake 1 size dimension. Also remember shrink axis (now in + // dense form) so we can ignore dense.end below. 
+ if (sparse.shrinkAxisMask & (1 << i)) { + dense.finalShapeGatherIndices.push(SHRINK_AXIS); + dense.finalShapeGatherIndicesSparse.push(-1); + dense.shrinkAxisMask |= (1 << fullIndex); + } + else { + dense.finalShapeGatherIndices.push(fullIndex); + // Remember that where in the sparse shape the dense dim comes from. + dense.finalShapeGatherIndicesSparse.push(i); + } + dense.inputShapeGatherIndicesSparse[fullIndex] = i; + fullIndex++; + } + } + } + function canonical(x, c, strideI, dimI, masks, validRange) { + if (masks[c]) { + return strideI > 0 ? validRange[c] : validRange[(c + 1) & 1]; + } + else { + const xFwd = x < 0 ? dimI + x : x; // make negative indices positive + return xFwd < validRange[0] ? validRange[0] : + xFwd > validRange[1] ? validRange[1] : xFwd; + } + } + + var slice_util = /*#__PURE__*/Object.freeze({ + __proto__: null, + assertParamsValid: assertParamsValid, + computeFlatOffset: computeFlatOffset, + computeOutShape: computeOutShape$2, + getNormalizedAxes: getNormalizedAxes, + isSliceContinous: isSliceContinous, + maskToAxes: maskToAxes, + parseSliceParams: parseSliceParams, + sliceInfo: sliceInfo, + startForAxis: startForAxis, + startIndicesWithElidedDims: startIndicesWithElidedDims, + stopForAxis: stopForAxis, + stopIndicesWithElidedDims: stopIndicesWithElidedDims, + stridesForAxis: stridesForAxis, + stridesWithElidedDims: stridesWithElidedDims + }); + + /** @license See the LICENSE file. */ + // This code is auto-generated, do not modify this file! + const version$7 = '4.22.0'; + + /** + * @license + * Copyright 2018 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + class OptimizerConstructors { + /** + * Constructs a `tf.SGDOptimizer` that uses stochastic gradient descent. + * + * ```js + * // Fit a quadratic function by learning the coefficients a, b, c. + * const xs = tf.tensor1d([0, 1, 2, 3]); + * const ys = tf.tensor1d([1.1, 5.9, 16.8, 33.9]); + * + * const a = tf.scalar(Math.random()).variable(); + * const b = tf.scalar(Math.random()).variable(); + * const c = tf.scalar(Math.random()).variable(); + * + * // y = a * x^2 + b * x + c. + * const f = x => a.mul(x.square()).add(b.mul(x)).add(c); + * const loss = (pred, label) => pred.sub(label).square().mean(); + * + * const learningRate = 0.01; + * const optimizer = tf.train.sgd(learningRate); + * + * // Train the model. + * for (let i = 0; i < 10; i++) { + * optimizer.minimize(() => loss(f(xs), ys)); + * } + * + * // Make predictions. + * console.log( + * `a: ${a.dataSync()}, b: ${b.dataSync()}, c: ${c.dataSync()}`); + * const preds = f(xs).dataSync(); + * preds.forEach((pred, i) => { + * console.log(`x: ${i}, pred: ${pred}`); + * }); + * ``` + * + * @param learningRate The learning rate to use for the SGD algorithm. + * + * @doc {heading: 'Training', subheading: 'Optimizers', namespace: 'train'} + */ + static sgd(learningRate) { + return new SGDOptimizer(learningRate); + } + /** + * Constructs a `tf.MomentumOptimizer` that uses momentum gradient + * descent. 
+ * + * See + * [http://proceedings.mlr.press/v28/sutskever13.pdf]( + * http://proceedings.mlr.press/v28/sutskever13.pdf) + * + * @param learningRate The learning rate to use for the Momentum gradient + * descent algorithm. + * @param momentum The momentum to use for the momentum gradient descent + * algorithm. + * + * @doc {heading: 'Training', subheading: 'Optimizers', namespace: 'train'} + */ + static momentum(learningRate, momentum, useNesterov = false) { + return new MomentumOptimizer(learningRate, momentum, useNesterov); + } + /** + * Constructs a `tf.RMSPropOptimizer` that uses RMSProp gradient + * descent. This implementation uses plain momentum and is not centered + * version of RMSProp. + * + * See + * [http://www.cs.toronto.edu/~tijmen/csc321/slides/lecture_slides_lec6.pdf]( + * http://www.cs.toronto.edu/~tijmen/csc321/slides/lecture_slides_lec6.pdf) + * + * @param learningRate The learning rate to use for the RMSProp gradient + * descent algorithm. + * @param decay The discounting factor for the history/coming gradient. + * @param momentum The momentum to use for the RMSProp gradient descent + * algorithm. + * @param epsilon Small value to avoid zero denominator. + * @param centered If true, gradients are normalized by the estimated + * variance of the gradient. + * + * @doc {heading: 'Training', subheading: 'Optimizers', namespace: 'train'} + */ + static rmsprop(learningRate, decay = .9, momentum = 0.0, epsilon = null, centered = false) { + return new RMSPropOptimizer(learningRate, decay, momentum, epsilon, centered); + } + /** + * Constructs a `tf.AdamOptimizer` that uses the Adam algorithm. + * See [https://arxiv.org/abs/1412.6980](https://arxiv.org/abs/1412.6980) + * + * @param learningRate The learning rate to use for the Adam gradient + * descent algorithm. + * @param beta1 The exponential decay rate for the 1st moment estimates. + * @param beta2 The exponential decay rate for the 2nd moment estimates. 
+ * @param epsilon A small constant for numerical stability. + * + * @doc {heading: 'Training', subheading: 'Optimizers', namespace: 'train'} + */ + static adam(learningRate = 0.001, beta1 = 0.9, beta2 = 0.999, epsilon = null) { + return new AdamOptimizer(learningRate, beta1, beta2, epsilon); + } + /** + * Constructs a `tf.AdadeltaOptimizer` that uses the Adadelta algorithm. + * See [https://arxiv.org/abs/1212.5701](https://arxiv.org/abs/1212.5701) + * + * @param learningRate The learning rate to use for the Adadelta gradient + * descent algorithm. + * @param rho The learning rate decay over each update. + * @param epsilon A constant epsilon used to better condition the grad + * update. + * + * @doc {heading: 'Training', subheading: 'Optimizers', namespace: 'train'} + */ + static adadelta(learningRate = .001, rho = .95, epsilon = null) { + return new AdadeltaOptimizer(learningRate, rho, epsilon); + } + /** + * Constructs a `tf.AdamaxOptimizer` that uses the Adamax algorithm. + * See [https://arxiv.org/abs/1412.6980](https://arxiv.org/abs/1412.6980) + * + * @param learningRate The learning rate to use for the Adamax gradient + * descent algorithm. + * @param beta1 The exponential decay rate for the 1st moment estimates. + * @param beta2 The exponential decay rate for the 2nd moment estimates. + * @param epsilon A small constant for numerical stability. + * @param decay The learning rate decay over each update. + * + * @doc {heading: 'Training', subheading: 'Optimizers', namespace: 'train'} + */ + static adamax(learningRate = 0.002, beta1 = 0.9, beta2 = 0.999, epsilon = null, decay = 0.0) { + return new AdamaxOptimizer(learningRate, beta1, beta2, epsilon, decay); + } + /** + * Constructs a `tf.AdagradOptimizer` that uses the Adagrad algorithm. 
+ * See + * [http://www.jmlr.org/papers/volume12/duchi11a/duchi11a.pdf]( + * http://www.jmlr.org/papers/volume12/duchi11a/duchi11a.pdf) + * or + * [http://ruder.io/optimizing-gradient-descent/index.html#adagrad]( + * http://ruder.io/optimizing-gradient-descent/index.html#adagrad) + * + * @param learningRate The learning rate to use for the Adagrad gradient + * descent algorithm. + * @param initialAccumulatorValue Starting value for the accumulators, must be + * positive. + * + * @doc {heading: 'Training', subheading: 'Optimizers', namespace: 'train'} + */ + static adagrad(learningRate, initialAccumulatorValue = 0.1) { + return new AdagradOptimizer(learningRate, initialAccumulatorValue); + } + } + + /** + * @license + * Copyright 2018 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + const train = OptimizerConstructors; + + /** + * @license + * Copyright 2017 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + const delayCallback = (() => { + if (typeof requestAnimationFrame !== 'undefined') { + return requestAnimationFrame; + } + else if (typeof setImmediate !== 'undefined') { + return setImmediate; + } + return (f) => f(); // no delays + })(); + /** + * Returns a promise that resolves when a requestAnimationFrame has completed. + * + * On Node.js this uses setImmediate instead of requestAnimationFrame. + * + * This is simply a sugar method so that users can do the following: + * `await tf.nextFrame();` + * + * @doc {heading: 'Performance', subheading: 'Timing'} + */ + function nextFrame() { + return new Promise(resolve => delayCallback(() => resolve())); + } + + /** + * @license + * Copyright 2017 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ + function assertParamsConsistent(shapes, axis) { + const rank = shapes[0].length; + shapes.forEach((shape, i) => { + assert$1(shape.length === rank, () => `Error in concat${rank}D: rank of tensors[${i}] must be the same ` + + `as the rank of the rest (${rank})`); + }); + assert$1(axis >= 0 && axis < rank, () => `Error in concat${rank}D: axis must be between 0 and ${rank - 1}.`); + const firstShape = shapes[0]; + shapes.forEach((shape, i) => { + for (let r = 0; r < rank; r++) { + assert$1((r === axis) || (shape[r] === firstShape[r]), () => `Error in concat${rank}D: Shape of tensors[${i}] (${shape}) ` + + `does not match the shape of the rest (${firstShape}) ` + + `along the non-concatenated axis ${i}.`); + } + }); + } + function computeOutShape$1(shapes, axis) { + const outputShape = shapes[0].slice(); + for (let i = 1; i < shapes.length; i++) { + outputShape[axis] += shapes[i][axis]; + } + return outputShape; + } + + /** + * @license + * Copyright 2020 Google Inc. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + + /** + * @license + * Copyright 2022 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + var RowPartitionType$1; + (function (RowPartitionType) { + RowPartitionType[RowPartitionType["FIRST_DIM_SIZE"] = 0] = "FIRST_DIM_SIZE"; + RowPartitionType[RowPartitionType["VALUE_ROWIDS"] = 1] = "VALUE_ROWIDS"; + RowPartitionType[RowPartitionType["ROW_LENGTHS"] = 2] = "ROW_LENGTHS"; + RowPartitionType[RowPartitionType["ROW_SPLITS"] = 3] = "ROW_SPLITS"; + RowPartitionType[RowPartitionType["ROW_LIMITS"] = 4] = "ROW_LIMITS"; + RowPartitionType[RowPartitionType["ROW_STARTS"] = 5] = "ROW_STARTS"; + })(RowPartitionType$1 || (RowPartitionType$1 = {})); + function combineRaggedTensorToTensorShapes(raggedRank, shape, valueShape) { + // Test for consistency of valueShape and shape specified. + // If shape is unspecified and valueShape is specified, then copy + // over the size from the valueShape dimension. + let outputShape = new Array(); + if (valueShape == null && shape == null) { + return outputShape; + } + if (shape == null) { + // Here, value_shape must be of known size. + while (outputShape.length < raggedRank + valueShape.length) { + outputShape.push(-1); + } + } + else { + outputShape = shape.slice(); + } + if (valueShape == null) { + return outputShape; + } + // At this point, valueShape and output_shape have known ranks. 
+ if (raggedRank + valueShape.length !== outputShape.length) { + throw new Error(`rt input.shape and shape=${shape} are incompatible: rt input.rank = ${raggedRank + + valueShape.length}, but shape.rank = ${outputShape.length}`); + } + for (let i = 1; i < valueShape.length; ++i) { + const valueDim = valueShape[i]; + const outputShapeDimIndex = outputShape[outputShape.length - valueShape.length + i]; + const outputShapeDim = outputShape[outputShapeDimIndex]; + if (valueDim >= 0) { + if (outputShapeDim >= 0) { + if (outputShapeDim !== valueDim) { + throw new Error(`rt input.shape and shape=${shape} are incompatible: rt input.shape[${i + raggedRank}] = ${valueDim} but shape[${i + raggedRank}] = ${outputShapeDim}`); + } + } + else { + outputShape[outputShapeDimIndex] = valueDim; + } + } + } + return outputShape; + } + function getRowPartitionTypesHelper(rowPartitionTypeStrings) { + const stringToType = { + 'FIRST_DIM_SIZE': RowPartitionType$1.FIRST_DIM_SIZE, + 'VALUE_ROWIDS': RowPartitionType$1.VALUE_ROWIDS, + 'ROW_LENGTHS': RowPartitionType$1.ROW_LENGTHS, + 'ROW_SPLITS': RowPartitionType$1.ROW_SPLITS, + 'ROW_LIMITS': RowPartitionType$1.ROW_LIMITS, + 'ROW_STARTS': RowPartitionType$1.ROW_STARTS + }; + const result = []; + for (const typeStr of rowPartitionTypeStrings) { + if (typeStr in stringToType) { + result.push(stringToType[typeStr]); + } + else { + break; + } + } + return result; + } + function getRaggedRank(rowPartitionTypes) { + if (rowPartitionTypes.length === 0) { + return 0; + } + if (rowPartitionTypes[0] === RowPartitionType$1.FIRST_DIM_SIZE) { + return rowPartitionTypes.length - 1; + } + return rowPartitionTypes.length; + } + function validateDefaultValueShape(defaultValueShape, valueShape) { + if (defaultValueShape == null || valueShape == null) { + return; + } + const defaultNDims = defaultValueShape.length; + const valuesNDims = valueShape.length; + if (defaultNDims >= valuesNDims) { + throw new Error(`defaultValue.shape=${defaultValueShape} and ragged 
tensor flatValues.shape=${valueShape}, are incompatible: defaultValue.rank = ${defaultNDims} must be less than ragged tensor input flatValues.rank = ${valuesNDims})`); + } + for (let i = 0; i < Math.min(defaultNDims, valuesNDims - 1); ++i) { + const defaultDim = defaultValueShape[i]; + const valueDim = valueShape[i + 1]; + if (defaultDim >= 0 && valueDim >= 0 && defaultDim !== 1 && + defaultDim !== valueDim) { + throw new Error(`defaultValue.shape=${defaultValueShape}, and ragged tensor input flatValues.shape=${valueShape} are incompatible: defaultValue.shape[${i - defaultValueShape.length}] = ${defaultDim} but ragged tensor input.flatValues.shape[${i - defaultValueShape.length}] = ${valueDim}`); + } + } + } + + /** + * @license + * Copyright 2017 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + const PARALLELIZE_THRESHOLD = 30; + function computeOptimalWindowSize(inSize) { + if (inSize <= PARALLELIZE_THRESHOLD) { + return inSize; + } + return nearestDivisor(inSize, Math.floor(Math.sqrt(inSize))); + } + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + // Returns the image center in pixels. + function getImageCenter(center, imageHeight, imageWidth) { + const centerX = imageWidth * (typeof center === 'number' ? center : center[0]); + const centerY = imageHeight * (typeof center === 'number' ? center : center[1]); + return [centerX, centerY]; + } + + /** + * @license + * Copyright 2018 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ + /** + * Gets the new shape of the input Tensor after it's been reshaped + * to: + * [blockShape[0], ..., blockShape[M-1], batch / prod(blockShape), + * inputShape[1], ..., inputShape[N-1]] + * + * See step 1: https://www.tensorflow.org/api_docs/python/tf/batch_to_space_nd + */ + function getReshaped(inputShape, blockShape, prod, batchToSpace = true) { + let reshaped = []; + if (batchToSpace) { + reshaped = reshaped.concat(blockShape.slice(0)); + reshaped.push(inputShape[0] / prod); + reshaped = reshaped.concat(inputShape.slice(1)); + } + else { + reshaped = reshaped.concat(inputShape[0]); + const spatialLength = blockShape.length; + for (let i = 0; i < spatialLength; ++i) { + reshaped = + reshaped.concat([inputShape[i + 1] / blockShape[i], blockShape[i]]); + } + reshaped = reshaped.concat(inputShape.slice(spatialLength + 1)); + } + return reshaped; + } + /** + * Gets the permutation that will transpose the dimensions of the + * reshaped tensor to shape: + * + * [batch / prod(block_shape),inputShape[1], blockShape[0], ..., + * inputShape[M], blockShape[M-1],inputShape[M+1], ..., inputShape[N-1]] + * + * see step 2: https://www.tensorflow.org/api_docs/python/tf/batch_to_space_nd + */ + function getPermuted(reshapedRank, blockShapeRank, batchToSpace = true) { + const permuted = []; + if (batchToSpace) { + permuted.push(blockShapeRank); + for (let i = blockShapeRank + 1; i < reshapedRank; ++i) { + if (i <= 2 * blockShapeRank) { + permuted.push(i); + permuted.push(i - (blockShapeRank + 1)); + } + else { + permuted.push(i); + } + } + } + else { + const permutedBeforeBatch = []; + const permutedAfterBatch = []; + for (let i = 1; i < reshapedRank; ++i) { + if (i >= blockShapeRank * 2 + 1 || i % 2 === 1) { + permutedAfterBatch.push(i); + } + else { + permutedBeforeBatch.push(i); + } + } + permuted.push(...permutedBeforeBatch); + permuted.push(0); + permuted.push(...permutedAfterBatch); 
+ } + return permuted; + } + /** + * Gets the shape of the reshaped and permuted input Tensor before any cropping + * is applied. The new shape will be: + * + * [batch / prod(blockShape),inputShape[1] * blockShape[0], ..., + * inputShape[M] * blockShape[M-1],inputShape[M+1], ..., inputShape[N-1]] + * + * See step 3: https://www.tensorflow.org/api_docs/python/tf/batch_to_space_nd + */ + function getReshapedPermuted(inputShape, blockShape, prod, batchToSpace = true) { + const reshapedPermuted = []; + if (batchToSpace) { + reshapedPermuted.push(inputShape[0] / prod); + } + else { + reshapedPermuted.push(inputShape[0] * prod); + } + for (let i = 1; i < inputShape.length; ++i) { + if (i <= blockShape.length) { + if (batchToSpace) { + reshapedPermuted.push(blockShape[i - 1] * inputShape[i]); + } + else { + reshapedPermuted.push(inputShape[i] / blockShape[i - 1]); + } + } + else { + reshapedPermuted.push(inputShape[i]); + } + } + return reshapedPermuted; + } + /** + * Converts the crops argument into the beginning coordinates of a slice + * operation. + */ + function getSliceBeginCoords(crops, blockShape) { + const sliceBeginCoords = [0]; + for (let i = 0; i < blockShape; ++i) { + sliceBeginCoords.push(crops[i][0]); + } + return sliceBeginCoords; + } + /** + * Converts the crops argument into the size of a slice operation. 
When + * combined with getSliceBeginCoords this function allows the reshaped and + * permuted Tensor to be cropped to its final output shape of: + * + * inputShape[1] * blockShape[0] - crops[0,0] - crops[0,1], ..., + * inputShape[M] * blockShape[M-1] -crops[M-1,0] - + * crops[M-1,1],inputShape[M+1], ..., inputShape[N-1]] + * + * See step 4: https://www.tensorflow.org/api_docs/python/tf/batch_to_space_nd + */ + function getSliceSize(uncroppedShape, crops, blockShape) { + const sliceSize = uncroppedShape.slice(0, 1); + for (let i = 0; i < blockShape; ++i) { + sliceSize.push(uncroppedShape[i + 1] - crops[i][0] - crops[i][1]); + } + return sliceSize; + } + + /** + * @license + * Copyright 2018 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + const SELU_SCALEALPHA = 1.7580993408473768599402175208123; + const SELU_SCALE = 1.0507009873554804934193349852946; + + /** + * @license + * Copyright 2018 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + const ERF_P = 0.3275911; + const ERF_A1 = 0.254829592; + const ERF_A2 = -0.284496736; + const ERF_A3 = 1.421413741; + const ERF_A4 = -1.453152027; + const ERF_A5 = 1.061405429; + + /** + * @license + * Copyright 2018 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + /** + * Merges real and imaginary Float32Arrays into a single complex Float32Array. + * + * The memory layout is interleaved as follows: + * real: [r0, r1, r2] + * imag: [i0, i1, i2] + * complex: [r0, i0, r1, i1, r2, i2] + * + * This is the inverse of splitRealAndImagArrays. + * + * @param real The real values of the complex tensor values. + * @param imag The imag values of the complex tensor values. + * @returns A complex tensor as a Float32Array with merged values. 
+ */ + function mergeRealAndImagArrays(real, imag) { + if (real.length !== imag.length) { + throw new Error(`Cannot merge real and imag arrays of different lengths. real:` + + `${real.length}, imag: ${imag.length}.`); + } + const result = new Float32Array(real.length * 2); + for (let i = 0; i < result.length; i += 2) { + result[i] = real[i / 2]; + result[i + 1] = imag[i / 2]; + } + return result; + } + /** + * Splits a complex Float32Array into real and imag parts. + * + * The memory layout is interleaved as follows: + * complex: [r0, i0, r1, i1, r2, i2] + * real: [r0, r1, r2] + * imag: [i0, i1, i2] + * + * This is the inverse of mergeRealAndImagArrays. + * + * @param complex The complex tensor values. + * @returns An object with real and imag Float32Array components of the complex + * tensor. + */ + function splitRealAndImagArrays(complex) { + const real = new Float32Array(complex.length / 2); + const imag = new Float32Array(complex.length / 2); + for (let i = 0; i < complex.length; i += 2) { + real[i / 2] = complex[i]; + imag[i / 2] = complex[i + 1]; + } + return { real, imag }; + } + /** + * Extracts even indexed complex values in the given array. + * @param complex The complex tensor values + */ + function complexWithEvenIndex(complex) { + const len = Math.ceil(complex.length / 4); + const real = new Float32Array(len); + const imag = new Float32Array(len); + for (let i = 0; i < complex.length; i += 4) { + real[Math.floor(i / 4)] = complex[i]; + imag[Math.floor(i / 4)] = complex[i + 1]; + } + return { real, imag }; + } + /** + * Extracts odd indexed complete values in the given array. 
+ * @param complex The complex tensor values + */ + function complexWithOddIndex(complex) { + const len = Math.floor(complex.length / 4); + const real = new Float32Array(len); + const imag = new Float32Array(len); + for (let i = 2; i < complex.length; i += 4) { + real[Math.floor(i / 4)] = complex[i]; + imag[Math.floor(i / 4)] = complex[i + 1]; + } + return { real, imag }; + } + /** + * Get the map representing a complex value in the given array. + * @param complex The complex tensor values. + * @param index An index of the target complex value. + */ + function getComplexWithIndex(complex, index) { + const real = complex[index * 2]; + const imag = complex[index * 2 + 1]; + return { real, imag }; + } + /** + * Insert a given complex value into the TypedArray. + * @param data The array in which the complex value is inserted. + * @param c The complex value to be inserted. + * @param index An index of the target complex value. + */ + function assignToTypedArray(data, real, imag, index) { + data[index * 2] = real; + data[index * 2 + 1] = imag; + } + /** + * Make the list of exponent terms used by FFT. + */ + function exponents(n, inverse) { + const real = new Float32Array(n / 2); + const imag = new Float32Array(n / 2); + for (let i = 0; i < Math.ceil(n / 2); i++) { + const x = (inverse ? 2 : -2) * Math.PI * (i / n); + real[i] = Math.cos(x); + imag[i] = Math.sin(x); + } + return { real, imag }; + } + /** + * Make the exponent term used by FFT. + */ + function exponent(k, n, inverse) { + const x = (inverse ? 2 : -2) * Math.PI * (k / n); + const real = Math.cos(x); + const imag = Math.sin(x); + return { real, imag }; + } + + /** + * @license + * Copyright 2021 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + const ARROW = '->'; + const ARROW_REGEX = /->/g; + const COMMA = ','; + const ELLIPSIS = '...'; + /** + * Parse an equation for einsum. + * + * @param equation The einsum equation (e.g., "ij,jk->ik"). + * @param numTensors Number of tensors provided along with `equation`. Used to + * check matching number of input tensors. + * @returns An object consisting of the following fields: + * - allDims: all dimension names as strings. + * - summedDims: a list of all dimensions being summed over, as indices to + * the elements of `allDims`. + * - idDims: indices of the dimensions in each input tensor, as indices to + * the elements of `allDims. + */ + function decodeEinsumEquation(equation, numTensors) { + equation = equation.replace(/\s/g, ''); // Remove witespace in equation. 
+ const numArrows = (equation.length - equation.replace(ARROW_REGEX, '').length) / + ARROW.length; + if (numArrows < 1) { + throw new Error('Equations without an arrow are not supported.'); + } + else if (numArrows > 1) { + throw new Error(`Equation must contain exactly one arrow ("${ARROW}").`); + } + const [inputString, outputString] = equation.split(ARROW); + assert$1(inputString.indexOf(ELLIPSIS) === -1, () => `The ellipsis notation ("${ELLIPSIS}") is not supported yet.`); + const inputTerms = inputString.split(COMMA); + const numInputs = inputTerms.length; + if (numTensors !== numInputs) { + throw new Error(`Expected ${numInputs} input tensors, received ${numTensors}`); + } + if (numInputs > 2) { + throw new Error('Support for more than 2 input tensors is not implemented yet.'); + } + const allDims = []; + for (let i = 0; i < outputString.length; ++i) { + const dimName = outputString[i]; + if (!inputTerms.some(inputTerm => inputTerm.indexOf(dimName) !== -1)) { + throw new Error(`Output subscripts contain the label ${dimName} ` + + `not present in the input subscripts.`); + } + if (allDims.indexOf(dimName) === -1) { + allDims.push(dimName); + } + } + for (let i = 0; i < inputString.length; ++i) { + const dimName = inputString[i]; + if (allDims.indexOf(dimName) === -1 && dimName !== COMMA) { + allDims.push(dimName); + } + } + const idDims = new Array(inputTerms.length); + for (let i = 0; i < numInputs; ++i) { + if (new Set(inputTerms[i].split('')).size !== inputTerms[i].length) { + throw new Error(`Found duplicate axes in input component ${inputTerms[i]}. ` + + `Support for duplicate axes in input is not implemented yet.`); + } + idDims[i] = []; + for (let j = 0; j < inputTerms[i].length; ++j) { + idDims[i].push(allDims.indexOf(inputTerms[i][j])); + } + } + const numDims = allDims.length; // Number of unique dimensions. + const numOutDims = outputString.length; // Number of output dimensions. + const summedDims = []; // Dimensions being summed over. 
+ for (let i = numOutDims; i < numDims; ++i) { + summedDims.push(i); + } + return { allDims, summedDims, idDims }; + } + /** + * Get the permutation for a given input tensor. + * + * @param nDims Total number of dimension of all tensors involved in the einsum + * operation. + * @param idDims Dimension indices involve in the tensor in question. + * @returns An object consisting of the following fields: + * - permutationIndices: Indices to permute the axes of the tensor with. + * - expandDims: Indices to the dimension that need to be expanded from the + * tensor after permutation. + */ + function getEinsumPermutation(nDims, idDims) { + let permutationIndices = new Array(nDims); + permutationIndices.fill(-1); + for (let i = 0; i < idDims.length; ++i) { + permutationIndices[idDims[i]] = i; + } + const expandDims = []; + for (let i = 0; i < nDims; ++i) { + if (permutationIndices[i] === -1) { + expandDims.push(i); + } + } + permutationIndices = permutationIndices.filter(d => d !== -1); + return { permutationIndices, expandDims }; + } + /** + * Checks that the dimension sizes from different input tensors match the + * equation. + */ + function checkEinsumDimSizes(nDims, idDims, tensors) { + const dimSizes = new Array(nDims); + for (let i = 0; i < tensors.length; ++i) { + const shape = tensors[i].shape; + for (let j = 0; j < idDims[i].length; ++j) { + if (dimSizes[idDims[i][j]] === undefined) { + dimSizes[idDims[i][j]] = shape[j]; + } + else { + assert$1(dimSizes[idDims[i][j]] === shape[j], () => `Expected dimension ${dimSizes[idDims[i][j]]} at axis ${j} ` + + `of input shaped ${JSON.stringify(shape)}, ` + + `but got dimension ${shape[j]}`); + } + } + } + } + /** + * Gets path of computation for einsum. + * + * @param summedDims indices to the dimensions being summed over. + * @param idDims A look up table for the dimensions present in each input + * tensor.Each constituent array contains indices for the dimensions in the + * corresponding input tensor. 
+ * + * @return A map with two fields: + * - path: The path of computation, with each element indicating the dimension + * being summed over after the element-wise multiplication in that step. + * - steps: With the same length as `path`. Each element contains the indices + * to the input tensors being used for element-wise multiplication in the + * corresponding step. + */ + function getEinsumComputePath(summedDims, idDims) { + const path = summedDims; + const steps = []; + let nSteps = 0; + if (summedDims.length === 0) { + // Einsum that involes no summing: e.g., transpose and outer product. + path.push(-1); + } + nSteps = summedDims.length + 1; + for (let i = 0; i < nSteps; ++i) { + steps.push([]); + } + const computedTermIndices = []; + for (let i = 0; i < path.length; ++i) { + const summedDim = path[i]; + const termIndices = findTermsWithDim(idDims, summedDim); + for (const termIndex of termIndices) { + if (computedTermIndices.indexOf(termIndex) === -1) { + steps[i].push(termIndex); + computedTermIndices.push(termIndex); + } + } + } + return { path, steps }; + } + /** Determines if an axes permutation is the identity permutation. */ + function isIdentityPermutation(perm) { + return perm.every((dim, index) => dim === index); + } + function findTermsWithDim(idDims, dim) { + const termIndices = []; + for (let i = 0; i < idDims.length; ++i) { + if (idDims[i].length === 0 || idDims[i].indexOf(dim) !== -1 || dim === -1) { + termIndices.push(i); + } + } + return termIndices; + } + + /** + * Prepare the split size array. When the input is a number, the axis is evenly + * divided among the split size. When the input contains the negative value, the + * rest of the axis is allocated toward that. 
+ */ + function prepareSplitSize(x, numOrSizeSplits, axis = 0) { + let splitSizes = []; + if (typeof (numOrSizeSplits) === 'number') { + assert$1(x.shape[axis] % numOrSizeSplits === 0, () => 'Number of splits must evenly divide the axis.'); + splitSizes = + new Array(numOrSizeSplits).fill(x.shape[axis] / numOrSizeSplits); + } + else { + const numOfNegs = numOrSizeSplits.reduce((count, value) => { + if (value === -1) { + count += 1; + } + return count; + }, 0); + assert$1(numOfNegs <= 1, () => 'There should be only one negative value in split array.'); + const negIndex = numOrSizeSplits.indexOf(-1); + // Allow the number of split array to be -1, which indicates the rest + // of dimension is allocated to that split. + if (negIndex !== -1) { + const total = numOrSizeSplits.reduce((a, b) => b > 0 ? a + b : a); + numOrSizeSplits[negIndex] = x.shape[axis] - total; + } + assert$1(x.shape[axis] === numOrSizeSplits.reduce((a, b) => a + b), () => 'The sum of sizes must match the size of the axis dimension.'); + splitSizes = numOrSizeSplits; + } + return splitSizes; + } + + /** + * @license + * Copyright 2021 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + /** + * Generates sparse fill empty rows indices, dense shape mismatch error message. + * + * @param indicesLength The first dimension of indices. 
+ */ + function getSparseFillEmptyRowsIndicesDenseShapeMismatch(indicesLength) { + return `Received SparseTensor with denseShape[0] = 0 but + indices.shape[0] = ${indicesLength}`; + } + /** + * Generates sparse fill empty rows negative index error message. + * + * @param index The index with a negative value. + * @param value The negative value. + */ + function getSparseFillEmptyRowsNegativeIndexErrorMessage(index, value) { + return `indices(${index}, 0) is invalid: ${value} < 0`; + } + /** + * Generates sparse fill empty rows out of range index error message. + * + * @param index The index with an out of range value. + * @param value The out of range value. + * @param limit The upper limit for indices. + */ + function getSparseFillEmptyRowsOutOfRangeIndexErrorMessage(index, value, limit) { + return `indices(${index}, 0) is invalid: ${value} >= ${limit}`; + } + + /** + * @license + * Copyright 2021 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + /** + * Generates sparse reshape multiple negative 1 output dimension error message. + * + * @param dim1 The first dimension with a negative 1 value. + * @param dim2 The second dimension with a negative 1 value. 
+ */ + function getSparseReshapeMultipleNegativeOneOutputDimErrorMessage(dim1, dim2) { + return `only one output dimension may be -1, not both ${dim1} and ${dim2}`; + } + /** + * Generates sparse reshape negative output dimension error message. + * + * @param dim The dimension with a negative value. + * @param value The negative value. + */ + function getSparseReshapeNegativeOutputDimErrorMessage(dim, value) { + return `size ${dim} must be non-negative, not ${value}`; + } + /** + * Generates sparse reshape empty tensor zero output dimension error message. + * + */ + function getSparseReshapeEmptyTensorZeroOutputDimErrorMessage() { + return 'reshape cannot infer the missing input size for an empty tensor ' + + 'unless all specified input sizes are non-zero'; + } + /** + * Generates sparse reshape input output multiple mismatch error message. + * + * @param inputShape the input shape. + * @param outputShape the requested output shape. + */ + function getSparseReshapeInputOutputMultipleErrorMessage(inputShape, outputShape) { + const inputSize = sizeFromShape(inputShape); + const outputSize = sizeFromShape(outputShape); + return `Input to reshape is a SparseTensor with ${inputSize} + dense values, but the requested shape requires a multiple of ${outputSize}. inputShape=${inputShape} outputShape= ${outputShape}`; + } + /** + * Generates sparse reshape input output inequality error message. + * + * @param inputShape the input shape. + * @param outputShape the requested output shape. + */ + function getSparseReshapeInputOutputMismatchErrorMessage(inputShape, outputShape) { + const inputSize = sizeFromShape(inputShape); + const outputSize = sizeFromShape(outputShape); + return `Input to reshape is a tensor with ${inputSize} dense values, but the requested shape has ${outputSize}. inputShape=${inputShape} outputShape=${outputShape}`; + } + + /** + * @license + * Copyright 2021 Google LLC. All Rights Reserved. 
+ * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + /** + * Generates sparse segment reduction negative segment ids error message. + * + */ + function getSparseSegmentReductionNegativeSegmentIdsErrorMessage() { + return `segment ids must be >= 0`; + } + /** + * Generates sparse segment reduction non increasing segment ids error message. + * + */ + function getSparseSegmentReductionNonIncreasingSegmentIdsErrorMessage() { + return `segment ids are not increasing`; + } + /** + * Generates sparse segment reduction segment id out of range error message. + * + * @param segmentId The segment id index that is out of range. + * @param outputRows Upper bound of valid segment id values. + */ + function getSparseSegmentReductionSegmentIdOutOfRangeErrorMessage(segmentId, outputRows) { + return `Segment id ${segmentId} out of range [0, ${outputRows}), possibly because segmentIds input is not sorted.`; + } + /** + * Generates sparse segment reduction input indice out of range error message. + * + * @param index The index that holds the out of range value. + * @param indexValue The value that is out of range. + * @param inputRows Upper bound of valid index values. 
+ */ + function getSparseSegmentReductionIndicesOutOfRangeErrorMessage(index, indexValue, inputRows) { + return `Bad: indices[${index}] == ${indexValue} out of range [0, ${inputRows})`; + } + + /** + * @license + * Copyright 2018 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + function segOpComputeOptimalWindowSize(inSize, numSegments) { + let done = false; + let res; + if (inSize <= PARALLELIZE_THRESHOLD) { + res = inSize; + done = true; + } + else { + res = nearestDivisor(inSize, Math.floor(Math.sqrt(inSize))); + } + while (!done) { + if (res > numSegments || res === inSize) { + done = true; + } + else { + res = nearestDivisor(inSize, res + 1); + } + } + return res; + } + function computeOutShape(aShape, axis, numSegments) { + const outShape = []; + const rank = aShape.length; + for (let dim = 0; dim < rank; dim++) { + if (dim !== axis) { + outShape.push(aShape[dim]); + } + else { + outShape.push(numSegments); + } + } + return outShape; + } + function collectGatherOpShapeInfo(x, indices, axis, batchDims) { + const indicesRank = indices.shape.length; + const xRank = x.shape.length; + if (batchDims !== 0) { + if (batchDims < -indicesRank || batchDims > indicesRank) { + throw new Error(`Expect batchDims in the range of [-${indicesRank}, ${indicesRank}], but got ${batchDims}`); + } + } + if (batchDims < 0) { + batchDims += indicesRank; + } + 
if (batchDims > xRank) { + throw new Error(`batchDims (${batchDims}) must be less than rank(x) ( + ${xRank}).`); + } + if (axis < batchDims) { + throw new Error(`batchDims (${batchDims}) must be less than or equal to axis (${axis}).`); + } + for (let i = 0; i < batchDims; ++i) { + if (x.shape[i] !== indices.shape[i]) { + throw new Error(`x.shape[${i}]: ${x.shape[i]} should be equal to indices.shape[${i}]: ${indices.shape[i]}.`); + } + } + const dimSize = x.shape[axis]; + const outputShape = []; + let batchSize = 1; + let outerSize = 1; + let sliceSize = 1; + for (let i = 0; i < batchDims; ++i) { + outputShape.push(x.shape[i]); + batchSize *= x.shape[i]; + } + for (let i = batchDims; i < axis; i++) { + outputShape.push(x.shape[i]); + outerSize *= x.shape[i]; + } + for (let i = batchDims; i < indicesRank; i++) { + outputShape.push(indices.shape[i]); + } + for (let i = axis + 1; i < xRank; i++) { + outputShape.push(x.shape[i]); + sliceSize *= x.shape[i]; + } + return { batchSize, sliceSize, outerSize, dimSize, outputShape }; + } + + var segment_util = /*#__PURE__*/Object.freeze({ + __proto__: null, + collectGatherOpShapeInfo: collectGatherOpShapeInfo, + computeOutShape: computeOutShape, + segOpComputeOptimalWindowSize: segOpComputeOptimalWindowSize + }); + + /** + * @license + * Copyright 2018 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ + function fromUint8ToStringArray(vals) { + try { + // Decode the bytes into string. + return vals.map(val => decodeString(val)); + } + catch (err) { + throw new Error(`Failed to decode encoded string bytes into utf-8, error: ${err}`); + } + } + function fromStringArrayToUint8(strings) { + return strings.map(s => encodeString(s)); + } + + var backend_util = /*#__PURE__*/Object.freeze({ + __proto__: null, + ERF_A1: ERF_A1, + ERF_A2: ERF_A2, + ERF_A3: ERF_A3, + ERF_A4: ERF_A4, + ERF_A5: ERF_A5, + ERF_P: ERF_P, + PARALLELIZE_THRESHOLD: PARALLELIZE_THRESHOLD, + get RowPartitionType () { return RowPartitionType$1; }, + SELU_SCALE: SELU_SCALE, + SELU_SCALEALPHA: SELU_SCALEALPHA, + applyActivation: applyActivation$1, + assertAndGetBroadcastShape: assertAndGetBroadcastShape, + assertAxesAreInnerMostDims: assertAxesAreInnerMostDims, + assertParamsConsistent: assertParamsConsistent, + assignToTypedArray: assignToTypedArray, + axesAreInnerMostDims: axesAreInnerMostDims, + calculateShapes: calculateShapes, + checkEinsumDimSizes: checkEinsumDimSizes, + checkPadOnDimRoundingMode: checkPadOnDimRoundingMode, + combineLocations: combineLocations, + combineRaggedTensorToTensorShapes: combineRaggedTensorToTensorShapes, + complexWithEvenIndex: complexWithEvenIndex, + complexWithOddIndex: complexWithOddIndex, + computeConv2DInfo: computeConv2DInfo, + computeConv3DInfo: computeConv3DInfo, + computeDefaultPad: computeDefaultPad, + computeDilation2DInfo: computeDilation2DInfo, + computeOptimalWindowSize: computeOptimalWindowSize, + computeOutAndReduceShapes: computeOutAndReduceShapes, + computeOutShape: computeOutShape$1, + computePool2DInfo: computePool2DInfo, + computePool3DInfo: computePool3DInfo, + convertConv2DDataFormat: convertConv2DDataFormat, + decodeEinsumEquation: decodeEinsumEquation, + eitherStridesOrDilationsAreOne: eitherStridesOrDilationsAreOne, + expandShapeToKeepDim: 
expandShapeToKeepDim, + exponent: exponent, + exponents: exponents, + fromStringArrayToUint8: fromStringArrayToUint8, + fromUint8ToStringArray: fromUint8ToStringArray, + getAxesPermutation: getAxesPermutation, + getBroadcastDims: getBroadcastDims$1, + getComplexWithIndex: getComplexWithIndex, + getEinsumComputePath: getEinsumComputePath, + getEinsumPermutation: getEinsumPermutation, + getFusedBiasGradient: getFusedBiasGradient, + getFusedDyActivation: getFusedDyActivation, + getImageCenter: getImageCenter, + getInnerMostAxes: getInnerMostAxes, + getPermuted: getPermuted, + getRaggedRank: getRaggedRank, + getReductionAxes: getReductionAxes, + getReshaped: getReshaped, + getReshapedPermuted: getReshapedPermuted, + getRowPartitionTypesHelper: getRowPartitionTypesHelper, + getSliceBeginCoords: getSliceBeginCoords, + getSliceSize: getSliceSize, + getSparseFillEmptyRowsIndicesDenseShapeMismatch: getSparseFillEmptyRowsIndicesDenseShapeMismatch, + getSparseFillEmptyRowsNegativeIndexErrorMessage: getSparseFillEmptyRowsNegativeIndexErrorMessage, + getSparseFillEmptyRowsOutOfRangeIndexErrorMessage: getSparseFillEmptyRowsOutOfRangeIndexErrorMessage, + getSparseReshapeEmptyTensorZeroOutputDimErrorMessage: getSparseReshapeEmptyTensorZeroOutputDimErrorMessage, + getSparseReshapeInputOutputMismatchErrorMessage: getSparseReshapeInputOutputMismatchErrorMessage, + getSparseReshapeInputOutputMultipleErrorMessage: getSparseReshapeInputOutputMultipleErrorMessage, + getSparseReshapeMultipleNegativeOneOutputDimErrorMessage: getSparseReshapeMultipleNegativeOneOutputDimErrorMessage, + getSparseReshapeNegativeOutputDimErrorMessage: getSparseReshapeNegativeOutputDimErrorMessage, + getSparseSegmentReductionIndicesOutOfRangeErrorMessage: getSparseSegmentReductionIndicesOutOfRangeErrorMessage, + getSparseSegmentReductionNegativeSegmentIdsErrorMessage: getSparseSegmentReductionNegativeSegmentIdsErrorMessage, + getSparseSegmentReductionNonIncreasingSegmentIdsErrorMessage: 
getSparseSegmentReductionNonIncreasingSegmentIdsErrorMessage, + getSparseSegmentReductionSegmentIdOutOfRangeErrorMessage: getSparseSegmentReductionSegmentIdOutOfRangeErrorMessage, + getUndoAxesPermutation: getUndoAxesPermutation, + isIdentityPermutation: isIdentityPermutation, + log: log$3, + mergeRealAndImagArrays: mergeRealAndImagArrays, + prepareAndValidate: prepareAndValidate, + prepareSplitSize: prepareSplitSize, + segment_util: segment_util, + shouldFuse: shouldFuse, + slice_util: slice_util, + splitRealAndImagArrays: splitRealAndImagArrays, + stridesOrDilationsArePositive: stridesOrDilationsArePositive, + tupleValuesAreOne: tupleValuesAreOne, + upcastType: upcastType, + validateDefaultValueShape: validateDefaultValueShape, + validateInput: validateInput$1, + validateUpdateShape: validateUpdateShape, + warn: warn + }); + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + + var kernel_impls = /*#__PURE__*/Object.freeze({ + __proto__: null, + nonMaxSuppressionV3Impl: nonMaxSuppressionV3Impl$2, + nonMaxSuppressionV4Impl: nonMaxSuppressionV4Impl$2, + nonMaxSuppressionV5Impl: nonMaxSuppressionV5Impl$2, + whereImpl: whereImpl$2 + }); + + /** + * @license + * Copyright 2020 Google Inc. All Rights Reserved. 
+ * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + + /** + * @license + * Copyright 2017 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + registerOptimizers(); + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + const absGradConfig = { + kernelName: Abs, + inputsToSave: ['x'], + gradFunc: (dy, saved) => { + const [x] = saved; + return { x: () => mul(dy, step$2(cast$3(x, 'float32'), -1)) }; + } + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + const acosGradConfig = { + kernelName: Acos, + inputsToSave: ['x'], + gradFunc: (dy, saved) => { + const [x] = saved; + return { + x: () => { + const a = square$2(cast$3(x, 'float32')); + const b = sqrt$2(sub$2(scalar(1), a)); + return neg$2(div$1(dy, b)); + } + }; + } + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + const acoshGradConfig = { + kernelName: Acosh, + inputsToSave: ['x'], + gradFunc: (dy, saved) => { + const [x] = saved; + return { + x: () => { + const a = sqrt$2(sub$2(square$2(cast$3(x, 'float32')), 1)); + return div$1(dy, a); + } + }; + } + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + const addGradConfig = { + kernelName: Add$1, + inputsToSave: ['a', 'b'], + gradFunc: (dy, saved) => { + const [a, b] = saved; + const outShape = assertAndGetBroadcastShape(a.shape, b.shape); + const derA = () => { + let res = dy; + const reduceAxes = getReductionAxes(a.shape, outShape); + if (reduceAxes.length > 0) { + res = sum$3(res, reduceAxes); + } + return reshape$3(res, a.shape); + }; + const derB = () => { + let res = dy; + const reduceAxes = getReductionAxes(b.shape, outShape); + if (reduceAxes.length > 0) { + res = sum$3(res, reduceAxes); + } + return reshape$3(res, b.shape); + }; + return { a: derA, b: derB }; + } + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. 
+ * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + const addNGradConfig = { + kernelName: AddN, + saveAllInputs: true, + gradFunc: (dy, saved) => { + const ders = {}; + saved.forEach((_, i) => { + ders[i] = () => dy.clone(); + }); + return ders; + } + }; + + /** + * @license + * Copyright 2020 Google Inc. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + const argMaxGradConfig = { + kernelName: ArgMax, + inputsToSave: ['x'], + gradFunc: (dy, saved) => { + const [x] = saved; + return { x: () => zerosLike$3(x) }; + } + }; + + /** + * @license + * Copyright 2020 Google Inc. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + const argMinGradConfig = { + kernelName: ArgMin, + inputsToSave: ['x'], + gradFunc: (dy, saved) => { + const [x] = saved; + return { x: () => zerosLike$3(x) }; + } + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + const asinGradConfig = { + kernelName: Asin, + inputsToSave: ['x'], + gradFunc: (dy, saved) => { + const [x] = saved; + return { x: () => div$1(dy, sqrt$2(sub$2(scalar(1), square$2(cast$3(x, 'float32'))))) }; + } + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + const asinhGradConfig = { + kernelName: Asinh, + inputsToSave: ['x'], + gradFunc: (dy, saved) => { + const [x] = saved; + return { + x: () => { + const a = sqrt$2(add$3(scalar(1), square$2(cast$3(x, 'float32')))); + return div$1(dy, a); + } + }; + } + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ + const atan2GradConfig = { + kernelName: Atan2, + inputsToSave: ['a', 'b'], + gradFunc: (dy, saved) => { + const [a, b] = saved; + const outShape = assertAndGetBroadcastShape(a.shape, b.shape); + const derA = () => { + const d = add$3(square$2(a), square$2(b)); + let res = mul(dy, div$1(b, d)); + const reduceAxes = getReductionAxes(a.shape, outShape); + if (reduceAxes.length > 0) { + res = sum$3(res, reduceAxes); + } + return reshape$3(res, a.shape); + }; + const derB = () => { + const d = add$3(square$2(a), square$2(b)); + let res = neg$2(mul(dy, div$1(a, d))); + const reduceAxes = getReductionAxes(b.shape, outShape); + if (reduceAxes.length > 0) { + res = sum$3(res, reduceAxes); + } + return reshape$3(res, b.shape); + }; + return { a: derA, b: derB }; + } + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + const atanGradConfig = { + kernelName: Atan, + inputsToSave: ['x'], + gradFunc: (dy, saved) => { + const [x] = saved; + return { x: () => div$1(dy, add$3(square$2(cast$3(x, 'float32')), 1)) }; + } + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + const atanhGradConfig = { + kernelName: Atanh, + inputsToSave: ['x'], + gradFunc: (dy, saved) => { + const [x] = saved; + return { x: () => div$1(dy, sub$2(scalar(1), square$2(cast$3(x, 'float32')))) }; + } + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + /** + * Computes the backprop of a 3d avg pool. + * + * @param dy The dy error, of rank 5 of shape + * [batchSize, depth, height, width, channels]. + * assumed. + * @param input The original input image, of rank 5 or rank4 of shape + * [batchSize, depth, height, width, channels]. + * @param filterSize The filter size: + * `[filterDepth, filterHeight, filterWidth]`. + * `filterSize` is a single number, + * then `filterDepth == filterHeight == filterWidth`. 
+ * @param strides The strides of the pooling: + * `[strideDepth, strideHeight, strideWidth]`. If + * `strides` is a single number, then `strideHeight == strideWidth`. + * @param pad A string from: 'same', 'valid'. The type of padding algorithm + * used in the forward prop of the op. + * @param dimRoundingMode A string from: 'ceil', 'round', 'floor'. If none is + * provided, it will default to truncate. + */ + function avgPool3dGrad_(dy, input, filterSize, strides, pad, dimRoundingMode) { + const $dy = convertToTensor(dy, 'dy', 'avgPool3dGrad'); + const $input = convertToTensor(input, 'input', 'avgPool3dGrad'); + let dy5D = $dy; + let input5D = $input; + let reshapedTo5D = false; + if ($input.rank === 4) { + reshapedTo5D = true; + dy5D = reshape$3($dy, [1, $dy.shape[0], $dy.shape[1], $dy.shape[2], $dy.shape[3]]); + input5D = reshape$3($input, [ + 1, $input.shape[0], $input.shape[1], $input.shape[2], $input.shape[3] + ]); + } + assert$1(dy5D.rank === 5, () => `Error in avgPool3dGrad: dy must be rank 5 but got rank ` + + `${dy5D.rank}.`); + assert$1(input5D.rank === 5, () => `Error in avgPool3dGrad: input must be rank 5 but got rank ` + + `${input5D.rank}.`); + checkPadOnDimRoundingMode('avgPool3dGrad', pad, dimRoundingMode); + const inputs = { dy: dy5D, input: input5D }; + const attrs = { filterSize, strides, pad, dimRoundingMode }; + // tslint:disable-next-line: no-unnecessary-type-assertion + const res = ENGINE.runKernel(AvgPool3DGrad, inputs, attrs); + if (reshapedTo5D) { + return reshape$3(res, [res.shape[1], res.shape[2], res.shape[3], res.shape[4]]); + } + return res; + } + const avgPool3dGrad = /* @__PURE__ */ op({ avgPool3dGrad_ }); + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + const avgPool3DGradConfig$2 = { + kernelName: AvgPool3D, + inputsToSave: ['x'], + gradFunc: (dy, saved, attrs) => { + const [x] = saved; + const { filterSize, strides, pad, dimRoundingMode } = attrs; + return { + x: () => avgPool3dGrad(dy, x, filterSize, strides, pad, dimRoundingMode) + }; + } + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + /** + * Computes the backprop of an 2D avg pool. + * + * @param dy The dy error, of rank 4 or rank 3 of shape + * [batchSize, height, width, channels]. If rank 3, batch of 1 is + * assumed. + * @param input The input image, of rank 4 or rank 3 of shape + * [batchSize, height, width, channels]. If rank 3, batch of 1 is + * assumed. + * @param filterSize The filter size: `[filterHeight, filterWidth]`. 
If + * `filterSize` is a single number, then `filterHeight == filterWidth`. + * @param strides The strides of the pooling: `[strideHeight, strideWidth]`. If + * `strides` is a single number, then `strideHeight == strideWidth`. + * @param pad The type of padding algorithm used in the forward prop of the op. + * 'same', 'valid', for more info, see this guide: + * [https://www.tensorflow.org/api_docs/python/tf/nn/convolution]( + * https://www.tensorflow.org/api_docs/python/tf/nn/convolution) + */ + function avgPoolGrad_(dy, input, filterSize, strides, pad) { + const $dy = convertToTensor(dy, 'dy', 'avgPoolGrad'); + const $input = convertToTensor(input, 'input', 'avgPoolGrad'); + assert$1($input.rank === $dy.rank, () => `Rank of input (${$input.rank}) does not match rank of dy (${$dy.rank})`); + let input4D = $input; + let dy4D = $dy; + let reshapedTo4D = false; + if ($input.rank === 3) { + reshapedTo4D = true; + input4D = + reshape$3($input, [1, $input.shape[0], $input.shape[1], $input.shape[2]]); + dy4D = reshape$3($dy, [1, $dy.shape[0], $dy.shape[1], $dy.shape[2]]); + } + assert$1(dy4D.rank === 4, () => `Error in avgPoolGrad: dy must be rank 4 but got rank ` + + `${dy4D.rank}.`); + assert$1(input4D.rank === 4, () => `Error in avgPoolGrad: input must be rank 4 but got rank ` + + `${input4D.rank}.`); + const inputs = { dy: dy4D, input: input4D }; + const attrs = { filterSize, strides, pad }; + // tslint:disable-next-line: no-unnecessary-type-assertion + const res = ENGINE.runKernel(AvgPoolGrad, inputs, attrs); + if (reshapedTo4D) { + return reshape$3(res, [res.shape[1], res.shape[2], res.shape[3]]); + } + return res; + } + const avgPoolGrad$2 = /* @__PURE__ */ op({ avgPoolGrad_ }); + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + const avgPoolGradConfig$2 = { + kernelName: AvgPool, + inputsToSave: ['x'], + gradFunc: (dy, saved, attrs) => { + const [x] = saved; + const { filterSize, strides, pad } = attrs; + return { x: () => avgPoolGrad$2(dy, x, filterSize, strides, pad) }; + } + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ + const batchMatMulGradConfig = { + kernelName: BatchMatMul, + inputsToSave: ['a', 'b'], + gradFunc: (dy, saved, attrs) => { + const [a, b] = saved; + const { transposeA, transposeB } = attrs; + if (!transposeA && !transposeB) { + return { + a: () => matMul$1(dy, b, false, true), + b: () => matMul$1(a, dy, true, false) + }; + } + else if (!transposeA && transposeB) { + return { + a: () => matMul$1(dy, b, false, false), + b: () => matMul$1(dy, a, true, false) + }; + } + else if (transposeA && !transposeB) { + return { + a: () => matMul$1(b, dy, false, true), + b: () => matMul$1(a, dy, false, false) + }; + } + else { + return { + a: () => matMul$1(b, dy, true, true), + b: () => matMul$1(dy, a, true, true) + }; + } + } + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + const batchToSpaceNDGradConfig = { + kernelName: BatchToSpaceND, + gradFunc: (dy, saved, attrs) => { + const { blockShape, crops } = attrs; + return { x: () => spaceToBatchND$2(dy, blockShape, crops) }; + } + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + const broadcastToGradConfig = { + kernelName: BroadcastTo, + gradFunc: (dy, saved, attrs) => { + const broadCastToAttrs = attrs; + const inputShape = broadCastToAttrs.inputShape; + const outputShape = broadCastToAttrs.shape; + const reps = Array.from(outputShape); + for (let i = inputShape.length - 1; i >= 0; i--) { + if (inputShape[i] === outputShape[i]) { + reps[i] = 1; + } + else if (inputShape[i] !== 1) { + throw new Error(`broadcastTo(): [${inputShape}] cannot be broadcast to [${outputShape}].`); + } + } + const axes = []; + for (let i = 0; i < reps.length; i++) { + if (reps[i] > 1) { + axes.push(i); + } + } + return { x: () => sum$3(dy, axes, true /* keepDims */) }; + } + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ + const castGradConfig = { + kernelName: Cast, + gradFunc: (dy) => { + return { x: () => dy.clone() }; + } + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + const ceilGradConfig = { + kernelName: Ceil, + gradFunc: (dy) => { + // TODO(manrajgrover): Return null for gradients when backprop supports it. + return { x: () => zerosLike$3(dy) }; + } + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ + const clipByValueGradConfig = { + kernelName: ClipByValue, + inputsToSave: ['x'], + gradFunc: (dy, saved, attrs) => { + const [x] = saved; + const { clipValueMin, clipValueMax } = attrs; + return { + x: () => where(logicalAnd$2(greaterEqual$2(x, clipValueMin), lessEqual$2(x, clipValueMax)), dy, zerosLike$3(dy)), + }; + } + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + const complexAbsGradConfig = { + kernelName: ComplexAbs, + inputsToSave: ['x'], + gradFunc: absGradConfig.gradFunc, + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ + const concatGradConfig = { + kernelName: Concat, + saveAllInputs: true, + gradFunc: (dy, saved, attrs) => { + const shapes = saved.map(t => t.shape); + const { axis } = attrs; + const $axis = parseAxisParam(axis, saved[0].shape)[0]; + const sizeSplits = shapes.map(s => s[$axis]); + const derTensors = split$3(dy, sizeSplits, $axis); + return derTensors.map(t => () => t); + } + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + const conv2DGradConfig = { + kernelName: Conv2D$1, + inputsToSave: ['x', 'filter'], + gradFunc: (dy, saved, attrs) => { + const [x4D, $filter] = saved; + const { dilations, strides, pad, dataFormat } = attrs; + assert$1(tupleValuesAreOne(dilations), () => 'Error in gradient of conv2D: dilation rates greater than 1 ' + + `are not yet supported in gradients. Got dilations '${dilations}'`); + return { + x: () => conv2DBackpropInput$2(x4D.shape, dy, $filter, strides, pad, dataFormat), + filter: () => conv2DBackpropFilter$2(x4D, dy, $filter.shape, strides, pad, dataFormat) + }; + } + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + const conv2DBackpropInputGradConfig = { + kernelName: Conv2DBackpropInput, + inputsToSave: ['dy', 'filter'], + gradFunc: (ddx, saved, attrs) => { + const [dy, filter] = saved; + const { strides, pad, dataFormat, dimRoundingMode } = attrs; + return { + dy: () => conv2d$4(ddx, filter, strides, pad, dataFormat, 1 /* dilations */, dimRoundingMode), + filter: () => conv2DBackpropFilter$2(ddx, dy, filter.shape, strides, pad, dataFormat, dimRoundingMode) + }; + } + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + /** + * Computes the derivative of the filter of a 3D convolution. + * + * @param x The input tensor, of rank 5 or rank 4 of shape + * [batch, depth, height, width, inChannels]. If rank 4, batch of 1 is + * assumed. 
+ * @param dy The dy image, of rank 5 or rank 4, of shape + * [batch, depth, height, width, outDepth]. If rank 4, batch of 1 is + * assumed. + * @param filterShape The shape of the filter, length 5, + * [filterDepth, filterHeight, filterWidth, inDepth, outDepth]. + * @param strides The strides of the convolution: [strideDepth, strideHeight, + * strideWidth]. + * @param pad A string from: 'same', 'valid'. The type of padding algorithm + * used in the forward prop of the op. + */ + function conv3DBackpropFilter_(x, dy, filterShape, strides, pad) { + let x5D = x; + if (x.rank === 4) { + x5D = reshape$3(x, [1, x.shape[0], x.shape[1], x.shape[2], x.shape[3]]); + } + let dy5D = dy; + if (dy5D.rank === 4) { + dy5D = reshape$3(dy, [1, dy.shape[0], dy.shape[1], dy.shape[2], dy.shape[3]]); + } + assert$1(x5D.rank === 5, () => `Error in conv3dDerFilter: input must be rank 5, but got shape ` + + `${x5D.shape}.`); + assert$1(dy5D.rank === 5, () => `Error in conv3dDerFilter: dy must be rank 5, but got shape ` + + `${dy5D.shape}.`); + assert$1(filterShape.length === 5, () => `Error in conv3dDerFilter: filterShape must be length 5, but got ` + + `${filterShape}.`); + assert$1(x5D.shape[4] === filterShape[3], () => `Error in conv3dDerFilter: depth of input ${x5D.shape[4]}) must ` + + `match input depth in filter (${filterShape[3]}.`); + assert$1(dy5D.shape[4] === filterShape[4], () => `Error in conv3dDerFilter: depth of dy (${dy5D.shape[4]}) must ` + + `match output depth for filter (${filterShape[4]}).`); + const inputs = { x: x5D, dy: dy5D }; + const attrs = { strides, pad, filterShape }; + // tslint:disable-next-line: no-unnecessary-type-assertion + return ENGINE.runKernel(Conv3DBackpropFilterV2, inputs, attrs); + } + const conv3DBackpropFilter = /* @__PURE__ */ op({ conv3DBackpropFilter_ }); + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. 
+ * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + const conv3DGradConfig = { + kernelName: Conv3D$1, + inputsToSave: ['x', 'filter'], + gradFunc: (dy, saved, attrs) => { + const { dilations, strides, pad } = attrs; + assert$1(tupleValuesAreOne(dilations), () => 'Error in gradient of conv3D: dilation rates greater than 1 are ' + + `not yet supported in gradients. Got dilations '${dilations}'`); + const [x5D, $filter] = saved; + return { + x: () => conv3DBackpropInput$1(x5D.shape, dy, $filter, strides, pad), + filter: () => conv3DBackpropFilter(x5D, dy, $filter.shape, strides, pad) + }; + } + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ + const cosGradConfig = { + kernelName: Cos, + inputsToSave: ['x'], + gradFunc: (dy, saved) => { + const [x] = saved; + return { x: () => mul(neg$2(sin$2(cast$3(x, 'float32'))), dy) }; + } + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + const coshGradConfig = { + kernelName: Cosh, + inputsToSave: ['x'], + gradFunc: (dy, saved) => { + const [x] = saved; + return { x: () => mul(sinh$2(cast$3(x, 'float32')), dy) }; + } + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ + const cumsumGradConfig = { + kernelName: Cumsum, + inputsToSave: ['x'], + gradFunc: (dy, saved, attrs) => { + const [x] = saved; + const { axis, exclusive, reverse } = attrs; + return { + x: () => { + const permutation = getAxesPermutation([axis], x.rank); + let out = cumsum$2(dy, axis, exclusive, !reverse); + if (permutation != null) { + out = transpose$2(out, permutation); + } + return out; + } + }; + } + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + const depthwiseConv2dNativeGradConfig = { + kernelName: DepthwiseConv2dNative, + inputsToSave: ['x', 'filter'], + gradFunc: (dy, saved, attrs) => { + const { dilations, strides, pad, dimRoundingMode } = attrs; + const $dilations = dilations == null ? [1, 1] : dilations; + assert$1(tupleValuesAreOne($dilations), () => 'Error in gradient of depthwiseConv2dNative: dilation rates ' + + `greater than 1 are not yet supported. 
Got dilations ` + + `'${$dilations}'`); + const [x, filter] = saved; + assert$1(x.rank === 4, () => `Error in gradient of depthwiseConv2dNative: input must be ` + + `rank 4, but got rank ${x.rank}.`); + assert$1(filter.rank === 4, () => `Error in gradient of depthwiseConv2dNative: filter must be ` + + `rank 4, but got rank ${filter.rank}.`); + assert$1(x.shape[3] === filter.shape[2], () => `Error in gradient of depthwiseConv2d: number of input ` + + `channels (${x.shape[3]}) must match the inChannels dimension ` + + `in filter ${filter.shape[2]}.`); + assert$1(eitherStridesOrDilationsAreOne(strides, $dilations), () => 'Error in gradient of depthwiseConv2d: Either strides or ' + + `dilations must be 1. Got strides ${strides} and dilations ` + + `'${$dilations}'.`); + checkPadOnDimRoundingMode('depthwiseConv2d', pad, dimRoundingMode); + return { + x: () => depthwiseConv2dNativeBackpropInput$2(x.shape, dy, filter, strides, pad, $dilations, dimRoundingMode), + filter: () => depthwiseConv2dNativeBackpropFilter$2(x, dy, filter.shape, strides, pad, $dilations, dimRoundingMode), + }; + } + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ + const dilation2dGradConfig = { + kernelName: Dilation2D, + inputsToSave: ['x', 'filter'], + gradFunc: (dy, saved, attrs) => { + const [x, filter] = saved; + const inputInputs = { x, filter, dy }; + const filterInputs = { x, filter, dy }; + return { + x: () => ENGINE.runKernel(Dilation2DBackpropInput, inputInputs, attrs), + filter: () => ENGINE.runKernel(Dilation2DBackpropFilter, filterInputs, attrs) + }; + } + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + const eluGradConfig$2 = { + kernelName: Elu$1, + outputsToSave: [true], + gradFunc: (dy, saved) => { + const [y] = saved; + const inputs = { dy, y }; + return { x: () => ENGINE.runKernel(EluGrad, inputs) }; + } + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + const erfGradConfig = { + kernelName: Erf, + inputsToSave: ['x'], + gradFunc: (dy, saved) => { + const [x] = saved; + const a = mul(exp$2(neg$2(square$2(x))), 2 / Math.sqrt(Math.PI)); + return { x: () => mul(dy, a) }; + } + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + const expGradConfig = { + kernelName: Exp, + outputsToSave: [true], + gradFunc: (dy, saved) => { + const [y] = saved; + return { x: () => mul(dy, y) }; + } + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ + const expandDimsGradConfig = { + kernelName: ExpandDims, + inputsToSave: ['input'], + gradFunc: (dy, saved) => { + const [input] = saved; + return { input: () => reshape$3(dy, input.shape) }; + } + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + const expm1GradConfig = { + kernelName: Expm1, + inputsToSave: ['x'], + gradFunc: (dy, saved) => { + const [x] = saved; + return { x: () => mul(dy, exp$2(x)) }; + } + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ + const floorGradConfig = { + kernelName: Floor, + gradFunc: (dy) => { + return { x: () => zerosLike$3(dy) }; + } + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + const floorDivGradConfig = { + kernelName: FloorDiv, + inputsToSave: ['a', 'b'], + gradFunc: (dy, saved) => { + const [a, b] = saved; + const outShape = assertAndGetBroadcastShape(a.shape, b.shape); + const derA = () => { + const res = div$1(dy, cast$3(b, 'float32')); + const reduceAxes = getReductionAxes(a.shape, outShape); + if (reduceAxes.length > 0) { + return reshape$3(sum$3(res, reduceAxes), a.shape); + } + return res; + }; + const derB = () => { + let res = mul(dy, cast$3(a, 'float32')); + const reduceAxes = getReductionAxes(b.shape, outShape); + if (reduceAxes.length > 0) { + res = reshape$3(sum$3(res, reduceAxes), b.shape); + } + const tmp = square$2(b); + return neg$2(div$1(res, cast$3(tmp, 'float32'))); + }; + return { a: derA, b: derB }; + } + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + const fusedBatchNormGradConfig = { + kernelName: FusedBatchNorm, + inputsToSave: ['x', 'mean', 'variance', 'scale'], + gradFunc: (dy, saved, attrs) => { + const { varianceEpsilon } = attrs; + const [x, mean, variance, scale] = saved; + const scaleValue = scale == null ? scalar(1) : scale; + const reductionAxes = getReductionAxes(mean.shape, x.shape); + const tileShape = []; + if (mean.rank === 1) { + for (let i = 0; i < x.shape.length - 1; ++i) { + tileShape.push(x.shape[i]); + } + tileShape.push(1); + } + const xMinusMean = sub$2(x, mean); + const dyTimesScaleValue = mul(dy, scaleValue); + const oneOverSqrtVariance = rsqrt$2(add$3(variance, scalar(varianceEpsilon))); + const minusHalfRCube = mul(mul(mul(oneOverSqrtVariance, oneOverSqrtVariance), oneOverSqrtVariance), scalar(-0.5)); + const derX = () => { + if (mean.rank === 1) { + return reshape$3(mul(mul(dy, tile$3(reshape$3(oneOverSqrtVariance, [1, 1, 1, mean.shape[0]]), tileShape)), scaleValue), x.shape); + } + else { + return reshape$3(mul(mul(dy, oneOverSqrtVariance), scaleValue), x.shape); + } + }; + const derMean = () => { + let meanDer = mul(mul(oneOverSqrtVariance, scalar(-1)), dyTimesScaleValue); + if (mean.rank === 1) { + meanDer = sum$3(meanDer, reductionAxes); + } + return reshape$3(meanDer, mean.shape); + }; + const derVariance = () => { + let varianceDer = mul(mul(minusHalfRCube, xMinusMean), dyTimesScaleValue); + if (mean.rank === 1) { + varianceDer = sum$3(varianceDer, reductionAxes); + } + 
return reshape$3(varianceDer, mean.shape); + }; + const derScale = () => { + const xMinusMean2TimesRsqrt = mul(xMinusMean, oneOverSqrtVariance); + let scaleDer = mul(dy, xMinusMean2TimesRsqrt); + if (mean.rank === 1) { + scaleDer = sum$3(scaleDer, reductionAxes); + } + return reshape$3(scaleDer, mean.shape); + }; + const derOffset = () => { + let offsetDer = dy; + if (mean.rank === 1) { + offsetDer = sum$3(offsetDer, reductionAxes); + } + return reshape$3(offsetDer, mean.shape); + }; + return { + x: derX, + mean: derMean, + variance: derVariance, + scale: derScale, + offset: derOffset + }; + } + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ + const gatherGradConfig = { + kernelName: GatherV2, + inputsToSave: ['x', 'indices'], + gradFunc: (dy, saved, attrs) => { + const [x, indices] = saved; + const { axis, batchDims } = attrs; + const parsedAxis = parseAxisParam(axis, x.shape)[0]; + const derXBatch = (x, indices, dy) => { + return () => { + const paramsShape = x.shape; + const indicesSize = indices.size; + const outerShape = paramsShape.slice(0, parsedAxis); + const outerDims = outerShape.length; + const innerShape = paramsShape.slice(axis, paramsShape.length).slice(1); + const innerDims = innerShape.length; + const outerAxesIndices = arrayRange(0, outerDims); + const innerAxesIndices = arrayRange(outerDims + 1, outerDims + 1 + innerDims); + const valuesShape = arrayConcat([outerShape, [indicesSize], + innerShape]); + const values = reshape$3(dy, valuesShape); + const reshapedIndices = reshape$3(indices, [indicesSize]); + const transposeDims = arrayConcat([[outerDims], outerAxesIndices, innerAxesIndices]); + const valuesTranspose = transpose$2(values, transposeDims); + let paramsGrad = unsortedSegmentSum$2(valuesTranspose, reshapedIndices, x.shape[parsedAxis]); + const invertTransposeDims = getUndoAxesPermutation(transposeDims); + paramsGrad = transpose$2(paramsGrad, invertTransposeDims); + return paramsGrad; + }; + }; + if (batchDims === 1) { + const batchSize = x.shape[0]; + const xBatch = x.split(batchSize, 0); + const derXBatched = () => { + const stacked = stack(xBatch.map((x, i) => { + return derXBatch(x, indices.slice(i, 1), dy.slice(i, 1))(); + })); + return stacked.reshape(x.shape); + }; + return { x: derXBatched, indices: () => indices }; + } + else { + return { x: derXBatch(x, indices, dy), indices: () => indices }; + } + } + }; + function arrayRange(start, stop) { + const result = []; + for (let i = start; i < stop; ++i) { + result.push(i); + } + return result; + } + function arrayConcat(arrays) { + 
const result = []; + for (let i = 0; i < arrays.length; ++i) { + for (let j = 0; j < arrays[i].length; ++j) { + result.push(arrays[i][j]); + } + } + return result; + } + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + const greaterEqualGradConfig = { + kernelName: GreaterEqual, + inputsToSave: ['a', 'b'], + gradFunc: (dy, saved) => { + const [a, b] = saved; + return { a: () => zerosLike$3(a), b: () => zerosLike$3(b) }; + } + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ + const identityGradConfig = { + kernelName: Identity$1, + gradFunc: (dy) => { + return { x: () => cast$3(dy, 'float32') }; + } + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + const isFiniteGradConfig = { + kernelName: IsFinite, + gradFunc: (dy) => { + // TODO(nsthorat): Let gradients be null for cases where we want to stop + // backpropgation. + return { x: () => zerosLike$3(dy) }; + } + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ + const isInfGradConfig = { + kernelName: IsInf, + gradFunc: (dy) => { + // TODO(nsthorat): Let gradients be null for cases where we want to stop + // backpropgation. + return { x: () => zerosLike$3(dy) }; + } + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + const isNanGradConfig = { + kernelName: IsNan, + gradFunc: (dy) => { + // TODO(nsthorat): Let gradients be null for cases where we want to stop + // backpropgation. + return { x: () => zerosLike$3(dy) }; + } + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ + const leakyReluGradConfig = { + kernelName: LeakyRelu, + inputsToSave: ['x'], + gradFunc: (dy, saved, attrs) => { + const [x] = saved; + const { alpha } = attrs; + const mask = greater$3(x, 0); + // Returns `gradients * (features > 0) + alpha * gradients * (features <= + // 0)`. + return { x: () => where(mask, dy, mul(dy, alpha)) }; + } + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + const log1pGradConfig = { + kernelName: Log1p, + inputsToSave: ['x'], + gradFunc: (dy, saved) => { + const [x] = saved; + return { x: () => div$1(dy, add$3(x, 1)) }; + } + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ + const logGradConfig = { + kernelName: Log, + inputsToSave: ['x'], + gradFunc: (dy, saved) => { + const [x] = saved; + return { x: () => div$1(dy, cast$3(x, 'float32')) }; + } + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + const logSoftmaxGradConfig = { + kernelName: LogSoftmax$1, + inputsToSave: [], + outputsToSave: [true], + gradFunc: (dy, saved, attrs) => { + const [value] = saved; + const { axis } = attrs; + return { + logits: () => { + const keepDims = true; + const softmax = exp$2(value); + return sub$2(dy, mul(sum$3(dy, axis, keepDims), softmax)); + } + }; + } + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ + function localResponseNormalizationBackprop_(x, y, dy, depthRadius = 5, bias = 1, alpha = 1, beta = 0.5) { + const inputs = { x, y, dy }; + const attrs = { depthRadius, bias, alpha, beta }; + return ENGINE.runKernel(LRNGrad, inputs, attrs); + } + const localResponseNormalizationBackprop = op({ localResponseNormalizationBackprop_ }); + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + const lrnGradConfig = { + kernelName: LRN, + inputsToSave: ['x'], + outputsToSave: [true], + gradFunc: (dy, saved, attrs) => { + const [x, y] = saved; + const { depthRadius, bias, alpha, beta } = attrs; + return { + x: () => localResponseNormalizationBackprop(x, y, dy, depthRadius, bias, alpha, beta) + }; + } + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + /** + * Gradient helper function for the min and max operations. + */ + function gradForMinAndMax(dy, y, xOrig, origAxes) { + if (y.rank < xOrig.rank) { + y = reshape$3(y, expandShapeToKeepDim(y.shape, origAxes)); + } + if (dy.rank < xOrig.rank) { + dy = reshape$3(dy, expandShapeToKeepDim(dy.shape, origAxes)); + } + return { + x: () => { + const dx = mul(dy, cast$3(equal$2(xOrig, y), dy.dtype)); + return dx; + } + }; + } + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + const maxGradConfig = { + kernelName: Max, + inputsToSave: ['x'], + outputsToSave: [true], + gradFunc: (dy, saved, attrs) => { + const maxAttrs = attrs; + const { reductionIndices } = maxAttrs; + const x = saved[0]; + const y = saved[1]; + const origAxes = parseAxisParam(reductionIndices, x.shape); + const maxGrad = gradForMinAndMax(dy, y, x, origAxes); + return { + x: () => { + return maxGrad['x'](); + } + }; + } + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + const maximumGradConfig = { + kernelName: Maximum$1, + inputsToSave: ['a', 'b'], + gradFunc: (dy, saved) => { + const [a, b] = saved; + const derA = () => mul(dy, cast$3(greaterEqual$2(a, b), 'float32')); + const derB = () => mul(dy, cast$3(less$3(a, b), 'float32')); + return { a: derA, b: derB }; + } + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + /** + * Computes the backprop of a 3d max pool. + * + * @param dy The dy error, of rank 5 of shape + * [batchSize, depth, height, width, channels]. + * assumed. + * @param input The original input image, of rank 5 or rank 4 of shape + * [batchSize, depth, height, width, channels]. + * @param output The original output image, of rank 5 of shape + * [batchSize, outDepth, outHeight, outWidth, channels]. 
+ * @param filterSize The filter size: + * `[filterDepth, filterHeight, filterWidth]`. + * `filterSize` is a single number, + * then `filterDepth == filterHeight == filterWidth`. + * @param strides The strides of the pooling: + * `[strideDepth, strideHeight, strideWidth]`. If + * `strides` is a single number, then `strideHeight == strideWidth`. + * @param pad A string from: 'same', 'valid'. The type of padding algorithm + * used in the forward prop of the op. + * @param dimRoundingMode A string from: 'ceil', 'round', 'floor'. If none is + * provided, it will default to truncate. + */ + function maxPool3dGrad_(dy, input, output, filterSize, strides, pad, dimRoundingMode) { + const $dy = convertToTensor(dy, 'dy', 'maxPool3dGrad'); + const $input = convertToTensor(input, 'input', 'maxPool3dGrad'); + const $output = convertToTensor(output, 'output', 'maxPool3dGrad'); + let dy5D = $dy; + let input5D = $input; + let output5D = $output; + let reshapedTo5D = false; + if ($input.rank === 4) { + reshapedTo5D = true; + dy5D = reshape$3($dy, [1, $dy.shape[0], $dy.shape[1], $dy.shape[2], $dy.shape[3]]); + input5D = reshape$3($input, [ + 1, $input.shape[0], $input.shape[1], $input.shape[2], $input.shape[3] + ]); + output5D = reshape$3($output, [ + 1, $output.shape[0], $output.shape[1], $output.shape[2], $output.shape[3] + ]); + } + assert$1(dy5D.rank === 5, () => `Error in maxPool3dGrad: dy must be rank 5 but got rank ` + + `${dy5D.rank}.`); + assert$1(input5D.rank === 5, () => `Error in maxPool3dGrad: input must be rank 5 but got rank ` + + `${input5D.rank}.`); + assert$1(output5D.rank === 5, () => `Error in maxPool3dGrad: output must be rank 5 but got rank ` + + `${output5D.rank}.`); + checkPadOnDimRoundingMode('maxPool3dGrad', pad, dimRoundingMode); + const inputs = { dy: dy5D, input: input5D, output: output5D }; + const attrs = { filterSize, strides, pad, dimRoundingMode }; + // tslint:disable-next-line: no-unnecessary-type-assertion + const res = 
ENGINE.runKernel(MaxPool3DGrad, inputs, attrs); + if (reshapedTo5D) { + return reshape$3(res, [res.shape[1], res.shape[2], res.shape[3], res.shape[4]]); + } + return res; + } + const maxPool3dGrad = /* @__PURE__ */ op({ maxPool3dGrad_ }); + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + const maxPool3DGradConfig$2 = { + kernelName: MaxPool3D, + inputsToSave: ['x'], + outputsToSave: [true], + gradFunc: (dy, saved, attrs) => { + const [x, y] = saved; + const { filterSize, strides, pad, dimRoundingMode } = attrs; + return { + x: () => maxPool3dGrad(dy, x, y, filterSize, strides, pad, dimRoundingMode) + }; + } + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ + /** + * Computes the backprop of a 2D max pool. + * + * @param dy The dy error, of rank 4 or rank 3 of shape + * [batchSize, height, width, channels]. If rank 3, batch of 1 is + * assumed. + * @param input The original input image, of rank 4, of shape + * [batchSize, height, width, channels]. + * @param output The original output image, of rank 4, of shape + * [batchSize, outHeight, outWidth, channels]. + * @param filterSize The filter size: `[filterHeight, filterWidth]`. If + * `filterSize` is a single number, then `filterHeight == filterWidth`. + * @param strides The strides of the pooling: `[strideHeight, strideWidth]`. If + * `strides` is a single number, then `strideHeight == strideWidth`. + * @param pad The type of padding algorithm used in the forward prop of the op. + * 'same', 'valid', for more info, see this guide: + * [https://www.tensorflow.org/api_docs/python/tf/nn/convolution]( + * https://www.tensorflow.org/api_docs/python/tf/nn/convolution) + * @param dimRoundingMode A string from: 'ceil', 'round', 'floor'. If none is + * provided, it will default to truncate. 
+ */ + function maxPoolGrad_(dy, input, output, filterSize, strides, pad, dimRoundingMode) { + const $dy = convertToTensor(dy, 'dy', 'maxPoolGrad'); + const $input = convertToTensor(input, 'input', 'maxPoolGrad'); + const $output = convertToTensor(output, 'output', 'maxPoolGrad'); + assert$1($input.rank === $dy.rank, () => `Rank of input (${$input.rank}) does not match rank of dy ` + + `(${$dy.rank})`); + assert$1($dy.rank === 4, () => `Error in maxPoolGrad: dy must be rank 4 but got rank ` + + `${$dy.rank}.`); + assert$1($input.rank === 4, () => `Error in maxPoolGrad: input must be rank 4 but got rank ` + + `${$input.rank}.`); + checkPadOnDimRoundingMode('maxPoolGrad', pad, dimRoundingMode); + const inputs = { dy: $dy, input: $input, output: $output }; + const attrs = { filterSize, strides, pad, dimRoundingMode }; + // tslint:disable-next-line: no-unnecessary-type-assertion + return ENGINE.runKernel(MaxPoolGrad, inputs, attrs); + } + const maxPoolGrad$2 = /* @__PURE__ */ op({ maxPoolGrad_ }); + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ + const maxPoolGradConfig$2 = { + kernelName: MaxPool, + inputsToSave: ['x'], + outputsToSave: [true], + gradFunc: (dy, saved, attrs) => { + const [x, y] = saved; + const { filterSize, strides, pad } = attrs; + return { + x: () => maxPoolGrad$2(dy, x, y, filterSize, strides, pad) + }; + } + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + const meanGradConfig = { + kernelName: Mean, + inputsToSave: ['x'], + gradFunc: (dy, saved, attrs) => { + const [x] = saved; + const { axis } = attrs; + const axes = parseAxisParam(axis, x.shape); + const shapes = computeOutAndReduceShapes(x.shape, axes); + const reduceShape = shapes[1]; + const reduceSize = sizeFromShape(reduceShape); + const derX = () => { + const expandedDyShape = x.shape.slice(); + axes.forEach(axis => { + expandedDyShape[axis] = 1; + }); + const expandedDy = reshape$3(dy, expandedDyShape); + const res = div$1(mul(expandedDy, ones$1(x.shape, 'float32')), reduceSize); + return res; + }; + return { x: derX }; + } + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + const minGradConfig = { + kernelName: Min, + inputsToSave: ['x'], + outputsToSave: [true], + gradFunc: (dy, saved, attrs) => { + const minAttrs = attrs; + const { axis } = minAttrs; + const [x, y] = saved; + const origAxes = parseAxisParam(axis, x.shape); + const minGrad = gradForMinAndMax(dy, y, x, origAxes); + return { + x: () => { + return minGrad['x'](); + } + }; + } + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + const minimumGradConfig = { + kernelName: Minimum$1, + inputsToSave: ['a', 'b'], + gradFunc: (dy, saved) => { + const [a, b] = saved; + const derA = () => mul(dy, cast$3(lessEqual$2(a, b), 'float32')); + const derB = () => mul(dy, cast$3(greater$3(a, b), 'float32')); + return { a: derA, b: derB }; + } + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. 
+ * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + const mirrorPadGradConfig = { + kernelName: MirrorPad, + inputsToSave: ['x'], + gradFunc: (dy, saved, attrs) => { + // Pad introduces values around the original tensor, so the gradient + // slices the original shape out of the gradient. + const x = saved[0]; + const { paddings } = attrs; + const begin = paddings.map(p => p[0]); + return { x: () => slice$2(dy, begin, x.shape) }; + } + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ + const modGradConfig = { + kernelName: Mod, + inputsToSave: ['a', 'b'], + gradFunc: (dy, saved) => { + const [a, b] = saved; + const outShape = assertAndGetBroadcastShape(a.shape, b.shape); + const derA = () => { + const reduceAxes = getReductionAxes(a.shape, outShape); + if (reduceAxes.length > 0) { + return reshape$3(sum$3(dy, reduceAxes), a.shape); + } + return dy; + }; + const derB = () => { + const res = mul(dy, neg$2(floor$2(div$1(a, b)))); + const reduceAxes = getReductionAxes(b.shape, outShape); + if (reduceAxes.length > 0) { + return reshape$3(sum$3(res, reduceAxes), b.shape); + } + return res; + }; + return { a: derA, b: derB }; + } + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ + const multiplyGradConfig = { + kernelName: Multiply$1, + inputsToSave: ['a', 'b'], + gradFunc: (dy, saved) => { + const [a, b] = saved; + const outShape = assertAndGetBroadcastShape(a.shape, b.shape); + const derA = () => { + const res = mul(dy, cast$3(b, 'float32')); + const reduceAxes = getReductionAxes(a.shape, outShape); + if (reduceAxes.length > 0) { + return reshape$3(sum$3(res, reduceAxes), a.shape); + } + return res; + }; + const derB = () => { + const res = mul(dy, cast$3(a, 'float32')); + const reduceAxes = getReductionAxes(b.shape, outShape); + if (reduceAxes.length > 0) { + return reshape$3(sum$3(res, reduceAxes), b.shape); + } + return res; + }; + return { a: derA, b: derB }; + } + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + const negGradConfig = { + kernelName: Neg, + gradFunc: (dy) => { + return { x: () => neg$2(dy) }; + } + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + const oneHotGradConfig = { + kernelName: OneHot, + inputsToSave: ['indices'], + gradFunc: (dy, saved) => { + const indices = saved[0]; + return { indices: () => zeros$2(indices.shape, 'float32') }; + } + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + const onesLikeGradConfig = { + kernelName: OnesLike, + gradFunc: (dy) => { + return { x: () => zerosLike$3(dy) }; + } + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + const packGradConfig = { + kernelName: Pack, + saveAllInputs: true, + gradFunc: (dy, saved, attrs) => { + const { axis } = attrs; + const derTensors = unstack(dy, axis); + return derTensors.map(t => () => t); + } + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + const padV2GradConfig = { + kernelName: PadV2, + inputsToSave: ['x'], + gradFunc: (dy, saved, attrs) => { + // Pad introduces values around the original tensor, so the gradient + // slices the original shape out of the gradient. + const x = saved[0]; + const { paddings } = attrs; + const begin = paddings.map(p => p[0]); + return { x: () => slice$2(dy, begin, x.shape) }; + } + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. 
+ * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + const powGradConfig = { + kernelName: Pow, + inputsToSave: ['a', 'b'], + outputsToSave: [true], + gradFunc: (dy, saved) => { + const [a, b, y] = saved; + const base = a; + const exp = b; + const outShape = assertAndGetBroadcastShape(base.shape, exp.shape); + const derBase = () => { + const expFloat = cast$3(exp, 'float32'); + let res = mul(dy, mul(expFloat, pow$3(base, sub$2(expFloat, scalar(1))))); + const reduceAxes = getReductionAxes(base.shape, outShape); + if (reduceAxes.length > 0) { + res = sum$3(res, reduceAxes); + } + return reshape$3(res, base.shape); + }; + const derExp = () => { + const condition = greater$3(base, 0); + const logBase = where(condition, log$2(base), zerosLike$3(base)); + let res = mul(dy, mul(y, logBase)); + const reduceAxes = getReductionAxes(exp.shape, outShape); + if (reduceAxes.length > 0) { + res = sum$3(res, reduceAxes); + } + return reshape$3(res, exp.shape); + }; + return { a: derBase, b: derExp }; + } + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + const preluGradConfig = { + kernelName: Prelu, + inputsToSave: ['x', 'alpha'], + gradFunc: (dy, saved) => { + const [x, alpha] = saved; + const mask = greater$3(x, 0); + return { + x: () => where(mask, dy, mul(dy, alpha)), + alpha: () => { + let res = where(mask, zerosLike$3(dy), mul(dy, x)); + const reduceAxes = getReductionAxes(alpha.shape, dy.shape); + if (reduceAxes.length > 0) { + res = sum$3(res, reduceAxes); + } + return reshape$3(res, alpha.shape); + } + }; + } + }; + + /** + * @license + * Copyright 2022 Google Inc. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + // Gradient for product operation on a single axis. + function prodGradFn_(x, dy, axis) { + // The gradient tensor (dy) has a set of axes removed, so we create re-shaped + // versions (of size 1) for the removed axis; this supports broadcasting over + // those dimensions. 
+ const expandedYShape = x.shape.slice(); + expandedYShape[axis] = 1; + // The actual gradient computation. + const expandedDy = reshape$3(dy, expandedYShape); + const xCumProd = cumprod$2(x, axis, true, false); + const xCumRevProd = cumprod$2(x, axis, true, true); + const dx = mul(xCumProd, xCumRevProd); + return mul(expandedDy, dx); + } + // Support gradients when the product is done on many axes at once. + // This done py pushing all the axes on which the product is applied into a + // single axis. + function prodsGradFn_(x, dy, axis) { + // Move all axes for doing prod over to the end of the tensor. + const xRank = x.shape.length; + const finalProdAxis = xRank - axis.length; + const xPermutation = getAxesPermutation(axis, xRank); + let permutedX = x; + if (xPermutation != null) { + permutedX = transpose$2(x, xPermutation); + } + // Reshape all the prod dimensions into a single one, and do compute prod + // gradients on that. + const newShape = permutedX.shape.slice(); + const removedShape = newShape.splice(xRank - axis.length, axis.length); + const endPartShape = removedShape.reduce((p, c) => p * c, 1); + newShape.push(endPartShape); + const reshapedPermutedX = permutedX.reshape(newShape); + let prodGrad = prodGradFn_(reshapedPermutedX, dy, finalProdAxis); + // Undo the re-shaping now we have the dx vector, and permute back to + // original axes order. 
+ prodGrad = prodGrad.reshape(permutedX.shape); + if (xPermutation != null) { + const undoPermutation = getUndoAxesPermutation(xPermutation); + prodGrad = transpose$2(prodGrad, undoPermutation); + } + return prodGrad; + } + // Running example: + // [ + // [ + // [3.0, 4.0], + // [5.0, 6.0], + // [7.0, 8.0] + // ], + // [ + // [3.0, 5.0], + // [0.0, 6.0], + // [5.0, 6.0] + // ] + // ] + // + const prodGradConfig = { + kernelName: Prod, + inputsToSave: ['x'], + gradFunc: (dy, saved, attrs) => { + const [x] = saved; + const { axis } = attrs; + let axisArr = []; + if (axis === undefined || axis === null) { + axisArr = x.shape.map((_, i) => i); + } + else if (typeof axis === 'number') { + axisArr = [axis]; + } + else { + axisArr = axis; + } + return { x: () => prodsGradFn_(x, dy, axisArr) }; + } + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ + const divGradConfig = { + kernelName: RealDiv, + inputsToSave: ['a', 'b'], + gradFunc: (dy, saved) => { + const [a, b] = saved; + const outShape = assertAndGetBroadcastShape(a.shape, b.shape); + const derA = () => { + const res = div$1(dy, cast$3(b, 'float32')); + const reduceAxes = getReductionAxes(a.shape, outShape); + if (reduceAxes.length > 0) { + return reshape$3(sum$3(res, reduceAxes), a.shape); + } + return res; + }; + const derB = () => { + let res = mul(dy, cast$3(a, 'float32')); + const reduceAxes = getReductionAxes(b.shape, outShape); + if (reduceAxes.length > 0) { + res = reshape$3(sum$3(res, reduceAxes), b.shape); + } + const tmp = square$2(b); + return neg$2(div$1(res, cast$3(tmp, 'float32'))); + }; + return { a: derA, b: derB }; + } + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + const reciprocalGradConfig = { + kernelName: Reciprocal, + inputsToSave: ['x'], + gradFunc: (dy, saved) => { + const [x] = saved; + return { x: () => div$1(dy, neg$2(square$2(x))) }; + } + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + const relu6GradConfig = { + kernelName: Relu6$1, + inputsToSave: ['x'], + gradFunc: (dy, saved) => { + const [x] = saved; + const mask = mul(lessEqual$2(x, 6), step$2(x)); + return { x: () => mul(dy, cast$3(mask, 'float32')) }; + } + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + const reluGradConfig = { + kernelName: Relu$1, + inputsToSave: ['x'], + gradFunc: (dy, saved) => { + const [x] = saved; + return { x: () => mul(dy, cast$3(step$2(x), 'float32')) }; + } + }; + + /** + * @license + * Copyright 2020 Google Inc. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + const reshapeGradConfig = { + kernelName: Reshape$1, + inputsToSave: ['x'], + gradFunc: (dy, saved) => { + const [x] = saved; + return { x: () => reshape$3(dy, x.shape) }; + } + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + const resizeBilinearGradConfig$2 = { + kernelName: ResizeBilinear, + inputsToSave: ['images'], + gradFunc: (dy, saved, attrs) => { + const [images] = saved; + const inputs = { dy, images }; + const imagesDer = () => + // tslint:disable-next-line: no-unnecessary-type-assertion + ENGINE.runKernel(ResizeBilinearGrad, inputs, attrs); + return { images: imagesDer }; + } + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. 
+ * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + const resizeNearestNeighborGradConfig$2 = { + kernelName: ResizeNearestNeighbor, + inputsToSave: ['images'], + gradFunc: (dy, saved, attrs) => { + const [images] = saved; + const inputs = { dy, images }; + const imagesDer = () => + // tslint:disable-next-line: no-unnecessary-type-assertion + ENGINE.runKernel(ResizeNearestNeighborGrad, inputs, attrs); + return { images: imagesDer }; + } + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ + const reverseGradConfig = { + kernelName: Reverse, + gradFunc: (dy, saved, attrs) => { + const { dims } = attrs; + const axes = parseAxisParam(dims, dy.shape); + return { x: () => reverse$2(dy, axes) }; + } + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + const roundGradConfig = { + kernelName: Round, + gradFunc: (dy) => { + // TODO(nsthorat): Let gradients be null for cases where we want to stop + // backpropgation. + return { x: () => zerosLike$3(dy) }; + } + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ + const rsqrtGradConfig = { + kernelName: Rsqrt, + inputsToSave: ['x'], + gradFunc: (dy, saved) => { + const [x] = saved; + return { x: () => neg$2(div$1(dy, mul(pow$3(x, 1.5), 2))) }; + } + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + const selectGradConfig = { + kernelName: Select, + inputsToSave: ['condition'], + gradFunc: (dy, saved) => { + const [condition] = saved; + return { + // TODO(julianoks): Return null for condition gradient + // when backprop supports it. + condition: () => cast$3(zerosLike$3(condition), 'float32'), + t: () => mul(dy, cast$3(condition, dy.dtype)), + e: () => mul(dy, cast$3(logicalNot$2(condition), dy.dtype)) + }; + } + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + const seluGradConfig = { + kernelName: Selu$1, + inputsToSave: ['x'], + gradFunc: (dy, saved) => { + const [x] = saved; + return { + x: () => { + const mask = greater$3(x, scalar(0)); + const scaleAlpha = scalar(SELU_SCALEALPHA); + const scale = scalar(SELU_SCALE); + const greaterThanZeroDer = mul(dy, scale); + const lessEqualZeroDer = mul(mul(dy, scaleAlpha), exp$2(cast$3(x, 'float32'))); + return where(mask, greaterThanZeroDer, lessEqualZeroDer); + } + }; + } + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + const sigmoidGradConfig = { + kernelName: Sigmoid$1, + outputsToSave: [true], + gradFunc: (dy, saved) => { + const [y] = saved; + return { x: () => mul(dy, mul(y, sub$2(scalar(1), y))) }; + } + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + const signGradConfig = { + kernelName: Sign, + gradFunc: (dy) => { + return { x: () => zerosLike$3(dy) }; + } + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + const sinGradConfig = { + kernelName: Sin, + inputsToSave: ['x'], + gradFunc: (dy, saved) => { + const [x] = saved; + return { x: () => mul(cos$2(cast$3(x, 'float32')), dy) }; + } + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + const sinhGradConfig = { + kernelName: Sinh, + inputsToSave: ['x'], + gradFunc: (dy, saved) => { + const [x] = saved; + return { x: () => mul(cosh$2(cast$3(x, 'float32')), dy) }; + } + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + const sliceGradConfig = { + kernelName: Slice, + inputsToSave: ['x'], + gradFunc: (dy, saved, attrs) => { + const [x] = saved; + const { begin, size } = attrs; + const inputShape = x.shape; + const [begin_, size_] = parseSliceParams(x, begin, size); + // Create an Nx2 padding where the first column represents how many + // zeros are prepended (at start) for each dimension, and the second + // column indicates how many zeros are appended (at end). 
+ // The number of zeros to append is the shape of the input + // elementwise-subtracted by both the begin vector and sizes vector. + const paddings = []; + for (let i = 0; i < dy.rank; i++) { + paddings.push([begin_[i], inputShape[i] - begin_[i] - size_[i]]); + } + return { x: () => pad(dy, paddings) }; + } + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + const softmaxGradConfig = { + kernelName: Softmax$2, + outputsToSave: [true], + gradFunc: (dy, saved, attrs) => { + const [y] = saved; + const { dim } = attrs; + const keepDims = true; + const dyTimesY = mul(dy, y); + return { + logits: () => sub$2(dyTimesY, mul(sum$3(dyTimesY, [dim], keepDims), y)) + }; + } + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ + const softplusGradConfig = { + kernelName: Softplus$1, + inputsToSave: ['x'], + gradFunc: (dy, saved) => { + const [x] = saved; + return { x: () => mul(dy, sigmoid$2(x)) }; + } + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + const spaceToBatchNDGradConfig = { + kernelName: SpaceToBatchND, + gradFunc: (dy, saved, attrs) => { + const { blockShape, paddings } = attrs; + return { x: () => batchToSpaceND$2(dy, blockShape, paddings) }; + } + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ + const splitVGradConfig = { + kernelName: SplitV, + gradFunc: (dy, saved, attrs) => { + const { axis } = attrs; + return { x: () => concat$2(dy, axis) }; + } + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + const sqrtGradConfig = { + kernelName: Sqrt, + inputsToSave: ['x'], + gradFunc: (dy, saved) => { + const [x] = saved; + return { x: () => div$1(dy, mul(sqrt$2(cast$3(x, 'float32')), 2)) }; + } + }; + + /** + * @license + * Copyright 2019 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ + const squareGradConfig = { + kernelName: Square, + inputsToSave: ['x'], + gradFunc: (dy, saved) => { + const [x] = saved; + return { x: () => mul(dy, mul(cast$3(x, 'float32'), 2)) }; + } + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + const squaredDifferenceGradConfig = { + kernelName: SquaredDifference, + inputsToSave: ['a', 'b'], + gradFunc: (dy, saved) => { + const [a, b] = saved; + const two = scalar(2); + const derA = () => mul(dy, mul(two, sub$2(a, b))); + const derB = () => mul(dy, mul(two, sub$2(b, a))); + return { a: derA, b: derB }; + } + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ + const stepGradConfig = { + kernelName: Step, + gradFunc: (dy) => { + // TODO(manrajgrover): Return null for gradients when backprop supports + // it. + return { x: () => zerosLike$3(dy) }; + } + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + const subGradConfig = { + kernelName: Sub, + inputsToSave: ['a', 'b'], + gradFunc: (dy, saved) => { + const [a, b] = saved; + const outShape = assertAndGetBroadcastShape(a.shape, b.shape); + const derA = () => { + let res = dy; + const reduceAxes = getReductionAxes(a.shape, outShape); + if (reduceAxes.length > 0) { + res = sum$3(res, reduceAxes); + } + return reshape$3(res, a.shape); + }; + const derB = () => { + let res = dy; + const reduceAxes = getReductionAxes(b.shape, outShape); + if (reduceAxes.length > 0) { + res = sum$3(res, reduceAxes); + } + return reshape$3(neg$2(res), b.shape); + }; + return { a: derA, b: derB }; + } + }; + + /** + * @license + * Copyright 2020 Google Inc. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + const sumGradConfig = { + kernelName: Sum, + inputsToSave: ['x'], + gradFunc: (dy, saved, attrs) => { + const [x] = saved; + const expandedDyShape = x.shape.slice(); + const { axis } = attrs; + const axes = parseAxisParam(axis, x.shape); + axes.forEach(axis => { + expandedDyShape[axis] = 1; + }); + const expandedDy = reshape$3(dy, expandedDyShape); + const derX = mul(expandedDy, ones$1(x.shape, 'float32')); + return { x: () => derX }; + } + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + const tanGradConfig = { + kernelName: Tan, + inputsToSave: ['x'], + gradFunc: (dy, saved) => { + const [x] = saved; + return { x: () => div$1(dy, square$2(cos$2(x))) }; + } + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. 
+ * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + const tanhGradConfig = { + kernelName: Tanh$1, + outputsToSave: [true], + gradFunc: (dy, saved) => { + const [y] = saved; + return { x: () => mul(sub$2(scalar(1), square$2(y)), dy) }; + } + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + const tileGradConfig = { + kernelName: Tile, + inputsToSave: ['x'], + gradFunc: (dy, saved, attrs) => { + const [x] = saved; + const { reps } = attrs; + const derX = () => { + let xGrad = zerosLike$3(x); + // TODO(cais): Maybe reduce memory footprint by avoiding repeated + // slicing. 
+ if (x.rank === 1) { + for (let i = 0; i < reps[0]; ++i) { + xGrad = add$3(xGrad, slice$2(dy, [i * x.shape[0]], [x.shape[0]])); + } + } + else if (x.rank === 2) { + for (let i = 0; i < reps[0]; ++i) { + for (let j = 0; j < reps[1]; ++j) { + xGrad = add$3(xGrad, slice$2(dy, [i * x.shape[0], j * x.shape[1]], [ + x.shape[0], x.shape[1] + ])); + } + } + } + else if (x.rank === 3) { + for (let i = 0; i < reps[0]; ++i) { + for (let j = 0; j < reps[1]; ++j) { + for (let k = 0; k < reps[2]; ++k) { + xGrad = + add$3(xGrad, slice$2(dy, [i * x.shape[0], j * x.shape[1], k * x.shape[2]], [x.shape[0], x.shape[1], x.shape[2]])); + } + } + } + } + else if (x.rank === 4) { + for (let i = 0; i < reps[0]; ++i) { + for (let j = 0; j < reps[1]; ++j) { + for (let k = 0; k < reps[2]; ++k) { + for (let l = 0; l < reps[3]; ++l) { + xGrad = + add$3(xGrad, slice$2(dy, [ + i * x.shape[0], j * x.shape[1], k * x.shape[2], + l * x.shape[3] + ], [x.shape[0], x.shape[1], x.shape[2], x.shape[3]])); + } + } + } + } + } + else { + throw new Error(`Gradient for tile operation is not implemented for rank-` + + `${x.rank} tensors yet.`); + } + return xGrad; + }; + return { x: derX }; + }, + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ + const transposeGradConfig = { + kernelName: Transpose, + gradFunc: (dy, saved, attrs) => { + const transposeAttrs = attrs; + const { perm } = transposeAttrs; + const undoPerm = getUndoAxesPermutation(perm); + return { x: () => transpose$2(dy, undoPerm) }; + } + }; + + /** + * @license + * Copyright 2020 Google Inc. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + const unpackGradConfig = { + kernelName: Unpack, + gradFunc: (dy, saved, attrs) => { + const unpackAttrs = attrs; + const { axis } = unpackAttrs; + return { value: () => stack(dy, axis) }; + } + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ + const unsortedSegmentSumGradConfig = { + kernelName: UnsortedSegmentSum, + inputsToSave: ['segmentIds'], + gradFunc: (dy, saved) => { + const [segmentIds] = saved; + const derX = () => { + return gatherDropNegatives(dy, segmentIds); + }; + return { x: derX }; + } + }; + function gatherDropNegatives(x, indices) { + // Helper function for unsorted segment ops. Gathers params for + // positive segment ids and gathers 0 for inputs with negative segment id. + // Mirrors _GatherDropNegatives from tensorflow/python/ops/math_grad.py + const zeroClippedIndices = maximum$4(indices, zerosLike$3(indices)); + const gathered = gather$1(x, zeroClippedIndices); + let isPositive = greaterEqual$2(indices, scalar(0, 'int32')); + const numIters = gathered.rank - isPositive.rank; + for (let i = 0; i < numIters; ++i) { + isPositive = expandDims$3(isPositive, i + 1); + } + isPositive = logicalAnd$2(isPositive, ones$1(gathered.shape, 'bool')); + const zeroSlice = zerosLike$3(gathered); + return where(isPositive, gathered, zeroSlice); + } + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ + const zerosLikeGradConfig = { + kernelName: ZerosLike, + gradFunc: (dy) => { + return { x: () => zerosLike$3(dy) }; + } + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + // Export all kernel configs here so that the package can auto register them + const gradConfigs = [ + absGradConfig, + acosGradConfig, + acoshGradConfig, + addGradConfig, + addNGradConfig, + argMaxGradConfig, + argMinGradConfig, + asinGradConfig, + asinhGradConfig, + atan2GradConfig, + atanGradConfig, + atanhGradConfig, + avgPool3DGradConfig$2, + avgPoolGradConfig$2, + batchMatMulGradConfig, + batchToSpaceNDGradConfig, + broadcastToGradConfig, + castGradConfig, + ceilGradConfig, + clipByValueGradConfig, + complexAbsGradConfig, + concatGradConfig, + conv2DBackpropInputGradConfig, + conv2DGradConfig, + conv3DGradConfig, + cosGradConfig, + coshGradConfig, + cumsumGradConfig, + depthwiseConv2dNativeGradConfig, + dilation2dGradConfig, + divGradConfig, + eluGradConfig$2, + erfGradConfig, + expGradConfig, + expandDimsGradConfig, + expm1GradConfig, + floorDivGradConfig, + floorGradConfig, + fusedBatchNormGradConfig, + gatherGradConfig, + greaterEqualGradConfig, + identityGradConfig, + isFiniteGradConfig, + isInfGradConfig, + isNanGradConfig, + 
leakyReluGradConfig, + log1pGradConfig, + logGradConfig, + logSoftmaxGradConfig, + lrnGradConfig, + maxGradConfig, + maxGradConfig, + maximumGradConfig, + maxPool3DGradConfig$2, + maxPoolGradConfig$2, + meanGradConfig, + minGradConfig, + minimumGradConfig, + mirrorPadGradConfig, + modGradConfig, + multiplyGradConfig, + negGradConfig, + oneHotGradConfig, + onesLikeGradConfig, + packGradConfig, + padV2GradConfig, + padV2GradConfig, + powGradConfig, + preluGradConfig, + prodGradConfig, + reciprocalGradConfig, + relu6GradConfig, + reluGradConfig, + reshapeGradConfig, + resizeBilinearGradConfig$2, + resizeNearestNeighborGradConfig$2, + reverseGradConfig, + roundGradConfig, + rsqrtGradConfig, + selectGradConfig, + seluGradConfig, + sigmoidGradConfig, + signGradConfig, + sinGradConfig, + sinhGradConfig, + sliceGradConfig, + softmaxGradConfig, + softplusGradConfig, + spaceToBatchNDGradConfig, + spaceToBatchNDGradConfig, + splitVGradConfig, + splitVGradConfig, + sqrtGradConfig, + squaredDifferenceGradConfig, + squareGradConfig, + stepGradConfig, + subGradConfig, + sumGradConfig, + tanGradConfig, + tanhGradConfig, + tileGradConfig, + transposeGradConfig, + unpackGradConfig, + unsortedSegmentSumGradConfig, + zerosLikeGradConfig + ]; + for (const gradientConfig of gradConfigs) { + registerGradient(gradientConfig); + } + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ + getGlobalTensorClass().prototype.abs = function () { + this.throwIfDisposed(); + return abs$2(this); + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + getGlobalTensorClass().prototype.acos = function () { + this.throwIfDisposed(); + return acos$2(this); + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + getGlobalTensorClass().prototype.acosh = function () { + this.throwIfDisposed(); + return acosh$2(this); + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. 
+ * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + getGlobalTensorClass().prototype.add = function (b) { + this.throwIfDisposed(); + return add$3(this, b); + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + getGlobalTensorClass().prototype.all = function (axis, keepDims) { + this.throwIfDisposed(); + return all$2(this, axis, keepDims); + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + getGlobalTensorClass().prototype.any = function (axis, keepDims) { + this.throwIfDisposed(); + return any$2(this, axis, keepDims); + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + getGlobalTensorClass().prototype.argMax = function (axis) { + this.throwIfDisposed(); + return argMax$2(this, axis); + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + getGlobalTensorClass().prototype.argMin = function (axis) { + this.throwIfDisposed(); + return argMin$2(this, axis); + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + /** + * Converts a size-1 `tf.Tensor` to a `tf.Scalar`. + * @doc {heading: 'Tensors', subheading: 'Classes'} + */ + getGlobalTensorClass().prototype.asScalar = function () { + this.throwIfDisposed(); + assert$1(this.size === 1, () => 'The array must have only 1 element.'); + return reshape$3(this, []); + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + /** + * Casts a `tf.Tensor` to a specified dtype. + * + * @param dtype Data-type to cast the tensor to. + * + * @doc {heading: 'Tensors', subheading: 'Classes'} + */ + getGlobalTensorClass().prototype.asType = function (dtype) { + this.throwIfDisposed(); + return cast$3(this, dtype); + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + /** + * Converts a `tf.Tensor` to a `tf.Tensor1D`. + * @doc {heading: 'Tensors', subheading: 'Classes'} + */ + getGlobalTensorClass().prototype.as1D = function () { + this.throwIfDisposed(); + return reshape$3(this, [this.size]); + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + /** + * Converts a `tf.Tensor` to a `tf.Tensor2D`. + * + * @param rows Number of rows in `tf.Tensor2D`. + * @param columns Number of columns in `tf.Tensor2D`. + * @doc {heading: 'Tensors', subheading: 'Classes'} + */ + getGlobalTensorClass().prototype.as2D = function (rows, columns) { + this.throwIfDisposed(); + return reshape$3(this, [rows, columns]); + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + /** + * Converts a `tf.Tensor` to a `tf.Tensor3D`. + * + * @param rows Number of rows in `tf.Tensor3D`. + * @param columns Number of columns in `tf.Tensor3D`. + * @param depth Depth of `tf.Tensor3D`. 
+ * @doc {heading: 'Tensors', subheading: 'Classes'} + */ + getGlobalTensorClass().prototype.as3D = function (rows, columns, depth) { + this.throwIfDisposed(); + return reshape$3(this, [rows, columns, depth]); + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + /** + * Converts a `tf.Tensor` to a `tf.Tensor4D`. + * + * @param rows Number of rows in `tf.Tensor4D`. + * @param columns Number of columns in `tf.Tensor4D`. + * @param depth Depth of `tf.Tensor4D`. + * @param depth2 4th dimension of `tf.Tensor4D`. + * @doc {heading: 'Tensors', subheading: 'Classes'} + */ + getGlobalTensorClass().prototype.as4D = function (rows, columns, depth, depth2) { + this.throwIfDisposed(); + return reshape$3(this, [rows, columns, depth, depth2]); + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + /** + * Converts a `tf.Tensor` to a `tf.Tensor5D`. + * + * @param rows Number of rows in `tf.Tensor5D`. + * @param columns Number of columns in `tf.Tensor5D`. + * @param depth Depth of `tf.Tensor5D`. + * @param depth2 4th dimension of `tf.Tensor5D`. + * @param depth3 5th dimension of 'tf.Tensor5D' + * + * @doc {heading: 'Tensors', subheading: 'Classes'} + */ + getGlobalTensorClass().prototype.as5D = function (rows, columns, depth, depth2, depth3) { + this.throwIfDisposed(); + return reshape$3(this, [rows, columns, depth, depth2, depth3]); + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + getGlobalTensorClass().prototype.asin = function () { + this.throwIfDisposed(); + return asin$2(this); + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + getGlobalTensorClass().prototype.asinh = function () { + this.throwIfDisposed(); + return asinh$2(this); + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + getGlobalTensorClass().prototype.atan = function () { + this.throwIfDisposed(); + return atan$2(this); + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + getGlobalTensorClass().prototype.atan2 = function (b) { + this.throwIfDisposed(); + return atan2$2(this, b); + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + getGlobalTensorClass().prototype.atanh = function () { + this.throwIfDisposed(); + return atanh$2(this); + }; + + getGlobalTensorClass().prototype.avgPool = + function (filterSize, strides, pad, dimRoundingMode) { + this.throwIfDisposed(); + return avgPool$2(this, filterSize, strides, pad, dimRoundingMode); + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ + getGlobalTensorClass().prototype.batchToSpaceND = function (blockShape, crops) { + this.throwIfDisposed(); + return batchToSpaceND$2(this, blockShape, crops); + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + getGlobalTensorClass().prototype.batchNorm = function (mean, variance, offset, scale, varianceEpsilon) { + this.throwIfDisposed(); + return batchNorm$2(this, mean, variance, offset, scale, varianceEpsilon); + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ + getGlobalTensorClass().prototype.broadcastTo = function (shape) { + this.throwIfDisposed(); + return broadcastTo(this, shape); + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + getGlobalTensorClass().prototype.cast = function (dtype) { + this.throwIfDisposed(); + return cast$3(this, dtype); + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + getGlobalTensorClass().prototype.ceil = function () { + this.throwIfDisposed(); + return ceil$2(this); + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. 
+ * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + getGlobalTensorClass().prototype.clipByValue = function (min, max) { + this.throwIfDisposed(); + return clipByValue$2(this, min, max); + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + getGlobalTensorClass().prototype.concat = function (x, axis) { + this.throwIfDisposed(); + if (x instanceof Tensor) { + x = [x]; + } + return concat$2([this, ...x], axis); + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + getGlobalTensorClass().prototype.conv1d = function (filter, stride, pad, dataFormat, dilation, dimRoundingMode) { + this.throwIfDisposed(); + return conv1d$2(this, filter, stride, pad, dataFormat, dilation, dimRoundingMode); + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + getGlobalTensorClass().prototype.conv2dTranspose = + function (filter, outputShape, strides, pad, dimRoundingMode) { + this.throwIfDisposed(); + return conv2dTranspose$1(this, filter, outputShape, strides, pad, dimRoundingMode); + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + getGlobalTensorClass().prototype.conv2d = function (filter, strides, pad, dataFormat, dilations, dimRoundingMode) { + this.throwIfDisposed(); + return conv2d$4(this, filter, strides, pad, dataFormat, dilations, dimRoundingMode); + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + getGlobalTensorClass().prototype.cos = function () { + this.throwIfDisposed(); + return cos$2(this); + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + getGlobalTensorClass().prototype.cosh = function () { + this.throwIfDisposed(); + return cosh$2(this); + }; + + /** + * @license + * Copyright 2022 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the 'License'); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an 'AS IS' BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + getGlobalTensorClass().prototype.cumprod = function (axis, exclusive, reverse) { + this.throwIfDisposed(); + return cumprod$2(this, axis, exclusive, reverse); + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + getGlobalTensorClass().prototype.cumsum = function (axis, exclusive, reverse) { + this.throwIfDisposed(); + return cumsum$2(this, axis, exclusive, reverse); + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + getGlobalTensorClass().prototype.depthToSpace = function (blockSize, dataFormat) { + this.throwIfDisposed(); + return depthToSpace$2(this, blockSize, dataFormat); + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + getGlobalTensorClass().prototype.depthwiseConv2d = + function (filter, strides, pad, dataFormat, dilations, dimRoundingMode) { + this.throwIfDisposed(); + return depthwiseConv2d$3(this, filter, strides, pad, dataFormat, dilations, dimRoundingMode); + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + getGlobalTensorClass().prototype.dilation2d = + function (filter, strides, pad, dilations, dataFormat) { + this.throwIfDisposed(); + return dilation2d(this, filter, strides, pad, dilations, dataFormat); + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + getGlobalTensorClass().prototype.divNoNan = function (b) { + this.throwIfDisposed(); + return divNoNan(this, b); + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + getGlobalTensorClass().prototype.div = function (b) { + this.throwIfDisposed(); + return div$1(this, b); + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + getGlobalTensorClass().prototype.dot = function (b) { + this.throwIfDisposed(); + return dot$2(this, b); + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + getGlobalTensorClass().prototype.elu = function () { + this.throwIfDisposed(); + return elu$4(this); + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ + getGlobalTensorClass().prototype.equal = function (b) { + this.throwIfDisposed(); + return equal$2(this, b); + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + getGlobalTensorClass().prototype.erf = function () { + this.throwIfDisposed(); + return erf$2(this); + }; + + /** + * @license + * Copyright 2021 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + getGlobalTensorClass().prototype.euclideanNorm = function (axis, keepDims) { + this.throwIfDisposed(); + return euclideanNorm(this, axis, keepDims); + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. 
+ * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + getGlobalTensorClass().prototype.exp = function () { + this.throwIfDisposed(); + return exp$2(this); + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + getGlobalTensorClass().prototype.expandDims = function (axis) { + this.throwIfDisposed(); + return expandDims$3(this, axis); + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + getGlobalTensorClass().prototype.expm1 = function () { + this.throwIfDisposed(); + return expm1$2(this); + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + getGlobalTensorClass().prototype.fft = function () { + this.throwIfDisposed(); + return fft$2(this); + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + /** + * Flatten a Tensor to a 1D array. + * @doc {heading: 'Tensors', subheading: 'Classes'} + */ + getGlobalTensorClass().prototype.flatten = function () { + this.throwIfDisposed(); + return reshape$3(this, [this.size]); + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + getGlobalTensorClass().prototype.floor = function () { + this.throwIfDisposed(); + return floor$2(this); + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ + getGlobalTensorClass().prototype.floorDiv = function (b) { + this.throwIfDisposed(); + return floorDiv$2(this, b); + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + getGlobalTensorClass().prototype.gather = function (indices, axis, batchDims) { + this.throwIfDisposed(); + return gather$1(this, indices, axis, batchDims); + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + getGlobalTensorClass().prototype.greaterEqual = function (b) { + this.throwIfDisposed(); + return greaterEqual$2(this, b); + }; + + /** + * @license + * Copyright 2020 Google LLC. 
All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + getGlobalTensorClass().prototype.greater = function (b) { + this.throwIfDisposed(); + return greater$3(this, b); + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + getGlobalTensorClass().prototype.ifft = function () { + this.throwIfDisposed(); + return ifft$2(this); + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + getGlobalTensorClass().prototype.irfft = function () { + this.throwIfDisposed(); + return irfft(this); + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + getGlobalTensorClass().prototype.isFinite = function () { + this.throwIfDisposed(); + return isFinite$3(this); + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + getGlobalTensorClass().prototype.isInf = function () { + this.throwIfDisposed(); + return isInf$2(this); + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + getGlobalTensorClass().prototype.isNaN = function () { + this.throwIfDisposed(); + return isNaN$3(this); + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ + getGlobalTensorClass().prototype.leakyRelu = function (alpha) { + this.throwIfDisposed(); + return leakyRelu$2(this, alpha); + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + getGlobalTensorClass().prototype.lessEqual = function (b) { + this.throwIfDisposed(); + return lessEqual$2(this, b); + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + getGlobalTensorClass().prototype.less = function (b) { + this.throwIfDisposed(); + return less$3(this, b); + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. 
+ * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + getGlobalTensorClass().prototype.localResponseNormalization = + function (depthRadius, bias, alpha, beta) { + this.throwIfDisposed(); + return localResponseNormalization(this, depthRadius, bias, alpha, beta); + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + getGlobalTensorClass().prototype.logSigmoid = function () { + this.throwIfDisposed(); + return logSigmoid(this); + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + getGlobalTensorClass().prototype.logSoftmax = function (axis) { + this.throwIfDisposed(); + return logSoftmax(this, axis); + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + getGlobalTensorClass().prototype.logSumExp = function (axis, keepDims) { + this.throwIfDisposed(); + return logSumExp(this, axis, keepDims); + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + getGlobalTensorClass().prototype.log = function () { + this.throwIfDisposed(); + return log$2(this); + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + getGlobalTensorClass().prototype.log1p = function () { + this.throwIfDisposed(); + return log1p$2(this); + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + getGlobalTensorClass().prototype.logicalAnd = function (b) { + this.throwIfDisposed(); + return logicalAnd$2(this, b); + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + getGlobalTensorClass().prototype.logicalNot = function () { + this.throwIfDisposed(); + return logicalNot$2(this); + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ + getGlobalTensorClass().prototype.logicalOr = function (b) { + this.throwIfDisposed(); + return logicalOr$2(this, b); + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + getGlobalTensorClass().prototype.logicalXor = function (b) { + this.throwIfDisposed(); + return logicalXor(this, b); + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ + getGlobalTensorClass().prototype.matMul = function (b, transposeA, transposeB) { + this.throwIfDisposed(); + return matMul$1(this, b, transposeA, transposeB); + }; + + getGlobalTensorClass().prototype.maxPool = + function (filterSize, strides, pad, dimRoundingMode) { + this.throwIfDisposed(); + return maxPool$2(this, filterSize, strides, pad, dimRoundingMode); + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + getGlobalTensorClass().prototype.max = function (axis, keepDims) { + this.throwIfDisposed(); + return max$3(this, axis, keepDims); + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ + getGlobalTensorClass().prototype.maximum = function (b) { + this.throwIfDisposed(); + return maximum$4(this, b); + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + getGlobalTensorClass().prototype.mean = function (axis, keepDims) { + this.throwIfDisposed(); + return mean$3(this, axis, keepDims); + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + getGlobalTensorClass().prototype.min = function (axis, keepDims) { + this.throwIfDisposed(); + return min$3(this, axis, keepDims); + }; + + /** + * @license + * Copyright 2020 Google LLC. 
All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + getGlobalTensorClass().prototype.minimum = function (b) { + this.throwIfDisposed(); + return minimum$4(this, b); + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + getGlobalTensorClass().prototype.mirrorPad = function (paddings, mode) { + this.throwIfDisposed(); + return mirrorPad$1(this, paddings, mode); + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + getGlobalTensorClass().prototype.mod = function (b) { + this.throwIfDisposed(); + return mod$2(this, b); + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + getGlobalTensorClass().prototype.mul = function (b) { + this.throwIfDisposed(); + return mul(this, b); + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + getGlobalTensorClass().prototype.neg = function () { + this.throwIfDisposed(); + return neg$2(this); + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + getGlobalTensorClass().prototype.norm = function (ord, axis, keepDims) { + this.throwIfDisposed(); + return norm(this, ord, axis, keepDims); + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ + getGlobalTensorClass().prototype.notEqual = function (b) { + this.throwIfDisposed(); + return notEqual$2(this, b); + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + getGlobalTensorClass().prototype.oneHot = function (depth, onValue = 1, offValue = 0) { + this.throwIfDisposed(); + return oneHot$3(this, depth, onValue, offValue); + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + getGlobalTensorClass().prototype.onesLike = function () { + this.throwIfDisposed(); + return onesLike$3(this); + }; + + /** + * @license + * Copyright 2020 Google LLC. 
All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + getGlobalTensorClass().prototype.pad = function (paddings, constantValue) { + this.throwIfDisposed(); + return pad(this, paddings, constantValue); + }; + + getGlobalTensorClass().prototype.pool = function (windowShape, poolingType, padding, dilationRate, strides, dimRoundingMode) { + this.throwIfDisposed(); + return pool$1(this, windowShape, poolingType, padding, dilationRate, strides, dimRoundingMode); + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + getGlobalTensorClass().prototype.pow = function (exp) { + this.throwIfDisposed(); + return pow$3(this, exp); + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. 
+ * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + getGlobalTensorClass().prototype.prelu = function (alpha) { + this.throwIfDisposed(); + return prelu$3(this, alpha); + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + getGlobalTensorClass().prototype.prod = function (axis, keepDims) { + this.throwIfDisposed(); + return prod$2(this, axis, keepDims); + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + getGlobalTensorClass().prototype.reciprocal = function () { + this.throwIfDisposed(); + return reciprocal$2(this); + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + getGlobalTensorClass().prototype.relu = function () { + this.throwIfDisposed(); + return relu$2(this); + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + getGlobalTensorClass().prototype.relu6 = function () { + this.throwIfDisposed(); + return relu6$2(this); + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + /** + * Reshapes the tensor into the shape of the provided tensor. + * + * @param x The tensor of required shape. + * + * @doc {heading: 'Tensors', subheading: 'Classes'} + */ + getGlobalTensorClass().prototype.reshapeAs = function (x) { + this.throwIfDisposed(); + return reshape$3(this, x.shape); + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ + getGlobalTensorClass().prototype.reshape = function (shape) { + this.throwIfDisposed(); + return reshape$3(this, shape); + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + getGlobalTensorClass().prototype.resizeBilinear = + function (newShape2D, alignCorners, halfPixelCenters) { + this.throwIfDisposed(); + return resizeBilinear$3(this, newShape2D, alignCorners, halfPixelCenters); + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ + getGlobalTensorClass().prototype.resizeNearestNeighbor = + function (newShape2D, alignCorners, halfFloatCenters) { + this.throwIfDisposed(); + return resizeNearestNeighbor$2(this, newShape2D, alignCorners, halfFloatCenters); + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + getGlobalTensorClass().prototype.reverse = function (axis) { + this.throwIfDisposed(); + return reverse$2(this, axis); + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ + getGlobalTensorClass().prototype.rfft = function () { + this.throwIfDisposed(); + return rfft(this); + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + getGlobalTensorClass().prototype.round = function () { + this.throwIfDisposed(); + return round$2(this); + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + getGlobalTensorClass().prototype.rsqrt = function () { + this.throwIfDisposed(); + return rsqrt$2(this); + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. 
+ * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + getGlobalTensorClass().prototype.selu = function () { + this.throwIfDisposed(); + return selu$2(this); + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + getGlobalTensorClass().prototype.separableConv2d = + function (depthwiseFilter, pointwiseFilter, strides, pad, dilation, dataFormat) { + this.throwIfDisposed(); + return separableConv2d$1(this, depthwiseFilter, pointwiseFilter, strides, pad, dilation, dataFormat); + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + getGlobalTensorClass().prototype.sigmoid = function () { + this.throwIfDisposed(); + return sigmoid$2(this); + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + getGlobalTensorClass().prototype.sign = function () { + this.throwIfDisposed(); + return sign$3(this); + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + getGlobalTensorClass().prototype.sin = function () { + this.throwIfDisposed(); + return sin$2(this); + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + getGlobalTensorClass().prototype.sinh = function () { + this.throwIfDisposed(); + return sinh$2(this); + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ + getGlobalTensorClass().prototype.slice = function (begin, size) { + this.throwIfDisposed(); + return slice$2(this, begin, size); + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + getGlobalTensorClass().prototype.softmax = function (dim) { + this.throwIfDisposed(); + return softmax$3(this, dim); + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + getGlobalTensorClass().prototype.softplus = function () { + this.throwIfDisposed(); + return softplus$2(this); + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. 
+ * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + getGlobalTensorClass().prototype.spaceToBatchND = function (blockShape, paddings) { + this.throwIfDisposed(); + return spaceToBatchND$2(this, blockShape, paddings); + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + getGlobalTensorClass().prototype.split = function (numOrSizeSplits, axis) { + this.throwIfDisposed(); + return split$3(this, numOrSizeSplits, axis); + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + getGlobalTensorClass().prototype.sqrt = function () { + this.throwIfDisposed(); + return sqrt$2(this); + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + getGlobalTensorClass().prototype.square = function () { + this.throwIfDisposed(); + return square$2(this); + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + getGlobalTensorClass().prototype.squaredDifference = function (b) { + this.throwIfDisposed(); + return squaredDifference$2(this, b); + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + getGlobalTensorClass().prototype.squeeze = function (axis) { + this.throwIfDisposed(); + return squeeze(this, axis); + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ + getGlobalTensorClass().prototype.stack = function (x, axis) { + this.throwIfDisposed(); + const tensorsToBeStacked = x instanceof Tensor ? [this, x] : [this, ...x]; + return stack(tensorsToBeStacked, axis); + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + getGlobalTensorClass().prototype.step = function (alpha) { + this.throwIfDisposed(); + return step$2(this, alpha); + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ + getGlobalTensorClass().prototype.stridedSlice = function (begin, end, strides, beginMask, endMask, ellipsisMask, newAxisMask, shrinkAxisMask) { + this.throwIfDisposed(); + return stridedSlice$2(this, begin, end, strides, beginMask, endMask, ellipsisMask, newAxisMask, shrinkAxisMask); + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + getGlobalTensorClass().prototype.sub = function (b) { + this.throwIfDisposed(); + return sub$2(this, b); + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ + getGlobalTensorClass().prototype.sum = function (axis, keepDims) { + this.throwIfDisposed(); + return sum$3(this, axis, keepDims); + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + getGlobalTensorClass().prototype.tan = function () { + this.throwIfDisposed(); + return tan$2(this); + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + getGlobalTensorClass().prototype.tanh = function () { + this.throwIfDisposed(); + return tanh$2(this); + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. 
+ * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + getGlobalTensorClass().prototype.tile = function (reps) { + this.throwIfDisposed(); + return tile$3(this, reps); + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + /** + * Casts the array to type `bool` + * + * @doc {heading: 'Tensors', subheading: 'Classes'} + */ + getGlobalTensorClass().prototype.toBool = function () { + this.throwIfDisposed(); + return cast$3(this, 'bool'); + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + /** + * Casts the array to type `float32` + * + * @doc {heading: 'Tensors', subheading: 'Classes'} + */ + getGlobalTensorClass().prototype.toFloat = function () { + this.throwIfDisposed(); + return cast$3(this, 'float32'); + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + /** + * Casts the array to type `int32` + * + * @doc {heading: 'Tensors', subheading: 'Classes'} + */ + getGlobalTensorClass().prototype.toInt = function () { + this.throwIfDisposed(); + return cast$3(this, 'int32'); + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + getGlobalTensorClass().prototype.topk = function (k, sorted) { + this.throwIfDisposed(); + return topk(this, k, sorted); + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + getGlobalTensorClass().prototype.transpose = function (perm) { + this.throwIfDisposed(); + return transpose$2(this, perm); + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + getGlobalTensorClass().prototype.unique = function (axis) { + this.throwIfDisposed(); + return unique$3(this, axis); + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + getGlobalTensorClass().prototype.unsortedSegmentSum = + function (segmentIds, numSegments) { + this.throwIfDisposed(); + return unsortedSegmentSum$2(this, segmentIds, numSegments); + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + getGlobalTensorClass().prototype.unstack = function (axis) { + this.throwIfDisposed(); + return unstack(this, axis); + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + getGlobalTensorClass().prototype.where = function (condition, x) { + this.throwIfDisposed(); + return where(condition, this, x); + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + getGlobalTensorClass().prototype.zerosLike = function () { + this.throwIfDisposed(); + return zerosLike$3(this); + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + + /** + * @license + * Copyright 2018 Google LLC + * + * Use of this source code is governed by an MIT-style + * license that can be found in the LICENSE file or at + * https://opensource.org/licenses/MIT. + * ============================================================================= + */ + /** + * Explicit error types. 
+ * + * See the following link for more information about why the code includes + * calls to setPrototypeOf: + * + * https://github.com/Microsoft/TypeScript-wiki/blob/master/Breaking-Changes.md#extending-built-ins-like-error-array-and-map-may-no-longer-work + */ + // tslint:enable + /** + * Equivalent of Python's AttributeError. + */ + class AttributeError extends Error { + constructor(message) { + super(message); + // Set the prototype explicitly. + Object.setPrototypeOf(this, AttributeError.prototype); + } + } + /** + * Equivalent of Python's RuntimeError. + */ + class RuntimeError extends Error { + constructor(message) { + super(message); + // Set the prototype explicitly. + Object.setPrototypeOf(this, RuntimeError.prototype); + } + } + /** + * Equivalent of Python's ValueError. + */ + class ValueError extends Error { + constructor(message) { + super(message); + // Set the prototype explicitly. + Object.setPrototypeOf(this, ValueError.prototype); + } + } + /** + * Equivalent of Python's NotImplementedError. + */ + class NotImplementedError extends Error { + constructor(message) { + super(message); + // Set the prototype explicitly. + Object.setPrototypeOf(this, NotImplementedError.prototype); + } + } + /** + * Equivalent of Python's AssertionError. + */ + class AssertionError extends Error { + constructor(message) { + super(message); + // Set the prototype explicitly. + Object.setPrototypeOf(this, AssertionError.prototype); + } + } + /** + * Equivalent of Python's IndexError. + */ + class IndexError extends Error { + constructor(message) { + super(message); + // Set the prototype explicitly. + Object.setPrototypeOf(this, IndexError.prototype); + } + } + + /** + * @license + * Copyright 2022 Google LLC + * + * Use of this source code is governed by an MIT-style + * license that can be found in the LICENSE file or at + * https://opensource.org/licenses/MIT. 
+ * ============================================================================= + */ + /** + * LruCache: A mapping from the String to T. If the number of the entries is + * exceeding the `maxEntries`, the LruCache will delete the least recently + * used entry. + */ + class LruCache { + constructor(maxEntries) { + this.maxEntries = maxEntries || 100; + this.cache = new Map(); + } + /** + * Get the entry for the key and mark it as used recently. + */ + get(key) { + let entry; + if (this.cache.has(key)) { + entry = this.cache.get(key); + this.cache.delete(key); + this.cache.set(key, entry); + } + return entry; + } + /** + * Put the entry into the cache. If the key already existed, mark the key as + * used recently. + */ + put(key, value) { + if (this.cache.has(key)) { + this.cache.delete(key); + } + else if (this.cache.size >= this.maxEntries) { + const keyToDelete = this.cache.keys().next().value; + this.cache.delete(keyToDelete); + } + this.cache.set(key, value); + } + /** + * Get the MaxEntries of the cache. + */ + getMaxEntries() { + return this.maxEntries; + } + /** + * Set the MaxEntries of the cache. If the maxEntries is decreased, reduce + * entries in the cache. + */ + setMaxEntries(maxEntries) { + if (maxEntries < 0) { + throw new Error(`The maxEntries of LRU caches must be at least 0, but got ${maxEntries}.`); + } + if (this.maxEntries > maxEntries) { + for (let i = 0; i < this.maxEntries - maxEntries; i++) { + const keyToDelete = this.cache.keys().next().value; + this.cache.delete(keyToDelete); + } + } + this.maxEntries = maxEntries; + } + } + + /** + * @license + * Copyright 2018 Google LLC + * + * Use of this source code is governed by an MIT-style + * license that can be found in the LICENSE file or at + * https://opensource.org/licenses/MIT. + * ============================================================================= + */ + // tslint:enable + /** + * If `value` is an Array, equivalent to Python's `value * numValues`. 
+ * If `value` is not an Array, equivalent to Python's `[value] * numValues` + */ + // tslint:disable-next-line:no-any + function pyListRepeat(value, numValues) { + if (Array.isArray(value)) { + // tslint:disable-next-line:no-any + let newArray = []; + for (let i = 0; i < numValues; i++) { + newArray = newArray.concat(value); + } + return newArray; + } + else { + const newArray = new Array(numValues); + newArray.fill(value); + return newArray; + } + } + function assert(val, message) { + if (!val) { + throw new AssertionError(message); + } + } + /** + * Count the number of elements of the `array` that are equal to `reference`. + */ + function count(array, refernce) { + let counter = 0; + for (const item of array) { + if (item === refernce) { + counter++; + } + } + return counter; + } + /** + * If an array is of length 1, just return the first element. Otherwise, return + * the full array. + * @param tensors + */ + function singletonOrArray(xs) { + if (xs.length === 1) { + return xs[0]; + } + return xs; + } + /** + * Normalizes a list/tensor into a list. + * + * If a tensor is passed, we return + * a list of size 1 containing the tensor. + * + * @param x target object to be normalized. + */ + // tslint:disable-next-line:no-any + function toList(x) { + if (Array.isArray(x)) { + return x; + } + return [x]; + } + /** + * Generate a UID for a list + */ + // tslint:disable-next-line:no-any + function objectListUid(objs) { + const objectList = toList(objs); + let retVal = ''; + for (const obj of objectList) { + if (obj.id == null) { + throw new ValueError(`Object ${obj} passed to objectListUid without an id`); + } + if (retVal !== '') { + retVal = retVal + ', '; + } + retVal = `${retVal}${Math.abs(obj.id)}`; + } + return retVal; + } + /** + * Converts string to snake-case. 
+ * @param name + */ + function toSnakeCase(name) { + const intermediate = name.replace(/(.)([A-Z][a-z0-9]+)/g, '$1_$2'); + const insecure = intermediate.replace(/([a-z])([A-Z])/g, '$1_$2').toLowerCase(); + /* + If the class is private the name starts with "_" which is not secure + for creating scopes. We prefix the name with "private" in this case. + */ + if (insecure[0] !== '_') { + return insecure; + } + return 'private' + insecure; + } + function toCamelCase(identifier) { + // quick return for empty string or single character strings + if (identifier.length <= 1) { + return identifier; + } + // Check for the underscore indicating snake_case + if (identifier.indexOf('_') === -1) { + return identifier; + } + return identifier.replace(/[_]+(\w|$)/g, (m, p1) => p1.toUpperCase()); + } + // tslint:disable-next-line:no-any + let _GLOBAL_CUSTOM_OBJECTS = {}; + function serializeKerasObject(instance) { + if (instance === null || instance === undefined) { + return null; + } + const dict = {}; + dict['className'] = instance.getClassName(); + dict['config'] = instance.getConfig(); + return dict; + } + /** + * Replace ndarray-style scalar objects in serialization objects with numbers. + * + * Background: In some versions of tf.keras, certain scalar values in the HDF5 + * model save file can be serialized as: `{'type': 'ndarray', 'value': num}`, + * where in `num` is a plain number. This method converts such serialization + * to a `number`. + * + * @param config The keras-format serialization object to be processed + * (in place). 
+ */ + function convertNDArrayScalarsInConfig(config) { + if (config == null || typeof config !== 'object') { + return; + } + else if (Array.isArray(config)) { + config.forEach(configItem => convertNDArrayScalarsInConfig(configItem)); + } + else { + const fields = Object.keys(config); + for (const field of fields) { + const value = config[field]; + if (value != null && typeof value === 'object') { + if (!Array.isArray(value) && value['type'] === 'ndarray' && + typeof value['value'] === 'number') { + config[field] = value['value']; + } + else { + convertNDArrayScalarsInConfig(value); + } + } + } + } + } + /** + * Deserialize a saved Keras Object + * @param identifier either a string ID or a saved Keras dictionary + * @param moduleObjects a list of Python class names to object constructors + * @param customObjects a list of Python class names to object constructors + * @param printableModuleName debug text for the object being reconstituted + * @param fastWeightInit Optional flag to use fast weight initialization + * during deserialization. This is applicable to cases in which + * the initialization will be immediately overwritten by loaded weight + * values. Default: `false`. + * @returns a TensorFlow.js Layers object + */ + // tslint:disable:no-any + function deserializeKerasObject(identifier, moduleObjects = {}, customObjects = {}, printableModuleName = 'object', fastWeightInit = false) { + // tslint:enable + if (typeof identifier === 'string') { + const functionName = identifier; + let fn; + if (functionName in customObjects) { + fn = customObjects[functionName]; + } + else if (functionName in _GLOBAL_CUSTOM_OBJECTS) { + fn = _GLOBAL_CUSTOM_OBJECTS[functionName]; + } + else { + fn = moduleObjects[functionName]; + if (fn == null) { + throw new ValueError(`Unknown ${printableModuleName}: ${identifier}. ` + + `This may be due to one of the following reasons:\n` + + `1. 
The ${printableModuleName} is defined in Python, in which ` + + `case it needs to be ported to TensorFlow.js or your JavaScript ` + + `code.\n` + + `2. The custom ${printableModuleName} is defined in JavaScript, ` + + `but is not registered properly with ` + + `tf.serialization.registerClass().`); + // TODO(cais): Add link to tutorial page on custom layers. + } + } + return fn; + } + else { + // In this case we are dealing with a Keras config dictionary. + const config = identifier; + if (config['className'] == null || config['config'] == null) { + throw new ValueError(`${printableModuleName}: Improper config format: ` + + `${JSON.stringify(config)}.\n` + + `'className' and 'config' must set.`); + } + const className = config['className']; + let cls, fromConfig; + if (className in customObjects) { + [cls, fromConfig] = customObjects[className]; + } + else if (className in _GLOBAL_CUSTOM_OBJECTS) { + [cls, fromConfig] = _GLOBAL_CUSTOM_OBJECTS['className']; + } + else if (className in moduleObjects) { + [cls, fromConfig] = moduleObjects[className]; + } + if (cls == null) { + throw new ValueError(`Unknown ${printableModuleName}: ${className}. ` + + `This may be due to one of the following reasons:\n` + + `1. The ${printableModuleName} is defined in Python, in which ` + + `case it needs to be ported to TensorFlow.js or your JavaScript ` + + `code.\n` + + `2. The custom ${printableModuleName} is defined in JavaScript, ` + + `but is not registered properly with ` + + `tf.serialization.registerClass().`); + // TODO(cais): Add link to tutorial page on custom layers. + } + if (fromConfig != null) { + // Porting notes: Instead of checking to see whether fromConfig accepts + // customObjects, we create a customObjects dictionary and tack it on to + // config['config'] as config['config'].customObjects. Objects can use it, + // if they want. 
+ // tslint:disable-next-line:no-any + const customObjectsCombined = {}; + for (const key of Object.keys(_GLOBAL_CUSTOM_OBJECTS)) { + customObjectsCombined[key] = _GLOBAL_CUSTOM_OBJECTS[key]; + } + for (const key of Object.keys(customObjects)) { + customObjectsCombined[key] = customObjects[key]; + } + // Add the customObjects to config + const nestedConfig = config['config']; + nestedConfig['customObjects'] = customObjectsCombined; + const backupCustomObjects = Object.assign({}, _GLOBAL_CUSTOM_OBJECTS); + for (const key of Object.keys(customObjects)) { + _GLOBAL_CUSTOM_OBJECTS[key] = customObjects[key]; + } + convertNDArrayScalarsInConfig(config['config']); + const returnObj = fromConfig(cls, config['config'], customObjects, fastWeightInit); + _GLOBAL_CUSTOM_OBJECTS = Object.assign({}, backupCustomObjects); + return returnObj; + } + else { + // Then `cls` may be a function returning a class. + // In this case by convention `config` holds + // the kwargs of the function. + const backupCustomObjects = Object.assign({}, _GLOBAL_CUSTOM_OBJECTS); + for (const key of Object.keys(customObjects)) { + _GLOBAL_CUSTOM_OBJECTS[key] = customObjects[key]; + } + // In python this is **config['config'], for tfjs-layers we require + // classes that use this fall-through construction method to take + // a config interface that mimics the expansion of named parameters. + const returnObj = new cls(config['config']); + _GLOBAL_CUSTOM_OBJECTS = Object.assign({}, backupCustomObjects); + return returnObj; + } + } + } + /** + * Compares two numbers for sorting. + * @param a + * @param b + */ + function numberCompare(a, b) { + return (a < b) ? -1 : ((a > b) ? 1 : 0); + } + /** + * Comparison of two numbers for reverse sorting. + * @param a + * @param b + */ + function reverseNumberCompare(a, b) { + return -1 * numberCompare(a, b); + } + /** + * Convert a string into the corresponding DType. + * @param dtype + * @returns An instance of DType. 
+ */ + function stringToDType(dtype) { + switch (dtype) { + case 'float32': + return 'float32'; + default: + throw new ValueError(`Invalid dtype: ${dtype}`); + } + } + /** + * Test the element-by-element equality of two Arrays of strings. + * @param xs First array of strings. + * @param ys Second array of strings. + * @returns Wether the two arrays are all equal, element by element. + */ + function stringsEqual(xs, ys) { + if (xs == null || ys == null) { + return xs === ys; + } + if (xs.length !== ys.length) { + return false; + } + for (let i = 0; i < xs.length; ++i) { + if (xs[i] !== ys[i]) { + return false; + } + } + return true; + } + /** + * Get the unique elements of an array. + * @param xs Array. + * @returns An Array consisting of the unique elements in `xs`. + */ + function unique$2(xs) { + if (xs == null) { + return xs; + } + const out = []; + // TODO(cais): Maybe improve performance by sorting. + for (const x of xs) { + if (out.indexOf(x) === -1) { + out.push(x); + } + } + return out; + } + /** + * Determine if an Object is empty (i.e., does not have own properties). + * @param obj Object + * @returns Whether the Object is empty. + * @throws ValueError: If object is `null` or `undefined`. + */ + function isObjectEmpty(obj) { + if (obj == null) { + throw new ValueError(`Invalid value in obj: ${JSON.stringify(obj)}`); + } + for (const key in obj) { + if (obj.hasOwnProperty(key)) { + return false; + } + } + return true; + } + /** + * Helper function used to build type union/enum run-time checkers. + * @param values The list of allowed values. + * @param label A string name for the type + * @param value The value to test. + * @throws ValueError: If the value is not in values nor `undefined`/`null`. + */ + function checkStringTypeUnionValue(values, label, value) { + if (value == null) { + return; + } + if (values.indexOf(value) < 0) { + throw new ValueError(`${value} is not a valid ${label}. 
Valid values are ${values} or null/undefined.`); + } + } + /** + * Helper function for verifying the types of inputs. + * + * Ensures that the elements of `x` are all of type `expectedType`. + * Also verifies that the length of `x` is within bounds. + * + * @param x Object to test. + * @param expectedType The string expected type of all of the elements in the + * Array. + * @param minLength Return false if x.length is less than this. + * @param maxLength Return false if x.length is greater than this. + * @returns true if and only if `x` is an `Array` with + * length >= `minLength` and <= `maxLength`. + */ + // tslint:disable:no-any + function checkArrayTypeAndLength(x, expectedType, minLength = 0, maxLength = Infinity) { + assert(minLength >= 0); + assert(maxLength >= minLength); + return (Array.isArray(x) && x.length >= minLength && x.length <= maxLength && + x.every(e => typeof e === expectedType)); + } + // tslint:enable:no-any + /** + * Assert that a value or an array of value are positive integer. + * + * @param value The value being asserted on. May be a single number or an array + * of numbers. + * @param name Name of the value, used to make the error message. + */ + function assertPositiveInteger(value, name) { + if (Array.isArray(value)) { + assert$1(value.length > 0, () => `${name} is unexpectedly an empty array.`); + value.forEach((v, i) => assertPositiveInteger(v, `element ${i + 1} of ${name}`)); + } + else { + assert$1(Number.isInteger(value) && value > 0, () => `Expected ${name} to be a positive integer, but got ` + + `${formatAsFriendlyString(value)}.`); + } + } + /** + * Format a value into a display-friendly, human-readable fashion. + * + * - `null` is formatted as `'null'` + * - Strings are formated with flanking pair of quotes. + * - Arrays are formatted with flanking pair of square brackets. + * + * @param value The value to display. + * @return Formatted string. 
+ */ + // tslint:disable-next-line:no-any + function formatAsFriendlyString(value) { + if (value === null) { + return 'null'; + } + else if (Array.isArray(value)) { + return '[' + value.map(v => formatAsFriendlyString(v)).join(',') + ']'; + } + else if (typeof value === 'string') { + return `"${value}"`; + } + else { + return `${value}`; + } + } + /** + * Returns a function `f2` (decorator) which wraps the original function + * `f`. `f2` guarantees that `f` can be called at most once + * every `waitMs` ms. If `f2` is called more often, it will return + * the last returned result of `f`. + * + * @param f The original function `f` to wrap. + * @param waitMs The time between two consecutive calls to `f` in ms. + */ + function debounce(f, waitMs, nowFunc) { + let lastTime = nowFunc != null ? nowFunc() : now(); + let lastResult; + const f2 = (...args) => { + const now$1 = nowFunc != null ? nowFunc() : now(); + if (now$1 - lastTime < waitMs) { + return lastResult; + } + lastTime = now$1; + lastResult = f(...args); + return lastResult; + }; + return f2; + } + /** + * Returns the fusable activation given a layers identifier. + * + * @param activationName The layers identifier string. + * @return The name of the fusable activation. + */ + function mapActivationToFusedKernel(activationName) { + if (activationName === 'relu') { + return 'relu'; + } + if (activationName === 'linear') { + return 'linear'; + } + if (activationName === 'elu') { + return 'elu'; + } + return null; + } + /** + * Returns the cartesian product of sets of values. + * This works the same as itertools.product in Python. + * + * Example: + * + * filters = [128, 256, 512] + * paddings = ['same', 'valid'] + * + * product = [ [128, 'same'], [128, 'valid'], [256, 'same'], [256, 'valid'], + * [512, 'same'], [512, 'valid']] + * + * @param arrayOfValues List/array of values. + * @return The cartesian product. 
+ */ + function getCartesianProductOfValues(...arrayOfValues) { + assert(arrayOfValues.length > 0, 'arrayOfValues is empty'); + for (const values of arrayOfValues) { + assert(Array.isArray(values), 'one of the values is not an array'); + assert(values.length > 0, 'one of the values is empty'); + } + return arrayOfValues.reduce((products, values) => { + if (products.length === 0) { + return values.map(value => [value]); + } + return values + .map(value => { + return products.map((prevValue) => [...prevValue, value]); + }) + .reduce((flattenedProduct, unflattenedProduct) => { + return flattenedProduct.concat(unflattenedProduct); + }, []); + }, []); + } + + /** + * @license + * Copyright 2018 Google LLC + * + * Use of this source code is governed by an MIT-style + * license that can be found in the LICENSE file or at + * https://opensource.org/licenses/MIT. + * ============================================================================= + */ + /** + * Utilities related to persistent state in the backend. + */ + /** + * An ID to track `tf.SymbolicTensor`s and derived classes. + * Required in different places in engine/topology.ts to identify unique + * tensors. + */ + let _nextUniqueTensorId = 0; + function getNextUniqueTensorId() { + return _nextUniqueTensorId++; + } + const _uidPrefixes = {}; + /** + * Provides a unique UID given a string prefix. + * + * @param prefix + */ + function getUid(prefix = '') { + if (!(prefix in _uidPrefixes)) { + _uidPrefixes[prefix] = 0; + } + _uidPrefixes[prefix] += 1; + return prefix + _uidPrefixes[prefix].toString(); + } + + /** + * @license + * Copyright 2018 Google LLC + * + * Use of this source code is governed by an MIT-style + * license that can be found in the LICENSE file or at + * https://opensource.org/licenses/MIT. 
+ * ============================================================================= + */ + const VALID_DATA_FORMAT_VALUES = ['channelsFirst', 'channelsLast']; + const VALID_INTERPOLATION_FORMAT_VALUES = ['nearest', 'bilinear']; + const VALID_PADDING_MODE_VALUES = ['valid', 'same', 'causal']; + const VALID_POOL_MODE_VALUES = ['max', 'avg']; + const VALID_BIDIRECTIONAL_MERGE_MODES = ['sum', 'mul', 'concat', 'ave']; + const VALID_SAMPLE_WEIGHT_MODES = ['temporal']; + + /** + * @license + * Copyright 2018 Google LLC + * + * Use of this source code is governed by an MIT-style + * license that can be found in the LICENSE file or at + * https://opensource.org/licenses/MIT. + * ============================================================================= + */ + // A map from the requested scoped name of a Tensor to the number of Tensors + // wanting that name so far. This allows enforcing name uniqueness by appending + // an incrementing index, e.g. scope/name, scope/name_1, scope/name_2, etc. + const nameMap = new Map(); + function checkDataFormat(value) { + checkStringTypeUnionValue(VALID_DATA_FORMAT_VALUES, 'DataFormat', value); + } + function checkInterpolationFormat(value) { + checkStringTypeUnionValue(VALID_INTERPOLATION_FORMAT_VALUES, 'InterpolationFormat', value); + } + function checkPaddingMode(value) { + checkStringTypeUnionValue(VALID_PADDING_MODE_VALUES, 'PaddingMode', value); + } + function checkPoolMode(value) { + checkStringTypeUnionValue(VALID_POOL_MODE_VALUES, 'PoolMode', value); + } + const _nameScopeStack = []; + const _nameScopeDivider = '/'; + /** + * Enter namescope, which can be nested. + */ + function nameScope(name, fn) { + _nameScopeStack.push(name); + try { + const val = fn(); + _nameScopeStack.pop(); + return val; + } + catch (e) { + _nameScopeStack.pop(); + throw e; + } + } + /** + * Get the current namescope as a flat, concatenated string. 
+ */ + function currentNameScopePrefix() { + if (_nameScopeStack.length === 0) { + return ''; + } + else { + return _nameScopeStack.join(_nameScopeDivider) + _nameScopeDivider; + } + } + /** + * Get the name a Tensor (or Variable) would have if not uniqueified. + * @param tensorName + * @return Scoped name string. + */ + function getScopedTensorName(tensorName) { + if (!isValidTensorName(tensorName)) { + throw new Error('Not a valid tensor name: \'' + tensorName + '\''); + } + return currentNameScopePrefix() + tensorName; + } + /** + * Get unique names for Tensors and Variables. + * @param scopedName The fully-qualified name of the Tensor, i.e. as produced by + * `getScopedTensorName()`. + * @return A unique version of the given fully scoped name. + * If this is the first time that the scoped name is seen in this session, + * then the given `scopedName` is returned unaltered. If the same name is + * seen again (producing a collision), an incrementing suffix is added to the + * end of the name, so it takes the form 'scope/name_1', 'scope/name_2', etc. + */ + function getUniqueTensorName(scopedName) { + if (!isValidTensorName(scopedName)) { + throw new Error('Not a valid tensor name: \'' + scopedName + '\''); + } + if (!nameMap.has(scopedName)) { + nameMap.set(scopedName, 0); + } + const index = nameMap.get(scopedName); + nameMap.set(scopedName, nameMap.get(scopedName) + 1); + if (index > 0) { + const result = `${scopedName}_${index}`; + // Mark the composed name as used in case someone wants + // to call getUniqueTensorName("name_1"). + nameMap.set(result, 1); + return result; + } + else { + return scopedName; + } + } + const tensorNameRegex = new RegExp(/^[A-Za-z0-9][-A-Za-z0-9\._\/]*$/); + /** + * Determine whether a string is a valid tensor name. + * @param name + * @returns A Boolean indicating whether `name` is a valid tensor name. 
+ */ + function isValidTensorName(name) { + return !!name.match(tensorNameRegex); + } + + /** + * @license + * Copyright 2018 Google LLC + * + * Use of this source code is governed by an MIT-style + * license that can be found in the LICENSE file or at + * https://opensource.org/licenses/MIT. + * ============================================================================= + */ + /** + * Determine if a number is an integer. + */ + function isInteger(x) { + return x === parseInt(x.toString(), 10); + } + /** + * Calculate the product of an array of numbers. + * @param array The array to calculate the product over. + * @param begin Beginning index, inclusive. + * @param end Ending index, exclusive. + * @return The product. + */ + function arrayProd(array, begin, end) { + if (begin == null) { + begin = 0; + } + if (end == null) { + end = array.length; + } + let prod = 1; + for (let i = begin; i < end; ++i) { + prod *= array[i]; + } + return prod; + } + /** + * Compute minimum value. + * @param array + * @return minimum value. + */ + function min$2(array) { + // same behavior as tf.min() + if (array.length === 0) { + return Number.NaN; + } + let min = Number.POSITIVE_INFINITY; + for (let i = 0; i < array.length; i++) { + const value = array[i]; + if (value < min) { + min = value; + } + } + return min; + } + /** + * Compute maximum value. + * @param array + * @return maximum value + */ + function max$2(array) { + // same behavior as tf.max() + if (array.length === 0) { + return Number.NaN; + } + let max = Number.NEGATIVE_INFINITY; + for (let i = 0; i < array.length; i++) { + const value = array[i]; + if (value > max) { + max = value; + } + } + return max; + } + /** + * Compute sum of array. + * @param array + * @return The sum. + */ + function sum$2(array) { + let sum = 0; + for (let i = 0; i < array.length; i++) { + const value = array[i]; + sum += value; + } + return sum; + } + /** + * Compute mean of array. + * @param array + * @return The mean. 
+ */ + function mean$1(array) { + return sum$2(array) / array.length; + } + /** + * Compute variance of array. + * @param array + * @return The variance. + */ + function variance(array) { + const meanValue = mean$1(array); + const demeaned = array.map((value) => value - meanValue); + let sumSquare = 0; + for (let i = 0; i < demeaned.length; i++) { + const value = demeaned[i]; + sumSquare += value * value; + } + return sumSquare / array.length; + } + /** + * Compute median of array. + * @param array + * @return The median value. + */ + function median(array) { + const arraySorted = array.slice().sort((a, b) => a - b); + const lowIdx = Math.floor((arraySorted.length - 1) / 2); + const highIdx = Math.ceil((arraySorted.length - 1) / 2); + if (lowIdx === highIdx) { + return arraySorted[lowIdx]; + } + return (arraySorted[lowIdx] + arraySorted[highIdx]) / 2; + } + /** + * Generate an array of integers in [begin, end). + * @param begin Beginning integer, inclusive. + * @param end Ending integer, exclusive. + * @returns Range array. + * @throws ValueError, iff `end` < `begin`. + */ + function range$2(begin, end) { + if (end < begin) { + throw new ValueError(`end (${end}) < begin (${begin}) is forbidden.`); + } + const out = []; + for (let i = begin; i < end; ++i) { + out.push(i); + } + return out; + } + + /** + * @license + * Copyright 2018 Google LLC + * + * Use of this source code is governed by an MIT-style + * license that can be found in the LICENSE file or at + * https://opensource.org/licenses/MIT. + * ============================================================================= + */ + let _epsilon; + /** + * Returns the value of the fuzz factor used in numeric expressions. + */ + function epsilon$1() { + if (_epsilon == null) { + _epsilon = backend$1().epsilon(); + } + return _epsilon; + } + /** + * Sets the value of the fuzz factor used in numeric expressions. + * @param e New value of epsilon. 
+ */ + function setEpsilon(e) { + _epsilon = e; + } + /** + * Returns the default image data format convention. + */ + function imageDataFormat() { + return 'channelsLast'; + } + + /** + * @license + * Copyright 2018 Google LLC + * + * Use of this source code is governed by an MIT-style + * license that can be found in the LICENSE file or at + * https://opensource.org/licenses/MIT. + * ============================================================================= + */ + // tslint:enable + /* Setting and getting backend from deeplearn.js. */ + // Default deeplearn.js backend is WebGL (GPU). + let backend = 'webgl'; + function setBackend(requestedBackend) { + setBackend$1(requestedBackend); + backend = requestedBackend; + } + function getBackend() { + return backend; + } + /** + * Indicates whether the backend is operating symbolically. + * + * This function will be used to determine how to interpret user code. If + * it returns true, calls to the backend construct a symbolic graph; if + * it returns false, calls to the backend execute immediately. + */ + function isBackendSymbolic() { + return false; + } + /** + * Get the number of elements in a Tensor. + * @param x The Tensor. + * @return Number of elements in `x`. + */ + function countParams(x) { + const shape = x.shape; + if (shape.length > 0) { + return shape.reduce((a, b) => a * b); + } + else { + // Scalar. + return 1; + } + } + /** + * Casts a tensor to a different dtype and returns it. + * @param x Input tensor. + * @param dtype String: 'float32'|'int32'|'bool'. + * @returns Tensor of the specified `dtype`. + */ + function cast$2(x, dtype) { + return cast$3(x, dtype); + } + /** + * Adds a 1-sized dimension at index "axis". + * @param x Input tensor. + * @param axis Position where to add the new axis. + * @returns Result of the dimension expansion. 
+ */ + function expandDims$2(x, axis = -1) { + const outShape = x.shape.slice(); + if (axis < 0) { + axis = outShape.length + axis + 1; + } + outShape.splice(axis, 0, 1); + return reshape$3(x, outShape); + } + /** + * Repeats a 2D tensor. + * + * If `x` has shape `[samples, dim]` and `n` is 2, for example, the output + * will have shape `[samples, 2, dim]`. + * + * @param x Input tensor. + * @param n Integer, number of times to repeat. + * @returns The result of the repeat operation. + * @throws ValueError: If input tensor is not 2D. + */ + function repeat(x, n) { + return tidy(() => { + if (x.shape.length !== 2) { + throw new ValueError(`repeat() expects a rank-2 tensor, but received a ` + + `rank-${x.shape.length} tensor.`); + } + const y = expandDims$2(x, 1); + return tile$2(y, [1, n, 1]); + }); + } + /** + * Flatten a Tensor into 1D. + * @param x Input tensor. + * @return The result of the flattening `x`. + */ + function flatten$1(x) { + const newShape = [arrayProd(x.shape)]; + return reshape$3(x, newShape); + } + /** + * Turn a nD tensor into a 2D tensor with same 0th dimension. + * In other words, it flattens each data samples of a batch. + * + * @param x The tensor to flatten. The rank of this tensor is required to be 2 + * or higher. + * @return The result of the flattening. + */ + function batchFlatten(x) { + if (x.rank <= 1) { + throw new ValueError(`batchFlatten requires a minimum rank of 2. Got rank: ${x.rank}.`); + } + const newShape = [x.shape[0], arrayProd(x.shape, 1)]; + return reshape$3(x, newShape); + } + /** + * Do slicing along the first axis. + * @param array input `tf.Tensor`. + * @param start starting index, inclusive. + * @param size size of the slice along the first axis. + * @returns result of the slicing. + * @throws ValueError: If `array` is of an unsupported subtype of `tf.Tensor`. 
+ */ + function sliceAlongFirstAxis(array, start, size) { + return tidy(() => { + switch (array.rank) { + case 1: + return slice1d(array, start, size); + case 2: + return slice2d(array, [start, 0], [size, array.shape[1]]); + case 3: + return slice3d(array, [start, 0, 0], [size, array.shape[1], array.shape[2]]); + case 4: + return slice4d(array, [start, 0, 0, 0], [size, array.shape[1], array.shape[2], array.shape[3]]); + case 5: + return slice$2(array, [start, 0, 0, 0, 0], [ + size, array.shape[1], array.shape[2], array.shape[3], array.shape[4] + ]); + case 6: + return slice$2(array, [start, 0, 0, 0, 0, 0], [ + size, array.shape[1], array.shape[2], array.shape[3], array.shape[4], + array.shape[5] + ]); + default: + throw new ValueError(`sliceAlongFirstAxis() received an unsupported tensor rank: ` + + `${array.rank}`); + } + }); + } + /** + * Do slicing along the last axis. + * @param array input `tf.Tensor`. + * @param start starting index, inclusive. + * @param size size of the slice along the last axis. + * @returns result of the slicing. + * @throws ValueError: If `array` is of an unsupported subtype of `tf.Tensor`. + */ + function sliceAlongLastAxis(array, start, size) { + return tidy(() => { + switch (array.rank) { + case 1: + return slice1d(array, start, size); + case 2: + return slice2d(array, [0, start], [array.shape[0], size]); + case 3: + return slice3d(array, [0, 0, start], [array.shape[0], array.shape[1], size]); + case 4: + return slice4d(array, [0, 0, 0, start], [array.shape[0], array.shape[1], array.shape[2], size]); + default: + throw new ValueError(`sliceAlongLastAxis() received an unsupported tensor rank: ` + + `${array.rank}`); + } + }); + } + /** + * Do slicing along the sepcified axis. + * @param array input `tf.Tensor`. + * @param start starting index, inclusive. + * @param size of the slice along the chosen axis. + * @param choose an axis. + * @returns result of the slicing. 
+ * @throws ValueError: If `array` is of an unsupported subtype of `tf.Tensor`. + */ + function sliceAlongAxis(array, start, size, axis) { + return tidy(() => { + switch (array.rank) { + case 1: + return slice1d(array, start, size); + case 2: + switch (axis) { + case 1: + return sliceAlongFirstAxis(array, start, size); + case 2: + return sliceAlongLastAxis(array, start, size); + default: + throw new ValueError(`The axis is not within the rank of the tensor ` + + `${axis}`); + } + case 3: + switch (axis) { + case 1: + return sliceAlongFirstAxis(array, start, size); + case 2: + return slice3d(array, [0, start, 0], [array.shape[0], size, array.shape[2]]); + case 3: + return sliceAlongLastAxis(array, start, size); + default: + throw new ValueError(`The axis is not within the rank of the tensor ` + + `${axis}`); + } + case 4: + switch (axis) { + case 1: + return sliceAlongFirstAxis(array, start, size); + case 2: + return slice4d(array, [0, start, 0, 0], [array.shape[0], size, array.shape[2], array.shape[3]]); + case 3: + return slice4d(array, [0, 0, start, 0], [array.shape[0], array.shape[1], size, array.shape[3]]); + case 4: + return sliceAlongLastAxis(array, start, size); + default: + throw new ValueError(`The axis is not within the rank of the tensor ` + + `${axis}`); + } + default: + throw new ValueError(`sliceAlongLastAxis() received an unsupported tensor rank: ` + + `${array.rank}`); + } + }); + } + /** + * Concatenates a list of tensors alongside the specified axis. + * @param tensors `Array` of tensors to concatenate. + * @param axis Concatenation axis. + * @returns The result of the concatenation. + */ + function concatenate$2(tensors, axis = -1) { + let rank; + if (axis < 0) { + rank = tensors[0].rank; + if (rank !== 0) { + axis = rank; + } + else { + axis = 0; + } + } + if (axis === tensors[0].rank) { + // Porting Note: This is necessary because tfc.concat() requires axis to be + // in the interval [-rank, rank). 
+ axis = -1; + } + // Porting Note: Sparse concat is not supported yet. + return concat$2(tensors, axis); + } + /** + * Concatenate two arrays along the first dimension. + * @param a The 1st `tf.Tensor` to concatenate. + * @param b The 2nd `tf.Tensor` to concatenate. + * @returns Result of the concatenation. + * @throws ValueError: If `a` is of an unsupported subtype of `tf.Tensor`. + */ + function concatAlongFirstAxis(a, b) { + switch (a.rank) { + case 1: + return concat1d([a, b]); + case 2: + return concat2d([a, b], 0); + case 3: + return concat3d([a, b], 0); + case 4: + return concat4d([a, b], 0); + default: + throw new ValueError(`concatAlongFirstAxis() received an unsupported ` + + `tensor rank: ${a.rank}`); + } + } + /** + * Creates a tensor by tiling `x` by `n`. + * @param x A tensor. + * @param n An Array of integers or a single integer. If an Array, the length + * must be the same as the number of dimensions in `x`. If a single integer, + * it will be treated as an Array of length 1. + */ + function tile$2(x, n) { + if (!Array.isArray(n)) { + n = [n]; + } + if (x.rank !== n.length) { + throw new ValueError(`The length of input n (${n.length}) does not match ` + + `the number of dimensions in input x (${x.rank})`); + } + return tile$3(x, n); + } + /* Creation of random tensors. */ + /** + * Get a tensor with normal distribution of values. + * + * @param shape Shape of the tensor. + * @param mean mean value of the normal distribution. + * @param stddev standard deviation of the normal distribution. + * @param dtype + * @param seed + * @return The normal tensor. + */ + function randomNormal$1(shape, mean = 0.0, stddev = 1.0, dtype, seed) { + return randomNormal$2(shape, mean, stddev, dtype, seed); + } + /* Linear Algebra */ + /** + * Multiply two tensors and returns the result as a tensor. + * + * For 2D tensors, this is equivalent to matrix multiplication (matMul). + * For tensors of higher ranks, it follows the Theano behavior, + * (e.g. 
`(2, 3) * (4, 3, 5) -> (2, 4, 5)`). From the Theano documentation: + * + * For N dimensions it is a sum product over the last axis of x and the + * second-to-last of y: + * + * @param a A tensor of at least rank 2. + * @param b A tensor of at least rank 2. + * @param activation (optional) A string identifying the activation + * function. + * @return Result of the dot operation. + */ + function dot$1(a, b, activation, bias) { + if ((a.rank < 2) || (b.rank < 2)) { + throw new NotImplementedError(`dot requires both inputs to be rank >= 2` + + ` but got x shape = ${a.shape} and y shape = ${b.shape}`); + } + if (b.rank >= 3) { + const xLastDim = a.shape.slice(-1)[0]; + const ySecondLastDim = b.shape.slice(-2)[0]; + if (xLastDim !== ySecondLastDim) { + throw new NotImplementedError(`If rank y >= 3, then the second last dim` + + ` of y must equal the last dim of x but got x shape = ${a.shape} and ` + + ` y shape = ${b.shape}`); + } + } + // Handle basic 2D x 2D case. + if ((a.rank === 2) && (b.rank === 2)) { + const transposeA = false; + const transposeB = false; + // tfc.fused.matMul only fuses certain activation functions. Unsupported + // activation functions are treated as 'linear' activations, which is + // equivalent to a no-op. + return matMul({ + a, + b: b, + transposeA, + transposeB, + bias: bias ? reshapeBias(a.rank, bias, imageDataFormat()) : null, + activation + }); + } + else { + // Reshape x into the analogous 2D Tensor. + const aFirstDims = a.shape.slice(); // Holds all but the last dim of x. + const aLastDim = aFirstDims.pop(); + a = reshape$3(a, [-1, aLastDim]); + // Reshape y into the analogous 2D Tensor, and keep track of the + // required dimensions to reproduce the output shape. + const bShape = b.shape.slice(); + const bLastDim = bShape.pop(); + const ySecondLastDim = bShape.pop(); + const yOtherDims = [...bShape, bLastDim]; + // permutation should be like [r-2, 0, 1, 2, ... r-4, r-3, r-1] + // where r is the rank of y. 
+ const perm = Array.from({ length: b.rank }, (_, i) => { + if (i === 0) { + return b.rank - 2; + } + else if (i <= b.rank - 2) { + return i - 1; + } + return i; + }); + b = reshape$3(transpose$2(b, perm), [ySecondLastDim, -1]); + // Multiply x and y as 2D Tensors, and then reshape back to original. + const outputShape = [...aFirstDims, ...yOtherDims]; + const transposeA = false; + const transposeB = false; + return reshape$3(matMul({ + a, + b, + transposeA, + transposeB, + bias: bias ? reshapeBias(a.rank, bias, imageDataFormat()) : null, + activation + }), outputShape); + } + } + /** + * Compute the sign Tensor of an input Tensor. + * + * Elements of the input `tf.Tensor` that are === 0 are mapped to 0. + * Elements of the input `tf.Tensor` that are > 0 are mapped to 1. + * Elements of the input `tf.Tensor` that are < 0 are mapped to -1. + * + * @param x Input `tf.Tensor`. + * @return The sign `tf.Tensor`. + */ + function sign$2(x) { + // TODO(cais): Move to the core. + return tidy(() => { + const zerosLikeX = zerosLike$3(x); + const onesLikeX = onesLike$3(x); + return where(equal$2(x, zerosLikeX), zerosLikeX, where(greater$3(x, zerosLike$3(x)), onesLikeX, mul(-1, onesLikeX))); + }); + } + /** + * Computes the one-hot representation of an integer tensor. + * @param indices nD integer tensor of shape + * `(batch_size, dim1, dim2, ... dim(n-1))` + * @param numClasses Integer, number of classes to consider. + * @returns (n + 1)D one hot representation of the input + * with shape `(batch_size, dim1, dim2, ... dim(n-1), num_classes)` + */ + function oneHot$2(indices, numClasses) { + return tidy(() => { + if (indices.rank !== 1) { + throw new Error('Only 1D one-hot tensors are supported in the ' + + 'deeplearn backend, at present.'); + } + indices = cast$3(indices, 'int32'); + return cast$3(oneHot$3(indices, numClasses), 'float32'); + }); + } + /* Elementary math functions. */ + /** + * Retrieves the elements of indices `indices` in the tensor `reference`. 
+ * @param reference A tensor. + * @param indices An integer tensor of indices or an `Array` of integers. + * @param axis Axis along which to perform the gather operation. + * @returns The result of the gathering as a tensor. + */ + function gather(reference, indices, axis) { + return tidy(() => { + if (Array.isArray(indices)) { + indices = tensor1d(indices, 'int32'); + } + else { + indices = cast$3(indices, 'int32'); + } + return gather$1(reference, indices, axis); + }); + } + /** + * Element-wise square. + * @param x Input tensor. + * @return element-wise x^2 + */ + function square$1(x) { + return mul(x, x); + } + /** + * Element-wise exponentiation. + * + * Porting Note: In PyKeras, `a` (the exponent) is a Python integer, which + * takes advatnage of the backend's (e.g., TensorFlow's) automatic + * conversion to tensor. Here we allow `a` to be either a number or a tensor. + * + * @param x The base tensor. + * @param a The exponent, tensor or number. If a number, it is rounded to the + * nearest integer and converted to a tensor. + * @returns A tensor of the same shape as `x`. + */ + function pow$2(x, a) { + return tidy(() => { + if (typeof (a) === 'number') { + a = scalar(Math.round(a), 'int32'); + } + if (a.dtype !== 'int32') { + throw new NotImplementedError(`Non-int32 dtype (${a.dtype}) is not supported by pow() yet`); + } + return pow$3(x, a); + }); + } + /** + * Reshapes bias tensor according to rank of x. 
+ */ + function reshapeBias(xRank, bias, dataFormat) { + const biasShape = bias.shape; + if (bias.rank !== 1 && bias.rank !== xRank) { + throw new ValueError(`Unexpected bias dimensions: ${bias.rank}` + + `; expected it to be 1 or ${xRank}`); + } + if (xRank === 5) { + if (dataFormat === 'channelsFirst') { + if (biasShape.length === 1) { + return reshape$3(bias, [1, biasShape[0], 1, 1, 1]); + } + else { + return reshape$3(bias, [1, biasShape[3], biasShape[0], biasShape[1], biasShape[2]]); + } + } + else if (dataFormat === 'channelsLast') { + if (biasShape.length === 1) { + return reshape$3(bias, [1, 1, 1, 1, biasShape[0]]); + } + else { + return reshape$3(bias, [1].concat(biasShape)); + } + } + } + else if (xRank === 4) { + if (dataFormat === 'channelsFirst') { + if (biasShape.length === 1) { + return reshape$3(bias, [1, biasShape[0], 1, 1]); + } + else { + return reshape$3(bias, [1, biasShape[2], biasShape[0], biasShape[1]]); + } + } + else if (dataFormat === 'channelsLast') { + if (biasShape.length === 1) { + return reshape$3(bias, [1, 1, 1, biasShape[0]]); + } + else { + return reshape$3(bias, [1].concat(biasShape)); + } + } + } + else if (xRank === 3) { + if (dataFormat === 'channelsFirst') { + if (biasShape.length === 1) { + return reshape$3(bias, [1, biasShape[0], 1]); + } + else { + return reshape$3(bias, [1, biasShape[1], biasShape[0]]); + } + } + else if (dataFormat === 'channelsLast') { + if (biasShape.length === 1) { + return reshape$3(bias, [1, 1, biasShape[0]]); + } + else { + return reshape$3(bias, [1].concat(biasShape)); + } + } + } + else if (xRank < 3) { + return bias; + } + throw new ValueError(`Unsupported input rank by biasAdd: ${bias.rank}`); + } + /* Neural-network operations. */ + /** + * Add a bias to a tensor. + * + * @param x The tensor to add the bias to. + * @param bias The bias to add to `x`. Must be 1D or the same rank as `x`. + * @return Result of the bias adding. + * @throws ValueError: If the rank of `bias` is incorrect. 
+ */ + function biasAdd(x, bias, dataFormat) { + return tidy(() => { + if (dataFormat == null) { + dataFormat = imageDataFormat(); + } + checkDataFormat(dataFormat); + return add$3(x, reshapeBias(x.rank, bias, dataFormat)); + }); + } + /** + * Exponential linear unit (ELU). + * @param x A tensor or variable to compute the activation function for. + * @param alpha: A scalar, a scaling factor for the negative section. + * @return Output of the ELU operation. + */ + function elu$3(x, alpha = 1) { + // TODO(cais): Add support for alpha values other than 1. + if (alpha !== 1) { + throw new NotImplementedError(`Support for alpha values other than 1 (${alpha}) is not implemented ` + + `yet.`); + } + return elu$4(x); + } + /** + * Softsign of a tensor. + * + * Defined as x / (abs(x) + 1), element-wise. + * + * @param x: Input. + * @returns Output. + */ + function softsign(x) { + return tidy(() => div$1(x, add$3(abs$2(x), 1))); + } + /** + * Sets entries in `x` to zero at random, while scaling the entire tensor. + * + * @param x input tensor. + * @param level fraction of the entries in the tensor that will be set to 0. + * @param noiseShape shape of randomly generated keep/drop flags, must be + * broadcastable to the shape of `x`. Optional. + * @param seed random seed to ensure determinism. Optional. + * @returns Result of the dropout operation. + */ + function dropout$1(x, level, noiseShape, seed) { + return tidy(() => dropout$2(x, level, noiseShape, seed)); + } + /** + * Element-wise, segment-wise linear approximation of sigmoid. + * + * Returns `0.` if `x < -2.5`, `1.` if `x > 2.5`. + * In `-2.5 <= x <= 2.5`, returns `0.2 * x + 0.5`. + * + * @param x Input tensor. + * @returns Output tensor. + */ + function hardSigmoid(x) { + return tidy(() => { + const y = add$3(.5, mul(.2, x)); + return clipByValue$2(y, 0, 1); + }); + } + /** + * Invoke `x` in the training phase, and `alt` otherwise. 
+ * + * Porting Note: We do not create placeholder tensors for the `training` + * boolean flag here, because there is no such thing in the TF.js imperative + * backend. + * + * @param x The function to invoke iff `training` is `true`. + * @param alt The function to invoke iff `training` is `false`. + * @param training Boolean flag for whether training phase is active. + * @returns The return value of `x()` if `training` is `true`, or the return + * value of `alt()` if `training` is `false`. + */ + function inTrainPhase(x, alt, training = false) { + return training ? x() : alt(); + } + + /** + * @license + * Copyright 2018 Google LLC + * + * Use of this source code is governed by an MIT-style + * license that can be found in the LICENSE file or at + * https://opensource.org/licenses/MIT. + * ============================================================================= + */ + const VALID_FAN_MODE_VALUES = ['fanIn', 'fanOut', 'fanAvg']; + const VALID_DISTRIBUTION_VALUES = ['normal', 'uniform', 'truncatedNormal']; + // We can't easily extract a string[] from the string union type, but we can + // recapitulate the list, enforcing at compile time that the values are valid + // and that we have the right number of them. + /** + * A string array of valid Initializer class names. + * + * This is guaranteed to match the `InitializerClassName` union type. + */ + const initializerClassNames = [ + 'Zeros', 'Ones', 'Constant', 'RandomNormal', 'RandomUniform', + 'TruncatedNormal', 'VarianceScaling', 'Orthogonal', 'Identity' + ]; + + /** + * @license + * Copyright 2018 Google LLC + * + * Use of this source code is governed by an MIT-style + * license that can be found in the LICENSE file or at + * https://opensource.org/licenses/MIT. 
+ * ============================================================================= + */ + function checkFanMode(value) { + checkStringTypeUnionValue(VALID_FAN_MODE_VALUES, 'FanMode', value); + } + function checkDistribution(value) { + checkStringTypeUnionValue(VALID_DISTRIBUTION_VALUES, 'Distribution', value); + } + /** + * Initializer base class. + * + * @doc { + * heading: 'Initializers', subheading: 'Classes', namespace: 'initializers'} + */ + class Initializer extends Serializable { + fromConfigUsesCustomObjects() { + return false; + } + getConfig() { + return {}; + } + } + class Zeros extends Initializer { + apply(shape, dtype) { + return zeros$2(shape, dtype); + } + } + /** @nocollapse */ + Zeros.className = 'Zeros'; + registerClass(Zeros); + class Ones extends Initializer { + apply(shape, dtype) { + return ones$1(shape, dtype); + } + } + /** @nocollapse */ + Ones.className = 'Ones'; + registerClass(Ones); + class Constant extends Initializer { + constructor(args) { + super(); + if (typeof args !== 'object') { + throw new ValueError(`Expected argument of type ConstantConfig but got ${args}`); + } + if (args.value === undefined) { + throw new ValueError(`config must have value set but got ${args}`); + } + this.value = args.value; + } + apply(shape, dtype) { + return tidy(() => mul(scalar(this.value), ones$1(shape, dtype))); + } + getConfig() { + return { + value: this.value, + }; + } + } + /** @nocollapse */ + Constant.className = 'Constant'; + registerClass(Constant); + class RandomUniform extends Initializer { + constructor(args) { + super(); + this.DEFAULT_MINVAL = -0.05; + this.DEFAULT_MAXVAL = 0.05; + this.minval = args.minval || this.DEFAULT_MINVAL; + this.maxval = args.maxval || this.DEFAULT_MAXVAL; + this.seed = args.seed; + } + apply(shape, dtype) { + return randomUniform$1(shape, this.minval, this.maxval, dtype, this.seed); + } + getConfig() { + return { minval: this.minval, maxval: this.maxval, seed: this.seed }; + } + } + /** @nocollapse */ + 
RandomUniform.className = 'RandomUniform'; + registerClass(RandomUniform); + class RandomNormal extends Initializer { + constructor(args) { + super(); + this.DEFAULT_MEAN = 0.; + this.DEFAULT_STDDEV = 0.05; + this.mean = args.mean || this.DEFAULT_MEAN; + this.stddev = args.stddev || this.DEFAULT_STDDEV; + this.seed = args.seed; + } + apply(shape, dtype) { + dtype = dtype || 'float32'; + if (dtype !== 'float32' && dtype !== 'int32') { + throw new NotImplementedError(`randomNormal does not support dType ${dtype}.`); + } + return randomNormal$1(shape, this.mean, this.stddev, dtype, this.seed); + } + getConfig() { + return { mean: this.mean, stddev: this.stddev, seed: this.seed }; + } + } + /** @nocollapse */ + RandomNormal.className = 'RandomNormal'; + registerClass(RandomNormal); + class TruncatedNormal extends Initializer { + constructor(args) { + super(); + this.DEFAULT_MEAN = 0.; + this.DEFAULT_STDDEV = 0.05; + this.mean = args.mean || this.DEFAULT_MEAN; + this.stddev = args.stddev || this.DEFAULT_STDDEV; + this.seed = args.seed; + } + apply(shape, dtype) { + dtype = dtype || 'float32'; + if (dtype !== 'float32' && dtype !== 'int32') { + throw new NotImplementedError(`truncatedNormal does not support dType ${dtype}.`); + } + return truncatedNormal$1(shape, this.mean, this.stddev, dtype, this.seed); + } + getConfig() { + return { mean: this.mean, stddev: this.stddev, seed: this.seed }; + } + } + /** @nocollapse */ + TruncatedNormal.className = 'TruncatedNormal'; + registerClass(TruncatedNormal); + class Identity extends Initializer { + constructor(args) { + super(); + this.gain = args.gain != null ? 
args.gain : 1.0; + } + apply(shape, dtype) { + return tidy(() => { + if (shape.length !== 2 || shape[0] !== shape[1]) { + throw new ValueError('Identity matrix initializer can only be used for' + + ' 2D square matrices.'); + } + else { + return mul(this.gain, eye(shape[0])); + } + }); + } + getConfig() { + return { gain: this.gain }; + } + } + /** @nocollapse */ + Identity.className = 'Identity'; + registerClass(Identity); + /** + * Computes the number of input and output units for a weight shape. + * @param shape Shape of weight. + * @param dataFormat data format to use for convolution kernels. + * Note that all kernels in Keras are standardized on the + * CHANNEL_LAST ordering (even when inputs are set to CHANNEL_FIRST). + * @return An length-2 array: fanIn, fanOut. + */ + function computeFans(shape, dataFormat = 'channelsLast') { + let fanIn; + let fanOut; + checkDataFormat(dataFormat); + if (shape.length === 2) { + fanIn = shape[0]; + fanOut = shape[1]; + } + else if ([3, 4, 5].indexOf(shape.length) !== -1) { + if (dataFormat === 'channelsFirst') { + const receptiveFieldSize = arrayProd(shape, 2); + fanIn = shape[1] * receptiveFieldSize; + fanOut = shape[0] * receptiveFieldSize; + } + else if (dataFormat === 'channelsLast') { + const receptiveFieldSize = arrayProd(shape, 0, shape.length - 2); + fanIn = shape[shape.length - 2] * receptiveFieldSize; + fanOut = shape[shape.length - 1] * receptiveFieldSize; + } + } + else { + const shapeProd = arrayProd(shape); + fanIn = Math.sqrt(shapeProd); + fanOut = Math.sqrt(shapeProd); + } + return [fanIn, fanOut]; + } + class VarianceScaling extends Initializer { + /** + * Constructor of VarianceScaling. + * @throws ValueError for invalid value in scale. + */ + constructor(args) { + super(); + if (args.scale < 0.0) { + throw new ValueError(`scale must be a positive float. Got: ${args.scale}`); + } + this.scale = args.scale == null ? 1.0 : args.scale; + this.mode = args.mode == null ? 
'fanIn' : args.mode; + checkFanMode(this.mode); + this.distribution = + args.distribution == null ? 'normal' : args.distribution; + checkDistribution(this.distribution); + this.seed = args.seed; + } + apply(shape, dtype) { + const fans = computeFans(shape); + const fanIn = fans[0]; + const fanOut = fans[1]; + let scale = this.scale; + if (this.mode === 'fanIn') { + scale /= Math.max(1, fanIn); + } + else if (this.mode === 'fanOut') { + scale /= Math.max(1, fanOut); + } + else { + scale /= Math.max(1, (fanIn + fanOut) / 2); + } + if (this.distribution === 'normal') { + const stddev = Math.sqrt(scale); + dtype = dtype || 'float32'; + if (dtype !== 'float32' && dtype !== 'int32') { + throw new NotImplementedError(`${this.getClassName()} does not support dType ${dtype}.`); + } + return truncatedNormal$1(shape, 0, stddev, dtype, this.seed); + } + else { + const limit = Math.sqrt(3 * scale); + return randomUniform$1(shape, -limit, limit, dtype, this.seed); + } + } + getConfig() { + return { + scale: this.scale, + mode: this.mode, + distribution: this.distribution, + seed: this.seed + }; + } + } + /** @nocollapse */ + VarianceScaling.className = 'VarianceScaling'; + registerClass(VarianceScaling); + class GlorotUniform extends VarianceScaling { + /** + * Constructor of GlorotUniform + * @param scale + * @param mode + * @param distribution + * @param seed + */ + constructor(args) { + super({ + scale: 1.0, + mode: 'fanAvg', + distribution: 'uniform', + seed: args == null ? null : args.seed + }); + } + getClassName() { + // In Python Keras, GlorotUniform is not a class, but a helper method + // that creates a VarianceScaling object. Use 'VarianceScaling' as + // class name to be compatible with that. + return VarianceScaling.className; + } + } + /** @nocollapse */ + GlorotUniform.className = 'GlorotUniform'; + registerClass(GlorotUniform); + class GlorotNormal extends VarianceScaling { + /** + * Constructor of GlorotNormal. 
+ * @param scale + * @param mode + * @param distribution + * @param seed + */ + constructor(args) { + super({ + scale: 1.0, + mode: 'fanAvg', + distribution: 'normal', + seed: args == null ? null : args.seed + }); + } + getClassName() { + // In Python Keras, GlorotNormal is not a class, but a helper method + // that creates a VarianceScaling object. Use 'VarianceScaling' as + // class name to be compatible with that. + return VarianceScaling.className; + } + } + /** @nocollapse */ + GlorotNormal.className = 'GlorotNormal'; + registerClass(GlorotNormal); + class HeNormal extends VarianceScaling { + constructor(args) { + super({ + scale: 2.0, + mode: 'fanIn', + distribution: 'normal', + seed: args == null ? null : args.seed + }); + } + getClassName() { + // In Python Keras, HeNormal is not a class, but a helper method + // that creates a VarianceScaling object. Use 'VarianceScaling' as + // class name to be compatible with that. + return VarianceScaling.className; + } + } + /** @nocollapse */ + HeNormal.className = 'HeNormal'; + registerClass(HeNormal); + class HeUniform extends VarianceScaling { + constructor(args) { + super({ + scale: 2.0, + mode: 'fanIn', + distribution: 'uniform', + seed: args == null ? null : args.seed + }); + } + getClassName() { + // In Python Keras, HeUniform is not a class, but a helper method + // that creates a VarianceScaling object. Use 'VarianceScaling' as + // class name to be compatible with that. + return VarianceScaling.className; + } + } + /** @nocollapse */ + HeUniform.className = 'HeUniform'; + registerClass(HeUniform); + class LeCunNormal extends VarianceScaling { + constructor(args) { + super({ + scale: 1.0, + mode: 'fanIn', + distribution: 'normal', + seed: args == null ? null : args.seed + }); + } + getClassName() { + // In Python Keras, LeCunNormal is not a class, but a helper method + // that creates a VarianceScaling object. Use 'VarianceScaling' as + // class name to be compatible with that. 
+ return VarianceScaling.className; + } + } + /** @nocollapse */ + LeCunNormal.className = 'LeCunNormal'; + registerClass(LeCunNormal); + class LeCunUniform extends VarianceScaling { + constructor(args) { + super({ + scale: 1.0, + mode: 'fanIn', + distribution: 'uniform', + seed: args == null ? null : args.seed + }); + } + getClassName() { + // In Python Keras, LeCunUniform is not a class, but a helper method + // that creates a VarianceScaling object. Use 'VarianceScaling' as + // class name to be compatible with that. + return VarianceScaling.className; + } + } + /** @nocollapse */ + LeCunUniform.className = 'LeCunUniform'; + registerClass(LeCunUniform); + class Orthogonal extends Initializer { + constructor(args) { + super(); + this.DEFAULT_GAIN = 1; + this.ELEMENTS_WARN_SLOW = 2000; + this.gain = args.gain == null ? this.DEFAULT_GAIN : args.gain; + this.seed = args.seed; + } + apply(shape, dtype) { + return tidy(() => { + if (shape.length < 2) { + throw new NotImplementedError('Shape must be at least 2D.'); + } + if (dtype !== 'int32' && dtype !== 'float32' && dtype !== undefined) { + throw new TypeError(`Unsupported data type ${dtype}.`); + } + dtype = dtype; + // flatten the input shape with the last dimension remaining its + // original shape so it works for conv2d + const numRows = sizeFromShape(shape.slice(0, -1)); + const numCols = shape[shape.length - 1]; + const numElements = numRows * numCols; + if (numElements > this.ELEMENTS_WARN_SLOW) { + console.warn(`Orthogonal initializer is being called on a matrix with more ` + + `than ${this.ELEMENTS_WARN_SLOW} (${numElements}) elements: ` + + `Slowness may result.`); + } + const flatShape = [Math.max(numCols, numRows), Math.min(numCols, numRows)]; + // Generate a random matrix + const randNormalMat = randomNormal$1(flatShape, 0, 1, dtype, this.seed); + // Compute QR factorization + const qr = linalg.qr(randNormalMat, false); + let qMat = qr[0]; + const rMat = qr[1]; + // Make Q uniform + const diag = 
rMat.flatten().stridedSlice([0], [Math.min(numCols, numRows) * Math.min(numCols, numRows)], [Math.min(numCols, numRows) + 1]); + qMat = mul(qMat, diag.sign()); + if (numRows < numCols) { + qMat = qMat.transpose(); + } + return mul(scalar(this.gain), qMat.reshape(shape)); + }); + } + getConfig() { + return { + gain: this.gain, + seed: this.seed, + }; + } + } + /** @nocollapse */ + Orthogonal.className = 'Orthogonal'; + registerClass(Orthogonal); + // Maps the JavaScript-like identifier keys to the corresponding registry + // symbols. + const INITIALIZER_IDENTIFIER_REGISTRY_SYMBOL_MAP = { + 'constant': 'Constant', + 'glorotNormal': 'GlorotNormal', + 'glorotUniform': 'GlorotUniform', + 'heNormal': 'HeNormal', + 'heUniform': 'HeUniform', + 'identity': 'Identity', + 'leCunNormal': 'LeCunNormal', + 'leCunUniform': 'LeCunUniform', + 'ones': 'Ones', + 'orthogonal': 'Orthogonal', + 'randomNormal': 'RandomNormal', + 'randomUniform': 'RandomUniform', + 'truncatedNormal': 'TruncatedNormal', + 'varianceScaling': 'VarianceScaling', + 'zeros': 'Zeros' + }; + function deserializeInitializer(config, customObjects = {}) { + return deserializeKerasObject(config, SerializationMap.getMap().classNameMap, customObjects, 'initializer'); + } + function serializeInitializer(initializer) { + return serializeKerasObject(initializer); + } + function getInitializer(identifier) { + if (typeof identifier === 'string') { + const className = identifier in INITIALIZER_IDENTIFIER_REGISTRY_SYMBOL_MAP ? + INITIALIZER_IDENTIFIER_REGISTRY_SYMBOL_MAP[identifier] : + identifier; + /* We have four 'helper' classes for common initializers that + all get serialized as 'VarianceScaling' and shouldn't go through + the deserializeInitializer pathway. 
*/ + if (className === 'GlorotNormal') { + return new GlorotNormal(); + } + else if (className === 'GlorotUniform') { + return new GlorotUniform(); + } + else if (className === 'HeNormal') { + return new HeNormal(); + } + else if (className === 'HeUniform') { + return new HeUniform(); + } + else if (className === 'LeCunNormal') { + return new LeCunNormal(); + } + else if (className === 'LeCunUniform') { + return new LeCunUniform(); + } + else { + const config = {}; + config['className'] = className; + config['config'] = {}; + return deserializeInitializer(config); + } + } + else if (identifier instanceof Initializer) { + return identifier; + } + else { + return deserializeInitializer(identifier); + } + } + + /** + * @license + * Copyright 2018 Google LLC + * + * Use of this source code is governed by an MIT-style + * license that can be found in the LICENSE file or at + * https://opensource.org/licenses/MIT. + * ============================================================================= + */ + // tslint:enable + /** + * Determine whether the input is an Array of Shapes. + */ + function isArrayOfShapes(x) { + return Array.isArray(x) && Array.isArray(x[0]); + } + /** + * Special case of normalizing shapes to lists. + * + * @param x A shape or list of shapes to normalize into a list of Shapes. + * @return A list of Shapes. + */ + function normalizeShapeList(x) { + if (x.length === 0) { + return []; + } + if (!Array.isArray(x[0])) { + return [x]; + } + return x; + } + /** + * Helper function to obtain exactly one Tensor. + * @param xs: A single `tf.Tensor` or an `Array` of `tf.Tensor`s. + * @return A single `tf.Tensor`. If `xs` is an `Array`, return the first one. + * @throws ValueError: If `xs` is an `Array` and its length is not 1. 
+ */ + function getExactlyOneTensor(xs) { + let x; + if (Array.isArray(xs)) { + if (xs.length !== 1) { + throw new ValueError(`Expected Tensor length to be 1; got ${xs.length}`); + } + x = xs[0]; + } + else { + x = xs; + } + return x; + } + /** + * Helper function to obtain exactly on instance of Shape. + * + * @param shapes Input single `Shape` or Array of `Shape`s. + * @returns If input is a single `Shape`, return it unchanged. If the input is + * an `Array` containing exactly one instance of `Shape`, return the instance. + * Otherwise, throw a `ValueError`. + * @throws ValueError: If input is an `Array` of `Shape`s, and its length is not + * 1. + */ + function getExactlyOneShape(shapes) { + if (Array.isArray(shapes) && Array.isArray(shapes[0])) { + if (shapes.length === 1) { + shapes = shapes; + return shapes[0]; + } + else { + throw new ValueError(`Expected exactly 1 Shape; got ${shapes.length}`); + } + } + else { + return shapes; + } + } + + /** + * @license + * Copyright 2018 Google LLC + * + * Use of this source code is governed by an MIT-style + * license that can be found in the LICENSE file or at + * https://opensource.org/licenses/MIT. + * ============================================================================= + */ + /** + * Count the elements in an Array of LayerVariables. + * + * @param weights: The LayerVariables of which the constituent numbers are to + * be counted. + * @returns A count of the elements in all the LayerVariables + */ + function countParamsInWeights(weights) { + let count = 0; + for (const weight of weights) { + if (weight.shape.length === 0) { + count += 1; + } + else { + count += weight.shape.reduce((a, b) => a * b); + } + } + return count; + } + + /** + * @license + * Copyright 2018 Google LLC + * + * Use of this source code is governed by an MIT-style + * license that can be found in the LICENSE file or at + * https://opensource.org/licenses/MIT. 
+ * ============================================================================= + */ + const DEFAULT_VARIABLE_NAME_PREFIX = 'Variable'; + /** + * A `tf.layers.LayerVariable` is similar to a `tf.Tensor` in that it has a + * dtype and shape, but its value is mutable. The value is itself represented + * as a`tf.Tensor`, and can be read with the `read()` method and updated with + * the `write()` method. + */ + class LayerVariable { + /** + * Construct Variable from a `tf.Tensor`. + * + * If not explicitly named, the Variable will be given a name with the + * prefix 'Variable'. Variable names are unique. In the case of name + * collision, suffixies '_' will be added to the name. + * + * @param val Initial value of the Variable. + * @param name Name of the variable. If `null` or `undefined` is provided, it + * will default a name with the prefix 'Variable'. + * @param constraint Optional, projection function to be applied to the + * variable after optimize updates + * @throws ValueError if `name` is `null` or `undefined`. + */ + constructor(val, dtype = 'float32', name = DEFAULT_VARIABLE_NAME_PREFIX, trainable = true, constraint = null) { + this.dtype = dtype == null ? 'float32' : dtype; + this.shape = val.shape; + this.id = getNextUniqueTensorId(); + name = name == null ? DEFAULT_VARIABLE_NAME_PREFIX : name; + this.originalName = getScopedTensorName(name); + this.name = getUniqueTensorName(this.originalName); + this.trainable_ = trainable; + this.constraint = constraint; + this.val = variable$1(val, this.trainable_, this.name, this.dtype); + } + /** + * Get a snapshot of the Variable's value. + * + * The returned value is a snapshot of the Variable's value at the time of + * the invocation. Future mutations in the value of the tensor will only + * be reflected by future calls to this method. + */ + read() { + this.assertNotDisposed(); + return this.val; + } + /** + * Update the value of the Variable. + * + * @param newVal: The new value to update to. 
Must be consistent with the + * dtype and shape of the Variable. + * @return This Variable. + */ + write(newVal) { + // TODO(cais): Once TF.js Core supports Tensor.dtype, check dtype match. + this.assertNotDisposed(); + checkShapesMatch(this.val, newVal); + // Skip updating if this is the exact same tensor. + if (this.val.id !== newVal.id) { + this.val.assign(newVal); + if (this.constraint != null) { + this.val.assign(this.constraint.apply(this.val)); + } + } + return this; + } + /** + * Dispose this LayersVariable instance from memory. + */ + dispose() { + this.assertNotDisposed(); + this.val.dispose(); + } + assertNotDisposed() { + if (this.val.isDisposed) { + throw new Error(`LayersVariable ${this.name} is already disposed.`); + } + } + get trainable() { + return this.trainable_; + } + set trainable(trainable) { + this.trainable_ = trainable; + this.val.trainable = trainable; + } + } + function checkShapesMatch(x, y) { + if (x.shape.toString() !== y.shape.toString()) { + throw new Error('Shape mismatch: ' + JSON.stringify(x.shape) + ' vs. ' + + JSON.stringify(y.shape)); + } + } + /** + * Create a Variable. + * @param x The initial value of the `Variable`. + * @param dtype optional, the type of the variable. + * @param name optional, the name of the variable, default provided by + * Variable. + * @param constraint optional, a constraint to be applied after every update. + * @return The newly instantiated `Variable`. + */ + function variable(x, dtype, name, constraint) { + return new LayerVariable(x, dtype, name, true, constraint); + } + /** + * Instantiates an all-zeros Variable and returns it. + * + * @param shape Shape of the tensor. + * @param dtype DType of the tensor. + * @param name Name of the tensor. + * @return An all-zero Variable. + */ + function zerosVariable(shape, dtype, name) { + // TODO(cais): Implement logic for dtype. 
+ return new LayerVariable(zeros$2(shape), dtype, name); + } + /** + * Instantiates an all-zeros tensor of the same shape as another tensor. + * + * @param x The other tensor. + * @param dtype DType of the tensor. + * @param name Name of the tensor. + * @return A newly instantiated Variable. + */ + function zerosLike$2(x, dtype, name) { + return new LayerVariable(zerosLike$3(x), dtype, name); + } + /** + * Instantiates an all-ones tensor and returns it. + * + * @param shape Shape of the tensor. + * @param dtype DType of the tensor. + * @param name Name of the tensor. + * @return An all-ones Variable. + */ + function onesVariable(shape, dtype, name) { + // TODO(cais): Implement logic for dtype. + const allocated = ones$1(shape); + return new LayerVariable(allocated, dtype, name); + } + /** + * Instantiates an all-ones tensor of the same shape as another tensor. + * + * @param x The other tensor. + * @param dtype DType of the tensor. + * @param name Name of the tensor. + * @return A newly instantiated Variable. + */ + function onesLike$2(x, dtype, name) { + const allocated = onesLike$3(x); + return new LayerVariable(allocated, dtype, name); + } + /** + * Instantiate an identity matrix and returns it, as a Variable + * + * @param size Number of rows/columns. + * @param dtype Data type of returned Variable. + * @param name Name of returned Variable. + * @return A Variable, an identity matrix. + */ + function eyeVariable(size, dtype, name) { + return new LayerVariable(eye(size), dtype, name); + } + /** + * Get a Variable with uniform distribution of values. + * @param shape Shape of the tensor. + * @param minval Lower bound of the uniform distribution. + * @param maxval Upper bound of the uniform distribution. + * @param dtype + * @param seed + * @param name Optional name. + * @return The uniform-random Variable. 
+ */ + function randomUniformVariable(shape, minval, maxval, dtype, seed, name = 'randomUniform') { + return new LayerVariable(randomUniform$1(shape, minval, maxval, dtype), dtype, name); + } + /** + * Get a Variable with truncated-normal distribution of values. + * @param shape Shape of the tensor. + * @param mean mean value of the normal distribution. + * @param stddev standard deviation of the normal distribution. + * @param dtype + * @param seed + * @param name Optional name. + * @return The truncated-normal-random Variable. + */ + function truncatedNormalVariable(shape, mean = 0.0, stddev = 1.0, dtype, seed, name = 'truncatedNormal') { + // TODO(cais): Implement logic for dtype and seed once they are supported + // by deeplearn.js. + dtype = dtype || 'float32'; + if (dtype !== 'float32' && dtype !== 'int32') { + throw new NotImplementedError(`randomNormal does not support dType ${dtype}.`); + } + return new LayerVariable(truncatedNormal$1(shape, mean, stddev, dtype, seed), dtype, name); + } + /** + * Get a Variable with normal distribution of values. + * @param shape Shape of the tensor. + * @param mean mean value of the normal distribution. + * @param stddev standard deviation of the normal distribution. + * @param dtype + * @param seed + * @param name Optional name. + * @return The truncated-normal-random Variable. + */ + function randomNormalVariable(shape, mean = 0.0, stddev = 1.0, dtype, seed, name = 'randomNormal') { + dtype = dtype || 'float32'; + if (dtype !== 'float32' && dtype !== 'int32') { + throw new NotImplementedError(`randomNormalVariable does not support dType ${dtype}.`); + } + return new LayerVariable(randomNormal$2(shape, mean, stddev, dtype, seed), dtype, name); + } + /** + * Update the value of a Variable. + * @param x The Variable to be updated. + * @param xNew The new value to update to. + * @return The Variable updated. 
+ */ + function update(x, xNew) { + return x.write(xNew); + } + /** + * Update the value of a Variable by adding an increment. + * @param x The Variable to be updated. + * @param increment The incrment to add to `x`. + * @return The Variable updated. + */ + function updateAdd(x, increment) { + return x.write(add$3(x.read(), increment)); + } + /** + * Update the value of a Variable by subtracting a decrement. + * @param x The Variable to be updated. + * @param decrement The decrement to subtract from `x`. + * @return The Variable updated. + */ + function updateSub(x, decrement) { + return x.write(sub$2(x.read(), decrement)); + } + /** + * Get the values of an array of Variables. + * + * @param tensors An `Array` of `Variable`s to get the values of. + * @return The values of the inputs, as an `Array` of`tf.Tensor`s. + */ + function batchGetValue(xs) { + return xs.map(x => x.read()); + } + /** + * Update the value of multiple Variables at once. + * + * @param variablesAndValues An `Array`, each element is of type + * [Variable, Tensor]. The first item is the + * `Variable` of which the value is to be updated. The second item + * carries the new value. + */ + function batchSetValue(variablesAndValues) { + variablesAndValues.forEach(variableAndValue => { + const variable = variableAndValue[0]; + variable.write(variableAndValue[1]); + }); + } + /** + * Returns the gradients of `variables` w.r.t. the return value of `lossFn`. + * @param lossFn A function which returns a Scalar to be used as the function + * value (i.e., numerator) for differentiation. + * @param variables List of variables to be used as the independent variables + * (i.e., denominator) for differentiation. + * @returns An Array of gradients tensors. + */ + function gradients(lossFn, variables) { + // TODO(cais): The return type signature can be simplified if deeplearn makes + // the corresponding type public. 
+ const variableList = variables.map(variable => variable.read()); + const valudAndGrads = variableGrads(lossFn, variableList); + return variables.map(variable => valudAndGrads.grads[variable.name]); + } + + /** + * @license + * Copyright 2018 Google LLC + * + * Use of this source code is governed by an MIT-style + * license that can be found in the LICENSE file or at + * https://opensource.org/licenses/MIT. + * ============================================================================= + */ + /** + * Specifies the ndim, dtype and shape of every input to a layer. + * + * Every layer should expose (if appropriate) an `inputSpec` attribute: + * a list of instances of InputSpec (one per input tensor). + * + * A null entry in a shape is compatible with any dimension, + * a null shape is compatible with any shape. + */ + class InputSpec { + constructor(args) { + this.dtype = args.dtype; + this.shape = args.shape; + /* + TODO(michaelterry): Could throw error if ndim and shape are both defined + (then backport). + */ + if (args.shape != null) { + this.ndim = args.shape.length; + } + else { + this.ndim = args.ndim; + } + this.maxNDim = args.maxNDim; + this.minNDim = args.minNDim; + this.axes = args.axes || {}; + } + } + /** + * `tf.SymbolicTensor` is a placeholder for a Tensor without any concrete value. + * + * They are most often encountered when building a graph of `Layer`s for a + * `tf.LayersModel` and the input data's shape, but not values are known. + * + * @doc {heading: 'Models', 'subheading': 'Classes'} + */ + class SymbolicTensor { + /** + * + * @param dtype + * @param shape + * @param sourceLayer The Layer that produced this symbolic tensor. + * @param inputs The inputs passed to sourceLayer's __call__() method. + * @param nodeIndex + * @param tensorIndex + * @param callArgs The keyword arguments passed to the __call__() method. + * @param name + * @param outputTensorIndex The index of this tensor in the list of outputs + * returned by apply(). 
+ */ + constructor(dtype, shape, sourceLayer, inputs, callArgs, name, outputTensorIndex) { + this.dtype = dtype; + this.shape = shape; + this.sourceLayer = sourceLayer; + this.inputs = inputs; + this.callArgs = callArgs; + this.outputTensorIndex = outputTensorIndex; + this.id = getNextUniqueTensorId(); + if (name != null) { + this.originalName = getScopedTensorName(name); + this.name = getUniqueTensorName(this.originalName); + } + this.rank = shape.length; + } + } + let _nextNodeID = 0; + /** + * A `Node` describes the connectivity between two layers. + * + * Each time a layer is connected to some new input, + * a node is added to `layer.inboundNodes`. + * + * Each time the output of a layer is used by another layer, + * a node is added to `layer.outboundNodes`. + * + * `nodeIndices` and `tensorIndices` are basically fine-grained coordinates + * describing the origin of the `inputTensors`, verifying the following: + * + * `inputTensors[i] == + * inboundLayers[i].inboundNodes[nodeIndices[i]].outputTensors[ + * tensorIndices[i]]` + * + * A node from layer A to layer B is added to: + * A.outboundNodes + * B.inboundNodes + */ + class Node { + constructor(args, + // TODO(michaelterry): Define actual type for this. + callArgs) { + this.callArgs = callArgs; + this.id = _nextNodeID++; + /* + Layer instance (NOT a list). + this is the layer that takes a list of input tensors + and turns them into a list of output tensors. + the current node will be added to + the inboundNodes of outboundLayer. + */ + this.outboundLayer = args.outboundLayer; + /* + The following 3 properties describe where + the input tensors come from: which layers, + and for each layer, which node and which + tensor output of each node. + */ + // List of layer instances. + this.inboundLayers = args.inboundLayers; + // List of integers, 1:1 mapping with inboundLayers. + this.nodeIndices = args.nodeIndices; + // List of integers, 1:1 mapping with inboundLayers. 
+ this.tensorIndices = args.tensorIndices; + /* + Following 2 properties: + tensor inputs and outputs of outboundLayer. + */ + // List of tensors. 1:1 mapping with inboundLayers. + this.inputTensors = args.inputTensors; + // List of tensors, created by outboundLayer.call(). + this.outputTensors = args.outputTensors; + /* + Following 2 properties: input and output masks. + List of tensors, 1:1 mapping with inputTensor. + */ + this.inputMasks = args.inputMasks; + // List of tensors, created by outboundLayer.computeMask(). + this.outputMasks = args.outputMasks; + // Following 2 properties: input and output shapes. + // List of shape tuples, shapes of inputTensors. + this.inputShapes = args.inputShapes; + // List of shape tuples, shapes of outputTensors. + this.outputShapes = args.outputShapes; + // Add nodes to all layers involved. + for (const layer of args.inboundLayers) { + if (layer != null) { + layer.outboundNodes.push(this); + } + } + args.outboundLayer.inboundNodes.push(this); + } + getConfig() { + const inboundNames = []; + for (const layer of this.inboundLayers) { + if (layer != null) { + inboundNames.push(layer.name); + } + else { + inboundNames.push(null); + } + } + return { + outboundLayer: this.outboundLayer ? this.outboundLayer.name : null, + inboundLayers: inboundNames, + nodeIndices: this.nodeIndices, + tensorIndices: this.tensorIndices + }; + } + } + let _nextLayerID = 0; + /** + * A layer is a grouping of operations and weights that can be composed to + * create a `tf.LayersModel`. + * + * Layers are constructed by using the functions under the + * [tf.layers](#Layers-Basic) namespace. + * + * @doc {heading: 'Layers', subheading: 'Classes', namespace: 'layers'} + */ + class Layer extends Serializable { + constructor(args = {}) { + super(); + this._callHook = null; + this._addedWeightNames = []; + // Porting Notes: PyKeras does not have this property in this base Layer + // class. 
Instead lets Layer subclass set it dynamically and checks the + // value with `hasattr`. In tfjs-layers, we let this be a member of this + // base class. + this._stateful = false; + this.id = _nextLayerID++; + this.activityRegularizer = null; + this.inputSpec = null; + this.supportsMasking = false; + // These properties will be set upon call of this.build() + this._trainableWeights = []; + this._nonTrainableWeights = []; + this._losses = []; + this._updates = []; + this._built = false; + /* + These lists will be filled via successive calls + to this.addInboundNode(). + */ + this.inboundNodes = []; + this.outboundNodes = []; + let name = args.name; + if (!name) { + const prefix = this.getClassName(); + name = toSnakeCase(prefix) + '_' + getUid(prefix); + } + this.name = name; + this.trainable_ = args.trainable == null ? true : args.trainable; + if (args.inputShape != null || args.batchInputShape != null) { + /* + In this case we will later create an input layer + to insert before the current layer + */ + let batchInputShape; + if (args.batchInputShape != null) { + batchInputShape = args.batchInputShape; + } + else if (args.inputShape != null) { + let batchSize = null; + if (args.batchSize != null) { + batchSize = args.batchSize; + } + batchInputShape = [batchSize].concat(args.inputShape); + } + this.batchInputShape = batchInputShape; + // Set dtype. + let dtype = args.dtype; + if (dtype == null) { + dtype = args.inputDType; + } + if (dtype == null) { + dtype = 'float32'; + } + this.dtype = dtype; + } + if (args.weights != null) { + this.initialWeights = args.weights; + } + else { + this.initialWeights = null; + } + // The value of `_refCount` is initialized to null. When the layer is used + // in a symbolic way for the first time, it will be set to 1. + this._refCount = null; + this.fastWeightInitDuringBuild = false; + } + /** + * Converts a layer and its index to a unique (immutable type) name. + * This function is used internally with `this.containerNodes`. 
+ * @param layer The layer. + * @param nodeIndex The layer's position (e.g. via enumerate) in a list of + * nodes. + * + * @returns The unique name. + */ + static nodeKey(layer, nodeIndex) { + return layer.name + '_ib-' + nodeIndex.toString(); + } + /** + * Returns this.inboundNode at index nodeIndex. + * + * Porting note: This is a replacement for _get_node_attribute_at_index() + * @param nodeIndex + * @param attrName The name of the attribute related to request for this node. + */ + getNodeAtIndex(nodeIndex, attrName) { + if (this.inboundNodes.length === 0) { + throw new RuntimeError('The layer has never been called ' + + `and thus has no defined ${attrName}.`); + } + if (this.inboundNodes.length <= nodeIndex) { + throw new ValueError(`Asked to get ${attrName} at node ${nodeIndex}, ` + + `but the layer has only ${this.inboundNodes.length} inbound nodes.`); + } + return this.inboundNodes[nodeIndex]; + } + /** + * Retrieves the input tensor(s) of a layer at a given node. + * + * @param nodeIndex Integer, index of the node from which to retrieve the + * attribute. E.g. `nodeIndex=0` will correspond to the first time the layer + * was called. + * + * @return A tensor (or list of tensors if the layer has multiple inputs). + */ + getInputAt(nodeIndex) { + return singletonOrArray(this.getNodeAtIndex(nodeIndex, 'input').inputTensors); + } + /** + * Retrieves the output tensor(s) of a layer at a given node. + * + * @param nodeIndex Integer, index of the node from which to retrieve the + * attribute. E.g. `nodeIndex=0` will correspond to the first time the layer + * was called. + * + * @return A tensor (or list of tensors if the layer has multiple outputs). + */ + getOutputAt(nodeIndex) { + return singletonOrArray(this.getNodeAtIndex(nodeIndex, 'output').outputTensors); + } + // Properties + /** + * Retrieves the input tensor(s) of a layer. + * + * Only applicable if the layer has exactly one inbound node, + * i.e. if it is connected to one incoming layer. 
+ * + * @return Input tensor or list of input tensors. + * + * @exception AttributeError if the layer is connected to more than one + * incoming layers. + */ + get input() { + if (this.inboundNodes.length > 1) { + throw new AttributeError(`Layer ${this.name}` + + ' has multiple inbound nodes, ' + + 'hence the notion of "layer input" ' + + 'is ill-defined. ' + + 'Use `getInputAt(nodeIndex)` instead.'); + } + else if (this.inboundNodes.length === 0) { + throw new AttributeError(`Layer ${this.name}` + + ' is not connected, no input to return.'); + } + return singletonOrArray(this.getNodeAtIndex(0, 'input').inputTensors); + } + /** + * Retrieves the output tensor(s) of a layer. + * + * Only applicable if the layer has exactly one inbound node, + * i.e. if it is connected to one incoming layer. + * + * @return Output tensor or list of output tensors. + * + * @exception AttributeError if the layer is connected to more than one + * incoming layers. + */ + get output() { + if (this.inboundNodes.length === 0) { + throw new AttributeError(`Layer ${this.name}` + + ' has no inbound nodes.'); + } + if (this.inboundNodes.length > 1) { + throw new AttributeError(`Layer ${this.name}` + + ' has multiple inbound nodes, ' + + 'hence the notion of "layer output" ' + + 'is ill-defined. ' + + 'Use `getOutputAt(nodeIndex)` instead.'); + } + return singletonOrArray(this.getNodeAtIndex(0, 'output').outputTensors); + } + get losses() { + return this._losses; + } + /** + * Retrieves the Layer's current loss values. + * + * Used for regularizers during training. + */ + calculateLosses() { + // Porting Node: This is an augmentation to Layer.loss in PyKeras. + // In PyKeras, Layer.loss returns symbolic tensors. Here a concrete + // Tensor (specifically Scalar) values are returned. This is due to the + // imperative backend. 
+ return this.losses.map(lossFn => lossFn()); + } + get updates() { + return this._updates; + } + get built() { + return this._built; + } + set built(built) { + this._built = built; + } + get trainable() { + return this.trainable_; + } + set trainable(trainable) { + this._trainableWeights.forEach(w => w.trainable = trainable); + this.trainable_ = trainable; + } + get trainableWeights() { + if (this.trainable_) { + return this._trainableWeights.filter(w => w.trainable); + } + else { + return []; + } + } + set trainableWeights(weights) { + this._trainableWeights = weights; + } + get nonTrainableWeights() { + if (this.trainable) { + return this._trainableWeights.filter(w => !w.trainable) + .concat(this._nonTrainableWeights); + } + else { + return this._trainableWeights.concat(this._nonTrainableWeights); + } + } + set nonTrainableWeights(weights) { + this._nonTrainableWeights = weights; + } + /** + * The concatenation of the lists trainableWeights and nonTrainableWeights + * (in this order). + */ + get weights() { + return this.trainableWeights.concat(this.nonTrainableWeights); + } + get stateful() { + return this._stateful; + } + /** + * Reset the states of the layer. + * + * This method of the base Layer class is essentially a no-op. + * Subclasses that are stateful (e.g., stateful RNNs) should override this + * method. + */ + resetStates() { + if (!this.stateful) { + throw new Error('Cannot call the resetStates() method of a non-stateful Layer ' + + 'object.'); + } + } + /** + * Checks compatibility between the layer and provided inputs. + * + * This checks that the tensor(s) `input` + * verify the input assumptions of the layer + * (if any). If not, exceptions are raised. + * + * @param inputs Input tensor or list of input tensors. + * + * @exception ValueError in case of mismatch between + * the provided inputs and the expectations of the layer. 
+ */ + assertInputCompatibility(inputs) { + const inputsList = toList(inputs); + if (this.inputSpec == null || this.inputSpec.length === 0) { + return; + } + const inputSpec = toList(this.inputSpec); + if (inputsList.length !== inputSpec.length) { + throw new ValueError(`Layer ${this.name} expects ${inputSpec.length} inputs, ` + + `but it received ${inputsList.length} input tensors. ` + + `Input received: ${inputs}`); + } + for (let inputIndex = 0; inputIndex < inputsList.length; inputIndex++) { + const x = inputsList[inputIndex]; + const spec = inputSpec[inputIndex]; + if (spec == null) { + continue; + } + // Check ndim. + const ndim = x.rank; + if (spec.ndim != null) { + if (ndim !== spec.ndim) { + throw new ValueError(`Input ${inputIndex} is incompatible with layer ${this.name}: ` + + `expected ndim=${spec.ndim}, found ndim=${ndim}`); + } + } + if (spec.maxNDim != null) { + if (ndim > spec.maxNDim) { + throw new ValueError(`Input ${inputIndex} is incompatible with layer ${this.name}` + + `: expected max_ndim=${spec.maxNDim}, found ndim=${ndim}`); + } + } + if (spec.minNDim != null) { + if (ndim < spec.minNDim) { + throw new ValueError(`Input ${inputIndex} is incompatible with layer ${this.name}` + + `: expected min_ndim=${spec.minNDim}, found ndim=${ndim}.`); + } + } + // Check dtype. + if (spec.dtype != null) { + if (x.dtype !== spec.dtype) { + throw new ValueError(`Input ${inputIndex} is incompatible with layer ${this.name} ` + + `: expected dtype=${spec.dtype}, found dtype=${x.dtype}.`); + } + } + // Check specific shape axes. + if (spec.axes) { + const xShape = x.shape; + for (const key in spec.axes) { + const axis = Number(key); + const value = spec.axes[key]; + // Perform Python-style slicing in case axis < 0; + // TODO(cais): Use https://github.com/alvivi/typescript-underscore to + // ensure type safety through Underscore calls. + const xShapeAtAxis = axis >= 0 ? 
xShape[axis] : xShape[xShape.length + axis]; + if (value != null && [value, null].indexOf(xShapeAtAxis) === -1) { + throw new ValueError(`Input ${inputIndex} is incompatible with layer ` + + `${this.name}: expected axis ${axis} of input shape to ` + + `have value ${value} but got shape ${xShape}.`); + } + } + } + // Check shape. + if (spec.shape != null) { + for (let i = 0; i < spec.shape.length; ++i) { + const specDim = spec.shape[i]; + const dim = x.shape[i]; + if (specDim != null && dim != null) { + if (specDim !== dim) { + throw new ValueError(`Input ${inputIndex} is incompatible with layer ` + + `${this.name}: expected shape=${spec.shape}, ` + + `found shape=${x.shape}.`); + } + } + } + } + } + } + /** + * This is where the layer's logic lives. + * + * @param inputs Input tensor, or list/tuple of input tensors. + * @param kwargs Additional keyword arguments. + * + * @return A tensor or list/tuple of tensors. + */ + call(inputs, kwargs) { + return inputs; + } + invokeCallHook(inputs, kwargs) { + if (this._callHook != null) { + this._callHook(inputs, kwargs); + } + } + /** + * Set call hook. + * This is currently used for testing only. + * @param callHook + */ + setCallHook(callHook) { + this._callHook = callHook; + } + /** + * Clear call hook. + * This is currently used for testing only. + */ + clearCallHook() { + this._callHook = null; + } + /** + * Builds or executes a `Layer`'s logic. + * + * When called with `tf.Tensor`(s), execute the `Layer`'s computation and + * return Tensor(s). For example: + * + * ```js + * const denseLayer = tf.layers.dense({ + * units: 1, + * kernelInitializer: 'zeros', + * useBias: false + * }); + * + * // Invoke the layer's apply() method with a `tf.Tensor` (with concrete + * // numeric values). 
+ * const input = tf.ones([2, 2]); + * const output = denseLayer.apply(input); + * + * // The output's value is expected to be [[0], [0]], due to the fact that + * // the dense layer has a kernel initialized to all-zeros and does not have + * // a bias. + * output.print(); + * ``` + * + * When called with `tf.SymbolicTensor`(s), this will prepare the layer for + * future execution. This entails internal book-keeping on shapes of + * expected Tensors, wiring layers together, and initializing weights. + * + * Calling `apply` with `tf.SymbolicTensor`s are typically used during the + * building of non-`tf.Sequential` models. For example: + * + * ```js + * const flattenLayer = tf.layers.flatten(); + * const denseLayer = tf.layers.dense({units: 1}); + * + * // Use tf.layers.input() to obtain a SymbolicTensor as input to apply(). + * const input = tf.input({shape: [2, 2]}); + * const output1 = flattenLayer.apply(input); + * + * // output1.shape is [null, 4]. The first dimension is the undetermined + * // batch size. The second dimension comes from flattening the [2, 2] + * // shape. + * console.log(JSON.stringify(output1.shape)); + * + * // The output SymbolicTensor of the flatten layer can be used to call + * // the apply() of the dense layer: + * const output2 = denseLayer.apply(output1); + * + * // output2.shape is [null, 1]. The first dimension is the undetermined + * // batch size. The second dimension matches the number of units of the + * // dense layer. + * console.log(JSON.stringify(output2.shape)); + * + * // The input and output can be used to construct a model that consists + * // of the flatten and dense layers. + * const model = tf.model({inputs: input, outputs: output2}); + * ``` + * + * @param inputs a `tf.Tensor` or `tf.SymbolicTensor` or an Array of them. + * @param kwargs Additional keyword arguments to be passed to `call()`. + * + * @return Output of the layer's `call` method. 
+ * + * @exception ValueError error in case the layer is missing shape information + * for its `build` call. + * + * @doc {heading: 'Models', 'subheading': 'Classes'} + */ + // Porting Note: This is a replacement for __call__() in Python. + apply(inputs, kwargs) { + kwargs = kwargs || {}; + this.assertNotDisposed(); + // Ensure inputs are all the same type. + const inputsList = toList(inputs); + const allAreSymbolic = checkAllSymbolic(inputs); + const noneAreSymbolic = checkNoneSymbolic(inputs); + if (allAreSymbolic === noneAreSymbolic) { + throw new ValueError('Arguments to apply() must be all ' + + 'SymbolicTensors or all Tensors'); + } + // TODO(michaelterry): nameScope() may not be necessary. + return nameScope(this.name, () => { + // Handle laying building (weight creating, input spec locking). + if (!this.built) { + /* + Throw exceptions in case the input is not compatible + with the inputSpec specified in the layer constructor. + */ + this.assertInputCompatibility(inputs); + // Collect input shapes to build layer. + const inputShapes = []; + for (const xElem of toList(inputs)) { + inputShapes.push(xElem.shape); + } + this.build(singletonOrArray(inputShapes)); + this.built = true; + // Load weights that were specified at layer instantiation. + if (this.initialWeights) { + this.setWeights(this.initialWeights); + } + if (this._refCount === null && noneAreSymbolic) { + // The first use of this layer is a non-symbolic call, set ref count + // to 1 so the Layer can be properly disposed if its dispose() method + // is called. + this._refCount = 1; + } + } + /* + Throw exceptions in case the input is not compatible + with the inputSpec set at build time. + */ + this.assertInputCompatibility(inputs); + // Handle mask propagation. + // TODO(michaelterry): Mask propagation not currently implemented. + // Actually call the layer, collecting output(s), mask(s), and shape(s). 
+ if (noneAreSymbolic) { + let output = this.call(inputs, kwargs); + // Apply masks to the output tensors if the layer supports it. + if (this.supportsMasking) { + // TODO(mattsoulanille): pass the input tensors' masks to computeMask + this.setMaskMetadata(inputs, output); + } + // If the layer returns tensors from its inputs, unmodified, + // we copy them to avoid loss of tensor metadata. + const outputList = toList(output); + const outputListCopy = []; + // TODO(michaelterry): This copying may not be necessary given our eager + // backend. + for (let x of outputList) { + if (inputsList.indexOf(x) !== -1) { + x = x.clone(); + } + outputListCopy.push(x); + } + output = singletonOrArray(outputListCopy); + if (this.activityRegularizer != null) { + throw new NotImplementedError('Layer invocation in the presence of activity ' + + 'regularizer(s) is not supported yet.'); + } + // TODO(michaelterry): Call addInboundNode()? + return output; + } + else { + const inputShape = collectInputShape(inputs); + const outputShape = this.computeOutputShape(inputShape); + let output; + const outputDType = guessOutputDType(inputs); + this.warnOnIncompatibleInputShape(Array.isArray(inputs) ? inputShape[0] : + inputShape); + if (outputShape != null && outputShape.length > 0 && + Array.isArray(outputShape[0])) { + // We have multiple output shapes. Create multiple output tensors. + output = outputShape + .map((shape, index) => new SymbolicTensor(outputDType, shape, this, toList(inputs), kwargs, this.name, index)); + } + else { + output = new SymbolicTensor(outputDType, outputShape, this, toList(inputs), kwargs, this.name); + } + /* + Add an inbound node to the layer, so that it keeps track + of the call and of all new variables created during the call. + This also updates the layer history of the output tensor(s). + If the input tensor(s) had no previous history, + this does nothing. 
+ */ + this.addInboundNode(inputs, output, null, null, inputShape, outputShape, kwargs); + this._refCount++; + if (this.activityRegularizer != null) { + throw new NotImplementedError('Layer invocation in the presence of activity ' + + 'regularizer(s) is not supported yet.'); + } + return output; + } + }); + } + /** + * Check compatibility between input shape and this layer's batchInputShape. + * + * Print warning if any incompatibility is found. + * + * @param inputShape Input shape to be checked. + */ + warnOnIncompatibleInputShape(inputShape) { + if (this.batchInputShape == null) { + return; + } + else if (inputShape.length !== this.batchInputShape.length) { + console.warn(`The rank of the input tensor provided (shape: ` + + `${JSON.stringify(inputShape)}) does not match that of the ` + + `batchInputShape (${JSON.stringify(this.batchInputShape)}) ` + + `of the layer ${this.name}`); + } + else { + let dimMismatch = false; + this.batchInputShape.forEach((dimension, i) => { + if (dimension != null && inputShape[i] != null && + inputShape[i] !== dimension) { + dimMismatch = true; + } + }); + if (dimMismatch) { + console.warn(`The shape of the input tensor ` + + `(${JSON.stringify(inputShape)}) does not ` + + `match the expectation of layer ${this.name}: ` + + `${JSON.stringify(this.batchInputShape)}`); + } + } + } + /** + * Retrieves the output shape(s) of a layer. + * + * Only applicable if the layer has only one inbound node, or if all inbound + * nodes have the same output shape. + * + * @returns Output shape or shapes. + * @throws AttributeError: if the layer is connected to more than one incoming + * nodes. 
+ * + * @doc {heading: 'Models', 'subheading': 'Classes'} + */ + get outputShape() { + if (this.inboundNodes == null || this.inboundNodes.length === 0) { + throw new AttributeError(`The layer ${this.name} has never been called and thus has no ` + + `defined output shape.`); + } + const allOutputShapes = []; + for (const node of this.inboundNodes) { + const shapeString = JSON.stringify(node.outputShapes); + if (allOutputShapes.indexOf(shapeString) === -1) { + allOutputShapes.push(shapeString); + } + } + if (allOutputShapes.length === 1) { + const outputShapes = this.inboundNodes[0].outputShapes; + if (Array.isArray(outputShapes) && Array.isArray(outputShapes[0]) && + outputShapes.length === 1) { + return outputShapes[0]; + } + else { + return outputShapes; + } + } + else { + throw new AttributeError(`The layer ${this.name} has multiple inbound nodes with different ` + + `output shapes. Hence the notion of "output shape" is ill-defined ` + + `for the layer.`); + // TODO(cais): Implement getOutputShapeAt(). + } + } + /** + * Counts the total number of numbers (e.g., float32, int32) in the + * weights. + * + * @returns An integer count. + * @throws RuntimeError: If the layer is not built yet (in which case its + * weights are not defined yet.) + * + * @doc {heading: 'Models', 'subheading': 'Classes'} + */ + countParams() { + if (!this.built) { + throw new RuntimeError(`You tried to call countParams() on ${this.name}, ` + + `but the layer is not built yet. Build it first by calling ` + + `build(batchInputShape).`); + } + return countParamsInWeights(this.weights); + } + /** + * Creates the layer weights. + * + * Must be implemented on all layers that have weights. + * + * Called when apply() is called to construct the weights. + * + * @param inputShape A `Shape` or array of `Shape` (unused). + * + * @doc {heading: 'Models', 'subheading': 'Classes'} + */ + build(inputShape) { + this.built = true; + } + /** + * Returns the current values of the weights of the layer. 
+ * + * @param trainableOnly Whether to get the values of only trainable weights. + * @returns Weight values as an `Array` of `tf.Tensor`s. + * + * @doc {heading: 'Models', 'subheading': 'Classes'} + */ + getWeights(trainableOnly = false) { + return batchGetValue(trainableOnly ? this.trainableWeights : this.weights); + } + /** + * Sets the weights of the layer, from Tensors. + * + * @param weights a list of Tensors. The number of arrays and their shape + * must match number of the dimensions of the weights of the layer (i.e. + * it should match the output of `getWeights`). + * + * @exception ValueError If the provided weights list does not match the + * layer's specifications. + * + * @doc {heading: 'Models', 'subheading': 'Classes'} + */ + setWeights(weights) { + tidy(() => { + const params = this.weights; + if (params.length !== weights.length) { + // TODO(cais): Restore the following and use `providedWeights`, instead + // of `weights` in the error message, once the deeplearn.js bug is + // fixed: https://github.com/PAIR-code/deeplearnjs/issues/498 const + // providedWeights = JSON.stringify(weights).slice(0, 50); + throw new ValueError(`You called setWeights(weights) on layer "${this.name}" ` + + `with a weight list of length ${weights.length}, ` + + `but the layer was expecting ${params.length} weights. ` + + `Provided weights: ${weights}...`); + } + if (params.length === 0) { + return; + } + const weightValueTuples = []; + const paramValues = batchGetValue(params); + for (let i = 0; i < paramValues.length; ++i) { + const pv = paramValues[i]; + const p = params[i]; + const w = weights[i]; + if (!arraysEqual(pv.shape, w.shape)) { + throw new ValueError(`Layer weight shape ${pv.shape} ` + + `not compatible with provided weight shape ${w.shape}`); + } + weightValueTuples.push([p, w]); + } + batchSetValue(weightValueTuples); + }); + } + /** + * Adds a weight variable to the layer. + * + * @param name Name of the new weight variable. 
+ * @param shape The shape of the weight. + * @param dtype The dtype of the weight. + * @param initializer An initializer instance. + * @param regularizer A regularizer instance. + * @param trainable Whether the weight should be trained via backprop or not + * (assuming that the layer itself is also trainable). + * @param constraint An optional trainable. + * @return The created weight variable. + * + * @doc {heading: 'Models', 'subheading': 'Classes'} + */ + addWeight(name, shape, dtype, initializer, regularizer, trainable, constraint, getInitializerFunc) { + // Reject duplicate weight names. + if (this._addedWeightNames.indexOf(name) !== -1) { + throw new ValueError(`Duplicate weight name ${name} for layer ${this.name}`); + } + this._addedWeightNames.push(name); + if (dtype == null) { + dtype = 'float32'; + } + if (this.fastWeightInitDuringBuild) { + initializer = getInitializerFunc != null ? getInitializerFunc() : + getInitializer('zeros'); + } + const initValue = initializer.apply(shape, dtype); + const weight = new LayerVariable(initValue, dtype, name, trainable, constraint); + initValue.dispose(); + // Request backend not to dispose the weights of the model on scope() exit. + if (regularizer != null) { + this.addLoss(() => regularizer.apply(weight.read())); + } + if (trainable == null) { + trainable = true; + } + if (trainable) { + this._trainableWeights.push(weight); + } + else { + this._nonTrainableWeights.push(weight); + } + return weight; + } + /** + * Set the fast-weight-initialization flag. + * + * In cases where the initialized weight values will be immediately + * overwritten by loaded weight values during model loading, setting + * the flag to `true` saves unnecessary calls to potentially expensive + * initializers and speeds up the loading process. + * + * @param value Target value of the flag. + */ + setFastWeightInitDuringBuild(value) { + this.fastWeightInitDuringBuild = value; + } + /** + * Add losses to the layer. 
+ * + * The loss may potentially be conditional on some inputs tensors, + * for instance activity losses are conditional on the layer's inputs. + * + * @doc {heading: 'Models', 'subheading': 'Classes'} + */ + addLoss(losses) { + if (losses == null || Array.isArray(losses) && losses.length === 0) { + return; + } + // Update this.losses + losses = toList(losses); + if (this._losses !== undefined && this._losses !== null) { + this.losses.push(...losses); + } + } + /** + * Computes the output shape of the layer. + * + * Assumes that the layer will be built to match that input shape provided. + * + * @param inputShape A shape (tuple of integers) or a list of shape tuples + * (one per output tensor of the layer). Shape tuples can include null for + * free dimensions, instead of an integer. + * + * @doc {heading: 'Models', 'subheading': 'Classes'} + */ + computeOutputShape(inputShape) { + return inputShape; + } + /** + * Computes an output mask tensor. + * + * @param inputs Tensor or list of tensors. + * @param mask Tensor or list of tensors. + * + * @return null or a tensor (or list of tensors, one per output tensor of the + * layer). 
+ */ + computeMask(inputs, mask) { + if (!this.supportsMasking) { + if (mask != null) { + if (Array.isArray(mask)) { + mask.forEach(maskElement => { + if (maskElement != null) { + throw new TypeError(`Layer ${this.name} does not support masking, ` + + 'but was passed an inputMask.'); + } + }); + } + else { + throw new TypeError(`Layer ${this.name} does not support masking, ` + + 'but was passed an inputMask.'); + } + } + // masking not explicitly supported: return null as mask + return null; + } + // if masking is explictly supported, by default + // carry over the input mask + return mask; + } + setMaskMetadata(inputs, outputs, previousMask) { + if (!this.supportsMasking) { + return; + } + const outputMasks = this.computeMask(inputs, previousMask); + const outputsList = toList(outputs); + const outputMasksList = toList(outputMasks); + if (outputsList.length !== outputMasksList.length) { + throw new Error(`${this.name} outputs ${outputsList.length} tensors ` + + `but ${outputsList.length} masks for those tensors`); + } + for (let i = 0; i < outputsList.length; i++) { + outputsList[i].kerasMask = outputMasksList[i]; + } + } + /** + * Internal method to create an inbound node for the layer. + * + * @param inputTensors List of input tensors. + * @param outputTensors List of output tensors. + * @param inputMasks List of input masks (a mask can be a tensor, or null). + * @param outputMasks List of output masks (a mask can be a tensor, or null). + * @param inputShapes List of input shape tuples. + * @param outputShapes List of output shape tuples. + * @param kwargs Dictionary of keyword arguments that were passed to the + * `call` method of the layer at the call that created the node. 
+ */ + addInboundNode(inputTensors, outputTensors, inputMasks, outputMasks, inputShapes, outputShapes, kwargs = null) { + const inputTensorList = toList(inputTensors); + outputTensors = toList(outputTensors); + inputMasks = toList(inputMasks); + outputMasks = toList(outputMasks); + inputShapes = normalizeShapeList(inputShapes); + outputShapes = normalizeShapeList(outputShapes); + // Collect input tensor(s) coordinates. + const inboundLayers = []; + const nodeIndices = []; + const tensorIndices = []; + for (const x of inputTensorList) { + /* + * TODO(michaelterry): Keras adds this value to tensors; it's not + * clear whether we'll use this or not. + */ + inboundLayers.push(x.sourceLayer); + nodeIndices.push(x.nodeIndex); + tensorIndices.push(x.tensorIndex); + } + // Create node, add it to inbound nodes. + // (This call has side effects.) + // tslint:disable-next-line:no-unused-expression + new Node({ + outboundLayer: this, + inboundLayers, + nodeIndices, + tensorIndices, + inputTensors: inputTensorList, + outputTensors, + inputMasks, + outputMasks, + inputShapes, + outputShapes + }, kwargs); + // Update tensor history + for (let i = 0; i < outputTensors.length; i++) { + // TODO(michaelterry: _uses_learning_phase not tracked. + outputTensors[i].sourceLayer = this; + outputTensors[i].nodeIndex = this.inboundNodes.length - 1; + outputTensors[i].tensorIndex = i; + } + } + /** + * Returns the config of the layer. + * + * A layer config is a TS dictionary (serializable) + * containing the configuration of a layer. + * The same layer can be reinstantiated later + * (without its trained weights) from this configuration. + * + * The config of a layer does not include connectivity + * information, nor the layer class name. These are handled + * by 'Container' (one layer of abstraction above). + * + * Porting Note: The TS dictionary follows TS naming standards for + * keys, and uses tfjs-layers type-safe Enums. 
Serialization methods + * should use a helper function to convert to the pythonic storage + * standard. (see serialization_utils.convertTsToPythonic) + * + * @returns TS dictionary of configuration. + * + * @doc {heading: 'Models', 'subheading': 'Classes'} + */ + getConfig() { + const config = { name: this.name, trainable: this.trainable }; + if (this.batchInputShape != null) { + config['batchInputShape'] = this.batchInputShape; + } + if (this.dtype != null) { + config['dtype'] = this.dtype; + } + return config; + } + /** + * Dispose the weight variables that this Layer instance holds. + * + * @returns {number} Number of disposed variables. + */ + disposeWeights() { + this.weights.forEach(weight => weight.dispose()); + return this.weights.length; + } + assertNotDisposed() { + if (this._refCount === 0) { + throw new Error(`Layer '${this.name}' is already disposed.`); + } + } + /** + * Attempt to dispose layer's weights. + * + * This method decreases the reference count of the Layer object by 1. + * + * A Layer is reference-counted. Its reference count is incremented by 1 + * the first item its `apply()` method is called and when it becomes a part + * of a new `Node` (through calling the `apply()` method on a + * `tf.SymbolicTensor`). + * + * If the reference count of a Layer becomes 0, all the weights will be + * disposed and the underlying memory (e.g., the textures allocated in WebGL) + * will be freed. + * + * Note: If the reference count is greater than 0 after the decrement, the + * weights of the Layer will *not* be disposed. + * + * After a Layer is disposed, it cannot be used in calls such as `apply()`, + * `getWeights()` or `setWeights()` anymore. + * + * @returns A DisposeResult Object with the following fields: + * - refCountAfterDispose: The reference count of the Container after this + * `dispose()` call. + * - numDisposedVariables: Number of `tf.Variable`s (i.e., weights) disposed + * during this `dispose()` call. 
+ * @throws {Error} If the layer is not built yet, or if the layer has already + * been disposed. + * + * @doc {heading: 'Models', 'subheading': 'Classes'} + */ + dispose() { + if (!this.built) { + throw new Error(`Cannot dispose Layer ${this.name} because it has not been ` + + `built yet.`); + } + if (this._refCount === null) { + throw new Error(`Cannot dispose Layer ${this.name} because it has not been used ` + + `yet.`); + } + this.assertNotDisposed(); + let numDisposedVariables = 0; + if (--this._refCount === 0) { + numDisposedVariables = this.disposeWeights(); + } + return { refCountAfterDispose: this._refCount, numDisposedVariables }; + } + } + /** + * Collects the input shape(s) of a list of `tf.Tensor`s or + * `tf.SymbolicTensor`s. + * + * TODO(michaelterry): Update PyKeras docs (backport). + * + * @param inputTensors List of input tensors (or single input tensor). + * + * @return List of shape tuples (or single tuple), one tuple per input. + */ + function collectInputShape(inputTensors) { + inputTensors = + toList(inputTensors); + const shapes = []; + for (const x of inputTensors) { + shapes.push(x.shape); + } + return singletonOrArray(shapes); + } + /** + * Guesses output dtype based on inputs. + * + * At present, just returns 'float32' for any input. + * + * @param inputTensors List of input tensors (or single input tensor). + * + * @return The guessed DType. At present, always returns 'float32'. + */ + function guessOutputDType(inputTensors) { + return 'float32'; + } + /** + * Returns the list of input tensors necessary to compute `tensor`. + * + * Output will always be a list of tensors (potentially with 1 element). + * + * @param tensor The tensor to start from. + * @param layer Origin layer of the tensor. + * @param nodeIndex Origin node index of the tensor. + * + * @return Array of input tensors. 
+ */ + function getSourceInputs(tensor, layer, nodeIndex) { + if (layer == null || (nodeIndex != null && nodeIndex > 0)) { + layer = tensor.sourceLayer; + nodeIndex = tensor.nodeIndex; + } + if (layer.inboundNodes.length === 0) { + return [tensor]; + } + else { + const node = layer.inboundNodes[nodeIndex]; + if (node.inboundLayers.length === 0) { + return node.inputTensors; + } + else { + const sourceTensors = []; + for (let i = 0; i < node.inboundLayers.length; i++) { + const x = node.inputTensors[i]; + const layer = node.inboundLayers[i]; + const nodeIndex = node.nodeIndices[i]; + const previousSources = getSourceInputs(x, layer, nodeIndex); + // Avoid input redundancy. + for (const x of previousSources) { + if (sourceTensors.indexOf(x) === -1) { + sourceTensors.push(x); + } + } + } + return sourceTensors; + } + } + } + function checkAllSymbolic(tensors) { + let allAreSymbolic = true; + for (const tensor of toList(tensors)) { + if (!(tensor instanceof SymbolicTensor)) { + allAreSymbolic = false; + break; + } + } + return allAreSymbolic; + } + function checkNoneSymbolic(tensors) { + let noneAreSymbolic = true; + for (const tensor of toList(tensors)) { + if (tensor instanceof SymbolicTensor) { + noneAreSymbolic = false; + break; + } + } + return noneAreSymbolic; + } + + /** + * @license + * Copyright 2018 Google LLC + * + * Use of this source code is governed by an MIT-style + * license that can be found in the LICENSE file or at + * https://opensource.org/licenses/MIT. + * ============================================================================= + */ + class InputLayer extends Layer { + constructor(args) { + super({ + dtype: args.dtype, + name: args.name != null ? 
args.name : getUid('input').toString() + }); + // Normalize config.batchSize and config.sparse + if (args.batchSize == null) { + args.batchSize = null; + } + if (args.sparse == null) { + args.sparse = false; + } + this.trainable = false; + this.built = true; + this.sparse = args.sparse; + if (args.inputShape != null && args.batchInputShape != null) { + throw new ValueError('Only provide the inputShape OR ' + + 'batchInputShape argument to inputLayer, not both at the same time.'); + } + let batchInputShape = args.batchInputShape; + if (batchInputShape == null) { + if (args.inputShape == null) { + throw new ValueError('An InputLayer should be passed either a ' + + '`batchInputShape` or an `inputShape`.'); + } + else { + batchInputShape = [args.batchSize].concat(args.inputShape); + } + } + else { + // TODO(michaelterry): Backport to PyKeras + if (args.batchSize != null) { + throw new ValueError('Cannot specify batchSize if batchInputShape is ' + + 'specified when creating an InputLayer.'); + } + } + const dtype = args.dtype || 'float32'; + this.batchInputShape = batchInputShape; + this.dtype = dtype; + // TODO(michaelterry): Backport this to PyKeras? + this.inputSpec = [{ shape: batchInputShape }]; + const inputTensor = new SymbolicTensor(this.dtype, this.batchInputShape, this, [], {}, this.name); + inputTensor.nodeIndex = 0; + inputTensor.tensorIndex = 0; + // Create an input node to add to this.outboundNode. + // (This call has side effects.) + // tslint:disable-next-line:no-unused-expression + new Node({ + outboundLayer: this, + inboundLayers: [], + nodeIndices: [], + tensorIndices: [], + inputTensors: [inputTensor], + outputTensors: [inputTensor], + inputMasks: [null], + outputMasks: [null], + inputShapes: [batchInputShape], + outputShapes: [batchInputShape] + }); + } + apply(inputs, kwargs) { + throw new ValueError('Cannot pass any input to an ' + + `InputLayer's apply() method. 
InputLayer name: ${this.name}`); + } + dispose() { + // dispose() for InputLayer is overridden as no-op. + return { refCountAfterDispose: this._refCount, numDisposedVariables: 0 }; + } + getConfig() { + return { + batchInputShape: this.batchInputShape, + dtype: this.dtype, + sparse: this.sparse, + name: this.name + }; + } + } + /** @nocollapse */ + InputLayer.className = 'InputLayer'; + registerClass(InputLayer); + function Input(config) { + if (config.batchShape == null && config.shape == null) { + throw new Error('Please provide to Input either a `shape`' + + ' or a `batchShape` argument. Note that ' + + '`shape` does not include the batch ' + + 'dimension.'); + } + if (config.batchShape != null && config.shape != null) { + // TODO(michaelterry): Backport to PyKeras. + throw new ValueError('Please provide either a `shape` or `batchShape` ' + + 'argument to Input, but not both.'); + } + let batchShape = config.batchShape; + if (config.shape != null && batchShape == null) { + batchShape = [null].concat(config.shape); + } + let dtype = config.dtype; + if (dtype == null) { + dtype = 'float32'; + } + const inputLayer = new InputLayer({ + batchInputShape: batchShape, + name: config.name, + dtype, + sparse: config.sparse + }); + const outputs = inputLayer.inboundNodes[0].outputTensors; + return outputs[0]; + } + + /** + * @license + * Copyright 2018 Google LLC + * + * Use of this source code is governed by an MIT-style + * license that can be found in the LICENSE file or at + * https://opensource.org/licenses/MIT. + * ============================================================================= + */ + /** + * Helper function to check the dtype and shape compatibility of a feed value. + */ + function assertFeedCompatibility(key, val) { + // Check dtype compatibility. + if (key.dtype == null || key.dtype === val.dtype) { + // a. If types match, return val tensor as is. + return val; + } + try { + // b. Attempt to convert to expected type. 
+ return cast$3(val, key.dtype); + } + catch (err) { + // c. If conversion fails, return helpful error. + throw new ValueError(`The dtype of the feed (${val.dtype}) can not be cast to the dtype ` + + `of the key '${key.name}' (${key.dtype}).`); + } + } + /** + * FeedDict: A mapping from unique SymbolicTensors to feed values for them. + * A feed value is a concrete value represented as an `Tensor`. + */ + class FeedDict { + /** + * Constructor, optionally does copy-construction. + * @param feeds An Array of `Feed`s, or another `FeedDict`, in which case + * copy-construction will be performed. + */ + constructor(feeds) { + this.id2Value = {}; + this.id2Mask = {}; + this.name2Id = {}; + if (feeds instanceof FeedDict) { + for (const id in feeds.id2Value) { + this.id2Value[id] = feeds.id2Value[id]; + if (id in feeds.id2Mask) { + this.id2Mask[id] = feeds.id2Mask[id]; + } + } + } + else { + if (feeds == null) { + return; + } + for (const feed of feeds) { + this.add(feed.key, feed.value); + } + } + } + /** + * Add a key-value pair to the FeedDict. + * + * @param key The key of the feed. + * @param value The value of the tensor feed. + * @param mask The value of the mask feed (optional). + * @returns This `FeedDict`. + * @throws ValueError: If the key `SymbolicTensor` already exists in the + * `FeedDict`. + */ + add(key, value, mask) { + if (this.id2Value[key.id] == null) { + this.id2Value[key.id] = assertFeedCompatibility(key, value); + this.name2Id[key.name] = key.id; + if (mask != null) { + this.id2Mask[key.id] = mask; + } + } + else { + throw new ValueError(`Duplicate key: name=${key.name}, id=${key.id}`); + } + return this; + } + /** + * Add a Feed to the FeedDict. + * @param feed The new `Feed` to add. + * @returns This `FeedDict`. + */ + addFeed(feed) { + this.add(feed.key, feed.value); + } + /** + * Probe whether a key already exists in the FeedDict. 
+ * @param key + */ + hasKey(key) { + return this.id2Value[key.id] != null; + } + /** + * Get all the SymbolicTensor available in this FeedDict. + */ + names() { + return Object.keys(this.name2Id); + } + /** + * Get the feed value for given key. + * @param key The SymbolicTensor, or its name (as a string), of which the + * value is sought. + * @returns If `key` exists, the corresponding feed value. + * @throws ValueError: If `key` does not exist in this `FeedDict`. + */ + getValue(key) { + if (key instanceof SymbolicTensor) { + if (this.id2Value[key.id] == null) { + throw new ValueError(`Nonexistent key: ${key.name}`); + } + else { + return this.id2Value[key.id]; + } + } + else { + const id = this.name2Id[key]; + if (id == null) { + throw new ValueError(`Feed dict has no SymbolicTensor name: ${key}`); + } + return this.id2Value[id]; + } + } + /** + * Get the feed mask for given key. + * @param key The SymbolicTensor, or its name (as a string), of which the + * value is sought. + * @returns If `key` exists, the corresponding feed mask. + * @throws ValueError: If `key` does not exist in this `FeedDict`. + */ + getMask(key) { + if (key instanceof SymbolicTensor) { + if (this.id2Value[key.id] == null) { + throw new ValueError(`Nonexistent key: ${key.name}`); + } + else { + return this.id2Mask[key.id]; + } + } + else { + const id = this.name2Id[key]; + if (id == null) { + throw new ValueError(`Feed dict has no SymbolicTensor name: ${key}`); + } + return this.id2Mask[id]; + } + } + /** Dispose all mask Tensors held by this object. */ + disposeMasks() { + if (this.id2Mask != null) { + dispose(this.id2Mask); + } + } + } + // Cache for topologically sorted SymbolicTensors for given execution + // targets (i.e., fetches). + const cachedSorted = new LruCache(); + // Cache for recipient count maps for given execution targets (i.e., fetches). 
+ const cachedRecipientCounts = new LruCache(); + function updateCacheMaxEntries(maxEntries) { + if (cachedSorted != null) { + cachedSorted.setMaxEntries(maxEntries); + } + if (cachedRecipientCounts != null) { + cachedRecipientCounts.setMaxEntries(maxEntries); + } + } + /** + * Execute a SymbolicTensor by using concrete feed values. + * + * A `SymbolicTensor` object is a node in a computation graph of TF.js + * Layers. The object is backed by a source layer and input + * `SymbolicTensor`s to the source layer. This method evaluates + * the `call()` method of the source layer, using concrete values of the + * inputs obtained from either + * * `feedDict`, if the input key exists in `feedDict`, or else, + * * a recursive call to `execute()` itself. + * + * @param x: The `SymbolicTensor` to execute. + * @param feedDict: The feed values, as base condition of the recursion. + * execution. + * @param kwargs: Optional keyword arguments. + * @param probe: A probe object (of interface `ExecutionProbe`) used for + * testing memory footprint of `execute` calls. + * @returns Result of the execution. + * @throws ValueError: If any `SymbolicTensor`s from `InputLayer`s + * encountered during the execution lacks a feed value in `feedDict`. + */ + function execute(fetches, feedDict, kwargs, probe) { + const training = kwargs == null ? false : kwargs['training']; + const arrayFetches = Array.isArray(fetches); + const fetchArray = arrayFetches ? fetches : [fetches]; + const outputNames = fetchArray.map(t => t.name); + const finalOutputs = []; + const feedNames = feedDict.names(); + for (const outputName of outputNames) { + if (feedNames.indexOf(outputName) !== -1) { + finalOutputs.push(feedDict.getValue(outputName)); + } + else { + finalOutputs.push(null); + } + } + if (probe != null) { + // For optional probing of memory footprint during execution. + probe.maxNumTensors = -Infinity; + probe.minNumTensors = Infinity; + } + // Check cache. 
+ const fetchAndFeedKey = outputNames.join(',') + '|' + feedDict.names().sort().join(','); + let sorted = cachedSorted.get(fetchAndFeedKey); + let recipientCounts; + if (sorted == null) { + // Cache doesn't contain the desired combination of fetches. Compute + // topological sort for the combination for the first time. + const out = getTopologicalSortAndRecipientCounts(fetchArray, feedDict); + sorted = out.sorted; + recipientCounts = out.recipientCounts; + // Store results in cache for future use. + cachedSorted.put(fetchAndFeedKey, sorted); + cachedRecipientCounts.put(fetchAndFeedKey, recipientCounts); + } + recipientCounts = {}; + if (!training) { + Object.assign(recipientCounts, cachedRecipientCounts.get(fetchAndFeedKey)); + } + const internalFeedDict = new FeedDict(feedDict); + // Start iterative execution on the topologically-sorted SymbolicTensors. + for (let i = 0; i < sorted.length; ++i) { + if (probe != null) { + // For optional probing of memory usage during execution. + const numTensors = memory().numTensors; + if (numTensors > probe.maxNumTensors) { + probe.maxNumTensors = numTensors; + } + if (numTensors < probe.minNumTensors) { + probe.minNumTensors = numTensors; + } + } + const symbolic = sorted[i]; + const srcLayer = symbolic.sourceLayer; + if (srcLayer instanceof InputLayer) { + continue; + } + const inputValues = []; + const inputMasks = []; + const tensorsToDispose = []; + let maskExists = false; + for (const input of symbolic.inputs) { + const value = internalFeedDict.getValue(input); + const mask = internalFeedDict.getMask(input); + inputValues.push(value); + inputMasks.push(mask); + if (mask != null) { + maskExists = true; + } + if (!training) { + recipientCounts[input.name]--; + if (recipientCounts[input.name] === 0 && !feedDict.hasKey(input) && + outputNames.indexOf(input.name) === -1 && !value.isDisposed && + input.sourceLayer.stateful !== true) { + tensorsToDispose.push(value); + } + } + } + if (maskExists) { + kwargs = kwargs || {}; + 
kwargs['mask'] = inputMasks[0]; + } + const outputTensors = toList(srcLayer.apply(inputValues, kwargs)); + let outputMask = null; + if (srcLayer.supportsMasking) { + outputMask = srcLayer.computeMask(inputValues, inputMasks); + } + const layerOutputs = getNodeOutputs(symbolic); + const outputSymbolicTensors = Array.isArray(layerOutputs) ? layerOutputs : [layerOutputs]; + for (let i = 0; i < outputSymbolicTensors.length; ++i) { + if (!internalFeedDict.hasKey(outputSymbolicTensors[i])) { + internalFeedDict.add(outputSymbolicTensors[i], outputTensors[i], Array.isArray(outputMask) ? outputMask[0] : outputMask); + } + const index = outputNames.indexOf(outputSymbolicTensors[i].name); + if (index !== -1) { + finalOutputs[index] = outputTensors[i]; + } + } + if (!training) { + // Clean up Tensors that are no longer needed. + dispose(tensorsToDispose); + } + } + // NOTE(cais): Unlike intermediate tensors, we don't discard mask + // tensors as we go, because these tensors are sometimes passed over a + // series of mutliple layers, i.e., not obeying the immediate input + // relations in the graph. If this becomes a memory-usage concern, + // we can improve this in the future. + internalFeedDict.disposeMasks(); + return arrayFetches ? finalOutputs : finalOutputs[0]; + } + /** + * Sort the `SymbolicTensor`s topologically, for an array of fetches. + * + * This function calls getTopologicalSortAndRecipientCountsForOneFetch and + * merges their results. + * + * @param fetch The array of fetches requested. Must be a non-empty array. + * @param feedDict The dictionary of fed values. + * @returns sorted: Topologically-sorted array of SymbolicTensors. + * recipientCounts: Recipient counts for all SymbolicTensors in `sorted`. 
+ */ + function getTopologicalSortAndRecipientCounts(fetches, feedDict) { + assert$1(fetches != null && fetches.length > 0, () => `Expected at least one fetch, got none`); + let finalSorted = []; + let finalRecipientMap = {}; + if (fetches.length === 1) { + // Special-casing 1 fetch for efficiency. + const out = getTopologicalSortAndRecipientCountsForOneFetch(fetches[0], feedDict); + finalSorted = out.sorted; + finalRecipientMap = out.recipientMap; + } + else { + const visited = new Set(); + for (const fetch of fetches) { + const { sorted, recipientMap } = getTopologicalSortAndRecipientCountsForOneFetch(fetch, feedDict); + // Merge sorted SymbolicTensor Arrays. + for (const symbolicTensor of sorted) { + if (!visited.has(symbolicTensor.name)) { + finalSorted.push(symbolicTensor); + visited.add(symbolicTensor.name); + } + } + // Merge recipient maps. + for (const name in recipientMap) { + if (finalRecipientMap[name] == null) { + finalRecipientMap[name] = new Set(); + } + recipientMap[name].forEach(recipient => finalRecipientMap[name].add(recipient)); + } + } + } + return { + sorted: finalSorted, + recipientCounts: recipientMap2Counts(finalRecipientMap) + }; + } + function recipientMap2Counts(recipientMap) { + const recipientCounts = {}; + for (const name in recipientMap) { + recipientCounts[name] = recipientMap[name].size; + } + return recipientCounts; + } + /** + * Sort the `SymbolicTensor`s topologically, for a single fetch. + * + * This helper function processes the upstream SymbolicTensors of a single + * fetch. + * + * @param fetch The single fetch requested. + * @param feedDict The dictionary of fed values. + * @returns sorted: Topologically-sorted array of SymbolicTensors. + * recipientMap: Recipient names for all SymbolicTensors in `sorted`. 
+ */ + function getTopologicalSortAndRecipientCountsForOneFetch(fetch, feedDict) { + const visited = new Set(); + const sorted = []; + const recipientMap = {}; + // Put keys of the feedDict into visited first, so they don't have to be + // walked. This is needed in case where there are feeds for intermediate + // SymbolicTensors of the graph. + for (const key of feedDict.names()) { + visited.add(key); + } + const stack = []; + const marks = []; + // Initial population of stack and marks. + stack.push(fetch); + while (stack.length > 0) { + const top = stack[stack.length - 1]; + if (visited.has(top.name)) { + stack.pop(); + continue; + } + const topIsMarked = marks[marks.length - 1] === stack.length - 1; + if (top.inputs.length === 0 || topIsMarked) { + // Input SymbolicTensor or all children have been visited. + stack.pop(); + sorted.push(top); + visited.add(top.name); + if (topIsMarked) { + marks.pop(); + } + } + else { + // A non-input SymbolicTensor whose upstream SymbolicTensors haven't + // been visited yet. Push them onto the stack. + marks.push(stack.length - 1); + for (const input of top.inputs) { + // Increment the recipient count. Note that this needs to happen + // regardless of whether the SymbolicTensor has been visited before. + if (recipientMap[input.name] == null) { + recipientMap[input.name] = new Set(); + } + recipientMap[input.name].add(top.name); + if (visited.has(input.name)) { + continue; // Avoid repeated visits to the same SymbolicTensor. + } + stack.push(input); + } + } + } + return { sorted, recipientMap }; + } + /** + * Get the symbolic output tensors of the node to which a given fetch belongs. + * @param fetch The fetched symbolic tensor. + * @returns The Array of symbolic tensors output by the node to which `fetch` + * belongs. 
+ */ + function getNodeOutputs(fetch) { + let layerOutputs; + if (fetch.sourceLayer.inboundNodes.length === 1) { + layerOutputs = fetch.sourceLayer.output; + } + else { + let nodeIndex = null; + for (let i = 0; i < fetch.sourceLayer.inboundNodes.length; ++i) { + for (const outputTensor of fetch.sourceLayer.inboundNodes[i] + .outputTensors) { + if (outputTensor.id === fetch.id) { + nodeIndex = i; + break; + } + } + } + layerOutputs = fetch.sourceLayer.getOutputAt(nodeIndex); + } + return layerOutputs; + } + + /** + * @license + * Copyright 2022 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + const ENV$2 = env(); + /** The max number of entries for the caches of layers' topological sort. */ + ENV$2.registerFlag('TOPOLOGICAL_SORT_CACHE_MAX_ENTRIES', () => 100, updateCacheMaxEntries); + + /** + * @license + * Copyright 2018 Google LLC + * + * Use of this source code is governed by an MIT-style + * license that can be found in the LICENSE file or at + * https://opensource.org/licenses/MIT. + * ============================================================================= + */ + /** + * Helper function used by many of the Constraints to find the L2Norms. 
+ */ + function calcL2Norms(w, axis) { + return tidy(() => sqrt$2(sum$3(mul(w, w), axis, true))); + } + /** + * Base class for functions that impose constraints on weight values + * + * @doc { + * heading: 'Constraints', + * subheading: 'Classes', + * namespace: 'constraints' + * } + */ + class Constraint extends Serializable { + getConfig() { + return {}; + } + } + class MaxNorm extends Constraint { + constructor(args) { + super(); + this.defaultMaxValue = 2; + this.defaultAxis = 0; + this.maxValue = + args.maxValue != null ? args.maxValue : this.defaultMaxValue; + this.axis = args.axis != null ? args.axis : this.defaultAxis; + } + apply(w) { + return tidy(() => { + const norms = calcL2Norms(w, this.axis); + const desired = clipByValue$2(norms, 0, this.maxValue); + return mul(w, div$1(desired, add$3(epsilon$1(), norms))); + }); + } + getConfig() { + return { maxValue: this.maxValue, axis: this.axis }; + } + } + /** @nocollapse */ + MaxNorm.className = 'MaxNorm'; + registerClass(MaxNorm); + class UnitNorm extends Constraint { + constructor(args) { + super(); + this.defaultAxis = 0; + this.axis = args.axis != null ? args.axis : this.defaultAxis; + } + apply(w) { + return tidy(() => div$1(w, add$3(epsilon$1(), calcL2Norms(w, this.axis)))); + } + getConfig() { + return { axis: this.axis }; + } + } + /** @nocollapse */ + UnitNorm.className = 'UnitNorm'; + registerClass(UnitNorm); + class NonNeg extends Constraint { + apply(w) { + return relu$2(w); + } + } + /** @nocollapse */ + NonNeg.className = 'NonNeg'; + registerClass(NonNeg); + class MinMaxNorm extends Constraint { + constructor(args) { + super(); + this.defaultMinValue = 0.0; + this.defaultMaxValue = 1.0; + this.defaultRate = 1.0; + this.defaultAxis = 0; + this.minValue = + args.minValue != null ? args.minValue : this.defaultMinValue; + this.maxValue = + args.maxValue != null ? args.maxValue : this.defaultMaxValue; + this.rate = args.rate != null ? args.rate : this.defaultRate; + this.axis = args.axis != null ? 
args.axis : this.defaultAxis; + } + apply(w) { + return tidy(() => { + const norms = calcL2Norms(w, this.axis); + const desired = add$3(mul(this.rate, clipByValue$2(norms, this.minValue, this.maxValue)), mul(1.0 - this.rate, norms)); + return mul(w, div$1(desired, add$3(epsilon$1(), norms))); + }); + } + getConfig() { + return { + minValue: this.minValue, + maxValue: this.maxValue, + rate: this.rate, + axis: this.axis + }; + } + } + /** @nocollapse */ + MinMaxNorm.className = 'MinMaxNorm'; + registerClass(MinMaxNorm); + // Maps the JavaScript-like identifier keys to the corresponding registry + // symbols. + const CONSTRAINT_IDENTIFIER_REGISTRY_SYMBOL_MAP = { + 'maxNorm': 'MaxNorm', + 'minMaxNorm': 'MinMaxNorm', + 'nonNeg': 'NonNeg', + 'unitNorm': 'UnitNorm' + }; + function serializeConstraint(constraint) { + return serializeKerasObject(constraint); + } + function deserializeConstraint(config, customObjects = {}) { + return deserializeKerasObject(config, SerializationMap.getMap().classNameMap, customObjects, 'constraint'); + } + function getConstraint(identifier) { + if (identifier == null) { + return null; + } + if (typeof identifier === 'string') { + const className = identifier in CONSTRAINT_IDENTIFIER_REGISTRY_SYMBOL_MAP ? + CONSTRAINT_IDENTIFIER_REGISTRY_SYMBOL_MAP[identifier] : + identifier; + const config = { className, config: {} }; + return deserializeConstraint(config); + } + else if (identifier instanceof Constraint) { + return identifier; + } + else { + return deserializeConstraint(identifier); + } + } + + /** + * @license + * Copyright 2018 Google LLC + * + * Use of this source code is governed by an MIT-style + * license that can be found in the LICENSE file or at + * https://opensource.org/licenses/MIT. + * ============================================================================= + */ + /** + * MaxNorm weight constraint. + * + * Constrains the weights incident to each hidden unit + * to have a norm less than or equal to a desired value. 
+ * + * References + * - [Dropout: A Simple Way to Prevent Neural Networks from Overfitting + * Srivastava, Hinton, et al. + * 2014](http://www.cs.toronto.edu/~rsalakhu/papers/srivastava14a.pdf) + * + * @doc {heading: 'Constraints',namespace: 'constraints'} + */ + function maxNorm(args) { + return new MaxNorm(args); + } + /** + * Constrains the weights incident to each hidden unit to have unit norm. + * + * @doc {heading: 'Constraints', namespace: 'constraints'} + */ + function unitNorm(args) { + return new UnitNorm(args); + } + /** + * Constrains the weight to be non-negative. + * + * @doc {heading: 'Constraints', namespace: 'constraints'} + */ + function nonNeg() { + return new NonNeg(); + } + /** @doc {heading: 'Constraints', namespace: 'constraints'} */ + function minMaxNorm(config) { + return new MinMaxNorm(config); + } + + var exports_constraints = /*#__PURE__*/Object.freeze({ + __proto__: null, + maxNorm: maxNorm, + minMaxNorm: minMaxNorm, + nonNeg: nonNeg, + unitNorm: unitNorm + }); + + /** + * @license + * Copyright 2018 Google LLC + * + * Use of this source code is governed by an MIT-style + * license that can be found in the LICENSE file or at + * https://opensource.org/licenses/MIT. + * ============================================================================= + */ + /** + * Initializer that generates tensors initialized to 0. + * + * @doc {heading: 'Initializers', namespace: 'initializers'} + */ + function zeros$1() { + return new Zeros(); + } + /** + * Initializer that generates tensors initialized to 1. + * + * @doc {heading: 'Initializers', namespace: 'initializers'} + */ + function ones() { + return new Ones(); + } + /** + * Initializer that generates values initialized to some constant. + * + * @doc {heading: 'Initializers', namespace: 'initializers'} + */ + function constant(args) { + return new Constant(args); + } + /** + * Initializer that generates random values initialized to a uniform + * distribution. 
+ * + * Values will be distributed uniformly between the configured minval and + * maxval. + * + * @doc {heading: 'Initializers', namespace: 'initializers'} + */ + function randomUniform(args) { + return new RandomUniform(args); + } + /** + * Initializer that generates random values initialized to a normal + * distribution. + * + * @doc {heading: 'Initializers', namespace: 'initializers'} + */ + function randomNormal(args) { + return new RandomNormal(args); + } + /** + * Initializer that generates random values initialized to a truncated normal + * distribution. + * + * These values are similar to values from a `RandomNormal` except that values + * more than two standard deviations from the mean are discarded and re-drawn. + * This is the recommended initializer for neural network weights and filters. + * + * @doc {heading: 'Initializers', namespace: 'initializers'} + */ + function truncatedNormal(args) { + return new TruncatedNormal(args); + } + /** + * Initializer that generates the identity matrix. + * Only use for square 2D matrices. + * + * @doc {heading: 'Initializers', namespace: 'initializers'} + */ + function identity$2(args) { + return new Identity(args); + } + /** + * Initializer capable of adapting its scale to the shape of weights. + * With distribution=NORMAL, samples are drawn from a truncated normal + * distribution centered on zero, with `stddev = sqrt(scale / n)` where n is: + * - number of input units in the weight tensor, if mode = FAN_IN. + * - number of output units, if mode = FAN_OUT. + * - average of the numbers of input and output units, if mode = FAN_AVG. + * With distribution=UNIFORM, + * samples are drawn from a uniform distribution + * within [-limit, limit], with `limit = sqrt(3 * scale / n)`. + * + * @doc {heading: 'Initializers',namespace: 'initializers'} + */ + function varianceScaling(config) { + return new VarianceScaling(config); + } + /** + * Glorot uniform initializer, also called Xavier uniform initializer. 
+ * It draws samples from a uniform distribution within [-limit, limit] + * where `limit` is `sqrt(6 / (fan_in + fan_out))` + * where `fan_in` is the number of input units in the weight tensor + * and `fan_out` is the number of output units in the weight tensor + * + * Reference: + * Glorot & Bengio, AISTATS 2010 + * http://jmlr.org/proceedings/papers/v9/glorot10a/glorot10a.pdf. + * + * @doc {heading: 'Initializers', namespace: 'initializers'} + */ + function glorotUniform(args) { + return new GlorotUniform(args); + } + /** + * Glorot normal initializer, also called Xavier normal initializer. + * It draws samples from a truncated normal distribution centered on 0 + * with `stddev = sqrt(2 / (fan_in + fan_out))` + * where `fan_in` is the number of input units in the weight tensor + * and `fan_out` is the number of output units in the weight tensor. + * + * Reference: + * Glorot & Bengio, AISTATS 2010 + * http://jmlr.org/proceedings/papers/v9/glorot10a/glorot10a.pdf + * + * @doc {heading: 'Initializers', namespace: 'initializers'} + */ + function glorotNormal(args) { + return new GlorotNormal(args); + } + /** + * He normal initializer. + * + * It draws samples from a truncated normal distribution centered on 0 + * with `stddev = sqrt(2 / fanIn)` + * where `fanIn` is the number of input units in the weight tensor. + * + * Reference: + * He et al., http://arxiv.org/abs/1502.01852 + * + * @doc {heading: 'Initializers', namespace: 'initializers'} + */ + function heNormal(args) { + return new HeNormal(args); + } + /** + * He uniform initializer. + * + * It draws samples from a uniform distribution within [-limit, limit] + * where `limit` is `sqrt(6 / fan_in)` + * where `fanIn` is the number of input units in the weight tensor. + * + * Reference: + * He et al., http://arxiv.org/abs/1502.01852 + * + * @doc {heading: 'Initializers',namespace: 'initializers'} + */ + function heUniform(args) { + return new HeUniform(args); + } + /** + * LeCun normal initializer. 
+ * + * It draws samples from a truncated normal distribution centered on 0 + * with `stddev = sqrt(1 / fanIn)` + * where `fanIn` is the number of input units in the weight tensor. + * + * References: + * [Self-Normalizing Neural Networks](https://arxiv.org/abs/1706.02515) + * [Efficient Backprop](http://yann.lecun.com/exdb/publis/pdf/lecun-98b.pdf) + * + * @doc {heading: 'Initializers', namespace: 'initializers'} + */ + function leCunNormal(args) { + return new LeCunNormal(args); + } + /** + * LeCun uniform initializer. + * + * It draws samples from a uniform distribution in the interval + * `[-limit, limit]` with `limit = sqrt(3 / fanIn)`, + * where `fanIn` is the number of input units in the weight tensor. + * + * @doc {heading: 'Initializers', namespace: 'initializers'} + */ + function leCunUniform(args) { + return new LeCunUniform(args); + } + /** + * Initializer that generates a random orthogonal matrix. + * + * Reference: + * [Saxe et al., http://arxiv.org/abs/1312.6120](http://arxiv.org/abs/1312.6120) + * + * @doc {heading: 'Initializers', namespace: 'initializers'} + */ + function orthogonal(args) { + return new Orthogonal(args); + } + + var exports_initializers = /*#__PURE__*/Object.freeze({ + __proto__: null, + constant: constant, + glorotNormal: glorotNormal, + glorotUniform: glorotUniform, + heNormal: heNormal, + heUniform: heUniform, + identity: identity$2, + leCunNormal: leCunNormal, + leCunUniform: leCunUniform, + ones: ones, + orthogonal: orthogonal, + randomNormal: randomNormal, + randomUniform: randomUniform, + truncatedNormal: truncatedNormal, + varianceScaling: varianceScaling, + zeros: zeros$1 + }); + + /** + * @license + * Copyright 2018 Google LLC + * + * Use of this source code is governed by an MIT-style + * license that can be found in the LICENSE file or at + * https://opensource.org/licenses/MIT. 
+ * ============================================================================= + */ + /** + * Turn any Scalar values in a Logs object into actual number values. + * + * @param logs The `Logs` object to be resolved in place. + */ + async function resolveScalarsInLogs(logs) { + if (logs == null) { + return; + } + const promises = []; + const keys = []; + const scalarsToDispose = []; + for (const key in logs) { + const value = logs[key]; + if (typeof value !== 'number') { + const valueScalar = value; + promises.push(valueScalar.data()); + keys.push(key); + scalarsToDispose.push(valueScalar); + } + } + if (promises.length > 0) { + const values = await Promise.all(promises); + for (let i = 0; i < values.length; ++i) { + logs[keys[i]] = values[i][0]; + } + // Dispose the original scalar tensors. + dispose(scalarsToDispose); + } + } + /** + * Dispose all Tensors in an UnresolvedLogs object. + * + * @param logs An `UnresolvedLogs` object potentially containing `tf.Tensor`s in + * places where the values can be `tf.Tensor` or `number`. + */ + function disposeTensorsInLogs(logs) { + if (logs == null) { + return; + } + for (const key in logs) { + const value = logs[key]; + if (typeof value !== 'number') { + value.dispose(); + } + } + } + + /** + * @license + * Copyright 2018 Google LLC + * + * Use of this source code is governed by an MIT-style + * license that can be found in the LICENSE file or at + * https://opensource.org/licenses/MIT. + * ============================================================================= + */ + /** Verbosity logging level when fitting a model. */ + var ModelLoggingVerbosity; + (function (ModelLoggingVerbosity) { + ModelLoggingVerbosity[ModelLoggingVerbosity["SILENT"] = 0] = "SILENT"; + ModelLoggingVerbosity[ModelLoggingVerbosity["VERBOSE"] = 1] = "VERBOSE"; + })(ModelLoggingVerbosity || (ModelLoggingVerbosity = {})); + /** How often to yield to the main thread when training (in ms). 
*/ + const DEFAULT_YIELD_EVERY_MS = 125; + /** + * Abstract base class used to build new callbacks. + * + * The `logs` dictionary that callback methods take as argument will contain + * keys for quantities relevant to the current batch or epoch. + * + * Currently, the `.fit()` method of the `Sequential` model class + * will include the following quantities in the `logs` that + * it passes to its callbacks: + * + * onEpochEnd: Logs include `acc` and `loss`, and optionally include `valLoss` + * (if validation is enabled in `fit`), and `valAcc` (if validation and + * accuracy monitoring are enabled). + * onBatchBegin: Logs include `size`, the number of samples in the current + * batch. + * onBatchEnd: Logs include `loss`, and optionally `acc` (if accuracy monitoring + * is enabled). + */ + class BaseCallback { + constructor() { + // TODO(michaelterry): This type is a best guess. + this.validationData = null; + } + setParams(params) { + this.params = params; + } + async onEpochBegin(epoch, logs) { } + async onEpochEnd(epoch, logs) { } + async onBatchBegin(batch, logs) { } + async onBatchEnd(batch, logs) { } + async onTrainBegin(logs) { } + async onTrainEnd(logs) { } + // LayersModel needs to call Callback.setModel(), but cannot actually depend + // on Callback because that creates a cyclic dependency. Providing this no-op + // method on BaseCallback breaks the cycle: this way LayersModel can depend on + // BaseCallback but not on Callback. The argument is typed as `Container` + // (the superclass of LayersModel) to avoid recapitulating the cycle. Callback + // overrides this method and enforces that the argument is really a + // LayersModel. + setModel(model) { + // Do nothing. Use Callback instead of BaseCallback to track the model. + } + } + /** + * Container abstracting a list of callbacks. + */ + class CallbackList { + // TODO(cais): When the need arises, uncomment the following lines and + // implement the queue for time values. 
+ // private deltaTBatch: number; + // private deltaTsBatchBegin: Array; + // private deltaTsBatchEnd: Array; + /** + * Constructor of CallbackList. + * @param callbacks Array of `Callback` instances. + * @param queueLength Queue length for keeping running statistics over + * callback execution time. + */ + constructor(callbacks, queueLength = 10) { + // TODO(cais): Make use of queueLength when implementing the queue for time + // values. + if (callbacks == null) { + callbacks = []; + } + this.callbacks = callbacks; + this.queueLength = queueLength; + } + append(callback) { + this.callbacks.push(callback); + } + setParams(params) { + for (const callback of this.callbacks) { + callback.setParams(params); + } + } + setModel(model) { + for (const callback of this.callbacks) { + callback.setModel(model); + } + } + /** + * Called at the start of an epoch. + * @param epoch Index of epoch. + * @param logs Dictionary of logs. + */ + async onEpochBegin(epoch, logs) { + if (logs == null) { + logs = {}; + } + for (const callback of this.callbacks) { + await callback.onEpochBegin(epoch, logs); + } + } + /** + * Called at the end of an epoch. + * @param epoch Index of epoch. + * @param logs Dictionary of logs. + */ + async onEpochEnd(epoch, logs) { + if (logs == null) { + logs = {}; + } + for (const callback of this.callbacks) { + await callback.onEpochEnd(epoch, logs); + } + } + /** + * Called right before processing a batch. + * @param batch Index of batch within the current epoch. + * @param logs Dictionary of logs. + */ + async onBatchBegin(batch, logs) { + if (logs == null) { + logs = {}; + } + for (const callback of this.callbacks) { + await callback.onBatchBegin(batch, logs); + } + } + /** + * Called at the end of a batch. + * @param batch Index of batch within the current epoch. + * @param logs Dictionary of logs. 
+ */ + async onBatchEnd(batch, logs) { + if (logs == null) { + logs = {}; + } + for (const callback of this.callbacks) { + await callback.onBatchEnd(batch, logs); + } + } + /** + * Called at the beginning of training. + * @param logs Dictionary of logs. + */ + async onTrainBegin(logs) { + if (logs == null) { + logs = {}; + } + for (const callback of this.callbacks) { + await callback.onTrainBegin(logs); + } + } + /** + * Called at the end of training. + * @param logs Dictionary of logs. + */ + async onTrainEnd(logs) { + if (logs == null) { + logs = {}; + } + for (const callback of this.callbacks) { + await callback.onTrainEnd(logs); + } + } + } + /** + * Callback that accumulates epoch averages of metrics. + * + * This callback is automatically applied to every LayersModel. + */ + class BaseLogger extends BaseCallback { + constructor() { + super(); + } + async onEpochBegin(epoch) { + this.seen = 0; + this.totals = {}; + } + async onBatchEnd(batch, logs) { + if (logs == null) { + logs = {}; + } + const batchSize = logs['size'] == null ? 
0 : logs['size']; + this.seen += batchSize; + for (const key in logs) { + const value = logs[key]; + if (typeof value === 'number') { + if (!this.totals.hasOwnProperty(key)) { + this.totals[key] = 0; + } + this.totals[key] = this.totals[key] + value * batchSize; + } + else { + let oldTotalsToDispose; + if (key in this.totals) { + oldTotalsToDispose = this.totals[key]; + } + else { + this.totals[key] = 0; + } + const total = tidy(() => add$3((this.totals[key]), mul(value, batchSize))); + this.totals[key] = total; + if (oldTotalsToDispose != null) { + oldTotalsToDispose.dispose(); + } + } + } + } + async onEpochEnd(epoch, logs) { + if (logs != null) { + for (const key of this.params['metrics']) { + if (this.totals[key] == null) { + continue; + } + if (typeof this.totals[key] === 'number') { + logs[key] = this.totals[key] / this.seen; + } + else { + tidy(() => { + const log = mul(div$1(1, this.seen), this.totals[key]); + logs[key] = log; + this.totals[key].dispose(); + keep(logs[key]); + }); + } + } + } + } + } + /** + * Callback that records events into a `History` object. This callback is + * automatically applied to every TF.js Layers model. The `History` object + * gets returned by the `fit` method of models. + */ + class History extends BaseCallback { + async onTrainBegin(logs) { + this.epoch = []; + this.history = {}; + } + async onEpochEnd(epoch, logs) { + if (logs == null) { + logs = {}; + } + this.epoch.push(epoch); + for (const key in logs) { + if (this.history[key] == null) { + this.history[key] = []; + } + this.history[key].push(logs[key]); + } + } + /** + * Await the values of all losses and metrics. 
+ */ + async syncData() { + const promises = []; + const keys = []; + const indices = []; + for (const key in this.history) { + const valueArray = this.history[key]; + for (let i = 0; i < valueArray.length; ++i) { + if (typeof valueArray[i] !== 'number') { + const valueScalar = valueArray[i]; + promises.push(valueScalar.data()); + keys.push(key); + indices.push(i); + } + } + } + const values = await Promise.all(promises); + for (let n = 0; n < values.length; ++n) { + const tensorToDispose = this.history[keys[n]][indices[n]]; + tensorToDispose.dispose(); + this.history[keys[n]][indices[n]] = values[n][0]; + } + } + } + /** + * Custom callback for training. + */ + class CustomCallback extends BaseCallback { + constructor(args, yieldEvery) { + super(); + this.currentEpoch = 0; + this.nowFunc = args.nowFunc; + this.nextFrameFunc = args.nextFrameFunc || nextFrame; + this.yieldEvery = yieldEvery || 'auto'; + if (this.yieldEvery === 'auto') { + this.yieldEvery = DEFAULT_YIELD_EVERY_MS; + } + if (this.yieldEvery === 'never' && args.onYield != null) { + throw new Error('yieldEvery is `never` but you provided an `onYield` callback. ' + + 'Either change `yieldEvery` or remove the callback'); + } + if (isNumber(this.yieldEvery)) { + // Decorate `maybeWait` so it will be called at most once every + // `yieldEvery` ms. 
+ this.maybeWait = debounce(this.maybeWait.bind(this), this.yieldEvery, this.nowFunc); + } + this.trainBegin = args.onTrainBegin; + this.trainEnd = args.onTrainEnd; + this.epochBegin = args.onEpochBegin; + this.epochEnd = args.onEpochEnd; + this.batchBegin = args.onBatchBegin; + this.batchEnd = args.onBatchEnd; + this.yield = args.onYield; + } + async maybeWait(epoch, batch, logs) { + const ps = []; + if (this.yield != null) { + await resolveScalarsInLogs(logs); + ps.push(this.yield(epoch, batch, logs)); + } + ps.push(this.nextFrameFunc()); + await Promise.all(ps); + } + async onEpochBegin(epoch, logs) { + this.currentEpoch = epoch; + if (this.epochBegin != null) { + await resolveScalarsInLogs(logs); + await this.epochBegin(epoch, logs); + } + } + async onEpochEnd(epoch, logs) { + const ps = []; + if (this.epochEnd != null) { + await resolveScalarsInLogs(logs); + ps.push(this.epochEnd(epoch, logs)); + } + if (this.yieldEvery === 'epoch') { + ps.push(this.nextFrameFunc()); + } + await Promise.all(ps); + } + async onBatchBegin(batch, logs) { + if (this.batchBegin != null) { + await resolveScalarsInLogs(logs); + await this.batchBegin(batch, logs); + } + } + async onBatchEnd(batch, logs) { + const ps = []; + if (this.batchEnd != null) { + await resolveScalarsInLogs(logs); + ps.push(this.batchEnd(batch, logs)); + } + if (this.yieldEvery === 'batch') { + ps.push(this.nextFrameFunc()); + } + else if (isNumber(this.yieldEvery)) { + ps.push(this.maybeWait(this.currentEpoch, batch, logs)); + } + await Promise.all(ps); + } + async onTrainBegin(logs) { + if (this.trainBegin != null) { + await resolveScalarsInLogs(logs); + await this.trainBegin(logs); + } + } + async onTrainEnd(logs) { + if (this.trainEnd != null) { + await resolveScalarsInLogs(logs); + await this.trainEnd(logs); + } + } + } + /** + * Standardize callbacks or configurations of them to an Array of callbacks. 
+ */ + function standardizeCallbacks(callbacks, yieldEvery) { + if (callbacks == null) { + callbacks = {}; + } + if (callbacks instanceof BaseCallback) { + return [callbacks]; + } + if (Array.isArray(callbacks) && callbacks[0] instanceof BaseCallback) { + return callbacks; + } + // Convert custom callback configs to custom callback objects. + const callbackConfigs = toList(callbacks); + return callbackConfigs.map(callbackConfig => new CustomCallback(callbackConfig, yieldEvery)); + } + /** + * A global registry for callback constructors to be used during + * LayersModel.fit(). + */ + class CallbackConstructorRegistry { + /** + * Blocks public access to constructor. + */ + constructor() { } + /** + * Register a tf.LayersModel.fit() callback constructor. + * + * The registered callback constructor will be used to instantiate + * callbacks for every tf.LayersModel.fit() call afterwards. + * + * @param verbosityLevel Level of verbosity at which the `callbackConstructor` + * is to be reigstered. + * @param callbackConstructor A no-arg constructor for `tf.Callback`. + * @throws Error, if the same callbackConstructor has been registered before, + * either at the same or a different `verbosityLevel`. 
+ */ + static registerCallbackConstructor(verbosityLevel, callbackConstructor) { + assert$1(verbosityLevel >= 0 && Number.isInteger(verbosityLevel), () => `Verbosity level is expected to be an integer >= 0, ` + + `but got ${verbosityLevel}`); + CallbackConstructorRegistry.checkForDuplicate(callbackConstructor); + if (CallbackConstructorRegistry.constructors[verbosityLevel] == null) { + CallbackConstructorRegistry.constructors[verbosityLevel] = []; + } + CallbackConstructorRegistry.constructors[verbosityLevel].push(callbackConstructor); + } + static checkForDuplicate(callbackConstructor) { + for (const levelName in CallbackConstructorRegistry.constructors) { + const constructors = CallbackConstructorRegistry.constructors[+levelName]; + constructors.forEach(ctor => { + if (ctor === callbackConstructor) { + throw new ValueError('Duplicate callback constructor.'); + } + }); + } + } + /** + * Clear all registered callback constructors. + */ + static clear() { + CallbackConstructorRegistry.constructors = {}; + } + /** + * Create callbacks using the registered callback constructors. + * + * Given `verbosityLevel`, all constructors registered at that level or above + * will be called and the instantiated callbacks will be used. + * + * @param verbosityLevel: Level of verbosity. 
+ */ + static createCallbacks(verbosityLevel) { + const constructors = []; + for (const levelName in CallbackConstructorRegistry.constructors) { + const level = +levelName; + if (verbosityLevel >= level) { + constructors.push(...CallbackConstructorRegistry.constructors[level]); + } + } + return constructors.map(ctor => new ctor()); + } + } + CallbackConstructorRegistry.constructors = {}; + function configureCallbacks(callbacks, verbose, epochs, initialEpoch, numTrainSamples, stepsPerEpoch, batchSize, doValidation, callbackMetrics) { + const history = new History(); + const actualCallbacks = [ + new BaseLogger(), ...CallbackConstructorRegistry.createCallbacks(verbose) + ]; + if (callbacks != null) { + actualCallbacks.push(...callbacks); + } + actualCallbacks.push(history); + const callbackList = new CallbackList(actualCallbacks); + // TODO(cais): Figure out when this LayersModel instance can have a + // dynamically + // set property called 'callback_model' as in PyKeras. + callbackList.setParams({ + epochs, + initialEpoch, + samples: numTrainSamples, + steps: stepsPerEpoch, + batchSize, + verbose, + doValidation, + metrics: callbackMetrics, + }); + return { callbackList, history }; + } + + /** + * @license + * Copyright 2018 Google LLC + * + * Use of this source code is governed by an MIT-style + * license that can be found in the LICENSE file or at + * https://opensource.org/licenses/MIT. + * ============================================================================= + */ + /** + * Instantiate a layer from a config dictionary. + * @param config dict of the form {class_name: str, config: dict} + * @param customObjects dict mapping class names (or function names) + * of custom (non-Keras) objects to class/functions + * @param fastWeightInit Optional flag to use fast weight initialization + * during deserialization. This is applicable to cases in which + * the initialization will be immediately overwritten by loaded weight + * values. Default: `false`. 
+ * @returns Layer instance (may be LayersModel, Sequential, Layer...) + */ + function deserialize(config, customObjects = {}, fastWeightInit = false) { + return deserializeKerasObject(config, SerializationMap.getMap().classNameMap, customObjects, 'layer', fastWeightInit); + } + + /** + * @license + * Copyright 2018 Google LLC + * + * Use of this source code is governed by an MIT-style + * license that can be found in the LICENSE file or at + * https://opensource.org/licenses/MIT. + * ============================================================================= + */ + /** + * Normalizes a tensor wrt the L2 norm alongside the specified axis. + * @param x + * @param axis Axis along which to perform normalization. + */ + function l2Normalize(x, axis) { + return tidy(() => { + if (x.dtype !== 'float32') { + x = cast$3(x, 'float32'); + } + const squareSum = sum$3(square$1(x), axis, true); + const epsilonTensor = fill$2(squareSum.shape, epsilon$1()); + const norm = sqrt$2(maximum$4(squareSum, epsilonTensor)); + return div$1(x, norm); + }); + } + function meanSquaredError$1(yTrue, yPred) { + return tidy(() => mean$3(square$1(sub$2(yPred, yTrue)), -1)); + } + function meanAbsoluteError$1(yTrue, yPred) { + return tidy(() => mean$3(abs$2(sub$2(yPred, yTrue)), -1)); + } + function meanAbsolutePercentageError$1(yTrue, yPred) { + return tidy(() => { + const diff = sub$2(yTrue, yPred); + const clippedTrue = clipByValue$2(abs$2(yTrue), epsilon$1(), Number.MAX_VALUE); + const absResult = abs$2(div$1(diff, clippedTrue)); + return mul(100, mean$3(absResult, -1)); + }); + } + function meanSquaredLogarithmicError(yTrue, yPred) { + return tidy(() => { + const clippedPred = clipByValue$2(yPred, epsilon$1(), Number.MAX_VALUE); + const firstLog = log$2(add$3(1, clippedPred)); + const clippedTrue = clipByValue$2(yTrue, epsilon$1(), Number.MAX_VALUE); + const secondLog = log$2(add$3(1, clippedTrue)); + return mean$3(square$1(sub$2(firstLog, secondLog)), -1); + }); + } + function 
squaredHinge(yTrue, yPred) { + return tidy(() => { + const maxResult = maximum$4(0, sub$2(1, mul(yTrue, yPred))); + return mean$3(square$1(maxResult), -1); + }); + } + function hinge(yTrue, yPred) { + return tidy(() => { + const maxResult = maximum$4(0, sub$2(1, mul(yTrue, yPred))); + return mean$3(maxResult, -1); + }); + } + function categoricalHinge(yTrue, yPred) { + return tidy(() => { + const pos = sum$3(mul(yTrue, yPred), -1); + const neg = max$3(mul(sub$2(1, yTrue), yPred), -1); + return maximum$4(0, add$3(1, sub$2(neg, pos))); + }); + } + /** + * Logarithm of the hyperbolic cosine of the prediction error. + * + * `log(cosh(x))` is approximately equal to `(x ** 2) / 2` for small `x` and + * to `abs(x) - log(2)` for large `x`. This means that 'logcosh' works mostly + * like the mean squared error, but will not be so strongly affected by the + * occasional wildly incorrect prediction. + */ + function logcosh(yTrue, yPred) { + return tidy(() => { + const log2 = Math.log(2); + const predictionDiff = sub$2(yPred, yTrue); + const logcoshResult = sub$2(add$3(predictionDiff, softplus$2(mul(-2, predictionDiff))), log2); + return mean$3(logcoshResult, -1); + }); + } + function categoricalCrossentropy$2(target, output, fromLogits = false) { + return tidy(() => { + if (fromLogits) { + output = softmax$3(output); + } + else { + // scale preds so that the class probabilities of each sample sum to 1. + const outputSum = sum$3(output, output.shape.length - 1, true); + output = div$1(output, outputSum); + } + output = clipByValue$2(output, epsilon$1(), 1 - epsilon$1()); + return neg$2(sum$3(mul(cast$3(target, 'float32'), log$2(output)), output.shape.length - 1)); + }); + } + /** + * Categorical crossentropy with integer targets. + * + * @param target An integer tensor. + * @param output A tensor resulting from a softmax (unless `fromLogits` is + * `true`, in which case `output` is expected to be the logits). 
+ * @param fromLogits Boolean, whether `output` is the result of a softmax, or is + * a tensor of logits. + */ + function sparseCategoricalCrossentropy$1(target, output, fromLogits = false) { + return tidy(() => { + const flatTarget = cast$3(floor$2(flatten$1(target)), 'int32'); + output = clipByValue$2(output, epsilon$1(), 1 - epsilon$1()); + const outputShape = output.shape; + const oneHotTarget = reshape$3(oneHot$3(flatTarget, outputShape[outputShape.length - 1]), outputShape); + return categoricalCrossentropy$2(oneHotTarget, output, fromLogits); + }); + } + /** + * From TensorFlow's implementation in nn_impl.py: + * + * For brevity, let `x = logits`, `z = labels`. The logistic loss is + * z * -log(sigmoid(x)) + (1 - z) * -log(1 - sigmoid(x)) + * = z * -log(1 / (1 + exp(-x))) + (1 - z) * -log(exp(-x) / (1 + exp(-x))) + * = z * log(1 + exp(-x)) + (1 - z) * (-log(exp(-x)) + log(1 + exp(-x))) + * = z * log(1 + exp(-x)) + (1 - z) * (x + log(1 + exp(-x)) + * = (1 - z) * x + log(1 + exp(-x)) + * = x - x * z + log(1 + exp(-x)) + * For x < 0, to avoid overflow in exp(-x), we reformulate the above + * x - x * z + log(1 + exp(-x)) + * = log(exp(x)) - x * z + log(1 + exp(-x)) + * = - x * z + log(1 + exp(x)) + * Hence, to ensure stability and avoid overflow, the implementation uses this + * equivalent formulation + * max(x, 0) - x * z + log(1 + exp(-abs(x))) + * + * @param labels The labels. + * @param logits The logits. 
+ */ + function sigmoidCrossEntropyWithLogits(labels, logits) { + if (!arraysEqual(labels.shape, logits.shape)) { + throw new ValueError(`logits and labels must have the same shape, but got shapes ` + + `${JSON.stringify(labels.shape)} and ${JSON.stringify(logits.shape)}`); + } + return tidy(() => { + // The logistic loss formula from above is + // x - x * z + log(1 + exp(-x)) + // For x < 0, a more numerically stable formula is + // -x * z + log(1 + exp(x)) + // Note that these two expressions can be combined into the following: + // max(x, 0) - x * z + log(1 + exp(-abs(x))) + const reluLogits = relu$2(logits); + const negAbsLogits = neg$2(abs$2(logits)); + return add$3(sub$2(reluLogits, mul(logits, labels)), log1p$2(exp$2(negAbsLogits))); + }); + } + function binaryCrossentropy$2(yTrue, yPred) { + return tidy(() => { + let y; + y = clipByValue$2(yPred, epsilon$1(), 1 - epsilon$1()); + y = log$2(div$1(y, sub$2(1, y))); + return mean$3(sigmoidCrossEntropyWithLogits(yTrue, y), -1); + }); + } + function kullbackLeiblerDivergence(yTrue, yPred) { + return tidy(() => { + const clippedTrue = clipByValue$2(yTrue, epsilon$1(), 1); + const clippedPred = clipByValue$2(yPred, epsilon$1(), 1); + return sum$3(mul(yTrue, log$2(div$1(clippedTrue, clippedPred))), -1); + }); + } + function poisson(yTrue, yPred) { + return tidy(() => { + const logPred = log$2(add$3(epsilon$1(), yPred)); + return mean$3(sub$2(yPred, mul(yTrue, logPred)), -1); + }); + } + function cosineProximity$1(yTrue, yPred) { + return tidy(() => { + const trueNormalized = l2Normalize(yTrue, -1); + const predNormalized = l2Normalize(yPred, -1); + const trueXPred = mul(trueNormalized, predNormalized); + return neg$2(sum$3(trueXPred, -1)); + }); + } + const mse$2 = meanSquaredError$1; + const MSE$2 = meanSquaredError$1; + const mae$1 = meanAbsoluteError$1; + const MAE$1 = meanAbsoluteError$1; + const mape$2 = meanAbsolutePercentageError$1; + const MAPE$2 = meanAbsolutePercentageError$1; + const msle = 
meanSquaredLogarithmicError; + const MSLE = meanSquaredLogarithmicError; + const kld = kullbackLeiblerDivergence; + const KLD = kullbackLeiblerDivergence; + const cosine$1 = cosineProximity$1; + // TODO(michaelterry): Add deserialize() function. + const lossesMap = { + meanSquaredError: meanSquaredError$1, + meanAbsoluteError: meanAbsoluteError$1, + meanAbsolutePercentageError: meanAbsolutePercentageError$1, + meanSquaredLogarithmicError, + squaredHinge, + hinge, + categoricalHinge, + logcosh, + categoricalCrossentropy: categoricalCrossentropy$2, + sparseCategoricalCrossentropy: sparseCategoricalCrossentropy$1, + binaryCrossentropy: binaryCrossentropy$2, + kullbackLeiblerDivergence, + poisson, + cosineProximity: cosineProximity$1 + }; + // Porting note: This diverges from the PyKeras implementation and may need to + // change based on (de)serialization requirements. + function get$1(identifierOrFn) { + if (typeof identifierOrFn === 'string') { + if (identifierOrFn in lossesMap) { + return lossesMap[identifierOrFn]; + } + let errMsg = `Unknown loss ${identifierOrFn}`; + if (identifierOrFn.toLowerCase().includes('softmaxcrossentropy')) { + errMsg = `Unknown loss ${identifierOrFn}. ` + + 'Use "categoricalCrossentropy" as the string name for ' + + 'tf.losses.softmaxCrossEntropy'; + } + throw new ValueError(errMsg); + } + else { + return identifierOrFn; + } + } + + /** + * @license + * Copyright 2018 Google LLC + * + * Use of this source code is governed by an MIT-style + * license that can be found in the LICENSE file or at + * https://opensource.org/licenses/MIT. 
+ * ============================================================================= + */ + function binaryAccuracy$1(yTrue, yPred) { + return tidy(() => { + const threshold = mul(.5, onesLike$3(yPred)); + const yPredThresholded = cast$2(greater$3(yPred, threshold), yTrue.dtype); + return mean$3(equal$2(yTrue, yPredThresholded), -1); + }); + } + function categoricalAccuracy$1(yTrue, yPred) { + return tidy(() => cast$2(equal$2(argMax$2(yTrue, -1), argMax$2(yPred, -1)), 'float32')); + } + function truePositives(yTrue, yPred) { + return tidy(() => { + return cast$3(sum$3(logicalAnd$2(equal$2(yTrue, 1), equal$2(yPred, 1))), 'float32'); + }); + } + function falseNegatives(yTrue, yPred) { + return tidy(() => { + return cast$3(sum$3(logicalAnd$2(equal$2(yTrue, 1), equal$2(yPred, 0))), 'float32'); + }); + } + function falsePositives(yTrue, yPred) { + return tidy(() => { + return cast$3(sum$3(logicalAnd$2(equal$2(yTrue, 0), equal$2(yPred, 1))), 'float32'); + }); + } + function precision$1(yTrue, yPred) { + return tidy(() => { + const tp = truePositives(yTrue, yPred); + const fp = falsePositives(yTrue, yPred); + const denominator = add$3(tp, fp); + return cast$3(where(greater$3(denominator, 0), div$1(tp, denominator), 0), 'float32'); + }); + } + function recall$1(yTrue, yPred) { + return tidy(() => { + const tp = truePositives(yTrue, yPred); + const fn = falseNegatives(yTrue, yPred); + const denominator = add$3(tp, fn); + return cast$3(where(greater$3(denominator, 0), div$1(tp, denominator), 0), 'float32'); + }); + } + function binaryCrossentropy$1(yTrue, yPred) { + return binaryCrossentropy$2(yTrue, yPred); + } + function sparseCategoricalAccuracy$1(yTrue, yPred) { + if (yTrue.rank === yPred.rank) { + yTrue = squeeze(yTrue, [yTrue.rank - 1]); + } + yPred = argMax$2(yPred, -1); + if (yPred.dtype !== yTrue.dtype) { + yPred = cast$3(yPred, yTrue.dtype); + } + return cast$3(equal$2(yTrue, yPred), 'float32'); + } + function topKCategoricalAccuracy(yTrue, yPred) { + throw new 
NotImplementedError(); + } + function sparseTopKCategoricalAccuracy(yTrue, yPred) { + throw new NotImplementedError(); + } + function r2Score$1(yTrue, yPred) { + return tidy(() => { + const sumSquaresResiduals = yTrue.sub(yPred).square().sum(); + const sumSquares = yTrue.sub(yTrue.mean()).square().sum(); + return scalar(1).sub(sumSquaresResiduals.div(sumSquares)); + }); + } + // Aliases. + const mse$1 = meanSquaredError$1; + const MSE$1 = meanSquaredError$1; + const mae = meanAbsoluteError$1; + const MAE = meanAbsoluteError$1; + const mape$1 = meanAbsolutePercentageError$1; + const MAPE$1 = meanAbsolutePercentageError$1; + const categoricalCrossentropy$1 = categoricalCrossentropy$2; + const cosine = cosineProximity$1; + const sparseCategoricalCrossentropy = sparseCategoricalCrossentropy$1; + // TODO(cais, nielsene): Add serialize(). + const metricsMap = { + binaryAccuracy: binaryAccuracy$1, + categoricalAccuracy: categoricalAccuracy$1, + precision: precision$1, + categoricalCrossentropy: categoricalCrossentropy$1, + sparseCategoricalCrossentropy, + mse: mse$1, + MSE: MSE$1, + mae, + MAE, + mape: mape$1, + MAPE: MAPE$1, + cosine + }; + function get(identifier) { + if (typeof identifier === 'string' && identifier in metricsMap) { + return metricsMap[identifier]; + } + else if (typeof identifier !== 'string' && identifier != null) { + return identifier; + } + else { + throw new ValueError(`Unknown metric ${identifier}`); + } + } + /** + * Get the shortcut function name. + * + * If the fn name is a string, + * directly return the string name. + * If the function is included in metricsMap or lossesMap, + * return key of the map. + * - If the function relative to multiple keys, + * return the first found key as the function name. + * - If the function exists in both lossesMap and metricsMap, + * search lossesMap first. + * If the function is not included in metricsMap or lossesMap, + * return the function name. 
+ * + * @param fn loss function, metric function, or short cut name. + * @returns Loss or Metric name in string. + */ + function getLossOrMetricName(fn) { + assert(fn !== null, `Unknown LossOrMetricFn ${fn}`); + if (typeof fn === 'string') { + return fn; + } + else { + let fnName; + for (const key of Object.keys(lossesMap)) { + if (lossesMap[key] === fn) { + fnName = key; + break; + } + } + if (fnName !== undefined) { + return fnName; + } + for (const key of Object.keys(metricsMap)) { + if (metricsMap[key] === fn) { + fnName = key; + break; + } + } + if (fnName !== undefined) { + return fnName; + } + return fn.name; + } + } + + /** + * @license + * Copyright 2018 Google LLC + * + * Use of this source code is governed by an MIT-style + * license that can be found in the LICENSE file or at + * https://opensource.org/licenses/MIT. + * ============================================================================= + */ + // Add (de)serialize() + // Porting note: This diverges from the PyKeras implementation and may need to + // change based on (de)serialization requirements. 
+ function getOptimizer(identifier) { + const optimizerMap = { + 'Adagrad': () => train.adagrad(0.01), + 'Adadelta': () => train.adadelta(1, 0.95, epsilon$1()), + 'Adam': () => train.adam(0.001, 0.9, 0.999, epsilon$1()), + 'Adamax': () => train.adamax(0.002, 0.9, 0.999, epsilon$1(), 0), + 'RMSProp': () => train.rmsprop(0.001, 0.9, 0, epsilon$1()), + 'SGD': () => train.sgd(0.01) + }; + optimizerMap['adagrad'] = optimizerMap['Adagrad']; + optimizerMap['adadelta'] = optimizerMap['Adadelta']; + optimizerMap['adam'] = optimizerMap['Adam']; + optimizerMap['adamax'] = optimizerMap['Adamax']; + optimizerMap['rmsprop'] = optimizerMap['RMSProp']; + optimizerMap['sgd'] = optimizerMap['SGD']; + if (identifier in optimizerMap) { + return optimizerMap[identifier](); + } + throw new ValueError(`Unknown Optimizer ${identifier}`); + } + + /** + * @license + * Copyright 2019 Google LLC + * + * Use of this source code is governed by an MIT-style + * license that can be found in the LICENSE file or at + * https://opensource.org/licenses/MIT. + * ============================================================================= + */ + /** Utility functions related to user-defined metadata. */ + // Maximum recommended serialized size for user-defined metadata. + // Beyond this limit, a warning message will be printed during model loading and + // saving. + const MAX_USER_DEFINED_METADATA_SERIALIZED_LENGTH = 1 * 1024 * 1024; + /** + * Check validity of user-defined metadata. + * + * @param userDefinedMetadata + * @param modelName Name of the model that the user-defined metadata belongs to. + * Used during construction of error messages. + * @param checkSize Whether to check the size of the metadata is under + * recommended limit. Default: `false`. If `true`, will try stringify the + * JSON object and print a console warning if the serialzied size is above the + * limit. + * @throws Error if `userDefinedMetadata` is not a plain JSON object. 
+ */ + function checkUserDefinedMetadata(userDefinedMetadata, modelName, checkSize = false) { + if (userDefinedMetadata == null || + typeof userDefinedMetadata !== 'object' || + Object.getPrototypeOf(userDefinedMetadata) !== Object.prototype || + !plainObjectCheck(userDefinedMetadata)) { + throw new Error('User-defined metadata is expected to be a JSON object, but is not.'); + } + if (checkSize) { + const out = JSON.stringify(userDefinedMetadata); + if (out.length > MAX_USER_DEFINED_METADATA_SERIALIZED_LENGTH) { + console.warn(`User-defined metadata of model "${modelName}" is too large in ` + + `size (length=${out.length} when serialized). It is not ` + + `recommended to store such large objects in user-defined metadata. ` + + `Please make sure its serialized length is <= ` + + `${MAX_USER_DEFINED_METADATA_SERIALIZED_LENGTH}.`); + } + } + } + /** + * Check if an input is plain JSON object or any valid subfield of it. + * + * @param x The input to be checked. + * @param assertObject Whether to assert `x` is a JSON object, i.e., reject + * cases of arrays and primitives. + * @return Returns `true` if and only if `x` is a plain JSON object, + * a JSON-valid primitive including string, number, boolean and null, + * or an array of the said types. + */ + // tslint:disable-next-line:no-any + function plainObjectCheck(x) { + if (x === null) { + // Note: typeof `null` is 'object', and `null` is valid in JSON. + return true; + } + else if (typeof x === 'object') { + if (Object.getPrototypeOf(x) === Object.prototype) { + // `x` is a JavaScript object and its prototype is Object. + const keys = Object.keys(x); + for (const key of keys) { + if (typeof key !== 'string') { + // JSON keys must be strings. + return false; + } + if (!plainObjectCheck(x[key])) { // Recursive call. + return false; + } + } + return true; + } + else { + // `x` is a JavaScript object but its prototype is not Object. + if (Array.isArray(x)) { + // `x` is a JavaScript array. 
+ for (const item of x) { + if (!plainObjectCheck(item)) { // Recursive call. + return false; + } + } + return true; + } + else { + // `x` is a JavaScript object and its prototype is not Object, + // and it's not an Array. I.e., it's a complex object such as + // `Error` and `Date`. + return false; + } + } + } + else { + // `x` is not a JavaScript object or `null`. + const xType = typeof x; + return xType === 'string' || xType === 'number' || xType === 'boolean'; + } + } + + /** + * @license + * Copyright 2018 Google LLC + * + * Use of this source code is governed by an MIT-style + * license that can be found in the LICENSE file or at + * https://opensource.org/licenses/MIT. + * ============================================================================= + */ + /** + * Print the summary of a LayersModel object. + * + * @param model tf.LayersModel instance. + * @param lineLength Total length of printed lines. Set this to adapt to the + * display to different terminal or console sizes. + * @param positions Relative or absolute positions of log elements in each + * line. Each number corresponds to right-most (i.e., ending) position of a + * column. + * If not provided, defaults to `[0.45, 0.85, 1]` for sequential-like + * models and `[0.33, 0.55, 0.67, 1]` for non-sequential like models. + * @param printFn Print function to use. + * It will be called on each line of the summary. You can provide a custom + * function in order to capture the string summary. Defaults to `console.log`. + */ + function printSummary(model, lineLength, positions, + // tslint:disable-next-line:no-any + printFn = console.log) { + const sequentialLike = isModelSequentialLike(model); + // Header names for different log elements. 
+ const toDisplay = ['Layer (type)', 'Input Shape', 'Output shape', 'Param #']; + if (sequentialLike) { + lineLength = lineLength || 90; + positions = positions || [0.32, 0.61, 0.89, 1]; + } + else { + lineLength = lineLength || 115; + positions = positions || [0.24, 0.48, 0.70, 0.80, 1]; + // Header names for different log elements. + } + if (positions[positions.length - 1] <= 1) { + // `positions` is relative. Convert it to absolute positioning. + positions = positions.map(p => Math.floor(lineLength * p)); + } + let relevantNodes; + if (!sequentialLike) { + toDisplay.push('Receives inputs'); + relevantNodes = []; + for (const depth in model.nodesByDepth) { + relevantNodes.push(...model.nodesByDepth[depth]); + } + } + printFn('_'.repeat(lineLength)); + printRow(toDisplay, positions, printFn); + printFn('='.repeat(lineLength)); + const layers = model.layers; + for (let i = 0; i < layers.length; ++i) { + if (sequentialLike) { + printLayerSummary(layers[i], positions, printFn); + } + else { + printLayerSummaryWithConnections(layers[i], positions, relevantNodes, printFn); + } + printFn((i === layers.length - 1 ? 
'=' : '_').repeat(lineLength)); + } + // tslint:disable-next-line:no-any + model.checkTrainableWeightsConsistency(); + const trainableCount = countTrainableParams(model); + const nonTrainableCount = countParamsInWeights(model.nonTrainableWeights); + printFn(`Total params: ${trainableCount + nonTrainableCount}`); + printFn(`Trainable params: ${trainableCount}`); + printFn(`Non-trainable params: ${nonTrainableCount}`); + printFn('_'.repeat(lineLength)); + } + function countTrainableParams(model) { + let trainableCount; + // tslint:disable:no-any + if (model.collectedTrainableWeights != null) { + trainableCount = + countParamsInWeights(model.collectedTrainableWeights); + } + else { + trainableCount = countParamsInWeights(model.trainableWeights); + } + // tslint:enable:no-any + return trainableCount; + } + function isModelSequentialLike(model) { + let sequentialLike = true; + const nodesByDepth = []; + const nodes = []; + for (const depth in model.nodesByDepth) { + nodesByDepth.push(model.nodesByDepth[depth]); + } + for (const depthNodes of nodesByDepth) { + if (depthNodes.length > 1 || + depthNodes.length === 1 && depthNodes[0].inboundLayers.length > 1) { + sequentialLike = false; + break; + } + nodes.push(...depthNodes); + } + if (sequentialLike) { + // Search for shared layers. 
+ for (const layer of model.layers) { + let flag = false; + for (const node of layer.inboundNodes) { + if (nodes.indexOf(node) !== -1) { + if (flag) { + sequentialLike = false; + break; + } + else { + flag = true; + } + } + } + if (!sequentialLike) { + break; + } + } + } + return sequentialLike; + } + function printRow(fields, positions, + // tslint:disable-next-line:no-any + printFn = console.log) { + let line = ''; + for (let i = 0; i < fields.length; ++i) { + if (i > 0) { + line = line.slice(0, line.length - 1) + ' '; + } + line += fields[i]; + line = line.slice(0, positions[i]); + line += ' '.repeat(positions[i] - line.length); + } + printFn(line); + } + /** + * Prints a summary for a single Layer, without connectivity information. + * + * @param layer: Layer instance to print. + */ + function printLayerSummary(layer, positions, + // tslint:disable-next-line:no-any + printFn) { + let outputShape; + let inputShape; + try { + inputShape = (layer.inboundNodes.map(x => JSON.stringify(x.inputShapes))).join(','); + } + catch (err) { + inputShape = 'multiple'; + } + try { + outputShape = JSON.stringify(layer.outputShape); + } + catch (err) { + outputShape = 'multiple'; + } + const name = layer.name; + const className = layer.getClassName(); + const fields = [`${name} (${className})`, inputShape, + outputShape, layer.countParams().toString()]; + printRow(fields, positions, printFn); + } + /** + * Prints a summary for a single Layer, with connectivity information. 
+ */ + function printLayerSummaryWithConnections(layer, positions, relevantNodes, + // tslint:disable-next-line:no-any + printFn) { + let outputShape; + let inputShape; + try { + inputShape = (layer.inboundNodes.map(x => JSON.stringify(x.inputShapes))).join(','); + } + catch (err) { + inputShape = 'multiple'; + } + try { + outputShape = JSON.stringify(layer.outputShape); + } + catch (err) { + outputShape = 'multiple'; + } + const connections = []; + for (const node of layer.inboundNodes) { + if (relevantNodes != null && relevantNodes.length > 0 && + relevantNodes.indexOf(node) === -1) { + continue; + } + for (let i = 0; i < node.inboundLayers.length; ++i) { + const inboundLayer = node.inboundLayers[i].name; + const inboundLayerIndex = node.nodeIndices[i]; + const inboundTensorIndex = node.tensorIndices[i]; + connections.push(`${inboundLayer}[${inboundLayerIndex}][${inboundTensorIndex}]`); + } + } + const name = layer.name; + const className = layer.getClassName(); + const firstConnection = connections.length === 0 ? '' : connections[0]; + const fields = [ + `${name} (${className})`, inputShape, + outputShape, layer.countParams().toString(), + firstConnection + ]; + printRow(fields, positions, printFn); + for (let i = 1; i < connections.length; ++i) { + printRow(['', '', '', '', connections[i]], positions, printFn); + } + } + + /** + * @license + * Copyright 2018 Google LLC + * + * Use of this source code is governed by an MIT-style + * license that can be found in the LICENSE file or at + * https://opensource.org/licenses/MIT. + * ============================================================================= + */ + // tslint:enable + /** + * Test whether a value in an array is the name of a LayersModel or Layer. + * @param key The key name that the value is found under. Note that the key + * may not be at the level immediately above the value, if the value is in a + * nested array. + * @param index Index of the value in the Array that it is found in. 
+ * @param value The value object. + * @returns A boolean indicating whether value is a name. + */ + function isArrayItemInputOrOutputName(key, index, value) { + return (key === 'inboundNodes' || key === 'outputLayers' || + key === 'inputLayers') && + index === 0 && typeof value === 'string'; + } + /** + * Convert a Pythonic config object to TypeScript config object. + * @param pythonicConfig The config object to convert. + * @param key Optional key name of the object being converted. + * @returns Result of the conversion. + */ + function convertPythonicToTs(pythonicConfig, key) { + if (pythonicConfig === null) { + return null; + } + else if (typeof pythonicConfig === 'string') { + return toCamelCase(pythonicConfig); + } + else if ((typeof pythonicConfig === 'number') || + (typeof pythonicConfig === 'boolean')) { + return pythonicConfig; + } + else if (pythonicConfig instanceof Array) { + const tsArray = []; + const arrayLength = pythonicConfig.length; + for (let i = 0; i < arrayLength; ++i) { + const item = pythonicConfig[i]; + if (isArrayItemInputOrOutputName(key, i, item)) { + tsArray.push(item); + } + else { + tsArray.push(convertPythonicToTs(item, key)); + } + } + return tsArray; + } + else { + const tsDict = {}; + for (const pythonicKey of Object.keys(pythonicConfig)) { + const pythonicValue = pythonicConfig[pythonicKey]; + if (pythonicKey === 'name' && typeof pythonicValue === 'string') { + // Special case the 'name' key with a string value. Name values, such as + // the names of LayersModel and Layer instances, should not undergo the + // camel-case conversion. + tsDict[pythonicKey] = pythonicValue; + } + else { + const tsKey = toCamelCase(pythonicKey); + tsDict[tsKey] = convertPythonicToTs(pythonicValue, tsKey); + } + } + return tsDict; + } + } + /** + * Convert a TypeScript config object to Python config object. + * @param tsConfig The config object to convert. + * @param key Optional key name of the object being converted. 
+ * @returns Result of the conversion. + */ + function convertTsToPythonic(tsConfig, key) { + if (tsConfig === null || tsConfig === undefined) { + return null; + } + else if (typeof tsConfig === 'string') { + return toSnakeCase(tsConfig); + } + else if ((typeof tsConfig === 'number') || (typeof tsConfig === 'boolean')) { + return tsConfig; + } + else if (tsConfig instanceof Array) { + const pyArray = []; + const arrayLength = tsConfig.length; + for (let i = 0; i < arrayLength; ++i) { + const item = tsConfig[i]; + if (isArrayItemInputOrOutputName(key, i, item)) { + pyArray.push(item); + } + else { + pyArray.push(convertTsToPythonic(item, key)); + } + } + return pyArray; + } + else { + const pyDict = {}; + for (const tsKey of Object.keys(tsConfig)) { + const tsValue = tsConfig[tsKey]; + const pyKey = toSnakeCase(tsKey); + if ((tsKey === 'name' || tsKey === 'className') && + typeof tsValue === 'string') { + // Special case the 'name' key with a string value. Name values, such as + // the names of LayersModel and Layer instances, should not undergo the + // snake-case conversion. + pyDict[pyKey] = tsValue; + } + else { + pyDict[pyKey] = convertTsToPythonic(tsValue, tsKey); + } + } + return pyDict; + } + } + + /** @license See the LICENSE file. */ + // This code is auto-generated, do not modify this file! + const version$6 = '4.22.0'; + + /** + * @license + * Copyright 2018 Google LLC + * + * Use of this source code is governed by an MIT-style + * license that can be found in the LICENSE file or at + * https://opensource.org/licenses/MIT. + * ============================================================================= + */ + // get weights key from tensor map in order to check if it is from keras v3. + // e.g. 
dense/0 + const isKerasSavedModelFormat = (weights) => { + const keys = Object.keys(weights); + if (keys.length === 0) { + return false; + } + const key = keys[0].split('/'); + return !isNaN(parseInt(key[key.length - 1], 10)); + }; + /** + * A Container is a directed acyclic graph of layers. + * + * It is the topological form of a "model". A LayersModel + * is simply a Container with added training routines. + * + */ + class Container extends Layer { + constructor(args) { + // No args passed to super's constructor. + super({}); + this.containerNodes = new Set(); + this.name = args.name; + if (this.name == null) { + const prefix = this.getClassName().toLowerCase(); + this.name = getUid(prefix); + } + this.supportsMasking = false; + this.trainable_ = true; + // TODO(michaelterry): Initialize perInputLosses/Updates here. + // Container-specific properties. + if (Array.isArray(args.inputs)) { + this.inputs = args.inputs.slice(); + } + else { + this.inputs = [args.inputs]; + } + if (Array.isArray(args.outputs)) { + this.outputs = args.outputs.slice(); + } + else { + this.outputs = [args.outputs]; + } + // Check for redundancy in inputs. + if (unique$2(this.inputs).length !== this.inputs.length) { + throw new ValueError('The list of inputs passed to the model is ' + + 'redundant. All inputs should only appear once. Found: ' + + `${this.inputs.map(x => x.name)}`); + } + // Check for redundancy in outputs. + if (unique$2(this.outputs).length !== this.outputs.length) { + console.warn('The list of outputs passed to the model is redundant. ' + + 'All outputs should only appear once. 
Found: ' + + `${this.outputs.map(x => x.name)}`); + } + /* + List of initial layers (1 to 1 mapping with this.inputs, hence the same + layer might appear twice) + */ + this.inputLayers = []; + this.inputLayersNodeIndices = []; + this.inputLayersTensorIndices = []; + /* + List of layers (1 to 1 mapping with this.outputs, hence the same layer + might appear twice) + */ + this.outputLayers = []; + this.outputLayersNodeIndices = []; + this.outputLayersTensorIndices = []; + /* + All layers in order of horizontal graph traversal. Entries are unique. + Includes input and output layers. + */ + this.layers = []; + /* + References to container layers that were constructed internally. We need + these to properly dispose of tensors from nested containers. + */ + this.internalContainerRefs = []; + // TODO(michaelterry): Determine if caching still needed with eager + // backend. + /* + This is for performance optimization when calling the Container on new + inputs. Every time the Container is called on a set on input tensors, + we compute the output tensors, output masks and output shapes in one pass, + then cache them here. When one of these outputs is queried later, + we retrieve it from there instead of recomputing it. + */ + // this.outputTensorCache = {}; + // this.outputShapeCache = {}; + // Build this.outputLayers: + for (const x of this.outputs) { + const layer = x.sourceLayer; + const nodeIndex = x.nodeIndex; + const tensorIndex = x.tensorIndex; + this.outputLayers.push(layer); + this.outputLayersNodeIndices.push(nodeIndex); + this.outputLayersTensorIndices.push(tensorIndex); + } + // TODO(michaelterry): Add output mask cache code. + // Build this.inputLayers: + for (const x of this.inputs) { + const layer = x.sourceLayer; + const nodeIndex = x.nodeIndex; + const tensorIndex = x.tensorIndex; + /* + It's supposed to be an input layer, so only one node + and one tensor output. 
+ */ + assert(nodeIndex === 0, 'input layer has >1 nodes'); + assert(tensorIndex === 0, 'input layer has >1 tensors'); + this.inputLayers.push(layer); + this.inputLayersNodeIndices.push(nodeIndex); + this.inputLayersTensorIndices.push(tensorIndex); + } + // Build this.inputNames and this.outputNames. + this.inputNames = []; + this.outputNames = []; + this.feedInputShapes = []; + this.feedInputNames = []; + this.feedOutputNames = []; + for (let i = 0; i < this.inputLayers.length; i++) { + const layer = this.inputLayers[i]; + // Check that layer is an InputLayer. + if (!(layer instanceof InputLayer)) { + throw new TypeError('Input layers to a LayersModel must be InputLayer objects. ' + + `Received inputs: ${args.inputs}. ` + + `Input ${i} (0-based) originates ` + + `from layer type ${layer.getClassName()}.`); + } + this.inputNames.push(layer.name); + this.feedInputShapes.push(layer.batchInputShape); + this.feedInputNames.push(layer.name); + } + for (const layer of this.outputLayers) { + this.outputNames.push(layer.name); + } + this.internalInputShapes = this.inputs.map(x => x.shape); + this.internalOutputShapes = this.outputs.map(x => x.shape); + /* + Container_nodes: set of nodes included in the graph (not all nodes + included in the layers are relevant to the current graph). + */ + // ids of all nodes relevant to the Container: + const nodesDepths = {}; + // To recover nodes from their ID. + const nodeIDToNode = {}; + const layersDepths = {}; + // To layers from their ID. + const layerIDToLayer = {}; + const layerIndices = {}; + const nodesInDecreasingDepth = []; + /** + * Builds a map of the graph of layers. + * + * This recursively updates the map `layerIndices`, + * the list `nodesInDecreasingDepth` and the set `containerNodes`. + * + * @param tensor Some tensor in a graph. + * @param finishedNodes Set of nodes whose subgraphs have been traversed + * completely. Useful to prevent duplicated work. 
+ * @param nodesInProgress Set of nodes that are currently active on the + * recursion stack. Useful to detect cycles. + * @param layer Layer from which `tensor` comes from. If not provided, + * will be obtained from tensor.sourceLayer. + * @param nodeIndex Node index from which `tensor` comes from. + * @param tensorIndex TensorIndex from which `tensor` comes from. + * + * @exception RuntimeError if a cycle is detected. + */ + const buildMapOfGraph = (tensor, finishedNodes, nodesInProgress, layer, nodeIndex, tensorIndex) => { + if (layer == null || nodeIndex == null || tensorIndex == null) { + layer = tensor.sourceLayer; + nodeIndex = tensor.nodeIndex; + tensorIndex = tensor.tensorIndex; + } + const node = layer.inboundNodes[nodeIndex]; + // Prevent cycles. + if (nodesInProgress.indexOf(node) !== -1) { + throw new RuntimeError(`The tensor ${tensor.name} at layer "${layer.name}" ` + + 'is part of a cycle.'); + } + // Don't repeat work for shared subgraphs + if (finishedNodes.indexOf(node) !== -1) { + return; + } + // Update containerNodes. + this.containerNodes.add(Container.nodeKey(layer, nodeIndex)); + // Store the traversal order for layer sorting. + if (!(layer.id in layerIndices)) { + layerIndices[layer.id] = Object.keys(layerIndices).length; + } + if (nodesInProgress.indexOf(node) === -1) { + nodesInProgress.push(node); + } + // Propagate to all previous tensors connected to this node. 
+ const numInboundLayers = node.inboundLayers.length; + for (let i = 0; i < numInboundLayers; i++) { + const x = node.inputTensors[i]; + const layer = node.inboundLayers[i]; + const nodeIndex = node.nodeIndices[i]; + const tensorIndex = node.tensorIndices[i]; + buildMapOfGraph(x, finishedNodes, nodesInProgress, layer, nodeIndex, tensorIndex); + } + finishedNodes.push(node); + while (nodesInProgress.indexOf(node) >= 0) { + nodesInProgress.splice(nodesInProgress.indexOf(node), 1); + } + nodesInDecreasingDepth.push(node); + }; + const finishedNodes = []; + const nodesInProgress = []; + for (const x of this.outputs) { + buildMapOfGraph(x, finishedNodes, nodesInProgress); + } + const reversedNodesInDecreasingDepth = nodesInDecreasingDepth.slice().reverse(); + for (const node of reversedNodesInDecreasingDepth) { + nodeIDToNode[node.id] = node; + // If the depth is not set, the node has no outbound nodes (depth 0). + if (!(node.id in nodesDepths)) { + nodesDepths[node.id] = 0; + } + let depth = nodesDepths[node.id]; + // Update the depth of the corresponding layer + const previousDepth = (layersDepths[node.outboundLayer.id] == null ? + 0 : + layersDepths[node.outboundLayer.id]); + /* + If we've seen this layer before at a higher depth, we should use that + depth instead of the node depth. This is necessary for shared layers + that have inputs at different depth levels in the graph. + */ + depth = Math.max(depth, previousDepth); + layersDepths[node.outboundLayer.id] = depth; + layerIDToLayer[node.outboundLayer.id] = node.outboundLayer; + nodesDepths[node.id] = depth; + // Update the depth of inbound nodes. + for (let i = 0; i < node.inboundLayers.length; i++) { + const inboundLayer = node.inboundLayers[i]; + const nodeIndex = node.nodeIndices[i]; + const inboundNode = inboundLayer.inboundNodes[nodeIndex]; + const previousDepth = (nodesDepths[inboundNode.id] == null ? 
0 : + nodesDepths[inboundNode.id]); + nodesDepths[inboundNode.id] = Math.max(depth + 1, previousDepth); + nodeIDToNode[inboundNode.id] = inboundNode; + } + } + // Build a dict {depth: list of nodes with this depth} + const nodesByDepth = {}; + for (const nodeID in nodesDepths) { + const depth = nodesDepths[nodeID]; + if (!(depth in nodesByDepth)) { + nodesByDepth[depth] = []; + } + nodesByDepth[depth].push(nodeIDToNode[nodeID]); + } + // Build a dict {depth: list of layers with this depth} + const layersByDepth = {}; + for (const layerID in layersDepths) { + const depth = layersDepths[layerID]; + if (!(depth in layersByDepth)) { + layersByDepth[depth] = []; + } + layersByDepth[depth].push(layerIDToLayer[layerID]); + } + // Get sorted list of layer depths. + let depthKeys = Object.keys(layersByDepth) + .map(x => parseInt(x, 10)) + .sort(reverseNumberCompare); + // Set this.layers and this.layersByDepth. + this.layers = []; + for (const depth of depthKeys) { + const layersForDepth = layersByDepth[depth]; + // Container.layers needs to have a deterministic order: + // here we order them by traversal order. + layersForDepth.sort((a, b) => { + const aIndex = layerIndices[a.id]; + const bIndex = layerIndices[b.id]; + if (aIndex < bIndex) { + return -1; + } + if (aIndex > bIndex) { + return 1; + } + return 0; + }); + for (const layer of layersForDepth) { + if (layer instanceof Container) { + this.internalContainerRefs.push(layer); + } + this.layers.push(layer); + } + } + this.layersByDepth = layersByDepth; + // Get sorted list of node depths; + depthKeys = Object.keys(nodesByDepth) + .map(x => parseInt(x, 10)) + .sort(reverseNumberCompare); + // Check that all tensors required are computable. + // computable_tensors: all tensors in the graph + // that can be computed from the inputs provided. + const computableTensors = this.inputs.slice(); + // To provide a better error msg. 
+ const layersWithCompleteInput = []; + for (const depth of depthKeys) { + for (const node of nodesByDepth[depth]) { + const layer = node.outboundLayer; + if (layer != null) { + for (const x of node.inputTensors) { + if (computableTensors.indexOf(x) === -1) { + throw new RuntimeError(`Graph disconnected: cannot obtain value for tensor ${x}` + + ` at layer "${layer.name}". ` + + 'The following previous layers were accessed without ' + + `issue: ${layersWithCompleteInput}`); + } + } + for (const x of node.outputTensors) { + computableTensors.push(x); + } + layersWithCompleteInput.push(layer.name); + } + } + } + // Set this.containerNodes and this.nodesByDepth. + this.nodesByDepth = nodesByDepth; + // Ensure name unicity, which will be crucial for serialization + // (since serialized nodes refer to layers by their name). + const allNames = this.layers.map(x => x.name); + for (const name of allNames) { + const numOccurrences = allNames.filter(x => x === name).length; + if (numOccurrences !== 1) { + throw new RuntimeError(`The name "${name}" is used ${numOccurrences} times ` + + 'in the model. All layer names should be unique. Layer names: ' + + JSON.stringify(allNames)); + } + } + // Layer parameters. + // The new container starts with a single inbound node + // for its inputs, and no outbound nodes. + // Will be appended to by future calls to apply(). + this.outboundNodes = []; + // Will be appended to below, and by future calls to apply(). + this.inboundNodes = []; + // Create the node linking internal inputs to internal outputs. + // (This call has side effects.) 
+ // tslint:disable-next-line:no-unused-expression + new Node({ + outboundLayer: this, + inboundLayers: [], + nodeIndices: [], + tensorIndices: [], + inputTensors: this.inputs, + outputTensors: this.outputs, + inputMasks: this.inputs.map(x => null), + outputMasks: this.outputs.map(x => null), + inputShapes: this.inputs.map(x => x.shape), + outputShapes: this.outputs.map(x => x.shape) + }); + this.built = true; + this._refCount = 1; // The ref count of a container always start at 1. + } + assertNotDisposed() { + if (this._refCount === 0) { + throw new Error(`Container '${this.name}' is already disposed.`); + } + } + /** + * Attempt to dispose a LayersModel's weights. + * + * This method decrease the reference count of the LayersModel object by 1. + * + * A LayersModel is reference-counted. Its reference count is incremented by 1 + * when it is first constructed and when it is used as a Layer of another + * LayersModel. + * + * If the reference count of a LayersModel becomes 0, the `dispose` method of + * all its constituent `Layer`s will be called. + * + * Note: If the reference count is greater than 0 after the decrement, the + * `dispose` method of its constituent `Layer`s will *not* be called. + * + * After a LayersModel is disposed, it cannot be used in calls such as + * 'predict`, `evaluate` or `fit` anymore. + * + * @returns A DisposeResult Object with the following fields: + * - refCountAfterDispose: The reference count of the LayersModel after this + * `dispose()` call. + * - numDisposedVariables: Number of `tf.Variable`s (i.e., weights) disposed + * during this `dispose()` call. + * @throws {Error} If the layer is not built yet, or if the LayersModel has + * already been disposed. 
+ */ + dispose() { + this.assertNotDisposed(); + const result = { refCountAfterDispose: null, numDisposedVariables: 0 }; + if (--this._refCount === 0) { + for (const layer of this.layers) { + result.numDisposedVariables += layer.dispose().numDisposedVariables; + } + // Call dispose on each internally created container layer again to ensure + // their refCounts hit zero and their tensors are subsequently deleted. + for (const container of this.internalContainerRefs) { + result.numDisposedVariables += container.dispose().numDisposedVariables; + } + } + result.refCountAfterDispose = this._refCount; + return result; + } + get trainable() { + return this.trainable_; + } + set trainable(trainable) { + this.layers.forEach(layer => { + // tslint:disable-next-line:no-any + layer._trainableWeights + .forEach(w => w.trainable = trainable); + }); + this.trainable_ = trainable; + } + get trainableWeights() { + // Porting Note: This check below is to prevent errors where the + // _trainableWeights inherited from the parent class (Layer) gets + // inadvertently used. + if (this._trainableWeights.length > 0) { + throw new ValueError('Container instance unexpectedly contains _trainableWeights.' + + 'The trainable weights of a Container are a union of the ' + + 'trainable weights of its consituent Layers. 
Its own ' + + '_trainableWeights must remain an empty Array.'); + } + if (!this.trainable) { + return []; + } + let weights = []; + for (const layer of this.layers) { + weights = weights.concat(layer.trainableWeights); + } + return weights; + } + get nonTrainableWeights() { + const weights = []; + for (const layer of this.layers) { + weights.push(...layer.nonTrainableWeights); + } + if (!this.trainable) { + const trainableWeights = []; + for (const layer of this.layers) { + trainableWeights.push(...layer.trainableWeights); + } + return trainableWeights.concat(weights); + } + return weights; + } + get weights() { + return this.trainableWeights.concat(this.nonTrainableWeights); + } + /** + * Loads all layer weights from a JSON object. + * + * Porting Note: HDF5 weight files cannot be directly loaded in JavaScript / + * TypeScript. The utility script at `scripts/pykeras.py` offers means + * to convert them into JSON strings compatible with this method. + * Porting Note: TensorFlow.js Layers supports only loading by name currently. + * + * @param weights A JSON mapping weight names to weight values as nested + * arrays of numbers, or a `NamedTensorMap`, i.e., a JSON mapping weight + * names to `tf.Tensor` objects. + * @param strict Require that the provided weights exactly match those + * required by the container. Default: `true`. Passing `false` means that + * extra weights and missing weights will be silently ignored. + */ + loadWeights(weights, strict = true) { + const nameToWeight = {}; + let totalWeightsCount = 0; + const modelIsKerasSavedModelFormat = isKerasSavedModelFormat(weights); + if (modelIsKerasSavedModelFormat) { + this.parseWeights(weights); + } + // Check if weights from keras v3. + for (const layer of this.layers) { + for (const [index, weight] of layer.weights.entries()) { + // Parse the name to layerName/index. + // e.g. dense/0, dense/1, dense_1/0, dense_1/1 + const parsedName = modelIsKerasSavedModelFormat ? 
+ `${weight.name.split('/').slice(0, -1).join('/') + '/'}${index}` : + weight.originalName; + if (nameToWeight[parsedName] != null) { + throw new ValueError(`Duplicate weight name: ${parsedName}`); + } + nameToWeight[parsedName] = weight; + totalWeightsCount++; + } + } + const weightValueTuples = []; + for (const name in weights) { + // TF 2.2.0 added cell name to the weight name in the format of + // layer_name/cell_name/weight_name, we need to remove + // the inner cell name. + let validatedName = name; + if (nameToWeight[name] == null) { + const tokens = name.split('/'); + const shortenNameArray = tokens.slice(0, -2).concat([tokens[tokens.length - 1]]); + validatedName = shortenNameArray.join('/'); + } + if (nameToWeight[validatedName] != null) { + weightValueTuples.push([nameToWeight[validatedName], weights[name]]); + } + else if (strict) { + throw new ValueError(`Provided weight data has no target variable: ${name}`); + } + delete nameToWeight[validatedName]; + } + if (strict) { + // Check that all weights are set. + const unsetNames = []; + for (const name in nameToWeight) { + unsetNames.push(name); + } + if (unsetNames.length > 0) { + throw new ValueError(`${unsetNames.length} of ${totalWeightsCount} weights are not set: ` + + `${unsetNames}`); + } + } + batchSetValue(weightValueTuples); + } + parseWeights(weights) { + for (const key in Object.keys(weights)) { + const listParts = key.split('/'); + const list = ['vars', 'layer_checkpoint_dependencies']; + // For keras v3, the weights name are saved based on the folder structure. + // e.g. _backbone/_layer_checkpoint_dependencies/transformer/_self../ + // _output_dense/vars/0 + // Therefore we discard the `vars` and `layer_checkpoint_depencies` within + // the saved name and only keeps the layer name and weights. + // This can help to mapping the actual name of the layers and load each + // weight accordingly. 
+ const newKey = listParts + .map(str => { + if (str.startsWith('_')) { + return str.slice(1); + } + return str; + }) + .filter(str => !list.includes(str)) + .join('/'); + if (newKey !== key) { + weights[newKey] = weights[key]; + delete weights[key]; + } + } + } + /** + * Util shared between different serialization methods. + * @returns LayersModel config with Keras version information added. + */ + updatedConfig() { + const theConfig = this.getConfig(); + const modelConfig = {}; + modelConfig['className'] = this.getClassName(); + modelConfig['config'] = theConfig; + modelConfig['kerasVersion'] = `tfjs-layers ${version$6}`; + // TODO(nielsene): Replace something like K.backend() once + // possible. + modelConfig['backend'] = 'TensorFlow.js'; + return modelConfig; + } + /** + * Returns a JSON string containing the network configuration. + * + * To load a network from a JSON save file, use + * models.modelFromJSON(jsonString); + * @param extraJsonArgs Unused in tfjs-layers, maintained for PyKeras + * @param returnString Whether the return value should be stringified + * (default: `true`). + * @returns a JSON string if `returnString` (default), or a JSON object if + * `!returnString`. + */ + // tslint:disable-next-line:no-any + toJSON(unused, returnString = true) { + const modelConfig = convertTsToPythonic(this.updatedConfig()); + return returnString ? JSON.stringify(modelConfig) : modelConfig; + } + /** + * Call the model on new inputs. + * + * In this case `call` just reapplies all ops in the graph to the new inputs + * (e.g. build a new computational graph from the provided inputs). + * + * @param inputs A tensor or list of tensors. + * @param mask A mask or list of masks. A mask can be either a tensor or null + * (no mask). + * + * @return A tensor if there is a single output, or a list of tensors if there + * are more than one outputs. 
+ */ + call(inputs, kwargs) { + return tidy(() => { + inputs = toList(inputs); + const feedDict = new FeedDict(); + for (let i = 0; i < this.inputs.length; ++i) { + feedDict.add(this.inputs[i], inputs[i]); + } + return execute(this.outputs, feedDict, kwargs); + }); + } + /** + * Computes an output mask tensor. + * + * @param inputs Tensor or list of tensors. + * @param mask Tensor or list of tensors. + * + * @return null or a tensor (or list of tensors, one per output tensor of the + * layer). + */ + computeMask(inputs, mask) { + return tidy(() => { + inputs = toList(inputs); + let masks; + if (mask == null) { + masks = pyListRepeat(null, inputs.length); + } + else { + masks = toList(mask); + } + // TODO(michaelterry): Add support for mask caching. + return this.runInternalGraph(inputs, masks)[1]; + }); + } + /** + * Computes the output shape of the layer. + * + * Assumes that the layer will be built to match that input shape provided. + * + * @param inputShape A shape (tuple of integers) or a list of shape tuples + * (one per output tensor of the layer). Shape tuples can include null for + * free dimensions, instead of an integer. + */ + computeOutputShape(inputShape) { + const inputShapes = normalizeShapeList(inputShape); + if (inputShapes.length !== this.inputLayers.length) { + throw new ValueError(`Invalid inputShape argument ${inputShape}: ` + + `model has ${this.inputLayers.length} tensor inputs.`); + } + // TODO(michaelterry): Add caching + const layersToOutputShapes = {}; + for (let i = 0; i < inputShapes.length; i++) { + const layer = this.inputLayers[i]; + const inputShape = inputShapes[i]; + // It's an input layer: computeOutputShape is identity, + // and there is only one node and one tensor output. + const shapeKey = layer.name + '_0_0'; + layersToOutputShapes[shapeKey] = inputShape; + } + const depthKeys = Object.keys(this.nodesByDepth) + .map(x => parseInt(x, 10)) + .sort(reverseNumberCompare); + // Iterate over nodes, by depth level. 
+ if (depthKeys.length > 1) { + for (const depth of depthKeys) { + const nodes = this.nodesByDepth[depth]; + for (const node of nodes) { + // This is always a single layer, never a list. + const layer = node.outboundLayer; + if (this.inputLayers.map(x => x.id).indexOf(layer.id) !== -1) { + // We've already covered the input layers a few lines above. + continue; + } + // Potentially redundant list, same size of node.inputTensors. + const inputShapes = []; + for (let j = 0; j < node.inboundLayers.length; j++) { + const inboundLayer = node.inboundLayers[j]; + const nodeIndex = node.nodeIndices[j]; + const tensorIndex = node.tensorIndices[j]; + const shapeKey = `${inboundLayer.name}_${nodeIndex}_${tensorIndex}`; + const inputShape = layersToOutputShapes[shapeKey]; + inputShapes.push(inputShape); + } + const outputShape = layer.computeOutputShape(singletonOrArray(inputShapes)); + const outputShapes = normalizeShapeList(outputShape); + const nodeIndex = layer.inboundNodes.indexOf(node); + for (let j = 0; j < outputShapes.length; j++) { + const shapeKey = `${layer.name}_${nodeIndex}_${j}`; + layersToOutputShapes[shapeKey] = outputShapes[j]; + } + } + } + } + // Read final output shapes from layersToOutputShapes. + const outputShapes = []; + const outputShapeKeys = []; + for (let i = 0; i < this.outputLayers.length; i++) { + const layer = this.outputLayers[i]; + const nodeIndex = this.outputLayersNodeIndices[i]; + const tensorIndex = this.outputLayersTensorIndices[i]; + const shapeKey = `${layer.name}_${nodeIndex}_${tensorIndex}`; + outputShapeKeys.push(shapeKey); + } + for (let i = 0; i < outputShapeKeys.length; i++) { + const key = outputShapeKeys[i]; + assert(key in layersToOutputShapes); + outputShapes.push(layersToOutputShapes[key]); + } + // TODO(michaelterry): Update cache + return singletonOrArray(outputShapes); + } + /** + * Computes output tensors for new inputs. + * + * Note: + * - Expects `inputs` to be a list (potentially with 1 element). 
+ * + * @param inputs List of tensors + * @param masks List of masks (tensors or null). + * @return Three lists: outputTensors, outputMasks, outputShapes + */ + runInternalGraph(inputs, masks) { + if (masks == null) { + masks = pyListRepeat(null, inputs.length); + } + // Dictionary mapping reference tensors to tuples + // (computed tensor, compute mask) + // we assume a 1:1 mapping from tensor to mask + // TODO: raise exception when a `.computeMask()` call + // does not return a list the same size as `call` + const tensorMap = {}; + for (let i = 0; i < this.inputs.length; ++i) { + const x = this.inputs[i]; + const y = inputs[i]; + const mask = masks[i]; + tensorMap[x.id] = [y, mask]; + } + const depthKeys = Object.keys(this.nodesByDepth) + .map(x => parseInt(x, 10)) + .sort(reverseNumberCompare); + for (const depth of depthKeys) { + const nodes = this.nodesByDepth[depth]; + for (const node of nodes) { + // This is always a single layer, never a list. + const layer = node.outboundLayer; + const referenceInputTensors = node.inputTensors; + const referenceOutputTensors = node.outputTensors; + // If all previous input tensors are available in tensorMap, + // then call node.inboundLayer on them. + // List of tuples [input, mask]: + const computedData = new Array(); + for (const x of referenceInputTensors) { + if (x.id in tensorMap) { + computedData.push(tensorMap[x.id]); + } + } + if (computedData.length === referenceInputTensors.length) { + // TODO(michaelterry): Add K.name_scope here, if we need it. 
+ let kwargs = {}; + let computedTensors; + let computedMasks; + let outputTensors; + let outputMasks; + // call layer + if (node.callArgs != null) { + kwargs = node.callArgs; + } + if (computedData.length === 1) { + const [computedTensor, computedMask] = computedData[0]; + if (kwargs['mask'] == null) { + kwargs['mask'] = computedMask; + } + outputTensors = + toList(layer.call(computedTensor, kwargs)); + outputMasks = toList(layer.computeMask(computedTensor, computedMask)); + computedTensors = [computedTensor]; + computedMasks = [computedMask]; + } + else { + computedTensors = computedData.map(x => x[0]); + computedMasks = computedData.map(x => x[1]); + if (kwargs['mask'] == null) { + kwargs['mask'] = computedMasks; + } + outputTensors = + toList(layer.call(computedTensors, kwargs)); + outputMasks = toList(layer.computeMask(computedTensors, computedMasks)); + } + if (layer.activityRegularizer) { + throw new NotImplementedError('LayersModel invocation with concrete Tensor value(s) in the ' + + 'presence of activity regularizer(s) is not supported yet.'); + } + // TODO(michaelterry): Add model updates and losses + // Update tensor map. + for (let i = 0; i < referenceOutputTensors.length; ++i) { + const x = referenceOutputTensors[i]; + const y = outputTensors[i]; + const mask = outputMasks[i]; + tensorMap[x.id] = [y, mask]; + } + } + } + } + const outputTensors = []; + const outputMasks = []; + const outputShapes = []; + for (const x of this.outputs) { + assert(x.id in tensorMap, `Could not compute output ${x.name} : ${x.id}`); + const [tensor, mask] = tensorMap[x.id]; + outputShapes.push(tensor.shape); + outputTensors.push(tensor); + outputMasks.push(mask); + } + // TODO(michaelterry): Add support for caches. + return [outputTensors, outputMasks, outputShapes]; + } + /** + * Builds a map of internal node keys to node ordering. + * Used in serializaion a node orderings may change as unused nodes are + * dropped. 
Porting Note: This helper method was pulled out of getConfig to + * improve readability. + * @param layers An array of Layers in the model. + * @returns Map of Node Keys to index order within the layer. + */ + buildNodeConversionMap(layers) { + const nodeConversionMap = {}; + let keptNodes; + for (const layer of this.layers) { + keptNodes = layer instanceof Container ? 1 : 0; + for (let originalNodeIndex = 0; originalNodeIndex < layer.inboundNodes.length; originalNodeIndex++) { + const nodeKey = Container.nodeKey(layer, originalNodeIndex); + if (this.containerNodes.has(nodeKey)) { + // i.e. we mark it to be saved + nodeConversionMap[nodeKey] = keptNodes; + keptNodes += 1; + } + } + } + return nodeConversionMap; + } + getLayer(nameOrIndex, index) { + if (index != null) { + return this.findLayer(index); + } + else { + if (nameOrIndex == null) { + throw new ValueError('Provide either a layer name or layer index'); + } + if (typeof nameOrIndex === 'number') { + return this.findLayer(nameOrIndex); + } + } + for (const layer of this.layers) { + if (layer.name === nameOrIndex) { + return layer; + } + } + throw new ValueError(`No such layer: ${nameOrIndex}`); + } + findLayer(index) { + if (this.layers.length <= index) { + throw new ValueError(`Was asked to retrieve layer at index ${index}, but model only ` + + `has ${this.layers.length} layer(s).`); + } + else { + return this.layers[index]; + } + } + /** + * Retrieves the Container's current loss values. + * + * Used for regularizers during training. + */ + calculateLosses() { + // Porting Node: This is an augmentation to Container.loss in PyKeras. + // In PyKeras, Container.loss returns symbolic tensors. Here a concrete + // Tensor (specifically Scalar) values are returned. This is due to the + // imperative backend. 
+ return tidy(() => { + const losses = []; + for (const layer of this.layers) { + for (let nodeIndex = 0; nodeIndex < layer.inboundNodes.length; ++nodeIndex) { + const nodeKey = Container.nodeKey(layer, nodeIndex); + if (this.containerNodes.has(nodeKey)) { + losses.push(...layer.calculateLosses()); + } + } + } + // TODO(cais): Add any unconditional model-level losses? + return losses; + }); + } + getConfig() { + const config = { name: this.name }; + // Build a map from layer unique name (self._node_key) + // to the index of the nodes that are saved in the config. + // Only nodes in container_nodes are saved. + const nodeConversionMap = this.buildNodeConversionMap(this.layers); + // Serialize and save the layers in layerConfigs + const layerConfigs = []; + for (const layer of this.layers) { + const layerClassName = layer.getClassName(); + const layerConfig = layer.getConfig(); + const filteredInboundNodes = []; + for (let originalNodeIndex = 0; originalNodeIndex < layer.inboundNodes.length; originalNodeIndex++) { + const node = layer.inboundNodes[originalNodeIndex]; + const nodeKey = Container.nodeKey(layer, originalNodeIndex); + let kwargs = {}; + if (this.containerNodes.has(nodeKey)) { + // The node is relevant to the model: + // add to filteredInboundNodes. + if (node.callArgs) { + try { + JSON.stringify(node.callArgs); + kwargs = node.callArgs; + } + catch (err) { + console.warn(`Layer ${layer.name} was passed ` + + `non-serializable keyword arguments: ` + + `${node.callArgs}. 
They will not be included ` + + `in the serialized model (and thus will be ` + + `missing at deserialization time).`); + kwargs = {}; + } + } + if (node.inboundLayers.length > 0) { + const nodeData = []; + for (let i = 0; i < node.inboundLayers.length; i++) { + const inboundLayer = node.inboundLayers[i]; + const nodeIndex = node.nodeIndices[i]; + const tensorIndex = node.tensorIndices[i]; + const nodeKey = Container.nodeKey(inboundLayer, nodeIndex); + let newNodeIndex = nodeConversionMap[nodeKey]; + if (newNodeIndex == null) { + newNodeIndex = 0; + } + nodeData.push([inboundLayer.name, newNodeIndex, tensorIndex, kwargs]); + } + filteredInboundNodes.push(nodeData); + } + } + } + const dict = {}; + dict['name'] = layer.name; + dict['className'] = layerClassName; + dict['config'] = layerConfig; + dict['inboundNodes'] = filteredInboundNodes; + layerConfigs.push(dict); + } + config['layers'] = layerConfigs; + // Gather info about inputs and outputs + const modelInputs = []; + for (let i = 0; i < this.inputLayers.length; i++) { + const layer = this.inputLayers[i]; + const nodeIndex = this.inputLayersNodeIndices[i]; + const nodeKey = Container.nodeKey(layer, nodeIndex); + if (!this.containerNodes.has(nodeKey)) { + continue; + } + let newNodeIndex = nodeConversionMap[nodeKey]; + if (newNodeIndex === null || newNodeIndex === undefined) { + newNodeIndex = 0; + } + const tensorIndex = this.inputLayersTensorIndices[i]; + modelInputs.push([layer.name, newNodeIndex, tensorIndex]); + } + config['inputLayers'] = modelInputs; + const modelOutputs = []; + for (let i = 0; i < this.outputLayers.length; i++) { + const layer = this.outputLayers[i]; + const nodeIndex = this.outputLayersNodeIndices[i]; + const nodeKey = Container.nodeKey(layer, nodeIndex); + if (!this.containerNodes.has(nodeKey)) { + continue; + } + let newNodeIndex = nodeConversionMap[nodeKey]; + if (newNodeIndex === null || newNodeIndex === undefined) { + newNodeIndex = 0; + } + const tensorIndex = 
this.outputLayersTensorIndices[i]; + modelOutputs.push([layer.name, newNodeIndex, tensorIndex]); + } + config['outputLayers'] = modelOutputs; + return config; + } + /** + * Instantiates a LayersModel from its config (output of `get_config()`). + * @param cls the class to create + * @param config LayersModel config dictionary. + * @param customObjects An optional dictionary of custom objects. + * @param fastWeightInit Optional flag to use fast weight initialization + * during deserialization. This is applicable to cases in which + * the initialization will be immediately overwritten by loaded weight + * values. Default: `false`. + * @returns A LayersModel instance. + * @throws ValueError: In case of improperly formatted config dict. + */ + /** @nocollapse */ + static fromConfig(cls, config, customObjects = {}, fastWeightInit = false) { + // Layer instances created during + // the graph reconstruction process + const createdLayers = {}; + // Dictionary mapping layer instances to + // node data that specifies a layer call. + // It acts as a queue that maintains any unprocessed + // layer call until it becomes possible to process it + // (i.e. until the input tensors to the call all exist). + const unprocessedNodes = {}; + function addUnprocessedNode(layer, nodeData) { + if (!(layer.name in unprocessedNodes)) { + unprocessedNodes[layer.name] = [nodeData]; + } + else { + unprocessedNodes[layer.name].push(nodeData); + } + } + function processNode(layer, nodeData) { + const inputTensors = []; + let kwargs; + for (const inputData of nodeData) { + const inboundLayerName = inputData[0]; + const inboundNodeIndex = inputData[1]; + const inboundTensorIndex = inputData[2]; + kwargs = inputData[3] == null ? 
+ {} : + inputData[3]; + if (!(inboundLayerName in createdLayers)) { + addUnprocessedNode(layer, nodeData); + return; + } + const inboundLayer = createdLayers[inboundLayerName]; + if (inboundLayer.inboundNodes.length <= inboundNodeIndex) { + addUnprocessedNode(layer, nodeData); + return; + } + const inboundNode = inboundLayer.inboundNodes[inboundNodeIndex]; + inputTensors.push(inboundNode.outputTensors[inboundTensorIndex]); + } + // Call layer on its inputs, thus creating the node + // and building the layer if needed. + // Note: This has Eager vs Graph Implications. + if (inputTensors.length > 0) { + layer.apply(singletonOrArray(inputTensors), kwargs); // was ** kwargs + } + } + /** + * Deserialize a layer, then call it on appropriate inputs. + * @param layerData: layer config dict. + * @throws ValueError: In case of improperly formatted `layer_data` + * dict. + */ + function processLayer(layerData) { + const layerName = layerData['name']; + // Instantiate layer. + const layer = deserialize(layerData, config['customObjects'] != null ? + config['customObjects'] : + {}); + layer.setFastWeightInitDuringBuild(fastWeightInit); + createdLayers[layerName] = layer; + // Gather layer inputs. + const inboundNodesData = layerData['inboundNodes']; + inboundNodesData.forEach(nodeData => { + if (!(nodeData instanceof Array)) { + throw new ValueError(`Corrupted configuration, expected array for nodeData: ${nodeData}`); + } + // We don't process nodes (i.e. make layer calls) + // on the fly because the inbound node may not yet exist, + // in case of layer shared at different topological depths + // (e.g.a model such as A(B(A(B(x))))) + addUnprocessedNode(layer, nodeData); + }); + } + // First, we create all layers and enqueue nodes to be processed. + const name = config['name']; + const layersFromConfig = config['layers']; + for (const layerData of layersFromConfig) { + processLayer(layerData); + } + // Then we process nodes in order of layer depth. 
+ // Nodes that cannot yet be processed(if the inbound node + // does not yet exist) are re - enqueued, and the process + // is repeated until all nodes are processed. + while (!isObjectEmpty(unprocessedNodes)) { + for (const layerData of layersFromConfig) { + const layer = createdLayers[layerData['name']]; + if (layer.name in unprocessedNodes) { + const currentUnprocessedNodesForLayer = unprocessedNodes[layer.name]; + delete unprocessedNodes[layer.name]; + for (const nodeData of currentUnprocessedNodesForLayer) { + processNode(layer, nodeData); + } + } + } + } + const inputTensors = []; + const outputTensors = []; + const inputLayersFromConfig = config['inputLayers']; + for (const layerData of inputLayersFromConfig) { + const layerName = layerData[0]; + const nodeIndex = layerData[1]; + const tensorIndex = layerData[2]; + assert(layerName in createdLayers); + const layer = createdLayers[layerName]; + const layerOutputTensors = layer.inboundNodes[nodeIndex].outputTensors; + inputTensors.push(layerOutputTensors[tensorIndex]); + } + const outputLayersFromConfig = config['outputLayers']; + for (const layerData of outputLayersFromConfig) { + const layerName = layerData[0]; + const nodeIndex = layerData[1]; + const tensorIndex = layerData[2]; + assert(layerName in createdLayers); + const layer = createdLayers[layerName]; + const layerOutputTensors = layer.inboundNodes[nodeIndex].outputTensors; + outputTensors.push(layerOutputTensors[tensorIndex]); + } + return new cls({ inputs: inputTensors, outputs: outputTensors, name }); + } + /** + * Determine whether the container is stateful. + * + * Porting Note: this is the equivalent of the stateful @property of + * the Container class in PyKeras. + */ + get stateful() { + // Porting Note: This check is to prevent inadvertent setting of the + // _stateful property of the Container instance. + if (this._stateful) { + throw new ValueError('Container instance unexpectedly has _stateful = true. 
The ' + + 'statefulness of a Container is determined by the Layers it ' + + 'contains. Its _stateful property must remain the default false.'); + } + for (const layer of this.layers) { + if (layer.stateful) { + return true; + } + } + return false; + } + /** + * Reset the state of all stateful constituent layers (if any). + * + * Examples of stateful layers include RNN layers whose `stateful` property + * is set as `true`. + */ + resetStates() { + tidy(() => { + this.layers.forEach(layer => { + // tslint:disable:no-any + if (layer.stateful) { + layer.resetStates(); + } + // tslint:enable:no-any + }); + }); + } + } + + /** + * @license + * Copyright 2018 Google LLC + * + * Use of this source code is governed by an MIT-style + * license that can be found in the LICENSE file or at + * https://opensource.org/licenses/MIT. + * ============================================================================= + */ + function standardizeSampleOrClassWeights(xWeight, outputNames, weightType) { + const numOutputs = outputNames.length; + if (xWeight == null || (Array.isArray(xWeight) && xWeight.length === 0)) { + return outputNames.map(name => null); + } + if (numOutputs === 1) { + if (Array.isArray(xWeight) && xWeight.length === 1) { + return xWeight; + } + else if (typeof xWeight === 'object' && outputNames[0] in xWeight) { + return [xWeight[outputNames[0]]]; + } + else { + return [xWeight]; + } + } + if (Array.isArray(xWeight)) { + if (xWeight.length !== numOutputs) { + throw new Error(`Provided ${weightType} is an array of ${xWeight.length} ` + + `element(s), but the model has ${numOutputs} outputs. 
` + + `Make sure a set of weights is provided for each model output.`); + } + return xWeight; + } + else if (typeof xWeight === 'object' && Object.keys(xWeight).length > 0 && + typeof xWeight[Object.keys(xWeight)[0]] === + 'object') { + const output = []; + outputNames.forEach(outputName => { + if (outputName in xWeight) { + output.push(xWeight[outputName]); + } + else { + output.push(null); + } + }); + return output; + } + else { + throw new Error(`The model has multiple (${numOutputs}) outputs, ` + + `so ${weightType} must be either an array with ` + + `${numOutputs} elements or an object with ${outputNames} keys. ` + + `Provided ${weightType} not understood: ${JSON.stringify(xWeight)}`); + } + } + /** + * Standardize class weighting objects. + * + * This function takes a single class-weighting object, an array of them, + * or a map from output name to class-weighting object. It compares it to the + * output name(s) of the model, base on which it outputs an array of + * class-weighting objects of which the length matches the number of outputs. + * + * @param classWeight Input class-weighting object(s). + * @param outputNames All output name(s) of the model. + * @return An array of class-weighting objects. The length of the array matches + * the model's number of outputs. + */ + function standardizeClassWeights(classWeight, outputNames) { + return standardizeSampleOrClassWeights(classWeight, outputNames, 'classWeight'); + } + function standardizeSampleWeights(classWeight, outputNames) { + return standardizeSampleOrClassWeights(classWeight, outputNames, 'sampleWeight'); + } + /** + * Standardize by-sample and/or by-class weights for training. + * + * Note that this function operates on one model output at a time. For a model + * with multiple outputs, you must call this function multiple times. + * + * @param y The target tensor that the by-sample and/or by-class weight is for. 
+ * The values of y are assumed to encode the classes, either directly + * as an integer index, or as one-hot encoding. + * @param sampleWeight By-sample weights. + * @param classWeight By-class weights: an object mapping class indices + * (integers) to a weight (float) to apply to the model's loss for the + * samples from this class during training. This can be useful to tell the + * model to "pay more attention" to samples from an under-represented class. + * @param sampleWeightMode The mode for the sample weights. + * @return A Promise of weight tensor, of which the size of the first dimension + * matches that of `y`. + */ + async function standardizeWeights(y, sampleWeight, classWeight, sampleWeightMode) { + if (sampleWeight != null || sampleWeightMode != null) { + // TODO(cais): Once 'temporal' mode is implemented, document it in the doc + // string. + throw new Error('Support sampleWeight is not implemented yet'); + } + if (classWeight != null) { + // Apply class weights per sample. + const yClasses = tidy(() => { + if (y.shape.length === 1) { + // Assume class indices. + return clone(y); + } + else if (y.shape.length === 2) { + if (y.shape[1] > 1) { + // Assume one-hot encoding of classes. + const axis = 1; + return argMax$2(y, axis); + } + else if (y.shape[1] === 1) { + // Class index. + return reshape$3(y, [y.shape[0]]); + } + else { + throw new Error(`Encountered unexpected last-dimension size (${y.shape[1]}) ` + + `during handling of class weights. The size is expected to be ` + + `>= 1.`); + } + } + else { + throw new Error(`Unexpected rank of target (y) tensor (${y.rank}) during ` + + `handling of class weights. The rank is expected to be 1 or 2.`); + } + }); + const yClassIndices = Array.from(await yClasses.data()); + dispose(yClasses); + const classSampleWeight = []; + yClassIndices.forEach(classIndex => { + if (classWeight[classIndex] == null) { + throw new Error(`classWeight must contain all classes in the training data. 
` + + `The class ${classIndex} exists in the data but not in ` + + `classWeight`); + } + else { + classSampleWeight.push(classWeight[classIndex]); + } + }); + return tensor1d(classSampleWeight, 'float32'); + } + else { + return null; + } + } + /** + * Apply per-sample weights on the loss values from a number of samples. + * + * @param losses Loss tensor of shape `[batchSize]`. + * @param sampleWeights Per-sample weight tensor of shape `[batchSize]`. + * @returns Tensor of the same shape as`losses`. + */ + function computeWeightedLoss(losses, sampleWeights) { + return mul(losses, sampleWeights); + } + + /** + * @license + * Copyright 2018 Google LLC + * + * Use of this source code is governed by an MIT-style + * license that can be found in the LICENSE file or at + * https://opensource.org/licenses/MIT. + * ============================================================================= + */ + // Default batch size used during tensor-based validation. + const DEFAULT_VALIDATION_BATCH_SIZE = 32; + /** + * Standardize the output of a dataset iterator for use by + * LayersModel.fitDataset(). + * + * @param model: A `tf.LayersModel` object. + * @param iteratorOut The output of a dataset iterator. It is required to be + * an object of the form `{xs: TensorOrArrayOrMap, ys: + * TensorOrArrayOrMap}`, where `TensorOrArrayOrMap` is a single `tf.Tensor`, + * a `tf.Tensor[]`, or a flat map from string names to `tf.Tensor`s. + * @returns A flat array of `tf.Tensor` objects: the input `tf.Tensor`s + * followed by the target `tf.Tensor`s. When `tf.Tensor`s are provided + * as a map, the order in the resulting array is taken from the `inputNames` + * and `outputNames` of the model. + */ + function standardizeDataIteratorOutput( + // Type `model` as `any` here to avoid circular dependency w/ + // training.ts. 
+ // tslint:disable-next-line:no-any + model, iteratorOut) { + let xs; + let ys; + const iteratorOutObj = iteratorOut; + xs = iteratorOutObj['xs']; + ys = iteratorOutObj['ys']; + assert$1(xs != null && ys != null, () => 'A Dataset iterator for fitDataset() is expected to generate ' + + 'objects of the form `{xs: xVal, ys: yVal}`, where the two ' + + 'values may be `tf.Tensor`, an array of Tensors, or a map of ' + + 'string to Tensor. The provided Dataset instead generates ' + + `${iteratorOut}`); + const flattenedXs = flattenTensorOrArrayOrMap('input', model.inputNames, xs); + const flattenedYs = flattenTensorOrArrayOrMap('output', model.outputNames, ys); + const batchSize = flattenedXs[0].shape[0]; + assert$1(flattenedXs.length === model.inputs.length, () => `LayersModel has ${model.inputs.length} inputs, but the dataset ` + + `provides ${flattenedXs.length} inputs. (Expected input keys: ` + + `${JSON.stringify(model.inputNames)})`); + assert$1(flattenedYs.length === model.outputs.length, () => `LayersModel has ${model.outputs.length} outputs, but the dataset ` + + `provides ${flattenedYs.length} outputs. 
(Expected output keys: ` + + `${JSON.stringify(model.outputNames)})`); + for (let xIndex = 0; xIndex < flattenedXs.length; xIndex++) { + assert$1(flattenedXs[xIndex].shape[0] === batchSize, () => `Batch size mismatch: input ` + + `${model.inputNames[xIndex]} has ${flattenedXs[xIndex].shape[0]}; ` + + `expected ${batchSize} based on input ${model.inputNames[0]}.`); + } + for (let yIndex = 0; yIndex < flattenedYs.length; yIndex++) { + assert$1(flattenedYs[yIndex].shape[0] === batchSize, () => `Batch size mismatch: output ` + + `${model.outputNames[yIndex]} has ${flattenedYs[yIndex].shape[0]}; ` + + `expected ${batchSize} based on input ${model.inputNames[0]}.`); + } + return { xs: flattenedXs, ys: flattenedYs }; + } + function flattenTensorOrArrayOrMap(inputOrOutput, names, values) { + if (values instanceof Tensor) { + return [values]; + } + else if (Array.isArray(values)) { + assert$1(values.length === names.length, () => `Received an array of ${values.length} Tensors, but expected ${names.length} to match the ${inputOrOutput} keys ${names}.`); + return values; + } + else { + const result = []; + // Check that all the required keys are available. + for (const name of names) { + if (values[name] == null) { + throw new ValueError(`The feature data generated by the dataset lacks the required ` + + `${inputOrOutput} key '${name}'.`); + } + result.push(values[name]); + } + return result; + } + } + function standardizeTensorValidationData(data) { + if (data.length === 3) { + throw new NotImplementedError('Validation with sample weights is not implemented yet.'); + } + return { xs: data[0], ys: data[1] }; + } + async function fitDataset( + // Type `model` as `any` here to avoid circular dependency w/ + // training.ts. + // tslint:disable-next-line:no-any + model, dataset, args) { + const hasBatchesPerEpoch = args.batchesPerEpoch != null; + assert$1(model.optimizer != null, () => 'You must compile a model before training/testing. 
Use ' + + 'LayersModel.compile(modelCompileConfig).'); + assert$1(args != null, () => `For fitDataset(), the 2nd argument (config) is required, ` + + `but it is not provided in this call.`); + assert$1(args.epochs != null && args.epochs > 0 && Number.isInteger(args.epochs), () => `For fitDataset(), config.epochs is expected to be a positive ` + + `integer, but got ${args.epochs}`); + assert$1(!hasBatchesPerEpoch || + (args.batchesPerEpoch > 0 && Number.isInteger(args.batchesPerEpoch)), () => `For fitDataset(), config.batchesPerEpoch is expected to be a ` + + `positive integer if specified, but got ${args.batchesPerEpoch}`); + assert$1( + // tslint:disable-next-line:no-any + args['validationSplit'] == null, () => '`validationSplit` is not supported by `fitDataset()`. ' + + 'Use validationData instead.'); + if (model.isTraining) { + throw new Error('Cannot start training because another fit() call is ongoing.'); + } + model.isTraining = true; + try { + const doValidation = args.validationData != null; + let valXs; + let valYs; + if (doValidation) { + if (isDatasetObject(args.validationData)) { + assert$1(args.validationBatches == null || + (args.validationBatches > 0 && + Number.isInteger(args.validationBatches)), () => `For fitDataset() with dataset-based validation, ` + + `config.validationBatches is expected not to be provided, ` + + `or to be a positive integer, ` + + `but got ${args.validationBatches}`); + } + else { + const validationData = standardizeTensorValidationData(args.validationData); + valXs = validationData.xs; + valYs = validationData.ys; + } + } + const trainFunction = model.makeTrainFunction(); + const outLabels = model.getDedupedMetricsNames(); + let callbackMetrics; + if (doValidation) { + callbackMetrics = + outLabels.slice().concat(outLabels.map(n => 'val_' + n)); + } + else { + callbackMetrics = outLabels.slice(); + } + const callbacks = standardizeCallbacks(args.callbacks, args.yieldEvery); + const verbose = args.verbose == null ? 
1 : args.verbose; + const { callbackList, history } = configureCallbacks(callbacks, verbose, args.epochs, null, null, getStepsPerEpoch(dataset, args), null, // Batch size determined by the dataset itself. + doValidation, callbackMetrics); + callbackList.setModel(model); + model.history = history; + await callbackList.onTrainBegin(); + model.stopTraining_ = false; + let epoch = args.initialEpoch == null ? 0 : args.initialEpoch; + let dataIterator = await dataset.iterator(); + while (epoch < args.epochs) { + const epochLogs = {}; + await callbackList.onEpochBegin(epoch); + let stepsDone = 0; + let batchIndex = 0; + if (!hasBatchesPerEpoch) { + dataIterator = await dataset.iterator(); + } + while (hasBatchesPerEpoch ? stepsDone < args.batchesPerEpoch : true) { + const iteratorOut = await dataIterator.next(); + // If `batchesPerEpoch` is specified, the dataset should not be + // exhausted until all epoches are done. + if (hasBatchesPerEpoch && iteratorOut.done) { + console.warn('You provided `batchesPerEpoch` as ' + + `${args.batchesPerEpoch}, ` + + 'but your dataset iterator ran out of data after ' + + `${stepsDone} batches; ` + + 'interrupting training. Make sure that your ' + + 'dataset can generate at least `batchesPerEpoch * epochs` ' + + 'batches (in this case, ' + + `${args.batchesPerEpoch * args.epochs} batches). 
` + + 'You may need to use the repeat() function when building ' + + 'your dataset.'); + break; + } + if (iteratorOut.value != null) { + const { xs, ys } = standardizeDataIteratorOutput(model, iteratorOut.value); + const batchLogs = {}; + batchLogs['batch'] = batchIndex; + batchLogs['size'] = xs[0].shape[0]; + await callbackList.onBatchBegin(batchIndex, batchLogs); + const sampleWeights = []; + if (args.classWeight != null) { + const standardClassWeights = standardizeClassWeights(args.classWeight, model.outputNames); + for (let i = 0; i < standardClassWeights.length; ++i) { + sampleWeights.push(await standardizeWeights(ys[i], null, standardClassWeights[i])); + } + } + // Train on batch. + const ins = xs.concat(ys).concat(sampleWeights); + const outs = trainFunction(ins); + dispose(ins); + for (let i = 0; i < outLabels.length; ++i) { + const label = outLabels[i]; + const out = outs[i]; + batchLogs[label] = out; + keep(out); + } + await callbackList.onBatchEnd(batchIndex, batchLogs); + disposeTensorsInLogs(batchLogs); + batchIndex++; + stepsDone++; + } + if (hasBatchesPerEpoch ? stepsDone >= args.batchesPerEpoch : + iteratorOut.done) { + // Epoch finished. Perform validation. + if (doValidation) { + let valOuts; + if (isDatasetObject(args.validationData)) { + valOuts = toList(await model.evaluateDataset(args.validationData, { batches: args.validationBatches })); + } + else { + valOuts = toList(model.evaluate(valXs, valYs, { + batchSize: args.validationBatchSize == null ? + DEFAULT_VALIDATION_BATCH_SIZE : + args.validationBatchSize, + verbose: 0 + })); + } + for (let i = 0; i < model.metricsNames.length; ++i) { + epochLogs[`val_${model.metricsNames[i]}`] = valOuts[i]; + } + } + // Call `break` to exit one epoch lopp after validation is done. If + // config.batchesPerEpoch is specified, an epoch while loop will + // stop when `stepsDone >= config.batchesPerEpoch`. 
When + // config.batchesPerEpoch is not provided, the following `break` is + // required to exit the while lopp after dataset is exhausted. + break; + } + if (model.stopTraining_) { + break; + } + } + await callbackList.onEpochEnd(epoch, epochLogs); + epoch++; + if (model.stopTraining_) { + break; + } + } + await callbackList.onTrainEnd(); + await model.history.syncData(); + return model.history; + } + finally { + model.isTraining = false; + } + } + /** Helper function that determines number of steps (batches) per epoch. */ + function getStepsPerEpoch(dataset, args) { + // Attempt to determine # of batches in an epoch. + let stepsPerEpoch = null; + if (args.batchesPerEpoch != null) { + stepsPerEpoch = args.batchesPerEpoch; + } + else if (Number.isFinite(dataset.size)) { + stepsPerEpoch = dataset.size; + } + return stepsPerEpoch; + } + // Check if provided object is a Dataset object by checking its .iterator + // element. + function isDatasetObject(dataset) { + return (typeof dataset.iterator === 'function'); + } + // Check if provided object is a LazyIterator object by checking it's .next + // element. + function isLazyIteratorObject(iterator) { + return (typeof iterator.next === 'function'); + } + async function evaluateDataset( + // Type `model` as `any` here to avoid circular dependency w/ + // training.ts. + // tslint:disable-next-line:no-any + model, dataset, args) { + args = args || {}; + const hasBatches = args.batches != null; + const f = model.testFunction; + let outs = []; + if (args.verbose > 0) { + throw new NotImplementedError('Verbose mode is not implemented yet.'); + } + assert$1(!hasBatches || (args.batches > 0 && Number.isInteger(args.batches)), () => 'Test loop expects `batches` to be a positive integer, but ' + + `received ${JSON.stringify(args.batches)}`); + const dataIterator = isLazyIteratorObject(dataset) ? + dataset : + await dataset.iterator(); + // Keeps track of number of examples used in this evaluation. 
+ let numExamples = 0; + let batch = 0; + while (hasBatches ? batch < args.batches : true) { + const iteratorOut = await dataIterator.next(); + outs = tidy(() => { + if (iteratorOut.value) { + // TODO(cais): Once real dataset is available, use + // `map(x => standardizeDataIteratorOutput(model, x).map(f)`. + const { xs, ys } = standardizeDataIteratorOutput(model, iteratorOut.value); + const xsAndYs = xs.concat(ys); + const batchOuts = tidy(() => f(xsAndYs)); + dispose(xsAndYs); + if (batch === 0) { + for (let i = 0; i < batchOuts.length; ++i) { + outs.push(scalar(0)); + } + } + const batchSize = xsAndYs[0].shape[0]; + for (let i = 0; i < batchOuts.length; ++i) { + const batchOut = batchOuts[i]; + const oldScalar = outs[i]; + outs[i] = + tidy(() => add$3(outs[i], mul(batchSize, batchOut))); + if (batch > 0) { + dispose(oldScalar); + } + } + dispose(batchOuts); + numExamples += batchSize; + ++batch; + } + return outs; + }); + if (iteratorOut.done) { + if (hasBatches) { + console.warn('Your dataset iterator ran out of data during evaluateDataset(). ' + + 'Interrupting evalution. Make sure that your ' + + 'dataset can generate at least `batches` ' + + `batches (in this case, ${args.batches} batches). ` + + 'You may need to use the repeat() function when building ' + + 'your dataset.'); + } + break; + } + } + for (let i = 0; i < outs.length; ++i) { + const oldScalar = outs[i]; + outs[i] = div$1(outs[i], numExamples); + dispose(oldScalar); + } + return singletonOrArray(outs); + } + + /** + * @license + * Copyright 2018 Google LLC + * + * Use of this source code is governed by an MIT-style + * license that can be found in the LICENSE file or at + * https://opensource.org/licenses/MIT. 
+ * ============================================================================= + */ + function checkBatchSize(batchSize) { + assert$1(batchSize > 0 && Number.isInteger(batchSize), () => `batchSize is required to be a positive integer, but got ${batchSize}`); + } + /** + * Slice a Tensor or an Array of Tensors, by start and stop indices. + * + * Porting Note: The `_slice_arrays` function in PyKeras is covered by this + * function and `sliceArraysByIndices()` together. + * + * @param arrays: the input. + * @param start: the starting index (inclusive). + * @param stop: the stopping index (exclusive). + * @returns The result of the slicing. If `arrays` is an `Array` of + * `tf.Tensor`s, the slicing will be applied to all elements of the `Array` + * in the same way. + */ + function sliceArrays(arrays, start, stop) { + if (arrays == null) { + return [null]; + } + else if (Array.isArray(arrays)) { + return arrays.map(array => sliceAlongFirstAxis(array, start, stop - start)); + } + else { // Tensor. + return sliceAlongFirstAxis(arrays, start, stop - start); + } + } + /** + * Slice a Tensor or an Array of Tensors, by random-order indices. + * + * Porting Note: The `_slice_arrays` function in PyKeras is covered by this + * function and `sliceArrays()` together. + * + * @param arrays The input `tf.Tensor` or `Array` of `tf.Tensor`s to slice. + * If an `Array` of `tf.Tensor`s, all `tf.Tensor`s will be sliced in the + * same fashion. + * @param indices The indices to use for slicing along the first (batch) + * dimension. + * @returns Result(s) of the slicing. + */ + function sliceArraysByIndices(arrays, indices) { + return tidy(() => { + if (arrays == null) { + return null; + } + else if (Array.isArray(arrays)) { + return arrays.map(array => sliceArraysByIndices(array, indices)); + } + else { + // TODO(cais): indices should be a pre-constructed Tensor1D to avoid + // tensor1d() calls. + return gather(arrays, indices.dtype === 'int32' ? 
indices : cast$3(indices, 'int32')); + } + }); + } + /** + * Returns a list of batch indices (tuples of indices). + * @param size: Integer, total size of the data to slice into batches. + * @param batchSize: Integer, batch size. + * @returns An Array of [batchStart, batchEnd] tuples. batchStart is + * inclusive; batchEnd is exclusive. I.e., each batch consists of indices x + * that satisfy batchStart <= x < batchEnd. + */ + function makeBatches(size, batchSize) { + const output = []; + let batchStart = 0; + let batchEnd = null; + while (batchStart < size) { + batchEnd = batchStart + batchSize; + if (batchEnd >= size) { + batchEnd = size; + } + output.push([batchStart, batchEnd]); + batchStart = batchEnd; + } + return output; + } + /** + * Ensure tensors all have a rank of at least 2. + * + * If a tensor has a rank of 1, it is dimension-expanded to rank 2. + * If any tensor has a rank of 0 (i.e., is a scalar), an error will be thrown. + */ + function ensureTensorsRank2OrHigher(tensors) { + const outs = []; + if (tensors instanceof Tensor) { + tensors = [tensors]; + } + // Make Tensors at least 2D. + for (let i = 0; i < tensors.length; ++i) { + const tensor = tensors[i]; + if (tensor.rank === 1) { + outs.push(expandDims$2(tensor, 1)); + } + else if (tensor.rank === 0) { + throw new Error('Expected tensor to be at least 1D, but received a 0D tensor ' + + '(scalar).'); + } + else { + outs.push(tensor); + } + } + return outs; + } + /** + * Compare a set of tensors with a reference (old) set, discard the ones + * in the new set that are not present in the reference set. + * + * This method is used for memory clenaup during calls such as + * LayersModel.fit(). + * + * @param tensors New set which may contain Tensors not present in + * `refTensors`. + * @param refTensors Reference Tensor set. + */ + // TODO(cais, kangyizhang): Deduplicate with tfjs-data. 
+ function disposeNewTensors(tensors, refTensors) { + if (tensors == null) { + return; + } + const oldTensorIds = []; + if (refTensors instanceof Tensor) { + oldTensorIds.push(refTensors.id); + } + else if (Array.isArray(refTensors)) { + refTensors.forEach(t => oldTensorIds.push(t.id)); + } + else if (refTensors != null) { + // `oldTensors` is a map from string name to Tensor. + for (const name in refTensors) { + const oldTensor = refTensors[name]; + oldTensorIds.push(oldTensor.id); + } + } + const tensorsToDispose = []; + if (tensors instanceof Tensor) { + if (oldTensorIds.indexOf(tensors.id) === -1) { + tensorsToDispose.push(tensors); + } + } + else if (Array.isArray(tensors)) { + tensors.forEach(t => { + if (oldTensorIds.indexOf(t.id) === -1) { + tensorsToDispose.push(t); + } + }); + } + else if (tensors != null) { + // `oldTensors` is a map from string name to Tensor. + for (const name in tensors) { + const tensor = tensors[name]; + if (oldTensorIds.indexOf(tensor.id) === -1) { + tensorsToDispose.push(tensor); + } + } + } + tensorsToDispose.forEach(t => { + if (!t.isDisposed) { + t.dispose(); + } + }); + } + + /** + * @license + * Copyright 2018 Google LLC + * + * Use of this source code is governed by an MIT-style + * license that can be found in the LICENSE file or at + * https://opensource.org/licenses/MIT. + * ============================================================================= + */ + /** + * Helper function for polymorphic input data: 1. singleton Tensor. + */ + function isDataTensor(x) { + return x instanceof Tensor; + } + /** + * Helper function for polymorphic input data: 2. Array of Tensor. + */ + function isDataArray(x) { + return Array.isArray(x); + } + /** + * Helper function for polymorphic input data: 3. "dict" of Tensor. + */ + function isDataDict(x) { + return !isDataTensor(x) && !isDataArray(x); + } + /** + * Normalizes inputs and targets provided by users. + * @param data User-provided input data (polymorphic). 
+ * @param names An Array of expected Tensor names. + * @param shapes Optional Array of expected Tensor shapes. + * @param checkBatchAxis Whether to check that the batch axis of the arrays + * match the expected value found in `shapes`. + * @param exceptionPrefix String prefix used for exception formatting. + * @returns List of standardized input Tensors (one Tensor per model input). + * @throws ValueError: in case of improperly formatted user data. + */ + function standardizeInputData(data, names, shapes, checkBatchAxis = true, exceptionPrefix = '') { + if (names == null || names.length === 0) { + // Check for the case where the model expected no data, but some data got + // sent. + if (data != null) { + let gotUnexpectedData = false; + if (isDataArray(data) && data.length > 0) { + gotUnexpectedData = true; + } + else if (isDataDict(data)) { + for (const key in data) { + if (data.hasOwnProperty(key)) { + gotUnexpectedData = true; + break; + } + } + } + else { + // `data` is a singleton Tensor in this case. + gotUnexpectedData = true; + } + if (gotUnexpectedData) { + throw new ValueError(`Error when checking model ${exceptionPrefix} expected no data, ` + + `but got ${data}`); + } + } + return []; + } + if (data == null) { + return names.map(name => null); + } + let arrays; + if (isDataDict(data)) { + data = data; + arrays = []; + for (const name of names) { + if (data[name] == null) { + throw new ValueError(`No data provided for "${name}". Need data for each key in: ` + + `${names}`); + } + arrays.push(data[name]); + } + } + else if (isDataArray(data)) { + data = data; + if (data.length !== names.length) { + throw new ValueError(`Error when checking model ${exceptionPrefix}: the Array of ` + + `Tensors that you are passing to your model is not the size the ` + + `model expected. 
Expected to see ${names.length} Tensor(s), but ` + + `instead got the following list of Tensor(s): ${data}`); + } + arrays = data; + } + else { + data = data; + if (names.length > 1) { + throw new ValueError(`The model ${exceptionPrefix} expects ${names.length} Tensor(s), ` + + `but only received one Tensor. Found: Tensor with shape ${data.shape}`); + } + arrays = [data]; + } + arrays = ensureTensorsRank2OrHigher(arrays); + // Check shape compatibility. + if (shapes != null) { + for (let i = 0; i < names.length; ++i) { + if (shapes[i] == null) { + continue; + } + const array = arrays[i]; + if (array.shape.length !== shapes[i].length) { + throw new ValueError(`Error when checking ${exceptionPrefix}: expected ${names[i]} ` + + `to have ${shapes[i].length} dimension(s). but got array with ` + + `shape ${array.shape}`); + } + for (let j = 0; j < shapes[i].length; ++j) { + if (j === 0 && !checkBatchAxis) { + // Skip the first (batch) axis. + continue; + } + const dim = array.shape[j]; + const refDim = shapes[i][j]; + if (refDim != null && refDim >= 0 && dim !== refDim) { + throw new ValueError(`${exceptionPrefix} expected a batch of elements where each ` + + `example has shape [${shapes[i].slice(1, shapes[i].length)}] ` + + `(i.e.,tensor shape [*,${shapes[i].slice(1, shapes[i].length)}])` + + ` but the ${exceptionPrefix} received an input with ${array.shape[0]}` + + ` examples, each with shape [${array.shape.slice(1, array.shape.length)}]` + + ` (tensor shape [${array.shape}])`); + } + } + } + } + return arrays; + } + /** + * User input validation for Tensors. + * @param inputs `Array` of `tf.Tensor`s for inputs. + * @param targets `Array` of `tf.Tensor`s for targets. + * @param weights Optional `Array` of `tf.Tensor`s for sample weights. + * @throws ValueError: in case of incorrectly formatted data. 
+ */ + function checkArrayLengths(inputs, targets, weights) { + const setX = unique$2(inputs.map(input => input.shape[0])); + setX.sort(); + const setY = unique$2(targets.map(target => target.shape[0])); + setY.sort(); + // TODO(cais): Check `weights` as well. + if (setX.length > 1) { + throw new ValueError(`All input Tensors (x) should have the same number of samples. ` + + `Got array shapes: ` + + `${JSON.stringify(inputs.map(input => input.shape))}`); + } + if (setY.length > 1) { + throw new ValueError(`All target Tensors (y) should have the same number of samples. ` + + `Got array shapes: ` + + `${JSON.stringify(targets.map(target => target.shape))}`); + } + if (setX.length > 0 && setY.length > 0 && !arraysEqual(setX, setY)) { + throw new ValueError(`Input Tensors should have the same number of samples as target ` + + `Tensors. Found ${setX[0]} input sample(s) and ${setY[0]} target ` + + `sample(s).`); + } + } + /** + * Validation on the compatibility of targes and loss functions. + * + * This helps prevent users from using loss functions incorrectly. + * + * @param targets `Array` of `tf.Tensor`s of targets. + * @param lossFns `Array` of loss functions. + * @param outputShapes `Array` of shapes of model outputs. + */ + function checkLossAndTargetCompatibility(targets, lossFns, outputShapes) { + // TODO(cais): Dedicated test coverage? + const keyLosses = [ + meanSquaredError$1, binaryCrossentropy$2, + categoricalCrossentropy$2 + ]; + for (let i = 0; i < targets.length; ++i) { + const y = targets[i]; + const loss = lossFns[i]; + const shape = outputShapes[i]; + if (loss == null) { + continue; + } + if (loss === categoricalCrossentropy$2) { + if (y.shape[y.shape.length - 1] === 1) { + throw new ValueError(`You are passing a target array of shape ${y.shape} while using ` + + `a loss 'categorical_crossentropy'. 
'categorical_crossentropy'` + + `expects targets to be binary matrices (1s and 0s) of shape ` + + `[samples, classes].`); + // TODO(cais): Example code in error message. + } + } + if (keyLosses.indexOf(loss) !== -1) { + const slicedYShape = y.shape.slice(1); + const slicedShape = shape.slice(1); + for (let j = 0; j < slicedYShape.length; ++j) { + const targetDim = slicedYShape[j]; + const outDim = slicedShape[j]; + if (outDim != null && targetDim !== outDim) { + throw new ValueError(`A target Tensor with shape ${y.shape} was passed for an ` + + `output of shape ${shape}, while using a loss function that ` + + `expects targets to have the same shape as the output.`); + } + } + } + } + } + /** + * Check inputs provided by the user. + * + * Porting Note: This corresponds to _standardize_input_data() in Python + * Keras. Because of the strong typing in TF.js, we do not need to convert + * the data. Specifically: + * 1) in PyKeras, `data` can be `DataFrame` instances from pandas, for + * example. We don't need to worry about that here because there is no + * widely popular javascript/typesdcript equivalent of pandas (so far). + * If one becomes available in the future, we can add support. + * 2) in PyKeras, inputs can be Python dict. But here we are stipulating + * that the data is either a single `tf.Tensor` or an Array of `tf.Tensor`s. We + * may add support for `Object` data inputs in the future when the need + * arises. + * + * Instead, we perform basic checks for number of parameters and shapes. + * + * @param data: The input data. + * @param names: Name for the inputs, from the model. + * @param shapes: Expected shapes for the input data, from the model. + * @param checkBatchAxis: Whether the size along the batch axis (i.e., the + * first dimension) will be checked for matching. + * @param exceptionPrefix: Execption prefix message, used in generating error + * messages. + * @throws ValueError: on incorrect number of inputs or mismatches in shapes. 
+ */ + function checkInputData(data, names, shapes, checkBatchAxis = true, exceptionPrefix = '') { + let arrays; + if (Array.isArray(data)) { + if (data.length !== names.length) { + throw new ValueError(`Error when checking model ${exceptionPrefix}: the Array of ` + + `Tensors that you are passing to your model is not the size the ` + + `the model expected. Expected to see ${names.length} Tensor(s),` + + ` but instead got ${data.length} Tensors(s).`); + } + arrays = data; + } + else { + if (names.length > 1) { + throw new ValueError(`The model expects ${names.length} ${exceptionPrefix} Tensors, ` + + `but only received one Tensor. Found: array with shape ` + + `${JSON.stringify(data.shape)}.`); + } + arrays = [data]; + } + if (shapes != null) { + for (let i = 0; i < names.length; ++i) { + if (shapes[i] == null) { + continue; + } + const array = arrays[i]; + if (array.shape.length !== shapes[i].length) { + throw new ValueError(`Error when checking ${exceptionPrefix}: expected ${names[i]} ` + + `to have ${shapes[i].length} dimension(s), but got array with ` + + `shape ${JSON.stringify(array.shape)}`); + } + for (let j = 0; j < shapes[i].length; ++j) { + if (j === 0 && !checkBatchAxis) { + continue; + } + const dim = array.shape[j]; + const refDim = shapes[i][j]; + if (refDim != null) { + if (refDim !== dim) { + throw new ValueError(`Error when checking ${exceptionPrefix}: expected ` + + `${names[i]} to have shape ${JSON.stringify(shapes[i])} but ` + + `got array with shape ${JSON.stringify(array.shape)}.`); + } + } + } + } + } + } + /** + * Maps metric functions to model outputs. + * @param metrics An shortcut strings name, metric function, `Array` or dict + * (`Object`) of metric functions. + * @param outputNames An `Array` of the names of model outputs. + * @returns An `Array` (one entry per model output) of `Array` of metric + * functions. 
For instance, if the model has 2 outputs, and for the first + * output we want to compute `binaryAccuracy` and `binaryCrossentropy`, + * and just `binaryAccuracy` for the second output, the `Array` would look + * like: + * `[[binaryAccuracy, binaryCrossentropy], [binaryAccuracy]]` + * @throws TypeError: incompatible metrics format. + */ + function collectMetrics(metrics, outputNames) { + if (metrics == null || Array.isArray(metrics) && metrics.length === 0) { + return outputNames.map(name => []); + } + let wrappedMetrics; + if (typeof metrics === 'string' || typeof metrics === 'function') { + wrappedMetrics = [metrics]; + } + else if (Array.isArray(metrics) || typeof metrics === 'object') { + wrappedMetrics = metrics; + } + else { + throw new TypeError('Type of metrics argument not understood. Expected an string,' + + `function, Array, or Object, found: ${metrics}`); + } + if (Array.isArray(wrappedMetrics)) { + // We then apply all metrics to all outputs. + return outputNames.map(name => wrappedMetrics); + } + else { + // In this case, metrics is a dict. + const nestedMetrics = []; + for (const name of outputNames) { + let outputMetrics = wrappedMetrics.hasOwnProperty(name) ? wrappedMetrics[name] : []; + if (!Array.isArray(outputMetrics)) { + outputMetrics = [outputMetrics]; + } + nestedMetrics.push(outputMetrics); + } + return nestedMetrics; + } + } + const LAYERS_MODEL_FORMAT_NAME = 'layers-model'; + /** + * A `tf.LayersModel` is a directed, acyclic graph of `tf.Layer`s plus methods + * for training, evaluation, prediction and saving. + * + * `tf.LayersModel` is the basic unit of training, inference and evaluation in + * TensorFlow.js. To create a `tf.LayersModel`, use `tf.LayersModel`. + * + * See also: + * `tf.Sequential`, `tf.loadLayersModel`. 
+ * + * @doc {heading: 'Models', subheading: 'Classes'} + */ + class LayersModel extends Container { + constructor(args) { + super(args); + this.isTraining = false; + } + /** + * Print a text summary of the model's layers. + * + * The summary includes + * - Name and type of all layers that comprise the model. + * - Output shape(s) of the layers + * - Number of weight parameters of each layer + * - If the model has non-sequential-like topology, the inputs each layer + * receives + * - The total number of trainable and non-trainable parameters of the model. + * + * ```js + * const input1 = tf.input({shape: [10]}); + * const input2 = tf.input({shape: [20]}); + * const dense1 = tf.layers.dense({units: 4}).apply(input1); + * const dense2 = tf.layers.dense({units: 8}).apply(input2); + * const concat = tf.layers.concatenate().apply([dense1, dense2]); + * const output = + * tf.layers.dense({units: 3, activation: 'softmax'}).apply(concat); + * + * const model = tf.model({inputs: [input1, input2], outputs: output}); + * model.summary(); + * ``` + * + * @param lineLength Custom line length, in number of characters. + * @param positions Custom widths of each of the columns, as either + * fractions of `lineLength` (e.g., `[0.5, 0.75, 1]`) or absolute number + * of characters (e.g., `[30, 50, 65]`). Each number corresponds to + * right-most (i.e., ending) position of a column. + * @param printFn Custom print function. Can be used to replace the default + * `console.log`. For example, you can use `x => {}` to mute the printed + * messages in the console. + * + * @doc {heading: 'Models', subheading: 'Classes'} + */ + summary(lineLength, positions, printFn = console.log) { + if (!this.built) { + throw new ValueError(`This model has never been called, thus its weights have not been ` + + `created yet. So no summary can be displayed. 
Build the model ` + + `first (e.g., by calling it on some test data).`); + } + printSummary(this, lineLength, positions, printFn); + } + /** + * Configures and prepares the model for training and evaluation. Compiling + * outfits the model with an optimizer, loss, and/or metrics. Calling `fit` + * or `evaluate` on an un-compiled model will throw an error. + * + * @param args a `ModelCompileArgs` specifying the loss, optimizer, and + * metrics to be used for fitting and evaluating this model. + * + * @doc {heading: 'Models', subheading: 'Classes'} + */ + compile(args) { + if (args.loss == null) { + args.loss = []; + } + this.loss = args.loss; + if (typeof args.optimizer === 'string') { + this.optimizer_ = getOptimizer(args.optimizer); + this.isOptimizerOwned = true; + } + else { + if (!(args.optimizer instanceof Optimizer)) { + throw new ValueError(`User-defined optimizer must be an instance of tf.Optimizer.`); + } + this.optimizer_ = args.optimizer; + this.isOptimizerOwned = false; + } + // TODO(cais): Add lossWeights. + // TODO(cais): Add sampleWeightMode. + // Prepare loss functions. + let lossFunctions = []; + if (!Array.isArray(args.loss) && typeof args.loss !== 'string' && + typeof args.loss !== 'function') { + args.loss = args.loss; + for (const name in args.loss) { + if (this.outputNames.indexOf(name) === -1) { + throw new ValueError(`Unknown entry in loss dictionary: "${name}". ` + + `Only expected the following keys: ${this.outputNames}`); + } + } + for (const name of this.outputNames) { + if (args.loss[name] == null) { + console.warn(`Output "${name}" is missing from loss dictionary. We assume ` + + `this was done on purpose, and we will not be expecting data ` + + `to be passed to ${name} during training`); + } + lossFunctions.push(get$1(args.loss[name])); + } + } + else if (Array.isArray(args.loss)) { + if (args.loss.length !== this.outputs.length) { + throw new ValueError(`When passing an Array as loss, it should have one entry per ` + + `model output. 
The model has ${this.outputs.length} output(s), ` + + `but you passed loss=${args.loss}.`); + } + const theLosses = args.loss; + lossFunctions = theLosses.map(l => get$1(l)); + } + else { + const lossFunction = get$1(args.loss); + this.outputs.forEach(_ => { + lossFunctions.push(lossFunction); + }); + } + this.lossFunctions = lossFunctions; + this.feedOutputNames = []; + this.feedOutputShapes = []; + this.feedLossFns = []; + for (let i = 0; i < this.outputs.length; ++i) { + // TODO(cais): Logic for skipping target(s). + const shape = this.internalOutputShapes[i]; + const name = this.outputNames[i]; + this.feedOutputNames.push(name); + this.feedOutputShapes.push(shape); + this.feedLossFns.push(this.lossFunctions[i]); + } + // TODO(cais): Add logic for output masks. + // TODO(cais): Add logic for sample weights. + const skipTargetIndices = []; + // Prepare metrics. + this.metrics = args.metrics; + // TODO(cais): Add weightedMetrics. + this.metricsNames = ['loss']; + this.metricsTensors = []; + // Compute total loss. + // Porting Note: In PyKeras, metrics_tensors are symbolic tensor objects. + // Here, metricsTensors are TypeScript functions. This difference is due + // to the difference in symbolic/imperative property of the backends. + nameScope('loss', () => { + for (let i = 0; i < this.outputs.length; ++i) { + if (skipTargetIndices.indexOf(i) !== -1) { + continue; + } + // TODO(cais): Add weightedLoss, sampleWeight and mask. + // The following line should be weightedLoss + const weightedLoss = this.lossFunctions[i]; + if (this.outputs.length > 1) { + this.metricsTensors.push([weightedLoss, i]); + this.metricsNames.push(this.outputNames[i] + '_loss'); + } + } + // Porting Note: Due to the imperative nature of the backend, we calculate + // the regularizer penalties in the totalLossFunction, instead of here. + }); + const nestedMetrics = collectMetrics(args.metrics, this.outputNames); + // TODO(cais): Add nestedWeightedMetrics. 
+ /** + * Helper function used in loop below. + */ + const appendMetric = (outputIndex, metricName, metricTensor) => { + if (this.outputNames.length > 1) { + metricName = this.outputNames[outputIndex] + '_' + metricName; + } + this.metricsNames.push(metricName); + this.metricsTensors.push([metricTensor, outputIndex]); + }; + nameScope('metric', () => { + for (let i = 0; i < this.outputs.length; ++i) { + if (skipTargetIndices.indexOf(i) !== -1) { + continue; + } + const outputMetrics = nestedMetrics[i]; + // TODO(cais): Add weights and outputWeightedMetrics. + // TODO(cais): Add optional arg `weights` to the following function. + const handleMetrics = (metrics) => { + const metricNamePrefix = ''; + let metricName; + let accFn; + let weightedMetricFn; + // TODO(cais): Use 'weights_' for weighted metrics. + for (const metric of metrics) { + if (typeof metric === 'string' && + ['accuracy', 'acc', 'crossentropy', 'ce'].indexOf(metric) !== + -1) { + const outputShape = this.internalOutputShapes[i]; + if (outputShape[outputShape.length - 1] === 1 || + this.lossFunctions[i] === binaryCrossentropy$2) { + // case: binary accuracy/crossentropy. + if (['accuracy', 'acc'].indexOf(metric) !== -1) { + accFn = binaryAccuracy$1; + } + else if (['crossentropy', 'ce'].indexOf(metric) !== -1) { + accFn = binaryCrossentropy$1; + } + } + else if (this.lossFunctions[i] === + sparseCategoricalCrossentropy$1) { + // case: categorical accuracy / crossentropy with sparse + // targets. + if (['accuracy', 'acc'].indexOf(metric) !== -1) { + accFn = sparseCategoricalAccuracy$1; + } + else if (['crossentropy', 'ce'].indexOf(metric) !== -1) { + accFn = sparseCategoricalCrossentropy; + } + } + else { + // case: categorical accuracy / crossentropy. 
+ if (['accuracy', 'acc'].indexOf(metric) !== -1) { + accFn = categoricalAccuracy$1; + } + else if (['crossentropy', 'ce'].indexOf(metric) !== -1) { + accFn = categoricalCrossentropy$1; + } + } + let suffix; + if (['accuracy', 'acc'].indexOf(metric) !== -1) { + suffix = 'acc'; + } + else if (['crossentropy', 'ce'].indexOf(metric) !== -1) { + suffix = 'ce'; + } + // TODO(cais): Add weighting actually. + weightedMetricFn = accFn; + metricName = metricNamePrefix + suffix; + } + else { + const metricFn = get(metric); + // TODO(cais): Add weighting actually. + weightedMetricFn = metricFn; + metricName = + metricNamePrefix + getLossOrMetricName(metric); + } + // TODO(cais): Add weighting and masking to metricResult. + let metricResult; + nameScope(metricName, () => { + metricResult = weightedMetricFn; + }); + appendMetric(i, metricName, metricResult); + } + }; + handleMetrics(outputMetrics); + // TODO(cais): Call handleMetrics with weights. + } + }); + // Porting Notes: Given the imperative backend of tfjs-core, + // there is no need for constructing the symbolic graph and placeholders. + this.collectedTrainableWeights = this.trainableWeights; + } + /** + * Check trainable weights count consistency. + * + * This will raise a warning if `this.trainableWeights` and + * `this.collectedTrainableWeights` are inconsistent (i.e., have different + * numbers of parameters). + * Inconsistency will typically arise when one modifies `model.trainable` + * without calling `model.compile()` again. + */ + checkTrainableWeightsConsistency() { + if (this.collectedTrainableWeights == null) { + return; + } + if (this.trainableWeights.length !== + this.collectedTrainableWeights.length) { + console.warn('Discrepancy between trainableweights and collected trainable ' + + 'weights. Did you set `model.trainable` without calling ' + + '`model.compile()` afterwards?'); + } + } + /** + * Returns the loss value & metrics values for the model in test mode. 
+ * + * Loss and metrics are specified during `compile()`, which needs to happen + * before calls to `evaluate()`. + * + * Computation is done in batches. + * + * ```js + * const model = tf.sequential({ + * layers: [tf.layers.dense({units: 1, inputShape: [10]})] + * }); + * model.compile({optimizer: 'sgd', loss: 'meanSquaredError'}); + * const result = model.evaluate( + * tf.ones([8, 10]), tf.ones([8, 1]), {batchSize: 4}); + * result.print(); + * ``` + * + * @param x `tf.Tensor` of test data, or an `Array` of `tf.Tensor`s if the + * model has multiple inputs. + * @param y `tf.Tensor` of target data, or an `Array` of `tf.Tensor`s if the + * model has multiple outputs. + * @param args A `ModelEvaluateArgs`, containing optional fields. + * + * @return `Scalar` test loss (if the model has a single output and no + * metrics) or `Array` of `Scalar`s (if the model has multiple outputs + * and/or metrics). The attribute `model.metricsNames` + * will give you the display labels for the scalar outputs. + * + * @doc {heading: 'Models', subheading: 'Classes'} + */ + evaluate(x, y, args = {}) { + const batchSize = args.batchSize == null ? 32 : args.batchSize; + checkBatchSize(batchSize); + // TODO(cais): Standardize `config.sampleWeights` as well. + // Validate user data. + const checkBatchAxis = true; + const standardizedOuts = this.standardizeUserDataXY(x, y, checkBatchAxis, batchSize); + try { + // TODO(cais): If uses `useLearningPhase`, set the corresponding element + // of the input to 0. + const ins = standardizedOuts[0].concat(standardizedOuts[1]); + this.makeTestFunction(); + const f = this.testFunction; + const testOuts = this.testLoop(f, ins, batchSize, args.verbose, args.steps); + return singletonOrArray(testOuts); + } + finally { + disposeNewTensors(standardizedOuts[0], x); + disposeNewTensors(standardizedOuts[1], y); + } + } + // TODO(cais): Add code snippet below once real dataset objects are + // available. + /** + * Evaluate model using a dataset object. 
+ * + * Note: Unlike `evaluate()`, this method is asynchronous (`async`). + * + * @param dataset A dataset object. Its `iterator()` method is expected + * to generate a dataset iterator object, the `next()` method of which + * is expected to produce data batches for evaluation. The return value + * of the `next()` call ought to contain a boolean `done` field and a + * `value` field. The `value` field is expected to be an array of two + * `tf.Tensor`s or an array of two nested `tf.Tensor` structures. The former + * case is for models with exactly one input and one output (e.g. + * a sequential model). The latter case is for models with multiple + * inputs and/or multiple outputs. Of the two items in the array, the + * first is the input feature(s) and the second is the output target(s). + * @param args A configuration object for the dataset-based evaluation. + * @returns Loss and metric values as an Array of `Scalar` objects. + * + * @doc {heading: 'Models', subheading: 'Classes'} + */ + async evaluateDataset(dataset, args) { + this.makeTestFunction(); + return evaluateDataset(this, dataset, args); + } + /** + * Get number of samples provided for training, evaluation or prediction. + * + * @param ins Input `tf.Tensor`. + * @param batchSize Integer batch size, optional. + * @param steps Total number of steps (batches of samples) before + * declaring loop finished. Optional. + * @param stepsName The public API's parameter name for `steps`. + * @returns Number of samples provided. 
+ */ + checkNumSamples(ins, batchSize, steps, stepsName = 'steps') { + let numSamples; + if (steps != null) { + numSamples = null; + if (batchSize != null) { + throw new ValueError(`If ${stepsName} is set, batchSize must be null or undefined.` + + `Got batchSize = ${batchSize}`); + } + } + else if (ins != null) { + if (Array.isArray(ins)) { + numSamples = ins[0].shape[0]; + } + else { + numSamples = ins.shape[0]; + } + } + else { + throw new ValueError(`Either the input data should have a defined shape, or ` + + `${stepsName} shoud be specified.`); + } + return numSamples; + } + /** + * Execute internal tensors of the model with input data feed. + * @param inputs Input data feed. Must match the inputs of the model. + * @param outputs Names of the output tensors to be fetched. Must match + * names of the SymbolicTensors that belong to the graph. + * @returns Fetched values for `outputs`. + */ + execute(inputs, outputs) { + if (Array.isArray(outputs) && outputs.length === 0) { + throw new ValueError('`outputs` is an empty Array, which is not allowed.'); + } + const outputsIsArray = Array.isArray(outputs); + const outputNames = (outputsIsArray ? outputs : [outputs]); + const outputSymbolicTensors = this.retrieveSymbolicTensors(outputNames); + // Format the input into a FeedDict. 
+ const feedDict = new FeedDict(); + if (inputs instanceof Tensor) { + inputs = [inputs]; + } + if (Array.isArray(inputs)) { + if (inputs.length !== this.inputs.length) { + throw new ValueError(`The number of inputs provided (${inputs.length}) ` + + `does not match the number of inputs of this model ` + + `(${this.inputs.length}).`); + } + for (let i = 0; i < this.inputs.length; ++i) { + feedDict.add(this.inputs[i], inputs[i]); + } + } + else { + for (const input of this.inputs) { + const tensorValue = inputs[input.name]; + if (tensorValue == null) { + throw new ValueError(`No value is provided for the model's input ${input.name}`); + } + feedDict.add(input, tensorValue); + } + } + // Run execution. + const executeOutputs = execute(outputSymbolicTensors, feedDict); + return outputsIsArray ? executeOutputs : executeOutputs[0]; + } + /** + * Retrieve the model's internal symbolic tensors from symbolic-tensor names. + */ + retrieveSymbolicTensors(symbolicTensorNames) { + const outputSymbolicTensors = pyListRepeat(null, symbolicTensorNames.length); + let outputsRemaining = symbolicTensorNames.length; + for (const layer of this.layers) { + const layerOutputs = Array.isArray(layer.output) ? 
layer.output : [layer.output]; + const layerOutputNames = layerOutputs.map(output => output.name); + for (let i = 0; i < symbolicTensorNames.length; ++i) { + const index = layerOutputNames.indexOf(symbolicTensorNames[i]); + if (index !== -1) { + outputSymbolicTensors[i] = layerOutputs[index]; + outputsRemaining--; + } + if (outputsRemaining === 0) { + break; + } + } + if (outputsRemaining === 0) { + break; + } + } + if (outputsRemaining > 0) { + const remainingNames = []; + outputSymbolicTensors.forEach((tensor, i) => { + if (tensor == null) { + remainingNames.push(symbolicTensorNames[i]); + } + }); + throw new ValueError(`Cannot find SymbolicTensors for output name(s): ` + + `${JSON.stringify(remainingNames)}`); + } + return outputSymbolicTensors; + } + /** + * Helper method to loop over some data in batches. + * + * Porting Note: Not using the functional approach in the Python equivalent + * due to the imperative backend. + * Porting Note: Does not support step mode currently. + * + * @param ins: input data + * @param batchSize: integer batch size. + * @param verbose: verbosity model + * @returns: Predictions as `tf.Tensor` (if a single output) or an `Array` of + * `tf.Tensor` (if multipe outputs). + */ + predictLoop(ins, batchSize = 32, verbose = false) { + return tidy(() => { + const numSamples = this.checkNumSamples(ins); + if (verbose) { + throw new NotImplementedError('Verbose predictLoop() is not implemented yet.'); + } + // Sample-based predictions. + // Porting Note: Tensor currently does not support sliced assignments as + // in numpy, e.g., x[1:3] = y. Therefore we use concatenation while + // iterating over the batches. + const batches = makeBatches(numSamples, batchSize); + const outsBatches = this.outputs.map(output => []); + // TODO(cais): Can the scope() be pushed down inside the for loop? 
+ for (let batchIndex = 0; batchIndex < batches.length; ++batchIndex) { + const batchOuts = tidy(() => { + const batchStart = batches[batchIndex][0]; + const batchEnd = batches[batchIndex][1]; + // TODO(cais): Take care of the case of the last element is a flag for + // training/test. + const insBatch = sliceArrays(ins, batchStart, batchEnd); + // Construct the feeds for execute(); + const feeds = []; + if (Array.isArray(insBatch)) { + for (let i = 0; i < insBatch.length; ++i) { + feeds.push({ key: this.inputs[i], value: insBatch[i] }); + } + } + else { + feeds.push({ key: this.inputs[0], value: insBatch }); + } + const feedDict = new FeedDict(feeds); + return execute(this.outputs, feedDict); + }); + batchOuts.forEach((batchOut, i) => outsBatches[i].push(batchOut)); + } + return singletonOrArray(outsBatches.map(batches => concat$2(batches, 0))); + }); + } + /** + * Generates output predictions for the input samples. + * + * Computation is done in batches. + * + * Note: the "step" mode of predict() is currently not supported. + * This is because the TensorFlow.js core backend is imperative only. + * + * ```js + * const model = tf.sequential({ + * layers: [tf.layers.dense({units: 1, inputShape: [10]})] + * }); + * model.predict(tf.ones([8, 10]), {batchSize: 4}).print(); + * ``` + * + * @param x The input data, as a Tensor, or an `Array` of `tf.Tensor`s if + * the model has multiple inputs. + * @param args A `ModelPredictArgs` object containing optional fields. + * + * @return Prediction results as a `tf.Tensor`(s). + * + * @exception ValueError In case of mismatch between the provided input data + * and the model's expectations, or in case a stateful model receives a + * number of samples that is not a multiple of the batch size. 
+ * + * @doc {heading: 'Models', subheading: 'Classes'} + */ + predict(x, args = {}) { + const xsRank2OrHigher = ensureTensorsRank2OrHigher(x); + checkInputData(xsRank2OrHigher, this.inputNames, this.feedInputShapes, false); + try { + // TODO(cais): Take care of stateful models. + // if (this.stateful) ... + // TODO(cais): Take care of the learning_phase boolean flag. + // if (this.useLearningPhase) ... + const batchSize = args.batchSize == null ? 32 : args.batchSize; + checkBatchSize(batchSize); + return this.predictLoop(xsRank2OrHigher, batchSize); + } + finally { + disposeNewTensors(xsRank2OrHigher, x); + } + } + /** + * Returns predictions for a single batch of samples. + * + * ```js + * const model = tf.sequential({ + * layers: [tf.layers.dense({units: 1, inputShape: [10]})] + * }); + * model.predictOnBatch(tf.ones([8, 10])).print(); + * ``` + * @param x: Input samples, as a Tensor (for models with exactly one + * input) or an array of Tensors (for models with more than one input). + * @return Tensor(s) of predictions + * + * @doc {heading: 'Models', subheading: 'Classes'} + */ + predictOnBatch(x) { + checkInputData(x, this.inputNames, this.feedInputShapes, true); + // TODO(cais): Take care of the learning_phase boolean flag. + // if (this.useLearningPhase) ... + const batchSize = (Array.isArray(x) ? x[0] : x).shape[0]; + return this.predictLoop(x, batchSize); + } + standardizeUserDataXY(x, y, checkBatchAxis = true, batchSize) { + // TODO(cais): Add sampleWeight, classWeight + if (this.optimizer_ == null) { + throw new RuntimeError('You must compile a model before training/testing. 
Use ' + + 'LayersModel.compile(modelCompileArgs).'); + } + const outputShapes = []; + for (let i = 0; i < this.feedOutputShapes.length; ++i) { + const outputShape = this.feedOutputShapes[i]; + const lossFn = this.feedLossFns[i]; + if (lossFn === sparseCategoricalCrossentropy$1) { + outputShapes.push(outputShape.slice(0, outputShape.length - 1).concat([1])); + } + else { + // Porting Note: Because of strong typing `lossFn` must be a function. + outputShapes.push(outputShape); + } + } + x = standardizeInputData(x, this.feedInputNames, this.feedInputShapes, false, 'input'); + y = standardizeInputData(y, this.feedOutputNames, outputShapes, false, 'target'); + // TODO(cais): Standardize sampleWeights & classWeights. + checkArrayLengths(x, y, null); + // TODO(cais): Check sampleWeights as well. + checkLossAndTargetCompatibility(y, this.feedLossFns, this.feedOutputShapes); + if (this.stateful && batchSize != null && batchSize > 0) { + if (x[0].shape[0] % batchSize !== 0) { + throw new ValueError(`In a stateful network, you should only pass inputs with a ` + + `number of samples that is divisible by the batch size ` + + `${batchSize}. Found: ${x[0].shape[0]} sample(s).`); + } + } + return [x, y]; + } + async standardizeUserData(x, y, sampleWeight, classWeight, checkBatchAxis = true, batchSize) { + const [standardXs, standardYs] = this.standardizeUserDataXY(x, y, checkBatchAxis, batchSize); + // TODO(cais): Handle sampleWeights. + if (sampleWeight != null) { + throw new Error('sample weight is not supported yet.'); + } + let standardSampleWeights = null; + if (classWeight != null) { + const classWeights = standardizeClassWeights(classWeight, this.outputNames); + standardSampleWeights = []; + for (let i = 0; i < classWeights.length; ++i) { + standardSampleWeights.push(await standardizeWeights(standardYs[i], null, classWeights[i])); + } + } + // TODO(cais): Deal with the case of model.stateful == true. 
+ return [standardXs, standardYs, standardSampleWeights]; + } + /** + * Loop over some test data in batches. + * @param f A Function returning a list of tensors. + * @param ins Array of tensors to be fed to `f`. + * @param batchSize Integer batch size or `null` / `undefined`. + * @param verbose verbosity mode. + * @param steps Total number of steps (batches of samples) before + * declaring test finished. Ignored with the default value of `null` / + * `undefined`. + * @returns Array of Scalars. + */ + testLoop(f, ins, batchSize, verbose = 0, steps) { + return tidy(() => { + const numSamples = this.checkNumSamples(ins, batchSize, steps, 'steps'); + const outs = []; + if (verbose > 0) { + throw new NotImplementedError('Verbose mode is not implemented yet.'); + } + // TODO(cais): Use `indicesForConversionToDense' to prevent slow down. + if (steps != null) { + throw new NotImplementedError('steps mode in testLoop() is not implemented yet'); + } + else { + const batches = makeBatches(numSamples, batchSize); + const indexArray = tensor1d(range$2(0, numSamples)); + for (let batchIndex = 0; batchIndex < batches.length; ++batchIndex) { + const batchStart = batches[batchIndex][0]; + const batchEnd = batches[batchIndex][1]; + const batchIds = sliceAlongFirstAxis(indexArray, batchStart, batchEnd - batchStart); + // TODO(cais): In ins, train flag can be a number, instead of an + // Tensor? Do we need to handle this in tfjs-layers? 
+ const insBatch = sliceArraysByIndices(ins, batchIds); + const batchOuts = f(insBatch); + if (batchIndex === 0) { + for (let i = 0; i < batchOuts.length; ++i) { + outs.push(scalar(0)); + } + } + for (let i = 0; i < batchOuts.length; ++i) { + const batchOut = batchOuts[i]; + outs[i] = + add$3(outs[i], mul(batchEnd - batchStart, batchOut)); + } + } + for (let i = 0; i < outs.length; ++i) { + outs[i] = div$1(outs[i], numSamples); + } + } + return outs; + }); + } + getDedupedMetricsNames() { + const outLabels = this.metricsNames; + // Rename duplicated metrics names (can happen with an output layer + // shared among multiple dataflows). + const dedupedOutLabels = []; + for (let i = 0; i < outLabels.length; ++i) { + const label = outLabels[i]; + let newLabel = label; + if (count(outLabels, label) > 1) { + const dupIndex = count(outLabels.slice(0, i), label); + newLabel += `_${dupIndex}`; + } + dedupedOutLabels.push(newLabel); + } + return dedupedOutLabels; + } + /** + * Creates a function that performs the following actions: + * + * 1. computes the losses + * 2. sums them to get the total loss + * 3. call the optimizer computes the gradients of the LayersModel's + * trainable weights w.r.t. the total loss and update the variables + * 4. calculates the metrics + * 5. returns the values of the losses and metrics. + */ + makeTrainFunction() { + return (data) => { + const lossValues = []; + const inputs = data.slice(0, this.inputs.length); + const targets = data.slice(this.inputs.length, this.inputs.length + this.outputs.length); + const sampleWeights = data.slice(this.inputs.length + this.outputs.length, this.inputs.length + this.outputs.length * 2); + const metricsValues = []; + // Create a function that computes the total loss based on the + // inputs. This function is used for obtaining gradients through + // backprop. 
+ const totalLossFunction = () => { + const feeds = []; + for (let i = 0; i < this.inputs.length; ++i) { + feeds.push({ key: this.inputs[i], value: inputs[i] }); + } + const feedDict = new FeedDict(feeds); + const outputs = execute(this.outputs, feedDict, { 'training': true }); + // TODO(cais): Take care of the case of multiple outputs from a + // single layer? + let totalLoss; + for (let i = 0; i < this.lossFunctions.length; ++i) { + const lossFunction = this.lossFunctions[i]; + let loss = lossFunction(targets[i], outputs[i]); + if (sampleWeights[i] != null) { + loss = computeWeightedLoss(loss, sampleWeights[i]); + } + // TODO(cais): push Scalar instead. + const meanLoss = mean$3(loss); + // TODO(cais): Use a scope() instead, to avoid ownership. + lossValues.push(meanLoss); + if (i === 0) { + totalLoss = loss; + } + else { + totalLoss = add$3(totalLoss, loss); + } + } + // Compute the metrics. + // TODO(cais): These should probably be calculated outside + // totalLossFunction to benefit speed? + for (let i = 0; i < this.metricsTensors.length; ++i) { + let weightedMetric; + if (this.outputs.length > 1 && i < this.outputs.length) { + weightedMetric = lossValues[i]; + } + else { + const metric = this.metricsTensors[i][0]; + const outputIndex = this.metricsTensors[i][1]; + weightedMetric = + mean$3(metric(targets[outputIndex], outputs[outputIndex])); + } + keep(weightedMetric); + // TODO(cais): Use a scope() instead, to avoid ownership. + metricsValues.push(weightedMetric); + } + totalLoss = mean$3(totalLoss); + // Add regularizer penalties. 
+ this.calculateLosses().forEach(regularizerLoss => { + totalLoss = add$3(totalLoss, regularizerLoss); + }); + return totalLoss; + }; + const variables = this.collectedTrainableWeights.map(param => param.read()); + const returnCost = true; + const totalLossValue = this.optimizer_.minimize(totalLossFunction, returnCost, variables); + return [totalLossValue].concat(metricsValues); + }; + } + /** + * Create a function which, when invoked with an array of `tf.Tensor`s as a + * batch of inputs, returns the prespecified loss and metrics of the model + * under the batch of input data. + */ + makeTestFunction() { + this.testFunction = (data) => { + return tidy(() => { + const valOutputs = []; + let totalLoss; + const inputs = data.slice(0, this.inputs.length); + const targets = data.slice(this.inputs.length, this.inputs.length + this.outputs.length); + const feeds = []; + for (let i = 0; i < this.inputs.length; ++i) { + feeds.push({ key: this.inputs[i], value: inputs[i] }); + } + const feedDict = new FeedDict(feeds); + const outputs = execute(this.outputs, feedDict); + // Compute total loss. + for (let i = 0; i < this.lossFunctions.length; ++i) { + const lossFunction = this.lossFunctions[i]; + // TODO(cais): Add sample weighting and replace the simple + // averaging. + const loss = mean$3(lossFunction(targets[i], outputs[i])); + if (i === 0) { + totalLoss = loss; + } + else { + totalLoss = add$3(totalLoss, loss); + } + valOutputs.push(totalLoss); + } + // Compute the metrics. + for (let i = 0; i < this.metricsTensors.length; ++i) { + const metric = this.metricsTensors[i][0]; + const outputIndex = this.metricsTensors[i][1]; + // TODO(cais): Replace K.mean() with a proper weighting function. + const meanMetric = mean$3(metric(targets[outputIndex], outputs[outputIndex])); + valOutputs.push(meanMetric); + } + return valOutputs; + }); + }; + } + /** + * Trains the model for a fixed number of epochs (iterations on a + * dataset). 
+ * + * ```js + * const model = tf.sequential({ + * layers: [tf.layers.dense({units: 1, inputShape: [10]})] + * }); + * model.compile({optimizer: 'sgd', loss: 'meanSquaredError'}); + * for (let i = 1; i < 5 ; ++i) { + * const h = await model.fit(tf.ones([8, 10]), tf.ones([8, 1]), { + * batchSize: 4, + * epochs: 3 + * }); + * console.log("Loss after Epoch " + i + " : " + h.history.loss[0]); + * } + * ``` + * + * @param x `tf.Tensor` of training data, or an array of `tf.Tensor`s if the + * model has multiple inputs. If all inputs in the model are named, you + * can also pass a dictionary mapping input names to `tf.Tensor`s. + * @param y `tf.Tensor` of target (label) data, or an array of `tf.Tensor`s if + * the model has multiple outputs. If all outputs in the model are named, + * you can also pass a dictionary mapping output names to `tf.Tensor`s. + * @param args A `ModelFitArgs`, containing optional fields. + * + * @return A `History` instance. Its `history` attribute contains all + * information collected during training. + * + * @exception ValueError In case of mismatch between the provided input + * data and what the model expects. + * + * @doc {heading: 'Models', subheading: 'Classes'} + */ + async fit(x, y, args = {}) { + if (this.isTraining) { + throw new Error('Cannot start training because another fit() call is ongoing.'); + } + this.isTraining = true; + let inputs; + let targets; + let originalInputs; + let originalTargets; + let inputValX; + let inputValY; + let valX; + let valY; + let sampleWeights; + try { + const batchSize = args.batchSize == null ? 32 : args.batchSize; + checkBatchSize(batchSize); + // Validate user data. + // TODO(cais): Support sampleWeight. + const checkBatchAxis = false; + const standardizedOuts = await this.standardizeUserData(x, y, args.sampleWeight, args.classWeight, checkBatchAxis, batchSize); + inputs = standardizedOuts[0]; + targets = standardizedOuts[1]; + sampleWeights = standardizedOuts[2]; + // Prepare validation data. 
+ let doValidation = false; + let valIns; + if (args.validationData != null && args.validationData.length > 0) { + doValidation = true; + if (args.validationData.length === 2) { + // config.validationData consists of valX and valY. + inputValX = args.validationData[0]; + inputValY = args.validationData[1]; + } + else if (args.validationData.length === 3) { + throw new NotImplementedError('validationData including sample weights is not supported yet.'); + } + else { + throw new ValueError(`When passing validation data, it must contain 2 (valX, valY) ` + + `or 3 (valX, valY, valSampleWeight) items; ` + + `${args.validationData} is invalid.`); + } + const checkBatchAxis = true; + const valStandardized = await this.standardizeUserData(inputValX, inputValY, null, /** Unused sample weights. */ null, /** Unused class weights. */ checkBatchAxis, batchSize); + valX = valStandardized[0]; + valY = valStandardized[1]; + valIns = valX.concat(valY); + // TODO(cais): Add useLearningPhase data properly. + } + else if (args.validationSplit != null && args.validationSplit > 0 && + args.validationSplit < 1) { + doValidation = true; + // Porting Note: In tfjs-layers, inputs[0] is always a Tensor. + const splitAt = Math.floor(inputs[0].shape[0] * (1 - args.validationSplit)); + const originalBatchSize = inputs[0].shape[0]; + valX = sliceArrays(inputs, splitAt, originalBatchSize); + originalInputs = inputs; + inputs = sliceArrays(inputs, 0, splitAt); + valY = sliceArrays(targets, splitAt, originalBatchSize); + originalTargets = targets; + targets = sliceArrays(targets, 0, splitAt); + // TODO(cais): Once sampleWeights becomes available, slice it to get + // valSampleWeights. + valIns = valX.concat(valY); + // TODO(cais): Add useLearningPhase data properly. + } + else if (args.validationSteps != null) { + doValidation = true; + // TODO(cais): Add useLearningPhase. 
+ } + const ins = inputs.concat(targets).concat(sampleWeights); + this.checkTrainableWeightsConsistency(); + // TODO(cais): Handle use_learning_phase and learning_phase? + // Porting Note: Here we see a key deviation of tfjs-layers from + // Keras. + // Due to the imperative nature of tfjs-layers' backend (tfjs-core), + // we do not construct symbolic computation graphs to embody the + // training process. Instead, we define a function that performs the + // training action. In PyKeras, the data (inputs and targets) are fed + // through graph placeholders. In tfjs-layers, the data are fed as + // function arguments. Since the function are defined below in the + // scope, we don't have equivalents of PyKeras's + // `_make_train_funciton`. + const trainFunction = this.makeTrainFunction(); + const outLabels = this.getDedupedMetricsNames(); + let valFunction; + let callbackMetrics; + if (doValidation) { + this.makeTestFunction(); + valFunction = this.testFunction; + callbackMetrics = + outLabels.slice().concat(outLabels.map(n => 'val_' + n)); + } + else { + valFunction = null; + valIns = []; + callbackMetrics = outLabels.slice(); + } + const callbacks = standardizeCallbacks(args.callbacks, args.yieldEvery); + const out = await this.fitLoop(trainFunction, ins, outLabels, batchSize, args.epochs, args.verbose, callbacks, valFunction, valIns, args.shuffle, callbackMetrics, args.initialEpoch, null, null); + return out; + } + finally { + this.isTraining = false; + // Memory clean up. + disposeNewTensors(inputs, x); + disposeNewTensors(targets, y); + disposeNewTensors(originalInputs, x); + disposeNewTensors(originalTargets, y); + disposeNewTensors(valX, inputValX); + disposeNewTensors(valY, inputValY); + if (sampleWeights != null) { + dispose(sampleWeights); + } + } + // TODO(cais): Add value to outLabels. + } + /** + * Abstract fit function for `f(ins)`. + * @param f A Function returning a list of tensors. 
For training, this + * function is expected to perform the updates to the variables. + * @param ins List of tensors to be fed to `f`. + * @param outLabels List of strings, display names of the outputs of `f`. + * @param batchSize Integer batch size or `== null` if unknown. Default : 32. + * @param epochs Number of times to iterate over the data. Default : 1. + * @param verbose Verbosity mode: 0, 1, or 2. Default: 1. + * @param callbacks List of callbacks to be called during training. + * @param valF Function to call for validation. + * @param valIns List of tensors to be fed to `valF`. + * @param shuffle Whether to shuffle the data at the beginning of every + * epoch. Default : true. + * @param callbackMetrics List of strings, the display names of the metrics + * passed to the callbacks. They should be the concatenation of the + * display names of the outputs of `f` and the list of display names + * of the outputs of `valF`. + * @param initialEpoch Epoch at which to start training (useful for + * resuming a previous training run). Default : 0. + * @param stepsPerEpoch Total number of steps (batches on samples) before + * declaring one epoch finished and starting the next epoch. Ignored with + * the default value of `undefined` or `null`. + * @param validationSteps Number of steps to run validation for (only if + * doing validation from data tensors). Not applicable for tfjs-layers. + * @returns A `History` object. + */ + async fitLoop(f, ins, outLabels, batchSize, epochs, verbose, callbacks, valF, valIns, shuffle$1, callbackMetrics, initialEpoch, stepsPerEpoch, validationSteps) { + if (batchSize == null) { + batchSize = 32; + } + if (epochs == null) { + epochs = 1; + } + if (shuffle$1 == null) { + shuffle$1 = true; + } + if (initialEpoch == null) { + initialEpoch = 0; + } + // TODO(cais): Change const to let below when implementing validation. 
+ let doValidation = false; + if (valF != null && valIns != null) { + doValidation = true; + // TODO(cais): verbose message. + } + if (validationSteps != null) { + doValidation = true; + if (stepsPerEpoch == null) { + throw new ValueError('Can only use `validationSteps` when doing step-wise training, ' + + 'i.e., `stepsPerEpoch` must be set.'); + } + } + const numTrainSamples = this.checkNumSamples(ins, batchSize, stepsPerEpoch, 'steps_per_epoch'); + let indexArray; + if (numTrainSamples != null) { + indexArray = range$2(0, numTrainSamples); + } + if (verbose == null) { + verbose = 1; + } + const { callbackList, history } = configureCallbacks(callbacks, verbose, epochs, initialEpoch, numTrainSamples, stepsPerEpoch, batchSize, doValidation, callbackMetrics); + callbackList.setModel(this); + this.history = history; + await callbackList.onTrainBegin(); + this.stopTraining_ = false; + // TODO(cais): Take care of callbacks.validation_data as in PyKeras. + // TODO(cais): Pre-convert feeds for performance as in PyKeras. + for (let epoch = initialEpoch; epoch < epochs; ++epoch) { + await callbackList.onEpochBegin(epoch); + const epochLogs = {}; + if (stepsPerEpoch != null) { + throw new NotImplementedError('stepsPerEpoch mode is not implemented yet.'); + } + else { + if (shuffle$1 === 'batch') { + throw new NotImplementedError('batch shuffling is not implemneted' + + ' yet'); + } + else if (shuffle$1) { + shuffle(indexArray); + } + // Convert the potentially shuffled indices to Tensor1D, to avoid the + // cost of repeated creation of Array1Ds later on. 
+ const epochIndexArray1D = tensor1d(indexArray); + const batches = makeBatches(numTrainSamples, batchSize); + for (let batchIndex = 0; batchIndex < batches.length; ++batchIndex) { + const batchLogs = {}; + await callbackList.onBatchBegin(batchIndex, batchLogs); + tidy(() => { + const batchStart = batches[batchIndex][0]; + const batchEnd = batches[batchIndex][1]; + const batchIds = sliceAlongFirstAxis(epochIndexArray1D, batchStart, batchEnd - batchStart); + batchLogs['batch'] = batchIndex; + batchLogs['size'] = batchEnd - batchStart; + // TODO(cais): In ins, train flag can be a number, instead of an + // Tensor? Do we need to handle this in tfjs-layers? + const insBatch = sliceArraysByIndices(ins, batchIds); + const outs = f(insBatch); + for (let i = 0; i < outLabels.length; ++i) { + const label = outLabels[i]; + const out = outs[i]; + batchLogs[label] = out; + keep(out); + // TODO(cais): Use scope() to avoid ownership. + } + if (batchIndex === batches.length - 1) { // Last batch. + if (doValidation) { + const valOuts = this.testLoop(valF, valIns, batchSize); + // Porting Notes: In tfjs-layers, valOuts is always an Array. + for (let i = 0; i < outLabels.length; ++i) { + const label = outLabels[i]; + const out = valOuts[i]; + keep(out); + // TODO(cais): Use scope() to avoid ownership. + epochLogs['val_' + label] = out; + } + } + } + }); + await callbackList.onBatchEnd(batchIndex, batchLogs); + disposeTensorsInLogs(batchLogs); + if (this.stopTraining_) { + break; + } + // TODO(cais): return outs as list of Tensor. + } + epochIndexArray1D.dispose(); + } + // TODO(cais): Run validation at the end of the epoch. + await callbackList.onEpochEnd(epoch, epochLogs); + if (this.stopTraining_) { + break; + } + } + await callbackList.onTrainEnd(); + await this.history.syncData(); + return this.history; + } + // TODO(cais): Add code snippet below when it's possible to instantiate + // actual dataset objects. + /** + * Trains the model using a dataset object. 
+ * + * @param dataset A dataset object. Its `iterator()` method is expected + * to generate a dataset iterator object, the `next()` method of which + * is expected to produce data batches for training. The return value + * of the `next()` call ought to contain a boolean `done` field and a + * `value` field. The `value` field is expected to be an array of two + * `tf.Tensor`s or an array of two nested `tf.Tensor` structures. The former + * case is for models with exactly one input and one output (e.g. + * a sequential model). The latter case is for models with multiple + * inputs and/or multiple outputs. + * Of the two items in the array, the first is the input feature(s) and + * the second is the output target(s). + * @param args A `ModelFitDatasetArgs`, containing optional fields. + * + * @return A `History` instance. Its `history` attribute contains all + * information collected during training. + * + * @doc {heading: 'Models', subheading: 'Classes'} + */ + async fitDataset(dataset, args) { + return fitDataset(this, dataset, args); + } + /** + * Runs a single gradient update on a single batch of data. + * + * This method differs from `fit()` and `fitDataset()` in the following + * regards: + * - It operates on exactly one batch of data. + * - It returns only the loss and metric values, instead of + * returning the batch-by-batch loss and metric values. + * - It doesn't support fine-grained options such as verbosity and + * callbacks. + * + * @param x Input data. It could be one of the following: + * - A `tf.Tensor`, or an Array of `tf.Tensor`s (in case the model has + * multiple inputs). + * - An Object mapping input names to corresponding `tf.Tensor` (if the + * model has named inputs). + * @param y Target data. It could be either a `tf.Tensor` or multiple + * `tf.Tensor`s. It should be consistent with `x`. + * @returns Training loss or losses (in case the model has + * multiple outputs), along with metrics (if any), as numbers. 
+ * + * @doc {heading: 'Models', subheading: 'Classes'} + */ + async trainOnBatch(x, y) { + // TODO(cais): Support sampleWeight and classWeight. + // TODO(cais): Support Dataset objects. + const standardizeOut = await this.standardizeUserData(x, y); + const inputs = standardizeOut[0]; + const targets = standardizeOut[1]; + const trainFunction = this.makeTrainFunction(); + const losses = trainFunction(inputs.concat(targets)); + const lossValues = []; + for (const loss of losses) { + const v = await loss.data(); + lossValues.push(v[0]); + } + dispose(losses); + disposeNewTensors(standardizeOut[0], x); + disposeNewTensors(standardizeOut[1], y); + return singletonOrArray(lossValues); + } + /** + * Extract weight values of the model. + * + * @param config: An instance of `io.SaveConfig`, which specifies + * model-saving options such as whether only trainable weights are to be + * saved. + * @returns A `NamedTensorMap` mapping original weight names (i.e., + * non-uniqueified weight names) to their values. + */ + getNamedWeights(config) { + const namedWeights = []; + const trainableOnly = config != null && config.trainableOnly; + const weights = trainableOnly ? this.trainableWeights : this.weights; + const weightValues = this.getWeights(trainableOnly); + for (let i = 0; i < weights.length; ++i) { + if (trainableOnly && !weights[i].trainable) { + // Optionally skip non-trainable weights. + continue; + } + namedWeights.push({ name: weights[i].originalName, tensor: weightValues[i] }); + } + return namedWeights; + } + /** + * Setter used for force stopping of LayersModel.fit() (i.e., training). 
+ * + * Example: + * + * ```js + * const input = tf.input({shape: [10]}); + * const output = tf.layers.dense({units: 1}).apply(input); + * const model = tf.model({inputs: [input], outputs: [output]}); + * model.compile({loss: 'meanSquaredError', optimizer: 'sgd'}); + * const xs = tf.ones([8, 10]); + * const ys = tf.zeros([8, 1]); + * + * const history = await model.fit(xs, ys, { + * epochs: 10, + * callbacks: { + * onEpochEnd: async (epoch, logs) => { + * if (epoch === 2) { + * model.stopTraining = true; + * } + * } + * } + * }); + * + * // There should be only 3 values in the loss array, instead of 10 + * values, + * // due to the stopping after 3 epochs. + * console.log(history.history.loss); + * ``` + */ + set stopTraining(stop) { + this.stopTraining_ = stop; + } + get stopTraining() { + return this.stopTraining_; + } + get optimizer() { + return this.optimizer_; + } + set optimizer(optimizer) { + if (this.optimizer_ !== optimizer) { + this.optimizer_ = optimizer; + this.isOptimizerOwned = false; + } + } + dispose() { + const result = super.dispose(); + if (result.refCountAfterDispose === 0 && this.optimizer != null && + this.isOptimizerOwned) { + const numTensorsBeforeOptmizerDisposal = memory().numTensors; + this.optimizer_.dispose(); + result.numDisposedVariables += + numTensorsBeforeOptmizerDisposal - memory().numTensors; + } + return result; + } + getLossIdentifiers() { + let lossNames; + if (typeof this.loss === 'string') { + lossNames = toSnakeCase(this.loss); + } + else if (Array.isArray(this.loss)) { + for (const loss of this.loss) { + if (typeof loss !== 'string') { + throw new Error('Serialization of non-string loss is not supported.'); + } + } + lossNames = this.loss.map(name => toSnakeCase(name)); + } + else { + const outputNames = Object.keys(this.loss); + lossNames = {}; + const losses = this.loss; + for (const outputName of outputNames) { + if (typeof losses[outputName] === 'string') { + lossNames[outputName] = + toSnakeCase(losses[outputName]); 
+ } + else { + throw new Error('Serialization of non-string loss is not supported.'); + } + } + } + return lossNames; + } + getMetricIdentifiers() { + if (typeof this.metrics === 'string' || + typeof this.metrics === 'function') { + return [toSnakeCase(getLossOrMetricName(this.metrics))]; + } + else if (Array.isArray(this.metrics)) { + return this.metrics.map(metric => toSnakeCase(getLossOrMetricName(metric))); + } + else { + const metricsIdentifiers = {}; + for (const key in this.metrics) { + metricsIdentifiers[key] = + toSnakeCase(getLossOrMetricName(this.metrics[key])); + } + return metricsIdentifiers; + } + } + getTrainingConfig() { + return { + loss: this.getLossIdentifiers(), + metrics: this.getMetricIdentifiers(), + optimizer_config: { + class_name: this.optimizer.getClassName(), + config: this.optimizer.getConfig() + } + }; + // TODO(cais): Add weight_metrics when they are supported. + // TODO(cais): Add sample_weight_mode when it's supported. + // TODO(cais): Add loss_weights when it's supported. 
+ } + loadTrainingConfig(trainingConfig) { + if (trainingConfig.weighted_metrics != null) { + throw new Error('Loading weight_metrics is not supported yet.'); + } + if (trainingConfig.loss_weights != null) { + throw new Error('Loading loss_weights is not supported yet.'); + } + if (trainingConfig.sample_weight_mode != null) { + throw new Error('Loading sample_weight_mode is not supported yet.'); + } + const tsConfig = convertPythonicToTs(trainingConfig.optimizer_config); + const optimizer = deserialize(tsConfig); + let loss; + if (typeof trainingConfig.loss === 'string') { + loss = toCamelCase(trainingConfig.loss); + } + else if (Array.isArray(trainingConfig.loss)) { + loss = trainingConfig.loss.map(lossEntry => toCamelCase(lossEntry)); + } + else if (trainingConfig.loss != null) { + loss = {}; + for (const key in trainingConfig.loss) { + loss[key] = toCamelCase(trainingConfig.loss[key]); + } + } + let metrics; + if (Array.isArray(trainingConfig.metrics)) { + metrics = trainingConfig.metrics.map(metric => toCamelCase(metric)); + } + else if (trainingConfig.metrics != null) { + metrics = {}; + for (const key in trainingConfig.metrics) { + metrics[key] = toCamelCase(trainingConfig.metrics[key]); + } + } + this.compile({ loss, metrics, optimizer }); + } + /** + * Save the configuration and/or weights of the LayersModel. + * + * An `IOHandler` is an object that has a `save` method of the proper + * signature defined. The `save` method manages the storing or + * transmission of serialized data ("artifacts") that represent the + * model's topology and weights onto or via a specific medium, such as + * file downloads, local storage, IndexedDB in the web browser and HTTP + * requests to a server. TensorFlow.js provides `IOHandler` + * implementations for a number of frequently used saving mediums, such as + * `tf.io.browserDownloads` and `tf.io.browserLocalStorage`. See `tf.io` + * for more details. 
+ * + * This method also allows you to refer to certain types of `IOHandler`s + * as URL-like string shortcuts, such as 'localstorage://' and + * 'indexeddb://'. + * + * Example 1: Save `model`'s topology and weights to browser [local + * storage](https://developer.mozilla.org/en-US/docs/Web/API/Window/localStorage); + * then load it back. + * + * ```js + * const model = tf.sequential( + * {layers: [tf.layers.dense({units: 1, inputShape: [3]})]}); + * console.log('Prediction from original model:'); + * model.predict(tf.ones([1, 3])).print(); + * + * const saveResults = await model.save('localstorage://my-model-1'); + * + * const loadedModel = await tf.loadLayersModel('localstorage://my-model-1'); + * console.log('Prediction from loaded model:'); + * loadedModel.predict(tf.ones([1, 3])).print(); + * ``` + * + * Example 2. Saving `model`'s topology and weights to browser + * [IndexedDB](https://developer.mozilla.org/en-US/docs/Web/API/IndexedDB_API); + * then load it back. + * + * ```js + * const model = tf.sequential( + * {layers: [tf.layers.dense({units: 1, inputShape: [3]})]}); + * console.log('Prediction from original model:'); + * model.predict(tf.ones([1, 3])).print(); + * + * const saveResults = await model.save('indexeddb://my-model-1'); + * + * const loadedModel = await tf.loadLayersModel('indexeddb://my-model-1'); + * console.log('Prediction from loaded model:'); + * loadedModel.predict(tf.ones([1, 3])).print(); + * ``` + * + * Example 3. Saving `model`'s topology and weights as two files + * (`my-model-1.json` and `my-model-1.weights.bin`) downloaded from + * browser. + * + * ```js + * const model = tf.sequential( + * {layers: [tf.layers.dense({units: 1, inputShape: [3]})]}); + * const saveResults = await model.save('downloads://my-model-1'); + * ``` + * + * Example 4. Send `model`'s topology and weights to an HTTP server. 
+ * See the documentation of `tf.io.http` for more details + * including specifying request parameters and implementation of the + * server. + * + * ```js + * const model = tf.sequential( + * {layers: [tf.layers.dense({units: 1, inputShape: [3]})]}); + * const saveResults = await model.save('http://my-server/model/upload'); + * ``` + * + * @param handlerOrURL An instance of `IOHandler` or a URL-like, + * scheme-based string shortcut for `IOHandler`. + * @param config Options for saving the model. + * @returns A `Promise` of `SaveResult`, which summarizes the result of + * the saving, such as byte sizes of the saved artifacts for the model's + * topology and weight values. + * + * @doc {heading: 'Models', subheading: 'Classes', ignoreCI: true} + */ + async save(handlerOrURL, config) { + if (typeof handlerOrURL === 'string') { + const handlers = getSaveHandlers(handlerOrURL); + if (handlers.length === 0) { + throw new ValueError(`Cannot find any save handlers for URL '${handlerOrURL}'`); + } + else if (handlers.length > 1) { + throw new ValueError(`Found more than one (${handlers.length}) save handlers for ` + + `URL '${handlerOrURL}'`); + } + handlerOrURL = handlers[0]; + } + if (handlerOrURL.save == null) { + throw new ValueError('LayersModel.save() cannot proceed because the IOHandler ' + + 'provided does not have the `save` attribute defined.'); + } + const weightDataAndSpecs = await encodeWeights(this.getNamedWeights(config)); + const returnString = false; + const unusedArg = null; + const modelConfig = this.toJSON(unusedArg, returnString); + const modelArtifacts = { + modelTopology: modelConfig, + format: LAYERS_MODEL_FORMAT_NAME, + generatedBy: `TensorFlow.js tfjs-layers v${version$6}`, + convertedBy: null, + }; + const includeOptimizer = config == null ? 
false : config.includeOptimizer; + if (includeOptimizer && this.optimizer != null) { + modelArtifacts.trainingConfig = this.getTrainingConfig(); + const weightType = 'optimizer'; + const { data: optimizerWeightData, specs: optimizerWeightSpecs } = await encodeWeights(await this.optimizer.getWeights(), weightType); + weightDataAndSpecs.specs.push(...optimizerWeightSpecs); + weightDataAndSpecs.data = concatenateArrayBuffers([weightDataAndSpecs.data, optimizerWeightData]); + } + if (this.userDefinedMetadata != null) { + // Check serialized size of user-defined metadata. + const checkSize = true; + checkUserDefinedMetadata(this.userDefinedMetadata, this.name, checkSize); + modelArtifacts.userDefinedMetadata = this.userDefinedMetadata; + } + modelArtifacts.weightData = weightDataAndSpecs.data; + modelArtifacts.weightSpecs = weightDataAndSpecs.specs; + return handlerOrURL.save(modelArtifacts); + } + /** + * Set user-defined metadata. + * + * The set metadata will be serialized together with the topology + * and weights of the model during `save()` calls. + * + * @param setUserDefinedMetadata + */ + setUserDefinedMetadata(userDefinedMetadata) { + checkUserDefinedMetadata(userDefinedMetadata, this.name); + this.userDefinedMetadata = userDefinedMetadata; + } + /** + * Get user-defined metadata. + * + * The metadata is supplied via one of the two routes: + * 1. By calling `setUserDefinedMetadata()`. + * 2. Loaded during model loading (if the model is constructed + * via `tf.loadLayersModel()`.) + * + * If no user-defined metadata is available from either of the + * two routes, this function will return `undefined`. + */ + getUserDefinedMetadata() { + return this.userDefinedMetadata; + } + } + // The class name is 'Model' rather than 'LayersModel' for backwards + // compatibility since this class name shows up in the serialization format. 
+ /** @nocollapse */ + LayersModel.className = 'Model'; + registerClass(LayersModel); + /** + * A `tf.Functional` is an alias to `tf.LayersModel`. + * + * See also: + * `tf.LayersModel`, `tf.Sequential`, `tf.loadLayersModel`. + */ + /** @doc {heading: 'Models', subheading: 'Classes'} */ + class Functional extends LayersModel { + } + Functional.className = 'Functional'; + registerClass(Functional); + + /** + * @license + * Copyright 2018 Google LLC + * + * Use of this source code is governed by an MIT-style + * license that can be found in the LICENSE file or at + * https://opensource.org/licenses/MIT. + * ============================================================================= + */ + /** + * Parses a JSON model configuration file and returns a model instance. + * + * ```js + * // This example shows how to serialize a model using `toJSON()` and + * // deserialize it as another model using `tf.models.modelFromJSON()`. + * // Note: this example serializes and deserializes only the topology + * // of the model; the weights of the loaded model will be different + * // from those of the the original model, due to random weight + * // initialization. + * // To load the topology and weights of a model, use `tf.loadLayersModel()`. + * const model1 = tf.sequential(); + * model1.add(tf.layers.repeatVector({inputShape: [2], n: 4})); + * // Serialize `model1` as a JSON object. + * const model1JSON = model1.toJSON(null, false); + * model1.summary(); + * + * const model2 = await tf.models.modelFromJSON(model1JSON); + * model2.summary(); + * ``` + * + * @param modelAndWeightsConfig JSON object or string encoding a model and + * weights configuration. It can also be only the topology JSON of the + * model, in which case the weights will not be loaded. + * @param custom_objects Optional dictionary mapping names + * (strings) to custom classes or functions to be + * considered during deserialization. + * @returns A TensorFlow.js Layers `tf.LayersModel` instance (uncompiled). 
+ */ + async function modelFromJSON(modelAndWeightsConfig, customObjects) { + if (!('modelTopology' in modelAndWeightsConfig)) { + modelAndWeightsConfig = { modelTopology: modelAndWeightsConfig }; + } + modelAndWeightsConfig = modelAndWeightsConfig; + let modelTopology = modelAndWeightsConfig.modelTopology; + if (modelTopology['model_config'] != null) { + // If the model-topology JSON contains a 'model_config' field, then it is + // a full model JSON (e.g., from `keras.Model.save()`), which contains + // not only the model's architecture in its 'model_config' field, but + // additional information such as the model's optimizer. We use only the + // 'model_config' field currently. + modelTopology = modelTopology['model_config']; + } + const tsConfig = convertPythonicToTs(modelTopology); + const model = deserialize(tsConfig, customObjects); + if (modelAndWeightsConfig.weightsManifest != null) { + // Load the weight values keyed by the original tensor names in the model + // file that was loaded. These should match the keys of the weight + // manifest. + const weightValues = await loadWeights(modelAndWeightsConfig.weightsManifest, modelAndWeightsConfig.pathPrefix, model.weights.map(weight => weight.originalName)); + // Map the weights to the unique tensor names generated during model loading + const uniqueWeightValues = {}; + for (const weight of model.weights) { + uniqueWeightValues[weight.originalName] = + weightValues[weight.originalName]; + } + model.loadWeights(uniqueWeightValues); + // Dispose temporary weight values. + dispose(weightValues); + } + return model; + } + /** + * Load a model composed of Layer objects, including its topology and optionally + * weights. See the Tutorial named "How to import a Keras Model" for usage + * examples. + * + * This method is applicable to: + * + * 1. Models created with the `tf.layers.*`, `tf.sequential`, and + * `tf.model` APIs of TensorFlow.js and later saved with the + * `tf.LayersModel.save` method. + * 2. 
Models converted from Keras or TensorFlow tf.keras using the + * [tensorflowjs_converter](https://github.com/tensorflow/tfjs/tree/master/tfjs-converter). + * + * This mode is *not* applicable to TensorFlow `SavedModel`s or their converted + * forms. For those models, use `tf.loadGraphModel`. + * + * Example 1. Load a model from an HTTP server. + * + * ```js + * const model = await tf.loadLayersModel( + * 'https://storage.googleapis.com/tfjs-models/tfjs/iris_v1/model.json'); + * model.summary(); + * ``` + * + * Example 2: Save `model`'s topology and weights to browser [local + * storage](https://developer.mozilla.org/en-US/docs/Web/API/Window/localStorage); + * then load it back. + * + * ```js + * const model = tf.sequential( + * {layers: [tf.layers.dense({units: 1, inputShape: [3]})]}); + * console.log('Prediction from original model:'); + * model.predict(tf.ones([1, 3])).print(); + * + * const saveResults = await model.save('localstorage://my-model-1'); + * + * const loadedModel = await tf.loadLayersModel('localstorage://my-model-1'); + * console.log('Prediction from loaded model:'); + * loadedModel.predict(tf.ones([1, 3])).print(); + * ``` + * + * Example 3. Saving `model`'s topology and weights to browser + * [IndexedDB](https://developer.mozilla.org/en-US/docs/Web/API/IndexedDB_API); + * then load it back. + * + * ```js + * const model = tf.sequential( + * {layers: [tf.layers.dense({units: 1, inputShape: [3]})]}); + * console.log('Prediction from original model:'); + * model.predict(tf.ones([1, 3])).print(); + * + * const saveResults = await model.save('indexeddb://my-model-1'); + * + * const loadedModel = await tf.loadLayersModel('indexeddb://my-model-1'); + * console.log('Prediction from loaded model:'); + * loadedModel.predict(tf.ones([1, 3])).print(); + * ``` + * + * Example 4. Load a model from user-selected files from HTML + * [file input + * elements](https://developer.mozilla.org/en-US/docs/Web/HTML/Element/input/file). 
+ * + * ```js + * // Note: this code snippet will not work without the HTML elements in the + * // page + * const jsonUpload = document.getElementById('json-upload'); + * const weightsUpload = document.getElementById('weights-upload'); + * + * const model = await tf.loadLayersModel( + * tf.io.browserFiles([jsonUpload.files[0], weightsUpload.files[0]])); + * ``` + * + * @param pathOrIOHandler Can be either of the two formats + * 1. A string path to the `ModelAndWeightsConfig` JSON describing + * the model in the canonical TensorFlow.js format. For file:// + * (tfjs-node-only), http:// and https:// schemas, the path can be + * either absolute or relative. The content of the JSON file is assumed to + * be a JSON object with the following fields and values: + * - 'modelTopology': A JSON object that can be either of: + * 1. a model architecture JSON consistent with the format of the return + * value of `keras.Model.to_json()` + * 2. a full model JSON in the format of `keras.models.save_model()`. + * - 'weightsManifest': A TensorFlow.js weights manifest. + * See the Python converter function `save_model()` for more details. + * It is also assumed that model weights can be accessed from relative + * paths described by the `paths` fields in weights manifest. + * 2. A `tf.io.IOHandler` object that loads model artifacts with its `load` + * method. + * @param options Optional configuration arguments for the model loading, + * including: + * - `strict`: Require that the provided weights exactly match those required + * by the layers. Default true. Passing false means that both extra + * weights and missing weights will be silently ignored. + * - `onProgress`: A progress callback of the form: + * `(fraction: number) => void`. This callback can be used to monitor the + * model-loading process. + * @returns A `Promise` of `tf.LayersModel`, with the topology and weights + * loaded. 
+ * + * @doc {heading: 'Models', subheading: 'Loading'} + */ + async function loadLayersModel(pathOrIOHandler, options) { + if (options == null) { + options = {}; + } + if (typeof pathOrIOHandler === 'string') { + const handlers = getLoadHandlers(pathOrIOHandler, options); + if (handlers.length === 0) { + // For backward compatibility: if no load handler can be found, + // assume it is a relative http path. + // TODO(cais): Reformat the args into a single `LoadOptions` once the core + // is refactored. + handlers.push(browserHTTPRequest(pathOrIOHandler, options)); + } + else if (handlers.length > 1) { + throw new ValueError(`Found more than one (${handlers.length}) load handlers for ` + + `URL '${pathOrIOHandler}'`); + } + pathOrIOHandler = handlers[0]; + } + return loadLayersModelFromIOHandler(pathOrIOHandler, undefined, options); + } + /** + * Load a model and optionally its weights, using an IOHandler object. + * + * @param handler The instance of `IOHandler` to be used during the model + * loading. + * @param customObjects Any optional custom objects to be used during model + * loading. + * @param strict Whether the weight loading will be done in strict mode. + * Default: `true`. + */ + async function loadLayersModelFromIOHandler(handler, customObjects, options) { + if (options == null) { + options = {}; + } + if (handler.load == null) { + throw new ValueError('Cannot proceed with model loading because the IOHandler provided ' + + 'does not have the `load` method implemented.'); + } + const artifacts = await handler.load(); + let modelTopology = artifacts.modelTopology; + if (modelTopology['model_config'] != null) { + modelTopology = modelTopology['model_config']; + } + const strict = options.strict == null ? true : options.strict; + // If weights are provided and the weight-loading mode is strict, use + // fast weight initialization. 
This skips costly initializers such as + // 'orthogonal' and saves unnecessary computation in cases where + // the initialized weight values will immediately be overwritten by + // loaded weight values. + const fastWeightInit = artifacts.weightData != null && artifacts.weightSpecs != null && strict; + const model = deserialize(convertPythonicToTs(modelTopology), customObjects, fastWeightInit); + const trainingConfig = artifacts.trainingConfig; + if (trainingConfig != null) { + model.loadTrainingConfig(trainingConfig); + } + if (artifacts.userDefinedMetadata != null) { + model.setUserDefinedMetadata(artifacts.userDefinedMetadata); + } + // If weightData is present, load the weights into the model. + if (artifacts.weightData != null) { + // Loading weights requires weightSpecs. + if (artifacts.weightSpecs == null) { + throw new ValueError('LayersModel artifacts contains weight data, but not weight specs. ' + + 'Therefore loading of weights cannot proceed.'); + } + const { modelWeights, optimizerWeights } = decodeModelAndOptimizerWeights(artifacts.weightData, artifacts.weightSpecs); + model.loadWeights(modelWeights, strict); + if (model.optimizer != null && optimizerWeights.length > 0) { + await model.optimizer.setWeights(optimizerWeights); + } + // Dispose temporary weight values. + dispose(modelWeights); + dispose(optimizerWeights.map(w => w.tensor)); + } + return model; + } + function decodeModelAndOptimizerWeights(weightData, specs) { + const name2Tensor = decodeWeights(weightData, specs); + const modelWeights = {}; + const optimizerWeights = []; + specs.forEach(spec => { + if (spec.group === 'optimizer') { + optimizerWeights.push({ name: spec.name, tensor: name2Tensor[spec.name] }); + } + else { + modelWeights[spec.name] = name2Tensor[spec.name]; + } + }); + return { modelWeights, optimizerWeights }; + } + /** + * A model with a stack of layers, feeding linearly from one to the next. 
+ * + * `tf.sequential` is a factory function that creates an instance of + * `tf.Sequential`. + * + * ```js + * // Define a model for linear regression. + * const model = tf.sequential(); + * model.add(tf.layers.dense({units: 1, inputShape: [1]})); + * + * // Prepare the model for training: Specify the loss and the optimizer. + * model.compile({loss: 'meanSquaredError', optimizer: 'sgd'}); + * + * // Generate some synthetic data for training. + * const xs = tf.tensor2d([1, 2, 3, 4], [4, 1]); + * const ys = tf.tensor2d([1, 3, 5, 7], [4, 1]); + * + * // Train the model using the data then do inference on a data point the + * // model hasn't seen: + * await model.fit(xs, ys); + * model.predict(tf.tensor2d([5], [1, 1])).print(); + * ``` + * + * @doc {heading: 'Models', subheading: 'Classes'} + */ + class Sequential extends LayersModel { + constructor(args) { + super({ inputs: [], outputs: [] }); + args = args || {}; + this.trainable = true; + this.built = false; + // Set model name. + this.name = (args.name != null) ? args.name : getUid('sequential_'); + // Add to the model any layers passed to the constructor. + if (args.layers != null) { + for (const layer of args.layers) { + this.add(layer); + } + } + } + // Helper function to Sequential.add Throws if the new output shape will be + // invalid. + checkShape(layer) { + const shape = layer.inboundNodes[0].outputTensors[0].shape; + if (shape.some(x => x < 0)) { + throw new ValueError('Negative dimension size caused by adding layer ' + + `${layer.name} with input shape [` + + `${layer.inboundNodes[0].inputTensors[0].shape}]`); + } + } + /** + * Adds a layer instance on top of the layer stack. + * + * ```js + * const model = tf.sequential(); + * model.add(tf.layers.dense({units: 8, inputShape: [1]})); + * model.add(tf.layers.dense({units: 4, activation: 'relu6'})); + * model.add(tf.layers.dense({units: 1, activation: 'relu6'})); + * // Note that the untrained model is random at this point. 
+ * model.predict(tf.randomNormal([10, 1])).print(); + * ``` + * @param layer Layer instance. + * + * @exception ValueError In case the `layer` argument does not know its + * input shape. + * @exception ValueError In case the `layer` argument has multiple output + * tensors, or is already connected somewhere else (forbidden in + * `Sequential` models). + * + * @doc {heading: 'Models', subheading: 'Classes'} + */ + add(layer) { + const isLayerModelInstance = layer instanceof Sequential || layer instanceof LayersModel; + let modelLayer; + if (isLayerModelInstance) { + modelLayer = layer; + if (modelLayer.outputs.length !== 1) { + throw new ValueError('All layers in a Sequential model ' + + 'should have a single output tensor. ' + + 'For multi-output layers, ' + + 'use the functional API.'); + } + if (modelLayer.inputs.length !== 1) { + throw new ValueError('All layers in a Sequential model ' + + 'should have a single input tensor. ' + + 'For multi-input layers, ' + + 'use the functional API.'); + } + } + if (this.outputs.length === 0) { + // first layer in model: check that it is an input layer + if (layer.inboundNodes.length === 0) { + // create an input layer + if (layer.batchInputShape == null) { + throw new ValueError('The first layer in a Sequential model must ' + + 'get an `inputShape` or `batchInputShape` argument.'); + } + // Instantiate the input layer. + const x = Input({ + batchShape: layer.batchInputShape, + dtype: layer.dtype, + name: layer.name + '_input' + }); + // This will build the current layer and create the node connecting + // the current layer to the input layer we just created. + layer.apply(x); + } + if (isLayerModelInstance) { + this.outputs = modelLayer.outputs; + this.inputs = modelLayer.inputs; + } + else { + if (layer.inboundNodes.length !== 1) { + throw new ValueError('A layer added to a Sequential model must not already be ' + + `connected somewhere else. 
LayersModel received layer ${layer.name} ` + + `which has ${layer.inboundNodes.length} pre-existing inbound ` + + 'connections.'); + } + if (layer.inboundNodes[0].outputTensors.length !== 1) { + throw new ValueError('All layers in a Sequential model ' + + 'should have a single output tensor. ' + + 'For multi-output layers, ' + + 'use the functional API.'); + } + this.checkShape(layer); + this.outputs = [layer.inboundNodes[0].outputTensors[0]]; + this.inputs = getSourceInputs(this.outputs[0]); + } + this.inboundNodes = []; + // We create an input node, which we will keep updated + // as we add more layers. + // (This call has side effects.) + // tslint:disable-next-line:no-unused-expression + new Node({ + outboundLayer: this, + inboundLayers: [], + nodeIndices: [], + tensorIndices: [], + inputTensors: this.inputs, + outputTensors: this.outputs, + // no model-level masking for now + inputMasks: pyListRepeat(null, this.inputs.length), + outputMasks: [null], + inputShapes: this.inputs.map(x => x.shape), + outputShapes: this.outputs[0].shape + }); + } + else { + const outputTensor = layer.apply(this.outputs[0]); + if (Array.isArray(outputTensor)) { + throw new TypeError('All layers in a Sequential model ' + + 'should have a single output tensor. ' + + 'For multi-output layers, ' + + 'use the functional API.'); + } + this.checkShape(layer); + this.outputs = [outputTensor]; + // update self.inbound_nodes + this.inboundNodes[0].outputTensors = this.outputs; + this.inboundNodes[0].outputShapes = [this.outputs[0].shape]; + } + this.layers.push(layer); + this.built = false; + } + /** + * Removes the last layer in the model. + * + * @exception TypeError if there are no layers in the model. 
+ */ + pop() { + if (this.layers.length === 0) { + throw new TypeError('There are no layers in the model.'); + } + this.layers.pop(); + if (this.layers.length === 0) { + this.outputs = []; + this.inboundNodes = []; + this.outboundNodes = []; + } + else { + const lastLayerIndex = this.layers.length - 1; + this.layers[lastLayerIndex].outboundNodes = []; + this.outputs = [this.layers[lastLayerIndex].output]; + // update self.inbound_nodes + this.inboundNodes[0].outputTensors = this.outputs; + this.inboundNodes[0].outputShapes = [this.outputs[0].shape]; + } + } + call(inputs, kwargs) { + if (this.model == null) { + this.build(); + } + return this.model.call(inputs, kwargs); + } + build(inputShape) { + // Call `getExactlyOneShape` without using its return value, + // to verify that exactly one input shape is provided. + getExactlyOneShape(inputShape); + if (this.inputs.length === 0 || this.outputs.length === 0) { + throw new TypeError('Sequential model cannot be built: model is empty.' + + ' Add some layers first.'); + } + // actually create the model + this.model = new LayersModel({ + inputs: this.inputs, + outputs: this.outputs[0], + name: this.name + '_model' + }); + this.model.trainable = this.trainable; + // mirror model attributes + this.supportsMasking = this.model.supportsMasking; + // TODO(michaelterry): Add caches + this.inputLayers = this.model.inputLayers; + this.inputLayersNodeIndices = this.model.inputLayersNodeIndices; + this.inputLayersTensorIndices = this.model.inputLayersTensorIndices; + this.outputLayers = this.model.outputLayers; + this.outputLayersNodeIndices = this.model.outputLayersNodeIndices; + this.outputLayersTensorIndices = this.model.outputLayersTensorIndices; + this.nodesByDepth = this.model.nodesByDepth; + this.containerNodes = this.model.containerNodes; + this.outputNames = this.model.outputNames; + this.inputNames = this.model.inputNames; + // TODO(michaelterry): Add feedInputNames, feedInputs, if needed. 
+ // TODO(michaelterry): Add callbackModel if needed. + this.built = true; + } + countParams() { + if (!this.built) { + this.build(); + } + return super.countParams(); + } + /** + * Print a text summary of the Sequential model's layers. + * + * The summary includes + * - Name and type of all layers that comprise the model. + * - Output shape(s) of the layers + * - Number of weight parameters of each layer + * - The total number of trainable and non-trainable parameters of the + * model. + * + * ```js + * const model = tf.sequential(); + * model.add( + * tf.layers.dense({units: 100, inputShape: [10], activation: 'relu'})); + * model.add(tf.layers.dense({units: 1, activation: 'sigmoid'})); + * + * model.summary(); + * ``` + * + * @param lineLength Custom line length, in number of characters. + * @param positions Custom widths of each of the columns, as either + * fractions of `lineLength` (e.g., `[0.5, 0.75, 1]`) or absolute number + * of characters (e.g., `[30, 50, 65]`). Each number corresponds to + * right-most (i.e., ending) position of a column. + * @param printFn Custom print function. Can be used to replace the default + * `console.log`. For example, you can use `x => {}` to mute the printed + * messages in the console. + * + * @doc {heading: 'Models', subheading: 'Classes'} + */ + summary(lineLength, positions, printFn = console.log) { + if (!this.built) { + this.build(); + } + super.summary(lineLength, positions, printFn); + } + /** + * Sets the weights of the model. + * + * @param weights Should be a list of Tensors with shapes and types matching + * the output of `model.getWeights()`. + */ + setWeights(weights) { + if (this.model == null) { + this.build(); + } + this.model.setWeights(weights); + } + /** + * Returns the loss value & metrics values for the model in test mode. + * + * Loss and metrics are specified during `compile()`, which needs to happen + * before calls to `evaluate()`. + * + * Computation is done in batches. 
+ * + * ```js + * const model = tf.sequential({ + * layers: [tf.layers.dense({units: 1, inputShape: [10]})] + * }); + * model.compile({optimizer: 'sgd', loss: 'meanSquaredError'}); + * const result = model.evaluate(tf.ones([8, 10]), tf.ones([8, 1]), { + * batchSize: 4, + * }); + * result.print(); + * ``` + * + * @param x `tf.Tensor` of test data, or an `Array` of `tf.Tensor`s if the + * model has multiple inputs. + * @param y `tf.Tensor` of target data, or an `Array` of `tf.Tensor`s if the + * model has multiple outputs. + * @param args A `ModelEvaluateConfig`, containing optional fields. + * + * @return `Scalar` test loss (if the model has a single output and no + * metrics) or `Array` of `Scalar`s (if the model has multiple outputs + * and/or metrics). The attribute `model.metricsNames` + * will give you the display labels for the scalar outputs. + * + * @doc {heading: 'Models', subheading: 'Classes'} + */ + evaluate(x, y, args = {}) { + if (!this.built) { + throw new RuntimeError('The model needs to be compiled before being used.'); + } + return this.model.evaluate(x, y, args); + } + // TODO(cais): Add code snippet below once real dataset objects are + // available. + /** + * Evaluate model using a dataset object. + * + * Note: Unlike `evaluate()`, this method is asynchronous (`async`). + * + * @param dataset A dataset object. Its `iterator()` method is expected + * to generate a dataset iterator object, the `next()` method of which + * is expected to produce data batches for evaluation. The return value + * of the `next()` call ought to contain a boolean `done` field and a + * `value` field. The `value` field is expected to be an array of two + * `tf.Tensor`s or an array of two nested `tf.Tensor` structures. The former + * case is for models with exactly one input and one output (e.g. + * a sequential model). The latter case is for models with multiple + * inputs and/or multiple outputs. 
Of the two items in the array, the + * first is the input feature(s) and the second is the output target(s). + * @param args A configuration object for the dataset-based evaluation. + * @returns Loss and metric values as an Array of `Scalar` objects. + * + * @doc {heading: 'Models', subheading: 'Classes'} + */ + async evaluateDataset(dataset, args) { + if (!this.built) { + throw new RuntimeError('The model needs to be compiled before being used.'); + } + return this.model.evaluateDataset(dataset, args); + } + /** + * Generates output predictions for the input samples. + * + * Computation is done in batches. + * + * Note: the "step" mode of predict() is currently not supported. + * This is because the TensorFlow.js core backend is imperative only. + * + * ```js + * const model = tf.sequential({ + * layers: [tf.layers.dense({units: 1, inputShape: [10]})] + * }); + * model.predict(tf.ones([2, 10])).print(); + * ``` + * + * @param x The input data, as a Tensor, or an `Array` of `tf.Tensor`s if + * the model has multiple inputs. + * @param conifg A `ModelPredictConfig` object containing optional fields. + * + * @return `tf.Tensor`(s) of predictions. + * + * @exception ValueError In case of mismatch between the provided input data + * and the model's expectations, or in case a stateful model receives a + * number of samples that is not a multiple of the batch size. + * + * @doc {heading: 'Models', subheading: 'Classes'} + */ + predict(x, args = {}) { + if (this.model == null) { + this.build(); + } + return this.model.predict(x, args); + } + /** + * Returns predictions for a single batch of samples. + * + * @param x: Input samples, as a Tensor, or list of Tensors (if the model + * has multiple inputs). + * @return Tensor(s) of predictions + */ + predictOnBatch(x) { + if (this.model == null) { + this.build(); + } + return this.model.predictOnBatch(x); + } + /** + * See `LayersModel.compile`. 
+ * + * @param args + */ + compile(args) { + this.build(); + this.model.compile(args); + this.optimizer_ = this.model.optimizer; + // tslint:disable-next-line:no-any + this.isOptimizerOwned = this.model.isOptimizerOwned; + this.loss = this.model.loss; + this.metrics = this.model.metrics; + // TODO(cais): Add this.lossWeights, this.sampleWeightMode, + // this.weightedMetrics, this.targets. + this.metricsTensors = this.model.metricsTensors; + this.metricsNames = this.model.metricsNames; + // TODO(cais): Add sampleWeights. + } + get optimizer() { + return this.model == null ? undefined : this.model.optimizer; + } + set optimizer(optimizer) { + this.model.optimizer = optimizer; + } + /** + * Trains the model for a fixed number of epochs (iterations on a dataset). + * + * ```js + * const model = tf.sequential({ + * layers: [tf.layers.dense({units: 1, inputShape: [10]})] + * }); + * model.compile({optimizer: 'sgd', loss: 'meanSquaredError'}); + * const history = await model.fit(tf.ones([8, 10]), tf.ones([8, 1]), { + * batchSize: 4, + * epochs: 3 + * }); + * console.log(history.history.loss[0]); + * ``` + * + * @param x `tf.Tensor` of training data, or an array of `tf.Tensor`s if the + * model has multiple inputs. If all inputs in the model are named, you can + * also pass a dictionary mapping input names to `tf.Tensor`s. + * @param y `tf.Tensor` of target (label) data, or an array of `tf.Tensor`s if + * the model has multiple outputs. If all outputs in the model are named, you + * can also pass a dictionary mapping output names to `tf.Tensor`s. + * @param args A `ModelFitConfig`, containing optional fields. + * + * @return A `History` instance. Its `history` attribute contains all + * information collected during training. + * + * @exception ValueError In case of mismatch between the provided input data + * and what the model expects. 
+ * + * @doc {heading: 'Models', subheading: 'Classes'} + */ + async fit(x, y, args = {}) { + if (!this.built) { + throw new RuntimeError('The model needs to be compiled before ' + + 'being used.'); + } + return this.model.fit(x, y, args); + } + /** + * Trains the model using a dataset object. + * + * ```js + * const xArray = [ + * [1, 1, 1, 1, 1, 1, 1, 1, 1], + * [1, 1, 1, 1, 1, 1, 1, 1, 1], + * [1, 1, 1, 1, 1, 1, 1, 1, 1], + * [1, 1, 1, 1, 1, 1, 1, 1, 1], + * ]; + * const yArray = [1, 1, 1, 1]; + * // Create a dataset from the JavaScript array. + * const xDataset = tf.data.array(xArray); + * const yDataset = tf.data.array(yArray); + * // Zip combines the `x` and `y` Datasets into a single Dataset, the + * // iterator of which will return an object containing of two tensors, + * // corresponding to `x` and `y`. The call to `batch(4)` will bundle + * // four such samples into a single object, with the same keys now pointing + * // to tensors that hold 4 examples, organized along the batch dimension. + * // The call to `shuffle(4)` causes each iteration through the dataset to + * // happen in a different order. The size of the shuffle window is 4. + * const xyDataset = tf.data.zip({xs: xDataset, ys: yDataset}) + * .batch(4) + * .shuffle(4); + * const model = tf.sequential({ + * layers: [tf.layers.dense({units: 1, inputShape: [9]})] + * }); + * model.compile({optimizer: 'sgd', loss: 'meanSquaredError'}); + * const history = await model.fitDataset(xyDataset, { + * epochs: 4, + * callbacks: {onEpochEnd: (epoch, logs) => console.log(logs.loss)} + * }); + * ``` + * + * @param dataset A dataset object. Its `iterator()` method is expected to + * generate a dataset iterator object, the `next()` method of which is + * expected to produce data batches for evaluation. The return value of the + * `next()` call ought to contain a boolean `done` field and a `value` + * field. 
+ * + * The `value` field is expected to be an object of with fields + * `xs` and `ys`, which point to the feature tensor and the target tensor, + * respectively. This case is for models with exactly one input and one + * output (e.g. a sequential model). For example: + * ```js + * {value: {xs: xsTensor, ys: ysTensor}, done: false} + * ``` + * + * If the model has multiple inputs, the `xs` field of `value` should + * be an object mapping input names to their respective feature tensors. + * For example: + * ```js + * { + * value: { + * xs: { + * input_1: xsTensor1, + * input_2: xsTensor2 + * }, + * ys: ysTensor + * }, + * done: false + * } + * ``` + * If the model has multiple outputs, the `ys` field of `value` should + * be an object mapping output names to their respective target tensors. + * For example: + * ```js + * { + * value: { + * xs: xsTensor, + * ys: { + * output_1: ysTensor1, + * output_2: ysTensor2 + * }, + * }, + * done: false + * } + * ``` + * @param args A `ModelFitDatasetArgs`, containing optional fields. + * + * @return A `History` instance. Its `history` attribute contains all + * information collected during training. + * + * @doc {heading: 'Models', subheading: 'Classes', ignoreCI: true} + */ + async fitDataset(dataset, args) { + if (!this.built) { + throw new RuntimeError('The model needs to be compiled before ' + + 'being used.'); + } + return this.model.fitDataset(dataset, args); + } + /** + * Runs a single gradient update on a single batch of data. + * + * This method differs from `fit()` and `fitDataset()` in the following + * regards: + * - It operates on exactly one batch of data. + * - It returns only the loss and metric values, instead of + * returning the batch-by-batch loss and metric values. + * - It doesn't support fine-grained options such as verbosity and + * callbacks. + * + * @param x Input data. It could be one of the following: + * - A `tf.Tensor`, or an Array of `tf.Tensor`s (in case the model has + * multiple inputs). 
+ * - An Object mapping input names to corresponding `tf.Tensor` (if the + * model has named inputs). + * @param y Target data. It could be either a `tf.Tensor` or multiple + * `tf.Tensor`s. It should be consistent with `x`. + * @returns Training loss or losses (in case the model has + * multiple outputs), along with metrics (if any), as numbers. + * + * @doc {heading: 'Models', subheading: 'Classes'} + */ + async trainOnBatch(x, y) { + return this.model.trainOnBatch(x, y); + } + /* See parent class for JsDoc */ + /** @nocollapse */ + static fromConfig(cls, config, customObjects = {}, fastWeightInit = false) { + let configArray; + let extraModelConfig = {}; + if (config instanceof Array) { + if (!(config[0].className != null) || + config[0]['className'] === 'Merge') { + throw new ValueError('Legacy serialization format not supported yet.'); + } + configArray = config; + } + else { + assert$1(config['layers'] != null, () => `When the config data for a Sequential model is not an Array, ` + + `it must be an Object that contains the 'layers' field.`); + configArray = config['layers']; + delete config['layers']; + extraModelConfig = config; + } + const model = new cls(extraModelConfig); + if (!(model instanceof Sequential)) { + throw new NotImplementedError(`Sequential.fromConfig called on non-Sequential input: ${model}`); + } + for (const conf of configArray) { + const customObjects = undefined; + const layer = deserialize(conf, customObjects, fastWeightInit); + if (fastWeightInit) { + layer.setFastWeightInitDuringBuild(true); + } + model.add(layer); + } + return model; + } + /** + * Setter used for force stopping of LayersModel.fit() (i.e., training). 
+ * + * Example: + * + * ```js + * const model = tf.sequential(); + * model.add(tf.layers.dense({units: 1, inputShape: [10]})); + * model.compile({loss: 'meanSquaredError', optimizer: 'sgd'}); + * const xs = tf.ones([8, 10]); + * const ys = tf.zeros([8, 1]); + * + * const history = await model.fit(xs, ys, { + * epochs: 10, + * callbacks: { + * onEpochEnd: async (epoch, logs) => { + * if (epoch === 2) { + * model.stopTraining = true; + * } + * } + * } + * }); + * + * // There should be only 3 values in the loss array, instead of 10 values, + * // due to the stopping after 3 epochs. + * console.log(history.history.loss); + * ``` + */ + set stopTraining(stop) { + // TODO(cais): When refactoring to remove the composition pattern happens, + // remove this method overriding. + if (this.model == null) { + throw new ValueError('Cannot set the stopTraining property of a sequential model before ' + + 'it is compiled.'); + } + this.model.stopTraining = stop; + } + get stopTraining() { + if (this.model == null) { + throw new ValueError('Cannot get the stopTraining property of a sequential model before ' + + 'it is compiled.'); + } + return this.model.stopTraining; + } + // TODO(cais): Override get trainableWeights() here + // tslint:disable-next-line:no-any + getConfig() { + // NOTE(cais): We override the return type of getConfig() to `any` here, + // because the `Sequential` class is a special case among `Container` + // subtypes in that its getConfig() method returns an Array (not a + // dict). 
+ const layers = []; + for (const layer of this.layers) { + const dict = {}; + dict['className'] = layer.getClassName(); + dict['config'] = layer.getConfig(); + layers.push(dict); + } + return { name: this.name, layers }; + } + } + /** @nocollapse */ + Sequential.className = 'Sequential'; + registerClass(Sequential); + + /** + * @license + * Copyright 2018 Google LLC + * + * Use of this source code is governed by an MIT-style + * license that can be found in the LICENSE file or at + * https://opensource.org/licenses/MIT. + * ============================================================================= + */ + // TODO(cais): Add doc string to all the public static functions in this + // class; include exectuable JavaScript code snippets where applicable + // (b/74074458). + // LayersModel and related factory methods. + /** + * A model is a data structure that consists of `Layers` and defines inputs + * and outputs. + * + * The key difference between `tf.model` and `tf.sequential` is that + * `tf.model` is more generic, supporting an arbitrary graph (without + * cycles) of layers. `tf.sequential` is less generic and supports only a linear + * stack of layers. + * + * When creating a `tf.LayersModel`, specify its input(s) and output(s). Layers + * are used to wire input(s) to output(s). + * + * For example, the following code snippet defines a model consisting of + * two `dense` layers, with 10 and 4 units, respectively. + * + * ```js + * // Define input, which has a size of 5 (not including batch dimension). + * const input = tf.input({shape: [5]}); + * + * // First dense layer uses relu activation. + * const denseLayer1 = tf.layers.dense({units: 10, activation: 'relu'}); + * // Second dense layer uses softmax activation. + * const denseLayer2 = tf.layers.dense({units: 4, activation: 'softmax'}); + * + * // Obtain the output symbolic tensor by applying the layers on the input. 
+ * const output = denseLayer2.apply(denseLayer1.apply(input)); + * + * // Create the model based on the inputs. + * const model = tf.model({inputs: input, outputs: output}); + * + * // The model can be used for training, evaluation and prediction. + * // For example, the following line runs prediction with the model on + * // some fake data. + * model.predict(tf.ones([2, 5])).print(); + * ``` + * See also: + * `tf.sequential`, `tf.loadLayersModel`. + * + * @doc {heading: 'Models', subheading: 'Creation'} + */ + function model(args) { + return new LayersModel(args); + } + /** + * Creates a `tf.Sequential` model. A sequential model is any model where the + * outputs of one layer are the inputs to the next layer, i.e. the model + * topology is a simple 'stack' of layers, with no branching or skipping. + * + * This means that the first layer passed to a `tf.Sequential` model should have + * a defined input shape. What that means is that it should have received an + * `inputShape` or `batchInputShape` argument, or for some type of layers + * (recurrent, Dense...) an `inputDim` argument. + * + * The key difference between `tf.model` and `tf.sequential` is that + * `tf.sequential` is less generic, supporting only a linear stack of layers. + * `tf.model` is more generic and supports an arbitrary graph (without + * cycles) of layers. + * + * Examples: + * + * ```js + * const model = tf.sequential(); + * + * // First layer must have an input shape defined. + * model.add(tf.layers.dense({units: 32, inputShape: [50]})); + * // Afterwards, TF.js does automatic shape inference. + * model.add(tf.layers.dense({units: 4})); + * + * // Inspect the inferred shape of the model's output, which equals + * // `[null, 4]`. The 1st dimension is the undetermined batch dimension; the + * // 2nd is the output size of the model's last layer. 
+ * console.log(JSON.stringify(model.outputs[0].shape)); + * ``` + * + * It is also possible to specify a batch size (with potentially undetermined + * batch dimension, denoted by "null") for the first layer using the + * `batchInputShape` key. The following example is equivalent to the above: + * + * ```js + * const model = tf.sequential(); + * + * // First layer must have a defined input shape + * model.add(tf.layers.dense({units: 32, batchInputShape: [null, 50]})); + * // Afterwards, TF.js does automatic shape inference. + * model.add(tf.layers.dense({units: 4})); + * + * // Inspect the inferred shape of the model's output. + * console.log(JSON.stringify(model.outputs[0].shape)); + * ``` + * + * You can also use an `Array` of already-constructed `Layer`s to create + * a `tf.Sequential` model: + * + * ```js + * const model = tf.sequential({ + * layers: [tf.layers.dense({units: 32, inputShape: [50]}), + * tf.layers.dense({units: 4})] + * }); + * console.log(JSON.stringify(model.outputs[0].shape)); + * ``` + * + * @doc {heading: 'Models', subheading: 'Creation'} + */ + function sequential(config) { + return new Sequential(config); + } + /** + * Used to instantiate an input to a model as a `tf.SymbolicTensor`. + * + * Users should call the `input` factory function for + * consistency with other generator functions. + * + * Example: + * + * ```js + * // Defines a simple logistic regression model with 32 dimensional input + * // and 3 dimensional output. + * const x = tf.input({shape: [32]}); + * const y = tf.layers.dense({units: 3, activation: 'softmax'}).apply(x); + * const model = tf.model({inputs: x, outputs: y}); + * model.predict(tf.ones([2, 32])).print(); + * ``` + * + * Note: `input` is only necessary when using `model`. When using + * `sequential`, specify `inputShape` for the first layer or use `inputLayer` + * as the first layer. 
+ * + * @doc {heading: 'Models', subheading: 'Inputs'} + */ + function input(config) { + return Input(config); + } + function registerCallbackConstructor(verbosityLevel, callbackConstructor) { + CallbackConstructorRegistry.registerCallbackConstructor(verbosityLevel, callbackConstructor); + } + + /** + * @license + * Copyright 2018 Google LLC + * + * Use of this source code is governed by an MIT-style + * license that can be found in the LICENSE file or at + * https://opensource.org/licenses/MIT. + * ============================================================================= + */ + /** + * Base class for Activations. + * + * Special note: due to cross-language compatibility reasons, the + * static readonly className field in this family of classes must be set to + * the initialLowerCamelCase name of the activation. + */ + let Activation$1 = class Activation extends Serializable { + getConfig() { + return {}; + } + }; + /** + * Exponential linear unit (ELU). + * Reference: https://arxiv.org/abs/1511.07289 + */ + class Elu extends Activation$1 { + /** + * Calculate the activation function. + * + * @param x: Input. + * @param alpha: Scaling factor the negative section. + * @return Output of the ELU activation. + */ + apply(x, alpha = 1) { + return elu$3(x, alpha); + } + } + /** @nocollapse */ + Elu.className = 'elu'; + registerClass(Elu); + /** + * Scaled Exponential Linear Unit. (Klambauer et al., 2017). + * Reference: Self-Normalizing Neural Networks, https://arxiv.org/abs/1706.02515 + * Notes: + * - To be used together with the initialization "lecunNormal". + * - To be used together with the dropout variant "AlphaDropout". 
+ */ + class Selu extends Activation$1 { + apply(x) { + return selu$2(x); + } + } + /** @nocollapse */ + Selu.className = 'selu'; + registerClass(Selu); + /** + * Rectified linear unit + */ + class Relu extends Activation$1 { + apply(x) { + return relu$2(x); + } + } + /** @nocollapse */ + Relu.className = 'relu'; + registerClass(Relu); + /** + * Rectified linear unit activation maxing out at 6.0. + */ + class Relu6 extends Activation$1 { + apply(x) { + return tidy(() => minimum$4(6.0, relu$2(x))); + } + } + /** @nocollapse */ + Relu6.className = 'relu6'; + registerClass(Relu6); + //* Linear activation (no-op) */ + class Linear extends Activation$1 { + apply(x) { + return x; + } + } + /** @nocollapse */ + Linear.className = 'linear'; + registerClass(Linear); + /** + * Sigmoid activation function. + */ + class Sigmoid extends Activation$1 { + apply(x) { + return sigmoid$2(x); + } + } + /** @nocollapse */ + Sigmoid.className = 'sigmoid'; + registerClass(Sigmoid); + /** + * Segment-wise linear approximation of sigmoid. + */ + class HardSigmoid extends Activation$1 { + apply(x) { + return hardSigmoid(x); + } + } + /** @nocollapse */ + HardSigmoid.className = 'hardSigmoid'; + registerClass(HardSigmoid); + /** + * Softplus activation function. + */ + class Softplus extends Activation$1 { + apply(x) { + return softplus$2(x); + } + } + /** @nocollapse */ + Softplus.className = 'softplus'; + registerClass(Softplus); + /** + * Softsign activation function. + */ + class Softsign extends Activation$1 { + apply(x) { + return softsign(x); + } + } + /** @nocollapse */ + Softsign.className = 'softsign'; + registerClass(Softsign); + /** + * Hyperbolic tangent function. + */ + class Tanh extends Activation$1 { + apply(x) { + return tanh$2(x); + } + } + /** @nocollapse */ + Tanh.className = 'tanh'; + registerClass(Tanh); + /** + * Softmax activation function + */ + let Softmax$1 = class Softmax extends Activation$1 { + /** + * Calculate the activation function. 
+ * + * @param x Tensor. + * @param axis Integer, axis along which the softmax normalization is applied. + * Invalid if < 2, as softmax across 1 (the batch dimension) is assumed to be + * an error. + * + * @returns a Tensor of the same shape as x + * + * @throws ValueError: In case `dim(x) < 2`. + */ + apply(x, axis = (-1)) { + return softmax$3(x, axis); + } + }; + /** @nocollapse */ + Softmax$1.className = 'softmax'; + registerClass(Softmax$1); + /** + * Log softmax activation function + */ + class LogSoftmax extends Activation$1 { + /** + * Calculate the activation function of log softmax: + * log( exp(x_i) / sum(exp(x)) ) + * + * @param x Tensor. + * @param axis Integer, axis along which the softmax normalization is applied. + * Invalid if < 2, as softmax across 1 (the batch dimension) is assumed to be + * an error. + * + * @returns a Tensor of the same shape as x + * + * @throws ValueError: In case `dim(x) < 2`. + */ + apply(x, axis = (-1)) { + return logSoftmax(x, axis); + } + } + /** @nocollapse */ + LogSoftmax.className = 'logSoftmax'; + registerClass(LogSoftmax); + /** + * Gelu activation function + */ + class Gelu extends Activation$1 { + /** + * Calculate the activation function. + * + * @param x Tensor. + * @returns a Tensor of the same shape as x + */ + apply(x) { + return tidy(() => { + return tidy(() => { + const sqrtTwo = Math.sqrt(2); + // Compute Φ(x) using the erf function + const cdf = mul(0.5, add$3(1, erf$2(div$1(x, sqrtTwo)))); + // Compute GELU(x) = x * Φ(x) + return mul(x, cdf); + }); + }); + } + } + /** @nocollapse */ + Gelu.className = 'gelu'; + registerClass(Gelu); + /** + * GeluNew activation function + */ + class GeluNew extends Activation$1 { + /** + * Calculate the activation function. + * + * @param x Tensor. 
+ * @returns a Tensor of the same shape as x + */ + apply(x) { + return tidy(() => { + return mul(0.5, mul(x, add$3(1, tanh$2(mul(sqrt$2(div$1(2, Math.PI)), add$3(x, mul(0.044715, pow$3(x, 3)))))))); + }); + } + } + /** @nocollapse */ + GeluNew.className = 'gelu_new'; + registerClass(GeluNew); + /** + * Mish activation function + */ + class Mish extends Activation$1 { + /** + * Calculate the activation function. + * + * @param x Tensor. + * @returns a Tensor of the same shape as x + */ + apply(x) { + return tidy(() => mul(x, tanh$2(softplus$2(x)))); + } + } + /** @nocollapse */ + Mish.className = 'mish'; + registerClass(Mish); + /** + * Swish activation function + */ + class Swish extends Activation$1 { + /** + * Calculate the activation function. + * + * @param x Tensor. + * @param alpha Scaling factor for the sigmoid function. + * @returns a Tensor of the same shape as x + */ + apply(x, alpha = 1) { + return tidy(() => mul(sigmoid$2(mul(x, alpha)), x)); + } + } + /** @nocollapse */ + Swish.className = 'swish'; + registerClass(Swish); + function serializeActivation(activation) { + return activation.getClassName(); + } + function deserializeActivation(config, customObjects = {}) { + return deserializeKerasObject(config, SerializationMap.getMap().classNameMap, customObjects, 'activation'); + } + function getActivation(identifier) { + if (identifier == null) { + const config = {}; + config['className'] = 'linear'; + config['config'] = {}; + return deserializeActivation(config); + } + if (typeof identifier === 'string') { + const config = {}; + config['className'] = identifier; + config['config'] = {}; + return deserializeActivation(config); + } + else if (identifier instanceof Activation$1) { + return identifier; + } + else { + return deserializeActivation(identifier); + } + } + + /** + * @license + * Copyright 2018 Google LLC + * + * Use of this source code is governed by an MIT-style + * license that can be found in the LICENSE file or at + * 
https://opensource.org/licenses/MIT. + * ============================================================================= + */ + function assertObjectArgs(args) { + if (args != null && typeof args !== 'object') { + throw new Error(`Argument to L1L2 regularizer's constructor is expected to be an ` + + `object, but received: ${args}`); + } + } + /** + * Regularizer base class. + */ + class Regularizer extends Serializable { + } + class L1L2 extends Regularizer { + constructor(args) { + super(); + assertObjectArgs(args); + this.l1 = args == null || args.l1 == null ? 0.01 : args.l1; + this.l2 = args == null || args.l2 == null ? 0.01 : args.l2; + this.hasL1 = this.l1 !== 0; + this.hasL2 = this.l2 !== 0; + } + /** + * Porting note: Renamed from __call__. + * @param x Variable of which to calculate the regularization score. + */ + apply(x) { + return tidy(() => { + let regularization = zeros$2([1]); + if (this.hasL1) { + regularization = add$3(regularization, sum$3(mul(this.l1, abs$2(x)))); + } + if (this.hasL2) { + regularization = + add$3(regularization, sum$3(mul(this.l2, square$1(x)))); + } + return reshape$3(regularization, []); + }); + } + getConfig() { + return { 'l1': this.l1, 'l2': this.l2 }; + } + /** @nocollapse */ + static fromConfig(cls, config) { + return new cls({ l1: config['l1'], l2: config['l2'] }); + } + } + /** @nocollapse */ + L1L2.className = 'L1L2'; + registerClass(L1L2); + function l1$1(args) { + assertObjectArgs(args); + return new L1L2({ l1: args != null ? args.l1 : null, l2: 0 }); + } + function l2$1(args) { + assertObjectArgs(args); + return new L1L2({ l2: args != null ? args.l2 : null, l1: 0 }); + } + // Maps the JavaScript-like identifier keys to the corresponding keras symbols. 
+ const REGULARIZER_IDENTIFIER_REGISTRY_SYMBOL_MAP = { + 'l1l2': 'L1L2' + }; + function serializeRegularizer(constraint) { + return serializeKerasObject(constraint); + } + function deserializeRegularizer(config, customObjects = {}) { + return deserializeKerasObject(config, SerializationMap.getMap().classNameMap, customObjects, 'regularizer'); + } + function getRegularizer(identifier) { + if (identifier == null) { + return null; + } + if (typeof identifier === 'string') { + const className = identifier in REGULARIZER_IDENTIFIER_REGISTRY_SYMBOL_MAP ? + REGULARIZER_IDENTIFIER_REGISTRY_SYMBOL_MAP[identifier] : + identifier; + const config = { className, config: {} }; + return deserializeRegularizer(config); + } + else if (identifier instanceof Regularizer) { + return identifier; + } + else { + return deserializeRegularizer(identifier); + } + } + + /** + * @license + * Copyright 2018 Google LLC + * + * Use of this source code is governed by an MIT-style + * license that can be found in the LICENSE file or at + * https://opensource.org/licenses/MIT. + * ============================================================================= + */ + class ReLU extends Layer { + constructor(args) { + super(args == null ? {} : args); + this.supportsMasking = true; + if (args != null) { + this.maxValue = args.maxValue; + } + } + call(inputs, kwargs) { + inputs = getExactlyOneTensor(inputs); + let output = relu$2(inputs); + if (this.maxValue != null) { + output = clipByValue$2(output, 0, this.maxValue); + } + return output; + } + computeOutputShape(inputShape) { + return inputShape; + } + getConfig() { + const config = { maxValue: this.maxValue }; + const baseConfig = super.getConfig(); + Object.assign(config, baseConfig); + return config; + } + } + /** @nocollapse */ + ReLU.className = 'ReLU'; + registerClass(ReLU); + class LeakyReLU extends Layer { + constructor(args) { + super(args == null ? 
{} : args); + this.DEFAULT_ALPHA = 0.3; + if (args == null) { + args = {}; + } + this.alpha = args.alpha == null ? this.DEFAULT_ALPHA : args.alpha; + } + call(inputs, kwargs) { + const x = getExactlyOneTensor(inputs); + return leakyRelu$2(x, this.alpha); + } + computeOutputShape(inputShape) { + return inputShape; + } + getConfig() { + const config = { alpha: this.alpha }; + const baseConfig = super.getConfig(); + Object.assign(config, baseConfig); + return config; + } + } + /** @nocollapse */ + LeakyReLU.className = 'LeakyReLU'; + registerClass(LeakyReLU); + class PReLU extends Layer { + constructor(args) { + super(args == null ? {} : args); + this.DEFAULT_ALPHA_INITIALIZER = 'zeros'; + if (args == null) { + args = {}; + } + this.supportsMasking = true; + this.alphaInitializer = + getInitializer(args.alphaInitializer || this.DEFAULT_ALPHA_INITIALIZER); + this.alphaRegularizer = getRegularizer(args.alphaRegularizer); + this.alphaConstraint = getConstraint(args.alphaConstraint); + if (args.sharedAxes == null) { + this.sharedAxes = null; + } + else if (Array.isArray(args.sharedAxes)) { + this.sharedAxes = args.sharedAxes; + } + else if (typeof args.sharedAxes === 'number') { + this.sharedAxes = [args.sharedAxes]; + } + else { + throw new ValueError(`Expected sharedAxes to be a number or an array of numbers, ` + + `but got ${args.sharedAxes}`); + } + } + build(inputShape) { + inputShape = getExactlyOneShape(inputShape); + const paramShape = inputShape.slice(1); + if (this.sharedAxes != null) { + for (const i of this.sharedAxes) { + paramShape[i - 1] = 1; + } + } + this.alpha = this.addWeight('alpha', paramShape, 'float32', this.alphaInitializer, this.alphaRegularizer, true, this.alphaConstraint); + // Set input spec. 
+ const axes = {}; + if (this.sharedAxes != null) { + for (let i = 1; i < inputShape.length; ++i) { + axes[i] = inputShape[i]; + } + } + this.inputSpec = [new InputSpec({ + ndim: inputShape.length, + axes, + })]; + this.built = true; + } + call(inputs, kwargs) { + inputs = getExactlyOneTensor(inputs); + return prelu$3(inputs, this.alpha.read()); + } + getConfig() { + const config = { + alphaInitializer: serializeInitializer(this.alphaInitializer), + alphaRegularizer: serializeRegularizer(this.alphaRegularizer), + alphaConstraint: serializeConstraint(this.alphaConstraint), + sharedAxes: this.sharedAxes + }; + const baseConfig = super.getConfig(); + Object.assign(config, baseConfig); + return config; + } + } + /** @nocollapse */ + PReLU.className = 'PReLU'; + registerClass(PReLU); + let ELU$3 = class ELU extends Layer { + constructor(args) { + super(args == null ? {} : args); + this.DEFAULT_ALPHA = 1.0; + if (args == null) { + args = {}; + } + if (args.alpha != null && args.alpha !== this.DEFAULT_ALPHA) { + throw new NotImplementedError(`Non-default alpha value (${args.alpha}) is not supported by the ` + + `ELU layer yet.`); + } + this.alpha = args.alpha == null ? this.DEFAULT_ALPHA : args.alpha; + } + call(inputs, kwargs) { + const x = getExactlyOneTensor(inputs); + return elu$4(x); + } + computeOutputShape(inputShape) { + return inputShape; + } + getConfig() { + const config = { alpha: this.alpha }; + const baseConfig = super.getConfig(); + Object.assign(config, baseConfig); + return config; + } + }; + /** @nocollapse */ + ELU$3.className = 'ELU'; + registerClass(ELU$3); + class ThresholdedReLU extends Layer { + constructor(args) { + super(args == null ? {} : args); + this.DEFAULT_THETA = 1.0; + if (args == null) { + args = {}; + } + this.theta = args.theta == null ? 
this.DEFAULT_THETA : args.theta; + } + call(inputs, kwargs) { + const x = getExactlyOneTensor(inputs); + return mul(x, cast$3(greater$3(x, this.theta), 'float32')); + } + computeOutputShape(inputShape) { + return inputShape; + } + getConfig() { + const config = { theta: this.theta }; + const baseConfig = super.getConfig(); + Object.assign(config, baseConfig); + return config; + } + } + /** @nocollapse */ + ThresholdedReLU.className = 'ThresholdedReLU'; + registerClass(ThresholdedReLU); + class Softmax extends Layer { + constructor(args) { + super(args == null ? {} : args); + this.DEFAULT_AXIS = 1.0; + if (args == null) { + args = {}; + } + this.softmax = new Softmax$1().apply; + this.axis = args.axis == null ? this.DEFAULT_AXIS : args.axis; + } + call(inputs, kwargs) { + // TODO(pforderique): Add tests for when `this.axis` is a number[]. + return tidy(() => { + let x = getExactlyOneTensor(inputs); + const mask = kwargs['mask']; + if (mask != null) { + // Since mask is 1.0 for positions we want to keep and 0.0 for masked + // positions, this operation will create a tensor which is 0.0 for + // positions we want to attend and -1e.9 for masked positions. + const adder = mul(sub$2(ones$1(x.shape), cast$3(mask, x.dtype)), scalar(-1e9)); + // Since we are adding it to the raw scores before the softmax, this + // is effectively the same as removing these entirely. 
+ x = add$3(x, adder); + } + if (this.axis instanceof Array) { + if (this.axis.length > 1) { + return exp$2(sub$2(x, logSumExp(x, this.axis, true))); + } + else { + return this.softmax(x, this.axis[0]); + } + } + return this.softmax(x, this.axis); + }); + } + computeOutputShape(inputShape) { + return inputShape; + } + getConfig() { + const config = { axis: this.axis }; + const baseConfig = super.getConfig(); + Object.assign(config, baseConfig); + return config; + } + } + /** @nocollapse */ + Softmax.className = 'Softmax'; + registerClass(Softmax); + + /** + * @license + * Copyright 2018 Google LLC + * + * Use of this source code is governed by an MIT-style + * license that can be found in the LICENSE file or at + * https://opensource.org/licenses/MIT. + * ============================================================================= + */ + /** + * Transforms a single number of array of numbers into an array of numbers. + * @param value + * @param n: The size of the tuple to be returned. + * @param name: Name of the parameter, used for generating error messages. + * @returns An array of numbers. + */ + function normalizeArray(value, n, name) { + if (typeof value === 'number') { + return pyListRepeat(value, n); + } + else { + if (value.length !== n) { + throw new ValueError(`The ${name} argument must be an integer or tuple of ${n} integers.` + + ` Received: ${value.length} elements.`); + } + for (let i = 0; i < n; ++i) { + const singleValue = value[i]; + if (!isInteger(singleValue)) { + throw new ValueError(`The ${name} argument must be an integer or tuple of ${n}` + + ` integers. Received: ${JSON.stringify(value)} including a` + + ` non-integer number ${singleValue}`); + } + } + return value; + } + } + /** + * Determines output length of a convolution given input length. + * @param inputLength + * @param filterSize + * @param padding + * @param stride + * @param dilation: dilation rate. 
+ */ + function convOutputLength(inputLength, filterSize, padding, stride, dilation = 1) { + if (inputLength == null) { + return inputLength; + } + const dilatedFilterSize = filterSize + (filterSize - 1) * (dilation - 1); + let outputLength; + if (padding === 'same') { + outputLength = inputLength; + } + else { // VALID + outputLength = inputLength - dilatedFilterSize + 1; + } + return Math.floor((outputLength + stride - 1) / stride); + } + function deconvLength(dimSize, strideSize, kernelSize, padding) { + if (dimSize == null) { + return null; + } + if (padding === 'valid') { + dimSize = dimSize * strideSize + max$2([kernelSize - strideSize, 0]); + } + else if (padding === 'same') { + dimSize = dimSize * strideSize; + } + else { + throw new ValueError(`Unsupport padding mode: ${padding}.`); + } + return dimSize; + } + + /** + * @license + * Copyright 2018 Google LLC + * + * Use of this source code is governed by an MIT-style + * license that can be found in the LICENSE file or at + * https://opensource.org/licenses/MIT. + * ============================================================================= + */ + /** + * Transpose and cast the input before the conv2d. + * @param x Input image tensor. + * @param dataFormat + */ + function preprocessConv2DInput(x, dataFormat) { + // TODO(cais): Cast type to float32 if not. + return tidy(() => { + checkDataFormat(dataFormat); + if (dataFormat === 'channelsFirst') { + return transpose$2(x, [0, 2, 3, 1]); // NCHW -> NHWC. + } + else { + return x; + } + }); + } + /** + * Transpose and cast the input before the conv3d. + * @param x Input image tensor. + * @param dataFormat + */ + function preprocessConv3DInput(x, dataFormat) { + return tidy(() => { + checkDataFormat(dataFormat); + if (dataFormat === 'channelsFirst') { + return transpose$2(x, [0, 2, 3, 4, 1]); // NCDHW -> NDHWC. + } + else { + return x; + } + }); + } + /** + * 1D-convolution with bias added. 
+ * + * Porting Note: This function does not exist in the Python Keras backend. + * It is exactly the same as `conv2d`, except the added `bias`. + * + * @param x Input tensor, rank-3, of shape `[batchSize, width, inChannels]`. + * @param kernel Kernel, rank-3, of shape `[filterWidth, inDepth, outDepth]`. + * @param bias Bias, rank-3, of shape `[outDepth]`. + * @param strides + * @param padding Padding mode. + * @param dataFormat Data format. + * @param dilationRate + * @returns The result of the 1D convolution. + * @throws ValueError, if `x`, `kernel` or `bias` is not of the correct rank. + */ + function conv1dWithBias(x, kernel, bias, strides = 1, padding = 'valid', dataFormat, dilationRate = 1) { + return tidy(() => { + if (dataFormat == null) { + dataFormat = imageDataFormat(); + } + checkDataFormat(dataFormat); + // Check the ranks of x, kernel and bias. + if (x.shape.length !== 3) { + throw new ValueError(`The input of a conv1dWithBias operation should be 3, but is ` + + `${x.shape.length} instead.`); + } + if (kernel.shape.length !== 3) { + throw new ValueError(`The kernel for a conv1dWithBias operation should be 3, but is ` + + `${kernel.shape.length} instead`); + } + if (bias != null && bias.shape.length !== 1) { + throw new ValueError(`The bias for a conv1dWithBias operation should be 1, but is ` + + `${bias.shape.length} instead`); + } + // TODO(cais): Support CAUSAL padding mode. + if (dataFormat === 'channelsFirst') { + x = transpose$2(x, [0, 2, 1]); // NCW -> NWC. + } + if (padding === 'causal') { + throw new NotImplementedError('The support for CAUSAL padding mode in conv1dWithBias is not ' + + 'implemented yet.'); + } + let y = conv1d$2(x, kernel, strides, padding === 'same' ? 'same' : 'valid', 'NWC', dilationRate); + if (bias != null) { + y = biasAdd(y, bias); + } + return y; + }); + } + /** + * 1D-convolution. + * + * @param x Input tensor, rank-3, of shape `[batchSize, width, inChannels]`. 
+ * @param kernel Kernel, rank-3, of shape `[filterWidth, inDepth, outDepth]`.s + * @param strides + * @param padding Padding mode. + * @param dataFormat Data format. + * @param dilationRate + * @returns The result of the 1D convolution. + * @throws ValueError, if `x`, `kernel` or `bias` is not of the correct rank. + */ + function conv1d$1(x, kernel, strides = 1, padding = 'valid', dataFormat, dilationRate = 1) { + return tidy(() => { + checkDataFormat(dataFormat); + return conv1dWithBias(x, kernel, null, strides, padding, dataFormat, dilationRate); + }); + } + /** + * 2D Convolution + * @param x + * @param kernel kernel of the convolution. + * @param strides strides array. + * @param padding padding mode. Default to 'valid'. + * @param dataFormat data format. Defaults to 'channelsLast'. + * @param dilationRate dilation rate array. + * @returns Result of the 2D pooling. + */ + function conv2d$2(x, kernel, strides = [1, 1], padding = 'valid', dataFormat, dilationRate) { + return tidy(() => { + checkDataFormat(dataFormat); + return conv2dWithBiasActivation(x, kernel, null, strides, padding, dataFormat, dilationRate); + }); + } + /** + * 2D Convolution with an added bias and optional activation. + * Note: This function does not exist in the Python Keras Backend. This function + * is exactly the same as `conv2d`, except the added `bias`. 
+ */ + function conv2dWithBiasActivation(x, kernel, bias, strides = [1, 1], padding = 'valid', dataFormat, dilationRate, activation = null) { + return tidy(() => { + if (dataFormat == null) { + dataFormat = imageDataFormat(); + } + checkDataFormat(dataFormat); + if (x.rank !== 3 && x.rank !== 4) { + throw new ValueError(`conv2dWithBiasActivation expects input to be of rank 3 or 4, ` + + `but received ${x.rank}.`); + } + if (kernel.rank !== 3 && kernel.rank !== 4) { + throw new ValueError(`conv2dWithBiasActivation expects kernel to be of rank 3 or 4, ` + + `but received ${x.rank}.`); + } + let y = preprocessConv2DInput(x, dataFormat); + if (padding === 'causal') { + throw new NotImplementedError('The support for CAUSAL padding mode in conv1dWithBias is not ' + + 'implemented yet.'); + } + y = conv2d$3({ + x: y, + filter: kernel, + strides: strides, + pad: padding === 'same' ? 'same' : 'valid', + dilations: dilationRate, + dataFormat: 'NHWC', + bias, + activation + }); + if (dataFormat === 'channelsFirst') { + y = transpose$2(y, [0, 3, 1, 2]); + } + return y; + }); + } + /** + * 3D Convolution. + * @param x + * @param kernel kernel of the convolution. + * @param strides strides array. + * @param padding padding mode. Default to 'valid'. + * @param dataFormat data format. Defaults to 'channelsLast'. + * @param dilationRate dilation rate array. + * @returns Result of the 3D convolution. + */ + function conv3d$1(x, kernel, strides = [1, 1, 1], padding = 'valid', dataFormat, dilationRate) { + return tidy(() => { + checkDataFormat(dataFormat); + return conv3dWithBias(x, kernel, null, strides, padding, dataFormat, dilationRate); + }); + } + /** + * 3D Convolution with an added bias. + * Note: This function does not exist in the Python Keras Backend. This function + * is exactly the same as `conv3d`, except the added `bias`. 
+ */ + function conv3dWithBias(x, kernel, bias, strides = [1, 1, 1], padding = 'valid', dataFormat, dilationRate) { + return tidy(() => { + if (dataFormat == null) { + dataFormat = imageDataFormat(); + } + checkDataFormat(dataFormat); + if (x.rank !== 4 && x.rank !== 5) { + throw new ValueError(`conv3dWithBias expects input to be of rank 4 or 5, but received ` + + `${x.rank}.`); + } + if (kernel.rank !== 4 && kernel.rank !== 5) { + throw new ValueError(`conv3dWithBias expects kernel to be of rank 4 or 5, but received ` + + `${x.rank}.`); + } + let y = preprocessConv3DInput(x, dataFormat); + if (padding === 'causal') { + throw new NotImplementedError('The support for CAUSAL padding mode in conv3dWithBias is not ' + + 'implemented yet.'); + } + y = conv3d$2(y, kernel, strides, padding === 'same' ? 'same' : 'valid', 'NDHWC', dilationRate); + if (bias != null) { + y = biasAdd(y, bias); + } + if (dataFormat === 'channelsFirst') { + y = transpose$2(y, [0, 4, 1, 2, 3]); + } + return y; + }); + } + /** + * Abstract convolution layer. + */ + class BaseConv extends Layer { + constructor(rank, args) { + super(args); + this.bias = null; + this.DEFAULT_KERNEL_INITIALIZER = 'glorotNormal'; + this.DEFAULT_BIAS_INITIALIZER = 'zeros'; + BaseConv.verifyArgs(args); + this.rank = rank; + assertPositiveInteger(this.rank, 'rank'); + if (this.rank !== 1 && this.rank !== 2 && this.rank !== 3) { + throw new NotImplementedError(`Convolution layer for rank other than 1, 2, or 3 (${this.rank}) is ` + + `not implemented yet.`); + } + this.kernelSize = normalizeArray(args.kernelSize, rank, 'kernelSize'); + this.strides = normalizeArray(args.strides == null ? 1 : args.strides, rank, 'strides'); + this.padding = args.padding == null ? 'valid' : args.padding; + checkPaddingMode(this.padding); + this.dataFormat = + args.dataFormat == null ? 
'channelsLast' : args.dataFormat; + checkDataFormat(this.dataFormat); + this.activation = getActivation(args.activation); + this.useBias = args.useBias == null ? true : args.useBias; + this.biasInitializer = + getInitializer(args.biasInitializer || this.DEFAULT_BIAS_INITIALIZER); + this.biasConstraint = getConstraint(args.biasConstraint); + this.biasRegularizer = getRegularizer(args.biasRegularizer); + this.activityRegularizer = getRegularizer(args.activityRegularizer); + this.dilationRate = normalizeArray(args.dilationRate == null ? 1 : args.dilationRate, rank, 'dilationRate'); + if (this.rank === 1 && + (Array.isArray(this.dilationRate) && this.dilationRate.length !== 1)) { + throw new ValueError(`dilationRate must be a number or an array of a single number ` + + `for 1D convolution, but received ` + + `${JSON.stringify(this.dilationRate)}`); + } + else if (this.rank === 2) { + if (typeof this.dilationRate === 'number') { + this.dilationRate = [this.dilationRate, this.dilationRate]; + } + else if (this.dilationRate.length !== 2) { + throw new ValueError(`dilationRate must be a number or array of two numbers for 2D ` + + `convolution, but received ${JSON.stringify(this.dilationRate)}`); + } + } + else if (this.rank === 3) { + if (typeof this.dilationRate === 'number') { + this.dilationRate = + [this.dilationRate, this.dilationRate, this.dilationRate]; + } + else if (this.dilationRate.length !== 3) { + throw new ValueError(`dilationRate must be a number or array of three numbers for 3D ` + + `convolution, but received ${JSON.stringify(this.dilationRate)}`); + } + } + } + static verifyArgs(args) { + // Check config.kernelSize type and shape. 
+ assert('kernelSize' in args, `required key 'kernelSize' not in config`); + if (typeof args.kernelSize !== 'number' && + !checkArrayTypeAndLength(args.kernelSize, 'number', 1, 3)) { + throw new ValueError(`BaseConv expects config.kernelSize to be number or number[] with ` + + `length 1, 2, or 3, but received ${JSON.stringify(args.kernelSize)}.`); + } + } + getConfig() { + const config = { + kernelSize: this.kernelSize, + strides: this.strides, + padding: this.padding, + dataFormat: this.dataFormat, + dilationRate: this.dilationRate, + activation: serializeActivation(this.activation), + useBias: this.useBias, + biasInitializer: serializeInitializer(this.biasInitializer), + biasRegularizer: serializeRegularizer(this.biasRegularizer), + activityRegularizer: serializeRegularizer(this.activityRegularizer), + biasConstraint: serializeConstraint(this.biasConstraint) + }; + const baseConfig = super.getConfig(); + Object.assign(config, baseConfig); + return config; + } + } + /** + * Abstract nD convolution layer. Ancestor of convolution layers which reduce + * across channels, i.e., Conv1D and Conv2D, but not DepthwiseConv2D. + */ + class Conv extends BaseConv { + constructor(rank, args) { + super(rank, args); + this.kernel = null; + Conv.verifyArgs(args); + this.filters = args.filters; + assertPositiveInteger(this.filters, 'filters'); + this.kernelInitializer = getInitializer(args.kernelInitializer || this.DEFAULT_KERNEL_INITIALIZER); + this.kernelConstraint = getConstraint(args.kernelConstraint); + this.kernelRegularizer = getRegularizer(args.kernelRegularizer); + } + build(inputShape) { + inputShape = getExactlyOneShape(inputShape); + const channelAxis = this.dataFormat === 'channelsFirst' ? 1 : inputShape.length - 1; + if (inputShape[channelAxis] == null) { + throw new ValueError(`The channel dimension of the input should be defined. 
` + + `Found ${inputShape[channelAxis]}`); + } + const inputDim = inputShape[channelAxis]; + const kernelShape = this.kernelSize.concat([inputDim, this.filters]); + this.kernel = this.addWeight('kernel', kernelShape, null, this.kernelInitializer, this.kernelRegularizer, true, this.kernelConstraint); + if (this.useBias) { + this.bias = this.addWeight('bias', [this.filters], null, this.biasInitializer, this.biasRegularizer, true, this.biasConstraint); + } + this.inputSpec = [{ ndim: this.rank + 2, axes: { [channelAxis]: inputDim } }]; + this.built = true; + } + call(inputs, kwargs) { + return tidy(() => { + inputs = getExactlyOneTensor(inputs); + let outputs; + const biasValue = this.bias == null ? null : this.bias.read(); + const fusedActivationName = mapActivationToFusedKernel(this.activation.getClassName()); + if (fusedActivationName != null && this.rank === 2) { + outputs = conv2dWithBiasActivation(inputs, this.kernel.read(), biasValue, this.strides, this.padding, this.dataFormat, this.dilationRate, fusedActivationName); + } + else { + if (this.rank === 1) { + outputs = conv1dWithBias(inputs, this.kernel.read(), biasValue, this.strides[0], this.padding, this.dataFormat, this.dilationRate[0]); + } + else if (this.rank === 2) { + // TODO(cais): Move up to constructor. + outputs = conv2dWithBiasActivation(inputs, this.kernel.read(), biasValue, this.strides, this.padding, this.dataFormat, this.dilationRate); + } + else if (this.rank === 3) { + outputs = conv3dWithBias(inputs, this.kernel.read(), biasValue, this.strides, this.padding, this.dataFormat, this.dilationRate); + } + else { + throw new NotImplementedError('convolutions greater than 3D are not implemented yet.'); + } + if (this.activation != null) { + outputs = this.activation.apply(outputs); + } + } + return outputs; + }); + } + computeOutputShape(inputShape) { + inputShape = getExactlyOneShape(inputShape); + const newSpace = []; + const space = (this.dataFormat === 'channelsLast') ? 
+ inputShape.slice(1, inputShape.length - 1) : + inputShape.slice(2); + for (let i = 0; i < space.length; ++i) { + const newDim = convOutputLength(space[i], this.kernelSize[i], this.padding, this.strides[i], typeof this.dilationRate === 'number' ? this.dilationRate : + this.dilationRate[i]); + newSpace.push(newDim); + } + let outputShape = [inputShape[0]]; + if (this.dataFormat === 'channelsLast') { + outputShape = outputShape.concat(newSpace); + outputShape.push(this.filters); + } + else { + outputShape.push(this.filters); + outputShape = outputShape.concat(newSpace); + } + return outputShape; + } + getConfig() { + const config = { + filters: this.filters, + kernelInitializer: serializeInitializer(this.kernelInitializer), + kernelRegularizer: serializeRegularizer(this.kernelRegularizer), + kernelConstraint: serializeConstraint(this.kernelConstraint) + }; + const baseConfig = super.getConfig(); + Object.assign(config, baseConfig); + return config; + } + static verifyArgs(args) { + // Check config.filters type, shape, and value. + if (!('filters' in args) || typeof args.filters !== 'number' || + args.filters < 1) { + throw new ValueError(`Convolution layer expected config.filters to be a 'number' > 0 ` + + `but got ${JSON.stringify(args.filters)}`); + } + } + } + class Conv2D extends Conv { + constructor(args) { + super(2, args); + Conv2D.verifyArgs(args); + } + getConfig() { + const config = super.getConfig(); + delete config['rank']; + return config; + } + static verifyArgs(args) { + // config.kernelSize must be a number or array of numbers. 
+ if ((typeof args.kernelSize !== 'number') && + !checkArrayTypeAndLength(args.kernelSize, 'number', 1, 2)) { + throw new ValueError(`Conv2D expects config.kernelSize to be number or number[] with ` + + `length 1 or 2, but received ${JSON.stringify(args.kernelSize)}.`); + } + } + } + /** @nocollapse */ + Conv2D.className = 'Conv2D'; + registerClass(Conv2D); + class Conv3D extends Conv { + constructor(args) { + super(3, args); + Conv3D.verifyArgs(args); + } + getConfig() { + const config = super.getConfig(); + delete config['rank']; + return config; + } + static verifyArgs(args) { + // config.kernelSize must be a number or array of numbers. + if (typeof args.kernelSize !== 'number') { + if (!(Array.isArray(args.kernelSize) && + (args.kernelSize.length === 1 || args.kernelSize.length === 3))) { + throw new ValueError(`Conv3D expects config.kernelSize to be number or` + + ` [number, number, number], but received ${JSON.stringify(args.kernelSize)}.`); + } + } + } + } + /** @nocollapse */ + Conv3D.className = 'Conv3D'; + registerClass(Conv3D); + class Conv2DTranspose extends Conv2D { + constructor(args) { + super(args); + this.inputSpec = [new InputSpec({ ndim: 4 })]; + if (this.padding !== 'same' && this.padding !== 'valid') { + throw new ValueError(`Conv2DTranspose currently supports only padding modes 'same' ` + + `and 'valid', but received padding mode ${this.padding}`); + } + } + build(inputShape) { + inputShape = getExactlyOneShape(inputShape); + if (inputShape.length !== 4) { + throw new ValueError('Input should have rank 4; Received input shape: ' + + JSON.stringify(inputShape)); + } + const channelAxis = this.dataFormat === 'channelsFirst' ? 1 : inputShape.length - 1; + if (inputShape[channelAxis] == null) { + throw new ValueError('The channel dimension of the inputs should be defined. 
' + + 'Found `None`.'); + } + const inputDim = inputShape[channelAxis]; + const kernelShape = this.kernelSize.concat([this.filters, inputDim]); + this.kernel = this.addWeight('kernel', kernelShape, 'float32', this.kernelInitializer, this.kernelRegularizer, true, this.kernelConstraint); + if (this.useBias) { + this.bias = this.addWeight('bias', [this.filters], 'float32', this.biasInitializer, this.biasRegularizer, true, this.biasConstraint); + } + // Set input spec. + this.inputSpec = + [new InputSpec({ ndim: 4, axes: { [channelAxis]: inputDim } })]; + this.built = true; + } + call(inputs, kwargs) { + return tidy(() => { + let input = getExactlyOneTensor(inputs); + if (input.shape.length !== 4) { + throw new ValueError(`Conv2DTranspose.call() expects input tensor to be rank-4, but ` + + `received a tensor of rank-${input.shape.length}`); + } + const inputShape = input.shape; + const batchSize = inputShape[0]; + let hAxis; + let wAxis; + if (this.dataFormat === 'channelsFirst') { + hAxis = 2; + wAxis = 3; + } + else { + hAxis = 1; + wAxis = 2; + } + const height = inputShape[hAxis]; + const width = inputShape[wAxis]; + const kernelH = this.kernelSize[0]; + const kernelW = this.kernelSize[1]; + const strideH = this.strides[0]; + const strideW = this.strides[1]; + // Infer the dynamic output shape. + const outHeight = deconvLength(height, strideH, kernelH, this.padding); + const outWidth = deconvLength(width, strideW, kernelW, this.padding); + // Porting Note: We don't branch based on `this.dataFormat` here, + // because + // the tjfs-core function `conv2dTranspose` called below always + // assumes channelsLast. 
+ const outputShape = [batchSize, outHeight, outWidth, this.filters]; + if (this.dataFormat !== 'channelsLast') { + input = transpose$2(input, [0, 2, 3, 1]); + } + let outputs = conv2dTranspose$1(input, this.kernel.read(), outputShape, this.strides, this.padding); + if (this.dataFormat !== 'channelsLast') { + outputs = transpose$2(outputs, [0, 3, 1, 2]); + } + if (this.bias != null) { + outputs = + biasAdd(outputs, this.bias.read(), this.dataFormat); + } + if (this.activation != null) { + outputs = this.activation.apply(outputs); + } + return outputs; + }); + } + computeOutputShape(inputShape) { + inputShape = getExactlyOneShape(inputShape); + const outputShape = inputShape.slice(); + let channelAxis; + let heightAxis; + let widthAxis; + if (this.dataFormat === 'channelsFirst') { + channelAxis = 1; + heightAxis = 2; + widthAxis = 3; + } + else { + channelAxis = 3; + heightAxis = 1; + widthAxis = 2; + } + const kernelH = this.kernelSize[0]; + const kernelW = this.kernelSize[1]; + const strideH = this.strides[0]; + const strideW = this.strides[1]; + outputShape[channelAxis] = this.filters; + outputShape[heightAxis] = + deconvLength(outputShape[heightAxis], strideH, kernelH, this.padding); + outputShape[widthAxis] = + deconvLength(outputShape[widthAxis], strideW, kernelW, this.padding); + return outputShape; + } + getConfig() { + const config = super.getConfig(); + delete config['dilationRate']; + return config; + } + } + /** @nocollapse */ + Conv2DTranspose.className = 'Conv2DTranspose'; + registerClass(Conv2DTranspose); + class Conv3DTranspose extends Conv3D { + constructor(args) { + super(args); + this.inputSpec = [new InputSpec({ ndim: 5 })]; + if (this.padding !== 'same' && this.padding !== 'valid') { + throw new ValueError(`Conv3DTranspose currently supports only padding modes 'same' ` + + `and 'valid', but received padding mode ${this.padding}`); + } + } + build(inputShape) { + inputShape = getExactlyOneShape(inputShape); + if (inputShape.length !== 5) { + 
throw new ValueError('Input should have rank 5; Received input shape: ' + + JSON.stringify(inputShape)); + } + const channelAxis = this.dataFormat === 'channelsFirst' ? 1 : inputShape.length - 1; + if (inputShape[channelAxis] == null) { + throw new ValueError('The channel dimension of the inputs should be defined. ' + + 'Found `None`.'); + } + const inputDim = inputShape[channelAxis]; + const kernelShape = this.kernelSize.concat([this.filters, inputDim]); + this.kernel = this.addWeight('kernel', kernelShape, 'float32', this.kernelInitializer, this.kernelRegularizer, true, this.kernelConstraint); + if (this.useBias) { + this.bias = this.addWeight('bias', [this.filters], 'float32', this.biasInitializer, this.biasRegularizer, true, this.biasConstraint); + } + // Set input spec. + this.inputSpec = + [new InputSpec({ ndim: 5, axes: { [channelAxis]: inputDim } })]; + this.built = true; + } + call(inputs, kwargs) { + return tidy(() => { + let input = getExactlyOneTensor(inputs); + if (input.shape.length !== 5) { + throw new ValueError(`Conv3DTranspose.call() expects input tensor to be rank-4, but ` + + `received a tensor of rank-${input.shape.length}`); + } + const inputShape = input.shape; + const batchSize = inputShape[0]; + let hAxis; + let wAxis; + let dAxis; + if (this.dataFormat === 'channelsFirst') { + dAxis = 2; + hAxis = 3; + wAxis = 4; + } + else { + dAxis = 1; + hAxis = 2; + wAxis = 3; + } + const depth = inputShape[dAxis]; + const height = inputShape[hAxis]; + const width = inputShape[wAxis]; + const kernelD = this.kernelSize[0]; + const kernelH = this.kernelSize[1]; + const kernelW = this.kernelSize[2]; + const strideD = this.strides[0]; + const strideH = this.strides[1]; + const strideW = this.strides[2]; + // Infer the dynamic output shape. 
+ const outDepth = deconvLength(depth, strideD, kernelD, this.padding); + const outHeight = deconvLength(height, strideH, kernelH, this.padding); + const outWidth = deconvLength(width, strideW, kernelW, this.padding); + // Same as `conv2dTranspose`. We always assumes channelsLast. + const outputShape = [batchSize, outDepth, outHeight, outWidth, this.filters]; + if (this.dataFormat !== 'channelsLast') { + input = transpose$2(input, [0, 2, 3, 4, 1]); + } + let outputs = conv3dTranspose$1(input, this.kernel.read(), outputShape, this.strides, this.padding); + if (this.dataFormat !== 'channelsLast') { + outputs = transpose$2(outputs, [0, 4, 1, 2, 3]); + } + if (this.bias !== null) { + outputs = + biasAdd(outputs, this.bias.read(), this.dataFormat); + } + if (this.activation !== null) { + outputs = this.activation.apply(outputs); + } + return outputs; + }); + } + computeOutputShape(inputShape) { + inputShape = getExactlyOneShape(inputShape); + const outputShape = inputShape.slice(); + let channelAxis; + let depthAxis; + let heightAxis; + let widthAxis; + if (this.dataFormat === 'channelsFirst') { + channelAxis = 1; + depthAxis = 2; + heightAxis = 3; + widthAxis = 4; + } + else { + channelAxis = 4; + depthAxis = 1; + heightAxis = 2; + widthAxis = 3; + } + const kernelD = this.kernelSize[0]; + const kernelH = this.kernelSize[1]; + const kernelW = this.kernelSize[2]; + const strideD = this.strides[0]; + const strideH = this.strides[1]; + const strideW = this.strides[2]; + outputShape[channelAxis] = this.filters; + outputShape[depthAxis] = + deconvLength(outputShape[depthAxis], strideD, kernelD, this.padding); + outputShape[heightAxis] = + deconvLength(outputShape[heightAxis], strideH, kernelH, this.padding); + outputShape[widthAxis] = + deconvLength(outputShape[widthAxis], strideW, kernelW, this.padding); + return outputShape; + } + getConfig() { + const config = super.getConfig(); + delete config['dilationRate']; + return config; + } + } + /** @nocollapse */ + 
Conv3DTranspose.className = 'Conv3DTranspose'; + registerClass(Conv3DTranspose); + class SeparableConv extends Conv { + constructor(rank, config) { + super(rank, config); + this.DEFAULT_DEPTHWISE_INITIALIZER = 'glorotUniform'; + this.DEFAULT_POINTWISE_INITIALIZER = 'glorotUniform'; + this.depthwiseKernel = null; + this.pointwiseKernel = null; + if (config.filters == null) { + throw new ValueError('The `filters` configuration field is required by SeparableConv, ' + + 'but is unspecified.'); + } + if (config.kernelInitializer != null || config.kernelRegularizer != null || + config.kernelConstraint != null) { + throw new ValueError('Fields kernelInitializer, kernelRegularizer and kernelConstraint ' + + 'are invalid for SeparableConv2D. Use depthwiseInitializer, ' + + 'depthwiseRegularizer, depthwiseConstraint, pointwiseInitializer, ' + + 'pointwiseRegularizer and pointwiseConstraint instead.'); + } + if (config.padding != null && config.padding !== 'same' && + config.padding !== 'valid') { + throw new ValueError(`SeparableConv${this.rank}D supports only padding modes: ` + + `'same' and 'valid', but received ${JSON.stringify(config.padding)}`); + } + this.depthMultiplier = + config.depthMultiplier == null ? 
1 : config.depthMultiplier; + this.depthwiseInitializer = getInitializer(config.depthwiseInitializer || this.DEFAULT_DEPTHWISE_INITIALIZER); + this.depthwiseRegularizer = getRegularizer(config.depthwiseRegularizer); + this.depthwiseConstraint = getConstraint(config.depthwiseConstraint); + this.pointwiseInitializer = getInitializer(config.depthwiseInitializer || this.DEFAULT_POINTWISE_INITIALIZER); + this.pointwiseRegularizer = getRegularizer(config.pointwiseRegularizer); + this.pointwiseConstraint = getConstraint(config.pointwiseConstraint); + } + build(inputShape) { + inputShape = getExactlyOneShape(inputShape); + if (inputShape.length < this.rank + 2) { + throw new ValueError(`Inputs to SeparableConv${this.rank}D should have rank ` + + `${this.rank + 2}, but received input shape: ` + + `${JSON.stringify(inputShape)}`); + } + const channelAxis = this.dataFormat === 'channelsFirst' ? 1 : inputShape.length - 1; + if (inputShape[channelAxis] == null || inputShape[channelAxis] < 0) { + throw new ValueError(`The channel dimension of the inputs should be defined, ` + + `but found ${JSON.stringify(inputShape[channelAxis])}`); + } + const inputDim = inputShape[channelAxis]; + const depthwiseKernelShape = this.kernelSize.concat([inputDim, this.depthMultiplier]); + const pointwiseKernelShape = []; + for (let i = 0; i < this.rank; ++i) { + pointwiseKernelShape.push(1); + } + pointwiseKernelShape.push(inputDim * this.depthMultiplier, this.filters); + const trainable = true; + this.depthwiseKernel = this.addWeight('depthwise_kernel', depthwiseKernelShape, 'float32', this.depthwiseInitializer, this.depthwiseRegularizer, trainable, this.depthwiseConstraint); + this.pointwiseKernel = this.addWeight('pointwise_kernel', pointwiseKernelShape, 'float32', this.pointwiseInitializer, this.pointwiseRegularizer, trainable, this.pointwiseConstraint); + if (this.useBias) { + this.bias = this.addWeight('bias', [this.filters], 'float32', this.biasInitializer, this.biasRegularizer, trainable, 
this.biasConstraint); + } + else { + this.bias = null; + } + this.inputSpec = + [new InputSpec({ ndim: this.rank + 2, axes: { [channelAxis]: inputDim } })]; + this.built = true; + } + call(inputs, kwargs) { + return tidy(() => { + inputs = getExactlyOneTensor(inputs); + let output; + if (this.rank === 1) { + throw new NotImplementedError('1D separable convolution is not implemented yet.'); + } + else if (this.rank === 2) { + if (this.dataFormat === 'channelsFirst') { + inputs = transpose$2(inputs, [0, 2, 3, 1]); // NCHW -> NHWC. + } + output = separableConv2d$1(inputs, this.depthwiseKernel.read(), this.pointwiseKernel.read(), this.strides, this.padding, this.dilationRate, 'NHWC'); + } + if (this.useBias) { + output = biasAdd(output, this.bias.read(), this.dataFormat); + } + if (this.activation != null) { + output = this.activation.apply(output); + } + if (this.dataFormat === 'channelsFirst') { + output = transpose$2(output, [0, 3, 1, 2]); // NHWC -> NCHW. + } + return output; + }); + } + getConfig() { + const config = super.getConfig(); + delete config['rank']; + delete config['kernelInitializer']; + delete config['kernelRegularizer']; + delete config['kernelConstraint']; + config['depthwiseInitializer'] = + serializeInitializer(this.depthwiseInitializer); + config['pointwiseInitializer'] = + serializeInitializer(this.pointwiseInitializer); + config['depthwiseRegularizer'] = + serializeRegularizer(this.depthwiseRegularizer); + config['pointwiseRegularizer'] = + serializeRegularizer(this.pointwiseRegularizer); + config['depthwiseConstraint'] = + serializeConstraint(this.depthwiseConstraint); + config['pointwiseConstraint'] = + serializeConstraint(this.pointwiseConstraint); + return config; + } + } + /** @nocollapse */ + SeparableConv.className = 'SeparableConv'; + class SeparableConv2D extends SeparableConv { + constructor(args) { + super(2, args); + } + } + /** @nocollapse */ + SeparableConv2D.className = 'SeparableConv2D'; + registerClass(SeparableConv2D); + class 
Conv1D extends Conv { + constructor(args) { + super(1, args); + Conv1D.verifyArgs(args); + this.inputSpec = [{ ndim: 3 }]; + } + getConfig() { + const config = super.getConfig(); + delete config['rank']; + delete config['dataFormat']; + return config; + } + static verifyArgs(args) { + // config.kernelSize must be a number or array of numbers. + if (typeof args.kernelSize !== 'number' && + !checkArrayTypeAndLength(args.kernelSize, 'number', 1, 1)) { + throw new ValueError(`Conv1D expects config.kernelSize to be number or number[] with ` + + `length 1, but received ${JSON.stringify(args.kernelSize)}.`); + } + } + } + /** @nocollapse */ + Conv1D.className = 'Conv1D'; + registerClass(Conv1D); + class Cropping2D extends Layer { + constructor(args) { + super(args); + if (typeof args.cropping === 'number') { + this.cropping = + [[args.cropping, args.cropping], [args.cropping, args.cropping]]; + } + else if (typeof args.cropping[0] === 'number') { + this.cropping = [ + [args.cropping[0], args.cropping[0]], + [args.cropping[1], args.cropping[1]] + ]; + } + else { + this.cropping = args.cropping; + } + this.dataFormat = + args.dataFormat === undefined ? 
'channelsLast' : args.dataFormat; + this.inputSpec = [{ ndim: 4 }]; + } + computeOutputShape(inputShape) { + if (this.dataFormat === 'channelsFirst') { + return [ + inputShape[0], inputShape[1], + inputShape[2] - this.cropping[0][0] - this.cropping[0][1], + inputShape[3] - this.cropping[1][0] - this.cropping[1][1] + ]; + } + else { + return [ + inputShape[0], + inputShape[1] - this.cropping[0][0] - this.cropping[0][1], + inputShape[2] - this.cropping[1][0] - this.cropping[1][1], inputShape[3] + ]; + } + } + call(inputs, kwargs) { + return tidy(() => { + inputs = getExactlyOneTensor(inputs); + if (this.dataFormat === 'channelsLast') { + const hSliced = sliceAlongAxis(inputs, this.cropping[0][0], inputs.shape[1] - this.cropping[0][0] - this.cropping[0][1], 2); + return sliceAlongAxis(hSliced, this.cropping[1][0], inputs.shape[2] - this.cropping[1][1] - this.cropping[1][0], 3); + } + else { + const hSliced = sliceAlongAxis(inputs, this.cropping[0][0], inputs.shape[2] - this.cropping[0][0] - this.cropping[0][1], 3); + return sliceAlongAxis(hSliced, this.cropping[1][0], inputs.shape[3] - this.cropping[1][1] - this.cropping[1][0], 4); + } + }); + } + getConfig() { + const config = { cropping: this.cropping, dataFormat: this.dataFormat }; + const baseConfig = super.getConfig(); + Object.assign(config, baseConfig); + return config; + } + } + /** @nocollapse */ + Cropping2D.className = 'Cropping2D'; + registerClass(Cropping2D); + class UpSampling2D extends Layer { + constructor(args) { + super(args); + this.DEFAULT_SIZE = [2, 2]; + this.inputSpec = [{ ndim: 4 }]; + this.size = args.size == null ? this.DEFAULT_SIZE : args.size; + this.dataFormat = + args.dataFormat == null ? 'channelsLast' : args.dataFormat; + checkDataFormat(this.dataFormat); + this.interpolation = + args.interpolation == null ? 
'nearest' : args.interpolation; + checkInterpolationFormat(this.interpolation); + } + computeOutputShape(inputShape) { + if (this.dataFormat === 'channelsFirst') { + const height = inputShape[2] == null ? null : this.size[0] * inputShape[2]; + const width = inputShape[3] == null ? null : this.size[1] * inputShape[3]; + return [inputShape[0], inputShape[1], height, width]; + } + else { + const height = inputShape[1] == null ? null : this.size[0] * inputShape[1]; + const width = inputShape[2] == null ? null : this.size[1] * inputShape[2]; + return [inputShape[0], height, width, inputShape[3]]; + } + } + call(inputs, kwargs) { + return tidy(() => { + let input = getExactlyOneTensor(inputs); + const inputShape = input.shape; + if (this.dataFormat === 'channelsFirst') { + input = transpose$2(input, [0, 2, 3, 1]); + const height = this.size[0] * inputShape[2]; + const width = this.size[1] * inputShape[3]; + const resized = this.interpolation === 'nearest' ? + image$1.resizeNearestNeighbor(input, [height, width]) : + image$1.resizeBilinear(input, [height, width]); + return transpose$2(resized, [0, 3, 1, 2]); + } + else { + const height = this.size[0] * inputShape[1]; + const width = this.size[1] * inputShape[2]; + return this.interpolation === 'nearest' ? + image$1.resizeNearestNeighbor(input, [height, width]) : + image$1.resizeBilinear(input, [height, width]); + } + }); + } + getConfig() { + const config = { + size: this.size, + dataFormat: this.dataFormat, + interpolation: this.interpolation + }; + const baseConfig = super.getConfig(); + Object.assign(config, baseConfig); + return config; + } + } + /** @nocollapse */ + UpSampling2D.className = 'UpSampling2D'; + registerClass(UpSampling2D); + + /** + * @license + * Copyright 2018 Google LLC + * + * Use of this source code is governed by an MIT-style + * license that can be found in the LICENSE file or at + * https://opensource.org/licenses/MIT. 
+ * ============================================================================= + */ + /** + * 2D convolution with separable filters. + * @param x Input tensor. + * @param depthwiseKernel Convolution kernel for depthwise convolution. + * @param strides Strides (Array of two integers). + * @param padding Padding model. + * @param dataFormat Data format. + * @param dilationRate Array of two integers, dilation rates for the separable + * convolution. + * @returns Output tensor. + * @throws ValueError If depthwiseKernel is not a 4D array. + */ + function depthwiseConv2d$1(x, depthwiseKernel, strides = [1, 1], padding = 'valid', dataFormat, dilationRate) { + return tidy(() => { + if (dataFormat == null) { + dataFormat = imageDataFormat(); + } + checkDataFormat(dataFormat); + let y = preprocessConv2DInput(x, dataFormat); + if (x.rank !== 4) { + throw new ValueError(`Input for depthwiseConv2d is required to be 4-D, but is instead ` + + `${x.rank}-D`); + } + if (depthwiseKernel.rank !== 4) { + throw new ValueError(`depthwiseKernel is required to be 4-D, but is instead ` + + `${depthwiseKernel.rank}-D`); + } + y = depthwiseConv2d$3(y, depthwiseKernel, strides, padding === 'same' ? 'same' : 'valid', 'NHWC', dilationRate); + if (dataFormat === 'channelsFirst') { + y = transpose$2(y, [0, 3, 1, 2]); + } + return y; + }); + } + class DepthwiseConv2D extends BaseConv { + constructor(args) { + super(2, args); + this.depthwiseKernel = null; + this.depthMultiplier = + args.depthMultiplier == null ? 1 : args.depthMultiplier; + this.depthwiseInitializer = getInitializer(args.depthwiseInitializer || this.DEFAULT_KERNEL_INITIALIZER); + this.depthwiseConstraint = getConstraint(args.depthwiseConstraint); + this.depthwiseRegularizer = getRegularizer(args.depthwiseRegularizer); + } + build(inputShape) { + inputShape = getExactlyOneShape(inputShape); + if (inputShape.length < 4) { + throw new ValueError(`Inputs to DepthwiseConv2D should have rank 4. 
` + + `Received input shape: ${JSON.stringify(inputShape)}.`); + } + const channelAxis = this.dataFormat === 'channelsFirst' ? 1 : 3; + if (inputShape[channelAxis] == null || inputShape[channelAxis] < 0) { + throw new ValueError('The channel dimension of the inputs to DepthwiseConv2D should ' + + `be defined, but is not (${inputShape[channelAxis]}).`); + } + const inputDim = inputShape[channelAxis]; + const depthwiseKernelShape = [ + this.kernelSize[0], this.kernelSize[1], inputDim, this.depthMultiplier + ]; + this.depthwiseKernel = this.addWeight('depthwise_kernel', depthwiseKernelShape, null, this.depthwiseInitializer, this.depthwiseRegularizer, true, this.depthwiseConstraint); + if (this.useBias) { + this.bias = this.addWeight('bias', [inputDim * this.depthMultiplier], null, this.biasInitializer, this.biasRegularizer, true, this.biasConstraint); + } + else { + this.bias = null; + } + this.built = true; + } + call(inputs, kwargs) { + return tidy(() => { + inputs = getExactlyOneTensor(inputs); + let outputs = depthwiseConv2d$1(inputs, this.depthwiseKernel.read(), this.strides, this.padding, this.dataFormat, null); + // TODO(cais): Add support for dilation. + if (this.useBias) { + outputs = biasAdd(outputs, this.bias.read(), this.dataFormat); + } + if (this.activation != null) { + outputs = this.activation.apply(outputs); + } + return outputs; + }); + } + computeOutputShape(inputShape) { + inputShape = getExactlyOneShape(inputShape); + const rows = this.dataFormat === 'channelsFirst' ? inputShape[2] : inputShape[1]; + const cols = this.dataFormat === 'channelsFirst' ? inputShape[3] : inputShape[2]; + const outFilters = this.dataFormat === 'channelsFirst' ? 
+ inputShape[1] * this.depthMultiplier : + inputShape[3] * this.depthMultiplier; + const outRows = convOutputLength(rows, this.kernelSize[0], this.padding, this.strides[0]); + const outCols = convOutputLength(cols, this.kernelSize[1], this.padding, this.strides[1]); + if (this.dataFormat === 'channelsFirst') { + return [inputShape[0], outFilters, outRows, outCols]; + } + else { + // In this case, assume 'channelsLast'. + return [inputShape[0], outRows, outCols, outFilters]; + } + } + getConfig() { + const config = super.getConfig(); + config['depthMultiplier'] = this.depthMultiplier; + config['depthwiseInitializer'] = + serializeInitializer(this.depthwiseInitializer); + config['depthwiseRegularizer'] = + serializeRegularizer(this.depthwiseRegularizer); + config['depthwiseConstraint'] = + serializeConstraint(this.depthwiseRegularizer); + return config; + } + } + /** @nocollapse */ + DepthwiseConv2D.className = 'DepthwiseConv2D'; + registerClass(DepthwiseConv2D); + + /** + * @license + * Copyright 2018 Google LLC + * + * Use of this source code is governed by an MIT-style + * license that can be found in the LICENSE file or at + * https://opensource.org/licenses/MIT. + * ============================================================================= + */ + /** + * Standardize `apply()` args to a single list of tensor inputs. + * + * When running a model loaded from file, the input tensors `initialState` and + * `constants` are passed to `RNN.apply()` as part of `inputs` instead of the + * dedicated kwargs fields. `inputs` consists of + * `[inputs, initialState0, initialState1, ..., constant0, constant1]` in this + * case. + * This method makes sure that arguments are + * separated and that `initialState` and `constants` are `Array`s of tensors + * (or None). + * + * @param inputs Tensor or `Array` of tensors. + * @param initialState Tensor or `Array` of tensors or `null`/`undefined`. + * @param constants Tensor or `Array` of tensors or `null`/`undefined`. 
+ * @returns An object consisting of + * inputs: A tensor. + * initialState: `Array` of tensors or `null`. + * constants: `Array` of tensors or `null`. + * @throws ValueError, if `inputs` is an `Array` but either `initialState` or + * `constants` is provided. + */ + function standardizeArgs(inputs, initialState, constants, numConstants) { + if (Array.isArray(inputs)) { + if (initialState != null || constants != null) { + throw new ValueError('When inputs is an array, neither initialState or constants ' + + 'should be provided'); + } + if (numConstants != null) { + constants = inputs.slice(inputs.length - numConstants, inputs.length); + inputs = inputs.slice(0, inputs.length - numConstants); + } + if (inputs.length > 1) { + initialState = inputs.slice(1, inputs.length); + } + inputs = inputs[0]; + } + function toListOrNull(x) { + if (x == null || Array.isArray(x)) { + return x; + } + else { + return [x]; + } + } + initialState = toListOrNull(initialState); + constants = toListOrNull(constants); + return { inputs, initialState, constants }; + } + /** + * Iterates over the time dimension of a tensor. + * + * @param stepFunction RNN step function. + * Parameters: + * inputs: tensor with shape `[samples, ...]` (no time dimension), + * representing input for the batch of samples at a certain time step. + * states: an Array of tensors. + * Returns: + * outputs: tensor with shape `[samples, outputDim]` (no time dimension). + * newStates: list of tensors, same length and shapes as `states`. The first + * state in the list must be the output tensor at the previous timestep. + * @param inputs Tensor of temporal data of shape `[samples, time, ...]` (at + * least 3D). + * @param initialStates Tensor with shape `[samples, outputDim]` (no time + * dimension), containing the initial values of the states used in the step + * function. + * @param goBackwards If `true`, do the iteration over the time dimension in + * reverse order and return the reversed sequence. 
+ * @param mask Binary tensor with shape `[sample, time, 1]`, with a zero for + * every element that is masked. + * @param constants An Array of constant values passed at each step. + * @param unroll Whether to unroll the RNN or to use a symbolic loop. *Not* + * applicable to this imperative deeplearn.js backend. Its value is ignored. + * @param needPerStepOutputs Whether the per-step outputs are to be + * concatenated into a single tensor and returned (as the second return + * value). Default: `false`. This arg is included so that the relatively + * expensive concatenation of the stepwise outputs can be omitted unless + * the stepwise outputs need to be kept (e.g., for an LSTM layer of which + * `returnSequence` is `true`.) + * @returns An Array: `[lastOutput, outputs, newStates]`. + * lastOutput: the lastest output of the RNN, of shape `[samples, ...]`. + * outputs: tensor with shape `[samples, time, ...]` where each entry + * `output[s, t]` is the output of the step function at time `t` for sample + * `s`. This return value is provided if and only if the + * `needPerStepOutputs` is set as `true`. If it is set as `false`, this + * return value will be `undefined`. + * newStates: Array of tensors, latest states returned by the step function, + * of shape `(samples, ...)`. + * @throws ValueError If input dimension is less than 3. + * + * TODO(nielsene): This needs to be tidy-ed. + */ + function rnn$1(stepFunction, inputs, initialStates, goBackwards = false, mask, constants, unroll = false, needPerStepOutputs = false) { + return tidy(() => { + const ndim = inputs.shape.length; + if (ndim < 3) { + throw new ValueError(`Input should be at least 3D, but is ${ndim}D.`); + } + // Transpose to time-major, i.e., from [batch, time, ...] to [time, batch, + // ...]. 
+ const axes = [1, 0].concat(range$2(2, ndim)); + inputs = transpose$2(inputs, axes); + if (constants != null) { + throw new NotImplementedError('The rnn() functoin of the deeplearn.js backend does not support ' + + 'constants yet.'); + } + // Porting Note: the unroll option is ignored by the imperative backend. + if (unroll) { + console.warn('Backend rnn(): the unroll = true option is not applicable to the ' + + 'imperative deeplearn.js backend.'); + } + if (mask != null) { + mask = cast$3(cast$3(mask, 'bool'), 'float32'); + if (mask.rank === ndim - 1) { + mask = expandDims$3(mask, -1); + } + mask = transpose$2(mask, axes); + } + if (goBackwards) { + inputs = reverse$2(inputs, 0); + if (mask != null) { + mask = reverse$2(mask, 0); + } + } + // Porting Note: PyKeras with TensorFlow backend uses a symbolic loop + // (tf.while_loop). But for the imperative deeplearn.js backend, we just + // use the usual TypeScript control flow to iterate over the time steps in + // the inputs. + // Porting Note: PyKeras patches a "_use_learning_phase" attribute to + // outputs. + // This is not idiomatic in TypeScript. The info regarding whether we are + // in a learning (i.e., training) phase for RNN is passed in a different + // way. + const perStepOutputs = []; + let lastOutput; + let states = initialStates; + const timeSteps = inputs.shape[0]; + const perStepInputs = unstack(inputs); + let perStepMasks; + if (mask != null) { + perStepMasks = unstack(mask); + } + for (let t = 0; t < timeSteps; ++t) { + const currentInput = perStepInputs[t]; + const stepOutputs = tidy(() => stepFunction(currentInput, states)); + if (mask == null) { + lastOutput = stepOutputs[0]; + states = stepOutputs[1]; + } + else { + const maskedOutputs = tidy(() => { + const stepMask = perStepMasks[t]; + const negStepMask = sub$2(onesLike$3(stepMask), stepMask); + // TODO(cais): Would tfc.where() be better for performance? 
+ const output = add$3(mul(stepOutputs[0], stepMask), mul(states[0], negStepMask)); + const newStates = states.map((state, i) => { + return add$3(mul(stepOutputs[1][i], stepMask), mul(state, negStepMask)); + }); + return { output, newStates }; + }); + lastOutput = maskedOutputs.output; + states = maskedOutputs.newStates; + } + if (needPerStepOutputs) { + perStepOutputs.push(lastOutput); + } + } + let outputs; + if (needPerStepOutputs) { + const axis = 1; + outputs = stack(perStepOutputs, axis); + } + return [lastOutput, outputs, states]; + }); + } + class RNN extends Layer { + constructor(args) { + super(args); + let cell; + if (args.cell == null) { + throw new ValueError('cell property is missing for the constructor of RNN.'); + } + else if (Array.isArray(args.cell)) { + cell = new StackedRNNCells({ cells: args.cell }); + } + else { + cell = args.cell; + } + if (cell.stateSize == null) { + throw new ValueError('The RNN cell should have an attribute `stateSize` (tuple of ' + + 'integers, one integer per RNN state).'); + } + this.cell = cell; + this.returnSequences = + args.returnSequences == null ? false : args.returnSequences; + this.returnState = args.returnState == null ? false : args.returnState; + this.goBackwards = args.goBackwards == null ? false : args.goBackwards; + this._stateful = args.stateful == null ? false : args.stateful; + this.unroll = args.unroll == null ? false : args.unroll; + this.supportsMasking = true; + this.inputSpec = [new InputSpec({ ndim: 3 })]; + this.stateSpec = null; + this.states_ = null; + // TODO(cais): Add constantsSpec and numConstants. + this.numConstants = null; + // TODO(cais): Look into the use of initial_state in the kwargs of the + // constructor. + this.keptStates = []; + } + // Porting Note: This is the equivalent of `RNN.states` property getter in + // PyKeras. + getStates() { + if (this.states_ == null) { + const numStates = Array.isArray(this.cell.stateSize) ? 
this.cell.stateSize.length : 1; + return range$2(0, numStates).map(x => null); + } + else { + return this.states_; + } + } + // Porting Note: This is the equivalent of the `RNN.states` property setter in + // PyKeras. + setStates(states) { + this.states_ = states; + } + computeOutputShape(inputShape) { + if (isArrayOfShapes(inputShape)) { + inputShape = inputShape[0]; + } + inputShape = inputShape; + // TODO(cais): Remove the casting once stacked RNN cells become supported. + let stateSize = this.cell.stateSize; + if (!Array.isArray(stateSize)) { + stateSize = [stateSize]; + } + const outputDim = stateSize[0]; + let outputShape; + if (this.returnSequences) { + outputShape = [inputShape[0], inputShape[1], outputDim]; + } + else { + outputShape = [inputShape[0], outputDim]; + } + if (this.returnState) { + const stateShape = []; + for (const dim of stateSize) { + stateShape.push([inputShape[0], dim]); + } + return [outputShape].concat(stateShape); + } + else { + return outputShape; + } + } + computeMask(inputs, mask) { + return tidy(() => { + if (Array.isArray(mask)) { + mask = mask[0]; + } + const outputMask = this.returnSequences ? mask : null; + if (this.returnState) { + const stateMask = this.states.map(s => null); + return [outputMask].concat(stateMask); + } + else { + return outputMask; + } + }); + } + /** + * Get the current state tensors of the RNN. + * + * If the state hasn't been set, return an array of `null`s of the correct + * length. + */ + get states() { + if (this.states_ == null) { + const numStates = Array.isArray(this.cell.stateSize) ? this.cell.stateSize.length : 1; + const output = []; + for (let i = 0; i < numStates; ++i) { + output.push(null); + } + return output; + } + else { + return this.states_; + } + } + set states(s) { + this.states_ = s; + } + build(inputShape) { + // Note inputShape will be an Array of Shapes of initial states and + // constants if these are passed in apply(). 
+ const constantShape = null; + if (this.numConstants != null) { + throw new NotImplementedError('Constants support is not implemented in RNN yet.'); + } + if (isArrayOfShapes(inputShape)) { + inputShape = inputShape[0]; + } + inputShape = inputShape; + const batchSize = this.stateful ? inputShape[0] : null; + const inputDim = inputShape.slice(2); + this.inputSpec[0] = new InputSpec({ shape: [batchSize, null, ...inputDim] }); + // Allow cell (if RNNCell Layer) to build before we set or validate + // stateSpec. + const stepInputShape = [inputShape[0]].concat(inputShape.slice(2)); + if (constantShape != null) { + throw new NotImplementedError('Constants support is not implemented in RNN yet.'); + } + else { + this.cell.build(stepInputShape); + } + // Set or validate stateSpec. + let stateSize; + if (Array.isArray(this.cell.stateSize)) { + stateSize = this.cell.stateSize; + } + else { + stateSize = [this.cell.stateSize]; + } + if (this.stateSpec != null) { + if (!arraysEqual(this.stateSpec.map(spec => spec.shape[spec.shape.length - 1]), stateSize)) { + throw new ValueError(`An initialState was passed that is not compatible with ` + + `cell.stateSize. Received stateSpec=${this.stateSpec}; ` + + `However cell.stateSize is ${this.cell.stateSize}`); + } + } + else { + this.stateSpec = + stateSize.map(dim => new InputSpec({ shape: [null, dim] })); + } + if (this.stateful) { + this.resetStates(); + } + } + /** + * Reset the state tensors of the RNN. + * + * If the `states` argument is `undefined` or `null`, will set the + * state tensor(s) of the RNN to all-zero tensors of the appropriate + * shape(s). + * + * If `states` is provided, will set the state tensors of the RNN to its + * value. + * + * @param states Optional externally-provided initial states. + * @param training Whether this call is done during training. For stateful + * RNNs, this affects whether the old states are kept or discarded. 
In + * particular, if `training` is `true`, the old states will be kept so + * that subsequent backpropgataion through time (BPTT) may work properly. + * Else, the old states will be discarded. + */ + resetStates(states, training = false) { + tidy(() => { + if (!this.stateful) { + throw new AttributeError('Cannot call resetStates() on an RNN Layer that is not stateful.'); + } + const batchSize = this.inputSpec[0].shape[0]; + if (batchSize == null) { + throw new ValueError('If an RNN is stateful, it needs to know its batch size. Specify ' + + 'the batch size of your input tensors: \n' + + '- If using a Sequential model, specify the batch size by ' + + 'passing a `batchInputShape` option to your first layer.\n' + + '- If using the functional API, specify the batch size by ' + + 'passing a `batchShape` option to your Input layer.'); + } + // Initialize state if null. + if (this.states_ == null) { + if (Array.isArray(this.cell.stateSize)) { + this.states_ = + this.cell.stateSize.map(dim => zeros$2([batchSize, dim])); + } + else { + this.states_ = [zeros$2([batchSize, this.cell.stateSize])]; + } + } + else if (states == null) { + // Dispose old state tensors. + dispose(this.states_); + // For stateful RNNs, fully dispose kept old states. + if (this.keptStates != null) { + dispose(this.keptStates); + this.keptStates = []; + } + if (Array.isArray(this.cell.stateSize)) { + this.states_ = + this.cell.stateSize.map(dim => zeros$2([batchSize, dim])); + } + else { + this.states_[0] = zeros$2([batchSize, this.cell.stateSize]); + } + } + else { + if (!Array.isArray(states)) { + states = [states]; + } + if (states.length !== this.states_.length) { + throw new ValueError(`Layer ${this.name} expects ${this.states_.length} state(s), ` + + `but it received ${states.length} state value(s). Input ` + + `received: ${states}`); + } + if (training === true) { + // Store old state tensors for complete disposal later, i.e., during + // the next no-arg call to this method. 
We do not dispose the old + // states immediately because that BPTT (among other things) require + // them. + this.keptStates.push(this.states_.slice()); + } + else { + dispose(this.states_); + } + for (let index = 0; index < this.states_.length; ++index) { + const value = states[index]; + const dim = Array.isArray(this.cell.stateSize) ? + this.cell.stateSize[index] : + this.cell.stateSize; + const expectedShape = [batchSize, dim]; + if (!arraysEqual(value.shape, expectedShape)) { + throw new ValueError(`State ${index} is incompatible with layer ${this.name}: ` + + `expected shape=${expectedShape}, received shape=${value.shape}`); + } + this.states_[index] = value; + } + } + this.states_ = this.states_.map(state => keep(state.clone())); + }); + } + apply(inputs, kwargs) { + // TODO(cais): Figure out whether initialState is in kwargs or inputs. + let initialState = kwargs == null ? null : kwargs['initialState']; + let constants = kwargs == null ? null : kwargs['constants']; + if (kwargs == null) { + kwargs = {}; + } + const standardized = standardizeArgs(inputs, initialState, constants, this.numConstants); + inputs = standardized.inputs; + initialState = standardized.initialState; + constants = standardized.constants; + // If any of `initial_state` or `constants` are specified and are + // `tf.SymbolicTensor`s, then add them to the inputs and temporarily modify + // the input_spec to include them. + let additionalInputs = []; + let additionalSpecs = []; + if (initialState != null) { + kwargs['initialState'] = initialState; + additionalInputs = additionalInputs.concat(initialState); + this.stateSpec = []; + for (const state of initialState) { + this.stateSpec.push(new InputSpec({ shape: state.shape })); + } + // TODO(cais): Use the following instead. 
+ // this.stateSpec = initialState.map(state => new InputSpec({shape: + // state.shape})); + additionalSpecs = additionalSpecs.concat(this.stateSpec); + } + if (constants != null) { + kwargs['constants'] = constants; + additionalInputs = additionalInputs.concat(constants); + // TODO(cais): Add this.constantsSpec. + this.numConstants = constants.length; + } + const isTensor = additionalInputs[0] instanceof SymbolicTensor; + if (isTensor) { + // Compute full input spec, including state and constants. + const fullInput = [inputs].concat(additionalInputs); + const fullInputSpec = this.inputSpec.concat(additionalSpecs); + // Perform the call with temporarily replaced inputSpec. + const originalInputSpec = this.inputSpec; + this.inputSpec = fullInputSpec; + const output = super.apply(fullInput, kwargs); + this.inputSpec = originalInputSpec; + return output; + } + else { + return super.apply(inputs, kwargs); + } + } + // tslint:disable-next-line:no-any + call(inputs, kwargs) { + // Input shape: `[samples, time (padded with zeros), input_dim]`. + // Note that the .build() method of subclasses **must** define + // this.inputSpec and this.stateSpec owith complete input shapes. + return tidy(() => { + const mask = kwargs == null ? null : kwargs['mask']; + const training = kwargs == null ? null : kwargs['training']; + let initialState = kwargs == null ? null : kwargs['initialState']; + inputs = getExactlyOneTensor(inputs); + if (initialState == null) { + if (this.stateful) { + initialState = this.states_; + } + else { + initialState = this.getInitialState(inputs); + } + } + const numStates = Array.isArray(this.cell.stateSize) ? 
this.cell.stateSize.length : 1; + if (initialState.length !== numStates) { + throw new ValueError(`RNN Layer has ${numStates} state(s) but was passed ` + + `${initialState.length} initial state(s).`); + } + if (this.unroll) { + console.warn('Ignoring unroll = true for RNN layer, due to imperative backend.'); + } + const cellCallKwargs = { training }; + // TODO(cais): Add support for constants. + const step = (inputs, states) => { + // `inputs` and `states` are concatenated to form a single `Array` of + // `tf.Tensor`s as the input to `cell.call()`. + const outputs = this.cell.call([inputs].concat(states), cellCallKwargs); + // Marshall the return value into output and new states. + return [outputs[0], outputs.slice(1)]; + }; + // TODO(cais): Add support for constants. + const rnnOutputs = rnn$1(step, inputs, initialState, this.goBackwards, mask, null, this.unroll, this.returnSequences); + const lastOutput = rnnOutputs[0]; + const outputs = rnnOutputs[1]; + const states = rnnOutputs[2]; + if (this.stateful) { + this.resetStates(states, training); + } + const output = this.returnSequences ? outputs : lastOutput; + // TODO(cais): Property set learning phase flag. + if (this.returnState) { + return [output].concat(states); + } + else { + return output; + } + }); + } + getInitialState(inputs) { + return tidy(() => { + // Build an all-zero tensor of shape [samples, outputDim]. + // [Samples, timeSteps, inputDim]. + let initialState = zeros$2(inputs.shape); + // [Samples]. + initialState = sum$3(initialState, [1, 2]); + initialState = expandDims$2(initialState); // [Samples, 1]. + if (Array.isArray(this.cell.stateSize)) { + return this.cell.stateSize.map(dim => dim > 1 ? tile$2(initialState, [1, dim]) : initialState); + } + else { + return this.cell.stateSize > 1 ? 
+ [tile$2(initialState, [1, this.cell.stateSize])] : + [initialState]; + } + }); + } + get trainableWeights() { + if (!this.trainable) { + return []; + } + // Porting Note: In TypeScript, `this` is always an instance of `Layer`. + return this.cell.trainableWeights; + } + get nonTrainableWeights() { + // Porting Note: In TypeScript, `this` is always an instance of `Layer`. + if (!this.trainable) { + return this.cell.weights; + } + return this.cell.nonTrainableWeights; + } + setFastWeightInitDuringBuild(value) { + super.setFastWeightInitDuringBuild(value); + if (this.cell != null) { + this.cell.setFastWeightInitDuringBuild(value); + } + } + getConfig() { + const baseConfig = super.getConfig(); + const config = { + returnSequences: this.returnSequences, + returnState: this.returnState, + goBackwards: this.goBackwards, + stateful: this.stateful, + unroll: this.unroll, + }; + if (this.numConstants != null) { + config['numConstants'] = this.numConstants; + } + const cellConfig = this.cell.getConfig(); + if (this.getClassName() === RNN.className) { + config['cell'] = { + 'className': this.cell.getClassName(), + 'config': cellConfig, + }; + } + // this order is necessary, to prevent cell name from replacing layer name + return Object.assign(Object.assign(Object.assign({}, cellConfig), baseConfig), config); + } + /** @nocollapse */ + static fromConfig(cls, config, customObjects = {}) { + const cellConfig = config['cell']; + const cell = deserialize(cellConfig, customObjects); + return new cls(Object.assign(config, { cell })); + } + } + /** @nocollapse */ + RNN.className = 'RNN'; + registerClass(RNN); + // Porting Note: This is a common parent class for RNN cells. There is no + // equivalent of this in PyKeras. Having a common parent class forgoes the + // need for `has_attr(cell, ...)` checks or its TypeScript equivalent. + /** + * An RNNCell layer. 
+ * + * @doc {heading: 'Layers', subheading: 'Classes'} + */ + class RNNCell extends Layer { + } + class SimpleRNNCell extends RNNCell { + constructor(args) { + super(args); + this.DEFAULT_ACTIVATION = 'tanh'; + this.DEFAULT_KERNEL_INITIALIZER = 'glorotNormal'; + this.DEFAULT_RECURRENT_INITIALIZER = 'orthogonal'; + this.DEFAULT_BIAS_INITIALIZER = 'zeros'; + this.units = args.units; + assertPositiveInteger(this.units, `units`); + this.activation = getActivation(args.activation == null ? this.DEFAULT_ACTIVATION : args.activation); + this.useBias = args.useBias == null ? true : args.useBias; + this.kernelInitializer = getInitializer(args.kernelInitializer || this.DEFAULT_KERNEL_INITIALIZER); + this.recurrentInitializer = getInitializer(args.recurrentInitializer || this.DEFAULT_RECURRENT_INITIALIZER); + this.biasInitializer = + getInitializer(args.biasInitializer || this.DEFAULT_BIAS_INITIALIZER); + this.kernelRegularizer = getRegularizer(args.kernelRegularizer); + this.recurrentRegularizer = getRegularizer(args.recurrentRegularizer); + this.biasRegularizer = getRegularizer(args.biasRegularizer); + this.kernelConstraint = getConstraint(args.kernelConstraint); + this.recurrentConstraint = getConstraint(args.recurrentConstraint); + this.biasConstraint = getConstraint(args.biasConstraint); + this.dropout = min$2([1, max$2([0, args.dropout == null ? 0 : args.dropout])]); + this.recurrentDropout = min$2([ + 1, + max$2([0, args.recurrentDropout == null ? 0 : args.recurrentDropout]) + ]); + this.dropoutFunc = args.dropoutFunc; + this.stateSize = this.units; + this.dropoutMask = null; + this.recurrentDropoutMask = null; + } + build(inputShape) { + inputShape = getExactlyOneShape(inputShape); + // TODO(cais): Use regularizer. 
+ this.kernel = this.addWeight('kernel', [inputShape[inputShape.length - 1], this.units], null, this.kernelInitializer, this.kernelRegularizer, true, this.kernelConstraint); + this.recurrentKernel = this.addWeight('recurrent_kernel', [this.units, this.units], null, this.recurrentInitializer, this.recurrentRegularizer, true, this.recurrentConstraint); + if (this.useBias) { + this.bias = this.addWeight('bias', [this.units], null, this.biasInitializer, this.biasRegularizer, true, this.biasConstraint); + } + else { + this.bias = null; + } + this.built = true; + } + // Porting Note: PyKeras' equivalent of this method takes two tensor inputs: + // `inputs` and `states`. Here, the two tensors are combined into an + // `Tensor[]` Array as the first input argument. + // Similarly, PyKeras' equivalent of this method returns two values: + // `output` and `[output]`. Here the two are combined into one length-2 + // `Tensor[]`, consisting of `output` repeated. + call(inputs, kwargs) { + return tidy(() => { + inputs = inputs; + if (inputs.length !== 2) { + throw new ValueError(`SimpleRNNCell expects 2 input Tensors, got ${inputs.length}.`); + } + let prevOutput = inputs[1]; + inputs = inputs[0]; + const training = kwargs['training'] == null ? 
false : kwargs['training']; + if (0 < this.dropout && this.dropout < 1 && this.dropoutMask == null) { + this.dropoutMask = generateDropoutMask({ + ones: () => onesLike$3(inputs), + rate: this.dropout, + training, + dropoutFunc: this.dropoutFunc, + }); + } + if (0 < this.recurrentDropout && this.recurrentDropout < 1 && + this.recurrentDropoutMask == null) { + this.recurrentDropoutMask = generateDropoutMask({ + ones: () => onesLike$3(prevOutput), + rate: this.recurrentDropout, + training, + dropoutFunc: this.dropoutFunc, + }); + } + let h; + const dpMask = this.dropoutMask; + const recDpMask = this.recurrentDropoutMask; + if (dpMask != null) { + h = dot$1(mul(inputs, dpMask), this.kernel.read()); + } + else { + h = dot$1(inputs, this.kernel.read()); + } + if (this.bias != null) { + h = biasAdd(h, this.bias.read()); + } + if (recDpMask != null) { + prevOutput = mul(prevOutput, recDpMask); + } + let output = add$3(h, dot$1(prevOutput, this.recurrentKernel.read())); + if (this.activation != null) { + output = this.activation.apply(output); + } + // TODO(cais): Properly set learning phase on output tensor? 
+ return [output, output]; + }); + } + getConfig() { + const baseConfig = super.getConfig(); + const config = { + units: this.units, + activation: serializeActivation(this.activation), + useBias: this.useBias, + kernelInitializer: serializeInitializer(this.kernelInitializer), + recurrentInitializer: serializeInitializer(this.recurrentInitializer), + biasInitializer: serializeInitializer(this.biasInitializer), + kernelRegularizer: serializeRegularizer(this.kernelRegularizer), + recurrentRegularizer: serializeRegularizer(this.recurrentRegularizer), + biasRegularizer: serializeRegularizer(this.biasRegularizer), + activityRegularizer: serializeRegularizer(this.activityRegularizer), + kernelConstraint: serializeConstraint(this.kernelConstraint), + recurrentConstraint: serializeConstraint(this.recurrentConstraint), + biasConstraint: serializeConstraint(this.biasConstraint), + dropout: this.dropout, + recurrentDropout: this.recurrentDropout, + }; + return Object.assign(Object.assign({}, baseConfig), config); + } + } + /** @nocollapse */ + SimpleRNNCell.className = 'SimpleRNNCell'; + registerClass(SimpleRNNCell); + class SimpleRNN extends RNN { + constructor(args) { + args.cell = new SimpleRNNCell(args); + super(args); + // TODO(cais): Add activityRegularizer. + } + call(inputs, kwargs) { + return tidy(() => { + if (this.cell.dropoutMask != null) { + dispose(this.cell.dropoutMask); + this.cell.dropoutMask = null; + } + if (this.cell.recurrentDropoutMask != null) { + dispose(this.cell.recurrentDropoutMask); + this.cell.recurrentDropoutMask = null; + } + const mask = kwargs == null ? null : kwargs['mask']; + const training = kwargs == null ? null : kwargs['training']; + const initialState = kwargs == null ? 
null : kwargs['initialState']; + return super.call(inputs, { mask, training, initialState }); + }); + } + /** @nocollapse */ + static fromConfig(cls, config) { + return new cls(config); + } + } + /** @nocollapse */ + SimpleRNN.className = 'SimpleRNN'; + registerClass(SimpleRNN); + class GRUCell extends RNNCell { + constructor(args) { + super(args); + this.DEFAULT_ACTIVATION = 'tanh'; + this.DEFAULT_RECURRENT_ACTIVATION = 'hardSigmoid'; + this.DEFAULT_KERNEL_INITIALIZER = 'glorotNormal'; + this.DEFAULT_RECURRENT_INITIALIZER = 'orthogonal'; + this.DEFAULT_BIAS_INITIALIZER = 'zeros'; + if (args.resetAfter) { + throw new ValueError(`GRUCell does not support reset_after parameter set to true.`); + } + this.units = args.units; + assertPositiveInteger(this.units, 'units'); + this.activation = getActivation(args.activation === undefined ? this.DEFAULT_ACTIVATION : + args.activation); + this.recurrentActivation = getActivation(args.recurrentActivation === undefined ? + this.DEFAULT_RECURRENT_ACTIVATION : + args.recurrentActivation); + this.useBias = args.useBias == null ? true : args.useBias; + this.kernelInitializer = getInitializer(args.kernelInitializer || this.DEFAULT_KERNEL_INITIALIZER); + this.recurrentInitializer = getInitializer(args.recurrentInitializer || this.DEFAULT_RECURRENT_INITIALIZER); + this.biasInitializer = + getInitializer(args.biasInitializer || this.DEFAULT_BIAS_INITIALIZER); + this.kernelRegularizer = getRegularizer(args.kernelRegularizer); + this.recurrentRegularizer = getRegularizer(args.recurrentRegularizer); + this.biasRegularizer = getRegularizer(args.biasRegularizer); + this.kernelConstraint = getConstraint(args.kernelConstraint); + this.recurrentConstraint = getConstraint(args.recurrentConstraint); + this.biasConstraint = getConstraint(args.biasConstraint); + this.dropout = min$2([1, max$2([0, args.dropout == null ? 0 : args.dropout])]); + this.recurrentDropout = min$2([ + 1, + max$2([0, args.recurrentDropout == null ? 
0 : args.recurrentDropout]) + ]); + this.dropoutFunc = args.dropoutFunc; + this.implementation = args.implementation; + this.stateSize = this.units; + this.dropoutMask = null; + this.recurrentDropoutMask = null; + } + build(inputShape) { + inputShape = getExactlyOneShape(inputShape); + const inputDim = inputShape[inputShape.length - 1]; + this.kernel = this.addWeight('kernel', [inputDim, this.units * 3], null, this.kernelInitializer, this.kernelRegularizer, true, this.kernelConstraint); + this.recurrentKernel = this.addWeight('recurrent_kernel', [this.units, this.units * 3], null, this.recurrentInitializer, this.recurrentRegularizer, true, this.recurrentConstraint); + if (this.useBias) { + this.bias = this.addWeight('bias', [this.units * 3], null, this.biasInitializer, this.biasRegularizer, true, this.biasConstraint); + } + else { + this.bias = null; + } + // Porting Notes: Unlike the PyKeras implementation, we perform slicing + // of the weights and bias in the call() method, at execution time. + this.built = true; + } + call(inputs, kwargs) { + return tidy(() => { + inputs = inputs; + if (inputs.length !== 2) { + throw new ValueError(`GRUCell expects 2 input Tensors (inputs, h, c), got ` + + `${inputs.length}.`); + } + const training = kwargs['training'] == null ? false : kwargs['training']; + let hTMinus1 = inputs[1]; // Previous memory state. + inputs = inputs[0]; + // Note: For superior performance, TensorFlow.js always uses + // implementation 2, regardless of the actual value of + // config.implementation. 
+ if (0 < this.dropout && this.dropout < 1 && this.dropoutMask == null) { + this.dropoutMask = generateDropoutMask({ + ones: () => onesLike$3(inputs), + rate: this.dropout, + training, + count: 3, + dropoutFunc: this.dropoutFunc, + }); + } + if (0 < this.recurrentDropout && this.recurrentDropout < 1 && + this.recurrentDropoutMask == null) { + this.recurrentDropoutMask = generateDropoutMask({ + ones: () => onesLike$3(hTMinus1), + rate: this.recurrentDropout, + training, + count: 3, + dropoutFunc: this.dropoutFunc, + }); + } + const dpMask = this.dropoutMask; + const recDpMask = this.recurrentDropoutMask; + let z; + let r; + let hh; + if (0 < this.dropout && this.dropout < 1) { + inputs = mul(inputs, dpMask[0]); + } + let matrixX = dot$1(inputs, this.kernel.read()); + if (this.useBias) { + matrixX = biasAdd(matrixX, this.bias.read()); + } + if (0 < this.recurrentDropout && this.recurrentDropout < 1) { + hTMinus1 = mul(hTMinus1, recDpMask[0]); + } + const recurrentKernelValue = this.recurrentKernel.read(); + const [rk1, rk2] = split$3(recurrentKernelValue, [2 * this.units, this.units], recurrentKernelValue.rank - 1); + const matrixInner = dot$1(hTMinus1, rk1); + const [xZ, xR, xH] = split$3(matrixX, 3, matrixX.rank - 1); + const [recurrentZ, recurrentR] = split$3(matrixInner, 2, matrixInner.rank - 1); + z = this.recurrentActivation.apply(add$3(xZ, recurrentZ)); + r = this.recurrentActivation.apply(add$3(xR, recurrentR)); + const recurrentH = dot$1(mul(r, hTMinus1), rk2); + hh = this.activation.apply(add$3(xH, recurrentH)); + const h = add$3(mul(z, hTMinus1), mul(add$3(1, neg$2(z)), hh)); + // TODO(cais): Add use_learning_phase flag properly. 
+ return [h, h]; + }); + } + getConfig() { + const baseConfig = super.getConfig(); + const config = { + units: this.units, + activation: serializeActivation(this.activation), + recurrentActivation: serializeActivation(this.recurrentActivation), + useBias: this.useBias, + kernelInitializer: serializeInitializer(this.kernelInitializer), + recurrentInitializer: serializeInitializer(this.recurrentInitializer), + biasInitializer: serializeInitializer(this.biasInitializer), + kernelRegularizer: serializeRegularizer(this.kernelRegularizer), + recurrentRegularizer: serializeRegularizer(this.recurrentRegularizer), + biasRegularizer: serializeRegularizer(this.biasRegularizer), + activityRegularizer: serializeRegularizer(this.activityRegularizer), + kernelConstraint: serializeConstraint(this.kernelConstraint), + recurrentConstraint: serializeConstraint(this.recurrentConstraint), + biasConstraint: serializeConstraint(this.biasConstraint), + dropout: this.dropout, + recurrentDropout: this.recurrentDropout, + implementation: this.implementation, + resetAfter: false + }; + return Object.assign(Object.assign({}, baseConfig), config); + } + } + /** @nocollapse */ + GRUCell.className = 'GRUCell'; + registerClass(GRUCell); + class GRU extends RNN { + constructor(args) { + if (args.implementation === 0) { + console.warn('`implementation=0` has been deprecated, and now defaults to ' + + '`implementation=1`. Please update your layer call.'); + } + args.cell = new GRUCell(args); + super(args); + // TODO(cais): Add activityRegularizer. + } + call(inputs, kwargs) { + return tidy(() => { + if (this.cell.dropoutMask != null) { + dispose(this.cell.dropoutMask); + this.cell.dropoutMask = null; + } + if (this.cell.recurrentDropoutMask != null) { + dispose(this.cell.recurrentDropoutMask); + this.cell.recurrentDropoutMask = null; + } + const mask = kwargs == null ? null : kwargs['mask']; + const training = kwargs == null ? null : kwargs['training']; + const initialState = kwargs == null ? 
null : kwargs['initialState']; + return super.call(inputs, { mask, training, initialState }); + }); + } + /** @nocollapse */ + static fromConfig(cls, config) { + if (config['implmentation'] === 0) { + config['implementation'] = 1; + } + return new cls(config); + } + } + /** @nocollapse */ + GRU.className = 'GRU'; + registerClass(GRU); + class LSTMCell extends RNNCell { + constructor(args) { + super(args); + this.DEFAULT_ACTIVATION = 'tanh'; + this.DEFAULT_RECURRENT_ACTIVATION = 'hardSigmoid'; + this.DEFAULT_KERNEL_INITIALIZER = 'glorotNormal'; + this.DEFAULT_RECURRENT_INITIALIZER = 'orthogonal'; + this.DEFAULT_BIAS_INITIALIZER = 'zeros'; + this.units = args.units; + assertPositiveInteger(this.units, 'units'); + this.activation = getActivation(args.activation === undefined ? this.DEFAULT_ACTIVATION : + args.activation); + this.recurrentActivation = getActivation(args.recurrentActivation === undefined ? + this.DEFAULT_RECURRENT_ACTIVATION : + args.recurrentActivation); + this.useBias = args.useBias == null ? true : args.useBias; + this.kernelInitializer = getInitializer(args.kernelInitializer || this.DEFAULT_KERNEL_INITIALIZER); + this.recurrentInitializer = getInitializer(args.recurrentInitializer || this.DEFAULT_RECURRENT_INITIALIZER); + this.biasInitializer = + getInitializer(args.biasInitializer || this.DEFAULT_BIAS_INITIALIZER); + this.unitForgetBias = args.unitForgetBias; + this.kernelRegularizer = getRegularizer(args.kernelRegularizer); + this.recurrentRegularizer = getRegularizer(args.recurrentRegularizer); + this.biasRegularizer = getRegularizer(args.biasRegularizer); + this.kernelConstraint = getConstraint(args.kernelConstraint); + this.recurrentConstraint = getConstraint(args.recurrentConstraint); + this.biasConstraint = getConstraint(args.biasConstraint); + this.dropout = min$2([1, max$2([0, args.dropout == null ? 0 : args.dropout])]); + this.recurrentDropout = min$2([ + 1, + max$2([0, args.recurrentDropout == null ? 
0 : args.recurrentDropout]) + ]); + this.dropoutFunc = args.dropoutFunc; + this.implementation = args.implementation; + this.stateSize = [this.units, this.units]; + this.dropoutMask = null; + this.recurrentDropoutMask = null; + } + build(inputShape) { + var _a; + inputShape = getExactlyOneShape(inputShape); + const inputDim = inputShape[inputShape.length - 1]; + this.kernel = this.addWeight('kernel', [inputDim, this.units * 4], null, this.kernelInitializer, this.kernelRegularizer, true, this.kernelConstraint); + this.recurrentKernel = this.addWeight('recurrent_kernel', [this.units, this.units * 4], null, this.recurrentInitializer, this.recurrentRegularizer, true, this.recurrentConstraint); + let biasInitializer; + if (this.useBias) { + if (this.unitForgetBias) { + const capturedBiasInit = this.biasInitializer; + const capturedUnits = this.units; + biasInitializer = new (_a = class CustomInit extends Initializer { + apply(shape, dtype) { + // TODO(cais): More informative variable names? + const bI = capturedBiasInit.apply([capturedUnits]); + const bF = (new Ones()).apply([capturedUnits]); + const bCAndH = capturedBiasInit.apply([capturedUnits * 2]); + return concatAlongFirstAxis(concatAlongFirstAxis(bI, bF), bCAndH); + } + }, + /** @nocollapse */ + _a.className = 'CustomInit', + _a)(); + } + else { + biasInitializer = this.biasInitializer; + } + this.bias = this.addWeight('bias', [this.units * 4], null, biasInitializer, this.biasRegularizer, true, this.biasConstraint); + } + else { + this.bias = null; + } + // Porting Notes: Unlike the PyKeras implementation, we perform slicing + // of the weights and bias in the call() method, at execution time. + this.built = true; + } + call(inputs, kwargs) { + return tidy(() => { + const training = kwargs['training'] == null ? 
false : kwargs['training']; + inputs = inputs; + if (inputs.length !== 3) { + throw new ValueError(`LSTMCell expects 3 input Tensors (inputs, h, c), got ` + + `${inputs.length}.`); + } + let hTMinus1 = inputs[1]; // Previous memory state. + const cTMinus1 = inputs[2]; // Previous carry state. + inputs = inputs[0]; + if (0 < this.dropout && this.dropout < 1 && this.dropoutMask == null) { + this.dropoutMask = generateDropoutMask({ + ones: () => onesLike$3(inputs), + rate: this.dropout, + training, + count: 4, + dropoutFunc: this.dropoutFunc + }); + } + if (0 < this.recurrentDropout && this.recurrentDropout < 1 && + this.recurrentDropoutMask == null) { + this.recurrentDropoutMask = generateDropoutMask({ + ones: () => onesLike$3(hTMinus1), + rate: this.recurrentDropout, + training, + count: 4, + dropoutFunc: this.dropoutFunc + }); + } + const dpMask = this.dropoutMask; + const recDpMask = this.recurrentDropoutMask; + // Note: For superior performance, TensorFlow.js always uses + // implementation 2 regardless of the actual value of + // config.implementation. + let i; + let f; + let c; + let o; + if (0 < this.dropout && this.dropout < 1) { + inputs = mul(inputs, dpMask[0]); + } + let z = dot$1(inputs, this.kernel.read()); + if (0 < this.recurrentDropout && this.recurrentDropout < 1) { + hTMinus1 = mul(hTMinus1, recDpMask[0]); + } + z = add$3(z, dot$1(hTMinus1, this.recurrentKernel.read())); + if (this.useBias) { + z = biasAdd(z, this.bias.read()); + } + const [z0, z1, z2, z3] = split$3(z, 4, z.rank - 1); + i = this.recurrentActivation.apply(z0); + f = this.recurrentActivation.apply(z1); + c = add$3(mul(f, cTMinus1), mul(i, this.activation.apply(z2))); + o = this.recurrentActivation.apply(z3); + const h = mul(o, this.activation.apply(c)); + // TODO(cais): Add use_learning_phase flag properly. 
+ return [h, h, c]; + }); + } + getConfig() { + const baseConfig = super.getConfig(); + const config = { + units: this.units, + activation: serializeActivation(this.activation), + recurrentActivation: serializeActivation(this.recurrentActivation), + useBias: this.useBias, + kernelInitializer: serializeInitializer(this.kernelInitializer), + recurrentInitializer: serializeInitializer(this.recurrentInitializer), + biasInitializer: serializeInitializer(this.biasInitializer), + unitForgetBias: this.unitForgetBias, + kernelRegularizer: serializeRegularizer(this.kernelRegularizer), + recurrentRegularizer: serializeRegularizer(this.recurrentRegularizer), + biasRegularizer: serializeRegularizer(this.biasRegularizer), + activityRegularizer: serializeRegularizer(this.activityRegularizer), + kernelConstraint: serializeConstraint(this.kernelConstraint), + recurrentConstraint: serializeConstraint(this.recurrentConstraint), + biasConstraint: serializeConstraint(this.biasConstraint), + dropout: this.dropout, + recurrentDropout: this.recurrentDropout, + implementation: this.implementation, + }; + return Object.assign(Object.assign({}, baseConfig), config); + } + } + /** @nocollapse */ + LSTMCell.className = 'LSTMCell'; + registerClass(LSTMCell); + class LSTM extends RNN { + constructor(args) { + if (args.implementation === 0) { + console.warn('`implementation=0` has been deprecated, and now defaults to ' + + '`implementation=1`. Please update your layer call.'); + } + args.cell = new LSTMCell(args); + super(args); + // TODO(cais): Add activityRegularizer. + } + call(inputs, kwargs) { + return tidy(() => { + if (this.cell.dropoutMask != null) { + dispose(this.cell.dropoutMask); + this.cell.dropoutMask = null; + } + if (this.cell.recurrentDropoutMask != null) { + dispose(this.cell.recurrentDropoutMask); + this.cell.recurrentDropoutMask = null; + } + const mask = kwargs == null ? null : kwargs['mask']; + const training = kwargs == null ? 
null : kwargs['training']; + const initialState = kwargs == null ? null : kwargs['initialState']; + return super.call(inputs, { mask, training, initialState }); + }); + } + /** @nocollapse */ + static fromConfig(cls, config) { + if (config['implmentation'] === 0) { + config['implementation'] = 1; + } + return new cls(config); + } + } + /** @nocollapse */ + LSTM.className = 'LSTM'; + registerClass(LSTM); + class StackedRNNCells extends RNNCell { + constructor(args) { + super(args); + this.cells = args.cells; + } + get stateSize() { + // States are a flat list in reverse order of the cell stack. + // This allows preserving the requirement `stack.statesize[0] === + // outputDim`. E.g., states of a 2-layer LSTM would be `[h2, c2, h1, c1]`, + // assuming one LSTM has states `[h, c]`. + const stateSize = []; + for (const cell of this.cells.slice().reverse()) { + if (Array.isArray(cell.stateSize)) { + stateSize.push(...cell.stateSize); + } + else { + stateSize.push(cell.stateSize); + } + } + return stateSize; + } + call(inputs, kwargs) { + return tidy(() => { + inputs = inputs; + let states = inputs.slice(1); + // Recover per-cell states. + const nestedStates = []; + for (const cell of this.cells.slice().reverse()) { + if (Array.isArray(cell.stateSize)) { + nestedStates.push(states.splice(0, cell.stateSize.length)); + } + else { + nestedStates.push(states.splice(0, 1)); + } + } + nestedStates.reverse(); + // Call the cells in order and store the returned states. + const newNestedStates = []; + let callInputs; + for (let i = 0; i < this.cells.length; ++i) { + const cell = this.cells[i]; + states = nestedStates[i]; + // TODO(cais): Take care of constants. + if (i === 0) { + callInputs = [inputs[0]].concat(states); + } + else { + callInputs = [callInputs[0]].concat(states); + } + callInputs = cell.call(callInputs, kwargs); + newNestedStates.push(callInputs.slice(1)); + } + // Format the new states as a flat list in reverse cell order. 
+ states = []; + for (const cellStates of newNestedStates.slice().reverse()) { + states.push(...cellStates); + } + return [callInputs[0]].concat(states); + }); + } + build(inputShape) { + if (isArrayOfShapes(inputShape)) { + // TODO(cais): Take care of input constants. + // const constantShape = inputShape.slice(1); + inputShape = inputShape[0]; + } + inputShape = inputShape; + let outputDim; + this.cells.forEach((cell, i) => { + nameScope(`RNNCell_${i}`, () => { + // TODO(cais): Take care of input constants. + cell.build(inputShape); + if (Array.isArray(cell.stateSize)) { + outputDim = cell.stateSize[0]; + } + else { + outputDim = cell.stateSize; + } + inputShape = [inputShape[0], outputDim]; + }); + }); + this.built = true; + } + getConfig() { + const baseConfig = super.getConfig(); + const getCellConfig = (cell) => { + return { + 'className': cell.getClassName(), + 'config': cell.getConfig(), + }; + }; + const cellConfigs = this.cells.map(getCellConfig); + const config = { 'cells': cellConfigs }; + return Object.assign(Object.assign({}, baseConfig), config); + } + /** @nocollapse */ + static fromConfig(cls, config, customObjects = {}) { + const cells = []; + for (const cellConfig of config['cells']) { + cells.push(deserialize(cellConfig, customObjects)); + } + return new cls({ cells }); + } + get trainableWeights() { + if (!this.trainable) { + return []; + } + const weights = []; + for (const cell of this.cells) { + weights.push(...cell.trainableWeights); + } + return weights; + } + get nonTrainableWeights() { + const weights = []; + for (const cell of this.cells) { + weights.push(...cell.nonTrainableWeights); + } + if (!this.trainable) { + const trainableWeights = []; + for (const cell of this.cells) { + trainableWeights.push(...cell.trainableWeights); + } + return trainableWeights.concat(weights); + } + return weights; + } + /** + * Retrieve the weights of a the model. + * + * @returns A flat `Array` of `tf.Tensor`s. 
+ */ + getWeights() { + const weights = []; + for (const cell of this.cells) { + weights.push(...cell.weights); + } + return batchGetValue(weights); + } + /** + * Set the weights of the model. + * + * @param weights An `Array` of `tf.Tensor`s with shapes and types matching + * the output of `getWeights()`. + */ + setWeights(weights) { + const tuples = []; + for (const cell of this.cells) { + const numParams = cell.weights.length; + const inputWeights = weights.splice(numParams); + for (let i = 0; i < cell.weights.length; ++i) { + tuples.push([cell.weights[i], inputWeights[i]]); + } + } + batchSetValue(tuples); + } + } + /** @nocollapse */ + StackedRNNCells.className = 'StackedRNNCells'; + registerClass(StackedRNNCells); + function generateDropoutMask(args) { + const { ones, rate, training = false, count = 1, dropoutFunc } = args; + const droppedInputs = () => dropoutFunc != null ? dropoutFunc(ones(), rate) : dropout$1(ones(), rate); + const createMask = () => inTrainPhase(droppedInputs, ones, training); + // just in case count is provided with null or undefined + if (!count || count <= 1) { + return keep(createMask().clone()); + } + const masks = Array(count).fill(undefined).map(createMask); + return masks.map(m => keep(m.clone())); + } + + /** + * @license + * Copyright 2020 Google LLC + * + * Use of this source code is governed by an MIT-style + * license that can be found in the LICENSE file or at + * https://opensource.org/licenses/MIT. 
+ * ============================================================================= + */ + var __rest = (undefined && undefined.__rest) || function (s, e) { + var t = {}; + for (var p in s) if (Object.prototype.hasOwnProperty.call(s, p) && e.indexOf(p) < 0) + t[p] = s[p]; + if (s != null && typeof Object.getOwnPropertySymbols === "function") + for (var i = 0, p = Object.getOwnPropertySymbols(s); i < p.length; i++) { + if (e.indexOf(p[i]) < 0 && Object.prototype.propertyIsEnumerable.call(s, p[i])) + t[p[i]] = s[p[i]]; + } + return t; + }; + class ConvRNN2DCell extends RNNCell { + } + /** + * Base class for convolutional-recurrent layers. + */ + class ConvRNN2D extends RNN { + constructor(args) { + if (args.unroll) { + throw new NotImplementedError('Unrolling is not possible with convolutional RNNs.'); + } + if (Array.isArray(args.cell)) { + throw new NotImplementedError('It is not possible at the moment to stack convolutional cells.'); + } + super(args); + this.inputSpec = [new InputSpec({ ndim: 5 })]; + } + call(inputs, kwargs) { + return tidy(() => { + if (this.cell.dropoutMask != null) { + dispose(this.cell.dropoutMask); + this.cell.dropoutMask = null; + } + if (this.cell.recurrentDropoutMask != null) { + dispose(this.cell.recurrentDropoutMask); + this.cell.recurrentDropoutMask = null; + } + if (kwargs && kwargs['constants']) { + throw new ValueError('ConvRNN2D cell does not support constants'); + } + const mask = kwargs == null ? null : kwargs['mask']; + const training = kwargs == null ? null : kwargs['training']; + const initialState = kwargs == null ? 
null : kwargs['initialState']; + return super.call(inputs, { mask, training, initialState }); + }); + } + computeOutputShape(inputShape) { + let outShape = this.computeSingleOutputShape(inputShape); + if (!this.returnSequences) { + outShape = [outShape[0], ...outShape.slice(2)]; + } + if (this.returnState) { + outShape = + [outShape, ...Array(2).fill([inputShape[0], ...outShape.slice(-3)])]; + } + return outShape; + } + getInitialState(inputs) { + return tidy(() => { + const { stateSize } = this.cell; + const inputShape = inputs.shape; + const outputShape = this.computeSingleOutputShape(inputShape); + const stateShape = [outputShape[0], ...outputShape.slice(2)]; + const initialState = zeros$2(stateShape); + if (Array.isArray(stateSize)) { + return Array(stateSize.length).fill(initialState); + } + return [initialState]; + }); + } + resetStates(states, training = false) { + tidy(() => { + if (!this.stateful) { + throw new AttributeError('Cannot call resetStates() on an RNN Layer that is not stateful.'); + } + const inputShape = this.inputSpec[0].shape; + const outputShape = this.computeSingleOutputShape(inputShape); + const stateShape = [outputShape[0], ...outputShape.slice(2)]; + const batchSize = inputShape[0]; + if (batchSize == null) { + throw new ValueError('If an RNN is stateful, it needs to know its batch size. Specify ' + + 'the batch size of your input tensors: \n' + + '- If using a Sequential model, specify the batch size by ' + + 'passing a `batchInputShape` option to your first layer.\n' + + '- If using the functional API, specify the batch size by ' + + 'passing a `batchShape` option to your Input layer.'); + } + // Initialize state if null. + if (this.getStates() == null) { + if (Array.isArray(this.cell.stateSize)) { + this.states_ = this.cell.stateSize.map(() => zeros$2(stateShape)); + } + else { + this.states_ = [zeros$2(stateShape)]; + } + } + else if (states == null) { + // Dispose old state tensors. 
+ dispose(this.states_); + // For stateful RNNs, fully dispose kept old states. + if (this.keptStates != null) { + dispose(this.keptStates); + this.keptStates = []; + } + if (Array.isArray(this.cell.stateSize)) { + this.states_ = this.cell.stateSize.map(() => zeros$2(stateShape)); + } + else { + this.states_[0] = zeros$2(stateShape); + } + } + else { + if (!Array.isArray(states)) { + states = [states]; + } + if (states.length !== this.states_.length) { + throw new ValueError(`Layer ${this.name} expects ${this.states_.length} state(s), ` + + `but it received ${states.length} state value(s). Input ` + + `received: ${states}`); + } + if (training) { + // Store old state tensors for complete disposal later, i.e., during + // the next no-arg call to this method. We do not dispose the old + // states immediately because that BPTT (among other things) require + // them. + this.keptStates.push(this.states_.slice()); + } + else { + dispose(this.states_); + } + for (let index = 0; index < this.states_.length; ++index) { + const value = states[index]; + const expectedShape = stateShape; + if (!arraysEqual(value.shape, expectedShape)) { + throw new ValueError(`State ${index} is incompatible with layer ${this.name}: ` + + `expected shape=${expectedShape}, received shape=${value.shape}`); + } + this.states_[index] = value; + } + } + this.states_ = this.states_.map(state => keep(state.clone())); + }); + } + computeSingleOutputShape(inputShape) { + const { dataFormat, filters, kernelSize, padding, strides, dilationRate } = this.cell; + const isChannelsFirst = dataFormat === 'channelsFirst'; + const h = inputShape[isChannelsFirst ? 3 : 2]; + const w = inputShape[isChannelsFirst ? 4 : 3]; + const hOut = convOutputLength(h, kernelSize[0], padding, strides[0], dilationRate[0]); + const wOut = convOutputLength(w, kernelSize[1], padding, strides[1], dilationRate[1]); + const outShape = [ + ...inputShape.slice(0, 2), + ...(isChannelsFirst ? 
[filters, hOut, wOut] : [hOut, wOut, filters]) + ]; + return outShape; + } + } + /** @nocollapse */ + ConvRNN2D.className = 'ConvRNN2D'; + class ConvLSTM2DCell extends LSTMCell { + constructor(args) { + const { filters, kernelSize, strides, padding, dataFormat, dilationRate, } = args; + super(Object.assign(Object.assign({}, args), { units: filters })); + this.filters = filters; + assertPositiveInteger(this.filters, 'filters'); + this.kernelSize = normalizeArray(kernelSize, 2, 'kernelSize'); + this.kernelSize.forEach(size => assertPositiveInteger(size, 'kernelSize')); + this.strides = normalizeArray(strides || 1, 2, 'strides'); + this.strides.forEach(stride => assertPositiveInteger(stride, 'strides')); + this.padding = padding || 'valid'; + checkPaddingMode(this.padding); + this.dataFormat = dataFormat || 'channelsLast'; + checkDataFormat(this.dataFormat); + this.dilationRate = normalizeArray(dilationRate || 1, 2, 'dilationRate'); + this.dilationRate.forEach(rate => assertPositiveInteger(rate, 'dilationRate')); + } + build(inputShape) { + var _a; + inputShape = getExactlyOneShape(inputShape); + const channelAxis = this.dataFormat === 'channelsFirst' ? 1 : inputShape.length - 1; + if (inputShape[channelAxis] == null) { + throw new ValueError(`The channel dimension of the input should be defined. 
` + + `Found ${inputShape[channelAxis]}`); + } + const inputDim = inputShape[channelAxis]; + const numOfKernels = 4; + const kernelShape = this.kernelSize.concat([inputDim, this.filters * numOfKernels]); + this.kernel = this.addWeight('kernel', kernelShape, null, this.kernelInitializer, this.kernelRegularizer, true, this.kernelConstraint); + const recurrentKernelShape = this.kernelSize.concat([this.filters, this.filters * numOfKernels]); + this.recurrentKernel = this.addWeight('recurrent_kernel', recurrentKernelShape, null, this.recurrentInitializer, this.recurrentRegularizer, true, this.recurrentConstraint); + if (this.useBias) { + let biasInitializer; + if (this.unitForgetBias) { + const init = this.biasInitializer; + const filters = this.filters; + biasInitializer = new (_a = class CustomInit extends Initializer { + apply(shape, dtype) { + const biasI = init.apply([filters]); + const biasF = ones$1([filters]); + const biasCAndO = init.apply([filters * 2]); + return concatenate$2([biasI, biasF, biasCAndO]); + } + }, + /** @nocollapse */ + _a.className = 'CustomInit', + _a)(); + } + else { + biasInitializer = this.biasInitializer; + } + this.bias = this.addWeight('bias', [this.filters * numOfKernels], null, biasInitializer, this.biasRegularizer, true, this.biasConstraint); + } + this.built = true; + } + call(inputs, kwargs) { + return tidy(() => { + if (inputs.length !== 3) { + throw new ValueError(`ConvLSTM2DCell expects 3 input Tensors (inputs, h, c), got ` + + `${inputs.length}.`); + } + const training = kwargs['training'] || false; + const x = inputs[0]; // Current input + const hTMinus1 = inputs[1]; // Previous memory state. + const cTMinus1 = inputs[2]; // Previous carry state. 
+ const numOfKernels = 4; + if (0 < this.dropout && this.dropout < 1 && this.dropoutMask == null) { + this.dropoutMask = generateDropoutMask({ + ones: () => onesLike$3(x), + rate: this.dropout, + training, + count: numOfKernels, + dropoutFunc: this.dropoutFunc + }); + } + const dropoutMask = this.dropoutMask; + const applyDropout = (x, mask, index) => { + if (!mask || !mask[index]) { + return x; + } + return mul(mask[index], x); + }; + let xI = applyDropout(x, dropoutMask, 0); + let xF = applyDropout(x, dropoutMask, 1); + let xC = applyDropout(x, dropoutMask, 2); + let xO = applyDropout(x, dropoutMask, 3); + if (0 < this.recurrentDropout && this.recurrentDropout < 1 && + this.recurrentDropoutMask == null) { + this.recurrentDropoutMask = generateDropoutMask({ + ones: () => onesLike$3(hTMinus1), + rate: this.recurrentDropout, + training, + count: numOfKernels, + dropoutFunc: this.dropoutFunc + }); + } + const recDropoutMask = this.recurrentDropoutMask; + let hI = applyDropout(hTMinus1, recDropoutMask, 0); + let hF = applyDropout(hTMinus1, recDropoutMask, 1); + let hC = applyDropout(hTMinus1, recDropoutMask, 2); + let hO = applyDropout(hTMinus1, recDropoutMask, 3); + const kernelChannelAxis = 3; + const [kernelI, kernelF, kernelC, kernelO] = split$3(this.kernel.read(), numOfKernels, kernelChannelAxis); + const [biasI, biasF, biasC, biasO] = this.useBias ? 
+ split$3(this.bias.read(), numOfKernels) : + [null, null, null, null]; + xI = this.inputConv(xI, kernelI, biasI, this.padding); + xF = this.inputConv(xF, kernelF, biasF, this.padding); + xC = this.inputConv(xC, kernelC, biasC, this.padding); + xO = this.inputConv(xO, kernelO, biasO, this.padding); + const [recKernelI, recKernelF, recKernelC, recKernelO] = split$3(this.recurrentKernel.read(), numOfKernels, kernelChannelAxis); + hI = this.recurrentConv(hI, recKernelI); + hF = this.recurrentConv(hF, recKernelF); + hC = this.recurrentConv(hC, recKernelC); + hO = this.recurrentConv(hO, recKernelO); + const i = this.recurrentActivation.apply(add$3(xI, hI)); + const f = this.recurrentActivation.apply(add$3(xF, hF)); + const c = add$3(mul(f, cTMinus1), mul(i, this.activation.apply(add$3(xC, hC)))); + const h = mul(this.recurrentActivation.apply(add$3(xO, hO)), this.activation.apply(c)); + return [h, h, c]; + }); + } + getConfig() { + const _a = super.getConfig(), { 'units': _ } = _a, baseConfig = __rest(_a, ['units']); + const config = { + filters: this.filters, + kernelSize: this.kernelSize, + padding: this.padding, + dataFormat: this.dataFormat, + dilationRate: this.dilationRate, + strides: this.strides, + }; + return Object.assign(Object.assign({}, baseConfig), config); + } + inputConv(x, w, b, padding) { + const out = conv2d$4(x, w, this.strides, (padding || 'valid'), this.dataFormat === 'channelsFirst' ? 'NCHW' : 'NHWC', this.dilationRate); + if (b) { + return biasAdd(out, b, this.dataFormat); + } + return out; + } + recurrentConv(x, w) { + const strides = 1; + return conv2d$4(x, w, strides, 'same', this.dataFormat === 'channelsFirst' ? 
'NCHW' : 'NHWC'); + } + } + /** @nocollapse */ + ConvLSTM2DCell.className = 'ConvLSTM2DCell'; + registerClass(ConvLSTM2DCell); + class ConvLSTM2D extends ConvRNN2D { + constructor(args) { + const cell = new ConvLSTM2DCell(args); + super(Object.assign(Object.assign({}, args), { cell })); + } + /** @nocollapse */ + static fromConfig(cls, config) { + return new cls(config); + } + } + /** @nocollapse */ + ConvLSTM2D.className = 'ConvLSTM2D'; + registerClass(ConvLSTM2D); + + /** + * @license + * Copyright 2018 Google LLC + * + * Use of this source code is governed by an MIT-style + * license that can be found in the LICENSE file or at + * https://opensource.org/licenses/MIT. + * ============================================================================= + */ + class Dropout extends Layer { + constructor(args) { + super(args); + this.rate = Math.max(Math.min(args.rate, 1), 0); + // So that the scalar doesn't get tidied up between executions. + this.noiseShape = args.noiseShape; + this.seed = args.seed; + this.supportsMasking = true; + } + getNoiseShape(input) { + if (this.noiseShape == null) { + return this.noiseShape; + } + const inputShape = input.shape; + const noiseShape = []; + for (let i = 0; i < this.noiseShape.length; ++i) { + noiseShape.push(this.noiseShape[i] == null ? inputShape[i] : this.noiseShape[i]); + } + return noiseShape; + } + call(inputs, kwargs) { + return tidy(() => { + this.invokeCallHook(inputs, kwargs); + const input = getExactlyOneTensor(inputs); + if (0 < this.rate && this.rate < 1) { + const training = kwargs['training'] == null ? 
false : kwargs['training']; + const noiseShape = this.getNoiseShape(input); + const output = inTrainPhase(() => dropout$1(input, this.rate, noiseShape, this.seed), () => input, training); + return output; + } + return inputs; + }); + } + getConfig() { + const config = { + rate: this.rate, + noiseShape: this.noiseShape, + seed: this.seed, + }; + const baseConfig = super.getConfig(); + Object.assign(config, baseConfig); + return config; + } + dispose() { + return super.dispose(); + } + } + /** @nocollapse */ + Dropout.className = 'Dropout'; + registerClass(Dropout); + class SpatialDropout1D extends Dropout { + constructor(args) { + super(args); + this.inputSpec = [{ ndim: 3 }]; + } + getNoiseShape(input) { + const inputShape = input.shape; + return [inputShape[0], 1, inputShape[2]]; + } + } + /** @nocollapse */ + SpatialDropout1D.className = 'SpatialDropout1D'; + registerClass(SpatialDropout1D); + class Dense extends Layer { + constructor(args) { + super(args); + // Default activation: Linear (none). + this.activation = null; + this.useBias = true; + this.kernel = null; + this.bias = null; + this.DEFAULT_KERNEL_INITIALIZER = 'glorotNormal'; + this.DEFAULT_BIAS_INITIALIZER = 'zeros'; + if (args.batchInputShape == null && args.inputShape == null && + args.inputDim != null) { + // This logic is copied from Layer's constructor, since we can't + // do exactly what the Python constructor does for Dense(). 
+ let batchSize = null; + if (args.batchSize != null) { + batchSize = args.batchSize; + } + this.batchInputShape = [batchSize, args.inputDim]; + } + this.units = args.units; + assertPositiveInteger(this.units, 'units'); + this.activation = getActivation(args.activation); + if (args.useBias != null) { + this.useBias = args.useBias; + } + this.kernelInitializer = getInitializer(args.kernelInitializer || this.DEFAULT_KERNEL_INITIALIZER); + this.biasInitializer = + getInitializer(args.biasInitializer || this.DEFAULT_BIAS_INITIALIZER); + this.kernelConstraint = getConstraint(args.kernelConstraint); + this.biasConstraint = getConstraint(args.biasConstraint); + this.kernelRegularizer = getRegularizer(args.kernelRegularizer); + this.biasRegularizer = getRegularizer(args.biasRegularizer); + this.activityRegularizer = getRegularizer(args.activityRegularizer); + this.supportsMasking = true; + this.inputSpec = [{ minNDim: 2 }]; + } + build(inputShape) { + inputShape = getExactlyOneShape(inputShape); + const inputLastDim = inputShape[inputShape.length - 1]; + if (this.kernel == null) { + this.kernel = this.addWeight('kernel', [inputLastDim, this.units], null, this.kernelInitializer, this.kernelRegularizer, true, this.kernelConstraint); + if (this.useBias) { + this.bias = this.addWeight('bias', [this.units], null, this.biasInitializer, this.biasRegularizer, true, this.biasConstraint); + } + } + this.inputSpec = [{ minNDim: 2, axes: { [-1]: inputLastDim } }]; + this.built = true; + } + computeOutputShape(inputShape) { + inputShape = getExactlyOneShape(inputShape); + const outputShape = inputShape.slice(); + outputShape[outputShape.length - 1] = this.units; + return outputShape; + } + call(inputs, kwargs) { + return tidy(() => { + this.invokeCallHook(inputs, kwargs); + // Dense layer accepts only a single input. 
+ const input = getExactlyOneTensor(inputs); + const fusedActivationName = mapActivationToFusedKernel(this.activation.getClassName()); + let output; + if (fusedActivationName != null) { + output = dot$1(input, this.kernel.read(), fusedActivationName, this.bias ? this.bias.read() : null); + } + else { + output = dot$1(input, this.kernel.read()); + if (this.bias != null) { + output = biasAdd(output, this.bias.read()); + } + if (this.activation != null) { + output = this.activation.apply(output); + } + } + return output; + }); + } + getConfig() { + const config = { + units: this.units, + activation: serializeActivation(this.activation), + useBias: this.useBias, + kernelInitializer: serializeInitializer(this.kernelInitializer), + biasInitializer: serializeInitializer(this.biasInitializer), + kernelRegularizer: serializeRegularizer(this.kernelRegularizer), + biasRegularizer: serializeRegularizer(this.biasRegularizer), + activityRegularizer: serializeRegularizer(this.activityRegularizer), + kernelConstraint: serializeConstraint(this.kernelConstraint), + biasConstraint: serializeConstraint(this.biasConstraint) + }; + const baseConfig = super.getConfig(); + Object.assign(config, baseConfig); + return config; + } + } + /** @nocollapse */ + Dense.className = 'Dense'; + registerClass(Dense); + class Flatten extends Layer { + constructor(args) { + args = args || {}; + super(args); + this.inputSpec = [{ minNDim: 3 }]; + this.dataFormat = args.dataFormat; + } + computeOutputShape(inputShape) { + inputShape = getExactlyOneShape(inputShape); + for (const dim of inputShape.slice(1)) { + if (dim == null) { + throw new ValueError(`The shape of the input to "Flatten" is not fully defined ` + + `(got ${inputShape.slice(1)}). 
Make sure to pass a complete ` + + `"input_shape" or "batch_input_shape" argument to the first ` + + `layer in your model.`); + } + } + return [inputShape[0], arrayProd(inputShape, 1)]; + } + call(inputs, kwargs) { + return tidy(() => { + this.invokeCallHook(inputs, kwargs); + let input = getExactlyOneTensor(inputs); + if (this.dataFormat === 'channelsFirst' && input.rank > 1) { + const permutation = [0]; + for (let i = 2; i < input.rank; ++i) { + permutation.push(i); + } + permutation.push(1); + input = transpose$2(input, permutation); + } + return batchFlatten(input); + }); + } + getConfig() { + const config = {}; + if (this.dataFormat != null) { + config['dataFormat'] = this.dataFormat; + } + const baseConfig = super.getConfig(); + Object.assign(config, baseConfig); + return config; + } + } + /** @nocollapse */ + Flatten.className = 'Flatten'; + registerClass(Flatten); + class Activation extends Layer { + constructor(args) { + super(args); + this.supportsMasking = true; + this.activation = getActivation(args.activation); + } + call(inputs, kwargs) { + return tidy(() => { + this.invokeCallHook(inputs, kwargs); + const input = getExactlyOneTensor(inputs); + return this.activation.apply(input); + }); + } + getConfig() { + const config = { activation: serializeActivation(this.activation) }; + const baseConfig = super.getConfig(); + Object.assign(config, baseConfig); + return config; + } + } + /** @nocollapse */ + Activation.className = 'Activation'; + registerClass(Activation); + class RepeatVector extends Layer { + constructor(args) { + super(args); + this.n = args.n; + this.inputSpec = [{ ndim: 2 }]; + } + computeOutputShape(inputShape) { + return [inputShape[0], this.n, inputShape[1]]; + } + call(inputs, kwargs) { + return tidy(() => { + inputs = getExactlyOneTensor(inputs); + return repeat(inputs, this.n); + }); + } + getConfig() { + const config = { + n: this.n, + }; + const baseConfig = super.getConfig(); + Object.assign(config, baseConfig); + return config; + 
} + } + /** @nocollapse */ + RepeatVector.className = 'RepeatVector'; + registerClass(RepeatVector); + class Reshape extends Layer { + constructor(args) { + super(args); + this.targetShape = args.targetShape; + // Make sure that all unknown dimensions are represented as `null`. + for (let i = 0; i < this.targetShape.length; ++i) { + if (this.isUnknown(this.targetShape[i])) { + this.targetShape[i] = null; + } + } + } + isUnknown(dim) { + return dim < 0 || dim == null; + } + /** + * Finds and replaces a missing dimension in output shape. + * + * This is a near direct port of the internal Numpy function + * `_fix_unknown_dimension` in `numpy/core/src/multiarray/shape.c`. + * + * @param inputShape: Original shape of array begin reshape. + * @param outputShape: Target shape of the array, with at most a single + * `null` or negative number, which indicates an underdetermined dimension + * that should be derived from `inputShape` and the known dimensions of + * `outputShape`. + * @returns: The output shape with `null` replaced with its computed value. + * @throws: ValueError: If `inputShape` and `outputShape` do not match. 
+ */ + fixUnknownDimension(inputShape, outputShape) { + const errorMsg = 'Total size of new array must be unchanged.'; + const finalShape = outputShape.slice(); + let known = 1; + let unknown = null; + for (let i = 0; i < finalShape.length; ++i) { + const dim = finalShape[i]; + if (this.isUnknown(dim)) { + if (unknown === null) { + unknown = i; + } + else { + throw new ValueError('Can only specifiy one unknown dimension.'); + } + } + else { + known *= dim; + } + } + const originalSize = arrayProd(inputShape); + if (unknown !== null) { + if (known === 0 || originalSize % known !== 0) { + throw new ValueError(errorMsg); + } + finalShape[unknown] = originalSize / known; + } + else if (originalSize !== known) { + throw new ValueError(errorMsg); + } + return finalShape; + } + computeOutputShape(inputShape) { + let anyUnknownDims = false; + for (let i = 0; i < inputShape.length; ++i) { + if (this.isUnknown(inputShape[i])) { + anyUnknownDims = true; + break; + } + } + if (anyUnknownDims) { + return inputShape.slice(0, 1).concat(this.targetShape); + } + else { + return inputShape.slice(0, 1).concat(this.fixUnknownDimension(inputShape.slice(1), this.targetShape)); + } + } + call(inputs, kwargs) { + return tidy(() => { + this.invokeCallHook(inputs, kwargs); + const input = getExactlyOneTensor(inputs); + const inputShape = input.shape; + const outputShape = inputShape.slice(0, 1).concat(this.fixUnknownDimension(inputShape.slice(1), this.targetShape)); + return reshape$3(input, outputShape); + }); + } + getConfig() { + const config = { + targetShape: this.targetShape, + }; + const baseConfig = super.getConfig(); + Object.assign(config, baseConfig); + return config; + } + } + /** @nocollapse */ + Reshape.className = 'Reshape'; + registerClass(Reshape); + class Permute extends Layer { + constructor(args) { + super(args); + if (args.dims == null) { + throw new Error('Required configuration field `dims` is missing during Permute ' + + 'constructor call.'); + } + if 
(!Array.isArray(args.dims)) { + throw new Error('Permute constructor requires `dims` to be an Array, but received ' + + `${args.dims} instead.`); + } + // Check the validity of the permutation indices. + const expectedSortedIndices = range$2(1, args.dims.length + 1); + if (!arraysEqual(args.dims.slice().sort(), expectedSortedIndices)) { + throw new Error('Invalid permutation `dims`: ' + JSON.stringify(args.dims) + + ' `dims` must contain consecutive integers starting from 1.'); + } + this.dims = args.dims; + this.dimsIncludingBatch = [0].concat(this.dims); + this.inputSpec = [new InputSpec({ ndim: this.dims.length + 1 })]; + } + computeOutputShape(inputShape) { + inputShape = getExactlyOneShape(inputShape); + const outputShape = inputShape.slice(); + this.dims.forEach((dim, i) => { + outputShape[i + 1] = inputShape[dim]; + }); + return outputShape; + } + call(inputs, kwargs) { + return transpose$2(getExactlyOneTensor(inputs), this.dimsIncludingBatch); + } + getConfig() { + const config = { + dims: this.dims, + }; + const baseConfig = super.getConfig(); + Object.assign(config, baseConfig); + return config; + } + } + /** @nocollapse */ + Permute.className = 'Permute'; + registerClass(Permute); + class Masking extends Layer { + constructor(args) { + super(args == null ? {} : args); + this.supportsMasking = true; + if (args != null) { + this.maskValue = args.maskValue == null ? 
0 : args.maskValue; + } + else { + this.maskValue = 0; + } + } + computeOutputShape(inputShape) { + return inputShape; + } + getConfig() { + const baseConfig = super.getConfig(); + const config = { maskValue: this.maskValue }; + Object.assign(config, baseConfig); + return config; + } + computeMask(inputs, mask) { + const input = getExactlyOneTensor(inputs); + const axis = -1; + return any$2(notEqual$2(input, this.maskValue), axis); + } + call(inputs, kwargs) { + return tidy(() => { + this.invokeCallHook(inputs, kwargs); + const input = getExactlyOneTensor(inputs); + const axis = -1; + const keepDims = true; + const booleanMask = any$2(notEqual$2(input, this.maskValue), axis, keepDims); + const output = mul(input, cast$3(booleanMask, input.dtype)); + return output; + }); + } + } + /** @nocollapse */ + Masking.className = 'Masking'; + registerClass(Masking); + + /** + * @license + * Copyright 2018 Google LLC + * + * Use of this source code is governed by an MIT-style + * license that can be found in the LICENSE file or at + * https://opensource.org/licenses/MIT. + * ============================================================================= + */ + class Embedding extends Layer { + constructor(args) { + super(args); + this.embeddings = null; + this.DEFAULT_EMBEDDINGS_INITIALIZER = 'randomUniform'; + if (args.batchInputShape == null && args.inputShape == null) { + // Porting Note: This logic is copied from Layer's constructor, since we + // can't do exactly what the Python constructor does for Embedding(). + // Specifically, the super constructor can not be called after the + // mutation of the `config` argument. 
+ let batchSize = null; + if (args.batchSize != null) { + batchSize = args.batchSize; + } + if (args.inputLength == null) { + // Fix super-constructor to what it would have done if + // 'config.inputShape' were (None, ) + this.batchInputShape = [batchSize, null]; + } + else { + // Fix super-constructor to what it would have done if + // 'config.inputShape' were (config.inputLength, ) + this.batchInputShape = + [batchSize].concat(toList(args.inputLength)); + } + } + this.inputDim = args.inputDim; + assertPositiveInteger(this.inputDim, 'inputDim'); + this.outputDim = args.outputDim; + assertPositiveInteger(this.outputDim, 'outputDim'); + this.embeddingsInitializer = getInitializer(args.embeddingsInitializer || this.DEFAULT_EMBEDDINGS_INITIALIZER); + this.embeddingsRegularizer = getRegularizer(args.embeddingsRegularizer); + this.activityRegularizer = getRegularizer(args.activityRegularizer); + this.embeddingsConstraint = getConstraint(args.embeddingsConstraint); + this.maskZero = args.maskZero; + this.supportsMasking = args.maskZero; + this.inputLength = args.inputLength; + } + build(inputShape) { + this.embeddings = this.addWeight('embeddings', [this.inputDim, this.outputDim], this.dtype, this.embeddingsInitializer, this.embeddingsRegularizer, true, this.embeddingsConstraint); + this.built = true; + } + // Override warnOnIncompatibleInputShape because an embedding layer allows + // the input to have varying ranks. + warnOnIncompatibleInputShape(inputShape) { } + computeMask(inputs, mask) { + return tidy(() => { + if (!this.maskZero) { + return null; + } + else { + inputs = getExactlyOneTensor(inputs); + return notEqual$2(inputs, zerosLike$3(inputs)); + } + }); + } + computeOutputShape(inputShape) { + inputShape = getExactlyOneShape(inputShape); + if (this.inputLength == null) { + return [...inputShape, this.outputDim]; + } + // inputLength can be an array if input is 3D or higher. 
+ const inLens = toList(this.inputLength); + if (inLens.length !== inputShape.length - 1) { + throw new ValueError(`"inputLength" is ${this.inputLength}, but received ` + + `input shape has shape ${inputShape}`); + } + else { + let i = 0; + for (let k = 0; k < inLens.length; ++k) { + const s1 = inLens[k]; + const s2 = inputShape[k + 1]; + if ((s1 != null) && (s2 != null) && (s1 !== s2)) { + throw new ValueError(`"inputLength" is ${this.inputLength}, but received ` + + `input shape has shape ${inputShape}`); + } + else if (s1 == null) { + inLens[i] = s2; + } + i++; + } + } + return [inputShape[0], ...inLens, this.outputDim]; + } + call(inputs, kwargs) { + return tidy(() => { + this.invokeCallHook(inputs, kwargs); + // Embedding layer accepts only a single input. + let input = getExactlyOneTensor(inputs); + if (input.dtype !== 'int32') { + input = cast$2(input, 'int32'); + } + const output = gather(this.embeddings.read(), reshape$3(input, [input.size])); + return reshape$3(output, getExactlyOneShape(this.computeOutputShape(input.shape))); + }); + } + getConfig() { + const config = { + inputDim: this.inputDim, + outputDim: this.outputDim, + embeddingsInitializer: serializeInitializer(this.embeddingsInitializer), + embeddingsRegularizer: serializeRegularizer(this.embeddingsRegularizer), + activityRegularizer: serializeRegularizer(this.activityRegularizer), + embeddingsConstraint: serializeConstraint(this.embeddingsConstraint), + maskZero: this.maskZero, + inputLength: this.inputLength + }; + const baseConfig = super.getConfig(); + Object.assign(config, baseConfig); + return config; + } + } + /** @nocollapse */ + Embedding.className = 'Embedding'; + registerClass(Embedding); + + /** + * @license + * Copyright 2018 Google LLC + * + * Use of this source code is governed by an MIT-style + * license that can be found in the LICENSE file or at + * https://opensource.org/licenses/MIT. 
+ * ============================================================================= + */ + /** + * Generic Merge layer for element-wise merge functions. + * + * Used to implement `Sum`, `Average`, `Concatenate`, etc. + */ + class Merge extends Layer { + constructor(args) { + super(args || {}); + this.supportsMasking = true; + } + /** + * Logic for merging multiple tensors, to be overridden by subclasses. + * @param inputs + */ + mergeFunction(inputs) { + throw new NotImplementedError(); + } + /** + * Computes the shape of the result of an elementwise operation. + * + * @param shape1: Shape of the first tensor. + * @param shape2: Shape of the second tensor. + * @returns Expected output shape when an elementwise operation is carried + * out on 2 tensors with shapes `shape1` and `shape2`. + * @throws ValueError: If `shape1` and `shape2` are not compatible for + * element-wise operations. + */ + computeElementwiseOpOutputShape(shape1, shape2) { + if (shape1 == null || shape2 == null) { + return null; + } + else if (shape1.length < shape2.length) { + return this.computeElementwiseOpOutputShape(shape2, shape1); + } + else if (shape2.length === 0) { + return shape1; + } + const outputShape = shape1.slice(0, shape1.length - shape2.length); + for (let k = 0; k < shape2.length; ++k) { + const i = shape1[shape1.length - shape2.length + k]; + const j = shape2[k]; + if (i == null || j == null || i < 0 || j < 0) { + outputShape.push(null); + } + else if (i === 1) { + outputShape.push(j); + } + else if (j === 1) { + outputShape.push(i); + } + else { + if (i !== j) { + throw new ValueError('Operands could not be broadcast together with shapes ' + + JSON.stringify(shape1) + ' ' + JSON.stringify(shape2)); + } + outputShape.push(i); + } + } + return outputShape; + } + build(inputShape) { + // Used purely for shape validation. + if (Array.isArray(inputShape) && !Array.isArray(inputShape[0])) { + // Make sure that inputShape is an Array of shape. 
+ inputShape = [getExactlyOneShape(inputShape)]; + } + inputShape = inputShape; + if (inputShape.length < 2) { + throw new ValueError('A merge layer should be called on an Array of at least 2 inputs.' + + ` Got ${inputShape.length} input(s).`); + } + // Make sure that there is at most one unique batch size among the input + // shapes. + let batchSizes = []; + for (const shape of inputShape) { + if (shape != null && shape[0] !== null) { + batchSizes.push(shape[0]); + } + } + batchSizes = unique$2(batchSizes); + if (batchSizes.length > 1) { + throw new ValueError(`Can not merge tensors with different batch sizes. ` + + `Got tensors with shapes: ${JSON.stringify(inputShape)}.`); + } + let outputShape = inputShape[0] == null ? null : inputShape[0].slice(1); + for (let i = 1; i < inputShape.length; ++i) { + const shape = inputShape[i] == null ? null : inputShape[i].slice(1); + outputShape = this.computeElementwiseOpOutputShape(outputShape, shape); + } + // If the inputs have different ranks, we have to reshape them to make them + // broadcastable. + const allRanks = inputShape.map(shape => shape.length); + if (inputShape.indexOf(null) === -1 && + unique$2(allRanks).length === 1) { + this.reshapeRequired = false; + } + else { + this.reshapeRequired = true; + } + } + call(inputs, kwargs) { + return tidy(() => { + inputs = inputs; + if (this.reshapeRequired) { + const reshapedInputs = []; + const inputDims = inputs.map(input => input.rank); + if (inputDims.indexOf(null) === -1) { + // If ranks of all inputs are available, we simply expand each of them + // at axis=1 until all of them have the same rank. + const maxNDim = max$2(inputDims); + for (let x of inputs) { + const xNDim = x.rank; + for (let k = 0; k < maxNDim - xNDim; ++k) { + x = expandDims$2(x, 1); + } + reshapedInputs.push(x); + } + return this.mergeFunction(reshapedInputs); + } + else { + // Transpose all inputs so that batch size is the last dimension. + // [batchSize, dim1, dim2, ...] 
-> [dim1, dim2, ..., batchSize] + let transposed = false; + for (const x of inputs) { + const xNDim = x.rank; + if (xNDim == null) { + const xShape = x.shape; + const batchSize = xShape[0]; + const newShape = xShape.slice(1).concat([batchSize]); + let xTransposed = reshape$3(x, [batchSize].concat(arrayProd(xShape.slice(1)))); + xTransposed = transpose$2(xTransposed, [1, 0]); + xTransposed = reshape$3(xTransposed, newShape); + reshapedInputs.push(xTransposed); + transposed = true; + } + else if (xNDim > 1) { + const dims = range$2(1, xNDim).concat([0]); + reshapedInputs.push(transpose$2(x, dims)); + transposed = true; + } + else { + // We don't transpose inputs if they are 1D vectors or scalars. + reshapedInputs.push(x); + } + } + let y = this.mergeFunction(reshapedInputs); + const yNDim = y.rank; + if (transposed) { + // If inputs have been transposed, we have to transpose the output + // too. + if (yNDim == null) { + const yShape = y.shape; + const yNDim = yShape.length; + const batchSize = yShape[yNDim - 1]; + const newShape = [batchSize].concat(yShape.slice(0, yShape.length - 1)); + y = reshape$3(transpose$2(reshape$3(y, [-1, batchSize]), [1, 0]), newShape); + } + else if (yNDim > 1) { + const dims = [yNDim - 1].concat(range$2(0, yNDim - 1)); + y = transpose$2(y, dims); + } + } + return y; + } + } + else { + return this.mergeFunction(inputs); + } + }); + } + computeOutputShape(inputShape) { + inputShape = inputShape; + let outputShape; + if (inputShape[0] == null) { + outputShape = null; + } + else { + outputShape = inputShape[0].slice(1); + } + for (let i = 1; i < inputShape.length; ++i) { + const shape = inputShape[i] == null ? 
null : inputShape[i].slice(1); + outputShape = this.computeElementwiseOpOutputShape(outputShape, shape); + } + let batchSizes = []; + for (const shape of inputShape) { + if (shape != null && shape[0] !== null) { + batchSizes.push(shape[0]); + } + } + batchSizes = unique$2(batchSizes); + if (batchSizes.length === 1) { + outputShape = batchSizes.concat(outputShape); + } + else { + outputShape = [null].concat(outputShape); + } + return outputShape; + } + computeMask(inputs, mask) { + return tidy(() => { + if (mask == null) { + return null; + } + if (!Array.isArray(mask)) { + throw new ValueError('`mask` should be an Array'); + } + if (!Array.isArray(inputs)) { + throw new ValueError('`inputs` should be an Array'); + } + if (mask.length !== inputs.length) { + throw new ValueError(`The Array 'inputs' and 'mask' are expected to have the same ` + + `length, but have different lengths ` + + `(${inputs.length} vs ${mask.length})`); + } + if (mask.every(m => m == null)) { + return null; + } + mask = mask.map(m => m == null ? m : expandDims$3(m, 0)); + let output = mask[0]; + for (let i = 1; i < mask.length - 1; ++i) { + output = logicalAnd$2(output, mask[i]); + } + return output; + }); + } + } + class Add extends Merge { + constructor(args) { + super(args); + } + mergeFunction(inputs) { + return tidy(() => { + let output = inputs[0].clone(); + for (let i = 1; i < inputs.length; ++i) { + output = add$3(output, inputs[i]); + } + return output; + }); + } + } + /** @nocollapse */ + Add.className = 'Add'; + registerClass(Add); + /** + * Calculate the element-wise sum of inputs, which all have the same shape. + * + * This function can be invoked in three ways. + * + * 1. Construct an instance of `Add` layer, by using no input argument + * or a single configuration argument. The resultant `Add` layer can then + * be used on `tf.SymbolicTensor`s or `tf.Tensor`s. For example: + * + * ```js + * const addLayer = tf.layers.add(); + * + * // The layer can be applied to inputs. 
+ * const input1 = tf.input({shape: [2, 2]}); + * const input2 = tf.input({shape: [2, 2]}); + * const output = addLayer.apply([input1, input2]); + * console.log(output.shape); + * // You get [null, 2, 2], with the first dimension as the undetermined batch + * // dimension. + * ``` + * + * 2. Invoke directly on an `Array` of `tf.SymbolicTensor`s. This constructs + * an `Layer` object internally and calls its `apply` method on the inputs, + * generating a new `tf.SymbolicTensor`. For example: + * + * ```js + * const input1 = tf.input({shape: [2, 2]}); + * const input2 = tf.input({shape: [2, 2]}); + * const output = tf.layers.add([input1, input2]); + * console.log(output.shape); + * // You get [null, 2, 2], with the first dimension as the undetermined batch + * // dimension. + * ``` + * + * 3. Invoke directly on `tf.Tensor`s, i.e., concrete values. This constructs + * an `Layer` object internally and calls its `apply` method on the inputs, + * generating a new `tf.Tensor` as the result of the computation. For + * example: + * + * ```js + * const input1 = tf.tensor2d([1, 2, 3, 4], [2, 2]); + * const input2 = tf.tensor2d([10, 20, 30, 40], [2, 2]); + * tf.layers.add([input1, input2]).print(); + * // Gives [[11, 22], [33, 44]]. + * + */ + function add$2(config) { + if (Array.isArray(config)) { + const layer = new Add({}); + return layer.apply(config); + } + else { + return new Add(config); + } + } + class Multiply extends Merge { + constructor(args) { + super(args); + } + mergeFunction(inputs) { + return tidy(() => { + let output = inputs[0].clone(); + for (let i = 1; i < inputs.length; ++i) { + output = mul(output, inputs[i]); + } + return output; + }); + } + } + /** @nocollapse */ + Multiply.className = 'Multiply'; + registerClass(Multiply); + /** + * Calculate the element-wise product of inputs, which all have the same shape. + * + * This function can be invoked in three ways. + * + * 1. 
Construct an instance of `Multiply` layer, by using no input argument + * or a single configuration argument. The resultant `Multiply` layer can + * then be used on `tf.SymbolicTensor`s or `tf.Tensor`s. For example: + * + * ```js + * const multiplyLayer = tf.layers.multiply(); + * + * // The layer can be applied to inputs. + * const input1 = tf.input({shape: [2, 2]}); + * const input2 = tf.input({shape: [2, 2]}); + * const output = multiplyLayer.apply([input1, input2]); + * console.log(output.shape); + * // You get [null, 2, 2], with the first dimension as the undetermined batch + * // dimension. + * ``` + * + * 2. Invoke directly on an `Array` of `tf.SymbolicTensor`s. This constructs + * an `Layer` object internally and calls its `apply` method on the inputs, + * generating a new `tf.SymbolicTensor`. For example: + * + * ```js + * const input1 = tf.input({shape: [2, 2]}); + * const input2 = tf.input({shape: [2, 2]}); + * const output = tf.layers.multiply([input1, input2]); + * console.log(output.shape); + * // You get [null, 2, 2], with the first dimension as the undetermined batch + * // dimension. + * ``` + * + * 3. Invoke directly on `tf.Tensor`s, i.e., concrete values. This constructs + * an `Layer` object internally and calls its `apply` method on the inputs, + * generating a new `tf.Tensor` as the result of the computation. For + * example: + * + * ```js + * const input1 = tf.tensor2d([1, 2, 3, 4], [2, 2]); + * const input2 = tf.tensor2d([10, 20, 30, 40], [2, 2]); + * tf.layers.multiply([input1, input2]).print(); + * // Gives [[10, 40], [90, 160]]. 
+ * + */ + function multiply$3(config) { + if (Array.isArray(config)) { + const layer = new Multiply({}); + return layer.apply(config); + } + else { + return new Multiply(config); + } + } + class Average extends Merge { + constructor(args) { + super(args); + } + mergeFunction(inputs) { + return tidy(() => { + let output = inputs[0].clone(); + for (let i = 1; i < inputs.length; ++i) { + output = add$3(output, inputs[i]); + } + return mul(1 / inputs.length, output); + }); + } + } + /** @nocollapse */ + Average.className = 'Average'; + registerClass(Average); + /** + * Calculate the element-wise arithmetic mean of inputs, which all have the same + * shape. + * + * This function can be invoked in three ways. + * + * 1. Construct an instance of `Average` layer, by using no input argument + * or a single configuration argument. The resultant `Average` layer can then + * be used on `tf.SymbolicTensor`s or `tf.Tensor`s. For example: + * + * ```js + * const averageLayer = tf.layers.average(); + * + * // The layer can be applied to inputs. + * const input1 = tf.input({shape: [2, 2]}); + * const input2 = tf.input({shape: [2, 2]}); + * const output = averageLayer.apply([input1, input2]); + * console.log(output.shape); + * // You get [null, 2, 2], with the first dimension as the undetermined batch + * // dimension. + * ``` + * + * 2. Invoke directly on an `Array` of `tf.SymbolicTensor`s. This constructs + * an `Layer` object internally and calls its `apply` method on the inputs, + * generating a new `tf.SymbolicTensor`. For example: + * + * ```js + * const input1 = tf.input({shape: [2, 2]}); + * const input2 = tf.input({shape: [2, 2]}); + * const output = tf.layers.average([input1, input2]); + * console.log(output.shape); + * // You get [null, 2, 2], with the first dimension as the undetermined batch + * // dimension. + * ``` + * + * 3. Invoke directly on `tf.Tensor`s, i.e., concrete values. 
This constructs + * an `Layer` object internally and calls its `apply` method on the inputs, + * generating a new `tf.Tensor` as the result of the computation. For + * example: + * + * ```js + * const input1 = tf.tensor2d([1, 2, 3, 4], [2, 2]); + * const input2 = tf.tensor2d([10, 20, 30, 40], [2, 2]); + * tf.layers.average([input1, input2]).print(); + * // Gives [[5.5, 11], [16.5, 22]]. + * + */ + function average$1(config) { + if (Array.isArray(config)) { + const layer = new Average({}); + return layer.apply(config); + } + else { + return new Average(config); + } + } + class Maximum extends Merge { + constructor(args) { + super(args); + } + mergeFunction(inputs) { + return tidy(() => { + let output = inputs[0]; + for (let i = 1; i < inputs.length; ++i) { + output = maximum$4(output, inputs[i]); + } + return output; + }); + } + } + /** @nocollapse */ + Maximum.className = 'Maximum'; + registerClass(Maximum); + /** + * Calculate the element-wise maximum of inputs, which all have the same shape. + * + * This function can be invoked in three ways. + * + * 1. Construct an instance of `Maximum` layer, by using no input argument + * or a single configuration argument. The resultant `Maximum` layer can then + * be used on `tf.SymbolicTensor`s or `tf.Tensor`s. For example: + * + * ```js + * const maximumLayer = tf.layers.maximum(); + * + * // The layer can be applied to inputs. + * const input1 = tf.input({shape: [2, 2]}); + * const input2 = tf.input({shape: [2, 2]}); + * const output = maximumLayer.apply([input1, input2]); + * console.log(output.shape); + * // You get [null, 2, 2], with the first dimension as the undetermined batch + * // dimension. + * ``` + * + * 2. Invoke directly on an `Array` of `tf.SymbolicTensor`s. This constructs + * an `Layer` object internally and calls its `apply` method on the inputs, + * generating a new `tf.SymbolicTensor`. 
For example: + * + * ```js + * const input1 = tf.input({shape: [2, 2]}); + * const input2 = tf.input({shape: [2, 2]}); + * const output = tf.layers.maximum([input1, input2]); + * console.log(output.shape); + * // You get [null, 2, 2], with the first dimension as the undetermined batch + * // dimension. + * ``` + * + * 3. Invoke directly on `tf.Tensor`s, i.e., concrete values. This constructs + * an `Layer` object internally and calls its `apply` method on the inputs, + * generating a new `tf.Tensor` as the result of the computation. For + * example: + * + * ```js + * const input1 = tf.tensor2d([1, 20, 3, 40], [2, 2]); + * const input2 = tf.tensor2d([10, 2, 30, 4], [2, 2]); + * tf.layers.maximum([input1, input2]).print(); + * // Gives [[10, 20], [30, 40]]. + * + */ + function maximum$3(config) { + if (Array.isArray(config)) { + const layer = new Maximum({}); + return layer.apply(config); + } + else { + return new Maximum(config); + } + } + class Minimum extends Merge { + constructor(args) { + super(args); + } + mergeFunction(inputs) { + return tidy(() => { + let output = inputs[0]; + for (let i = 1; i < inputs.length; ++i) { + output = minimum$4(output, inputs[i]); + } + return output; + }); + } + } + /** @nocollapse */ + Minimum.className = 'Minimum'; + registerClass(Minimum); + /** + * Calculate the element-wise minimum of inputs, which all have the same shape. + * + * This function can be invoked in three ways. + * + * 1. Construct an instance of `Minimum` layer, by using no input argument + * or a single configuration argument. The resultant `Minimum` layer can then + * be used on `tf.SymbolicTensor`s or `tf.Tensor`s. For example: + * + * ```js + * const minimumLayer = tf.layers.minimum(); + * + * // The layer can be applied to inputs. 
+ * const input1 = tf.input({shape: [2, 2]}); + * const input2 = tf.input({shape: [2, 2]}); + * const output = minimumLayer.apply([input1, input2]); + * console.log(output.shape); + * // You get [null, 2, 2], with the first dimension as the undetermined batch + * // dimension. + * ``` + * + * 2. Invoke directly on an `Array` of `tf.SymbolicTensor`s. This constructs + * an `Layer` object internally and calls its `apply` method on the inputs, + * generating a new `tf.SymbolicTensor`. For example: + * + * ```js + * const input1 = tf.input({shape: [2, 2]}); + * const input2 = tf.input({shape: [2, 2]}); + * const output = tf.layers.minimum([input1, input2]); + * console.log(output.shape); + * // You get [null, 2, 2], with the first dimension as the undetermined batch + * // dimension. + * ``` + * + * 3. Invoke directly on `tf.Tensor`s, i.e., concrete values. This constructs + * an `Layer` object internally and calls its `apply` method on the inputs, + * generating a new `tf.Tensor` as the result of the computation. For + * example: + * + * ```js + * const input1 = tf.tensor2d([1, 20, 3, 40], [2, 2]); + * const input2 = tf.tensor2d([10, 2, 30, 4], [2, 2]); + * tf.layers.minimum([input1, input2]).print(); + * // Gives [[1, 2], [3, 4]]. + * + */ + function minimum$3(config) { + if (Array.isArray(config)) { + const layer = new Minimum({}); + return layer.apply(config); + } + else { + return new Minimum(config); + } + } + class Concatenate extends Merge { + constructor(args) { + super(args); + this.DEFAULT_AXIS = -1; + if (args == null) { + args = {}; + } + this.axis = args.axis == null ? this.DEFAULT_AXIS : args.axis; + this.supportsMasking = true; + this.reshapeRequired = false; + } + build(inputShape) { + // Used purely for shape validation.] 
+ if (!(Array.isArray(inputShape) && Array.isArray(inputShape[0])) || + inputShape.length === 1) { + throw new ValueError('A `Concatenate` layer should be called on a list of at least 2 ' + + 'inputs'); + } + inputShape = inputShape; + let allNoneShape = true; + for (const shape of inputShape) { + if (shape != null) { + allNoneShape = false; + break; + } + } + if (allNoneShape) { + return; + } + const shapeSet = []; + for (let i = 0; i < inputShape.length; ++i) { + const shapeWithoutConcatAxis = inputShape[i].slice(); + shapeWithoutConcatAxis.splice(this.axis, 1); + let exists = false; + for (const shape of shapeSet) { + if (arraysEqual(shape, shapeWithoutConcatAxis)) { + exists = true; + break; + } + } + if (!exists) { + shapeSet.push(shapeWithoutConcatAxis); + } + } + if (shapeSet.length > 1) { + throw new ValueError('A `Concatenate` layer requires inputs with matching shapes ' + + 'except for the concat axis. Got input shapes: ' + + JSON.stringify(inputShape)); + } + } + mergeFunction(inputs) { + return tidy(() => { + return concatenate$2(inputs, this.axis); + }); + } + computeOutputShape(inputShape) { + if (!(Array.isArray(inputShape) && Array.isArray(inputShape[0]))) { + throw new ValueError('A `Concatenate` layer should be called on a list of inputs.'); + } + const inputShapes = inputShape; + const outputShape = inputShapes[0].slice(); + const axis = this.axis < 0 ? outputShape.length + this.axis : this.axis; + // Porting Note: the line above is because TypeScript doesn't support + // negative indices. 
+ for (const shape of inputShapes.slice(1)) { + if (outputShape[axis] == null || shape[axis] == null) { + outputShape[axis] = null; + break; + } + outputShape[axis] += shape[axis]; + } + return outputShape; + } + computeMask(inputs, mask) { + if (mask == null) { + return null; + } + if (!Array.isArray(mask)) { + throw new ValueError('`mask` should be an array for Concatenate'); + } + if (!Array.isArray(inputs)) { + throw new ValueError('`inputs` should be an array for Concatenate'); + } + if (mask.length !== inputs.length) { + throw new ValueError(`Mismatch in the length of mask (${mask.length}) ` + + `and the legnth of inputs (${inputs.length})`); + } + return tidy(() => { + let allNullMasks = true; + mask.forEach(m => { + if (m != null) { + allNullMasks = false; + return; + } + }); + if (allNullMasks) { + return null; + } + const outputMasks = []; + for (let i = 0; i < inputs.length; ++i) { + if (mask[i] == null) { + // Input is unmasked. Append all 1's to masks. + outputMasks.push(cast$3(onesLike$3(inputs[i]), 'bool')); + } + else if (mask[i].rank < inputs[i].rank) { + // Mask is smaller than the input, expand it. + outputMasks.push(expandDims$3(mask[i], -1)); + } + else { + outputMasks.push(mask[i]); + } + } + const concatenatedMasks = concat$2(outputMasks, this.axis); + return all$2(concatenatedMasks, -1, false); + }); + } + getConfig() { + const config = { + 'axis': this.axis, + }; + const baseConfig = super.getConfig(); + Object.assign(config, baseConfig); + return config; + } + } + /** @nocollapse */ + Concatenate.className = 'Concatenate'; + registerClass(Concatenate); + /** + * Concatenate an `Array` of inputs. + * + * This function can be invoked in three ways. + * + * 1. Construct an instance of `Concatenate` layer, by using no input argument + * or a single configuration argument. The resultant `Concatenate` layer can + * then be used on `tf.SymbolicTensor`s or `tf.Tensor`s. 
For example: + * + * ```js + * const concatLayer = tf.layers.concatenate(); + * + * // The layer can be applied to inputs. + * const input1 = tf.input({shape: [2, 3]}); + * const input2 = tf.input({shape: [2, 4]}); + * const output = concatLayer.apply([input1, input2]); + * console.log(output.shape); + * // You get [null, 2, 7], with the first dimension as the undetermined batch + * // dimension and the last dimension as the result of concatenating the + * // last dimensions of the two inputs. + * ``` + * + * 2. Invoke directly on an `Array` of `tf.SymbolicTensor`s. This constructs + * an `Layer` object internally and calls its `apply` method on the inputs, + * generating a new `tf.SymbolicTensor`. For example: + * + * ```js + * const input1 = tf.input({shape: [2, 3]}); + * const input2 = tf.input({shape: [2, 4]}); + * const output = tf.layers.concatenate([input1, input2]); + * console.log(output.shape); + * // You get [null, 2, 2], with the first dimension as the undetermined batch + * // dimension and the last dimension as the result of concatenating the + * // last dimensions of the two inputs. + * ``` + * + * 3. Invoke directly on `tf.Tensor`s, i.e., concrete values. This constructs + * an `Layer` object internally and calls its `apply` method on the inputs, + * generating a new `tf.Tensor` as the result of the computation. For + * example: + * + * ```js + * const input1 = tf.tensor2d([[1, 2], [3, 4]], [2, 2]); + * const input2 = tf.tensor2d([[10, 20], [30, 40]], [2, 2]); + * tf.layers.concatenate([input1, input2]).print(); + * // Gives [[1, 2, 10, 20], [3, 4, 30, 40]]. + * + */ + function concatenate$1(config) { + if (Array.isArray(config)) { + const layer = new Concatenate({}); + return layer.apply(config); + } + else { + return new Concatenate(config); + } + } + /** + * Interpretable potentially negative axis index. + * + * For example, given axis = -1, and dim = 3, this function will return 2. 
+ * + * @param axis The axis index, may be a positive, zero or negative integer. + * @param dim Total number of dimensions, a positive integer. + * @returns A non-negative axis index equivalent to the input `axis`. + */ + function interpretAxis(axis, dim) { + while (axis < 0) { + axis += dim; + } + return axis; + } + function batchDot(x, y, axes) { + if (x.shape.length > 3 || y.shape.length > 3) { + throw new NotImplementedError('batchDot is not implemented for tensors of 4D or higher rank yet'); + } + assert$1(x.shape.length >= 2, () => `batchDot requires the rank of x to be >= 2, ` + + `but got ${x.shape.length}`); + assert$1(x.shape.length >= 2, () => `batchDot requires the rank of y to be >= 2, ` + + `but got ${y.shape.length}`); + if (typeof axes === 'number') { + axes = [axes, axes]; + } + if (x.dtype === 'complex64' || y.dtype === 'complex64') { + throw new NotImplementedError('batchDot is not implemented for complex64-type Tensors yet.'); + } + const xNDim = x.shape.length; + const yNDim = y.shape.length; + if (axes == null) { + // Behave like batchMatmul by default. 
+ axes = [xNDim - 1, yNDim - 2]; + } + const axesArray = axes; + return tidy(() => { + let diff; + if (xNDim > yNDim) { + diff = xNDim - yNDim; + const diffShape = []; + for (let i = 0; i < diff; ++i) { + diffShape.push(1); + } + y = reshape$3(y, y.shape.concat(diffShape)); + } + else if (yNDim > xNDim) { + diff = yNDim - xNDim; + const diffShape = []; + for (let i = 0; i < diff; ++i) { + diffShape.push(1); + } + x = reshape$3(x, x.shape.concat(diffShape)); + } + else { + diff = 0; + } + let out; + if (x.shape.length === 2 && y.shape.length === 2) { + if (axesArray[0] === axesArray[1]) { + out = sum$3(mul(x, y), axesArray[0]); + } + else { + out = sum$3(mul(transpose$2(x, [1, 0]), y), axesArray[1]); + } + } + else { + const adjX = axesArray[0] !== x.shape.length - 1; + const adjY = axesArray[1] === y.shape.length - 1; + out = matMul$1(x, y, adjX, adjY); + } + if (diff > 0) { + let idx; + if (xNDim > yNDim) { + idx = xNDim + yNDim - 3; + } + else { + idx = xNDim - 1; + } + const squeezeAxes = []; + for (let i = idx; i < idx + diff; ++i) { + squeezeAxes.push(i); + } + out = squeeze(out, squeezeAxes); + } + if (out.shape.length === 1) { + out = expandDims$3(out, 1); + } + return out; + }); + } + class Dot extends Merge { + constructor(args) { + super(args); + this.axes = args.axes; + this.normalize = args.normalize == null ? 
false : args.normalize; + this.supportsMasking = true; + this.reshapeRequired = false; + } + build(inputShape) { + assert$1(Array.isArray(inputShape) && inputShape.length === 2 && + Array.isArray(inputShape[0]) && Array.isArray(inputShape[1]), () => 'A `Dot` layer should be called on a list of exactly 2 inputs.'); + const shape1 = inputShape[0]; + const shape2 = inputShape[1]; + if (shape1.length > 3 || shape2.length > 3) { + throw new NotImplementedError('Dot layer does not support tensors of 4D or higher rank yet.'); + } + const axes = this.interpretAxes(shape1, shape2); + if (shape1[axes[0]] !== shape2[axes[1]]) { + throw new ValueError(`Dimension incompatibility: ` + + `${shape1[axes[0]]} !== ${shape2[axes[1]]}`); + } + } + mergeFunction(inputs) { + if (inputs.length !== 2) { + throw new ValueError('A `Dot` layer must be called on exactly 2 inputs, ' + + `but received ${inputs.length} input(s).`); + } + let x1 = inputs[0]; + let x2 = inputs[1]; + let axes; + if (!Array.isArray(this.axes)) { + axes = [ + interpretAxis(this.axes, x1.shape.length), + interpretAxis(this.axes, x2.shape.length) + ]; + } + else { + axes = this.axes.map((axis, i) => interpretAxis(axis, inputs[i].shape.length)); + } + if (this.normalize) { + x1 = l2Normalize(x1, axes[0]); + x2 = l2Normalize(x2, axes[1]); + } + return batchDot(x1, x2, axes); + } + interpretAxes(shape1, shape2) { + let axes; + if (!Array.isArray(this.axes)) { + // `this.axes` is a single integer. + axes = [ + interpretAxis(this.axes, shape1.length), + interpretAxis(this.axes, shape2.length) + ]; + } + else { + // `this.axes` is an Array of integers. 
+ axes = this.axes; + } + return axes; + } + computeOutputShape(inputShape) { + assert$1(Array.isArray(inputShape) && inputShape.length === 2 && + Array.isArray(inputShape[0]) && Array.isArray(inputShape[1]), () => 'A `Dot` layer should be called on a list of exactly 2 inputs.'); + const shape1 = inputShape[0].slice(); + const shape2 = inputShape[1].slice(); + if (shape1.length > 3 || shape2.length > 3) { + throw new NotImplementedError('Dot layer does not support tensors of 4D or higher rank yet.'); + } + const axes = this.interpretAxes(shape1, shape2); + shape1.splice(axes[0], 1); + shape2.splice(axes[1], 1); + shape2.splice(0, 1); + const outputShape = shape1.concat(shape2); + if (outputShape.length === 1) { + outputShape.push(1); + } + return outputShape; + } + computeMask(inputs, mask) { + return null; + } + getConfig() { + const config = { + 'axes': this.axes, + 'normalize': this.normalize + }; + const baseConfig = super.getConfig(); + Object.assign(config, baseConfig); + return config; + } + } + /** @nocollapse */ + Dot.className = 'Dot'; + registerClass(Dot); + // TODO(cais): Add functional interfaces for the merge layers. + + /** + * @license + * Copyright 2018 Google LLC + * + * Use of this source code is governed by an MIT-style + * license that can be found in the LICENSE file or at + * https://opensource.org/licenses/MIT. 
+ * ============================================================================= + */ + class GaussianNoise extends Layer { + constructor(args) { + super(args); + this.supportsMasking = true; + this.stddev = args.stddev; + } + computeOutputShape(inputShape) { + return inputShape; + } + getConfig() { + const baseConfig = super.getConfig(); + const config = { stddev: this.stddev }; + Object.assign(config, baseConfig); + return config; + } + call(inputs, kwargs) { + return tidy(() => { + this.invokeCallHook(inputs, kwargs); + const input = getExactlyOneTensor(inputs); + const noised = () => add$3(randomNormal$1(input.shape, 0, this.stddev), input); + const output = inTrainPhase(noised, () => input, kwargs['training'] || false); + return output; + }); + } + } + /** @nocollapse */ + GaussianNoise.className = 'GaussianNoise'; + registerClass(GaussianNoise); + class GaussianDropout extends Layer { + constructor(args) { + super(args); + this.supportsMasking = true; + this.rate = args.rate; + } + computeOutputShape(inputShape) { + return inputShape; + } + getConfig() { + const baseConfig = super.getConfig(); + const config = { rate: this.rate }; + Object.assign(config, baseConfig); + return config; + } + call(inputs, kwargs) { + return tidy(() => { + this.invokeCallHook(inputs, kwargs); + const input = getExactlyOneTensor(inputs); + if (this.rate > 0 && this.rate < 1) { + const noised = () => { + const stddev = Math.sqrt(this.rate / (1 - this.rate)); + return mul(input, randomNormal$1(input.shape, 1, stddev)); + }; + return inTrainPhase(noised, () => input, kwargs['training'] || false); + } + return input; + }); + } + } + /** @nocollapse */ + GaussianDropout.className = 'GaussianDropout'; + registerClass(GaussianDropout); + /** + * Applies Alpha Dropout to the input. + * + * As it is a regularization layer, it is only active at training time. 
+ * + * Alpha Dropout is a `Dropout` that keeps mean and variance of inputs + * to their original values, in order to ensure the self-normalizing property + * even after this dropout. + * Alpha Dropout fits well to Scaled Exponential Linear Units + * by randomly setting activations to the negative saturation value. + * + * Arguments: + * - `rate`: float, drop probability (as with `Dropout`). + * The multiplicative noise will have + * standard deviation `sqrt(rate / (1 - rate))`. + * - `noise_shape`: A 1-D `Tensor` of type `int32`, representing the + * shape for randomly generated keep/drop flags. + * + * Input shape: + * Arbitrary. Use the keyword argument `inputShape` + * (tuple of integers, does not include the samples axis) + * when using this layer as the first layer in a model. + * + * Output shape: + * Same shape as input. + * + * References: + * - [Self-Normalizing Neural Networks](https://arxiv.org/abs/1706.02515) + */ + class AlphaDropout extends Layer { + constructor(args) { + super(args); + this.supportsMasking = true; + this.rate = args.rate; + this.noiseShape = args.noiseShape; + } + _getNoiseShape(inputs) { + return this.noiseShape || getExactlyOneTensor(inputs).shape; + } + computeOutputShape(inputShape) { + return inputShape; + } + getConfig() { + const baseConfig = super.getConfig(); + const config = { rate: this.rate }; + Object.assign(config, baseConfig); + return config; + } + call(inputs, kwargs) { + return tidy(() => { + if (this.rate < 1 && this.rate > 0) { + const noiseShape = this._getNoiseShape(inputs); + const droppedInputs = () => { + const input = getExactlyOneTensor(inputs); + const alpha = 1.6732632423543772848170429916717; + const scale = 1.0507009873554804934193349852946; + const alphaP = -alpha * scale; + let keptIdx = greaterEqual$2(randomUniform$1(noiseShape), this.rate); + keptIdx = cast$2(keptIdx, 'float32'); // get default dtype. + // Get affine transformation params. 
+ const a = ((1 - this.rate) * (1 + this.rate * alphaP ** 2)) ** -0.5; + const b = -a * alphaP * this.rate; + // Apply mask. + const x = add$3(mul(input, keptIdx), mul(add$3(keptIdx, -1), alphaP)); + return add$3(mul(x, a), b); + }; + return inTrainPhase(droppedInputs, () => getExactlyOneTensor(inputs), kwargs['training'] || false); + } + return inputs; + }); + } + } + /** @nocollapse */ + AlphaDropout.className = 'AlphaDropout'; + registerClass(AlphaDropout); + + /** + * @license + * Copyright 2018 Google LLC + * + * Use of this source code is governed by an MIT-style + * license that can be found in the LICENSE file or at + * https://opensource.org/licenses/MIT. + * ============================================================================= + */ + /** + * Applies batch normalization on x given mean, var, beta and gamma. + * + * I.e. returns: + * `output = (x - mean) / (sqrt(var) + epsilon) * gamma + beta` + * + * @param x Input tensor. + * @param mean Mean of batch. + * @param variance Variance of batch. + * @param beta Tensor with which to center the input. + * @param gamma Tensor by which to scale the input. + * @param epsilon Fuzz factor. + * @returns The result of the batch normalization. + */ + function batchNormalization$1(x, mean, variance, beta, gamma, epsilon = 1e-3) { + let out; + if (x.rank === 2) { + out = batchNorm2d(x, mean, variance, beta, gamma, epsilon); + } + else if (x.rank === 3) { + // TODO(cais): Check rank; give proper error message. + out = batchNorm3d(x, mean, variance, beta, gamma, epsilon); + } + else if (x.rank === 4) { + out = batchNorm4d(x, mean, variance, beta, gamma, epsilon); + } + else { + throw new NotImplementedError(`batchNormalization is not implemented for array of rank ${x.rank} ` + + `yet`); + } + return out; + } + /** + * Non-broadcasting batch normalization for use in training (not inference). 
+ * + * The input is normalized to zero mean and unit variance along the + * `reductionAxes`, followed by scaling with `gamma` and shifted by `beta`. + * The result of that is returned as the first element + * of the returned `Array`. The other two elements are the mean and variance, + * respectively. + * + * @param x Input tensor to be normalized. + * @param gamma Tensor by which to scale the input. + * @param beta Tensor by which to center the input. + * @param reductionAxes Axes over which to normalize. + * @param epsilon Fuzz factor. + * @returns An `Array` of three `Tensors`: + * [normalized tensor, mean of input, variance of input]. + */ + function regularNormalizeBatchInTraining(x, gamma, beta, reductionAxes, epsilon = 1e-3) { + return tidy(() => { + const meanAndVariance = moments(x, reductionAxes); + const mean = meanAndVariance.mean; + const variance = meanAndVariance.variance; + const normed = batchNormalization$1(x, mean, variance, beta, gamma, epsilon); + return [normed, mean, variance]; + }); + } + /** + * Broadcasting batch normalization for use in training (not inference). + * + * The input is normalized to zero mean and unit variance along the + * `reductionAxes`, followed by scaling with `gamma` and shifted by `beta`. + * The result of that is returned as the first element + * of the returned `Array`. The other two elements are the mean and variance, + * respectively. + * + * @param x Input tensor to be normalized. + * @param gamma Tensor by which to scale the input. + * @param beta Tensor by which to center the input. + * @param reductionAxes Axes over which to normalize. + * @param epsilon Fuzz factor. + * @returns An `Array` of three `Tensors`: + * [normalized tensor, mean of input, variance of input]. 
+ */ + function broadcastNormalizeBatchInTraining(x, gamma, beta, reductionAxes, epsilon = 1e-3) { + return tidy(() => { + const meanAndVariance = moments(x, reductionAxes); + const mean = meanAndVariance.mean; + const variance = meanAndVariance.variance; + const targetShape = []; + for (const axis of range$2(0, x.rank)) { + if (reductionAxes.indexOf(axis) !== -1) { + targetShape.push(1); + } + else { + targetShape.push(x.shape[axis]); + } + } + const broadcastMean = reshape$3(mean, targetShape); + const broadcastVariance = reshape$3(variance, targetShape); + const broadcastGamma = gamma == null ? null : reshape$3(gamma, targetShape); + const broadcastBeta = beta == null ? null : reshape$3(beta, targetShape); + const normed = batchNormalization$1(x, broadcastMean, broadcastVariance, broadcastBeta, broadcastGamma, epsilon); + return [normed, mean, variance]; + }); + } + /** + * Batch normalization for use in training (not inference). + * + * @param x Input tensor to be normalized. + * @param gamma Tensor by which to scale the input. + * @param beta Tensor by which to center the input. + * @param reductionAxes Axes over which to normalize. + * @param epsilon Fuzz factor. + * @returns An `Array` of three `Tensors`: + * [normalized tensor, mean of input, variance of input]. + */ + function normalizeBatchInTraining(x, gamma, beta, reductionAxes, epsilon = 1e-3) { + if (arraysEqual(reductionAxes.slice().sort(), range$2(0, x.rank - 1))) { + return regularNormalizeBatchInTraining(x, gamma, beta, reductionAxes, epsilon); + } + else { + return broadcastNormalizeBatchInTraining(x, gamma, beta, reductionAxes, epsilon); + } + } + class BatchNormalization extends Layer { + constructor(args) { + if (args == null) { + args = {}; + } + super(args); + this.supportsMasking = true; + this.axis = args.axis == null ? -1 : args.axis; + this.momentum = args.momentum == null ? 0.99 : args.momentum; + this.epsilon = args.epsilon == null ? 
1e-3 : args.epsilon; + this.center = args.center == null ? true : args.center; + this.scale = args.scale == null ? true : args.scale; + this.betaInitializer = getInitializer(args.betaInitializer || 'zeros'); + this.gammaInitializer = getInitializer(args.gammaInitializer || 'ones'); + this.movingMeanInitializer = + getInitializer(args.movingMeanInitializer || 'zeros'); + this.movingVarianceInitializer = + getInitializer(args.movingVarianceInitializer || 'ones'); + this.betaConstraint = getConstraint(args.betaConstraint); + this.gammaConstraint = getConstraint(args.gammaConstraint); + this.betaRegularizer = getRegularizer(args.betaRegularizer); + this.gammaRegularizer = getRegularizer(args.gammaRegularizer); + } + build(inputShape) { + inputShape = getExactlyOneShape(inputShape); + const axis = this.axis >= 0 ? this.axis : (this.axis + inputShape.length); + const dim = inputShape[axis]; + if (dim == null) { + throw new ValueError(`Axis ${axis} of input tensor should have a defined dimension but ` + + `the layer received an input with shape ` + + `${JSON.stringify(inputShape)}.`); + } + this.inputSpec = + [new InputSpec({ ndim: inputShape.length, axes: { [axis]: dim } })]; + const shape = [dim]; + if (this.scale) { + this.gamma = this.addWeight('gamma', shape, null, this.gammaInitializer, this.gammaRegularizer, true, this.gammaConstraint); + } + if (this.center) { + this.beta = this.addWeight('beta', shape, null, this.betaInitializer, this.betaRegularizer, true, this.betaConstraint); + } + this.movingMean = this.addWeight('moving_mean', shape, null, this.movingMeanInitializer, null, false); + this.movingVariance = this.addWeight('moving_variance', shape, null, this.movingVarianceInitializer, null, false); + this.built = true; + } + call(inputs, kwargs) { + return tidy(() => { + const training = kwargs['training'] == null ? 
false : kwargs['training']; + const input = getExactlyOneTensor(inputs); + const inputShape = input.shape; + const ndim = inputShape.length; + const reductionAxes = range$2(0, ndim); + const axis = this.axis >= 0 ? this.axis : (this.axis + ndim); + reductionAxes.splice(axis, 1); + const broadcastShape = pyListRepeat(1, ndim); + broadcastShape[axis] = inputShape[axis]; + const sortedReductionAxes = reductionAxes.slice(); + sortedReductionAxes.sort(); + const needsBroadcasting = !arraysEqual(sortedReductionAxes, range$2(0, ndim).slice(0, ndim - 1)); + const normalizeInference = () => { + if (needsBroadcasting) { + const broadcastMovingMean = reshape$3(this.movingMean.read(), broadcastShape); + const broadcastMovingVariance = reshape$3(this.movingVariance.read(), broadcastShape); + const broadcastBeta = this.center ? reshape$3(this.beta.read(), broadcastShape) : null; + const broadcastGamma = this.scale ? reshape$3(this.gamma.read(), broadcastShape) : null; + return batchNormalization$1(input, broadcastMovingMean, broadcastMovingVariance, broadcastBeta, broadcastGamma, this.epsilon); + } + else { + return batchNormalization$1(input, this.movingMean.read(), this.movingVariance.read(), this.beta == null ? null : this.beta.read(), this.gamma == null ? null : this.gamma.read(), this.epsilon); + } + }; + if (!training) { + return normalizeInference(); + } + const [normedTraining, mean, variance] = normalizeBatchInTraining(input, this.gamma.read(), this.beta.read(), reductionAxes, this.epsilon); + const doMovingAverage = (variable, value, momentum) => { + tidy(() => { + const decay = 1 - momentum; + const origValue = variable.read(); + const updateDelta = mul(sub$2(origValue, value), decay); + variable.write(sub$2(origValue, updateDelta)); + }); + }; + // Perform updates to moving mean and moving variance for training. 
+ // Porting Note: In PyKeras, these updates to `movingMean` and + // `movingAverage` are done as a deferred Graph, added to the `Layer`'s + // `update`s using the `add_update()` method. Here we do it imperatively + // and encapsulate the updates in a function that is invoked + // immediately. + const updateMovingMeanAndVariance = () => { + doMovingAverage(this.movingMean, mean, this.momentum); + doMovingAverage(this.movingVariance, variance, this.momentum); + }; + updateMovingMeanAndVariance(); + return normedTraining; + }); + } + getConfig() { + const config = { + axis: this.axis, + momentum: this.momentum, + epsilon: this.epsilon, + center: this.center, + scale: this.scale, + betaInitializer: serializeInitializer(this.betaInitializer), + gammaInitializer: serializeInitializer(this.gammaInitializer), + movingMeanInitializer: serializeInitializer(this.movingMeanInitializer), + movingVarianceInitializer: serializeInitializer(this.movingVarianceInitializer), + betaRegularizer: serializeRegularizer(this.betaRegularizer), + gammaRegularizer: serializeRegularizer(this.gammaRegularizer), + betaConstraint: serializeConstraint(this.betaConstraint), + gammaConstraint: serializeConstraint(this.gammaConstraint) + }; + const baseConfig = super.getConfig(); + Object.assign(config, baseConfig); + return config; + } + } + /** @nocollapse */ + BatchNormalization.className = 'BatchNormalization'; + registerClass(BatchNormalization); + class LayerNormalization extends Layer { + constructor(args) { + if (args == null) { + args = {}; + } + super(args); + this.axis = args.axis == null ? 
-1 : args.axis; + if (typeof this.axis === 'number') { + if (!Number.isInteger(this.axis)) { + throw new Error(`Expected axis to be an integer, but received ${this.axis}`); + } + } + else if (Array.isArray(this.axis)) { + for (const axis of this.axis) { + if (!Number.isInteger(axis)) { + throw new Error(`Expected axis to be an array of integers, ` + + `but received ${JSON.stringify(this.axis)}`); + } + } + } + else { + throw new Error(`Expected axis to be an integer or an array of integers, ` + + `but received ${JSON.stringify(this.axis)}`); + } + this.epsilon = args.epsilon == null ? 1e-3 : args.epsilon; + this.center = args.center == null ? true : args.center; + this.scale = args.scale == null ? true : args.scale; + this.betaInitializer = getInitializer(args.betaInitializer || 'zeros'); + this.gammaInitializer = getInitializer(args.gammaInitializer || 'ones'); + this.betaRegularizer = getRegularizer(args.betaRegularizer); + this.gammaRegularizer = getRegularizer(args.gammaRegularizer); + this.supportsMasking = true; + } + build(inputShape) { + inputShape = getExactlyOneShape(inputShape); + const nDims = inputShape.length; + // Convert axis to array and resolve negatives. + if (typeof this.axis === 'number') { + this.axis = [this.axis]; + } + for (let i = 0; i < this.axis.length; ++i) { + if (this.axis[i] < 0) { + this.axis[i] += nDims; + } + } + // Further validate axes. 
+ for (const axis of this.axis) { + if (axis < 0 || axis >= nDims) { + throw new Error(`Invalid axis: ${axis}`); + } + } + if (this.axis.length !== unique$2(this.axis).length) { + throw new Error(`Found duplicate axes in: ${this.axis}`); + } + const paramShape = this.axis.map(axis => inputShape[axis]); + const trainable = true; + if (this.scale) { + this.gamma = this.addWeight('gamma', paramShape, 'float32', this.gammaInitializer, this.gammaRegularizer, trainable); + } + else { + this.gamma = null; + } + if (this.center) { + this.beta = this.addWeight('beta', paramShape, 'float32', this.betaInitializer, this.betaRegularizer, trainable); + } + else { + this.beta = null; + } + this.built = true; + } + call(inputs, kwargs) { + const input = getExactlyOneTensor(inputs); + const inputShape = input.shape; + const nDims = inputShape.length; + return tidy(() => { + const keepDims = true; + let { mean, variance } = moments(input, this.axis, keepDims); + const broadcastShape = pyListRepeat(1, nDims); + for (const dim of this.axis) { + broadcastShape[dim] = inputShape[dim]; + } + const broadcast = (v) => { + if (v != null && v.shape.length !== nDims) { + return reshape$3(v, broadcastShape); + } + else { + return v; + } + }; + let scale = this.scale ? broadcast(this.gamma.read()) : null; + let offset = this.center ? broadcast(this.beta.read()) : null; + // TODO(https://github.com/tensorflow/tfjs/issues/2120): The tiling below + // is a workaround for the limitation of core's batchNormalization?d don't + // support broadcasting in their gradients. In addition, the tiling is + // necessary to ensure correctness on the browser CPU backend regardless + // of forward or backward computation. Remove this workaround once the + // limitation is addressed. See . 
+ const momentsTiling = []; + const scaleOffsetTiling = []; + for (let i = 0; i < nDims; ++i) { + if (this.axis.indexOf(i) !== -1) { + momentsTiling.push(inputShape[i]); + scaleOffsetTiling.push(1); + } + else { + momentsTiling.push(1); + scaleOffsetTiling.push(inputShape[i]); + } + } + mean = tile$3(mean, momentsTiling); + variance = tile$3(variance, momentsTiling); + if (scale != null) { + scale = tile$3(scale, scaleOffsetTiling); + } + if (offset != null) { + offset = tile$3(offset, scaleOffsetTiling); + } + return batchNormalization$1(input, mean, variance, offset, scale, this.epsilon); + }); + } + getConfig() { + const config = { + axis: this.axis, + epsilon: this.epsilon, + center: this.center, + scale: this.scale, + betaInitializer: serializeInitializer(this.betaInitializer), + gammaInitializer: serializeInitializer(this.gammaInitializer), + betaRegularizer: serializeRegularizer(this.betaRegularizer), + gammaRegularizer: serializeRegularizer(this.gammaRegularizer) + }; + const baseConfig = super.getConfig(); + Object.assign(config, baseConfig); + return config; + } + } + /** @nocollapse */ + LayerNormalization.className = 'LayerNormalization'; + registerClass(LayerNormalization); + + /** + * @license + * Copyright 2018 Google LLC + * + * Use of this source code is governed by an MIT-style + * license that can be found in the LICENSE file or at + * https://opensource.org/licenses/MIT. + * ============================================================================= + */ + /** + * Pads the middle dimension of a 3D tensor. + * + * @param x Input `tf.Tensor` to be padded. + * @param padding `Array` of 2 integers, how many zeros to add at the start and + * end of the middle dimension (i.e., dimension 1). + * @return A padded 3D `tf.Tensor`. 
+ */ + function temporalPadding(x, padding) { + return tidy(() => { + if (x.rank !== 3) { + throw new ValueError(`temporalPadding expects input tensor to be 3-D, but received a ` + + `${x.rank}-D tensor.`); + } + if (padding == null) { + padding = [1, 1]; + } + if (padding.length !== 2) { + throw new ValueError(`temporalPadding expects input padding pattern to be a length-2 ` + + `array, but received a length-${padding.length} array.`); + } + const pattern = [[0, 0], padding, [0, 0]]; + return pad(x, pattern); + }); + } + /** + * Pads the 2nd and 3rd dimensions of a 4D tensor. + * + * @param x Input `tf.Tensor` to be padded. + * @param padding `Array` of two `Array`s, each of which is an `Array` of two + * integers. The amount of padding at the beginning and end of the 2nd and 3rd + * dimensions, respectively. + * @param dataFormat 'channelsLast' (default) or 'channelsFirst'. + * @return Padded 4D `tf.Tensor`. + */ + function spatial2dPadding(x, padding, dataFormat) { + return tidy(() => { + if (x.rank !== 4) { + throw new ValueError(`temporalPadding expects input tensor to be 4-D, but received a ` + + `${x.rank}-D tensor.`); + } + if (padding == null) { + padding = [[1, 1], [1, 1]]; + } + if (padding.length !== 2 || padding[0].length !== 2 || + padding[1].length !== 2) { + throw new ValueError('spatial2dPadding expects `padding` to be an Array of two Arrays, ' + + 'each of which is an Array of two integers.'); + } + if (dataFormat == null) { + dataFormat = imageDataFormat(); + } + if (dataFormat !== 'channelsLast' && dataFormat !== 'channelsFirst') { + throw new ValueError(`Unknown data format: ${dataFormat}. 
` + + `Supported data formats are 'channelsLast' and 'channelsFirst.`); + } + let pattern; + if (dataFormat === 'channelsFirst') { + pattern = [[0, 0], [0, 0], padding[0], padding[1]]; + } + else { + pattern = [[0, 0], padding[0], padding[1], [0, 0]]; + } + return pad(x, pattern); + }); + } + class ZeroPadding2D extends Layer { + constructor(args) { + if (args == null) { + args = {}; + } + super(args); + this.dataFormat = + args.dataFormat == null ? imageDataFormat() : args.dataFormat; + // TODO(cais): Maybe refactor the following logic surrounding `padding` + // into a helper method. + if (args.padding == null) { + this.padding = [[1, 1], [1, 1]]; + } + else if (typeof args.padding === 'number') { + this.padding = + [[args.padding, args.padding], [args.padding, args.padding]]; + } + else { + args.padding = args.padding; + if (args.padding.length !== 2) { + throw new ValueError(`ZeroPadding2D expects padding to be a length-2 array, but ` + + `received a length-${args.padding.length} array.`); + } + let heightPadding; + let widthPadding; + if (typeof args.padding[0] === 'number') { + heightPadding = [args.padding[0], args.padding[0]]; + widthPadding = [args.padding[1], args.padding[1]]; + } + else { + args.padding = args.padding; + if (args.padding[0].length !== 2) { + throw new ValueError(`ZeroPadding2D expects height padding to be a length-2 array, ` + + `but received a length-${args.padding[0].length} array.`); + } + heightPadding = args.padding[0]; + if (args.padding[1].length !== 2) { + throw new ValueError(`ZeroPadding2D expects width padding to be a length-2 array, ` + + `but received a length-${args.padding[1].length} array.`); + } + widthPadding = args.padding[1]; + } + this.padding = [heightPadding, widthPadding]; + } + this.inputSpec = [new InputSpec({ ndim: 4 })]; + } + computeOutputShape(inputShape) { + inputShape = getExactlyOneShape(inputShape); + let rows; + let cols; + if (this.dataFormat === 'channelsFirst') { + if (inputShape[2] != null && 
inputShape[2] >= 0) { + rows = inputShape[2] + this.padding[0][0] + this.padding[0][1]; + } + else { + rows = null; + } + if (inputShape[3] != null && inputShape[3] >= 0) { + cols = inputShape[3] + this.padding[1][0] + this.padding[1][1]; + } + else { + cols = null; + } + return [inputShape[0], inputShape[1], rows, cols]; + } + else { + if (inputShape[1] != null && inputShape[1] >= 0) { + rows = inputShape[1] + this.padding[0][0] + this.padding[0][1]; + } + else { + rows = null; + } + if (inputShape[2] != null && inputShape[2] >= 0) { + cols = inputShape[2] + this.padding[1][0] + this.padding[1][1]; + } + else { + cols = null; + } + return [inputShape[0], rows, cols, inputShape[3]]; + } + } + call(inputs, kwargs) { + return tidy(() => spatial2dPadding(getExactlyOneTensor(inputs), this.padding, this.dataFormat)); + } + getConfig() { + const config = { + padding: this.padding, + dataFormat: this.dataFormat, + }; + const baseConfig = super.getConfig(); + Object.assign(config, baseConfig); + return config; + } + } + /** @nocollapse */ + ZeroPadding2D.className = 'ZeroPadding2D'; + registerClass(ZeroPadding2D); + + /** + * @license + * Copyright 2018 Google LLC + * + * Use of this source code is governed by an MIT-style + * license that can be found in the LICENSE file or at + * https://opensource.org/licenses/MIT. + * ============================================================================= + */ + /** + * 2D pooling. + * @param x + * @param poolSize + * @param strides strides. Defaults to [1, 1]. + * @param padding padding. Defaults to 'valid'. + * @param dataFormat data format. Defaults to 'channelsLast'. + * @param poolMode Mode of pooling. Defaults to 'max'. + * @returns Result of the 2D pooling. 
+ */ + function pool2d(x, poolSize, strides, padding, dataFormat, poolMode) { + return tidy(() => { + checkDataFormat(dataFormat); + checkPoolMode(poolMode); + checkPaddingMode(padding); + if (strides == null) { + strides = [1, 1]; + } + if (padding == null) { + padding = 'valid'; + } + if (dataFormat == null) { + dataFormat = imageDataFormat(); + } + if (poolMode == null) { + poolMode = 'max'; + } + // TODO(cais): Remove the preprocessing step once deeplearn.js supports + // dataFormat as an input argument. + x = preprocessConv2DInput(x, dataFormat); // x is NHWC after preprocessing. + let y; + const paddingString = (padding === 'same') ? 'same' : 'valid'; + if (poolMode === 'max') { + // TODO(cais): Rank check? + y = maxPool$2(x, poolSize, strides, paddingString); + } + else { // 'avg' + // TODO(cais): Check the dtype and rank of x and give clear error message + // if those are incorrect. + y = avgPool$2( + // TODO(cais): Rank check? + x, poolSize, strides, paddingString); + } + if (dataFormat === 'channelsFirst') { + y = transpose$2(y, [0, 3, 1, 2]); // NHWC -> NCHW. + } + return y; + }); + } + /** + * 3D pooling. + * @param x + * @param poolSize. Default to [1, 1, 1]. + * @param strides strides. Defaults to [1, 1, 1]. + * @param padding padding. Defaults to 'valid'. + * @param dataFormat data format. Defaults to 'channelsLast'. + * @param poolMode Mode of pooling. Defaults to 'max'. + * @returns Result of the 3D pooling. + */ + function pool3d$1(x, poolSize, strides, padding, dataFormat, poolMode) { + return tidy(() => { + checkDataFormat(dataFormat); + checkPoolMode(poolMode); + checkPaddingMode(padding); + if (strides == null) { + strides = [1, 1, 1]; + } + if (padding == null) { + padding = 'valid'; + } + if (dataFormat == null) { + dataFormat = imageDataFormat(); + } + if (poolMode == null) { + poolMode = 'max'; + } + // x is NDHWC after preprocessing. + x = preprocessConv3DInput(x, dataFormat); + let y; + const paddingString = (padding === 'same') ? 
'same' : 'valid'; + if (poolMode === 'max') { + y = maxPool3d$1(x, poolSize, strides, paddingString); + } + else { // 'avg' + y = avgPool3d$1(x, poolSize, strides, paddingString); + } + if (dataFormat === 'channelsFirst') { + y = transpose$2(y, [0, 4, 1, 2, 3]); // NDHWC -> NCDHW. + } + return y; + }); + } + /** + * Abstract class for different pooling 1D layers. + */ + class Pooling1D extends Layer { + /** + * + * @param args Parameters for the Pooling layer. + * + * config.poolSize defaults to 2. + */ + constructor(args) { + if (args.poolSize == null) { + args.poolSize = 2; + } + super(args); + if (typeof args.poolSize === 'number') { + this.poolSize = [args.poolSize]; + } + else if (Array.isArray(args.poolSize) && + args.poolSize.length === 1 && + typeof args.poolSize[0] === 'number') { + this.poolSize = args.poolSize; + } + else { + throw new ValueError(`poolSize for 1D convolutional layer must be a number or an ` + + `Array of a single number, but received ` + + `${JSON.stringify(args.poolSize)}`); + } + assertPositiveInteger(this.poolSize, 'poolSize'); + if (args.strides == null) { + this.strides = this.poolSize; + } + else { + if (typeof args.strides === 'number') { + this.strides = [args.strides]; + } + else if (Array.isArray(args.strides) && + args.strides.length === 1 && + typeof args.strides[0] === 'number') { + this.strides = args.strides; + } + else { + throw new ValueError(`strides for 1D convolutional layer must be a number or an ` + + `Array of a single number, but received ` + + `${JSON.stringify(args.strides)}`); + } + } + assertPositiveInteger(this.strides, 'strides'); + this.padding = args.padding == null ? 
'valid' : args.padding; + checkPaddingMode(this.padding); + this.inputSpec = [new InputSpec({ ndim: 3 })]; + } + computeOutputShape(inputShape) { + inputShape = getExactlyOneShape(inputShape); + const length = convOutputLength(inputShape[1], this.poolSize[0], this.padding, this.strides[0]); + return [inputShape[0], length, inputShape[2]]; + } + call(inputs, kwargs) { + return tidy(() => { + this.invokeCallHook(inputs, kwargs); + // Add dummy last dimension. + inputs = expandDims$2(getExactlyOneTensor(inputs), 2); + const output = this.poolingFunction(getExactlyOneTensor(inputs), [this.poolSize[0], 1], [this.strides[0], 1], this.padding, 'channelsLast'); + // Remove dummy last dimension. + return squeeze(output, [2]); + }); + } + getConfig() { + const config = { + poolSize: this.poolSize, + padding: this.padding, + strides: this.strides, + }; + const baseConfig = super.getConfig(); + Object.assign(config, baseConfig); + return config; + } + } + class MaxPooling1D extends Pooling1D { + constructor(args) { + super(args); + } + poolingFunction(inputs, poolSize, strides, padding, dataFormat) { + checkDataFormat(dataFormat); + checkPaddingMode(padding); + return pool2d(inputs, poolSize, strides, padding, dataFormat, 'max'); + } + } + /** @nocollapse */ + MaxPooling1D.className = 'MaxPooling1D'; + registerClass(MaxPooling1D); + class AveragePooling1D extends Pooling1D { + constructor(args) { + super(args); + } + poolingFunction(inputs, poolSize, strides, padding, dataFormat) { + checkDataFormat(dataFormat); + checkPaddingMode(padding); + return pool2d(inputs, poolSize, strides, padding, dataFormat, 'avg'); + } + } + /** @nocollapse */ + AveragePooling1D.className = 'AveragePooling1D'; + registerClass(AveragePooling1D); + /** + * Abstract class for different pooling 2D layers. + */ + class Pooling2D extends Layer { + constructor(args) { + if (args.poolSize == null) { + args.poolSize = [2, 2]; + } + super(args); + this.poolSize = Array.isArray(args.poolSize) ? 
+ args.poolSize : + [args.poolSize, args.poolSize]; + if (args.strides == null) { + this.strides = this.poolSize; + } + else if (Array.isArray(args.strides)) { + if (args.strides.length !== 2) { + throw new ValueError(`If the strides property of a 2D pooling layer is an Array, ` + + `it is expected to have a length of 2, but received length ` + + `${args.strides.length}.`); + } + this.strides = args.strides; + } + else { + // `config.strides` is a number. + this.strides = [args.strides, args.strides]; + } + assertPositiveInteger(this.poolSize, 'poolSize'); + assertPositiveInteger(this.strides, 'strides'); + this.padding = args.padding == null ? 'valid' : args.padding; + this.dataFormat = + args.dataFormat == null ? 'channelsLast' : args.dataFormat; + checkDataFormat(this.dataFormat); + checkPaddingMode(this.padding); + this.inputSpec = [new InputSpec({ ndim: 4 })]; + } + computeOutputShape(inputShape) { + inputShape = getExactlyOneShape(inputShape); + let rows = this.dataFormat === 'channelsFirst' ? inputShape[2] : inputShape[1]; + let cols = this.dataFormat === 'channelsFirst' ? 
inputShape[3] : inputShape[2]; + rows = + convOutputLength(rows, this.poolSize[0], this.padding, this.strides[0]); + cols = + convOutputLength(cols, this.poolSize[1], this.padding, this.strides[1]); + if (this.dataFormat === 'channelsFirst') { + return [inputShape[0], inputShape[1], rows, cols]; + } + else { + return [inputShape[0], rows, cols, inputShape[3]]; + } + } + call(inputs, kwargs) { + return tidy(() => { + this.invokeCallHook(inputs, kwargs); + return this.poolingFunction(getExactlyOneTensor(inputs), this.poolSize, this.strides, this.padding, this.dataFormat); + }); + } + getConfig() { + const config = { + poolSize: this.poolSize, + padding: this.padding, + strides: this.strides, + dataFormat: this.dataFormat + }; + const baseConfig = super.getConfig(); + Object.assign(config, baseConfig); + return config; + } + } + class MaxPooling2D extends Pooling2D { + constructor(args) { + super(args); + } + poolingFunction(inputs, poolSize, strides, padding, dataFormat) { + checkDataFormat(dataFormat); + checkPaddingMode(padding); + return pool2d(inputs, poolSize, strides, padding, dataFormat, 'max'); + } + } + /** @nocollapse */ + MaxPooling2D.className = 'MaxPooling2D'; + registerClass(MaxPooling2D); + class AveragePooling2D extends Pooling2D { + constructor(args) { + super(args); + } + poolingFunction(inputs, poolSize, strides, padding, dataFormat) { + checkDataFormat(dataFormat); + checkPaddingMode(padding); + return pool2d(inputs, poolSize, strides, padding, dataFormat, 'avg'); + } + } + /** @nocollapse */ + AveragePooling2D.className = 'AveragePooling2D'; + registerClass(AveragePooling2D); + /** + * Abstract class for different pooling 3D layers. + */ + class Pooling3D extends Layer { + constructor(args) { + if (args.poolSize == null) { + args.poolSize = [2, 2, 2]; + } + super(args); + this.poolSize = Array.isArray(args.poolSize) ? 
+ args.poolSize : + [args.poolSize, args.poolSize, args.poolSize]; + if (args.strides == null) { + this.strides = this.poolSize; + } + else if (Array.isArray(args.strides)) { + if (args.strides.length !== 3) { + throw new ValueError(`If the strides property of a 3D pooling layer is an Array, ` + + `it is expected to have a length of 3, but received length ` + + `${args.strides.length}.`); + } + this.strides = args.strides; + } + else { + // `config.strides` is a number. + this.strides = [args.strides, args.strides, args.strides]; + } + assertPositiveInteger(this.poolSize, 'poolSize'); + assertPositiveInteger(this.strides, 'strides'); + this.padding = args.padding == null ? 'valid' : args.padding; + this.dataFormat = + args.dataFormat == null ? 'channelsLast' : args.dataFormat; + checkDataFormat(this.dataFormat); + checkPaddingMode(this.padding); + this.inputSpec = [new InputSpec({ ndim: 5 })]; + } + computeOutputShape(inputShape) { + inputShape = getExactlyOneShape(inputShape); + let depths = this.dataFormat === 'channelsFirst' ? inputShape[2] : inputShape[1]; + let rows = this.dataFormat === 'channelsFirst' ? inputShape[3] : inputShape[2]; + let cols = this.dataFormat === 'channelsFirst' ? 
inputShape[4] : inputShape[3]; + depths = convOutputLength(depths, this.poolSize[0], this.padding, this.strides[0]); + rows = + convOutputLength(rows, this.poolSize[1], this.padding, this.strides[1]); + cols = + convOutputLength(cols, this.poolSize[2], this.padding, this.strides[2]); + if (this.dataFormat === 'channelsFirst') { + return [inputShape[0], inputShape[1], depths, rows, cols]; + } + else { + return [inputShape[0], depths, rows, cols, inputShape[4]]; + } + } + call(inputs, kwargs) { + return tidy(() => { + this.invokeCallHook(inputs, kwargs); + return this.poolingFunction(getExactlyOneTensor(inputs), this.poolSize, this.strides, this.padding, this.dataFormat); + }); + } + getConfig() { + const config = { + poolSize: this.poolSize, + padding: this.padding, + strides: this.strides, + dataFormat: this.dataFormat + }; + const baseConfig = super.getConfig(); + Object.assign(config, baseConfig); + return config; + } + } + class MaxPooling3D extends Pooling3D { + constructor(args) { + super(args); + } + poolingFunction(inputs, poolSize, strides, padding, dataFormat) { + checkDataFormat(dataFormat); + checkPaddingMode(padding); + return pool3d$1(inputs, poolSize, strides, padding, dataFormat, 'max'); + } + } + /** @nocollapse */ + MaxPooling3D.className = 'MaxPooling3D'; + registerClass(MaxPooling3D); + class AveragePooling3D extends Pooling3D { + constructor(args) { + super(args); + } + poolingFunction(inputs, poolSize, strides, padding, dataFormat) { + checkDataFormat(dataFormat); + checkPaddingMode(padding); + return pool3d$1(inputs, poolSize, strides, padding, dataFormat, 'avg'); + } + } + /** @nocollapse */ + AveragePooling3D.className = 'AveragePooling3D'; + registerClass(AveragePooling3D); + /** + * Abstract class for different global pooling 1D layers. 
+ */ + class GlobalPooling1D extends Layer { + constructor(args) { + super(args); + this.inputSpec = [new InputSpec({ ndim: 3 })]; + } + computeOutputShape(inputShape) { + return [inputShape[0], inputShape[2]]; + } + call(inputs, kwargs) { + throw new NotImplementedError(); + } + } + class GlobalAveragePooling1D extends GlobalPooling1D { + constructor(args) { + super(args || {}); + } + call(inputs, kwargs) { + return tidy(() => { + const input = getExactlyOneTensor(inputs); + return mean$3(input, 1); + }); + } + } + /** @nocollapse */ + GlobalAveragePooling1D.className = 'GlobalAveragePooling1D'; + registerClass(GlobalAveragePooling1D); + class GlobalMaxPooling1D extends GlobalPooling1D { + constructor(args) { + super(args || {}); + } + call(inputs, kwargs) { + return tidy(() => { + const input = getExactlyOneTensor(inputs); + return max$3(input, 1); + }); + } + } + /** @nocollapse */ + GlobalMaxPooling1D.className = 'GlobalMaxPooling1D'; + registerClass(GlobalMaxPooling1D); + /** + * Abstract class for different global pooling 2D layers. + */ + class GlobalPooling2D extends Layer { + constructor(args) { + super(args); + this.dataFormat = + args.dataFormat == null ? 
'channelsLast' : args.dataFormat; + checkDataFormat(this.dataFormat); + this.inputSpec = [new InputSpec({ ndim: 4 })]; + } + computeOutputShape(inputShape) { + inputShape = inputShape; + if (this.dataFormat === 'channelsLast') { + return [inputShape[0], inputShape[3]]; + } + else { + return [inputShape[0], inputShape[1]]; + } + } + call(inputs, kwargs) { + throw new NotImplementedError(); + } + getConfig() { + const config = { dataFormat: this.dataFormat }; + const baseConfig = super.getConfig(); + Object.assign(config, baseConfig); + return config; + } + } + class GlobalAveragePooling2D extends GlobalPooling2D { + call(inputs, kwargs) { + return tidy(() => { + const input = getExactlyOneTensor(inputs); + if (this.dataFormat === 'channelsLast') { + return mean$3(input, [1, 2]); + } + else { + return mean$3(input, [2, 3]); + } + }); + } + } + /** @nocollapse */ + GlobalAveragePooling2D.className = 'GlobalAveragePooling2D'; + registerClass(GlobalAveragePooling2D); + class GlobalMaxPooling2D extends GlobalPooling2D { + call(inputs, kwargs) { + return tidy(() => { + const input = getExactlyOneTensor(inputs); + if (this.dataFormat === 'channelsLast') { + return max$3(input, [1, 2]); + } + else { + return max$3(input, [2, 3]); + } + }); + } + } + /** @nocollapse */ + GlobalMaxPooling2D.className = 'GlobalMaxPooling2D'; + registerClass(GlobalMaxPooling2D); + + /** + * @license + * Copyright 2018 Google LLC + * + * Use of this source code is governed by an MIT-style + * license that can be found in the LICENSE file or at + * https://opensource.org/licenses/MIT. + * ============================================================================= + */ + /** + * Abstract wrapper base class. + * + * Wrappers take another layer and augment it in various ways. + * Do not use this class as a layer, it is only an abstract base class. + * Two usable wrappers are the `TimeDistributed` and `Bidirectional` wrappers. 
+ */ + class Wrapper extends Layer { + constructor(args) { + // Porting Note: In PyKeras, `self.layer` is set prior to the calling + // `super()`. But we can't do that here due to TypeScript's restriction. + // See: https://github.com/Microsoft/TypeScript/issues/8277 + // As a result, we have to add checks in `get trainable()` and + // `set trainable()` below in order to prevent using `this.layer` when + // its value is `undefined`. The super constructor does use the getter + // and the setter of `this.layer`. + super(args); + this.layer = args.layer; + } + build(inputShape) { + this.built = true; + } + // TODO(cais): Implement activityRegularizer getter. + get trainable() { + // Porting Note: the check of `this.layer` here is necessary due to the + // way the `constructor` of this class is written (see Porting Note + // above). + if (this.layer != null) { + return this.layer.trainable; + } + else { + return false; + } + } + set trainable(value) { + // Porting Note: the check of `this.layer` here is necessary due to the + // way the `constructor` of this class is written (see Porting Note + // above). + if (this.layer != null) { + this.layer.trainable = value; + } + } + get trainableWeights() { + return this.layer.trainableWeights; + } + // TODO(cais): Implement setter for trainableWeights. + get nonTrainableWeights() { + return this.layer.nonTrainableWeights; + } + // TODO(cais): Implement setter for nonTrainableWeights. + get updates() { + // tslint:disable-next-line:no-any + return this.layer._updates; + } + // TODO(cais): Implement getUpdatesFor(). + get losses() { + return this.layer.losses; + } + // TODO(cais): Implement getLossesFor(). 
+ getWeights() { + return this.layer.getWeights(); + } + setWeights(weights) { + this.layer.setWeights(weights); + } + getConfig() { + const config = { + 'layer': { + 'className': this.layer.getClassName(), + 'config': this.layer.getConfig(), + } + }; + const baseConfig = super.getConfig(); + Object.assign(config, baseConfig); + return config; + } + setFastWeightInitDuringBuild(value) { + super.setFastWeightInitDuringBuild(value); + if (this.layer != null) { + this.layer.setFastWeightInitDuringBuild(value); + } + } + /** @nocollapse */ + static fromConfig(cls, config, customObjects = {}) { + const layerConfig = config['layer']; + const layer = deserialize(layerConfig, customObjects); + delete config['layer']; + const newConfig = { layer }; + Object.assign(newConfig, config); + return new cls(newConfig); + } + } + class TimeDistributed extends Wrapper { + constructor(args) { + super(args); + this.supportsMasking = true; + } + build(inputShape) { + inputShape = getExactlyOneShape(inputShape); + if (inputShape.length < 3) { + throw new ValueError(`TimeDistributed layer expects an input shape >= 3D, but received ` + + `input shape ${JSON.stringify(inputShape)}`); + } + this.inputSpec = [{ shape: inputShape }]; + const childInputShape = [inputShape[0]].concat(inputShape.slice(2)); + if (!this.layer.built) { + this.layer.build(childInputShape); + this.layer.built = true; + } + super.build(inputShape); + } + computeOutputShape(inputShape) { + inputShape = getExactlyOneShape(inputShape); + const childInputShape = [inputShape[0]].concat(inputShape.slice(2)); + const childOutputShape = this.layer.computeOutputShape(childInputShape); + const timesteps = inputShape[1]; + return [childOutputShape[0], timesteps].concat(childOutputShape.slice(1)); + } + call(inputs, kwargs) { + return tidy(() => { + // TODO(cais): Add 'training' and 'useLearningPhase' to kwargs. 
+ inputs = getExactlyOneTensor(inputs); + // Porting Note: In tfjs-layers, `inputs` are always concrete tensor + // values. Hence the inputs can't have an undetermined first (batch) + // dimension, which is why we always use the K.rnn approach here. + const step = (inputs, states) => { + // TODO(cais): Add useLearningPhase. + // NOTE(cais): `layer.call` may return a length-1 array of Tensor in + // some cases (e.g., `layer` is a `Sequential` instance), which is + // why `getExactlyOneTensor` is used below. + const output = getExactlyOneTensor(this.layer.call(inputs, kwargs)); + return [output, []]; + }; + const rnnOutputs = rnn$1(step, inputs, [], false /* goBackwards */, null /* mask */, null /* constants */, false /* unroll */, true /* needPerStepOutputs */); + const y = rnnOutputs[1]; + // TODO(cais): Add activity regularization. + // TODO(cais): Add useLearningPhase. + return y; + }); + } + } + /** @nocollapse */ + TimeDistributed.className = 'TimeDistributed'; + registerClass(TimeDistributed); + function checkBidirectionalMergeMode(value) { + checkStringTypeUnionValue(VALID_BIDIRECTIONAL_MERGE_MODES, 'BidirectionalMergeMode', value); + } + const DEFAULT_BIDIRECTIONAL_MERGE_MODE = 'concat'; + class Bidirectional extends Wrapper { + constructor(args) { + super(args); + // Note: When creating `this.forwardLayer`, the original Layer object + // (`config.layer`) ought to be cloned. This is why we call + // `getConfig()` followed by `deserialize()`. Without this cloning, + // the layer names saved during serialization will incorrectly contain + // the 'forward_' prefix. In Python Keras, this is done using + // `copy.copy` (shallow copy), which does not have a simple equivalent + // in JavaScript. JavaScript's `Object.assign()` does not copy + // methods. 
+ const layerConfig = args.layer.getConfig(); + const forwDict = {}; + forwDict['className'] = args.layer.getClassName(); + forwDict['config'] = layerConfig; + this.forwardLayer = deserialize(forwDict); + layerConfig['goBackwards'] = + layerConfig['goBackwards'] === true ? false : true; + const backDict = {}; + backDict['className'] = args.layer.getClassName(); + backDict['config'] = layerConfig; + this.backwardLayer = deserialize(backDict); + this.forwardLayer.name = 'forward_' + this.forwardLayer.name; + this.backwardLayer.name = 'backward_' + this.backwardLayer.name; + this.mergeMode = args.mergeMode === undefined ? + DEFAULT_BIDIRECTIONAL_MERGE_MODE : + args.mergeMode; + checkBidirectionalMergeMode(this.mergeMode); + if (args.weights) { + throw new NotImplementedError('weights support is not implemented for Bidirectional layer yet.'); + } + this._stateful = args.layer.stateful; + this.returnSequences = args.layer.returnSequences; + this.returnState = args.layer.returnState; + this.supportsMasking = true; + this._trainable = true; + this.inputSpec = args.layer.inputSpec; + this.numConstants = null; + } + get trainable() { + return this._trainable; + } + set trainable(value) { + // Porting Note: the check of `this.layer` here is necessary due to the + // way the `constructor` of this class is written (see Porting Note + // above). 
+ this._trainable = value; + if (this.forwardLayer != null) { + this.forwardLayer.trainable = value; + } + if (this.backwardLayer != null) { + this.backwardLayer.trainable = value; + } + } + getWeights() { + return this.forwardLayer.getWeights().concat(this.backwardLayer.getWeights()); + } + setWeights(weights) { + const numWeights = weights.length; + const numeightsOver2 = Math.floor(numWeights / 2); + this.forwardLayer.setWeights(weights.slice(0, numeightsOver2)); + this.backwardLayer.setWeights(weights.slice(numeightsOver2)); + } + computeOutputShape(inputShape) { + let layerShapes = this.forwardLayer.computeOutputShape(inputShape); + if (!(Array.isArray(layerShapes) && Array.isArray(layerShapes[0]))) { + layerShapes = [layerShapes]; + } + layerShapes = layerShapes; + let outputShape; + let outputShapes; + let stateShape; + if (this.returnState) { + stateShape = layerShapes.slice(1); + outputShape = layerShapes[0]; + } + else { + outputShape = layerShapes[0]; + } + outputShape = outputShape; + if (this.mergeMode === 'concat') { + outputShape[outputShape.length - 1] *= 2; + outputShapes = [outputShape]; + } + else if (this.mergeMode == null) { + outputShapes = [outputShape, outputShape.slice()]; + } + else { + outputShapes = [outputShape]; + } + if (this.returnState) { + if (this.mergeMode == null) { + return outputShapes.concat(stateShape).concat(stateShape.slice()); + } + return [outputShape].concat(stateShape).concat(stateShape.slice()); + } + return singletonOrArray(outputShapes); + } + apply(inputs, kwargs) { + let initialState = kwargs == null ? null : kwargs['initialState']; + let constants = kwargs == null ? 
null : kwargs['constants']; + if (kwargs == null) { + kwargs = {}; + } + const standardized = standardizeArgs(inputs, initialState, constants, this.numConstants); + inputs = standardized.inputs; + initialState = standardized.initialState; + constants = standardized.constants; + if (Array.isArray(inputs)) { + initialState = inputs.slice(1); + inputs = inputs[0]; + } + if ((initialState == null || initialState.length === 0) && + constants == null) { + return super.apply(inputs, kwargs); + } + const additionalInputs = []; + const additionalSpecs = []; + if (initialState != null) { + const numStates = initialState.length; + if (numStates % 2 > 0) { + throw new ValueError('When passing `initialState` to a Bidrectional RNN, ' + + 'the state should be an Array containing the states of ' + + 'the underlying RNNs.'); + } + kwargs['initialState'] = initialState; + additionalInputs.push(...initialState); + const stateSpecs = initialState + .map(state => new InputSpec({ shape: state.shape })); + this.forwardLayer.stateSpec = stateSpecs.slice(0, numStates / 2); + this.backwardLayer.stateSpec = stateSpecs.slice(numStates / 2); + additionalSpecs.push(...stateSpecs); + } + if (constants != null) { + throw new NotImplementedError('Support for constants in Bidirectional layers is not ' + + 'implemented yet.'); + } + const isSymbolicTensor = additionalInputs[0] instanceof SymbolicTensor; + for (const tensor of additionalInputs) { + if (tensor instanceof SymbolicTensor !== isSymbolicTensor) { + throw new ValueError('The initial state of a Bidirectional layer cannot be ' + + 'specified as a mix of symbolic and non-symbolic tensors'); + } + } + if (isSymbolicTensor) { + // Compute the full input and specs, including the states. + const fullInput = [inputs].concat(additionalInputs); + const fullInputSpec = this.inputSpec.concat(additionalSpecs); + // Perform the call temporarily and replace inputSpec. 
+ // Note: with initial states symbolic calls and non-symbolic calls to + // this method differ in how the initial states are passed. For + // symbolic calls, the initial states are passed in the first arg, as + // an Array of SymbolicTensors; for non-symbolic calls, they are + // passed in the second arg as a part of the kwargs. Hence the need to + // temporarily modify inputSpec here. + // TODO(cais): Make refactoring so that this hacky code below is no + // longer needed. + const originalInputSpec = this.inputSpec; + this.inputSpec = fullInputSpec; + const output = super.apply(fullInput, kwargs); + this.inputSpec = originalInputSpec; + return output; + } + else { + return super.apply(inputs, kwargs); + } + } + call(inputs, kwargs) { + return tidy(() => { + const initialState = kwargs['initialState']; + let y; + let yRev; + if (initialState == null) { + y = this.forwardLayer.call(inputs, kwargs); + yRev = this.backwardLayer.call(inputs, kwargs); + } + else { + const forwardState = initialState.slice(0, initialState.length / 2); + const backwardState = initialState.slice(initialState.length / 2); + y = this.forwardLayer.call(inputs, Object.assign(kwargs, { initialState: forwardState })); + yRev = this.backwardLayer.call(inputs, Object.assign(kwargs, { initialState: backwardState })); + } + let states; + if (this.returnState) { + if (Array.isArray(y)) { + states = y.slice(1).concat(yRev.slice(1)); + } + else { + } + y = y[0]; + yRev = yRev[0]; + } + if (this.returnSequences) { + yRev = reverse$2(yRev, 1); + } + let output; + if (this.mergeMode === 'concat') { + output = concatenate$2([y, yRev]); + } + else if (this.mergeMode === 'sum') { + output = add$3(y, yRev); + } + else if (this.mergeMode === 'ave') { + output = mul(.5, add$3(y, yRev)); + } + else if (this.mergeMode === 'mul') { + output = mul(y, yRev); + } + else if (this.mergeMode == null) { + output = [y, yRev]; + } + // TODO(cais): Properly set learning phase. 
+ if (this.returnState) { + if (this.mergeMode == null) { + return output.concat(states); + } + return [output].concat(states); + } + return output; + }); + } + resetStates(states) { + this.forwardLayer.resetStates(); + this.backwardLayer.resetStates(); + } + build(inputShape) { + nameScope(this.forwardLayer.name, () => { + this.forwardLayer.build(inputShape); + }); + nameScope(this.backwardLayer.name, () => { + this.backwardLayer.build(inputShape); + }); + this.built = true; + } + computeMask(inputs, mask) { + if (Array.isArray(mask)) { + mask = mask[0]; + } + let outputMask; + if (this.returnSequences) { + if (this.mergeMode == null) { + outputMask = [mask, mask]; + } + else { + outputMask = mask; + } + } + else { + if (this.mergeMode == null) { + outputMask = [null, null]; + } + else { + outputMask = null; + } + } + if (this.returnState) { + const states = this.forwardLayer.states; + const stateMask = states.map(state => null); + if (Array.isArray(outputMask)) { + return outputMask.concat(stateMask).concat(stateMask); + } + else { + return [outputMask].concat(stateMask).concat(stateMask); + } + } + else { + return outputMask; + } + } + get trainableWeights() { + return this.forwardLayer.trainableWeights.concat(this.backwardLayer.trainableWeights); + } + get nonTrainableWeights() { + return this.forwardLayer.nonTrainableWeights.concat(this.backwardLayer.nonTrainableWeights); + } + // TODO(cais): Implement constraints(). + setFastWeightInitDuringBuild(value) { + super.setFastWeightInitDuringBuild(value); + if (this.forwardLayer != null) { + this.forwardLayer.setFastWeightInitDuringBuild(value); + } + if (this.backwardLayer != null) { + this.backwardLayer.setFastWeightInitDuringBuild(value); + } + } + getConfig() { + const config = { + 'mergeMode': this.mergeMode, + }; + // TODO(cais): Add logic for `numConstants` once the property is added. 
+ const baseConfig = super.getConfig(); + Object.assign(config, baseConfig); + return config; + } + /** @nocollapse */ + static fromConfig(cls, config) { + const rnnLayer = deserialize(config['layer']); + delete config['layer']; + // TODO(cais): Add logic for `numConstants` once the property is added. + if (config['numConstants'] != null) { + throw new NotImplementedError(`Deserialization of a Bidirectional layer with numConstants ` + + `present is not supported yet.`); + } + // tslint:disable-next-line:no-any + const newConfig = config; + newConfig['layer'] = rnnLayer; + return new cls(newConfig); + } + } + /** @nocollapse */ + Bidirectional.className = 'Bidirectional'; + registerClass(Bidirectional); + + /** + * @license + * Copyright 2022 CodeSmith LLC + * + * Use of this source code is governed by an MIT-style + * license that can be found in the LICENSE file or at + * https://opensource.org/licenses/MIT. + * ============================================================================= + */ + /** + * Preprocessing Rescaling Layer + * + * This rescales images by a scaling and offset factor + */ + class Rescaling extends Layer { + constructor(args) { + super(args); + this.scale = args.scale; + if (args.offset) { + this.offset = args.offset; + } + else { + this.offset = 0; + } + } + getConfig() { + const config = { + 'scale': this.scale, + 'offset': this.offset + }; + const baseConfig = super.getConfig(); + Object.assign(config, baseConfig); + return config; + } + call(inputs, kwargs) { + return tidy(() => { + inputs = getExactlyOneTensor(inputs); + if (inputs.dtype !== 'float32') { + inputs = cast$2(inputs, 'float32'); + } + return add$3(mul(inputs, this.scale), this.offset); + }); + } + } + /** @nocollapse */ + Rescaling.className = 'Rescaling'; + registerClass(Rescaling); + + /** + * @license + * Copyright 2022 CodeSmith LLC + * + * Use of this source code is governed by an MIT-style + * license that can be found in the LICENSE file or at + * 
https://opensource.org/licenses/MIT. + * ============================================================================= + */ + const { resizeBilinear: resizeBilinear$2, cropAndResize: cropAndResize$2 } = image$1; + class CenterCrop extends Layer { + constructor(args) { + super(args); + this.height = args.height; + this.width = args.width; + } + centerCrop(inputs, hBuffer, wBuffer, height, width, inputHeight, inputWidth, dtype) { + return tidy(() => { + let input; + let isRank3 = false; + const top = hBuffer / inputHeight; + const left = wBuffer / inputWidth; + const bottom = ((height) + hBuffer) / inputHeight; + const right = ((width) + wBuffer) / inputWidth; + const bound = [top, left, bottom, right]; + const boxesArr = []; + if (inputs.rank === 3) { + isRank3 = true; + input = stack([inputs]); + } + else { + input = inputs; + } + for (let i = 0; i < input.shape[0]; i++) { + boxesArr.push(bound); + } + const boxes = tensor(boxesArr, [boxesArr.length, 4]); + const boxInd = range$3(0, boxesArr.length, 1, 'int32'); + const cropSize = [height, width]; + const cropped = cropAndResize$2(input, boxes, boxInd, cropSize, 'nearest'); + if (isRank3) { + return cast$2(getExactlyOneTensor(unstack(cropped)), dtype); + } + return cast$2(cropped, dtype); + }); + } + upsize(inputs, height, width, dtype) { + return tidy(() => { + const outputs = resizeBilinear$2(inputs, [height, width]); + return cast$2(outputs, dtype); + }); + } + call(inputs, kwargs) { + return tidy(() => { + const rankedInputs = getExactlyOneTensor(inputs); + const dtype = rankedInputs.dtype; + const inputShape = rankedInputs.shape; + const inputHeight = inputShape[inputShape.length - 3]; + const inputWidth = inputShape[inputShape.length - 2]; + let hBuffer = 0; + if (inputHeight !== this.height) { + hBuffer = Math.floor((inputHeight - this.height) / 2); + } + let wBuffer = 0; + if (inputWidth !== this.width) { + wBuffer = Math.floor((inputWidth - this.width) / 2); + if (wBuffer === 0) { + wBuffer = 1; + } + } + 
if (hBuffer >= 0 && wBuffer >= 0) { + return this.centerCrop(rankedInputs, hBuffer, wBuffer, this.height, this.width, inputHeight, inputWidth, dtype); + } + else { + return this.upsize(inputs, this.height, this.width, dtype); + } + }); + } + getConfig() { + const config = { + 'height': this.height, + 'width': this.width + }; + const baseConfig = super.getConfig(); + Object.assign(config, baseConfig); + return config; + } + computeOutputShape(inputShape) { + inputShape = getExactlyOneShape(inputShape); + const hAxis = inputShape.length - 3; + const wAxis = inputShape.length - 2; + inputShape[hAxis] = this.height; + inputShape[wAxis] = this.width; + return inputShape; + } + } + /** @nocollapse */ + CenterCrop.className = 'CenterCrop'; + registerClass(CenterCrop); + + /** + * @license + * Copyright 2022 CodeSmith LLC + * + * Use of this source code is governed by an MIT-style + * license that can be found in the LICENSE file or at + * https://opensource.org/licenses/MIT. + * ============================================================================= + */ + function encodeCategoricalInputs(inputs, outputMode, depth, weights) { + let input = getExactlyOneTensor(inputs); + if (input.dtype !== 'int32') { + input = cast$2(input, 'int32'); + } + if (outputMode === 'int') { + return input; + } + const originalShape = input.shape; + if (input.rank === 0) { + input = expandDims$3(input, -1); + } + if (outputMode === 'oneHot') { + if (input.shape[input.shape.length - 1] !== 1) { + input = expandDims$3(input, -1); + } + } + if (input.rank > 2) { + throw new ValueError(`When outputMode is not int, maximum output rank is 2` + + ` Received outputMode ${outputMode} and input shape ${originalShape}` + + ` which would result in output rank ${input.rank}.`); + } + const binaryOutput = ['multiHot', 'oneHot'].includes(outputMode); + const denseBincountInput = input; + let binCounts; + if ((typeof weights) !== 'undefined' && outputMode === 'count') { + binCounts = 
denseBincount$2(denseBincountInput, weights, depth, binaryOutput); + } + else { + binCounts = denseBincount$2(denseBincountInput, [], depth, binaryOutput); + } + if (outputMode !== 'tfIdf') { + return binCounts; + } + if (weights) { + return mul(binCounts, weights); + } + else { + throw new ValueError(`When outputMode is 'tfIdf', weights must be provided.`); + } + } + + /** + * @license + * Copyright 2022 CodeSmith LLC + * + * Use of this source code is governed by an MIT-style + * license that can be found in the LICENSE file or at + * https://opensource.org/licenses/MIT. + * ============================================================================= + */ + class CategoryEncoding extends Layer { + constructor(args) { + super(args); + this.numTokens = args.numTokens; + if (args.outputMode) { + this.outputMode = args.outputMode; + } + else { + this.outputMode = 'multiHot'; + } + } + getConfig() { + const config = { + 'numTokens': this.numTokens, + 'outputMode': this.outputMode, + }; + const baseConfig = super.getConfig(); + Object.assign(config, baseConfig); + return config; + } + computeOutputShape(inputShape) { + inputShape = getExactlyOneShape(inputShape); + if (inputShape == null) { + return [this.numTokens]; + } + if (this.outputMode === 'oneHot' && inputShape[inputShape.length - 1] !== 1) { + inputShape.push(this.numTokens); + return inputShape; + } + inputShape[inputShape.length - 1] = this.numTokens; + return inputShape; + } + call(inputs, kwargs) { + return tidy(() => { + inputs = getExactlyOneTensor(inputs); + if (inputs.dtype !== 'int32') { + inputs = cast$2(inputs, 'int32'); + } + let countWeights; + if ((typeof kwargs['countWeights']) !== 'undefined') { + if (this.outputMode !== 'count') { + throw new ValueError(`countWeights is not used when outputMode !== count. 
+ Received countWeights=${kwargs['countWeights']}`); + } + countWeights + = getExactlyOneTensor(kwargs['countWeights']); + } + const maxValue = max$3(inputs); + const minValue = min$3(inputs); + const greaterEqualMax = greater$3(this.numTokens, maxValue) + .bufferSync().get(0); + const greaterMin = greaterEqual$2(minValue, 0).bufferSync().get(0); + if (!(greaterEqualMax && greaterMin)) { + throw new ValueError('Input values must be between 0 < values <=' + + ` numTokens with numTokens=${this.numTokens}`); + } + return encodeCategoricalInputs(inputs, this.outputMode, this.numTokens, countWeights); + }); + } + } + /** @nocollapse */ + CategoryEncoding.className = 'CategoryEncoding'; + registerClass(CategoryEncoding); + + /** + * @license + * Copyright 2022 CodeSmith LLC + * + * Use of this source code is governed by an MIT-style + * license that can be found in the LICENSE file or at + * https://opensource.org/licenses/MIT. + * ============================================================================= + */ + // tf methods unimplemented in tfjs: 'bicubic', 'area', 'lanczos3', 'lanczos5', + // 'gaussian', 'mitchellcubic' + const INTERPOLATION_KEYS$1 = ['bilinear', 'nearest']; + const INTERPOLATION_METHODS$1 = new Set(INTERPOLATION_KEYS$1); + /** + * Preprocessing Resizing Layer + * + * This resizes images by a scaling and offset factor + */ + class Resizing extends Layer { + constructor(args) { + super(args); + this.height = args.height; + this.width = args.width; + if (args.interpolation) { + if (INTERPOLATION_METHODS$1.has(args.interpolation)) { + this.interpolation = args.interpolation; + } + else { + throw new ValueError(`Invalid interpolation parameter: ${args.interpolation} is not implemented`); + } + } + else { + this.interpolation = 'bilinear'; + } + this.cropToAspectRatio = Boolean(args.cropToAspectRatio); + } + computeOutputShape(inputShape) { + inputShape = getExactlyOneShape(inputShape); + const numChannels = inputShape[2]; + return [this.height, 
this.width, numChannels]; + } + getConfig() { + const config = { + 'height': this.height, + 'width': this.width, + 'interpolation': this.interpolation, + 'cropToAspectRatio': this.cropToAspectRatio + }; + const baseConfig = super.getConfig(); + Object.assign(config, baseConfig); + return config; + } + call(inputs, kwargs) { + return tidy(() => { + const size = [this.height, this.width]; + if (this.interpolation === 'bilinear') { + return image$1.resizeBilinear(inputs, size, !this.cropToAspectRatio); + } + else if (this.interpolation === 'nearest') { + return image$1.resizeNearestNeighbor(inputs, size, !this.cropToAspectRatio); + } + else { + throw new Error(`Interpolation is ${this.interpolation} but only ${[...INTERPOLATION_METHODS$1]} are supported`); + } + }); + } + } + /** @nocollapse */ + Resizing.className = 'Resizing'; + registerClass(Resizing); + + /** + * @license + * Copyright 2023 CodeSmith LLC + * + * Use of this source code is governed by an MIT-style + * license that can be found in the LICENSE file or at + * https://opensource.org/licenses/MIT. + * ============================================================================= + */ + /** + * Keeps track of seed and handles pseudorandomness + * Instance created in BaseRandomLayer class + * Utilized for random preprocessing layers + */ + class RandomSeed { + constructor(seed) { + this.seed = seed; + } + next() { + if (this.seed === undefined) { + return undefined; + } + return this.seed++; + } + } + RandomSeed.className = 'RandomSeed'; + + /** + * @license + * Copyright 2023 CodeSmith LLC + * + * Use of this source code is governed by an MIT-style + * license that can be found in the LICENSE file or at + * https://opensource.org/licenses/MIT. 
+ * ============================================================================= + */ + class BaseRandomLayer extends Layer { + constructor(args) { + super(args); + this.randomGenerator = new RandomSeed(args.seed); + } + getConfig() { + const config = { + 'seed': this.randomGenerator.seed + }; + const baseConfig = super.getConfig(); + Object.assign(config, baseConfig); + return config; + } + } + // A layer handle the random number creation and savemodel behavior. + /** @nocollapse */ + BaseRandomLayer.className = 'BaseRandomLayer'; + + /** + * @license + * Copyright 2023 CodeSmith LLC + * + * Use of this source code is governed by an MIT-style + * license that can be found in the LICENSE file or at + * https://opensource.org/licenses/MIT. + * ============================================================================= + */ + const INTERPOLATION_KEYS = ['bilinear', 'nearest']; + const INTERPOLATION_METHODS = new Set(INTERPOLATION_KEYS); + /** + * Preprocessing Layer with randomly varies image during training + * + * This layer randomly adjusts the width of a batch of images of a + * batch of images by a random factor. + * + * The input should be a 3D (unbatched) or + * 4D (batched) tensor in the `"channels_last"` image data format. Input pixel + * values can be of any range (e.g. `[0., 1.)` or `[0, 255]`) and of integer + * or floating point dtype. By default, the layer will output floats. 
+ * + * tf methods implemented in tfjs: 'bilinear', 'nearest', + * tf methods unimplemented in tfjs: 'bicubic', 'area', 'lanczos3', 'lanczos5', + * 'gaussian', 'mitchellcubic' + * + */ + class RandomWidth extends BaseRandomLayer { + constructor(args) { + super(args); + const { factor, interpolation = 'bilinear' } = args; + this.factor = factor; + if (Array.isArray(this.factor) && this.factor.length === 2) { + this.widthLower = this.factor[0]; + this.widthUpper = this.factor[1]; + } + else if (!Array.isArray(this.factor) && this.factor > 0) { + this.widthLower = -this.factor; + this.widthUpper = this.factor; + } + else { + throw new ValueError(`Invalid factor: ${this.factor}. Must be positive number or tuple of 2 numbers`); + } + if (this.widthLower < -1.0 || this.widthUpper < -1.0) { + throw new ValueError(`factor must have values larger than -1. Got: ${this.factor}`); + } + if (this.widthUpper < this.widthLower) { + throw new ValueError(`factor cannot have upper bound less than lower bound. + Got upper bound: ${this.widthUpper}. 
+ Got lower bound: ${this.widthLower} + `); + } + if (interpolation) { + if (INTERPOLATION_METHODS.has(interpolation)) { + this.interpolation = interpolation; + } + else { + throw new ValueError(`Invalid interpolation parameter: ${interpolation} is not implemented`); + } + } + } + getConfig() { + const config = { + 'factor': this.factor, + 'interpolation': this.interpolation, + }; + const baseConfig = super.getConfig(); + Object.assign(config, baseConfig); + return config; + } + computeOutputShape(inputShape) { + inputShape = getExactlyOneShape(inputShape); + const numChannels = inputShape[2]; + return [this.imgHeight, -1, numChannels]; + } + call(inputs, kwargs) { + return tidy(() => { + const input = getExactlyOneTensor(inputs); + this.imgHeight = input.shape[input.shape.length - 3]; + const imgWidth = input.shape[input.shape.length - 2]; + this.widthFactor = randomUniform$1([1], (1.0 + this.widthLower), (1.0 + this.widthUpper), 'float32', this.randomGenerator.next()); + let adjustedWidth = this.widthFactor.dataSync()[0] * imgWidth; + adjustedWidth = Math.round(adjustedWidth); + const size = [this.imgHeight, adjustedWidth]; + switch (this.interpolation) { + case 'bilinear': + return image$1.resizeBilinear(inputs, size); + case 'nearest': + return image$1.resizeNearestNeighbor(inputs, size); + default: + throw new Error(`Interpolation is ${this.interpolation} + but only ${[...INTERPOLATION_METHODS]} are supported`); + } + }); + } + } + /** @nocollapse */ + RandomWidth.className = 'RandomWidth'; + registerClass(RandomWidth); + + /** + * @license + * Copyright 2018 Google LLC + * + * Use of this source code is governed by an MIT-style + * license that can be found in the LICENSE file or at + * https://opensource.org/licenses/MIT. 
+ * ============================================================================= + */ + // TODO(cais): Add doc string to all the public static functions in this + // class; include exectuable JavaScript code snippets where applicable + // (b/74074458). + // Input Layer. + /** + * An input layer is an entry point into a `tf.LayersModel`. + * + * `InputLayer` is generated automatically for `tf.Sequential` models by + * specifying the `inputshape` or `batchInputShape` for the first layer. It + * should not be specified explicitly. However, it can be useful sometimes, + * e.g., when constructing a sequential model from a subset of another + * sequential model's layers. Like the code snippet below shows. + * + * ```js + * // Define a model which simply adds two inputs. + * const model1 = tf.sequential(); + * model1.add(tf.layers.dense({inputShape: [4], units: 3, activation: 'relu'})); + * model1.add(tf.layers.dense({units: 1, activation: 'sigmoid'})); + * model1.summary(); + * model1.predict(tf.zeros([1, 4])).print(); + * + * // Construct another model, reusing the second layer of `model1` while + * // not using the first layer of `model1`. Note that you cannot add the second + * // layer of `model` directly as the first layer of the new sequential model, + * // because doing so will lead to an error related to the fact that the layer + * // is not an input layer. Instead, you need to create an `inputLayer` and add + * // it to the new sequential model before adding the reused layer. + * const model2 = tf.sequential(); + * // Use an inputShape that matches the input shape of `model1`'s second + * // layer. + * model2.add(tf.layers.inputLayer({inputShape: [3]})); + * model2.add(model1.layers[1]); + * model2.summary(); + * model2.predict(tf.zeros([1, 3])).print(); + * ``` + * + * @doc {heading: 'Layers', subheading: 'Inputs', namespace: 'layers'} + */ + function inputLayer(args) { + return new InputLayer(args); + } + // Advanced Activation Layers. 
+ /** + * Exponential Linear Unit (ELU). + * + * It follows: + * `f(x) = alpha * (exp(x) - 1.) for x < 0`, + * `f(x) = x for x >= 0`. + * + * Input shape: + * Arbitrary. Use the configuration `inputShape` when using this layer as the + * first layer in a model. + * + * Output shape: + * Same shape as the input. + * + * References: + * - [Fast and Accurate Deep Network Learning by Exponential Linear Units + * (ELUs)](https://arxiv.org/abs/1511.07289v1) + * + * @doc { + * heading: 'Layers', + * subheading: 'Advanced Activation', + * namespace: 'layers' + * } + */ + function elu$2(args) { + return new ELU$3(args); + } + /** + * Rectified Linear Unit activation function. + * + * Input shape: + * Arbitrary. Use the config field `inputShape` (Array of integers, does + * not include the sample axis) when using this layer as the first layer + * in a model. + * + * Output shape: + * Same shape as the input. + * + * @doc { + * heading: 'Layers', + * subheading: 'Advanced Activation', + * namespace: 'layers' + * } + */ + function reLU(args) { + return new ReLU(args); + } + /** + * Leaky version of a rectified linear unit. + * + * It allows a small gradient when the unit is not active: + * `f(x) = alpha * x for x < 0.` + * `f(x) = x for x >= 0.` + * + * Input shape: + * Arbitrary. Use the configuration `inputShape` when using this layer as the + * first layer in a model. + * + * Output shape: + * Same shape as the input. + * + * @doc { + * heading: 'Layers', + * subheading: 'Advanced Activation', + * namespace: 'layers' + * } + */ + function leakyReLU(args) { + return new LeakyReLU(args); + } + /** + * Parameterized version of a leaky rectified linear unit. + * + * It follows + * `f(x) = alpha * x for x < 0.` + * `f(x) = x for x >= 0.` + * wherein `alpha` is a trainable weight. + * + * Input shape: + * Arbitrary. Use the configuration `inputShape` when using this layer as the + * first layer in a model. + * + * Output shape: + * Same shape as the input. 
+ * + * @doc { + * heading: 'Layers', + * subheading: 'Advanced Activation', + * namespace: 'layers' + * } + */ + function prelu$2(args) { + return new PReLU(args); + } + /** + * Softmax activation layer. + * + * Input shape: + * Arbitrary. Use the configuration `inputShape` when using this layer as the + * first layer in a model. + * + * Output shape: + * Same shape as the input. + * + * @doc { + * heading: 'Layers', + * subheading: 'Advanced Activation', + * namespace: 'layers' + * } + */ + function softmax$2(args) { + return new Softmax(args); + } + /** + * Thresholded Rectified Linear Unit. + * + * It follows: + * `f(x) = x for x > theta`, + * `f(x) = 0 otherwise`. + * + * Input shape: + * Arbitrary. Use the configuration `inputShape` when using this layer as the + * first layer in a model. + * + * Output shape: + * Same shape as the input. + * + * References: + * - [Zero-Bias Autoencoders and the Benefits of Co-Adapting + * Features](http://arxiv.org/abs/1402.3337) + * + * @doc { + * heading: 'Layers', + * subheading: 'Advanced Activation', + * namespace: 'layers' + * } + */ + function thresholdedReLU(args) { + return new ThresholdedReLU(args); + } + // Convolutional Layers. + /** + * 1D convolution layer (e.g., temporal convolution). + * + * This layer creates a convolution kernel that is convolved + * with the layer input over a single spatial (or temporal) dimension + * to produce a tensor of outputs. + * + * If `use_bias` is True, a bias vector is created and added to the outputs. + * + * If `activation` is not `null`, it is applied to the outputs as well. + * + * When using this layer as the first layer in a model, provide an + * `inputShape` argument `Array` or `null`. + * + * For example, `inputShape` would be: + * - `[10, 128]` for sequences of 10 vectors of 128-dimensional vectors + * - `[null, 128]` for variable-length sequences of 128-dimensional vectors. 
+ * + * @doc {heading: 'Layers', subheading: 'Convolutional', namespace: 'layers'} + */ + function conv1d(args) { + return new Conv1D(args); + } + /** + * 2D convolution layer (e.g. spatial convolution over images). + * + * This layer creates a convolution kernel that is convolved + * with the layer input to produce a tensor of outputs. + * + * If `useBias` is True, a bias vector is created and added to the outputs. + * + * If `activation` is not `null`, it is applied to the outputs as well. + * + * When using this layer as the first layer in a model, + * provide the keyword argument `inputShape` + * (Array of integers, does not include the sample axis), + * e.g. `inputShape=[128, 128, 3]` for 128x128 RGB pictures + * in `dataFormat='channelsLast'`. + * + * @doc {heading: 'Layers', subheading: 'Convolutional', namespace: 'layers'} + */ + function conv2d$1(args) { + return new Conv2D(args); + } + /** + * Transposed convolutional layer (sometimes called Deconvolution). + * + * The need for transposed convolutions generally arises + * from the desire to use a transformation going in the opposite direction of + * a normal convolution, i.e., from something that has the shape of the output + * of some convolution to something that has the shape of its input while + * maintaining a connectivity pattern that is compatible with said + * convolution. + * + * When using this layer as the first layer in a model, provide the + * configuration `inputShape` (`Array` of integers, does not include the + * sample axis), e.g., `inputShape: [128, 128, 3]` for 128x128 RGB pictures in + * `dataFormat: 'channelsLast'`. + * + * Input shape: + * 4D tensor with shape: + * `[batch, channels, rows, cols]` if `dataFormat` is `'channelsFirst'`. + * or 4D tensor with shape + * `[batch, rows, cols, channels]` if `dataFormat` is `'channelsLast'`. + * + * Output shape: + * 4D tensor with shape: + * `[batch, filters, newRows, newCols]` if `dataFormat` is + * `'channelsFirst'`. 
or 4D tensor with shape: + * `[batch, newRows, newCols, filters]` if `dataFormat` is `'channelsLast'`. + * + * References: + * - [A guide to convolution arithmetic for deep + * learning](https://arxiv.org/abs/1603.07285v1) + * - [Deconvolutional + * Networks](http://www.matthewzeiler.com/pubs/cvpr2010/cvpr2010.pdf) + * + * @doc {heading: 'Layers', subheading: 'Convolutional', namespace: 'layers'} + */ + function conv2dTranspose(args) { + return new Conv2DTranspose(args); + } + /** + * 3D convolution layer (e.g. spatial convolution over volumes). + * + * This layer creates a convolution kernel that is convolved + * with the layer input to produce a tensor of outputs. + * + * If `useBias` is True, a bias vector is created and added to the outputs. + * + * If `activation` is not `null`, it is applied to the outputs as well. + * + * When using this layer as the first layer in a model, + * provide the keyword argument `inputShape` + * (Array of integers, does not include the sample axis), + * e.g. `inputShape=[128, 128, 128, 1]` for 128x128x128 grayscale volumes + * in `dataFormat='channelsLast'`. + * + * @doc {heading: 'Layers', subheading: 'Convolutional', namespace: 'layers'} + */ + function conv3d(args) { + return new Conv3D(args); + } + function conv3dTranspose(args) { + return new Conv3DTranspose(args); + } + /** + * Depthwise separable 2D convolution. + * + * Separable convolution consists of first performing + * a depthwise spatial convolution + * (which acts on each input channel separately) + * followed by a pointwise convolution which mixes together the resulting + * output channels. The `depthMultiplier` argument controls how many + * output channels are generated per input channel in the depthwise step. + * + * Intuitively, separable convolutions can be understood as + * a way to factorize a convolution kernel into two smaller kernels, + * or as an extreme version of an Inception block. 
+ * + * Input shape: + * 4D tensor with shape: + * `[batch, channels, rows, cols]` if data_format='channelsFirst' + * or 4D tensor with shape: + * `[batch, rows, cols, channels]` if data_format='channelsLast'. + * + * Output shape: + * 4D tensor with shape: + * `[batch, filters, newRows, newCols]` if data_format='channelsFirst' + * or 4D tensor with shape: + * `[batch, newRows, newCols, filters]` if data_format='channelsLast'. + * `rows` and `cols` values might have changed due to padding. + * + * @doc {heading: 'Layers', subheading: 'Convolutional', namespace: 'layers'} + */ + function separableConv2d(args) { + return new SeparableConv2D(args); + } + /** + * Cropping layer for 2D input (e.g., image). + * + * This layer can crop an input + * at the top, bottom, left and right side of an image tensor. + * + * Input shape: + * 4D tensor with shape: + * - If `dataFormat` is `"channelsLast"`: + * `[batch, rows, cols, channels]` + * - If `data_format` is `"channels_first"`: + * `[batch, channels, rows, cols]`. + * + * Output shape: + * 4D with shape: + * - If `dataFormat` is `"channelsLast"`: + * `[batch, croppedRows, croppedCols, channels]` + * - If `dataFormat` is `"channelsFirst"`: + * `[batch, channels, croppedRows, croppedCols]`. + * + * Examples + * ```js + * + * const model = tf.sequential(); + * model.add(tf.layers.cropping2D({cropping:[[2, 2], [2, 2]], + * inputShape: [128, 128, 3]})); + * //now output shape is [batch, 124, 124, 3] + * ``` + * + * @doc {heading: 'Layers', subheading: 'Convolutional', namespace: 'layers'} + */ + function cropping2D(args) { + return new Cropping2D(args); + } + /** + * Upsampling layer for 2D inputs. + * + * Repeats the rows and columns of the data + * by size[0] and size[1] respectively. 
+ * + * + * Input shape: + * 4D tensor with shape: + * - If `dataFormat` is `"channelsLast"`: + * `[batch, rows, cols, channels]` + * - If `dataFormat` is `"channelsFirst"`: + * `[batch, channels, rows, cols]` + * + * Output shape: + * 4D tensor with shape: + * - If `dataFormat` is `"channelsLast"`: + * `[batch, upsampledRows, upsampledCols, channels]` + * - If `dataFormat` is `"channelsFirst"`: + * `[batch, channels, upsampledRows, upsampledCols]` + * + * + * @doc {heading: 'Layers', subheading: 'Convolutional', namespace: 'layers'} + */ + function upSampling2d(args) { + return new UpSampling2D(args); + } + // Convolutional(depthwise) Layers. + /** + * Depthwise separable 2D convolution. + * + * Depthwise Separable convolutions consists in performing just the first step + * in a depthwise spatial convolution (which acts on each input channel + * separately). The `depthMultiplier` argument controls how many output channels + * are generated per input channel in the depthwise step. + * + * @doc {heading: 'Layers', subheading: 'Convolutional', namespace: 'layers'} + */ + function depthwiseConv2d(args) { + return new DepthwiseConv2D(args); + } + // Basic Layers. + /** + * Applies an activation function to an output. + * + * This layer applies element-wise activation function. Other layers, notably + * `dense` can also apply activation functions. Use this isolated activation + * function to extract the values before and after the + * activation. For instance: + * + * ```js + * const input = tf.input({shape: [5]}); + * const denseLayer = tf.layers.dense({units: 1}); + * const activationLayer = tf.layers.activation({activation: 'relu6'}); + * + * // Obtain the output symbolic tensors by applying the layers in order. + * const denseOutput = denseLayer.apply(input); + * const activationOutput = activationLayer.apply(denseOutput); + * + * // Create the model based on the inputs. 
+ * const model = tf.model({ + * inputs: input, + * outputs: [denseOutput, activationOutput] + * }); + * + * // Collect both outputs and print separately. + * const [denseOut, activationOut] = model.predict(tf.randomNormal([6, 5])); + * denseOut.print(); + * activationOut.print(); + * ``` + * + * @doc {heading: 'Layers', subheading: 'Basic', namespace: 'layers'} + */ + function activation(args) { + return new Activation(args); + } + /** + * Creates a dense (fully connected) layer. + * + * This layer implements the operation: + * `output = activation(dot(input, kernel) + bias)` + * + * `activation` is the element-wise activation function + * passed as the `activation` argument. + * + * `kernel` is a weights matrix created by the layer. + * + * `bias` is a bias vector created by the layer (only applicable if `useBias` + * is `true`). + * + * **Input shape:** + * + * nD `tf.Tensor` with shape: `(batchSize, ..., inputDim)`. + * + * The most common situation would be + * a 2D input with shape `(batchSize, inputDim)`. + * + * **Output shape:** + * + * nD tensor with shape: `(batchSize, ..., units)`. + * + * For instance, for a 2D input with shape `(batchSize, inputDim)`, + * the output would have shape `(batchSize, units)`. + * + * Note: if the input to the layer has a rank greater than 2, then it is + * flattened prior to the initial dot product with the kernel. + * + * @doc {heading: 'Layers', subheading: 'Basic', namespace: 'layers'} + */ + function dense(args) { + return new Dense(args); + } + /** + * Applies + * [dropout](http://www.cs.toronto.edu/~rsalakhu/papers/srivastava14a.pdf) to + * the input. + * + * Dropout consists in randomly setting a fraction `rate` of input units to 0 at + * each update during training time, which helps prevent overfitting. + * + * @doc {heading: 'Layers', subheading: 'Basic', namespace: 'layers'} + */ + function dropout(args) { + return new Dropout(args); + } + /** + * Spatial 1D version of Dropout. 
+ * + * This Layer type performs the same function as the Dropout layer, but it drops + * entire 1D feature maps instead of individual elements. For example, if an + * input example consists of 3 timesteps and the feature map for each timestep + * has a size of 4, a `spatialDropout1d` layer may zero out the feature maps + * of the 1st timesteps and 2nd timesteps completely while sparing all feature + * elements of the 3rd timestep. + * + * If adjacent frames (timesteps) are strongly correlated (as is normally the + * case in early convolution layers), regular dropout will not regularize the + * activation and will otherwise just result in merely an effective learning + * rate decrease. In this case, `spatialDropout1d` will help promote + * independence among feature maps and should be used instead. + * + * **Arguments:** + * rate: A floating-point number >=0 and <=1. Fraction of the input elements + * to drop. + * + * **Input shape:** + * 3D tensor with shape `(samples, timesteps, channels)`. + * + * **Output shape:** + * Same as the input shape. + * + * References: + * - [Efficient Object Localization Using Convolutional + * Networks](https://arxiv.org/abs/1411.4280) + * + * @doc {heading: 'Layers', subheading: 'Basic', namespace: 'layers'} + */ + function spatialDropout1d(args) { + return new SpatialDropout1D(args); + } + /** + * Flattens the input. Does not affect the batch size. + * + * A `Flatten` layer flattens each batch in its inputs to 1D (making the output + * 2D). + * + * For example: + * + * ```js + * const input = tf.input({shape: [4, 3]}); + * const flattenLayer = tf.layers.flatten(); + * // Inspect the inferred output shape of the flatten layer, which + * // equals `[null, 12]`. The 2nd dimension is 4 * 3, i.e., the result of the + * // flattening. (The 1st dimension is the undermined batch size.) 
+ * console.log(JSON.stringify(flattenLayer.apply(input).shape)); + * ``` + * + * @doc {heading: 'Layers', subheading: 'Basic', namespace: 'layers'} + */ + function flatten(args) { + return new Flatten(args); + } + /** + * Repeats the input n times in a new dimension. + * + * ```js + * const model = tf.sequential(); + * model.add(tf.layers.repeatVector({n: 4, inputShape: [2]})); + * const x = tf.tensor2d([[10, 20]]); + * // Use the model to do inference on a data point the model hasn't seen + * model.predict(x).print(); + * // output shape is now [batch, 2, 4] + * ``` + * + * @doc {heading: 'Layers', subheading: 'Basic', namespace: 'layers'} + */ + function repeatVector(args) { + return new RepeatVector(args); + } + /** + * Reshapes an input to a certain shape. + * + * ```js + * const input = tf.input({shape: [4, 3]}); + * const reshapeLayer = tf.layers.reshape({targetShape: [2, 6]}); + * // Inspect the inferred output shape of the Reshape layer, which + * // equals `[null, 2, 6]`. (The 1st dimension is the undermined batch size.) + * console.log(JSON.stringify(reshapeLayer.apply(input).shape)); + * ``` + * + * Input shape: + * Arbitrary, although all dimensions in the input shape must be fixed. + * Use the configuration `inputShape` when using this layer as the + * first layer in a model. + * + * + * Output shape: + * [batchSize, targetShape[0], targetShape[1], ..., + * targetShape[targetShape.length - 1]]. + * + * @doc {heading: 'Layers', subheading: 'Basic', namespace: 'layers'} + */ + function reshape$2(args) { + return new Reshape(args); + } + /** + * Permutes the dimensions of the input according to a given pattern. + * + * Useful for, e.g., connecting RNNs and convnets together. 
+ * + * Example: + * + * ```js + * const model = tf.sequential(); + * model.add(tf.layers.permute({ + * dims: [2, 1], + * inputShape: [10, 64] + * })); + * console.log(model.outputShape); + * // Now model's output shape is [null, 64, 10], where null is the + * // unpermuted sample (batch) dimension. + * ``` + * + * Input shape: + * Arbitrary. Use the configuration field `inputShape` when using this + * layer as the first layer in a model. + * + * Output shape: + * Same rank as the input shape, but with the dimensions re-ordered (i.e., + * permuted) according to the `dims` configuration of this layer. + * + * @doc {heading: 'Layers', subheading: 'Basic', namespace: 'layers'} + */ + function permute(args) { + return new Permute(args); + } + /** + * Maps positive integers (indices) into dense vectors of fixed size. + * E.g. [[4], [20]] -> [[0.25, 0.1], [0.6, -0.2]] + * + * **Input shape:** 2D tensor with shape: `[batchSize, sequenceLength]`. + * + * **Output shape:** 3D tensor with shape: `[batchSize, sequenceLength, + * outputDim]`. + * + * @doc {heading: 'Layers', subheading: 'Basic', namespace: 'layers'} + */ + function embedding(args) { + return new Embedding(args); + } + // Merge Layers. + /** + * Layer that performs element-wise addition on an `Array` of inputs. + * + * It takes as input a list of tensors, all of the same shape, and returns a + * single tensor (also of the same shape). The inputs are specified as an + * `Array` when the `apply` method of the `Add` layer instance is called. For + * example: + * + * ```js + * const input1 = tf.input({shape: [2, 2]}); + * const input2 = tf.input({shape: [2, 2]}); + * const addLayer = tf.layers.add(); + * const sum = addLayer.apply([input1, input2]); + * console.log(JSON.stringify(sum.shape)); + * // You get [null, 2, 2], with the first dimension as the undetermined batch + * // dimension. 
+ * ``` + * + * @doc {heading: 'Layers', subheading: 'Merge', namespace: 'layers'} + */ + function add$1(args) { + return new Add(args); + } + /** + * Layer that performs element-wise averaging on an `Array` of inputs. + * + * It takes as input a list of tensors, all of the same shape, and returns a + * single tensor (also of the same shape). For example: + * + * ```js + * const input1 = tf.input({shape: [2, 2]}); + * const input2 = tf.input({shape: [2, 2]}); + * const averageLayer = tf.layers.average(); + * const average = averageLayer.apply([input1, input2]); + * console.log(JSON.stringify(average.shape)); + * // You get [null, 2, 2], with the first dimension as the undetermined batch + * // dimension. + * ``` + * + * @doc {heading: 'Layers', subheading: 'Merge', namespace: 'layers'} + */ + function average(args) { + return new Average(args); + } + /** + * Layer that concatenates an `Array` of inputs. + * + * It takes a list of tensors, all of the same shape except for the + * concatenation axis, and returns a single tensor, the concatenation + * of all inputs. For example: + * + * ```js + * const input1 = tf.input({shape: [2, 2]}); + * const input2 = tf.input({shape: [2, 3]}); + * const concatLayer = tf.layers.concatenate(); + * const output = concatLayer.apply([input1, input2]); + * console.log(JSON.stringify(output.shape)); + * // You get [null, 2, 5], with the first dimension as the undetermined batch + * // dimension. The last dimension (5) is the result of concatenating the + * // last dimensions of the inputs (2 and 3). + * ``` + * + * @doc {heading: 'Layers', subheading: 'Merge', namespace: 'layers'} + */ + function concatenate(args) { + return new Concatenate(args); + } + /** + * Layer that computes the element-wise maximum of an `Array` of inputs. + * + * It takes as input a list of tensors, all of the same shape, and returns a + * single tensor (also of the same shape). 
For example: + * + * ```js + * const input1 = tf.input({shape: [2, 2]}); + * const input2 = tf.input({shape: [2, 2]}); + * const maxLayer = tf.layers.maximum(); + * const max = maxLayer.apply([input1, input2]); + * console.log(JSON.stringify(max.shape)); + * // You get [null, 2, 2], with the first dimension as the undetermined batch + * // dimension. + * ``` + * + * @doc {heading: 'Layers', subheading: 'Merge', namespace: 'layers'} + */ + function maximum$2(args) { + return new Maximum(args); + } + /** + * Layer that computes the element-wise minimum of an `Array` of inputs. + * + * It takes as input a list of tensors, all of the same shape, and returns a + * single tensor (also of the same shape). For example: + * + * ```js + * const input1 = tf.input({shape: [2, 2]}); + * const input2 = tf.input({shape: [2, 2]}); + * const minLayer = tf.layers.minimum(); + * const min = minLayer.apply([input1, input2]); + * console.log(JSON.stringify(min.shape)); + * // You get [null, 2, 2], with the first dimension as the undetermined batch + * // dimension. + * ``` + * + * @doc {heading: 'Layers', subheading: 'Merge', namespace: 'layers'} + */ + function minimum$2(args) { + return new Minimum(args); + } + /** + * Layer that multiplies (element-wise) an `Array` of inputs. + * + * It takes as input an Array of tensors, all of the same + * shape, and returns a single tensor (also of the same shape). + * For example: + * + * ```js + * const input1 = tf.input({shape: [2, 2]}); + * const input2 = tf.input({shape: [2, 2]}); + * const input3 = tf.input({shape: [2, 2]}); + * const multiplyLayer = tf.layers.multiply(); + * const product = multiplyLayer.apply([input1, input2, input3]); + * console.log(product.shape); + * // You get [null, 2, 2], with the first dimension as the undetermined batch + * // dimension. 
+ * + * @doc {heading: 'Layers', subheading: 'Merge', namespace: 'layers'} + */ + function multiply$2(args) { + return new Multiply(args); + } + /** + * Layer that computes a dot product between samples in two tensors. + * + * E.g., if applied to a list of two tensors `a` and `b` both of shape + * `[batchSize, n]`, the output will be a tensor of shape `[batchSize, 1]`, + * where each entry at index `[i, 0]` will be the dot product between + * `a[i, :]` and `b[i, :]`. + * + * Example: + * + * ```js + * const dotLayer = tf.layers.dot({axes: -1}); + * const x1 = tf.tensor2d([[10, 20], [30, 40]]); + * const x2 = tf.tensor2d([[-1, -2], [-3, -4]]); + * + * // Invoke the layer's apply() method in eager (imperative) mode. + * const y = dotLayer.apply([x1, x2]); + * y.print(); + * ``` + * + * @doc {heading: 'Layers', subheading: 'Merge', namespace: 'layers'} + */ + function dot(args) { + return new Dot(args); + } + // Normalization Layers. + /** + * Batch normalization layer (Ioffe and Szegedy, 2014). + * + * Normalize the activations of the previous layer at each batch, + * i.e. applies a transformation that maintains the mean activation + * close to 0 and the activation standard deviation close to 1. + * + * Input shape: + * Arbitrary. Use the keyword argument `inputShape` (Array of integers, does + * not include the sample axis) when calling the constructor of this class, + * if this layer is used as a first layer in a model. + * + * Output shape: + * Same shape as input. + * + * References: + * - [Batch Normalization: Accelerating Deep Network Training by Reducing + * Internal Covariate Shift](https://arxiv.org/abs/1502.03167) + * + * @doc {heading: 'Layers', subheading: 'Normalization', namespace: 'layers'} + */ + function batchNormalization(args) { + return new BatchNormalization(args); + } + /** + * Layer-normalization layer (Ba et al., 2016). 
+ * + * Normalizes the activations of the previous layer for each given example in a + * batch independently, instead of across a batch like in `batchNormalization`. + * In other words, this layer applies a transformation that maintains the mean + * activation within each example close to 0 and activation variance close to 1. + * + * Input shape: + * Arbitrary. Use the argument `inputShape` when using this layer as the first + * layer in a model. + * + * Output shape: + * Same as input. + * + * References: + * - [Layer Normalization](https://arxiv.org/abs/1607.06450) + * + * @doc {heading: 'Layers', subheading: 'Normalization', namespace: 'layers'} + */ + function layerNormalization(args) { + return new LayerNormalization(args); + } + // Padding Layers. + /** + * Zero-padding layer for 2D input (e.g., image). + * + * This layer can add rows and columns of zeros + * at the top, bottom, left and right side of an image tensor. + * + * Input shape: + * 4D tensor with shape: + * - If `dataFormat` is `"channelsLast"`: + * `[batch, rows, cols, channels]` + * - If `data_format` is `"channels_first"`: + * `[batch, channels, rows, cols]`. + * + * Output shape: + * 4D with shape: + * - If `dataFormat` is `"channelsLast"`: + * `[batch, paddedRows, paddedCols, channels]` + * - If `dataFormat` is `"channelsFirst"`: + * `[batch, channels, paddedRows, paddedCols]`. + * + * @doc {heading: 'Layers', subheading: 'Padding', namespace: 'layers'} + */ + function zeroPadding2d(args) { + return new ZeroPadding2D(args); + } + // Pooling Layers. + /** + * Average pooling operation for spatial data. + * + * Input shape: `[batchSize, inLength, channels]` + * + * Output shape: `[batchSize, pooledLength, channels]` + * + * `tf.avgPool1d` is an alias. 
+ * + * @doc {heading: 'Layers', subheading: 'Pooling', namespace: 'layers'} + */ + function averagePooling1d(args) { + return new AveragePooling1D(args); + } + function avgPool1d(args) { + return averagePooling1d(args); + } + // For backwards compatibility. + // See https://github.com/tensorflow/tfjs/issues/152 + function avgPooling1d(args) { + return averagePooling1d(args); + } + /** + * Average pooling operation for spatial data. + * + * Input shape: + * - If `dataFormat === CHANNEL_LAST`: + * 4D tensor with shape: + * `[batchSize, rows, cols, channels]` + * - If `dataFormat === CHANNEL_FIRST`: + * 4D tensor with shape: + * `[batchSize, channels, rows, cols]` + * + * Output shape + * - If `dataFormat === CHANNEL_LAST`: + * 4D tensor with shape: + * `[batchSize, pooledRows, pooledCols, channels]` + * - If `dataFormat === CHANNEL_FIRST`: + * 4D tensor with shape: + * `[batchSize, channels, pooledRows, pooledCols]` + * + * `tf.avgPool2d` is an alias. + * + * @doc {heading: 'Layers', subheading: 'Pooling', namespace: 'layers'} + */ + function averagePooling2d(args) { + return new AveragePooling2D(args); + } + function avgPool2d(args) { + return averagePooling2d(args); + } + // For backwards compatibility. + // See https://github.com/tensorflow/tfjs/issues/152 + function avgPooling2d(args) { + return averagePooling2d(args); + } + /** + * Average pooling operation for 3D data. 
+ * + * Input shape + * - If `dataFormat === channelsLast`: + * 5D tensor with shape: + * `[batchSize, depths, rows, cols, channels]` + * - If `dataFormat === channelsFirst`: + * 4D tensor with shape: + * `[batchSize, channels, depths, rows, cols]` + * + * Output shape + * - If `dataFormat=channelsLast`: + * 5D tensor with shape: + * `[batchSize, pooledDepths, pooledRows, pooledCols, channels]` + * - If `dataFormat=channelsFirst`: + * 5D tensor with shape: + * `[batchSize, channels, pooledDepths, pooledRows, pooledCols]` + * + * @doc {heading: 'Layers', subheading: 'Pooling', namespace: 'layers'} + */ + function averagePooling3d(args) { + return new AveragePooling3D(args); + } + function avgPool3d(args) { + return averagePooling3d(args); + } + // For backwards compatibility. + // See https://github.com/tensorflow/tfjs/issues/152 + function avgPooling3d(args) { + return averagePooling3d(args); + } + /** + * Global average pooling operation for temporal data. + * + * Input Shape: 3D tensor with shape: `[batchSize, steps, features]`. + * + * Output Shape: 2D tensor with shape: `[batchSize, features]`. + * + * @doc {heading: 'Layers', subheading: 'Pooling', namespace: 'layers'} + */ + function globalAveragePooling1d(args) { + return new GlobalAveragePooling1D(args); + } + /** + * Global average pooling operation for spatial data. + * + * Input shape: + * - If `dataFormat` is `CHANNEL_LAST`: + * 4D tensor with shape: `[batchSize, rows, cols, channels]`. + * - If `dataFormat` is `CHANNEL_FIRST`: + * 4D tensor with shape: `[batchSize, channels, rows, cols]`. + * + * Output shape: + * 2D tensor with shape: `[batchSize, channels]`. + * + * @doc {heading: 'Layers', subheading: 'Pooling', namespace: 'layers'} + */ + function globalAveragePooling2d(args) { + return new GlobalAveragePooling2D(args); + } + /** + * Global max pooling operation for temporal data. + * + * Input Shape: 3D tensor with shape: `[batchSize, steps, features]`. 
+ * + * Output Shape: 2D tensor with shape: `[batchSize, features]`. + * + * @doc {heading: 'Layers', subheading: 'Pooling', namespace: 'layers'} + */ + function globalMaxPooling1d(args) { + return new GlobalMaxPooling1D(args); + } + /** + * Global max pooling operation for spatial data. + * + * Input shape: + * - If `dataFormat` is `CHANNEL_LAST`: + * 4D tensor with shape: `[batchSize, rows, cols, channels]`. + * - If `dataFormat` is `CHANNEL_FIRST`: + * 4D tensor with shape: `[batchSize, channels, rows, cols]`. + * + * Output shape: + * 2D tensor with shape: `[batchSize, channels]`. + * + * @doc {heading: 'Layers', subheading: 'Pooling', namespace: 'layers'} + */ + function globalMaxPooling2d(args) { + return new GlobalMaxPooling2D(args); + } + /** + * Max pooling operation for temporal data. + * + * Input shape: `[batchSize, inLength, channels]` + * + * Output shape: `[batchSize, pooledLength, channels]` + * + * @doc {heading: 'Layers', subheading: 'Pooling', namespace: 'layers'} + */ + function maxPooling1d(args) { + return new MaxPooling1D(args); + } + /** + * Max pooling operation for spatial data. + * + * Input shape + * - If `dataFormat === CHANNEL_LAST`: + * 4D tensor with shape: + * `[batchSize, rows, cols, channels]` + * - If `dataFormat === CHANNEL_FIRST`: + * 4D tensor with shape: + * `[batchSize, channels, rows, cols]` + * + * Output shape + * - If `dataFormat=CHANNEL_LAST`: + * 4D tensor with shape: + * `[batchSize, pooledRows, pooledCols, channels]` + * - If `dataFormat=CHANNEL_FIRST`: + * 4D tensor with shape: + * `[batchSize, channels, pooledRows, pooledCols]` + * + * @doc {heading: 'Layers', subheading: 'Pooling', namespace: 'layers'} + */ + function maxPooling2d(args) { + return new MaxPooling2D(args); + } + /** + * Max pooling operation for 3D data. 
+ * + * Input shape + * - If `dataFormat === channelsLast`: + * 5D tensor with shape: + * `[batchSize, depths, rows, cols, channels]` + * - If `dataFormat === channelsFirst`: + * 5D tensor with shape: + * `[batchSize, channels, depths, rows, cols]` + * + * Output shape + * - If `dataFormat=channelsLast`: + * 5D tensor with shape: + * `[batchSize, pooledDepths, pooledRows, pooledCols, channels]` + * - If `dataFormat=channelsFirst`: + * 5D tensor with shape: + * `[batchSize, channels, pooledDepths, pooledRows, pooledCols]` + * + * @doc {heading: 'Layers', subheading: 'Pooling', namespace: 'layers'} + */ + function maxPooling3d(args) { + return new MaxPooling3D(args); + } + // Recurrent Layers. + /** + * Gated Recurrent Unit - Cho et al. 2014. + * + * This is an `RNN` layer consisting of one `GRUCell`. However, unlike + * the underlying `GRUCell`, the `apply` method of `SimpleRNN` operates + * on a sequence of inputs. The shape of the input (not including the first, + * batch dimension) needs to be at least 2-D, with the first dimension being + * time steps. For example: + * + * ```js + * const rnn = tf.layers.gru({units: 8, returnSequences: true}); + * + * // Create an input with 10 time steps. + * const input = tf.input({shape: [10, 20]}); + * const output = rnn.apply(input); + * + * console.log(JSON.stringify(output.shape)); + * // [null, 10, 8]: 1st dimension is unknown batch size; 2nd dimension is the + * // same as the sequence length of `input`, due to `returnSequences`: `true`; + * // 3rd dimension is the `GRUCell`'s number of units. + * + * @doc {heading: 'Layers', subheading: 'Recurrent', namespace: 'layers'} + */ + function gru(args) { + return new GRU(args); + } + /** + * Cell class for `GRU`. + * + * `GRUCell` is distinct from the `RNN` subclass `GRU` in that its + * `apply` method takes the input data of only a single time step and returns + * the cell's output at the time step, while `GRU` takes the input data + * over a number of time steps. 
For example: + * + * ```js + * const cell = tf.layers.gruCell({units: 2}); + * const input = tf.input({shape: [10]}); + * const output = cell.apply(input); + * + * console.log(JSON.stringify(output.shape)); + * // [null, 10]: This is the cell's output at a single time step. The 1st + * // dimension is the unknown batch size. + * ``` + * + * Instance(s) of `GRUCell` can be used to construct `RNN` layers. The + * most typical use of this workflow is to combine a number of cells into a + * stacked RNN cell (i.e., `StackedRNNCell` internally) and use it to create an + * RNN. For example: + * + * ```js + * const cells = [ + * tf.layers.gruCell({units: 4}), + * tf.layers.gruCell({units: 8}), + * ]; + * const rnn = tf.layers.rnn({cell: cells, returnSequences: true}); + * + * // Create an input with 10 time steps and a length-20 vector at each step. + * const input = tf.input({shape: [10, 20]}); + * const output = rnn.apply(input); + * + * console.log(JSON.stringify(output.shape)); + * // [null, 10, 8]: 1st dimension is unknown batch size; 2nd dimension is the + * // same as the sequence length of `input`, due to `returnSequences`: `true`; + * // 3rd dimension is the last `gruCell`'s number of units. + * ``` + * + * To create an `RNN` consisting of only *one* `GRUCell`, use the + * `tf.layers.gru`. + * + * @doc {heading: 'Layers', subheading: 'Recurrent', namespace: 'layers'} + */ + function gruCell(args) { + return new GRUCell(args); + } + /** + * Long-Short Term Memory layer - Hochreiter 1997. + * + * This is an `RNN` layer consisting of one `LSTMCell`. However, unlike + * the underlying `LSTMCell`, the `apply` method of `LSTM` operates + * on a sequence of inputs. The shape of the input (not including the first, + * batch dimension) needs to be at least 2-D, with the first dimension being + * time steps. For example: + * + * ```js + * const lstm = tf.layers.lstm({units: 8, returnSequences: true}); + * + * // Create an input with 10 time steps. 
+ * const input = tf.input({shape: [10, 20]}); + * const output = lstm.apply(input); + * + * console.log(JSON.stringify(output.shape)); + * // [null, 10, 8]: 1st dimension is unknown batch size; 2nd dimension is the + * // same as the sequence length of `input`, due to `returnSequences`: `true`; + * // 3rd dimension is the `LSTMCell`'s number of units. + * + * @doc {heading: 'Layers', subheading: 'Recurrent', namespace: 'layers'} + */ + function lstm(args) { + return new LSTM(args); + } + /** + * Cell class for `LSTM`. + * + * `LSTMCell` is distinct from the `RNN` subclass `LSTM` in that its + * `apply` method takes the input data of only a single time step and returns + * the cell's output at the time step, while `LSTM` takes the input data + * over a number of time steps. For example: + * + * ```js + * const cell = tf.layers.lstmCell({units: 2}); + * const input = tf.input({shape: [10]}); + * const output = cell.apply(input); + * + * console.log(JSON.stringify(output.shape)); + * // [null, 10]: This is the cell's output at a single time step. The 1st + * // dimension is the unknown batch size. + * ``` + * + * Instance(s) of `LSTMCell` can be used to construct `RNN` layers. The + * most typical use of this workflow is to combine a number of cells into a + * stacked RNN cell (i.e., `StackedRNNCell` internally) and use it to create an + * RNN. For example: + * + * ```js + * const cells = [ + * tf.layers.lstmCell({units: 4}), + * tf.layers.lstmCell({units: 8}), + * ]; + * const rnn = tf.layers.rnn({cell: cells, returnSequences: true}); + * + * // Create an input with 10 time steps and a length-20 vector at each step. 
+ * const input = tf.input({shape: [10, 20]}); + * const output = rnn.apply(input); + * + * console.log(JSON.stringify(output.shape)); + * // [null, 10, 8]: 1st dimension is unknown batch size; 2nd dimension is the + * // same as the sequence length of `input`, due to `returnSequences`: `true`; + * // 3rd dimension is the last `lstmCell`'s number of units. + * ``` + * + * To create an `RNN` consisting of only *one* `LSTMCell`, use the + * `tf.layers.lstm`. + * + * @doc {heading: 'Layers', subheading: 'Recurrent', namespace: 'layers'} + */ + function lstmCell(args) { + return new LSTMCell(args); + } + /** + * Fully-connected RNN where the output is to be fed back to input. + * + * This is an `RNN` layer consisting of one `SimpleRNNCell`. However, unlike + * the underlying `SimpleRNNCell`, the `apply` method of `SimpleRNN` operates + * on a sequence of inputs. The shape of the input (not including the first, + * batch dimension) needs to be at least 2-D, with the first dimension being + * time steps. For example: + * + * ```js + * const rnn = tf.layers.simpleRNN({units: 8, returnSequences: true}); + * + * // Create an input with 10 time steps. + * const input = tf.input({shape: [10, 20]}); + * const output = rnn.apply(input); + * + * console.log(JSON.stringify(output.shape)); + * // [null, 10, 8]: 1st dimension is unknown batch size; 2nd dimension is the + * // same as the sequence length of `input`, due to `returnSequences`: `true`; + * // 3rd dimension is the `SimpleRNNCell`'s number of units. + * ``` + * + * @doc {heading: 'Layers', subheading: 'Recurrent', namespace: 'layers'} + */ + function simpleRNN(args) { + return new SimpleRNN(args); + } + /** + * Cell class for `SimpleRNN`. + * + * `SimpleRNNCell` is distinct from the `RNN` subclass `SimpleRNN` in that its + * `apply` method takes the input data of only a single time step and returns + * the cell's output at the time step, while `SimpleRNN` takes the input data + * over a number of time steps. 
For example: + * + * ```js + * const cell = tf.layers.simpleRNNCell({units: 2}); + * const input = tf.input({shape: [10]}); + * const output = cell.apply(input); + * + * console.log(JSON.stringify(output.shape)); + * // [null, 10]: This is the cell's output at a single time step. The 1st + * // dimension is the unknown batch size. + * ``` + * + * Instance(s) of `SimpleRNNCell` can be used to construct `RNN` layers. The + * most typical use of this workflow is to combine a number of cells into a + * stacked RNN cell (i.e., `StackedRNNCell` internally) and use it to create an + * RNN. For example: + * + * ```js + * const cells = [ + * tf.layers.simpleRNNCell({units: 4}), + * tf.layers.simpleRNNCell({units: 8}), + * ]; + * const rnn = tf.layers.rnn({cell: cells, returnSequences: true}); + * + * // Create an input with 10 time steps and a length-20 vector at each step. + * const input = tf.input({shape: [10, 20]}); + * const output = rnn.apply(input); + * + * console.log(JSON.stringify(output.shape)); + * // [null, 10, 8]: 1st dimension is unknown batch size; 2nd dimension is the + * // same as the sequence length of `input`, due to `returnSequences`: `true`; + * // 3rd dimension is the last `SimpleRNNCell`'s number of units. + * ``` + * + * To create an `RNN` consisting of only *one* `SimpleRNNCell`, use the + * `tf.layers.simpleRNN`. + * + * @doc {heading: 'Layers', subheading: 'Recurrent', namespace: 'layers'} + */ + function simpleRNNCell(args) { + return new SimpleRNNCell(args); + } + /** + * Convolutional LSTM layer - Xingjian Shi 2015. + * + * This is a `ConvRNN2D` layer consisting of one `ConvLSTM2DCell`. However, + * unlike the underlying `ConvLSTM2DCell`, the `apply` method of `ConvLSTM2D` + * operates on a sequence of inputs. The shape of the input (not including the + * first, batch dimension) needs to be 4-D, with the first dimension being time + * steps. 
For example: + * + * ```js + * const filters = 3; + * const kernelSize = 3; + * + * const batchSize = 4; + * const sequenceLength = 2; + * const size = 5; + * const channels = 3; + * + * const inputShape = [batchSize, sequenceLength, size, size, channels]; + * const input = tf.ones(inputShape); + * + * const layer = tf.layers.convLstm2d({filters, kernelSize}); + * + * const output = layer.apply(input); + * ``` + */ + /** @doc {heading: 'Layers', subheading: 'Recurrent', namespace: 'layers'} */ + function convLstm2d(args) { + return new ConvLSTM2D(args); + } + /** + * Cell class for `ConvLSTM2D`. + * + * `ConvLSTM2DCell` is distinct from the `ConvRNN2D` subclass `ConvLSTM2D` in + * that its `call` method takes the input data of only a single time step and + * returns the cell's output at the time step, while `ConvLSTM2D` takes the + * input data over a number of time steps. For example: + * + * ```js + * const filters = 3; + * const kernelSize = 3; + * + * const sequenceLength = 1; + * const size = 5; + * const channels = 3; + * + * const inputShape = [sequenceLength, size, size, channels]; + * const input = tf.ones(inputShape); + * + * const cell = tf.layers.convLstm2dCell({filters, kernelSize}); + * + * cell.build(input.shape); + * + * const outputSize = size - kernelSize + 1; + * const outShape = [sequenceLength, outputSize, outputSize, filters]; + * + * const initialH = tf.zeros(outShape); + * const initialC = tf.zeros(outShape); + * + * const [o, h, c] = cell.call([input, initialH, initialC], {}); + * ``` + */ + /** @doc {heading: 'Layers', subheading: 'Recurrent', namespace: 'layers'} */ + function convLstm2dCell(args) { + return new ConvLSTM2DCell(args); + } + /** + * Base class for recurrent layers. + * + * Input shape: + * 3D tensor with shape `[batchSize, timeSteps, inputDim]`. + * + * Output shape: + * - if `returnState`, an Array of tensors (i.e., `tf.Tensor`s). The first + * tensor is the output. 
The remaining tensors are the states at the + * last time step, each with shape `[batchSize, units]`. + * - if `returnSequences`, the output will have shape + * `[batchSize, timeSteps, units]`. + * - else, the output will have shape `[batchSize, units]`. + * + * Masking: + * This layer supports masking for input data with a variable number + * of timesteps. To introduce masks to your data, + * use an embedding layer with the `mask_zero` parameter + * set to `True`. + * + * Notes on using statefulness in RNNs: + * You can set RNN layers to be 'stateful', which means that the states + * computed for the samples in one batch will be reused as initial states + * for the samples in the next batch. This assumes a one-to-one mapping + * between samples in different successive batches. + * + * To enable statefulness: + * - specify `stateful: true` in the layer constructor. + * - specify a fixed batch size for your model, by passing + * if sequential model: + * `batchInputShape=[...]` to the first layer in your model. + * else for functional model with 1 or more Input layers: + * `batchShape=[...]` to all the first layers in your model. + * This is the expected shape of your inputs *including the batch size*. + * It should be a tuple of integers, e.g. `(32, 10, 100)`. + * - specify `shuffle=False` when calling fit(). + * + * To reset the states of your model, call `.resetStates()` on either + * a specific layer, or on your entire model. + * + * Note on specifying the initial state of RNNs + * You can specify the initial state of RNN layers symbolically by + * calling them with the option `initialState`. The value of + * `initialState` should be a tensor or list of tensors representing + * the initial state of the RNN layer. + * + * You can specify the initial state of RNN layers numerically by + * calling `resetStates` with the keyword argument `states`. 
The value of + * `states` should be a numpy array or list of numpy arrays representing + * the initial state of the RNN layer. + * + * Note on passing external constants to RNNs + * You can pass "external" constants to the cell using the `constants` + * keyword argument of `RNN.call` method. This requires that the `cell.call` + * method accepts the same keyword argument `constants`. Such constants + * can be used to condition the cell transformation on additional static + * inputs (not changing over time), a.k.a. an attention mechanism. + * + * @doc {heading: 'Layers', subheading: 'Recurrent', namespace: 'layers'} + */ + function rnn(args) { + return new RNN(args); + } + /** + * Wrapper allowing a stack of RNN cells to behave as a single cell. + * + * Used to implement efficient stacked RNNs. + * + * @doc {heading: 'Layers', subheading: 'Recurrent', namespace: 'layers'} + */ + function stackedRNNCells(args) { + return new StackedRNNCells(args); + } + // Wrapper Layers. + /** @doc {heading: 'Layers', subheading: 'Wrapper', namespace: 'layers'} */ + function bidirectional(args) { + return new Bidirectional(args); + } + /** + * This wrapper applies a layer to every temporal slice of an input. + * + * The input should be at least 3D, and the dimension of the index `1` will be + * considered to be the temporal dimension. + * + * Consider a batch of 32 samples, where each sample is a sequence of 10 vectors + * of 16 dimensions. The batch input shape of the layer is then `[32, 10, + * 16]`, and the `inputShape`, not including the sample dimension, is + * `[10, 16]`. + * + * You can then use `TimeDistributed` to apply a `Dense` layer to each of the 10 + * timesteps, independently: + * + * ```js + * const model = tf.sequential(); + * model.add(tf.layers.timeDistributed({ + * layer: tf.layers.dense({units: 8}), + * inputShape: [10, 16], + * })); + * + * // Now model.outputShape = [null, 10, 8]. + * // The output will then have shape `[32, 10, 8]`. 
+ * + * // In subsequent layers, there is no need for `inputShape`: + * model.add(tf.layers.timeDistributed({layer: tf.layers.dense({units: 32})})); + * console.log(JSON.stringify(model.outputs[0].shape)); + * // Now model.outputShape = [null, 10, 32]. + * ``` + * + * The output will then have shape `[32, 10, 32]`. + * + * `TimeDistributed` can be used with arbitrary layers, not just `Dense`, for + * instance a `Conv2D` layer. + * + * ```js + * const model = tf.sequential(); + * model.add(tf.layers.timeDistributed({ + * layer: tf.layers.conv2d({filters: 64, kernelSize: [3, 3]}), + * inputShape: [10, 299, 299, 3], + * })); + * console.log(JSON.stringify(model.outputs[0].shape)); + * ``` + * + * @doc {heading: 'Layers', subheading: 'Wrapper', namespace: 'layers'} + */ + function timeDistributed(args) { + return new TimeDistributed(args); + } + // Aliases for pooling. + const globalMaxPool1d = globalMaxPooling1d; + const globalMaxPool2d = globalMaxPooling2d; + const maxPool1d = maxPooling1d; + const maxPool2d = maxPooling2d; + /** + * Apply additive zero-centered Gaussian noise. + * + * As it is a regularization layer, it is only active at training time. + * + * This is useful to mitigate overfitting + * (you could see it as a form of random data augmentation). + * Gaussian Noise (GS) is a natural choice as corruption process + * for real valued inputs. + * + * # Arguments + * stddev: float, standard deviation of the noise distribution. + * + * # Input shape + * Arbitrary. Use the keyword argument `input_shape` + * (tuple of integers, does not include the samples axis) + * when using this layer as the first layer in a model. + * + * # Output shape + * Same shape as input. + * + * @doc {heading: 'Layers', subheading: 'Noise', namespace: 'layers'} + */ + function gaussianNoise(args) { + return new GaussianNoise(args); + } + /** + * Apply multiplicative 1-centered Gaussian noise. + * + * As it is a regularization layer, it is only active at training time. 
+ * + * Arguments: + * - `rate`: float, drop probability (as with `Dropout`). + * The multiplicative noise will have + * standard deviation `sqrt(rate / (1 - rate))`. + * + * Input shape: + * Arbitrary. Use the keyword argument `inputShape` + * (tuple of integers, does not include the samples axis) + * when using this layer as the first layer in a model. + * + * Output shape: + * Same shape as input. + * + * References: + * - [Dropout: A Simple Way to Prevent Neural Networks from Overfitting]( + * http://www.cs.toronto.edu/~rsalakhu/papers/srivastava14a.pdf) + * + * @doc {heading: 'Layers', subheading: 'Noise', namespace: 'layers'} + */ + function gaussianDropout(args) { + return new GaussianDropout(args); + } + /** + * Applies Alpha Dropout to the input. + * + * As it is a regularization layer, it is only active at training time. + * + * Alpha Dropout is a `Dropout` that keeps mean and variance of inputs + * to their original values, in order to ensure the self-normalizing property + * even after this dropout. + * Alpha Dropout fits well to Scaled Exponential Linear Units + * by randomly setting activations to the negative saturation value. + * + * Arguments: + * - `rate`: float, drop probability (as with `Dropout`). + * The multiplicative noise will have + * standard deviation `sqrt(rate / (1 - rate))`. + * - `noise_shape`: A 1-D `Tensor` of type `int32`, representing the + * shape for randomly generated keep/drop flags. + * + * Input shape: + * Arbitrary. Use the keyword argument `inputShape` + * (tuple of integers, does not include the samples axis) + * when using this layer as the first layer in a model. + * + * Output shape: + * Same shape as input. + * + * References: + * - [Self-Normalizing Neural Networks](https://arxiv.org/abs/1706.02515) + * + * @doc {heading: 'Layers', subheading: 'Noise', namespace: 'layers'} + */ + function alphaDropout(args) { + return new AlphaDropout(args); + } + /** + * Masks a sequence by using a mask value to skip timesteps. 
+ * + * If all features for a given sample timestep are equal to `mask_value`, + * then the sample timestep will be masked (skipped) in all downstream layers + * (as long as they support masking). + * + * If any downstream layer does not support masking yet receives such + * an input mask, an exception will be raised. + * + * Arguments: + * - `maskValue`: Either None or mask value to skip. + * + * Input shape: + * Arbitrary. Use the keyword argument `inputShape` + * (tuple of integers, does not include the samples axis) + * when using this layer as the first layer in a model. + * + * Output shape: + * Same shape as input. + * + * @doc {heading: 'Layers', subheading: 'Mask', namespace: 'layers'} + */ + function masking(args) { + return new Masking(args); + } + /** + * A preprocessing layer which rescales input values to a new range. + * + * This layer rescales every value of an input (often an image) by multiplying + * by `scale` and adding `offset`. + * + * For instance: + * 1. To rescale an input in the ``[0, 255]`` range + * to be in the `[0, 1]` range, you would pass `scale=1/255`. + * 2. To rescale an input in the ``[0, 255]`` range to be in the `[-1, 1]` + * range, you would pass `scale=1./127.5, offset=-1`. + * The rescaling is applied both during training and inference. Inputs can be + * of integer or floating point dtype, and by default the layer will output + * floats. + * + * Arguments: + * - `scale`: Float, the scale to apply to the inputs. + * - `offset`: Float, the offset to apply to the inputs. + * + * Input shape: + * Arbitrary. + * + * Output shape: + * Same as input. + * + * @doc {heading: 'Layers', subheading: 'Rescaling', namespace: 'layers'} + */ + function rescaling(args) { + return new Rescaling(args); + } + /** + * A preprocessing layer which center crops images. + * + * This layers crops the central portion of the images to a target size. 
If an + * image is smaller than the target size, it will be resized and cropped so as + * to return the largest possible window in the image that matches the target + * aspect ratio. + * + * Input pixel values can be of any range (e.g. `[0., 1.)` or `[0, 255]`) and + * of integer or floating point dtype. + * + * If the input height/width is even and the target height/width is odd (or + * inversely), the input image is left-padded by 1 pixel. + * + * Arguments: + * `height`: Integer, the height of the output shape. + * `width`: Integer, the width of the output shape. + * + * Input shape: + * 3D (unbatched) or 4D (batched) tensor with shape: + * `(..., height, width, channels)`, in `channelsLast` format. + * + * Output shape: + * 3D (unbatched) or 4D (batched) tensor with shape: + * `(..., targetHeight, targetWidth, channels)`. + * + * + * @doc {heading: 'Layers', subheading: 'CenterCrop', namespace: 'layers'} + */ + function centerCrop(args) { + return new CenterCrop(args); + } + /** + * A preprocessing layer which resizes images. + * This layer resizes an image input to a target height and width. The input + * should be a 4D (batched) or 3D (unbatched) tensor in `"channels_last"` + * format. Input pixel values can be of any range (e.g. `[0., 1.)` or `[0, + * 255]`) and of interger or floating point dtype. By default, the layer will + * output floats. + * + * Arguments: + * - `height`: number, the height for the output tensor. + * - `width`: number, the width for the output tensor. + * - `interpolation`: string, the method for image resizing interpolation. + * - `cropToAspectRatio`: boolean, whether to keep image aspect ratio. + * + * Input shape: + * Arbitrary. + * + * Output shape: + * height, width, num channels. + * + * @doc {heading: 'Layers', subheading: 'Resizing', namespace: 'layers'} + */ + function resizing(args) { + return new Resizing(args); + } + /** + * A preprocessing layer which encodes integer features. 
+ * + * This layer provides options for condensing data into a categorical encoding + * when the total number of tokens are known in advance. It accepts integer + * values as inputs, and it outputs a dense representation of those + * inputs. + * + * Arguments: + * + * numTokens: The total number of tokens the layer should support. All + * inputs to the layer must integers in the range `0 <= value < + * numTokens`, or an error will be thrown. + * + * outputMode: Specification for the output of the layer. + * Defaults to `multiHot`. Values can be `oneHot`, `multiHot` or + * `count`, configuring the layer as follows: + * + * oneHot: Encodes each individual element in the input into an + * array of `numTokens` size, containing a 1 at the element index. If + * the last dimension is size 1, will encode on that dimension. If the + * last dimension is not size 1, will append a new dimension for the + * encoded output. + * + * multiHot: Encodes each sample in the input into a single array + * of `numTokens` size, containing a 1 for each vocabulary term + * present in the sample. Treats the last dimension as the sample + * dimension, if input shape is `(..., sampleLength)`, output shape + * will be `(..., numTokens)`. + * + * count: Like `multiHot`, but the int array contains a count of + * the number of times the token at that index appeared in the sample. + * + * For all output modes, currently only output up to rank 2 is supported. + * Call arguments: + * inputs: A 1D or 2D tensor of integer inputs. + * countWeights: A tensor in the same shape as `inputs` indicating the + * weight for each sample value when summing up in `count` mode. Not used + * in `multiHot` or `oneHot` modes. + * + * + * @doc {heading: 'Layers', subheading: 'CategoryEncoding', namespace: 'layers'} + */ + function categoryEncoding(args) { + return new CategoryEncoding(args); + } + /** + * A preprocessing layer which randomly varies image width during training. 
+ * + * This layer will randomly adjusts the width of a batch of images of a batch + * of images by a random factor. + * + * The input should be a 3D (unbatched) or 4D (batched) tensor in + * the `"channels_last"` image data format. Input pixel values can be of any + * range (e.g. `[0., 1.)` or `[0, 255]`) and of integer or floating point + * dtype. By default, the layer will output floats. By default, this layer is + * inactive during inference. For an overview and full list of preprocessing + * layers, see the preprocessing [guide] + * (https://www.tensorflow.org/guide/keras/preprocessing_layers). + * + * Arguments: + * + * factor: + * A positive float (fraction of original width), or a tuple of size 2 + * representing lower and upper bound for resizing vertically. + * When represented as a single float, this value is used for both the upper + * and lower bound. For instance, `factor=(0.2, 0.3)` results in an output + * with width changed by a random amount in the range `[20%, 30%]`. + * `factor=(-0.2, 0.3)` results in an output with width changed by a random + * amount in the range `[-20%, +30%]`. `factor=0.2` results in an output + * with width changed by a random amount in the range `[-20%, +20%]`. + * interpolation: + * String, the interpolation method. + * Defaults to `bilinear`. + * Supports `"bilinear"`, `"nearest"`. + * The tf methods `"bicubic"`, `"area"`, `"lanczos3"`, `"lanczos5"`, + * `"gaussian"`, `"mitchellcubic"` are unimplemented in tfjs. + * seed: + * Integer. Used to create a random seed. + * + * Input shape: + * 3D (unbatched) or 4D (batched) tensor with shape: + * `(..., height, width, channels)`, in `"channels_last"` format. + * Output shape: + * 3D (unbatched) or 4D (batched) tensor with shape: + * `(..., height, random_width, channels)`. 
+ * + * + * @doc {heading: 'Layers', subheading: 'RandomWidth', namespace: 'layers'} + */ + function randomWidth(args) { + return new RandomWidth(args); + } + + var exports_layers = /*#__PURE__*/Object.freeze({ + __proto__: null, + Layer: Layer, + RNN: RNN, + RNNCell: RNNCell, + activation: activation, + add: add$1, + alphaDropout: alphaDropout, + average: average, + averagePooling1d: averagePooling1d, + averagePooling2d: averagePooling2d, + averagePooling3d: averagePooling3d, + avgPool1d: avgPool1d, + avgPool2d: avgPool2d, + avgPool3d: avgPool3d, + avgPooling1d: avgPooling1d, + avgPooling2d: avgPooling2d, + avgPooling3d: avgPooling3d, + batchNormalization: batchNormalization, + bidirectional: bidirectional, + categoryEncoding: categoryEncoding, + centerCrop: centerCrop, + concatenate: concatenate, + conv1d: conv1d, + conv2d: conv2d$1, + conv2dTranspose: conv2dTranspose, + conv3d: conv3d, + conv3dTranspose: conv3dTranspose, + convLstm2d: convLstm2d, + convLstm2dCell: convLstm2dCell, + cropping2D: cropping2D, + dense: dense, + depthwiseConv2d: depthwiseConv2d, + dot: dot, + dropout: dropout, + elu: elu$2, + embedding: embedding, + flatten: flatten, + gaussianDropout: gaussianDropout, + gaussianNoise: gaussianNoise, + globalAveragePooling1d: globalAveragePooling1d, + globalAveragePooling2d: globalAveragePooling2d, + globalMaxPool1d: globalMaxPool1d, + globalMaxPool2d: globalMaxPool2d, + globalMaxPooling1d: globalMaxPooling1d, + globalMaxPooling2d: globalMaxPooling2d, + gru: gru, + gruCell: gruCell, + input: input, + inputLayer: inputLayer, + layerNormalization: layerNormalization, + leakyReLU: leakyReLU, + lstm: lstm, + lstmCell: lstmCell, + masking: masking, + maxPool1d: maxPool1d, + maxPool2d: maxPool2d, + maxPooling1d: maxPooling1d, + maxPooling2d: maxPooling2d, + maxPooling3d: maxPooling3d, + maximum: maximum$2, + minimum: minimum$2, + multiply: multiply$2, + permute: permute, + prelu: prelu$2, + randomWidth: randomWidth, + reLU: reLU, + repeatVector: 
repeatVector, + rescaling: rescaling, + reshape: reshape$2, + resizing: resizing, + rnn: rnn, + separableConv2d: separableConv2d, + simpleRNN: simpleRNN, + simpleRNNCell: simpleRNNCell, + softmax: softmax$2, + spatialDropout1d: spatialDropout1d, + stackedRNNCells: stackedRNNCells, + thresholdedReLU: thresholdedReLU, + timeDistributed: timeDistributed, + upSampling2d: upSampling2d, + zeroPadding2d: zeroPadding2d + }); + + /** + * Binary accuracy metric function. + * + * `yTrue` and `yPred` can have 0-1 values. Example: + * ```js + * const x = tf.tensor2d([[1, 1, 1, 1], [0, 0, 0, 0]], [2, 4]); + * const y = tf.tensor2d([[1, 0, 1, 0], [0, 0, 0, 1]], [2, 4]); + * const accuracy = tf.metrics.binaryAccuracy(x, y); + * accuracy.print(); + * ``` + * + * `yTrue` and `yPred` can also have floating-number values between 0 and 1, in + * which case the values will be thresholded at 0.5 to yield 0-1 values (i.e., + * a value >= 0.5 and <= 1.0 is interpreted as 1). + * + * Example: + * ```js + * const x = tf.tensor1d([1, 1, 1, 1, 0, 0, 0, 0]); + * const y = tf.tensor1d([0.2, 0.4, 0.6, 0.8, 0.2, 0.3, 0.4, 0.7]); + * const accuracy = tf.metrics.binaryAccuracy(x, y); + * accuracy.print(); + * ``` + * + * @param yTrue Binary Tensor of truth. + * @param yPred Binary Tensor of prediction. + * @return Accuracy Tensor. + * + * @doc {heading: 'Metrics', namespace: 'metrics'} + */ + function binaryAccuracy(yTrue, yPred) { + return binaryAccuracy$1(yTrue, yPred); + } + /** + * Binary crossentropy metric function. + * + * Example: + * ```js + * const x = tf.tensor2d([[0], [1], [1], [1]]); + * const y = tf.tensor2d([[0], [0], [0.5], [1]]); + * const crossentropy = tf.metrics.binaryCrossentropy(x, y); + * crossentropy.print(); + * ``` + * + * @param yTrue Binary Tensor of truth. + * @param yPred Binary Tensor of prediction, probabilities for the `1` case. + * @return Accuracy Tensor. 
+ * + * @doc {heading: 'Metrics', namespace: 'metrics'} + */ + function binaryCrossentropy(yTrue, yPred) { + return binaryCrossentropy$1(yTrue, yPred); + } + /** + * Sparse categorical accuracy metric function. + * + * Example: + * ```js + * + * const yTrue = tf.tensor1d([1, 1, 2, 2, 0]); + * const yPred = tf.tensor2d( + * [[0, 1, 0], [1, 0, 0], [0, 0.4, 0.6], [0, 0.6, 0.4], [0.7, 0.3, 0]]); + * const crossentropy = tf.metrics.sparseCategoricalAccuracy(yTrue, yPred); + * crossentropy.print(); + * ``` + * + * @param yTrue True labels: indices. + * @param yPred Predicted probabilities or logits. + * @returns Accuracy tensor. + * + * @doc {heading: 'Metrics', namespace: 'metrics'} + */ + function sparseCategoricalAccuracy(yTrue, yPred) { + return sparseCategoricalAccuracy$1(yTrue, yPred); + } + /** + * Categorical accuracy metric function. + * + * Example: + * ```js + * const x = tf.tensor2d([[0, 0, 0, 1], [0, 0, 0, 1]]); + * const y = tf.tensor2d([[0.1, 0.8, 0.05, 0.05], [0.1, 0.05, 0.05, 0.8]]); + * const accuracy = tf.metrics.categoricalAccuracy(x, y); + * accuracy.print(); + * ``` + * + * @param yTrue Binary Tensor of truth: one-hot encoding of categories. + * @param yPred Binary Tensor of prediction: probabilities or logits for the + * same categories as in `yTrue`. + * @return Accuracy Tensor. + * + * @doc {heading: 'Metrics', namespace: 'metrics'} + */ + function categoricalAccuracy(yTrue, yPred) { + return categoricalAccuracy$1(yTrue, yPred); + } + /** + * Categorical crossentropy between an output tensor and a target tensor. + * + * @param target A tensor of the same shape as `output`. + * @param output A tensor resulting from a softmax (unless `fromLogits` is + * `true`, in which case `output` is expected to be the logits). + * @param fromLogits Boolean, whether `output` is the result of a softmax, or is + * a tensor of logits. 
+ * + * @doc {heading: 'Metrics', namespace: 'metrics'} + */ + function categoricalCrossentropy(yTrue, yPred) { + return categoricalCrossentropy$1(yTrue, yPred); + } + /** + * Computes the precision of the predictions with respect to the labels. + * + * Example: + * ```js + * const x = tf.tensor2d( + * [ + * [0, 0, 0, 1], + * [0, 1, 0, 0], + * [0, 0, 0, 1], + * [1, 0, 0, 0], + * [0, 0, 1, 0] + * ] + * ); + * + * const y = tf.tensor2d( + * [ + * [0, 0, 1, 0], + * [0, 1, 0, 0], + * [0, 0, 0, 1], + * [0, 1, 0, 0], + * [0, 1, 0, 0] + * ] + * ); + * + * const precision = tf.metrics.precision(x, y); + * precision.print(); + * ``` + * + * @param yTrue The ground truth values. Expected to contain only 0-1 values. + * @param yPred The predicted values. Expected to contain only 0-1 values. + * @return Precision Tensor. + * + * @doc {heading: 'Metrics', namespace: 'metrics'} + */ + function precision(yTrue, yPred) { + return precision$1(yTrue, yPred); + } + /** + * Computes the recall of the predictions with respect to the labels. + * + * Example: + * ```js + * const x = tf.tensor2d( + * [ + * [0, 0, 0, 1], + * [0, 1, 0, 0], + * [0, 0, 0, 1], + * [1, 0, 0, 0], + * [0, 0, 1, 0] + * ] + * ); + * + * const y = tf.tensor2d( + * [ + * [0, 0, 1, 0], + * [0, 1, 0, 0], + * [0, 0, 0, 1], + * [0, 1, 0, 0], + * [0, 1, 0, 0] + * ] + * ); + * + * const recall = tf.metrics.recall(x, y); + * recall.print(); + * ``` + * + * @param yTrue The ground truth values. Expected to contain only 0-1 values. + * @param yPred The predicted values. Expected to contain only 0-1 values. + * @return Recall Tensor. + * + * @doc {heading: 'Metrics', namespace: 'metrics'} + */ + function recall(yTrue, yPred) { + return recall$1(yTrue, yPred); + } + /** + * Loss or metric function: Cosine proximity. 
+ * + * Mathematically, cosine proximity is defined as: + * `-sum(l2Normalize(yTrue) * l2Normalize(yPred))`, + * wherein `l2Normalize()` normalizes the L2 norm of the input to 1 and `*` + * represents element-wise multiplication. + * + * ```js + * const yTrue = tf.tensor2d([[1, 0], [1, 0]]); + * const yPred = tf.tensor2d([[1 / Math.sqrt(2), 1 / Math.sqrt(2)], [0, 1]]); + * const proximity = tf.metrics.cosineProximity(yTrue, yPred); + * proximity.print(); + * ``` + * + * @param yTrue Truth Tensor. + * @param yPred Prediction Tensor. + * @return Cosine proximity Tensor. + * + * @doc {heading: 'Metrics', namespace: 'metrics'} + */ + function cosineProximity(yTrue, yPred) { + return cosineProximity$1(yTrue, yPred); + } + /** + * Loss or metric function: Mean absolute error. + * + * Mathematically, mean absolute error is defined as: + * `mean(abs(yPred - yTrue))`, + * wherein the `mean` is applied over feature dimensions. + * + * ```js + * const yTrue = tf.tensor2d([[0, 1], [0, 0], [2, 3]]); + * const yPred = tf.tensor2d([[0, 1], [0, 1], [-2, -3]]); + * const mse = tf.metrics.meanAbsoluteError(yTrue, yPred); + * mse.print(); + * ``` + * + * @param yTrue Truth Tensor. + * @param yPred Prediction Tensor. + * @return Mean absolute error Tensor. + * + * @doc {heading: 'Metrics', namespace: 'metrics'} + */ + function meanAbsoluteError(yTrue, yPred) { + return meanAbsoluteError$1(yTrue, yPred); + } + /** + * Loss or metric function: Mean absolute percentage error. + * + * ```js + * const yTrue = tf.tensor2d([[0, 1], [10, 20]]); + * const yPred = tf.tensor2d([[0, 1], [11, 24]]); + * const mse = tf.metrics.meanAbsolutePercentageError(yTrue, yPred); + * mse.print(); + * ``` + * + * Aliases: `tf.metrics.MAPE`, `tf.metrics.mape`. + * + * @param yTrue Truth Tensor. + * @param yPred Prediction Tensor. + * @return Mean absolute percentage error Tensor. 
+ * + * @doc {heading: 'Metrics', namespace: 'metrics'} + */ + function meanAbsolutePercentageError(yTrue, yPred) { + return meanAbsolutePercentageError$1(yTrue, yPred); + } + function MAPE(yTrue, yPred) { + return meanAbsolutePercentageError$1(yTrue, yPred); + } + function mape(yTrue, yPred) { + return meanAbsolutePercentageError$1(yTrue, yPred); + } + /** + * Loss or metric function: Mean squared error. + * + * ```js + * const yTrue = tf.tensor2d([[0, 1], [3, 4]]); + * const yPred = tf.tensor2d([[0, 1], [-3, -4]]); + * const mse = tf.metrics.meanSquaredError(yTrue, yPred); + * mse.print(); + * ``` + * + * Aliases: `tf.metrics.MSE`, `tf.metrics.mse`. + * + * @param yTrue Truth Tensor. + * @param yPred Prediction Tensor. + * @return Mean squared error Tensor. + * + * @doc {heading: 'Metrics', namespace: 'metrics'} + */ + function meanSquaredError(yTrue, yPred) { + return meanSquaredError$1(yTrue, yPred); + } + function MSE(yTrue, yPred) { + return meanSquaredError$1(yTrue, yPred); + } + function mse(yTrue, yPred) { + return meanSquaredError$1(yTrue, yPred); + } + /** + * Computes R2 score. + * + * ```js + * const yTrue = tf.tensor2d([[0, 1], [3, 4]]); + * const yPred = tf.tensor2d([[0, 1], [-3, -4]]); + * const r2Score = tf.metrics.r2Score(yTrue, yPred); + * r2Score.print(); + * ``` + * @param yTrue Truth Tensor. + * @param yPred Prediction Tensor. + * @return R2 score Tensor. 
+ * + * @doc {heading: 'Metrics', namespace: 'metrics'} + */ + function r2Score(yTrue, yPred) { + return r2Score$1(yTrue, yPred); + } + + var exports_metrics = /*#__PURE__*/Object.freeze({ + __proto__: null, + MAPE: MAPE, + MSE: MSE, + binaryAccuracy: binaryAccuracy, + binaryCrossentropy: binaryCrossentropy, + categoricalAccuracy: categoricalAccuracy, + categoricalCrossentropy: categoricalCrossentropy, + cosineProximity: cosineProximity, + mape: mape, + meanAbsoluteError: meanAbsoluteError, + meanAbsolutePercentageError: meanAbsolutePercentageError, + meanSquaredError: meanSquaredError, + mse: mse, + precision: precision, + r2Score: r2Score, + recall: recall, + sparseCategoricalAccuracy: sparseCategoricalAccuracy + }); + + /** + * @license + * Copyright 2018 Google LLC + * + * Use of this source code is governed by an MIT-style + * license that can be found in the LICENSE file or at + * https://opensource.org/licenses/MIT. + * ============================================================================= + */ + + var exports_models = /*#__PURE__*/Object.freeze({ + __proto__: null, + modelFromJSON: modelFromJSON + }); + + /** + * @license + * Copyright 2018 Google LLC + * + * Use of this source code is governed by an MIT-style + * license that can be found in the LICENSE file or at + * https://opensource.org/licenses/MIT. + * ============================================================================= + */ + /** + * Regularizer for L1 and L2 regularization. + * + * Adds a term to the loss to penalize large weights: + * loss += sum(l1 * abs(x)) + sum(l2 * x^2) + * + * @doc {heading: 'Regularizers', namespace: 'regularizers'} + */ + function l1l2(config) { + return new L1L2(config); + } + /** + * Regularizer for L1 regularization. + * + * Adds a term to the loss to penalize large weights: + * loss += sum(l1 * abs(x)) + * @param args l1 config. 
+ * + * @doc {heading: 'Regularizers', namespace: 'regularizers'} + */ + function l1(config) { + return l1$1(config); + } + /** + * Regularizer for L2 regularization. + * + * Adds a term to the loss to penalize large weights: + * loss += sum(l2 * x^2) + * @param args l2 config. + * + * @doc {heading: 'Regularizers', namespace: 'regularizers'} + */ + function l2(config) { + return l2$1(config); + } + + var exports_regularizers = /*#__PURE__*/Object.freeze({ + __proto__: null, + l1: l1, + l1l2: l1l2, + l2: l2 + }); + + /** + * @license + * Copyright 2018 Google LLC + * + * Use of this source code is governed by an MIT-style + * license that can be found in the LICENSE file or at + * https://opensource.org/licenses/MIT. + * ============================================================================= + */ + class Callback extends BaseCallback { + constructor() { + super(...arguments); + /** Instance of `keras.models.Model`. Reference of the model being trained. */ + this.model = null; + } + setModel(model) { + if (!(model instanceof LayersModel)) { + throw new Error('model must be a LayersModel, not some other Container'); + } + this.model = model; + } + } + function less$2(currVal, prevVal) { + return currVal < prevVal; + } + function greater$2(currVal, prevVal) { + return currVal > prevVal; + } + /** + * A Callback that stops training when a monitored quantity has stopped + * improving. 
+ */ + class EarlyStopping extends Callback { + constructor(args) { + super(); + if (args == null) { + args = {}; + } + if (args.restoreBestWeights) { + throw new NotImplementedError('restoreBestWeights = True is not implemented in EarlyStopping yet.'); + } + this.monitor = args.monitor || 'val_loss'; + this.minDelta = Math.abs(args.minDelta || 0); + this.patience = args.patience || 0; + this.verbose = args.verbose || 0; + this.mode = args.mode || 'auto'; + this.baseline = args.baseline; + if (['auto', 'min', 'max'].indexOf(this.mode) === -1) { + console.warn(`EarlyStopping mode '${this.mode}' is invalid. ` + + `Falling back to mode 'auto'.`); + this.mode = 'auto'; + } + if (this.mode === 'min') { + this.monitorFunc = less$2; + } + else if (this.mode === 'max') { + this.monitorFunc = greater$2; + } + else { + // For mode === 'auto'. + if (this.monitor.indexOf('acc') !== -1) { + this.monitorFunc = greater$2; + } + else { + this.monitorFunc = less$2; + } + } + if (this.monitorFunc === less$2) { + this.minDelta *= -1; + } + } + async onTrainBegin(logs) { + this.wait = 0; + this.stoppedEpoch = 0; + if (this.baseline != null) { + this.best = this.baseline; + } + else { + this.best = this.monitorFunc === less$2 ? Infinity : -Infinity; + } + } + async onEpochEnd(epoch, logs) { + await resolveScalarsInLogs(logs); + const current = this.getMonitorValue(logs); + if (current == null) { + return; + } + if (this.monitorFunc(current - this.minDelta, this.best)) { + this.best = current; + this.wait = 0; + // TODO(cais): Logic for restoreBestWeights. + } + else { + this.wait++; + if (this.wait >= this.patience) { + this.stoppedEpoch = epoch; + this.model.stopTraining = true; + } + // TODO(cais): Logic for restoreBestWeights. 
+ } + } + async onTrainEnd(logs) { + if (this.stoppedEpoch > 0 && this.verbose) { + console.log(`Epoch ${this.stoppedEpoch}: early stopping.`); + } + } + getMonitorValue(logs) { + if (logs == null) { + logs = {}; + } + const monitorValue = logs[this.monitor]; + if (monitorValue == null) { + console.warn(`Metric for EarlyStopping ${this.monitor} is not available. ` + + `Available metrics are: ${Object.keys(logs)}`); + } + return monitorValue; + } + } + /** + * Factory function for a Callback that stops training when a monitored + * quantity has stopped improving. + * + * Early stopping is a type of regularization, and protects model against + * overfitting. + * + * The following example based on fake data illustrates how this callback + * can be used during `tf.LayersModel.fit()`: + * + * ```js + * const model = tf.sequential(); + * model.add(tf.layers.dense({ + * units: 3, + * activation: 'softmax', + * kernelInitializer: 'ones', + * inputShape: [2] + * })); + * const xs = tf.tensor2d([1, 2, 3, 4], [2, 2]); + * const ys = tf.tensor2d([[1, 0, 0], [0, 1, 0]], [2, 3]); + * const xsVal = tf.tensor2d([4, 3, 2, 1], [2, 2]); + * const ysVal = tf.tensor2d([[0, 0, 1], [0, 1, 0]], [2, 3]); + * model.compile( + * {loss: 'categoricalCrossentropy', optimizer: 'sgd', metrics: ['acc']}); + * + * // Without the EarlyStopping callback, the val_acc value would be: + * // 0.5, 0.5, 0.5, 0.5, ... + * // With val_acc being monitored, training should stop after the 2nd epoch. + * const history = await model.fit(xs, ys, { + * epochs: 10, + * validationData: [xsVal, ysVal], + * callbacks: tf.callbacks.earlyStopping({monitor: 'val_acc'}) + * }); + * + * // Expect to see a length-2 array. 
+ * console.log(history.history.val_acc); + * ``` + * + * @doc { + * heading: 'Callbacks', + * namespace: 'callbacks' + * } + */ + function earlyStopping(args) { + return new EarlyStopping(args); + } + const callbacks = { earlyStopping }; + + /** + * @license + * Copyright 2018 Google LLC + * + * Use of this source code is governed by an MIT-style + * license that can be found in the LICENSE file or at + * https://opensource.org/licenses/MIT. + * ============================================================================= + */ + + /** + * @license + * Copyright 2021 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + const ENV$1 = env(); + /** Whether to keep intermediate tensors. */ + ENV$1.registerFlag('KEEP_INTERMEDIATE_TENSORS', () => false, debugValue => { + if (debugValue) { + console.warn('Keep intermediate tensors is ON. This will print the values of all ' + + 'intermediate tensors during model inference. Not all models ' + + 'support this mode. For details, check e2e/benchmarks/ ' + + 'model_config.js. This significantly impacts performance.'); + } + }); + + /** + * @license + * Copyright 2019 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * ============================================================================= + */ + /** DataType enum. */ + var DataType; + (function (DataType) { + // These properties must be quoted since they are used by parseDtypeParam + // in tfjs-converter/src/operations/operation_mapper.ts to look up dtypes + // by string name. If they are not quoted, Closure will mangle their names. + // Not a legal value for DataType. Used to indicate a DataType field + // has not been set. + DataType[DataType["DT_INVALID"] = 0] = "DT_INVALID"; + // Data types that all computation devices are expected to be + // capable to support. 
+ DataType[DataType["DT_FLOAT"] = 1] = "DT_FLOAT"; + DataType[DataType["DT_DOUBLE"] = 2] = "DT_DOUBLE"; + DataType[DataType["DT_INT32"] = 3] = "DT_INT32"; + DataType[DataType["DT_UINT8"] = 4] = "DT_UINT8"; + DataType[DataType["DT_INT16"] = 5] = "DT_INT16"; + DataType[DataType["DT_INT8"] = 6] = "DT_INT8"; + DataType[DataType["DT_STRING"] = 7] = "DT_STRING"; + DataType[DataType["DT_COMPLEX64"] = 8] = "DT_COMPLEX64"; + DataType[DataType["DT_INT64"] = 9] = "DT_INT64"; + DataType[DataType["DT_BOOL"] = 10] = "DT_BOOL"; + DataType[DataType["DT_QINT8"] = 11] = "DT_QINT8"; + DataType[DataType["DT_QUINT8"] = 12] = "DT_QUINT8"; + DataType[DataType["DT_QINT32"] = 13] = "DT_QINT32"; + DataType[DataType["DT_BFLOAT16"] = 14] = "DT_BFLOAT16"; + DataType[DataType["DT_QINT16"] = 15] = "DT_QINT16"; + DataType[DataType["DT_QUINT16"] = 16] = "DT_QUINT16"; + DataType[DataType["DT_UINT16"] = 17] = "DT_UINT16"; + DataType[DataType["DT_COMPLEX128"] = 18] = "DT_COMPLEX128"; + DataType[DataType["DT_HALF"] = 19] = "DT_HALF"; + DataType[DataType["DT_RESOURCE"] = 20] = "DT_RESOURCE"; + DataType[DataType["DT_VARIANT"] = 21] = "DT_VARIANT"; + DataType[DataType["DT_UINT32"] = 22] = "DT_UINT32"; + DataType[DataType["DT_UINT64"] = 23] = "DT_UINT64"; + // Do not use! These are only for parameters. Every enum above + // should have a corresponding value below (verified by types_test). 
+ DataType[DataType["DT_FLOAT_REF"] = 101] = "DT_FLOAT_REF"; + DataType[DataType["DT_DOUBLE_REF"] = 102] = "DT_DOUBLE_REF"; + DataType[DataType["DT_INT32_REF"] = 103] = "DT_INT32_REF"; + DataType[DataType["DT_UINT8_REF"] = 104] = "DT_UINT8_REF"; + DataType[DataType["DT_INT16_REF"] = 105] = "DT_INT16_REF"; + DataType[DataType["DT_INT8_REF"] = 106] = "DT_INT8_REF"; + DataType[DataType["DT_STRING_REF"] = 107] = "DT_STRING_REF"; + DataType[DataType["DT_COMPLEX64_REF"] = 108] = "DT_COMPLEX64_REF"; + DataType[DataType["DT_INT64_REF"] = 109] = "DT_INT64_REF"; + DataType[DataType["DT_BOOL_REF"] = 110] = "DT_BOOL_REF"; + DataType[DataType["DT_QINT8_REF"] = 111] = "DT_QINT8_REF"; + DataType[DataType["DT_QUINT8_REF"] = 112] = "DT_QUINT8_REF"; + DataType[DataType["DT_QINT32_REF"] = 113] = "DT_QINT32_REF"; + DataType[DataType["DT_BFLOAT16_REF"] = 114] = "DT_BFLOAT16_REF"; + DataType[DataType["DT_QINT16_REF"] = 115] = "DT_QINT16_REF"; + DataType[DataType["DT_QUINT16_REF"] = 116] = "DT_QUINT16_REF"; + DataType[DataType["DT_UINT16_REF"] = 117] = "DT_UINT16_REF"; + DataType[DataType["DT_COMPLEX128_REF"] = 118] = "DT_COMPLEX128_REF"; + DataType[DataType["DT_HALF_REF"] = 119] = "DT_HALF_REF"; + DataType[DataType["DT_RESOURCE_REF"] = 120] = "DT_RESOURCE_REF"; + DataType[DataType["DT_VARIANT_REF"] = 121] = "DT_VARIANT_REF"; + DataType[DataType["DT_UINT32_REF"] = 122] = "DT_UINT32_REF"; + DataType[DataType["DT_UINT64_REF"] = 123] = "DT_UINT64_REF"; + })(DataType || (DataType = {})); + var SaverDef; + (function (SaverDef) { + /** CheckpointFormatVersion enum. 
*/ + let CheckpointFormatVersion; + (function (CheckpointFormatVersion) { + CheckpointFormatVersion[CheckpointFormatVersion["LEGACY"] = 0] = "LEGACY"; + CheckpointFormatVersion[CheckpointFormatVersion["V1"] = 1] = "V1"; + CheckpointFormatVersion[CheckpointFormatVersion["V2"] = 2] = "V2"; + })(CheckpointFormatVersion = SaverDef.CheckpointFormatVersion || (SaverDef.CheckpointFormatVersion = {})); + })(SaverDef || (SaverDef = {})); + + /** + * @license + * Copyright 2019 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + const CUSTOM_OPS = {}; + /** + * Register an Op for graph model executor. This allows you to register + * TensorFlow custom op or override existing op. + * + * Here is an example of registering a new MatMul Op. + * ```js + * const customMatmul = (node) => + * tf.matMul( + * node.inputs[0], node.inputs[1], + * node.attrs['transpose_a'], node.attrs['transpose_b']); + * + * tf.registerOp('MatMul', customMatmul); + * ``` + * The inputs and attrs of the node object are based on the TensorFlow op + * registry. + * + * @param name The Tensorflow Op name. + * @param opFunc An op function which is called with the current graph node + * during execution and needs to return a tensor or a list of tensors. 
The node + * has the following attributes: + * - attr: A map from attribute name to its value + * - inputs: A list of input tensors + * + * @doc {heading: 'Models', subheading: 'Op Registry'} + */ + function registerOp(name, opFunc) { + const opMapper = { + tfOpName: name, + category: 'custom', + inputs: [], + attrs: [], + customExecutor: opFunc + }; + CUSTOM_OPS[name] = opMapper; + } + /** + * Retrieve the OpMapper object for the registered op. + * + * @param name The Tensorflow Op name. + * + * @doc {heading: 'Models', subheading: 'Op Registry'} + */ + function getRegisteredOp(name) { + return CUSTOM_OPS[name]; + } + /** + * Deregister the Op for graph model executor. + * + * @param name The Tensorflow Op name. + * + * @doc {heading: 'Models', subheading: 'Op Registry'} + */ + function deregisterOp(name) { + delete CUSTOM_OPS[name]; + } + + /** + * @license + * Copyright 2018 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + function getParamValue(paramName, node, tensorMap, context, resourceManager) { + const inputParam = node.inputParams[paramName]; + if (inputParam && inputParam.inputIndexStart !== undefined) { + const start = inputParam.inputIndexStart; + const end = inputParam.inputIndexEnd === 0 ? + undefined : + (inputParam.inputIndexEnd === undefined ? start + 1 : + inputParam.inputIndexEnd); + const shiftedStart = start < 0 ? 
node.inputNames.length + start : start; + if (inputParam.type === 'tensor') { + return getTensor(node.inputNames[shiftedStart], tensorMap, context, resourceManager); + } + if (inputParam.type === 'tensors') { + // TODO(mattSoulanille): This filters out NoOp nodes during execution, but + // these should really never be in the execution graph in the first place. + // They're necessary for ordering the graph, but should not be visible + // during execution. Perhaps have different sets of children, one for + // control dependencies and another for real dependencies. + const inputs = node.inputs.slice(start, end); + const inputNames = node.inputNames.slice(start, end) + .filter((_name, index) => { var _a; return ((_a = inputs[index]) === null || _a === void 0 ? void 0 : _a.op) !== 'NoOp'; }); + return inputNames.map(name => getTensor(name, tensorMap, context, resourceManager)); + } + const tensor = getTensor(node.inputNames[shiftedStart], tensorMap, context, resourceManager); + const data = tensor.dataSync(); + return inputParam.type === 'number' ? + data[0] : + toNestedArray(tensor.shape, data); + } + const attrParam = node.attrParams[paramName]; + return attrParam && attrParam.value; + } + /** + * Retrieve the tensor from tensorsMap based on input name. + * @param name Node input name + * @param tensorsMap Tensors map keyed by the node + * @param context contains tensors and information for running the current node. + * @param resourceManager Optional. Contains global resources of the model. + */ + function getTensor(name, tensorsMap, context, resourceManager) { + const [nodeName, index] = parseNodeName(name, context); + if (resourceManager != null) { + const tensor = resourceManager.getHashTableHandleByName(nodeName); + if (tensor != null) { + return tensor; + } + } + const contextId = context.currentContextIds.find(contextId => { + return !!tensorsMap[getNodeNameWithContextId(nodeName, contextId)]; + }); + return contextId !== undefined ? 
+ tensorsMap[getNodeNameWithContextId(nodeName, contextId)][index] : + undefined; + } + /** + * Retrieve the tensors based on input name for current context. + * @param name Node input name + * @param tensorsMap Tensors map keyed by the node + */ + function getTensorsForCurrentContext(name, tensorsMap, context) { + return tensorsMap[getNodeNameWithContextId(name, context.currentContextId)]; + } + /** + * Returns the node name, outputName and index from the Node input name. + * @param inputName The input name of the node, in format of + * node_name:output_index, i.e. MatMul:0, if the output_index is not set, it is + * default to 0. + * If the input name contains output name i.e. StringSplit:indices:0, it will + * return ['StringSplit', 0, 'indices']. + */ + function getNodeNameAndIndex(inputName, context) { + const [nodeName, index, outputName] = parseNodeName(inputName, context); + return [ + getNodeNameWithContextId(nodeName, context && context.currentContextId), + index, outputName + ]; + } + function getNodeNameWithContextId(name, contextId) { + return !!contextId ? `${name}-${contextId}` : name; + } + function parseNodeName(name, context) { + if (name === '') { + return ['', 0, undefined]; + } + const isCacheEnabled = context != null && context.parseNodeNameCache != null; + if (isCacheEnabled) { + const cachedResult = context.parseNodeNameCache.get(name); + if (cachedResult != null) { + return cachedResult; + } + } + const parts = name.split(':'); + let result; + if (parts.length === 1) { + result = [name, 0, undefined]; + } + else { + const nodeName = parts[0]; + const outputName = parts.length === 3 ? 
parts[1] : undefined; + const index = Number(parts[parts.length - 1]); + result = [nodeName, index, outputName]; + } + if (isCacheEnabled) { + context.parseNodeNameCache.set(name, result); + } + return result; + } + function split$2(arr, size) { + const res = []; + for (let i = 0; i < arr.length; i += size) { + res.push(arr.slice(i, i + size)); + } + return res; + } + function getPadding(node, tensorMap, context) { + let pad = getParamValue('pad', node, tensorMap, context); + if (pad === 'explicit') { + // This is 1d array, we need to convert it to 2d array + pad = getParamValue('explicitPaddings', node, tensorMap, context); + const explicitPadding = [[0, 0], [0, 0], [0, 0], [0, 0]]; + for (let i = 0; i < 4; i++) { + explicitPadding[i][0] = pad[i * 2]; + explicitPadding[i][1] = pad[i * 2 + 1]; + } + return explicitPadding; + } + return pad; + } + /** + * Reuse the tensor if it is marked as keep, otherwise clone the tensor to + * avoid disposal. This is important for TensorArray and TensorList ops, since + * internally they use a tensor as the id for TensorArray and TensorList, and + * to simplify lookup, they also use Tensor.id as the key to the internal map. + * These id tensors have been marked as kept in the backend, we need avoid clone + * them in order to create new Tensor.id. + * @param tensor + */ + function cloneTensor(tensor) { + return tensor.kept ? tensor : clone(tensor); + } + + /** + * @license + * Copyright 2023 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + const json$i = [ + { + 'tfOpName': 'Add', + 'category': 'arithmetic', + 'inputs': [ + { + 'start': 0, + 'name': 'a', + 'type': 'tensor' + }, + { + 'start': 1, + 'name': 'b', + 'type': 'tensor' + } + ], + 'attrs': [ + { + 'tfName': 'T', + 'name': 'dtype', + 'type': 'dtype', + 'notSupported': true + } + ] + }, + { + 'tfOpName': 'AddV2', + 'category': 'arithmetic', + 'inputs': [ + { + 'start': 0, + 'name': 'a', + 'type': 'tensor' + }, + { + 'start': 1, + 'name': 'b', + 'type': 'tensor' + } + ], + 'attrs': [ + { + 'tfName': 'T', + 'name': 'dtype', + 'type': 'dtype', + 'notSupported': true + } + ] + }, + { + 'tfOpName': 'AddN', + 'category': 'arithmetic', + 'inputs': [ + { + 'start': 0, + 'end': 0, + 'name': 'tensors', + 'type': 'tensors' + } + ] + }, + { + 'tfOpName': 'BiasAdd', + 'category': 'arithmetic', + 'inputs': [ + { + 'start': 0, + 'name': 'a', + 'type': 'tensor' + }, + { + 'start': 1, + 'name': 'b', + 'type': 'tensor' + } + ], + 'attrs': [ + { + 'tfName': 'T', + 'name': 'dtype', + 'type': 'dtype', + 'notSupported': true + }, + { + 'tfName': 'data_format', + 'name': 'dataFormat', + 'type': 'string', + 'notSupported': true + } + ] + }, + { + 'tfOpName': 'Sub', + 'category': 'arithmetic', + 'inputs': [ + { + 'start': 0, + 'name': 'a', + 'type': 'tensor' + }, + { + 'start': 1, + 'name': 'b', + 'type': 'tensor' + } + ], + 'attrs': [ + { + 'tfName': 'T', + 'name': 'dtype', + 'type': 'dtype', + 'notSupported': true + } + ] + }, + { + 'tfOpName': 'RealDiv', + 'category': 'arithmetic', + 'inputs': [ + { + 'start': 0, + 'name': 'a', + 'type': 'tensor' + }, + { + 'start': 1, + 'name': 'b', + 'type': 'tensor' + } + ], + 'attrs': [ + { + 'tfName': 'T', + 'name': 'dtype', + 'type': 'dtype', + 'notSupported': true + } + ] + }, + { + 'tfOpName': 'Div', + 'category': 'arithmetic', + 
'inputs': [ + { + 'start': 0, + 'name': 'a', + 'type': 'tensor' + }, + { + 'start': 1, + 'name': 'b', + 'type': 'tensor' + } + ], + 'attrs': [ + { + 'tfName': 'T', + 'name': 'dtype', + 'type': 'dtype', + 'notSupported': true + } + ] + }, + { + 'tfOpName': 'DivNoNan', + 'category': 'arithmetic', + 'inputs': [ + { + 'start': 0, + 'name': 'a', + 'type': 'tensor' + }, + { + 'start': 1, + 'name': 'b', + 'type': 'tensor' + } + ], + 'attrs': [ + { + 'tfName': 'T', + 'name': 'dtype', + 'type': 'dtype', + 'notSupported': true + } + ] + }, + { + 'tfOpName': 'FloorDiv', + 'category': 'arithmetic', + 'inputs': [ + { + 'start': 0, + 'name': 'a', + 'type': 'tensor' + }, + { + 'start': 1, + 'name': 'b', + 'type': 'tensor' + } + ], + 'attrs': [ + { + 'tfName': 'T', + 'name': 'dtype', + 'type': 'dtype', + 'notSupported': true + } + ] + }, + { + 'tfOpName': 'Mul', + 'category': 'arithmetic', + 'inputs': [ + { + 'start': 0, + 'name': 'a', + 'type': 'tensor' + }, + { + 'start': 1, + 'name': 'b', + 'type': 'tensor' + } + ], + 'attrs': [ + { + 'tfName': 'T', + 'name': 'dtype', + 'type': 'dtype', + 'notSupported': true + } + ] + }, + { + 'tfOpName': 'Maximum', + 'category': 'arithmetic', + 'inputs': [ + { + 'start': 0, + 'name': 'a', + 'type': 'tensor' + }, + { + 'start': 1, + 'name': 'b', + 'type': 'tensor' + } + ], + 'attrs': [ + { + 'tfName': 'T', + 'name': 'dtype', + 'type': 'dtype', + 'notSupported': true + } + ] + }, + { + 'tfOpName': 'Minimum', + 'category': 'arithmetic', + 'inputs': [ + { + 'start': 0, + 'name': 'a', + 'type': 'tensor' + }, + { + 'start': 1, + 'name': 'b', + 'type': 'tensor' + } + ], + 'attrs': [ + { + 'tfName': 'T', + 'name': 'dtype', + 'type': 'dtype', + 'notSupported': true + } + ] + }, + { + 'tfOpName': 'Pow', + 'category': 'arithmetic', + 'inputs': [ + { + 'start': 0, + 'name': 'a', + 'type': 'tensor' + }, + { + 'start': 1, + 'name': 'b', + 'type': 'tensor' + } + ], + 'attrs': [ + { + 'tfName': 'T', + 'name': 'dtype', + 'type': 'dtype', + 'notSupported': 
true + } + ] + }, + { + 'tfOpName': 'SquaredDifference', + 'category': 'arithmetic', + 'inputs': [ + { + 'start': 0, + 'name': 'a', + 'type': 'tensor' + }, + { + 'start': 1, + 'name': 'b', + 'type': 'tensor' + } + ], + 'attrs': [ + { + 'tfName': 'T', + 'name': 'dtype', + 'type': 'dtype', + 'notSupported': true + } + ] + }, + { + 'tfOpName': 'Mod', + 'category': 'arithmetic', + 'inputs': [ + { + 'start': 0, + 'name': 'a', + 'type': 'tensor' + }, + { + 'start': 1, + 'name': 'b', + 'type': 'tensor' + } + ], + 'attrs': [ + { + 'tfName': 'T', + 'name': 'dtype', + 'type': 'dtype', + 'notSupported': true + } + ] + }, + { + 'tfOpName': 'FloorMod', + 'category': 'arithmetic', + 'inputs': [ + { + 'start': 0, + 'name': 'a', + 'type': 'tensor' + }, + { + 'start': 1, + 'name': 'b', + 'type': 'tensor' + } + ], + 'attrs': [ + { + 'tfName': 'T', + 'name': 'dtype', + 'type': 'dtype', + 'notSupported': true + } + ] + } + ]; + + var arithmetic = /*#__PURE__*/Object.freeze({ + __proto__: null, + json: json$i + }); + + /** + * @license + * Copyright 2023 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ + const json$h = [ + { + 'tfOpName': 'Abs', + 'category': 'basic_math', + 'inputs': [ + { + 'start': 0, + 'name': 'x', + 'type': 'tensor' + } + ], + 'attrs': [ + { + 'tfName': 'T', + 'name': 'dtype', + 'type': 'dtype', + 'notSupported': true + } + ] + }, + { + 'tfOpName': 'Acos', + 'category': 'basic_math', + 'inputs': [ + { + 'start': 0, + 'name': 'x', + 'type': 'tensor' + } + ], + 'attrs': [ + { + 'tfName': 'T', + 'name': 'dtype', + 'type': 'dtype', + 'notSupported': true + } + ] + }, + { + 'tfOpName': 'Asin', + 'category': 'basic_math', + 'inputs': [ + { + 'start': 0, + 'name': 'x', + 'type': 'tensor' + } + ], + 'attrs': [ + { + 'tfName': 'T', + 'name': 'dtype', + 'type': 'dtype', + 'notSupported': true + } + ] + }, + { + 'tfOpName': 'Atan', + 'category': 'basic_math', + 'inputs': [ + { + 'start': 0, + 'name': 'x', + 'type': 'tensor' + } + ], + 'attrs': [ + { + 'tfName': 'T', + 'name': 'dtype', + 'type': 'dtype', + 'notSupported': true + } + ] + }, + { + 'tfOpName': 'Atan2', + 'category': 'basic_math', + 'inputs': [ + { + 'start': 0, + 'name': 'x', + 'type': 'tensor' + }, + { + 'start': 1, + 'name': 'y', + 'type': 'tensor' + } + ], + 'attrs': [ + { + 'tfName': 'T', + 'name': 'dtype', + 'type': 'dtype', + 'notSupported': true + } + ] + }, + { + 'tfOpName': 'Ceil', + 'category': 'basic_math', + 'inputs': [ + { + 'start': 0, + 'name': 'x', + 'type': 'tensor' + } + ], + 'attrs': [ + { + 'tfName': 'T', + 'name': 'dtype', + 'type': 'dtype', + 'notSupported': true + } + ] + }, + { + 'tfOpName': 'ClipByValue', + 'category': 'basic_math', + 'inputs': [ + { + 'start': 0, + 'name': 'x', + 'type': 'tensor' + }, + { + 'start': 1, + 'name': 'clipValueMin', + 'type': 'number' + }, + { + 'start': 2, + 'name': 'clipValueMax', + 'type': 'number' + } + ], + 'attrs': [ + { + 'tfName': 'T', + 'name': 'dtype', + 'type': 'dtype', + 'notSupported': true + } + ] + }, + { + 'tfOpName': 'Complex', + 
'category': 'basic_math', + 'inputs': [ + { + 'start': 0, + 'name': 'real', + 'type': 'tensor' + }, + { + 'start': 1, + 'name': 'imag', + 'type': 'tensor' + } + ], + 'attrs': [ + { + 'tfName': 'T', + 'name': 'dtype', + 'type': 'dtype', + 'notSupported': true + } + ] + }, + { + 'tfOpName': 'ComplexAbs', + 'category': 'basic_math', + 'inputs': [ + { + 'start': 0, + 'name': 'x', + 'type': 'tensor' + } + ], + 'attrs': [ + { + 'tfName': 'T', + 'name': 'dtype', + 'type': 'dtype', + 'notSupported': true + } + ] + }, + { + 'tfOpName': 'Cos', + 'category': 'basic_math', + 'inputs': [ + { + 'start': 0, + 'name': 'x', + 'type': 'tensor' + } + ], + 'attrs': [ + { + 'tfName': 'T', + 'name': 'dtype', + 'type': 'dtype', + 'notSupported': true + } + ] + }, + { + 'tfOpName': 'Cosh', + 'category': 'basic_math', + 'inputs': [ + { + 'start': 0, + 'name': 'x', + 'type': 'tensor' + } + ], + 'attrs': [ + { + 'tfName': 'T', + 'name': 'dtype', + 'type': 'dtype', + 'notSupported': true + } + ] + }, + { + 'tfOpName': 'Elu', + 'category': 'basic_math', + 'inputs': [ + { + 'start': 0, + 'name': 'x', + 'type': 'tensor' + } + ], + 'attrs': [ + { + 'tfName': 'T', + 'name': 'dtype', + 'type': 'dtype', + 'notSupported': true + } + ] + }, + { + 'tfOpName': 'Exp', + 'category': 'basic_math', + 'inputs': [ + { + 'start': 0, + 'name': 'x', + 'type': 'tensor' + } + ], + 'attrs': [ + { + 'tfName': 'T', + 'name': 'dtype', + 'type': 'dtype', + 'notSupported': true + } + ] + }, + { + 'tfOpName': 'Floor', + 'category': 'basic_math', + 'inputs': [ + { + 'start': 0, + 'name': 'x', + 'type': 'tensor' + } + ], + 'attrs': [ + { + 'tfName': 'T', + 'name': 'dtype', + 'type': 'dtype', + 'notSupported': true + } + ] + }, + { + 'tfOpName': 'Log', + 'category': 'basic_math', + 'inputs': [ + { + 'start': 0, + 'name': 'x', + 'type': 'tensor' + } + ], + 'attrs': [ + { + 'tfName': 'T', + 'name': 'dtype', + 'type': 'dtype', + 'notSupported': true + } + ] + }, + { + 'tfOpName': 'Imag', + 'category': 'basic_math', + 'inputs': 
[ + { + 'start': 0, + 'name': 'x', + 'type': 'tensor' + } + ], + 'attrs': [ + { + 'tfName': 'T', + 'name': 'dtype', + 'type': 'dtype', + 'notSupported': true + }, + { + 'tfName': 'Tout', + 'name': 'outputType', + 'type': 'dtype', + 'notSupported': true + } + ] + }, + { + 'tfOpName': 'Neg', + 'category': 'basic_math', + 'inputs': [ + { + 'start': 0, + 'name': 'x', + 'type': 'tensor' + } + ], + 'attrs': [ + { + 'tfName': 'T', + 'name': 'dtype', + 'type': 'dtype', + 'notSupported': true + } + ] + }, + { + 'tfOpName': 'Real', + 'category': 'basic_math', + 'inputs': [ + { + 'start': 0, + 'name': 'x', + 'type': 'tensor' + } + ], + 'attrs': [ + { + 'tfName': 'T', + 'name': 'dtype', + 'type': 'dtype', + 'notSupported': true + }, + { + 'tfName': 'Tout', + 'name': 'outputType', + 'type': 'dtype', + 'notSupported': true + } + ] + }, + { + 'tfOpName': 'Prelu', + 'category': 'basic_math', + 'inputs': [ + { + 'start': 0, + 'name': 'x', + 'type': 'tensor' + }, + { + 'start': 1, + 'name': 'alpha', + 'type': 'tensor' + } + ], + 'attrs': [ + { + 'tfName': 'T', + 'name': 'dtype', + 'type': 'dtype', + 'notSupported': true + } + ] + }, + { + 'tfOpName': 'Relu', + 'category': 'basic_math', + 'inputs': [ + { + 'start': 0, + 'name': 'x', + 'type': 'tensor' + } + ], + 'attrs': [ + { + 'tfName': 'T', + 'name': 'dtype', + 'type': 'dtype', + 'notSupported': true + } + ] + }, + { + 'tfOpName': 'Relu6', + 'category': 'basic_math', + 'inputs': [ + { + 'start': 0, + 'name': 'x', + 'type': 'tensor' + } + ], + 'attrs': [ + { + 'tfName': 'T', + 'name': 'dtype', + 'type': 'dtype', + 'notSupported': true + } + ] + }, + { + 'tfOpName': 'Selu', + 'category': 'basic_math', + 'inputs': [ + { + 'start': 0, + 'name': 'x', + 'type': 'tensor' + } + ], + 'attrs': [ + { + 'tfName': 'T', + 'name': 'dtype', + 'type': 'dtype', + 'notSupported': true + } + ] + }, + { + 'tfOpName': 'Sigmoid', + 'category': 'basic_math', + 'inputs': [ + { + 'start': 0, + 'name': 'x', + 'type': 'tensor' + } + ], + 'attrs': [ + { + 
'tfName': 'T', + 'name': 'dtype', + 'type': 'dtype', + 'notSupported': true + } + ] + }, + { + 'tfOpName': 'Sin', + 'category': 'basic_math', + 'inputs': [ + { + 'start': 0, + 'name': 'x', + 'type': 'tensor' + } + ], + 'attrs': [ + { + 'tfName': 'T', + 'name': 'dtype', + 'type': 'dtype', + 'notSupported': true + } + ] + }, + { + 'tfOpName': 'Sinh', + 'category': 'basic_math', + 'inputs': [ + { + 'start': 0, + 'name': 'x', + 'type': 'tensor' + } + ], + 'attrs': [ + { + 'tfName': 'T', + 'name': 'dtype', + 'type': 'dtype', + 'notSupported': true + } + ] + }, + { + 'tfOpName': 'Sqrt', + 'category': 'basic_math', + 'inputs': [ + { + 'start': 0, + 'name': 'x', + 'type': 'tensor' + } + ], + 'attrs': [ + { + 'tfName': 'T', + 'name': 'dtype', + 'type': 'dtype', + 'notSupported': true + } + ] + }, + { + 'tfOpName': 'Rsqrt', + 'category': 'basic_math', + 'inputs': [ + { + 'start': 0, + 'name': 'x', + 'type': 'tensor' + } + ], + 'attrs': [ + { + 'tfName': 'T', + 'name': 'dtype', + 'type': 'dtype', + 'notSupported': true + } + ] + }, + { + 'tfOpName': 'Square', + 'category': 'basic_math', + 'inputs': [ + { + 'start': 0, + 'name': 'x', + 'type': 'tensor' + } + ], + 'attrs': [ + { + 'tfName': 'T', + 'name': 'dtype', + 'type': 'dtype', + 'notSupported': true + } + ] + }, + { + 'tfOpName': 'Tan', + 'category': 'basic_math', + 'inputs': [ + { + 'start': 0, + 'name': 'x', + 'type': 'tensor' + } + ], + 'attrs': [ + { + 'tfName': 'T', + 'name': 'dtype', + 'type': 'dtype', + 'notSupported': true + } + ] + }, + { + 'tfOpName': 'Tanh', + 'category': 'basic_math', + 'inputs': [ + { + 'start': 0, + 'name': 'x', + 'type': 'tensor' + } + ], + 'attrs': [ + { + 'tfName': 'T', + 'name': 'dtype', + 'type': 'dtype', + 'notSupported': true + } + ] + }, + { + 'tfOpName': 'Sign', + 'category': 'basic_math', + 'inputs': [ + { + 'start': 0, + 'name': 'x', + 'type': 'tensor' + } + ], + 'attrs': [ + { + 'tfName': 'T', + 'name': 'dtype', + 'type': 'dtype', + 'notSupported': true + } + ] + }, + { + 
'tfOpName': 'Round', + 'category': 'basic_math', + 'inputs': [ + { + 'start': 0, + 'name': 'x', + 'type': 'tensor' + } + ], + 'attrs': [ + { + 'tfName': 'T', + 'name': 'dtype', + 'type': 'dtype', + 'notSupported': true + } + ] + }, + { + 'tfOpName': 'Expm1', + 'category': 'basic_math', + 'inputs': [ + { + 'start': 0, + 'name': 'x', + 'type': 'tensor' + } + ], + 'attrs': [ + { + 'tfName': 'T', + 'name': 'dtype', + 'type': 'dtype', + 'notSupported': true + } + ] + }, + { + 'tfOpName': 'Log1p', + 'category': 'basic_math', + 'inputs': [ + { + 'start': 0, + 'name': 'x', + 'type': 'tensor' + } + ], + 'attrs': [ + { + 'tfName': 'T', + 'name': 'dtype', + 'type': 'dtype', + 'notSupported': true + } + ] + }, + { + 'tfOpName': 'Reciprocal', + 'category': 'basic_math', + 'inputs': [ + { + 'start': 0, + 'name': 'x', + 'type': 'tensor' + } + ], + 'attrs': [ + { + 'tfName': 'T', + 'name': 'dtype', + 'type': 'dtype', + 'notSupported': true + } + ] + }, + { + 'tfOpName': 'Softplus', + 'category': 'basic_math', + 'inputs': [ + { + 'start': 0, + 'name': 'x', + 'type': 'tensor' + } + ], + 'attrs': [ + { + 'tfName': 'T', + 'name': 'dtype', + 'type': 'dtype', + 'notSupported': true + } + ] + }, + { + 'tfOpName': 'Asinh', + 'category': 'basic_math', + 'inputs': [ + { + 'start': 0, + 'name': 'x', + 'type': 'tensor' + } + ], + 'attrs': [ + { + 'tfName': 'T', + 'name': 'dtype', + 'type': 'dtype', + 'notSupported': true + } + ] + }, + { + 'tfOpName': 'Acosh', + 'category': 'basic_math', + 'inputs': [ + { + 'start': 0, + 'name': 'x', + 'type': 'tensor' + } + ], + 'attrs': [ + { + 'tfName': 'T', + 'name': 'dtype', + 'type': 'dtype', + 'notSupported': true + } + ] + }, + { + 'tfOpName': 'Atanh', + 'category': 'basic_math', + 'inputs': [ + { + 'start': 0, + 'name': 'x', + 'type': 'tensor' + } + ], + 'attrs': [ + { + 'tfName': 'T', + 'name': 'dtype', + 'type': 'dtype', + 'notSupported': true + } + ] + }, + { + 'tfOpName': 'Erf', + 'category': 'basic_math', + 'inputs': [ + { + 'start': 0, + 
'name': 'x', + 'type': 'tensor' + } + ], + 'attrs': [ + { + 'tfName': 'T', + 'name': 'dtype', + 'type': 'dtype', + 'notSupported': true + } + ] + }, + { + 'tfOpName': 'LeakyRelu', + 'category': 'basic_math', + 'inputs': [ + { + 'start': 0, + 'name': 'x', + 'type': 'tensor' + } + ], + 'attrs': [ + { + 'tfName': 'alpha', + 'name': 'alpha', + 'type': 'number', + 'defaultValue': 0.2 + }, + { + 'tfName': 'T', + 'name': 'dtype', + 'type': 'dtype', + 'notSupported': true + } + ] + }, + { + 'tfOpName': 'IsNan', + 'category': 'basic_math', + 'inputs': [ + { + 'start': 0, + 'name': 'x', + 'type': 'tensor' + } + ], + 'attrs': [ + { + 'tfName': 'T', + 'name': 'dtype', + 'type': 'dtype', + 'notSupported': true + } + ] + }, + { + 'tfOpName': 'IsFinite', + 'category': 'basic_math', + 'inputs': [ + { + 'start': 0, + 'name': 'x', + 'type': 'tensor' + } + ], + 'attrs': [ + { + 'tfName': 'T', + 'name': 'dtype', + 'type': 'dtype', + 'notSupported': true + } + ] + }, + { + 'tfOpName': 'IsInf', + 'category': 'basic_math', + 'inputs': [ + { + 'start': 0, + 'name': 'x', + 'type': 'tensor' + } + ], + 'attrs': [ + { + 'tfName': 'T', + 'name': 'dtype', + 'type': 'dtype', + 'notSupported': true + } + ] + } + ]; + + var basicMath = /*#__PURE__*/Object.freeze({ + __proto__: null, + json: json$h + }); + + /** + * @license + * Copyright 2023 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ + const json$g = [ + { + 'tfOpName': 'EmptyTensorList', + 'category': 'control', + 'inputs': [ + { + 'start': 0, + 'name': 'elementShape', + 'type': 'shape' + }, + { + 'start': 1, + 'name': 'maxNumElements', + 'type': 'number' + } + ], + 'attrs': [ + { + 'tfName': 'element_dtype', + 'name': 'elementDType', + 'type': 'dtype' + } + ] + }, + { + 'tfOpName': 'LoopCond', + 'category': 'control', + 'inputs': [ + { + 'start': 0, + 'name': 'pred', + 'type': 'tensor' + } + ] + }, + { + 'tfOpName': 'Switch', + 'category': 'control', + 'inputs': [ + { + 'start': 0, + 'name': 'data', + 'type': 'tensor' + }, + { + 'start': 1, + 'name': 'pred', + 'type': 'tensor' + } + ] + }, + { + 'tfOpName': 'Merge', + 'category': 'control', + 'inputs': [ + { + 'start': 0, + 'end': 0, + 'name': 'tensors', + 'type': 'tensors' + } + ] + }, + { + 'tfOpName': 'Enter', + 'category': 'control', + 'inputs': [ + { + 'start': 0, + 'name': 'tensor', + 'type': 'tensor' + } + ], + 'attrs': [ + { + 'tfName': 'T', + 'name': 'dtype', + 'type': 'dtype', + 'notSupported': true + }, + { + 'tfName': 'frame_name', + 'name': 'frameName', + 'type': 'string' + }, + { + 'tfName': 'is_constant', + 'name': 'isConstant', + 'type': 'bool' + } + ] + }, + { + 'tfOpName': 'Exit', + 'category': 'control', + 'inputs': [ + { + 'start': 0, + 'name': 'tensor', + 'type': 'tensor' + } + ], + 'attrs': [ + { + 'tfName': 'T', + 'name': 'dtype', + 'type': 'dtype', + 'notSupported': true + } + ] + }, + { + 'tfOpName': 'NextIteration', + 'category': 'control', + 'inputs': [ + { + 'start': 0, + 'name': 'tensor', + 'type': 'tensor' + } + ], + 'attrs': [ + { + 'tfName': 'T', + 'name': 'dtype', + 'type': 'dtype', + 'notSupported': true + } + ] + }, + { + 'tfOpName': 'TensorArrayV3', + 'category': 'control', + 'inputs': [ + { + 'start': 0, + 'name': 'size', + 'type': 'number' + } + ], + 'attrs': [ + { + 'tfName': 'dtype', + 'name': 'dtype', + 'type': 
'dtype' + }, + { + 'tfName': 'element_shape', + 'name': 'elementShape', + 'type': 'shape' + }, + { + 'tfName': 'dynamic_size', + 'name': 'dynamicSize', + 'type': 'bool' + }, + { + 'tfName': 'clear_after_read', + 'name': 'clearAfterRead', + 'type': 'bool' + }, + { + 'tfName': 'identical_element_shapes', + 'name': 'identicalElementShapes', + 'type': 'bool' + }, + { + 'tfName': 'tensor_array_name', + 'name': 'name', + 'type': 'string' + } + ] + }, + { + 'tfOpName': 'TensorArrayWriteV3', + 'category': 'control', + 'inputs': [ + { + 'start': 0, + 'name': 'tensorArrayId', + 'type': 'tensor' + }, + { + 'start': 1, + 'name': 'index', + 'type': 'number' + }, + { + 'start': 2, + 'name': 'tensor', + 'type': 'tensor' + }, + { + 'start': 3, + 'name': 'flowIn', + 'type': 'number' + } + ], + 'attrs': [ + { + 'tfName': 'T', + 'name': 'dtype', + 'type': 'dtype', + 'notSupported': true + } + ] + }, + { + 'tfOpName': 'TensorArrayReadV3', + 'category': 'control', + 'inputs': [ + { + 'start': 0, + 'name': 'tensorArrayId', + 'type': 'tensor' + }, + { + 'start': 1, + 'name': 'index', + 'type': 'number' + }, + { + 'start': 2, + 'name': 'flowIn', + 'type': 'number' + } + ], + 'attrs': [ + { + 'tfName': 'dtype', + 'name': 'dtype', + 'type': 'dtype', + 'notSupported': true + } + ] + }, + { + 'tfOpName': 'TensorArrayGatherV3', + 'category': 'control', + 'inputs': [ + { + 'start': 0, + 'name': 'tensorArrayId', + 'type': 'tensor' + }, + { + 'start': 1, + 'name': 'indices', + 'type': 'number[]' + }, + { + 'start': 2, + 'name': 'flowIn', + 'type': 'number' + } + ], + 'attrs': [ + { + 'tfName': 'dtype', + 'name': 'dtype', + 'type': 'dtype' + }, + { + 'tfName': 'element_shape', + 'name': 'elementShape', + 'type': 'shape' + } + ] + }, + { + 'tfOpName': 'TensorArrayScatterV3', + 'category': 'control', + 'inputs': [ + { + 'start': 0, + 'name': 'tensorArrayId', + 'type': 'tensor' + }, + { + 'start': 1, + 'name': 'indices', + 'type': 'number[]' + }, + { + 'start': 2, + 'name': 'tensor', + 'type': 
'tensor' + }, + { + 'start': 3, + 'name': 'flowIn', + 'type': 'number' + } + ], + 'attrs': [ + { + 'tfName': 'T', + 'name': 'dtype', + 'type': 'dtype' + } + ] + }, + { + 'tfOpName': 'TensorArrayConcatV3', + 'category': 'control', + 'inputs': [ + { + 'start': 0, + 'name': 'tensorArrayId', + 'type': 'tensor' + }, + { + 'start': 1, + 'name': 'flowIn', + 'type': 'number' + } + ], + 'attrs': [ + { + 'tfName': 'dtype', + 'name': 'dtype', + 'type': 'dtype' + }, + { + 'tfName': 'element_shape_except0', + 'name': 'elementShapeExcept0', + 'type': 'shape', + 'notSupported': true + } + ] + }, + { + 'tfOpName': 'TensorArraySplitV3', + 'category': 'control', + 'inputs': [ + { + 'start': 0, + 'name': 'tensorArrayId', + 'type': 'tensor' + }, + { + 'start': 1, + 'name': 'tensor', + 'type': 'tensor' + }, + { + 'start': 2, + 'name': 'lengths', + 'type': 'number[]' + }, + { + 'start': 3, + 'name': 'flowIn', + 'type': 'number' + } + ], + 'attrs': [ + { + 'tfName': 'T', + 'name': 'dtype', + 'type': 'dtype' + } + ] + }, + { + 'tfOpName': 'TensorArraySizeV3', + 'category': 'control', + 'inputs': [ + { + 'start': 0, + 'name': 'tensorArrayId', + 'type': 'tensor' + }, + { + 'start': 1, + 'name': 'flowIn', + 'type': 'number' + } + ] + }, + { + 'tfOpName': 'TensorArrayCloseV3', + 'category': 'control', + 'inputs': [ + { + 'start': 0, + 'name': 'tensorArrayId', + 'type': 'tensor' + } + ] + }, + { + 'tfOpName': 'StatelessIf', + 'category': 'control', + 'inputs': [ + { + 'start': 0, + 'name': 'cond', + 'type': 'tensor' + }, + { + 'start': 1, + 'end': 0, + 'name': 'args', + 'type': 'tensors' + } + ], + 'attrs': [ + { + 'tfName': 'then_branch', + 'name': 'thenBranch', + 'type': 'func' + }, + { + 'tfName': 'else_branch', + 'name': 'elseBranch', + 'type': 'func' + } + ] + }, + { + 'tfOpName': 'If', + 'category': 'control', + 'inputs': [ + { + 'start': 0, + 'name': 'cond', + 'type': 'tensor' + }, + { + 'start': 1, + 'end': 0, + 'name': 'args', + 'type': 'tensors' + } + ], + 'attrs': [ + { + 'tfName': 
'then_branch', + 'name': 'thenBranch', + 'type': 'func' + }, + { + 'tfName': 'else_branch', + 'name': 'elseBranch', + 'type': 'func' + } + ] + }, + { + 'tfOpName': 'StatelessWhile', + 'category': 'control', + 'inputs': [ + { + 'start': 0, + 'end': 0, + 'name': 'args', + 'type': 'tensors' + } + ], + 'attrs': [ + { + 'tfName': 'cond', + 'name': 'cond', + 'type': 'func' + }, + { + 'tfName': 'body', + 'name': 'body', + 'type': 'func' + } + ] + }, + { + 'tfOpName': 'While', + 'category': 'control', + 'inputs': [ + { + 'start': 0, + 'end': 0, + 'name': 'args', + 'type': 'tensors' + } + ], + 'attrs': [ + { + 'tfName': 'cond', + 'name': 'cond', + 'type': 'func' + }, + { + 'tfName': 'body', + 'name': 'body', + 'type': 'func' + } + ] + }, + { + 'tfOpName': 'TensorListScatter', + 'category': 'control', + 'inputs': [ + { + 'start': 0, + 'name': 'tensor', + 'type': 'tensor' + }, + { + 'start': 1, + 'name': 'indices', + 'type': 'number[]' + }, + { + 'start': 2, + 'name': 'elementShape', + 'type': 'shape' + } + ], + 'attrs': [ + { + 'tfName': 'element_dtype', + 'name': 'elementDType', + 'type': 'dtype' + } + ] + }, + { + 'tfOpName': 'TensorListScatterV2', + 'category': 'control', + 'inputs': [ + { + 'start': 0, + 'name': 'tensor', + 'type': 'tensor' + }, + { + 'start': 1, + 'name': 'indices', + 'type': 'number[]' + }, + { + 'start': 2, + 'name': 'elementShape', + 'type': 'shape' + }, + { + 'start': 3, + 'name': 'numElements', + 'type': 'number' + } + ], + 'attrs': [ + { + 'tfName': 'element_dtype', + 'name': 'elementDType', + 'type': 'dtype' + } + ] + }, + { + 'tfOpName': 'TensorListGather', + 'category': 'control', + 'inputs': [ + { + 'start': 0, + 'name': 'tensorListId', + 'type': 'tensor' + }, + { + 'start': 1, + 'name': 'indices', + 'type': 'number[]' + }, + { + 'start': 2, + 'name': 'elementShape', + 'type': 'shape' + } + ], + 'attrs': [ + { + 'tfName': 'element_dtype', + 'name': 'elementDType', + 'type': 'dtype' + } + ] + }, + { + 'tfOpName': 'TensorListGetItem', + 
'category': 'control', + 'inputs': [ + { + 'start': 0, + 'name': 'tensorListId', + 'type': 'tensor' + }, + { + 'start': 1, + 'name': 'index', + 'type': 'number' + }, + { + 'start': 2, + 'name': 'elementShape', + 'type': 'shape' + } + ], + 'attrs': [ + { + 'tfName': 'element_dtype', + 'name': 'elementDType', + 'type': 'dtype' + } + ] + }, + { + 'tfOpName': 'TensorListSetItem', + 'category': 'control', + 'inputs': [ + { + 'start': 0, + 'name': 'tensorListId', + 'type': 'tensor' + }, + { + 'start': 1, + 'name': 'index', + 'type': 'number' + }, + { + 'start': 2, + 'name': 'tensor', + 'type': 'tensor' + } + ], + 'attrs': [ + { + 'tfName': 'element_dtype', + 'name': 'elementDType', + 'type': 'dtype' + } + ] + }, + { + 'tfOpName': 'TensorListReserve', + 'category': 'control', + 'inputs': [ + { + 'start': 0, + 'name': 'elementShape', + 'type': 'shape' + }, + { + 'start': 1, + 'name': 'numElements', + 'type': 'number' + } + ], + 'attrs': [ + { + 'tfName': 'element_dtype', + 'name': 'elementDType', + 'type': 'dtype' + } + ] + }, + { + 'tfOpName': 'TensorListFromTensor', + 'category': 'control', + 'inputs': [ + { + 'start': 0, + 'name': 'tensor', + 'type': 'tensor' + }, + { + 'start': 1, + 'name': 'elementShape', + 'type': 'shape' + } + ], + 'attrs': [ + { + 'tfName': 'element_dtype', + 'name': 'elementDType', + 'type': 'dtype' + } + ] + }, + { + 'tfOpName': 'TensorListStack', + 'category': 'control', + 'inputs': [ + { + 'start': 0, + 'name': 'tensorListId', + 'type': 'tensor' + }, + { + 'start': 1, + 'name': 'elementShape', + 'type': 'shape' + } + ], + 'attrs': [ + { + 'tfName': 'element_dtype', + 'name': 'elementDType', + 'type': 'dtype' + }, + { + 'tfName': 'num_elements', + 'name': 'numElements', + 'type': 'dtype' + } + ] + }, + { + 'tfOpName': 'TensorListSplit', + 'category': 'control', + 'inputs': [ + { + 'start': 0, + 'name': 'tensor', + 'type': 'tensor' + }, + { + 'start': 1, + 'name': 'elementShape', + 'type': 'shape' + }, + { + 'start': 2, + 'name': 'lengths', + 
'type': 'number[]' + } + ], + 'attrs': [ + { + 'tfName': 'element_dtype', + 'name': 'elementDType', + 'type': 'dtype' + } + ] + }, + { + 'tfOpName': 'TensorListConcat', + 'category': 'control', + 'inputs': [ + { + 'start': 0, + 'name': 'tensorListId', + 'type': 'tensor' + } + ], + 'attrs': [ + { + 'tfName': 'element_shape', + 'name': 'elementShape', + 'type': 'shape' + }, + { + 'tfName': 'element_dtype', + 'name': 'elementDType', + 'type': 'dtype' + } + ] + }, + { + 'tfOpName': 'TensorListConcatV2', + 'category': 'control', + 'inputs': [ + { + 'start': 0, + 'name': 'tensorListId', + 'type': 'tensor' + } + ], + 'attrs': [ + { + 'tfName': 'element_shape', + 'name': 'elementShape', + 'type': 'shape' + }, + { + 'tfName': 'element_dtype', + 'name': 'elementDType', + 'type': 'dtype' + } + ] + }, + { + 'tfOpName': 'TensorListPopBack', + 'category': 'control', + 'inputs': [ + { + 'start': 0, + 'name': 'tensorListId', + 'type': 'tensor' + }, + { + 'start': 1, + 'name': 'elementShape', + 'type': 'shape' + } + ], + 'attrs': [ + { + 'tfName': 'element_dtype', + 'name': 'elementDType', + 'type': 'dtype' + } + ] + }, + { + 'tfOpName': 'TensorListPushBack', + 'category': 'control', + 'inputs': [ + { + 'start': 0, + 'name': 'tensorListId', + 'type': 'tensor' + }, + { + 'start': 1, + 'name': 'tensor', + 'type': 'tensor' + } + ], + 'attrs': [ + { + 'tfName': 'element_dtype', + 'name': 'elementDType', + 'type': 'dtype' + } + ] + }, + { + 'tfOpName': 'TensorListLength', + 'category': 'control', + 'inputs': [ + { + 'start': 0, + 'name': 'tensorListId', + 'type': 'tensor' + } + ] + }, + { + 'tfOpName': 'TensorListResize', + 'category': 'control', + 'inputs': [ + { + 'start': 0, + 'name': 'tensorListId', + 'type': 'tensor' + }, + { + 'start': 1, + 'name': 'size', + 'type': 'number' + } + ] + } + ]; + + var control = /*#__PURE__*/Object.freeze({ + __proto__: null, + json: json$g + }); + + /** + * @license + * Copyright 2023 Google LLC. All Rights Reserved. 
+ * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + const json$f = [ + { + 'tfOpName': 'AvgPool', + 'category': 'convolution', + 'inputs': [ + { + 'start': 0, + 'name': 'x', + 'type': 'tensor' + } + ], + 'attrs': [ + { + 'tfName': 'strides', + 'name': 'strides', + 'type': 'number[]' + }, + { + 'tfName': 'padding', + 'name': 'pad', + 'type': 'string' + }, + { + 'tfName': 'data_format', + 'name': 'dataFormat', + 'type': 'string', + 'notSupported': true + }, + { + 'tfName': 'ksize', + 'name': 'kernelSize', + 'type': 'number[]' + }, + { + 'tfName': 'T', + 'name': 'dtype', + 'type': 'dtype', + 'notSupported': true + } + ] + }, + { + 'tfOpName': 'MaxPool', + 'category': 'convolution', + 'inputs': [ + { + 'start': 0, + 'name': 'x', + 'type': 'tensor' + } + ], + 'attrs': [ + { + 'tfName': 'strides', + 'name': 'strides', + 'type': 'number[]' + }, + { + 'tfName': 'padding', + 'name': 'pad', + 'type': 'string' + }, + { + 'tfName': 'data_format', + 'name': 'dataFormat', + 'type': 'string', + 'notSupported': true + }, + { + 'tfName': 'ksize', + 'name': 'kernelSize', + 'type': 'number[]' + }, + { + 'tfName': 'explicit_paddings', + 'name': 'explicitPaddings', + 'type': 'number[]', + 'defaultValue': [], + 'notSupported': true + }, + { + 'tfName': 'T', + 'name': 'dtype', + 'type': 'dtype', + 'notSupported': true + } + ] + }, + { + 'tfOpName': 'MaxPoolWithArgmax', + 'category': 'convolution', + 
'inputs': [ + { + 'start': 0, + 'name': 'x', + 'type': 'tensor' + } + ], + 'attrs': [ + { + 'tfName': 'strides', + 'name': 'strides', + 'type': 'number[]' + }, + { + 'tfName': 'padding', + 'name': 'pad', + 'type': 'string' + }, + { + 'tfName': 'ksize', + 'name': 'kernelSize', + 'type': 'number[]' + }, + { + 'tfName': 'include_batch_in_index', + 'name': 'includeBatchInIndex', + 'type': 'bool' + }, + { + 'tfName': 'T', + 'name': 'dtype', + 'type': 'dtype', + 'notSupported': true + } + ] + }, + { + 'tfOpName': 'AvgPool3D', + 'category': 'convolution', + 'inputs': [ + { + 'start': 0, + 'name': 'x', + 'type': 'tensor' + } + ], + 'attrs': [ + { + 'tfName': 'strides', + 'name': 'strides', + 'type': 'number[]' + }, + { + 'tfName': 'padding', + 'name': 'pad', + 'type': 'string' + }, + { + 'tfName': 'data_format', + 'name': 'dataFormat', + 'type': 'string', + 'notSupported': true + }, + { + 'tfName': 'ksize', + 'name': 'kernelSize', + 'type': 'number[]' + }, + { + 'tfName': 'T', + 'name': 'dtype', + 'type': 'dtype', + 'notSupported': true + } + ] + }, + { + 'tfOpName': 'MaxPool3D', + 'category': 'convolution', + 'inputs': [ + { + 'start': 0, + 'name': 'x', + 'type': 'tensor' + } + ], + 'attrs': [ + { + 'tfName': 'strides', + 'name': 'strides', + 'type': 'number[]' + }, + { + 'tfName': 'padding', + 'name': 'pad', + 'type': 'string' + }, + { + 'tfName': 'data_format', + 'name': 'dataFormat', + 'type': 'string', + 'notSupported': true + }, + { + 'tfName': 'ksize', + 'name': 'kernelSize', + 'type': 'number[]' + }, + { + 'tfName': 'T', + 'name': 'dtype', + 'type': 'dtype', + 'notSupported': true + } + ] + }, + { + 'tfOpName': 'Conv1D', + 'category': 'convolution', + 'inputs': [ + { + 'start': 0, + 'name': 'x', + 'type': 'tensor' + }, + { + 'start': 1, + 'name': 'filter', + 'type': 'tensor' + } + ], + 'attrs': [ + { + 'tfName': 'stride', + 'name': 'stride', + 'type': 'number' + }, + { + 'tfName': 'padding', + 'name': 'pad', + 'type': 'string' + }, + { + 'tfName': 'data_format', + 
'name': 'dataFormat', + 'type': 'string', + 'defaultValue': 'NWC' + }, + { + 'tfName': 'T', + 'name': 'dtype', + 'type': 'dtype', + 'notSupported': true + }, + { + 'tfName': 'dilation', + 'name': 'dilation', + 'type': 'number', + 'defaultValue': 1 + } + ] + }, + { + 'tfOpName': 'Conv2D', + 'category': 'convolution', + 'inputs': [ + { + 'start': 0, + 'name': 'x', + 'type': 'tensor' + }, + { + 'start': 1, + 'name': 'filter', + 'type': 'tensor' + } + ], + 'attrs': [ + { + 'tfName': 'T', + 'name': 'dtype', + 'type': 'dtype', + 'notSupported': true + }, + { + 'tfName': 'strides', + 'name': 'strides', + 'type': 'number[]' + }, + { + 'tfName': 'padding', + 'name': 'pad', + 'type': 'string' + }, + { + 'tfName': 'useCudnnOnGpu', + 'name': 'useCudnnOnGpu', + 'type': 'bool' + }, + { + 'tfName': 'data_format', + 'name': 'dataFormat', + 'type': 'string', + 'defaultValue': 'NHWC' + }, + { + 'tfName': 'explicit_paddings', + 'name': 'explicitPaddings', + 'type': 'number[]', + 'defaultValue': [] + }, + { + 'tfName': 'dilations', + 'name': 'dilations', + 'type': 'number[]' + } + ] + }, + { + 'tfOpName': '_FusedConv2D', + 'category': 'convolution', + 'inputs': [ + { + 'start': 0, + 'name': 'x', + 'type': 'tensor' + }, + { + 'start': 1, + 'name': 'filter', + 'type': 'tensor' + }, + { + 'start': 2, + 'end': 0, + 'name': 'args', + 'type': 'tensors' + } + ], + 'attrs': [ + { + 'tfName': 'num_args', + 'name': 'numArgs', + 'type': 'number' + }, + { + 'tfName': 'T', + 'name': 'dtype', + 'type': 'dtype', + 'notSupported': true + }, + { + 'tfName': 'strides', + 'name': 'strides', + 'type': 'number[]' + }, + { + 'tfName': 'padding', + 'name': 'pad', + 'type': 'string' + }, + { + 'tfName': 'explicit_paddings', + 'name': 'explicitPaddings', + 'type': 'number[]', + 'defaultValue': [] + }, + { + 'tfName': 'use_cudnn_on_gpu', + 'name': 'useCudnnOnGpu', + 'type': 'bool', + 'defaultValue': true + }, + { + 'tfName': 'data_format', + 'name': 'dataFormat', + 'type': 'string', + 'defaultValue': 'NHWC' + 
}, + { + 'tfName': 'dilations', + 'name': 'dilations', + 'type': 'number[]', + 'defaultValue': [ + 1, + 1, + 1, + 1 + ] + }, + { + 'tfName': 'fused_ops', + 'name': 'fusedOps', + 'type': 'string[]', + 'defaultValue': [] + }, + { + 'tfName': 'epsilon', + 'name': 'epsilon', + 'type': 'number', + 'defaultValue': 0.0001 + }, + { + 'tfName': 'leakyrelu_alpha', + 'name': 'leakyreluAlpha', + 'type': 'number', + 'defaultValue': 0.2 + } + ] + }, + { + 'tfOpName': 'Conv2DBackpropInput', + 'category': 'convolution', + 'inputs': [ + { + 'start': 2, + 'name': 'x', + 'type': 'tensor' + }, + { + 'start': 1, + 'name': 'filter', + 'type': 'tensor' + }, + { + 'start': 0, + 'name': 'outputShape', + 'type': 'number[]' + } + ], + 'attrs': [ + { + 'tfName': 'strides', + 'name': 'strides', + 'type': 'number[]' + }, + { + 'tfName': 'padding', + 'name': 'pad', + 'type': 'string' + }, + { + 'tfName': 'data_format', + 'name': 'dataFormat', + 'type': 'string', + 'notSupported': true + }, + { + 'tfName': 'explicit_paddings', + 'name': 'explicitPaddings', + 'type': 'number[]', + 'defaultValue': [] + }, + { + 'tfName': 'dilations', + 'name': 'dilations', + 'type': 'number[]', + 'notSupported': true + } + ] + }, + { + 'tfOpName': 'DepthwiseConv2d', + 'category': 'convolution', + 'inputs': [ + { + 'start': 0, + 'name': 'input', + 'type': 'tensor' + }, + { + 'start': 1, + 'name': 'filter', + 'type': 'tensor' + } + ], + 'attrs': [ + { + 'tfName': 'strides', + 'name': 'strides', + 'type': 'number[]' + }, + { + 'tfName': 'padding', + 'name': 'pad', + 'type': 'string' + }, + { + 'tfName': 'data_format', + 'name': 'dataFormat', + 'type': 'string', + 'defaultValue': 'NHWC' + }, + { + 'tfName': 'explicit_paddings', + 'name': 'explicitPaddings', + 'type': 'number[]', + 'defaultValue': [] + }, + { + 'tfName': 'dilations', + 'name': 'dilations', + 'type': 'number[]' + } + ] + }, + { + 'tfOpName': 'DepthwiseConv2dNative', + 'category': 'convolution', + 'inputs': [ + { + 'start': 0, + 'name': 'input', + 'type': 
'tensor' + }, + { + 'start': 1, + 'name': 'filter', + 'type': 'tensor' + } + ], + 'attrs': [ + { + 'tfName': 'strides', + 'name': 'strides', + 'type': 'number[]' + }, + { + 'tfName': 'padding', + 'name': 'pad', + 'type': 'string' + }, + { + 'tfName': 'data_format', + 'name': 'dataFormat', + 'type': 'string', + 'defaultValue': 'NHWC' + }, + { + 'tfName': 'explicit_paddings', + 'name': 'explicitPaddings', + 'type': 'number[]', + 'defaultValue': [] + }, + { + 'tfName': 'dilations', + 'name': 'dilations', + 'type': 'number[]' + } + ] + }, + { + 'tfOpName': 'FusedDepthwiseConv2dNative', + 'category': 'convolution', + 'inputs': [ + { + 'start': 0, + 'name': 'x', + 'type': 'tensor' + }, + { + 'start': 1, + 'name': 'filter', + 'type': 'tensor' + }, + { + 'start': 2, + 'end': 0, + 'name': 'args', + 'type': 'tensors' + } + ], + 'attrs': [ + { + 'tfName': 'num_args', + 'name': 'numArgs', + 'type': 'number' + }, + { + 'tfName': 'T', + 'name': 'dtype', + 'type': 'dtype', + 'notSupported': true + }, + { + 'tfName': 'strides', + 'name': 'strides', + 'type': 'number[]' + }, + { + 'tfName': 'padding', + 'name': 'pad', + 'type': 'string' + }, + { + 'tfName': 'data_format', + 'name': 'dataFormat', + 'type': 'string', + 'defaultValue': 'NHWC' + }, + { + 'tfName': 'dilations', + 'name': 'dilations', + 'type': 'number[]', + 'defaultValue': [ + 1, + 1, + 1, + 1 + ] + }, + { + 'tfName': 'fused_ops', + 'name': 'fusedOps', + 'type': 'string[]', + 'defaultValue': [] + }, + { + 'tfName': 'explicit_paddings', + 'name': 'explicitPaddings', + 'type': 'number[]', + 'defaultValue': [] + } + ] + }, + { + 'tfOpName': 'Conv3D', + 'category': 'convolution', + 'inputs': [ + { + 'start': 0, + 'name': 'x', + 'type': 'tensor' + }, + { + 'start': 1, + 'name': 'filter', + 'type': 'tensor' + } + ], + 'attrs': [ + { + 'tfName': 'strides', + 'name': 'strides', + 'type': 'number[]' + }, + { + 'tfName': 'padding', + 'name': 'pad', + 'type': 'string' + }, + { + 'tfName': 'data_format', + 'name': 'dataFormat', + 
'type': 'string', + 'defaultValue': 'NHWC' + }, + { + 'tfName': 'dilations', + 'name': 'dilations', + 'type': 'number[]' + } + ] + }, + { + 'tfOpName': 'Dilation2D', + 'category': 'convolution', + 'inputs': [ + { + 'start': 0, + 'name': 'x', + 'type': 'tensor' + }, + { + 'start': 1, + 'name': 'filter', + 'type': 'tensor' + } + ], + 'attrs': [ + { + 'tfName': 'strides', + 'name': 'strides', + 'type': 'number[]' + }, + { + 'tfName': 'rates', + 'name': 'dilations', + 'type': 'number[]' + }, + { + 'tfName': 'padding', + 'name': 'pad', + 'type': 'string' + } + ] + } + ]; + + var convolution = /*#__PURE__*/Object.freeze({ + __proto__: null, + json: json$f + }); + + /** + * @license + * Copyright 2023 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ + const json$e = [ + { + 'tfOpName': 'Fill', + 'category': 'creation', + 'inputs': [ + { + 'start': 0, + 'name': 'shape', + 'type': 'number[]' + }, + { + 'start': 1, + 'name': 'value', + 'type': 'number' + } + ], + 'attrs': [ + { + 'tfName': 'T', + 'name': 'dtype', + 'type': 'dtype' + } + ] + }, + { + 'tfOpName': 'LinSpace', + 'category': 'creation', + 'inputs': [ + { + 'start': 0, + 'name': 'start', + 'type': 'number' + }, + { + 'start': 1, + 'name': 'stop', + 'type': 'number' + }, + { + 'start': 2, + 'name': 'num', + 'type': 'number' + } + ], + 'attrs': [ + { + 'tfName': 'T', + 'name': 'dtype', + 'type': 'dtype', + 'notSupported': true + } + ] + }, + { + 'tfOpName': 'OneHot', + 'category': 'creation', + 'inputs': [ + { + 'start': 0, + 'name': 'indices', + 'type': 'tensor' + }, + { + 'start': 1, + 'name': 'depth', + 'type': 'number' + }, + { + 'start': 2, + 'name': 'onValue', + 'type': 'number', + 'defaultValue': 1 + }, + { + 'start': 3, + 'name': 'offValue', + 'type': 'number', + 'defaultValue': 0 + } + ], + 'attrs': [ + { + 'tfName': 'axis', + 'name': 'axis', + 'type': 'number', + 'notSupported': true + }, + { + 'tfName': 'T', + 'name': 'dtype', + 'type': 'dtype' + } + ] + }, + { + 'tfOpName': 'Ones', + 'category': 'creation', + 'inputs': [ + { + 'start': 0, + 'name': 'shape', + 'type': 'number[]' + } + ], + 'attrs': [ + { + 'tfName': 'T', + 'name': 'dtype', + 'type': 'dtype' + } + ] + }, + { + 'tfOpName': 'OnesLike', + 'category': 'creation', + 'inputs': [ + { + 'start': 0, + 'name': 'x', + 'type': 'tensor' + } + ], + 'attrs': [ + { + 'tfName': 'dtype', + 'name': 'dtype', + 'type': 'dtype' + } + ] + }, + { + 'tfOpName': 'RandomStandardNormal', + 'category': 'creation', + 'inputs': [ + { + 'start': 0, + 'name': 'shape', + 'type': 'number[]' + } + ], + 'attrs': [ + { + 'tfName': 'seed', + 'name': 'seed', + 'type': 'number', + 'defaultValue': 0 + }, + { + 'tfName': 'seed2', + 
'name': 'seed2', + 'type': 'number', + 'defaultValue': 0, + 'notSupported': true + }, + { + 'tfName': 'dtype', + 'name': 'dtype', + 'type': 'dtype' + }, + { + 'tfName': 'T', + 'name': 'T', + 'type': 'number', + 'notSupported': true + } + ] + }, + { + 'tfOpName': 'RandomUniform', + 'category': 'creation', + 'inputs': [ + { + 'start': 0, + 'name': 'shape', + 'type': 'number[]' + } + ], + 'attrs': [ + { + 'tfName': 'minval', + 'name': 'minval', + 'type': 'number', + 'defaultValue': 0 + }, + { + 'tfName': 'maxval', + 'name': 'maxval', + 'type': 'number', + 'defaultValue': 1 + }, + { + 'tfName': 'dtype', + 'name': 'dtype', + 'type': 'dtype' + }, + { + 'tfName': 'seed', + 'name': 'seed', + 'type': 'number', + 'defaultValue': 0 + }, + { + 'tfName': 'seed2', + 'name': 'seed2', + 'type': 'number', + 'defaultValue': 0, + 'notSupported': true + }, + { + 'tfName': 'T', + 'name': 'T', + 'type': 'number', + 'notSupported': true + } + ] + }, + { + 'tfOpName': 'RandomUniformInt', + 'category': 'creation', + 'inputs': [ + { + 'start': 0, + 'name': 'shape', + 'type': 'number[]' + } + ], + 'attrs': [ + { + 'tfName': 'minval', + 'name': 'minval', + 'type': 'number' + }, + { + 'tfName': 'maxval', + 'name': 'maxval', + 'type': 'number' + }, + { + 'tfName': 'seed', + 'name': 'seed', + 'type': 'number', + 'defaultValue': 0 + }, + { + 'tfName': 'seed2', + 'name': 'seed2', + 'type': 'number', + 'defaultValue': 0, + 'notSupported': true + } + ] + }, + { + 'tfOpName': 'Range', + 'category': 'creation', + 'inputs': [ + { + 'start': 0, + 'name': 'start', + 'type': 'number' + }, + { + 'start': 1, + 'name': 'stop', + 'type': 'number' + }, + { + 'start': 2, + 'name': 'step', + 'type': 'number', + 'defaultValue': 0 + } + ], + 'attrs': [ + { + 'tfName': 'Tidx', + 'name': 'dtype', + 'type': 'dtype' + } + ] + }, + { + 'tfOpName': 'TruncatedNormal', + 'category': 'creation', + 'inputs': [ + { + 'start': 0, + 'name': 'shape', + 'type': 'number[]' + } + ], + 'attrs': [ + { + 'tfName': 'means', + 'name': 
'mean', + 'type': 'number', + 'defaultValue': 0 + }, + { + 'tfName': 'stddev', + 'name': 'stdDev', + 'type': 'number', + 'defaultValue': 1 + }, + { + 'tfName': 'seed', + 'name': 'seed', + 'type': 'number' + }, + { + 'tfName': 'seed2', + 'name': 'seed2', + 'type': 'number', + 'defaultValue': 0, + 'notSupported': true + }, + { + 'tfName': 'dtype', + 'name': 'dtype', + 'type': 'dtype' + }, + { + 'tfName': 'T', + 'name': 'T', + 'type': 'number', + 'notSupported': true + } + ] + }, + { + 'tfOpName': 'Zeros', + 'category': 'creation', + 'inputs': [ + { + 'start': 0, + 'name': 'shape', + 'type': 'number[]' + } + ], + 'attrs': [ + { + 'tfName': 'T', + 'name': 'dtype', + 'type': 'dtype' + } + ] + }, + { + 'tfOpName': 'ZerosLike', + 'category': 'creation', + 'inputs': [ + { + 'start': 0, + 'name': 'x', + 'type': 'tensor' + } + ], + 'attrs': [ + { + 'tfName': 'T', + 'name': 'dtype', + 'type': 'dtype' + } + ] + }, + { + 'tfOpName': 'Multinomial', + 'category': 'creation', + 'inputs': [ + { + 'start': 0, + 'name': 'logits', + 'type': 'tensor' + }, + { + 'start': 1, + 'name': 'numSamples', + 'type': 'number' + } + ], + 'attrs': [ + { + 'tfName': 'seed', + 'name': 'seed', + 'type': 'number' + }, + { + 'tfName': 'seed2', + 'name': 'seed2', + 'type': 'number' + }, + { + 'tfName': 'T', + 'name': 'dtype', + 'type': 'dtype' + }, + { + 'tfName': 'output_dtype', + 'name': 'output_dtype', + 'type': 'dtype' + } + ] + } + ]; + + var creation = /*#__PURE__*/Object.freeze({ + __proto__: null, + json: json$e + }); + + /** + * @license + * Copyright 2023 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + const json$d = [ + { + 'tfOpName': 'NonMaxSuppressionV2', + 'category': 'dynamic', + 'inputs': [ + { + 'start': 0, + 'name': 'boxes', + 'type': 'tensor' + }, + { + 'start': 1, + 'name': 'scores', + 'type': 'tensor' + }, + { + 'start': 2, + 'name': 'maxOutputSize', + 'type': 'number' + }, + { + 'start': 3, + 'name': 'iouThreshold', + 'type': 'number' + } + ] + }, + { + 'tfOpName': 'NonMaxSuppressionV3', + 'category': 'dynamic', + 'inputs': [ + { + 'start': 0, + 'name': 'boxes', + 'type': 'tensor' + }, + { + 'start': 1, + 'name': 'scores', + 'type': 'tensor' + }, + { + 'start': 2, + 'name': 'maxOutputSize', + 'type': 'number' + }, + { + 'start': 3, + 'name': 'iouThreshold', + 'type': 'number' + }, + { + 'start': 4, + 'name': 'scoreThreshold', + 'type': 'number' + } + ] + }, + { + 'tfOpName': 'NonMaxSuppressionV4', + 'category': 'dynamic', + 'inputs': [ + { + 'start': 0, + 'name': 'boxes', + 'type': 'tensor' + }, + { + 'start': 1, + 'name': 'scores', + 'type': 'tensor' + }, + { + 'start': 2, + 'name': 'maxOutputSize', + 'type': 'number' + }, + { + 'start': 3, + 'name': 'iouThreshold', + 'type': 'number' + }, + { + 'start': 4, + 'name': 'scoreThreshold', + 'type': 'number' + } + ], + 'attrs': [ + { + 'tfName': 'T', + 'name': 'dtype', + 'type': 'dtype', + 'notSupported': true + }, + { + 'tfName': 'T_threshold', + 'name': 'threshold', + 'type': 'dtype', + 'notSupported': true + }, + { + 'tfName': 'pad_to_max_output_size', + 'name': 'padToMaxOutputSize', + 'type': 
'bool' + } + ] + }, + { + 'tfOpName': 'NonMaxSuppressionV5', + 'category': 'dynamic', + 'inputs': [ + { + 'start': 0, + 'name': 'boxes', + 'type': 'tensor' + }, + { + 'start': 1, + 'name': 'scores', + 'type': 'tensor' + }, + { + 'start': 2, + 'name': 'maxOutputSize', + 'type': 'number' + }, + { + 'start': 3, + 'name': 'iouThreshold', + 'type': 'number' + }, + { + 'start': 4, + 'name': 'scoreThreshold', + 'type': 'number' + }, + { + 'start': 5, + 'name': 'softNmsSigma', + 'type': 'number' + } + ] + }, + { + 'tfOpName': 'Where', + 'category': 'dynamic', + 'inputs': [ + { + 'start': 0, + 'name': 'condition', + 'type': 'tensor' + } + ], + 'attrs': [ + { + 'tfName': 'T', + 'name': 'dtype', + 'type': 'dtype', + 'notSupported': true + } + ] + }, + { + 'tfOpName': 'ListDiff', + 'category': 'dynamic', + 'inputs': [ + { + 'start': 0, + 'name': 'x', + 'type': 'tensor' + }, + { + 'start': 1, + 'name': 'y', + 'type': 'tensor' + } + ], + 'attrs': [ + { + 'tfName': 'T', + 'name': 'dtype', + 'type': 'dtype', + 'notSupported': true + } + ] + } + ]; + + var dynamic = /*#__PURE__*/Object.freeze({ + __proto__: null, + json: json$d + }); + + /** + * @license + * Copyright 2023 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ + const json$c = [ + { + 'tfOpName': 'LowerBound', + 'category': 'evaluation', + 'inputs': [ + { + 'start': 0, + 'name': 'sortedSequence', + 'type': 'tensor' + }, + { + 'start': 1, + 'name': 'values', + 'type': 'tensor' + } + ] + }, + { + 'tfOpName': 'TopKV2', + 'category': 'evaluation', + 'inputs': [ + { + 'start': 0, + 'name': 'x', + 'type': 'tensor' + }, + { + 'start': 1, + 'name': 'k', + 'type': 'number' + } + ], + 'attrs': [ + { + 'tfName': 'sorted', + 'name': 'sorted', + 'type': 'bool' + } + ] + }, + { + 'tfOpName': 'UpperBound', + 'category': 'evaluation', + 'inputs': [ + { + 'start': 0, + 'name': 'sortedSequence', + 'type': 'tensor' + }, + { + 'start': 1, + 'name': 'values', + 'type': 'tensor' + } + ] + }, + { + 'tfOpName': 'Unique', + 'category': 'evaluation', + 'inputs': [ + { + 'start': 0, + 'name': 'x', + 'type': 'tensor' + } + ] + }, + { + 'tfOpName': 'UniqueV2', + 'category': 'evaluation', + 'inputs': [ + { + 'start': 0, + 'name': 'x', + 'type': 'tensor' + }, + { + 'start': 1, + 'name': 'axis', + 'type': 'number' + } + ] + } + ]; + + var evaluation = /*#__PURE__*/Object.freeze({ + __proto__: null, + json: json$c + }); + + /** + * @license + * Copyright 2023 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ + const json$b = [ + { + 'tfOpName': 'PlaceholderWithDefault', + 'category': 'graph', + 'inputs': [ + { + 'start': 0, + 'name': 'default', + 'type': 'tensor' + } + ], + 'attrs': [ + { + 'tfName': 'shape', + 'name': 'shape', + 'type': 'shape' + }, + { + 'tfName': 'dtype', + 'name': 'dtype', + 'type': 'dtype' + } + ] + }, + { + 'tfOpName': 'Placeholder', + 'category': 'graph', + 'attrs': [ + { + 'tfName': 'shape', + 'name': 'shape', + 'type': 'shape' + }, + { + 'tfName': 'dtype', + 'name': 'dtype', + 'type': 'dtype' + } + ] + }, + { + 'tfOpName': 'Const', + 'category': 'graph' + }, + { + 'tfOpName': 'Identity', + 'category': 'graph', + 'inputs': [ + { + 'start': 0, + 'name': 'x', + 'type': 'tensor' + } + ] + }, + { + 'tfOpName': 'IdentityN', + 'category': 'graph', + 'inputs': [ + { + 'start': 0, + 'end': 0, + 'name': 'x', + 'type': 'tensors' + } + ] + }, + { + 'tfOpName': 'Snapshot', + 'category': 'graph', + 'inputs': [ + { + 'start': 0, + 'name': 'x', + 'type': 'tensor' + } + ] + }, + { + 'tfOpName': 'Rank', + 'category': 'graph', + 'inputs': [ + { + 'start': 0, + 'name': 'x', + 'type': 'tensor' + } + ] + }, + { + 'tfOpName': 'Size', + 'category': 'graph', + 'inputs': [ + { + 'start': 0, + 'name': 'x', + 'type': 'tensor' + } + ] + }, + { + 'tfOpName': 'Shape', + 'category': 'graph', + 'inputs': [ + { + 'start': 0, + 'name': 'x', + 'type': 'tensor' + } + ] + }, + { + 'tfOpName': 'ShapeN', + 'category': 'graph', + 'inputs': [ + { + 'start': 0, + 'end': 0, + 'name': 'x', + 'type': 'tensors' + } + ] + }, + { + 'tfOpName': 'Print', + 'category': 'graph', + 'inputs': [ + { + 'start': 0, + 'name': 'x', + 'type': 'tensor' + }, + { + 'start': 1, + 'name': 'data', + 'type': 'tensors' + } + ], + 'attrs': [ + { + 'tfName': 'message', + 'name': 'message', + 'type': 'string' + }, + { + 'tfName': 'first_n', + 'name': 'firstN', + 'type': 'number', + 'notSupported': true + }, + { + 'tfName': 
'summarize', + 'name': 'summarize', + 'type': 'number', + 'defaultValue': 3 + } + ] + }, + { + 'tfOpName': 'NoOp', + 'category': 'graph', + 'inputs': [] + }, + { + 'tfOpName': 'StopGradient', + 'category': 'graph', + 'inputs': [ + { + 'start': 0, + 'name': 'x', + 'type': 'tensor' + } + ] + }, + { + 'tfOpName': 'FakeQuantWithMinMaxVars', + 'category': 'graph', + 'inputs': [ + { + 'start': 0, + 'name': 'x', + 'type': 'tensor' + } + ], + 'attrs': [ + { + 'tfName': 'min', + 'name': 'min', + 'type': 'number' + }, + { + 'tfName': 'max', + 'name': 'max', + 'type': 'number' + } + ] + } + ]; + + var graph = /*#__PURE__*/Object.freeze({ + __proto__: null, + json: json$b + }); + + /** + * @license + * Copyright 2023 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ + const json$a = [ + { + 'tfOpName': 'HashTable', + 'category': 'hash_table', + 'inputs': [], + 'attrs': [ + { + 'tfName': 'shared_name', + 'name': 'sharedName', + 'type': 'string' + }, + { + 'tfName': 'use_node_name_sharing', + 'name': 'useNodeNameSharing', + 'type': 'bool' + }, + { + 'tfName': 'key_dtype', + 'name': 'keyDType', + 'type': 'dtype' + }, + { + 'tfName': 'value_dtype', + 'name': 'valueDType', + 'type': 'dtype' + } + ] + }, + { + 'tfOpName': 'HashTableV2', + 'category': 'hash_table', + 'inputs': [], + 'attrs': [ + { + 'tfName': 'shared_name', + 'name': 'sharedName', + 'type': 'string' + }, + { + 'tfName': 'use_node_name_sharing', + 'name': 'useNodeNameSharing', + 'type': 'bool' + }, + { + 'tfName': 'key_dtype', + 'name': 'keyDType', + 'type': 'dtype' + }, + { + 'tfName': 'value_dtype', + 'name': 'valueDType', + 'type': 'dtype' + } + ] + }, + { + 'tfOpName': 'LookupTableImport', + 'category': 'hash_table', + 'inputs': [ + { + 'start': 0, + 'name': 'tableHandle', + 'type': 'tensor' + }, + { + 'start': 1, + 'name': 'keys', + 'type': 'tensor' + }, + { + 'start': 2, + 'name': 'values', + 'type': 'tensor' + } + ], + 'attrs': [ + { + 'tfName': 'Tin', + 'name': 'tIn', + 'type': 'dtype', + 'notSupported': true + }, + { + 'tfName': 'Tout', + 'name': 'tOut', + 'type': 'dtype', + 'notSupported': true + } + ] + }, + { + 'tfOpName': 'LookupTableImportV2', + 'category': 'hash_table', + 'inputs': [ + { + 'start': 0, + 'name': 'tableHandle', + 'type': 'tensor' + }, + { + 'start': 1, + 'name': 'keys', + 'type': 'tensor' + }, + { + 'start': 2, + 'name': 'values', + 'type': 'tensor' + } + ], + 'attrs': [ + { + 'tfName': 'Tin', + 'name': 'tIn', + 'type': 'dtype', + 'notSupported': true + }, + { + 'tfName': 'Tout', + 'name': 'tOut', + 'type': 'dtype', + 'notSupported': true + } + ] + }, + { + 'tfOpName': 'LookupTableFind', + 'category': 'hash_table', + 'inputs': [ + { + 'start': 0, + 
'name': 'tableHandle', + 'type': 'tensor' + }, + { + 'start': 1, + 'name': 'keys', + 'type': 'tensor' + }, + { + 'start': 2, + 'name': 'defaultValue', + 'type': 'tensor' + } + ], + 'attrs': [ + { + 'tfName': 'Tin', + 'name': 'tIn', + 'type': 'dtype', + 'notSupported': true + }, + { + 'tfName': 'Tout', + 'name': 'tOut', + 'type': 'dtype', + 'notSupported': true + } + ] + }, + { + 'tfOpName': 'LookupTableFindV2', + 'category': 'hash_table', + 'inputs': [ + { + 'start': 0, + 'name': 'tableHandle', + 'type': 'tensor' + }, + { + 'start': 1, + 'name': 'keys', + 'type': 'tensor' + }, + { + 'start': 2, + 'name': 'defaultValue', + 'type': 'tensor' + } + ], + 'attrs': [ + { + 'tfName': 'Tin', + 'name': 'tIn', + 'type': 'dtype', + 'notSupported': true + }, + { + 'tfName': 'Tout', + 'name': 'tOut', + 'type': 'dtype', + 'notSupported': true + } + ] + }, + { + 'tfOpName': 'LookupTableSize', + 'category': 'hash_table', + 'inputs': [ + { + 'start': 0, + 'name': 'tableHandle', + 'type': 'tensor' + } + ] + }, + { + 'tfOpName': 'LookupTableSizeV2', + 'category': 'hash_table', + 'inputs': [ + { + 'start': 0, + 'name': 'tableHandle', + 'type': 'tensor' + } + ] + }, + { + 'tfOpName': 'InitializeTable', + 'category': 'hash_table', + 'inputs': [ + { + 'start': 0, + 'name': 'tableHandle', + 'type': 'tensor' + }, + { + 'start': 1, + 'name': 'keys', + 'type': 'tensor' + }, + { + 'start': 2, + 'name': 'values', + 'type': 'tensor' + } + ] + }, + { + 'tfOpName': 'InitializeTableV2', + 'category': 'hash_table', + 'inputs': [ + { + 'start': 0, + 'name': 'tableHandle', + 'type': 'tensor' + }, + { + 'start': 1, + 'name': 'keys', + 'type': 'tensor' + }, + { + 'start': 2, + 'name': 'values', + 'type': 'tensor' + } + ] + } + ]; + + var hashTable = /*#__PURE__*/Object.freeze({ + __proto__: null, + json: json$a + }); + + /** + * @license + * Copyright 2023 Google LLC. All Rights Reserved. 
+ * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + const json$9 = [ + { + 'tfOpName': 'ResizeBilinear', + 'category': 'image', + 'inputs': [ + { + 'start': 0, + 'name': 'images', + 'type': 'tensor' + }, + { + 'start': 1, + 'name': 'size', + 'type': 'number[]' + } + ], + 'attrs': [ + { + 'tfName': 'align_corners', + 'name': 'alignCorners', + 'type': 'bool' + }, + { + 'tfName': 'half_pixel_centers', + 'name': 'halfPixelCenters', + 'type': 'bool' + }, + { + 'tfName': 'T', + 'name': 'dtype', + 'type': 'dtype', + 'notSupported': true + } + ] + }, + { + 'tfOpName': 'ResizeNearestNeighbor', + 'category': 'image', + 'inputs': [ + { + 'start': 0, + 'name': 'images', + 'type': 'tensor' + }, + { + 'start': 1, + 'name': 'size', + 'type': 'number[]' + } + ], + 'attrs': [ + { + 'tfName': 'align_corners', + 'name': 'alignCorners', + 'type': 'bool' + }, + { + 'tfName': 'half_pixel_centers', + 'name': 'halfPixelCenters', + 'type': 'bool' + }, + { + 'tfName': 'T', + 'name': 'dtype', + 'type': 'dtype', + 'notSupported': true + } + ] + }, + { + 'tfOpName': 'CropAndResize', + 'category': 'image', + 'inputs': [ + { + 'start': 0, + 'name': 'image', + 'type': 'tensor' + }, + { + 'start': 1, + 'name': 'boxes', + 'type': 'tensor' + }, + { + 'start': 2, + 'name': 'boxInd', + 'type': 'tensor' + }, + { + 'start': 3, + 'name': 'cropSize', + 'type': 'number[]' + } + ], + 'attrs': [ + { + 'tfName': 'method', 
+ 'name': 'method', + 'type': 'string' + }, + { + 'tfName': 'extrapolation_value', + 'name': 'extrapolationValue', + 'type': 'number' + } + ] + }, + { + 'tfOpName': 'ImageProjectiveTransformV3', + 'category': 'image', + 'inputs': [ + { + 'start': 0, + 'name': 'images', + 'type': 'tensor' + }, + { + 'start': 1, + 'name': 'transforms', + 'type': 'tensor' + }, + { + 'start': 2, + 'name': 'outputShape', + 'type': 'number[]' + }, + { + 'start': 3, + 'name': 'fillValue', + 'type': 'number' + } + ], + 'attrs': [ + { + 'tfName': 'interpolation', + 'name': 'interpolation', + 'type': 'string' + }, + { + 'tfName': 'fill_mode', + 'name': 'fillMode', + 'type': 'string' + } + ] + } + ]; + + var image = /*#__PURE__*/Object.freeze({ + __proto__: null, + json: json$9 + }); + + /** + * @license + * Copyright 2023 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ + const json$8 = [ + { + 'tfOpName': 'Equal', + 'category': 'logical', + 'inputs': [ + { + 'start': 0, + 'name': 'a', + 'type': 'tensor' + }, + { + 'start': 1, + 'name': 'b', + 'type': 'tensor' + } + ], + 'attrs': [ + { + 'tfName': 'T', + 'name': 'dtype', + 'type': 'dtype', + 'notSupported': true + } + ] + }, + { + 'tfOpName': 'NotEqual', + 'category': 'logical', + 'inputs': [ + { + 'start': 0, + 'name': 'a', + 'type': 'tensor' + }, + { + 'start': 1, + 'name': 'b', + 'type': 'tensor' + } + ], + 'attrs': [ + { + 'tfName': 'T', + 'name': 'dtype', + 'type': 'dtype', + 'notSupported': true + } + ] + }, + { + 'tfOpName': 'Greater', + 'category': 'logical', + 'inputs': [ + { + 'start': 0, + 'name': 'a', + 'type': 'tensor' + }, + { + 'start': 1, + 'name': 'b', + 'type': 'tensor' + } + ], + 'attrs': [ + { + 'tfName': 'T', + 'name': 'dtype', + 'type': 'dtype', + 'notSupported': true + } + ] + }, + { + 'tfOpName': 'GreaterEqual', + 'category': 'logical', + 'inputs': [ + { + 'start': 0, + 'name': 'a', + 'type': 'tensor' + }, + { + 'start': 1, + 'name': 'b', + 'type': 'tensor' + } + ], + 'attrs': [ + { + 'tfName': 'T', + 'name': 'dtype', + 'type': 'dtype', + 'notSupported': true + } + ] + }, + { + 'tfOpName': 'Less', + 'category': 'logical', + 'inputs': [ + { + 'start': 0, + 'name': 'a', + 'type': 'tensor' + }, + { + 'start': 1, + 'name': 'b', + 'type': 'tensor' + } + ], + 'attrs': [ + { + 'tfName': 'T', + 'name': 'dtype', + 'type': 'dtype', + 'notSupported': true + } + ] + }, + { + 'tfOpName': 'LessEqual', + 'category': 'logical', + 'inputs': [ + { + 'start': 0, + 'name': 'a', + 'type': 'tensor' + }, + { + 'start': 1, + 'name': 'b', + 'type': 'tensor' + } + ], + 'attrs': [ + { + 'tfName': 'T', + 'name': 'dtype', + 'type': 'dtype', + 'notSupported': true + } + ] + }, + { + 'tfOpName': 'LogicalAnd', + 'category': 'logical', + 'inputs': [ + { + 'start': 0, + 'name': 'a', + 'type': 'tensor' + 
}, + { + 'start': 1, + 'name': 'b', + 'type': 'tensor' + } + ], + 'attrs': [ + { + 'tfName': 'T', + 'name': 'dtype', + 'type': 'dtype', + 'notSupported': true + } + ] + }, + { + 'tfOpName': 'LogicalNot', + 'category': 'logical', + 'inputs': [ + { + 'start': 0, + 'name': 'a', + 'type': 'tensor' + } + ], + 'attrs': [ + { + 'tfName': 'T', + 'name': 'dtype', + 'type': 'dtype', + 'notSupported': true + } + ] + }, + { + 'tfOpName': 'LogicalOr', + 'category': 'logical', + 'inputs': [ + { + 'start': 0, + 'name': 'a', + 'type': 'tensor' + }, + { + 'start': 1, + 'name': 'b', + 'type': 'tensor' + } + ], + 'attrs': [ + { + 'tfName': 'T', + 'name': 'dtype', + 'type': 'dtype', + 'notSupported': true + } + ] + }, + { + 'tfOpName': 'Select', + 'category': 'logical', + 'inputs': [ + { + 'start': 0, + 'name': 'condition', + 'type': 'tensor' + }, + { + 'start': 1, + 'name': 'a', + 'type': 'tensor' + }, + { + 'start': 2, + 'name': 'b', + 'type': 'tensor' + } + ], + 'attrs': [ + { + 'tfName': 'T', + 'name': 'dtype', + 'type': 'dtype', + 'notSupported': true + } + ] + }, + { + 'tfOpName': 'SelectV2', + 'category': 'logical', + 'inputs': [ + { + 'start': 0, + 'name': 'condition', + 'type': 'tensor' + }, + { + 'start': 1, + 'name': 'a', + 'type': 'tensor' + }, + { + 'start': 2, + 'name': 'b', + 'type': 'tensor' + } + ], + 'attrs': [ + { + 'tfName': 'T', + 'name': 'dtype', + 'type': 'dtype', + 'notSupported': true + } + ] + }, + { + 'tfOpName': 'BitwiseAnd', + 'category': 'logical', + 'inputs': [ + { + 'start': 0, + 'name': 'x', + 'type': 'tensor' + }, + { + 'start': 1, + 'name': 'y', + 'type': 'tensor' + } + ] + } + ]; + + var logical = /*#__PURE__*/Object.freeze({ + __proto__: null, + json: json$8 + }); + + /** + * @license + * Copyright 2023 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + const json$7 = [ + { + 'tfOpName': '_FusedMatMul', + 'category': 'matrices', + 'inputs': [ + { + 'start': 0, + 'name': 'a', + 'type': 'tensor' + }, + { + 'start': 1, + 'name': 'b', + 'type': 'tensor' + }, + { + 'start': 2, + 'end': 0, + 'name': 'args', + 'type': 'tensors' + } + ], + 'attrs': [ + { + 'tfName': 'num_args', + 'name': 'numArgs', + 'type': 'number' + }, + { + 'tfName': 'fused_ops', + 'name': 'fusedOps', + 'type': 'string[]', + 'defaultValue': [] + }, + { + 'tfName': 'epsilon', + 'name': 'epsilon', + 'type': 'number', + 'defaultValue': 0.0001 + }, + { + 'tfName': 'transpose_a', + 'name': 'transposeA', + 'type': 'bool', + 'defaultValue': false + }, + { + 'tfName': 'transpose_b', + 'name': 'transposeB', + 'type': 'bool', + 'defaultValue': false + }, + { + 'tfName': 'leakyrelu_alpha', + 'name': 'leakyreluAlpha', + 'type': 'number', + 'defaultValue': 0.2 + }, + { + 'tfName': 'T', + 'name': 'dtype', + 'type': 'dtype', + 'notSupported': true + } + ] + }, + { + 'tfOpName': 'MatMul', + 'category': 'matrices', + 'inputs': [ + { + 'start': 0, + 'name': 'a', + 'type': 'tensor' + }, + { + 'start': 1, + 'name': 'b', + 'type': 'tensor' + } + ], + 'attrs': [ + { + 'tfName': 'transpose_a', + 'name': 'transposeA', + 'type': 'bool', + 'defaultValue': false + }, + { + 'tfName': 'transpose_b', + 'name': 'transposeB', + 'type': 'bool', + 'defaultValue': false + }, + { + 'tfName': 'T', + 'name': 'dtype', + 'type': 'dtype', + 'notSupported': true + } + ] + }, + { + 
'tfOpName': 'BatchMatMul', + 'category': 'matrices', + 'inputs': [ + { + 'start': 0, + 'name': 'a', + 'type': 'tensor' + }, + { + 'start': 1, + 'name': 'b', + 'type': 'tensor' + } + ], + 'attrs': [ + { + 'tfName': 'adj_x', + 'name': 'transposeA', + 'type': 'bool', + 'defaultValue': false + }, + { + 'tfName': 'adj_y', + 'name': 'transposeB', + 'type': 'bool', + 'defaultValue': false + }, + { + 'tfName': 'T', + 'name': 'dtype', + 'type': 'dtype', + 'notSupported': true + } + ] + }, + { + 'tfOpName': 'BatchMatMulV2', + 'category': 'matrices', + 'inputs': [ + { + 'start': 0, + 'name': 'a', + 'type': 'tensor' + }, + { + 'start': 1, + 'name': 'b', + 'type': 'tensor' + } + ], + 'attrs': [ + { + 'tfName': 'adj_x', + 'name': 'transposeA', + 'type': 'bool', + 'defaultValue': false + }, + { + 'tfName': 'adj_y', + 'name': 'transposeB', + 'type': 'bool', + 'defaultValue': false + }, + { + 'tfName': 'T', + 'name': 'dtype', + 'type': 'dtype', + 'notSupported': true + } + ] + }, + { + 'tfOpName': 'Transpose', + 'category': 'matrices', + 'inputs': [ + { + 'start': 0, + 'name': 'x', + 'type': 'tensor' + }, + { + 'start': 1, + 'name': 'perm', + 'type': 'number[]' + } + ], + 'attrs': [ + { + 'tfName': 'T', + 'name': 'dtype', + 'type': 'dtype', + 'notSupported': true + } + ] + }, + { + 'tfOpName': 'Einsum', + 'category': 'matrices', + 'inputs': [ + { + 'start': 0, + 'end': 0, + 'name': 'tensors', + 'type': 'tensors' + } + ], + 'attrs': [ + { + 'tfName': 'equation', + 'name': 'equation', + 'type': 'string' + }, + { + 'tfName': 'N', + 'name': 'n', + 'type': 'number', + 'defaultValue': 2 + }, + { + 'tfName': 'T', + 'name': 'dtype', + 'type': 'dtype' + } + ] + }, + { + 'tfOpName': 'MatrixBandPart', + 'category': 'matrices', + 'inputs': [ + { + 'start': 0, + 'name': 'a', + 'type': 'tensor' + }, + { + 'start': 1, + 'name': 'numLower', + 'type': 'tensor' + }, + { + 'start': 1, + 'name': 'numUpper', + 'type': 'tensor' + } + ] + } + ]; + + var matrices = /*#__PURE__*/Object.freeze({ + 
__proto__: null, + json: json$7 + }); + + /** + * @license + * Copyright 2023 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + const json$6 = [ + { + 'tfOpName': 'EuclideanNorm', + 'category': 'normalization', + 'inputs': [ + { + 'start': 0, + 'name': 'x', + 'type': 'tensor' + }, + { + 'start': 1, + 'name': 'axis', + 'type': 'number[]' + } + ], + 'attrs': [ + { + 'tfName': 'keep_dims', + 'name': 'keepDims', + 'type': 'bool', + 'defaultValue': false + } + ] + }, + { + 'tfOpName': 'FusedBatchNorm', + 'category': 'normalization', + 'inputs': [ + { + 'start': 0, + 'name': 'x', + 'type': 'tensor' + }, + { + 'start': 1, + 'name': 'scale', + 'type': 'tensor' + }, + { + 'start': 2, + 'name': 'offset', + 'type': 'tensor' + }, + { + 'start': 3, + 'name': 'mean', + 'type': 'tensor' + }, + { + 'start': 4, + 'name': 'variance', + 'type': 'tensor' + } + ], + 'attrs': [ + { + 'tfName': 'epsilon', + 'name': 'epsilon', + 'type': 'number', + 'defaultValue': 0.001 + }, + { + 'tfName': 'data_format', + 'name': 'dataFormat', + 'type': 'string', + 'notSupported': true + } + ] + }, + { + 'tfOpName': 'FusedBatchNormV2', + 'category': 'normalization', + 'inputs': [ + { + 'start': 0, + 'name': 'x', + 'type': 'tensor' + }, + { + 'start': 1, + 'name': 'scale', + 'type': 'tensor' + }, + { + 'start': 2, + 'name': 'offset', + 'type': 'tensor' + }, + { + 'start': 3, + 
'name': 'mean', + 'type': 'tensor' + }, + { + 'start': 4, + 'name': 'variance', + 'type': 'tensor' + } + ], + 'attrs': [ + { + 'tfName': 'epsilon', + 'name': 'epsilon', + 'type': 'number', + 'defaultValue': 0.001 + }, + { + 'tfName': 'data_format', + 'name': 'dataFormat', + 'type': 'string', + 'notSupported': true + } + ] + }, + { + 'tfOpName': 'FusedBatchNormV3', + 'category': 'normalization', + 'inputs': [ + { + 'start': 0, + 'name': 'x', + 'type': 'tensor' + }, + { + 'start': 1, + 'name': 'scale', + 'type': 'tensor' + }, + { + 'start': 2, + 'name': 'offset', + 'type': 'tensor' + }, + { + 'start': 3, + 'name': 'mean', + 'type': 'tensor' + }, + { + 'start': 4, + 'name': 'variance', + 'type': 'tensor' + } + ], + 'attrs': [ + { + 'tfName': 'epsilon', + 'name': 'epsilon', + 'type': 'number', + 'defaultValue': 0.001 + }, + { + 'tfName': 'data_format', + 'name': 'dataFormat', + 'type': 'string', + 'notSupported': true + } + ] + }, + { + 'tfOpName': 'LRN', + 'category': 'normalization', + 'inputs': [ + { + 'start': 0, + 'name': 'x', + 'type': 'tensor' + } + ], + 'attrs': [ + { + 'tfName': 'depth_radius', + 'name': 'radius', + 'type': 'number', + 'defaultValue': 5 + }, + { + 'tfName': 'bias', + 'name': 'bias', + 'type': 'number', + 'defaultValue': 1 + }, + { + 'tfName': 'alpha', + 'name': 'alpha', + 'type': 'number', + 'defaultValue': 1 + }, + { + 'tfName': 'beta', + 'name': 'beta', + 'type': 'number', + 'defaultValue': 0.5 + } + ] + }, + { + 'tfOpName': 'Softmax', + 'category': 'normalization', + 'inputs': [ + { + 'start': 0, + 'name': 'x', + 'type': 'tensor' + } + ] + }, + { + 'tfOpName': 'LogSoftmax', + 'category': 'normalization', + 'inputs': [ + { + 'start': 0, + 'name': 'x', + 'type': 'tensor' + } + ] + } + ]; + + var normalization = /*#__PURE__*/Object.freeze({ + __proto__: null, + json: json$6 + }); + + /** + * @license + * Copyright 2023 Google LLC. All Rights Reserved. 
+ * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + const json$5 = [ + { + 'tfOpName': 'Bincount', + 'category': 'reduction', + 'inputs': [ + { + 'start': 0, + 'name': 'x', + 'type': 'tensor' + }, + { + 'start': 1, + 'name': 'size', + 'type': 'number' + }, + { + 'start': 2, + 'name': 'weights', + 'type': 'tensor' + } + ] + }, + { + 'tfOpName': 'DenseBincount', + 'category': 'reduction', + 'inputs': [ + { + 'start': 0, + 'name': 'x', + 'type': 'tensor' + }, + { + 'start': 1, + 'name': 'size', + 'type': 'number' + }, + { + 'start': 2, + 'name': 'weights', + 'type': 'tensor' + } + ], + 'attrs': [ + { + 'tfName': 'binary_output', + 'name': 'binaryOutput', + 'type': 'bool' + } + ] + }, + { + 'tfOpName': 'Max', + 'category': 'reduction', + 'inputs': [ + { + 'start': 0, + 'name': 'x', + 'type': 'tensor' + }, + { + 'start': 1, + 'name': 'axis', + 'type': 'number[]' + } + ], + 'attrs': [ + { + 'tfName': 'keep_dims', + 'name': 'keepDims', + 'type': 'bool' + } + ] + }, + { + 'tfOpName': 'Mean', + 'category': 'reduction', + 'inputs': [ + { + 'start': 0, + 'name': 'x', + 'type': 'tensor' + }, + { + 'start': 1, + 'name': 'axis', + 'type': 'number[]' + } + ], + 'attrs': [ + { + 'tfName': 'keep_dims', + 'name': 'keepDims', + 'type': 'bool' + } + ] + }, + { + 'tfOpName': 'Min', + 'category': 'reduction', + 'inputs': [ + { + 'start': 0, + 'name': 'x', + 'type': 'tensor' + }, + { + 'start': 1, + 
'name': 'axis', + 'type': 'number[]' + } + ], + 'attrs': [ + { + 'tfName': 'keep_dims', + 'name': 'keepDims', + 'type': 'bool' + } + ] + }, + { + 'tfOpName': 'Sum', + 'category': 'reduction', + 'inputs': [ + { + 'start': 0, + 'name': 'x', + 'type': 'tensor' + }, + { + 'start': 1, + 'name': 'axis', + 'type': 'number[]' + } + ], + 'attrs': [ + { + 'tfName': 'keep_dims', + 'name': 'keepDims', + 'type': 'bool' + } + ] + }, + { + 'tfOpName': 'All', + 'category': 'reduction', + 'inputs': [ + { + 'start': 0, + 'name': 'x', + 'type': 'tensor' + }, + { + 'start': 1, + 'name': 'axis', + 'type': 'number[]' + } + ], + 'attrs': [ + { + 'tfName': 'keep_dims', + 'name': 'keepDims', + 'type': 'bool' + } + ] + }, + { + 'tfOpName': 'Any', + 'category': 'reduction', + 'inputs': [ + { + 'start': 0, + 'name': 'x', + 'type': 'tensor' + }, + { + 'start': 1, + 'name': 'axis', + 'type': 'number[]' + } + ], + 'attrs': [ + { + 'tfName': 'keep_dims', + 'name': 'keepDims', + 'type': 'bool' + } + ] + }, + { + 'tfOpName': 'ArgMax', + 'category': 'reduction', + 'inputs': [ + { + 'start': 0, + 'name': 'x', + 'type': 'tensor' + }, + { + 'start': 1, + 'name': 'axis', + 'type': 'number' + } + ] + }, + { + 'tfOpName': 'ArgMin', + 'category': 'reduction', + 'inputs': [ + { + 'start': 0, + 'name': 'x', + 'type': 'tensor' + }, + { + 'start': 1, + 'name': 'axis', + 'type': 'number' + } + ] + }, + { + 'tfOpName': 'Prod', + 'category': 'reduction', + 'inputs': [ + { + 'start': 0, + 'name': 'x', + 'type': 'tensor' + }, + { + 'start': 1, + 'name': 'axis', + 'type': 'number[]' + } + ], + 'attrs': [ + { + 'tfName': 'keep_dims', + 'name': 'keepDims', + 'type': 'bool' + }, + { + 'tfName': 'T', + 'name': 'dtype', + 'type': 'dtype', + 'notSupported': true + } + ] + }, + { + 'tfOpName': 'Cumprod', + 'category': 'reduction', + 'inputs': [ + { + 'start': 0, + 'name': 'x', + 'type': 'tensor' + }, + { + 'start': 1, + 'name': 'axis', + 'type': 'number' + } + ], + 'attrs': [ + { + 'tfName': 'exclusive', + 'name': 
'exclusive', + 'type': 'bool' + }, + { + 'tfName': 'reverse', + 'name': 'reverse', + 'type': 'bool' + } + ] + }, + { + 'tfOpName': 'Cumsum', + 'category': 'reduction', + 'inputs': [ + { + 'start': 0, + 'name': 'x', + 'type': 'tensor' + }, + { + 'start': 1, + 'name': 'axis', + 'type': 'number' + } + ], + 'attrs': [ + { + 'tfName': 'exclusive', + 'name': 'exclusive', + 'type': 'bool' + }, + { + 'tfName': 'reverse', + 'name': 'reverse', + 'type': 'bool' + } + ] + } + ]; + + var reduction = /*#__PURE__*/Object.freeze({ + __proto__: null, + json: json$5 + }); + + /** + * @license + * Copyright 2023 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ + const json$4 = [ + { + 'tfOpName': 'ConcatV2', + 'category': 'slice_join', + 'inputs': [ + { + 'start': 0, + 'end': -1, + 'name': 'tensors', + 'type': 'tensors' + }, + { + 'start': -1, + 'name': 'axis', + 'type': 'number' + } + ], + 'attrs': [ + { + 'tfName': 'N', + 'name': 'n', + 'type': 'number', + 'defaultValue': 2 + } + ] + }, + { + 'tfOpName': 'Concat', + 'category': 'slice_join', + 'inputs': [ + { + 'start': 1, + 'end': 0, + 'name': 'tensors', + 'type': 'tensors' + }, + { + 'start': 0, + 'name': 'axis', + 'type': 'number' + } + ], + 'attrs': [ + { + 'tfName': 'N', + 'name': 'n', + 'type': 'number', + 'defaultValue': 2 + } + ] + }, + { + 'tfOpName': 'GatherV2', + 'category': 'slice_join', + 'inputs': [ + { + 'start': 0, + 'name': 'x', + 'type': 'tensor' + }, + { + 'start': 1, + 'name': 'indices', + 'type': 'tensor' + }, + { + 'start': 2, + 'name': 'axis', + 'type': 'number', + 'defaultValue': 0 + } + ], + 'attrs': [ + { + 'tfName': 'batch_dims', + 'name': 'batchDims', + 'type': 'number', + 'defaultValue': 0 + } + ] + }, + { + 'tfOpName': 'Gather', + 'category': 'slice_join', + 'inputs': [ + { + 'start': 0, + 'name': 'x', + 'type': 'tensor' + }, + { + 'start': 1, + 'name': 'indices', + 'type': 'tensor' + } + ], + 'attrs': [ + { + 'tfName': 'validate_indices', + 'name': 'validateIndices', + 'type': 'bool', + 'notSupported': true + } + ] + }, + { + 'tfOpName': 'Reverse', + 'category': 'slice_join', + 'inputs': [ + { + 'start': 0, + 'name': 'x', + 'type': 'tensor' + }, + { + 'start': 1, + 'name': 'dims', + 'type': 'bool[]' + } + ] + }, + { + 'tfOpName': 'ReverseV2', + 'category': 'slice_join', + 'inputs': [ + { + 'start': 0, + 'name': 'x', + 'type': 'tensor' + }, + { + 'start': 1, + 'name': 'axis', + 'type': 'number[]' + } + ] + }, + { + 'tfOpName': 'Slice', + 'category': 'slice_join', + 'inputs': [ + { + 'start': 0, + 'name': 'x', + 'type': 'tensor' + }, + { + 'start': 1, + 
'name': 'begin', + 'type': 'number[]' + }, + { + 'start': 2, + 'name': 'size', + 'type': 'number[]' + } + ] + }, + { + 'tfOpName': 'StridedSlice', + 'category': 'slice_join', + 'inputs': [ + { + 'start': 0, + 'name': 'x', + 'type': 'tensor' + }, + { + 'start': 1, + 'name': 'begin', + 'type': 'number[]' + }, + { + 'start': 2, + 'name': 'end', + 'type': 'number[]' + }, + { + 'start': 3, + 'name': 'strides', + 'type': 'number[]' + } + ], + 'attrs': [ + { + 'tfName': 'begin_mask', + 'name': 'beginMask', + 'type': 'number', + 'defaultValue': 0 + }, + { + 'tfName': 'end_mask', + 'name': 'endMask', + 'type': 'number', + 'defaultValue': 0 + }, + { + 'tfName': 'new_axis_mask', + 'name': 'newAxisMask', + 'type': 'number', + 'defaultValue': 0 + }, + { + 'tfName': 'ellipsis_mask', + 'name': 'ellipsisMask', + 'type': 'number', + 'defaultValue': 0 + }, + { + 'tfName': 'shrink_axis_mask', + 'name': 'shrinkAxisMask', + 'type': 'number', + 'defaultValue': 0 + } + ] + }, + { + 'tfOpName': 'Pack', + 'category': 'slice_join', + 'inputs': [ + { + 'start': 0, + 'end': 0, + 'name': 'tensors', + 'type': 'tensors' + } + ], + 'attrs': [ + { + 'tfName': 'axis', + 'name': 'axis', + 'type': 'number', + 'defaultValue': 0 + } + ] + }, + { + 'tfOpName': 'Unpack', + 'category': 'slice_join', + 'inputs': [ + { + 'start': 0, + 'name': 'tensor', + 'type': 'tensor' + } + ], + 'attrs': [ + { + 'tfName': 'axis', + 'name': 'axis', + 'type': 'number', + 'defaultValue': 0 + }, + { + 'tfName': 'num', + 'name': 'num', + 'type': 'number', + 'defaultValue': 0, + 'notSupported': true + } + ] + }, + { + 'tfOpName': 'Tile', + 'category': 'slice_join', + 'inputs': [ + { + 'start': 0, + 'name': 'x', + 'type': 'tensor' + }, + { + 'start': 1, + 'name': 'reps', + 'type': 'number[]' + } + ] + }, + { + 'tfOpName': 'Split', + 'category': 'slice_join', + 'inputs': [ + { + 'start': 0, + 'name': 'axis', + 'type': 'number', + 'defaultValue': 0 + }, + { + 'start': 1, + 'name': 'x', + 'type': 'tensor' + } + ], + 'attrs': [ + { 
+ 'tfName': 'num_split', + 'name': 'numOrSizeSplits', + 'type': 'number', + 'defaultValue': 1 + } + ] + }, + { + 'tfOpName': 'SplitV', + 'category': 'slice_join', + 'inputs': [ + { + 'start': 0, + 'name': 'x', + 'type': 'tensor' + }, + { + 'start': 1, + 'name': 'numOrSizeSplits', + 'type': 'number[]' + }, + { + 'start': 2, + 'name': 'axis', + 'type': 'number', + 'defaultValue': 0 + } + ] + }, + { + 'tfOpName': 'ScatterNd', + 'category': 'slice_join', + 'inputs': [ + { + 'start': 0, + 'name': 'indices', + 'type': 'tensor' + }, + { + 'start': 1, + 'name': 'values', + 'type': 'tensor' + }, + { + 'start': 2, + 'name': 'shape', + 'type': 'number[]' + } + ] + }, + { + 'tfOpName': 'GatherNd', + 'category': 'slice_join', + 'inputs': [ + { + 'start': 0, + 'name': 'x', + 'type': 'tensor' + }, + { + 'start': 1, + 'name': 'indices', + 'type': 'tensor' + } + ] + }, + { + 'tfOpName': 'SparseToDense', + 'category': 'slice_join', + 'inputs': [ + { + 'start': 0, + 'name': 'sparseIndices', + 'type': 'tensor' + }, + { + 'start': 1, + 'name': 'outputShape', + 'type': 'number[]' + }, + { + 'start': 2, + 'name': 'sparseValues', + 'type': 'tensor' + }, + { + 'start': 3, + 'name': 'defaultValue', + 'type': 'tensor' + } + ], + 'attrs': [ + { + 'tfName': 'validate_indices', + 'name': 'validateIndices', + 'type': 'bool', + 'defaultValue': false, + 'notSupported': true + } + ] + }, + { + 'tfOpName': 'TensorScatterUpdate', + 'category': 'slice_join', + 'inputs': [ + { + 'start': 0, + 'name': 'tensor', + 'type': 'tensor' + }, + { + 'start': 1, + 'name': 'indices', + 'type': 'tensor' + }, + { + 'start': 2, + 'name': 'values', + 'type': 'tensor' + } + ] + } + ]; + + var sliceJoin = /*#__PURE__*/Object.freeze({ + __proto__: null, + json: json$4 + }); + + /** + * @license + * Copyright 2023 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + const json$3 = [ + { + 'tfOpName': 'SparseFillEmptyRows', + 'category': 'sparse', + 'inputs': [ + { + 'start': 0, + 'name': 'indices', + 'type': 'tensor' + }, + { + 'start': 1, + 'name': 'values', + 'type': 'tensor' + }, + { + 'start': 2, + 'name': 'denseShape', + 'type': 'tensor' + }, + { + 'start': 3, + 'name': 'defaultValue', + 'type': 'tensor' + } + ] + }, + { + 'tfOpName': 'SparseReshape', + 'category': 'sparse', + 'inputs': [ + { + 'start': 0, + 'name': 'inputIndices', + 'type': 'tensor' + }, + { + 'start': 1, + 'name': 'inputShape', + 'type': 'tensor' + }, + { + 'start': 2, + 'name': 'newShape', + 'type': 'tensor' + } + ], + 'attrs': [ + { + 'tfName': 'T', + 'name': 'dtype', + 'type': 'dtype', + 'notSupported': true + } + ] + }, + { + 'tfOpName': 'SparseSegmentMean', + 'category': 'sparse', + 'inputs': [ + { + 'start': 0, + 'name': 'data', + 'type': 'tensor' + }, + { + 'start': 1, + 'name': 'indices', + 'type': 'tensor' + }, + { + 'start': 2, + 'name': 'segmentIds', + 'type': 'tensor' + } + ] + }, + { + 'tfOpName': 'SparseSegmentSum', + 'category': 'sparse', + 'inputs': [ + { + 'start': 0, + 'name': 'data', + 'type': 'tensor' + }, + { + 'start': 1, + 'name': 'indices', + 'type': 'tensor' + }, + { + 'start': 2, + 'name': 'segmentIds', + 'type': 'tensor' + } + ] + } + ]; + + var sparse = /*#__PURE__*/Object.freeze({ + __proto__: null, + json: json$3 + }); + + /** + * @license + * Copyright 2023 Google LLC. All Rights Reserved. 
+ * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + const json$2 = [ + { + 'tfOpName': 'FFT', + 'category': 'spectral', + 'inputs': [ + { + 'start': 0, + 'name': 'x', + 'type': 'tensor' + } + ] + }, + { + 'tfOpName': 'IFFT', + 'category': 'spectral', + 'inputs': [ + { + 'start': 0, + 'name': 'x', + 'type': 'tensor' + } + ] + }, + { + 'tfOpName': 'RFFT', + 'category': 'spectral', + 'inputs': [ + { + 'start': 0, + 'name': 'x', + 'type': 'tensor' + }, + { + 'start': 1, + 'name': 'fft_length', + 'type': 'number', + 'notSupported': true + } + ] + }, + { + 'tfOpName': 'IRFFT', + 'category': 'spectral', + 'inputs': [ + { + 'start': 0, + 'name': 'x', + 'type': 'tensor' + }, + { + 'start': 1, + 'name': 'fft_length', + 'type': 'number', + 'notSupported': true + } + ] + } + ]; + + var spectral = /*#__PURE__*/Object.freeze({ + __proto__: null, + json: json$2 + }); + + /** + * @license + * Copyright 2023 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + const json$1 = [ + { + 'tfOpName': 'StaticRegexReplace', + 'category': 'string', + 'inputs': [ + { + 'start': 0, + 'name': 'input', + 'type': 'tensor' + } + ], + 'attrs': [ + { + 'tfName': 'pattern', + 'name': 'pattern', + 'type': 'string' + }, + { + 'tfName': 'rewrite', + 'name': 'rewrite', + 'type': 'string' + }, + { + 'tfName': 'replace_global', + 'name': 'replaceGlobal', + 'type': 'bool' + } + ] + }, + { + 'tfOpName': 'StringNGrams', + 'category': 'string', + 'inputs': [ + { + 'start': 0, + 'name': 'data', + 'type': 'tensor' + }, + { + 'start': 1, + 'name': 'dataSplits', + 'type': 'tensor' + } + ], + 'attrs': [ + { + 'tfName': 'separator', + 'name': 'separator', + 'type': 'string' + }, + { + 'tfName': 'ngram_widths', + 'name': 'nGramWidths', + 'type': 'number[]' + }, + { + 'tfName': 'left_pad', + 'name': 'leftPad', + 'type': 'string' + }, + { + 'tfName': 'right_pad', + 'name': 'rightPad', + 'type': 'string' + }, + { + 'tfName': 'pad_width', + 'name': 'padWidth', + 'type': 'number' + }, + { + 'tfName': 'preserve_short_sequences', + 'name': 'preserveShortSequences', + 'type': 'bool' + } + ], + 'outputs': [ + 'ngrams', + 'ngrams_splits' + ] + }, + { + 'tfOpName': 'StringSplit', + 'category': 'string', + 'inputs': [ + { + 'start': 0, + 'name': 'input', + 'type': 'tensor' + }, + { + 'start': 1, + 'name': 'delimiter', + 'type': 'tensor' + } + ], + 'attrs': [ + { + 'tfName': 'skip_empty', + 'name': 'skipEmpty', + 'type': 'bool' + } + ], + 'outputs': [ + 'indices', + 'values', + 'shape' + ] + }, + { + 'tfOpName': 'StringToHashBucketFast', + 'category': 'string', + 'inputs': [ + { + 'start': 0, + 'name': 'input', + 'type': 'tensor' + } + ], + 'attrs': [ + { + 'tfName': 'num_buckets', + 'name': 'numBuckets', + 'type': 'number' + } + ] + } + ]; + + var string = 
/*#__PURE__*/Object.freeze({ + __proto__: null, + json: json$1 + }); + + /** + * @license + * Copyright 2023 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + const json = [ + { + 'tfOpName': 'Cast', + 'category': 'transformation', + 'inputs': [ + { + 'start': 0, + 'name': 'x', + 'type': 'tensor' + } + ], + 'attrs': [ + { + 'tfName': 'SrcT', + 'name': 'sdtype', + 'type': 'dtype', + 'notSupported': true + }, + { + 'tfName': 'DstT', + 'name': 'dtype', + 'type': 'dtype' + } + ] + }, + { + 'tfOpName': 'ExpandDims', + 'category': 'transformation', + 'inputs': [ + { + 'start': 0, + 'name': 'x', + 'type': 'tensor' + }, + { + 'start': 1, + 'name': 'axis', + 'type': 'number' + } + ] + }, + { + 'tfOpName': 'MirrorPad', + 'category': 'transformation', + 'inputs': [ + { + 'start': 0, + 'name': 'x', + 'type': 'tensor' + }, + { + 'start': 1, + 'name': 'padding', + 'type': 'number[]' + } + ], + 'attrs': [ + { + 'tfName': 'mode', + 'name': 'mode', + 'type': 'string' + } + ] + }, + { + 'tfOpName': 'Pad', + 'category': 'transformation', + 'inputs': [ + { + 'start': 0, + 'name': 'x', + 'type': 'tensor' + }, + { + 'start': 1, + 'name': 'padding', + 'type': 'number[]' + } + ], + 'attrs': [ + { + 'tfName': 'constant_value', + 'name': 'constantValue', + 'type': 'number', + 'defaultValue': 0 + } + ] + }, + { + 'tfOpName': 'PadV2', + 'category': 'transformation', + 
'inputs': [ + { + 'start': 0, + 'name': 'x', + 'type': 'tensor' + }, + { + 'start': 1, + 'name': 'padding', + 'type': 'number[]' + }, + { + 'start': 2, + 'name': 'constantValue', + 'type': 'number', + 'defaultValue': 0 + } + ] + }, + { + 'tfOpName': 'Reshape', + 'category': 'transformation', + 'inputs': [ + { + 'start': 0, + 'name': 'x', + 'type': 'tensor' + }, + { + 'start': 1, + 'name': 'shape', + 'type': 'number[]' + } + ] + }, + { + 'tfOpName': 'EnsureShape', + 'category': 'transformation', + 'inputs': [ + { + 'start': 0, + 'name': 'x', + 'type': 'tensor' + }, + { + 'start': 1, + 'name': 'shape', + 'type': 'number[]' + } + ] + }, + { + 'tfOpName': 'Squeeze', + 'category': 'transformation', + 'inputs': [ + { + 'start': 0, + 'name': 'x', + 'type': 'tensor' + } + ], + 'attrs': [ + { + 'tfName': 'axis', + 'tfDeprecatedName': 'squeeze_dims', + 'name': 'axis', + 'type': 'number[]' + } + ] + }, + { + 'tfOpName': 'SpaceToBatchND', + 'category': 'transformation', + 'inputs': [ + { + 'start': 0, + 'name': 'x', + 'type': 'tensor' + }, + { + 'start': 1, + 'name': 'blockShape', + 'type': 'number[]' + }, + { + 'start': 2, + 'name': 'paddings', + 'type': 'number[]' + } + ] + }, + { + 'tfOpName': 'BatchToSpaceND', + 'category': 'transformation', + 'inputs': [ + { + 'start': 0, + 'name': 'x', + 'type': 'tensor' + }, + { + 'start': 1, + 'name': 'blockShape', + 'type': 'number[]' + }, + { + 'start': 2, + 'name': 'crops', + 'type': 'number[]' + } + ] + }, + { + 'tfOpName': 'DepthToSpace', + 'category': 'transformation', + 'inputs': [ + { + 'start': 0, + 'name': 'x', + 'type': 'tensor' + } + ], + 'attrs': [ + { + 'tfName': 'block_size', + 'name': 'blockSize', + 'type': 'number' + }, + { + 'tfName': 'data_format', + 'name': 'dataFormat', + 'type': 'string' + } + ] + }, + { + 'tfOpName': 'BroadcastTo', + 'category': 'transformation', + 'inputs': [ + { + 'start': 0, + 'name': 'x', + 'type': 'tensor' + }, + { + 'start': 1, + 'name': 'shape', + 'type': 'number[]' + } + ], + 'attrs': [] 
+ }, + { + 'tfOpName': 'BroadcastArgs', + 'category': 'transformation', + 'inputs': [ + { + 'start': 0, + 'name': 's0', + 'type': 'tensor' + }, + { + 'start': 1, + 'name': 's1', + 'type': 'tensor' + } + ], + 'attrs': [] + } + ]; + + var transformation = /*#__PURE__*/Object.freeze({ + __proto__: null, + json: json + }); + + /** + * @license + * Copyright 2018 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + class OperationMapper { + // Singleton instance for the mapper + static get Instance() { + return this._instance || (this._instance = new this()); + } + // Loads the op mapping from the JSON file. 
+ constructor() { + const ops = [ + arithmetic, basicMath, control, convolution, creation, dynamic, + evaluation, graph, hashTable, image, logical, matrices, normalization, + reduction, sliceJoin, sparse, spectral, string, transformation + ]; + const mappersJson = [].concat(...ops.map(op => op.json)); + this.opMappers = mappersJson.reduce((map, mapper) => { + map[mapper.tfOpName] = mapper; + return map; + }, {}); + } + // Converts the model inference graph from Tensorflow GraphDef to local + // representation for TensorFlow.js API + transformGraph(graph, signature = {}) { + const tfNodes = graph.node; + const placeholders = []; + const weights = []; + const initNodes = []; + const nodes = tfNodes.reduce((map, node) => { + map[node.name] = this.mapNode(node); + if (node.op.startsWith('Placeholder')) { + placeholders.push(map[node.name]); + } + else if (node.op === 'Const') { + weights.push(map[node.name]); + } + else if (node.input == null || node.input.length === 0) { + initNodes.push(map[node.name]); + } + return map; + }, {}); + let inputs = []; + const outputs = []; + let inputNodeNameToKey = {}; + let outputNodeNameToKey = {}; + if (signature != null) { + inputNodeNameToKey = this.mapSignatureEntries(signature.inputs); + outputNodeNameToKey = this.mapSignatureEntries(signature.outputs); + } + const allNodes = Object.keys(nodes); + allNodes.forEach(key => { + const node = nodes[key]; + node.inputNames.forEach((name, index) => { + const [nodeName, , outputName] = getNodeNameAndIndex(name); + const inputNode = nodes[nodeName]; + if (inputNode.outputs != null) { + const outputIndex = inputNode.outputs.indexOf(outputName); + if (outputIndex !== -1) { + const inputName = `${nodeName}:${outputIndex}`; + // update the input name to use the mapped output index directly. 
+ node.inputNames[index] = inputName; + } + } + node.inputs.push(inputNode); + inputNode.children.push(node); + }); + }); + // if signature has not outputs set, add any node that does not have + // outputs. + if (Object.keys(outputNodeNameToKey).length === 0) { + allNodes.forEach(key => { + const node = nodes[key]; + if (node.children.length === 0) { + outputs.push(node); + } + }); + } + else { + Object.keys(outputNodeNameToKey).forEach(name => { + const [nodeName,] = getNodeNameAndIndex(name); + const node = nodes[nodeName]; + if (node != null) { + node.signatureKey = outputNodeNameToKey[name]; + outputs.push(node); + } + }); + } + if (Object.keys(inputNodeNameToKey).length > 0) { + Object.keys(inputNodeNameToKey).forEach(name => { + const [nodeName,] = getNodeNameAndIndex(name); + const node = nodes[nodeName]; + if (node) { + node.signatureKey = inputNodeNameToKey[name]; + inputs.push(node); + } + }); + } + else { + inputs = placeholders; + } + let functions = {}; + if (graph.library != null && graph.library.function != null) { + functions = graph.library.function.reduce((functions, func) => { + functions[func.signature.name] = this.mapFunction(func); + return functions; + }, {}); + } + const result = { nodes, inputs, outputs, weights, placeholders, signature, functions }; + if (initNodes.length > 0) { + result.initNodes = initNodes; + } + return result; + } + mapSignatureEntries(entries) { + return Object.keys(entries || {}) + .reduce((prev, curr) => { + prev[entries[curr].name] = curr; + return prev; + }, {}); + } + mapNode(node) { + // Unsupported ops will cause an error at run-time (not parse time), since + // they may not be used by the actual execution subgraph. + const mapper = getRegisteredOp(node.op) || this.opMappers[node.op] || {}; + if (node.attr == null) { + node.attr = {}; + } + const newNode = { + name: node.name, + op: node.op, + category: mapper.category, + inputNames: (node.input || + []).map(input => input.startsWith('^') ? 
input.slice(1) : input), + inputs: [], + children: [], + inputParams: {}, + attrParams: {}, + rawAttrs: node.attr, + outputs: mapper.outputs + }; + if (mapper.inputs != null) { + newNode.inputParams = + mapper.inputs.reduce((map, param) => { + map[param.name] = { + type: param.type, + inputIndexStart: param.start, + inputIndexEnd: param.end + }; + return map; + }, {}); + } + if (mapper.attrs != null) { + newNode.attrParams = + mapper.attrs.reduce((map, param) => { + const type = param.type; + let value = undefined; + switch (param.type) { + case 'string': + value = getStringParam(node.attr, param.tfName, param.defaultValue); + if (value === undefined && !!param.tfDeprecatedName) { + value = getStringParam(node.attr, param.tfDeprecatedName, param.defaultValue); + } + break; + case 'string[]': + value = getStringArrayParam(node.attr, param.tfName, param.defaultValue); + if (value === undefined && !!param.tfDeprecatedName) { + value = getStringArrayParam(node.attr, param.tfDeprecatedName, param.defaultValue); + } + break; + case 'number': + value = getNumberParam(node.attr, param.tfName, (param.defaultValue || 0)); + if (value === undefined && !!param.tfDeprecatedName) { + value = getNumberParam(node.attr, param.tfDeprecatedName, param.defaultValue); + } + break; + case 'number[]': + value = getNumericArrayParam(node.attr, param.tfName, param.defaultValue); + if (value === undefined && !!param.tfDeprecatedName) { + value = getNumericArrayParam(node.attr, param.tfDeprecatedName, param.defaultValue); + } + break; + case 'bool': + value = getBoolParam(node.attr, param.tfName, param.defaultValue); + if (value === undefined && !!param.tfDeprecatedName) { + value = getBoolParam(node.attr, param.tfDeprecatedName, param.defaultValue); + } + break; + case 'bool[]': + value = getBoolArrayParam(node.attr, param.tfName, param.defaultValue); + if (value === undefined && !!param.tfDeprecatedName) { + value = getBoolArrayParam(node.attr, param.tfDeprecatedName, param.defaultValue); 
+ } + break; + case 'shape': + value = getTensorShapeParam(node.attr, param.tfName, param.defaultValue); + if (value === undefined && !!param.tfDeprecatedName) { + value = getTensorShapeParam(node.attr, param.tfDeprecatedName, param.defaultValue); + } + break; + case 'shape[]': + value = getTensorShapeArrayParam(node.attr, param.tfName, param.defaultValue); + if (value === undefined && !!param.tfDeprecatedName) { + value = getTensorShapeArrayParam(node.attr, param.tfDeprecatedName, param.defaultValue); + } + break; + case 'dtype': + value = getDtypeParam(node.attr, param.tfName, param.defaultValue); + if (value === undefined && !!param.tfDeprecatedName) { + value = getDtypeParam(node.attr, param.tfDeprecatedName, param.defaultValue); + } + break; + case 'dtype[]': + value = getDtypeArrayParam(node.attr, param.tfName, param.defaultValue); + if (value === undefined && !!param.tfDeprecatedName) { + value = getDtypeArrayParam(node.attr, param.tfDeprecatedName, param.defaultValue); + } + break; + case 'func': + value = getFuncParam(node.attr, param.tfName, param.defaultValue); + if (value === undefined && !!param.tfDeprecatedName) { + value = getFuncParam(node.attr, param.tfDeprecatedName, param.defaultValue); + } + break; + case 'tensor': + case 'tensors': + break; + default: + throw new Error(`Unsupported param type: ${param.type} for op: ${node.op}`); + } + map[param.name] = { value, type }; + return map; + }, {}); + } + return newNode; + } + // map the TFunctionDef to TFJS graph object + mapFunction(functionDef) { + const tfNodes = functionDef.nodeDef; + const placeholders = []; + const weights = []; + let nodes = {}; + if (tfNodes != null) { + nodes = tfNodes.reduce((map, node) => { + map[node.name] = this.mapNode(node); + if (node.op === 'Const') { + weights.push(map[node.name]); + } + return map; + }, {}); + } + const inputs = []; + const outputs = []; + functionDef.signature.inputArg.forEach(arg => { + const [nodeName,] = getNodeNameAndIndex(arg.name); + const 
node = { + name: nodeName, + op: 'Placeholder', + inputs: [], + inputNames: [], + category: 'graph', + inputParams: {}, + attrParams: { dtype: { value: parseDtypeParam(arg.type), type: 'dtype' } }, + children: [] + }; + node.signatureKey = arg.name; + inputs.push(node); + nodes[nodeName] = node; + }); + const allNodes = Object.keys(nodes); + allNodes.forEach(key => { + const node = nodes[key]; + node.inputNames.forEach((name, index) => { + const [nodeName, , outputName] = getNodeNameAndIndex(name); + const inputNode = nodes[nodeName]; + if (inputNode.outputs != null) { + const outputIndex = inputNode.outputs.indexOf(outputName); + if (outputIndex !== -1) { + const inputName = `${nodeName}:${outputIndex}`; + // update the input name to use the mapped output index directly. + node.inputNames[index] = inputName; + } + } + node.inputs.push(inputNode); + inputNode.children.push(node); + }); + }); + const returnNodeMap = functionDef.ret; + functionDef.signature.outputArg.forEach(output => { + const [nodeName, index] = getNodeNameAndIndex(returnNodeMap[output.name]); + const node = nodes[nodeName]; + if (node != null) { + node.defaultOutput = index; + outputs.push(node); + } + }); + const signature = this.mapArgsToSignature(functionDef); + return { nodes, inputs, outputs, weights, placeholders, signature }; + } + mapArgsToSignature(functionDef) { + return { + methodName: functionDef.signature.name, + inputs: functionDef.signature.inputArg.reduce((map, arg) => { + map[arg.name] = this.mapArgToTensorInfo(arg); + return map; + }, {}), + outputs: functionDef.signature.outputArg.reduce((map, arg) => { + map[arg.name] = this.mapArgToTensorInfo(arg, functionDef.ret); + return map; + }, {}), + }; + } + mapArgToTensorInfo(arg, nameMap) { + let name = arg.name; + if (nameMap != null) { + name = nameMap[name]; + } + return { name, dtype: arg.type }; + } + } + function decodeBase64(text) { + const global = env().global; + if (typeof global.atob !== 'undefined') { + return 
global.atob(text); + } + else if (typeof Buffer !== 'undefined') { + return new Buffer(text, 'base64').toString(); + } + else { + throw new Error('Unable to decode base64 in this environment. ' + + 'Missing built-in atob() or Buffer()'); + } + } + function parseStringParam(s, keepCase) { + const value = Array.isArray(s) ? String.fromCharCode.apply(null, s) : decodeBase64(s); + return keepCase ? value : value.toLowerCase(); + } + function getStringParam(attrs, name, def, keepCase = false) { + const param = attrs[name]; + if (param != null) { + return parseStringParam(param.s, keepCase); + } + return def; + } + function getBoolParam(attrs, name, def) { + const param = attrs[name]; + return param ? param.b : def; + } + function getNumberParam(attrs, name, def) { + const param = attrs[name] || {}; + const value = param['i'] != null ? param['i'] : (param['f'] != null ? param['f'] : def); + return (typeof value === 'number') ? value : parseInt(value, 10); + } + function parseDtypeParam(value) { + if (typeof (value) === 'string') { + // tslint:disable-next-line:no-any + value = DataType[value]; + } + switch (value) { + case DataType.DT_FLOAT: + case DataType.DT_HALF: + return 'float32'; + case DataType.DT_INT32: + case DataType.DT_INT64: + case DataType.DT_INT8: + case DataType.DT_UINT8: + return 'int32'; + case DataType.DT_BOOL: + return 'bool'; + case DataType.DT_DOUBLE: + return 'float32'; + case DataType.DT_STRING: + return 'string'; + case DataType.DT_COMPLEX64: + case DataType.DT_COMPLEX128: + return 'complex64'; + default: + // Unknown dtype error will happen at runtime (instead of parse time), + // since these nodes might not be used by the actual subgraph execution. 
+ return null; + } + } + function getFuncParam(attrs, name, def) { + const param = attrs[name]; + if (param && param.func) { + return param.func.name; + } + return def; + } + function getDtypeParam(attrs, name, def) { + const param = attrs[name]; + if (param && param.type) { + return parseDtypeParam(param.type); + } + return def; + } + function getDtypeArrayParam(attrs, name, def) { + const param = attrs[name]; + if (param && param.list && param.list.type) { + return param.list.type.map(v => parseDtypeParam(v)); + } + return def; + } + function parseTensorShapeParam(shape) { + if (shape.unknownRank) { + return undefined; + } + if (shape.dim != null) { + return shape.dim.map(dim => (typeof dim.size === 'number') ? dim.size : parseInt(dim.size, 10)); + } + return []; + } + function getTensorShapeParam(attrs, name, def) { + const param = attrs[name]; + if (param && param.shape) { + return parseTensorShapeParam(param.shape); + } + return def; + } + function getNumericArrayParam(attrs, name, def) { + const param = attrs[name]; + if (param) { + return ((param.list.f && param.list.f.length ? param.list.f : + param.list.i) || + []) + .map(v => (typeof v === 'number') ? v : parseInt(v, 10)); + } + return def; + } + function getStringArrayParam(attrs, name, def, keepCase = false) { + const param = attrs[name]; + if (param && param.list && param.list.s) { + return param.list.s.map((v) => { + return parseStringParam(v, keepCase); + }); + } + return def; + } + function getTensorShapeArrayParam(attrs, name, def) { + const param = attrs[name]; + if (param && param.list && param.list.shape) { + return param.list.shape.map((v) => { + return parseTensorShapeParam(v); + }); + } + return def; + } + function getBoolArrayParam(attrs, name, def) { + const param = attrs[name]; + if (param && param.list && param.list.b) { + return param.list.b; + } + return def; + } + + /** + * @license + * Copyright 2019 Google LLC. All Rights Reserved. 
+ * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + /** + * Helper class for lookup inputs and params for nodes in the model graph. + */ + class NodeValueImpl { + constructor(node, tensorMap, context) { + this.node = node; + this.tensorMap = tensorMap; + this.context = context; + this.inputs = []; + this.attrs = {}; + this.inputs = node.inputNames.map(name => this.getInput(name)); + if (node.rawAttrs != null) { + this.attrs = Object.keys(node.rawAttrs) + .reduce((attrs, key) => { + attrs[key] = this.getAttr(key); + return attrs; + }, {}); + } + } + /** + * Return the value of the attribute or input param. + * @param name String: name of attribute or input param. + */ + getInput(name) { + return getTensor(name, this.tensorMap, this.context); + } + /** + * Return the value of the attribute or input param. + * @param name String: name of attribute or input param. 
+ */ + getAttr(name, defaultValue) { + const value = this.node.rawAttrs[name]; + if (value.tensor != null) { + return getTensor(name, this.tensorMap, this.context); + } + if (value.i != null || value.f != null) { + return getNumberParam(this.node.rawAttrs, name, defaultValue); + } + if (value.s != null) { + return getStringParam(this.node.rawAttrs, name, defaultValue); + } + if (value.b != null) { + return getBoolParam(this.node.rawAttrs, name, defaultValue); + } + if (value.shape != null) { + return getTensorShapeParam(this.node.rawAttrs, name, defaultValue); + } + if (value.type != null) { + return getDtypeParam(this.node.rawAttrs, name, defaultValue); + } + if (value.list != null) { + if (value.list.i != null || value.list.f != null) { + return getNumericArrayParam(this.node.rawAttrs, name, defaultValue); + } + if (value.list.s != null) { + return getStringArrayParam(this.node.rawAttrs, name, defaultValue); + } + if (value.list.shape != null) { + return getTensorShapeArrayParam(this.node.rawAttrs, name, defaultValue); + } + if (value.list.b != null) { + return getBoolArrayParam(this.node.rawAttrs, name, defaultValue); + } + if (value.list.type != null) { + return getDtypeArrayParam(this.node.rawAttrs, name, defaultValue); + } + } + return defaultValue; + } + } + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ + + var tfOps = /*#__PURE__*/Object.freeze({ + __proto__: null, + OP_SCOPE_SUFFIX: OP_SCOPE_SUFFIX, + abs: abs$2, + acos: acos$2, + acosh: acosh$2, + add: add$3, + addN: addN$2, + all: all$2, + any: any$2, + argMax: argMax$2, + argMin: argMin$2, + asin: asin$2, + asinh: asinh$2, + atan: atan$2, + atan2: atan2$2, + atanh: atanh$2, + avgPool: avgPool$2, + avgPool3d: avgPool3d$1, + basicLSTMCell: basicLSTMCell, + batchNorm: batchNorm$2, + batchNorm2d: batchNorm2d, + batchNorm3d: batchNorm3d, + batchNorm4d: batchNorm4d, + batchToSpaceND: batchToSpaceND$2, + bincount: bincount$2, + bitwiseAnd: bitwiseAnd$2, + booleanMaskAsync: booleanMaskAsync, + broadcastArgs: broadcastArgs$2, + broadcastTo: broadcastTo, + buffer: buffer, + cast: cast$3, + ceil: ceil$2, + clipByValue: clipByValue$2, + clone: clone, + complex: complex$2, + concat: concat$2, + concat1d: concat1d, + concat2d: concat2d, + concat3d: concat3d, + concat4d: concat4d, + conv1d: conv1d$2, + conv2d: conv2d$4, + conv2dTranspose: conv2dTranspose$1, + conv3d: conv3d$2, + conv3dTranspose: conv3dTranspose$1, + cos: cos$2, + cosh: cosh$2, + cosineWindow: cosineWindow, + cumprod: cumprod$2, + cumsum: cumsum$2, + denseBincount: denseBincount$2, + depthToSpace: depthToSpace$2, + depthwiseConv2d: depthwiseConv2d$3, + diag: diag$2, + dilation2d: dilation2d, + div: div$1, + divNoNan: divNoNan, + dot: dot$2, + dropout: dropout$2, + einsum: einsum$2, + elu: elu$4, + enclosingPowerOfTwo: enclosingPowerOfTwo, + ensureShape: ensureShape, + equal: equal$2, + erf: erf$2, + euclideanNorm: euclideanNorm, + exp: exp$2, + expandDims: expandDims$3, + expm1: expm1$2, + eye: eye, + fft: fft$2, + fill: fill$2, + floor: floor$2, + floorDiv: floorDiv$2, + fused: fused_ops, + gather: gather$1, + gatherND: gatherND, + greater: greater$3, + greaterEqual: greaterEqual$2, + ifft: ifft$2, + imag: imag$2, + image: image$1, + inTopKAsync: inTopKAsync, + irfft: 
irfft, + isFinite: isFinite$3, + isInf: isInf$2, + isNaN: isNaN$3, + leakyRelu: leakyRelu$2, + less: less$3, + lessEqual: lessEqual$2, + linalg: linalg, + linspace: linspace, + localResponseNormalization: localResponseNormalization, + log: log$2, + log1p: log1p$2, + logSigmoid: logSigmoid, + logSoftmax: logSoftmax, + logSumExp: logSumExp, + logicalAnd: logicalAnd$2, + logicalNot: logicalNot$2, + logicalOr: logicalOr$2, + logicalXor: logicalXor, + losses: losses, + lowerBound: lowerBound$1, + matMul: matMul$1, + max: max$3, + maxPool: maxPool$2, + maxPool3d: maxPool3d$1, + maxPoolWithArgmax: maxPoolWithArgmax, + maximum: maximum$4, + mean: mean$3, + meshgrid: meshgrid, + min: min$3, + minimum: minimum$4, + mirrorPad: mirrorPad$1, + mod: mod$2, + moments: moments, + movingAverage: movingAverage, + mul: mul, + multiRNNCell: multiRNNCell, + multinomial: multinomial$2, + neg: neg$2, + norm: norm, + notEqual: notEqual$2, + oneHot: oneHot$3, + ones: ones$1, + onesLike: onesLike$3, + op: op, + outerProduct: outerProduct, + pad: pad, + pad1d: pad1d, + pad2d: pad2d, + pad3d: pad3d, + pad4d: pad4d, + pool: pool$1, + pow: pow$3, + prelu: prelu$3, + print: print, + prod: prod$2, + raggedGather: raggedGather$2, + raggedRange: raggedRange$2, + raggedTensorToTensor: raggedTensorToTensor$2, + rand: rand, + randomGamma: randomGamma, + randomNormal: randomNormal$2, + randomStandardNormal: randomStandardNormal, + randomUniform: randomUniform$1, + randomUniformInt: randomUniformInt, + range: range$3, + real: real$2, + reciprocal: reciprocal$2, + relu: relu$2, + relu6: relu6$2, + reshape: reshape$3, + reverse: reverse$2, + reverse1d: reverse1d, + reverse2d: reverse2d, + reverse3d: reverse3d, + reverse4d: reverse4d, + rfft: rfft, + round: round$2, + rsqrt: rsqrt$2, + scalar: scalar, + scatterND: scatterND, + searchSorted: searchSorted$2, + selu: selu$2, + separableConv2d: separableConv2d$1, + setdiff1dAsync: setdiff1dAsync, + sigmoid: sigmoid$2, + sign: sign$3, + signal: signal, + sin: 
sin$2, + sinh: sinh$2, + slice: slice$2, + slice1d: slice1d, + slice2d: slice2d, + slice3d: slice3d, + slice4d: slice4d, + softmax: softmax$3, + softplus: softplus$2, + spaceToBatchND: spaceToBatchND$2, + sparse: sparse$1, + sparseToDense: sparseToDense$2, + spectral: spectral$1, + split: split$3, + sqrt: sqrt$2, + square: square$2, + squaredDifference: squaredDifference$2, + squeeze: squeeze, + stack: stack, + step: step$2, + stridedSlice: stridedSlice$2, + string: string$1, + sub: sub$2, + sum: sum$3, + tan: tan$2, + tanh: tanh$2, + tensor: tensor, + tensor1d: tensor1d, + tensor2d: tensor2d, + tensor3d: tensor3d, + tensor4d: tensor4d, + tensor5d: tensor5d, + tensor6d: tensor6d, + tensorScatterUpdate: tensorScatterUpdate$2, + tile: tile$3, + topk: topk, + transpose: transpose$2, + truncatedNormal: truncatedNormal$1, + unique: unique$3, + unsortedSegmentSum: unsortedSegmentSum$2, + unstack: unstack, + upperBound: upperBound$1, + variable: variable$1, + where: where, + whereAsync: whereAsync, + zeros: zeros$2, + zerosLike: zerosLike$3 + }); + + /** + * @license + * Copyright 2018 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ + const executeOp$k = (node, tensorMap, context, ops = tfOps) => { + switch (node.op) { + case 'BiasAdd': + case 'AddV2': + case 'Add': { + return [ops.add(getParamValue('a', node, tensorMap, context), getParamValue('b', node, tensorMap, context))]; + } + case 'AddN': { + return [ops.addN(getParamValue('tensors', node, tensorMap, context))]; + } + case 'FloorMod': + case 'Mod': + return [ops.mod(getParamValue('a', node, tensorMap, context), getParamValue('b', node, tensorMap, context))]; + case 'Mul': + return [ops.mul(getParamValue('a', node, tensorMap, context), getParamValue('b', node, tensorMap, context))]; + case 'RealDiv': + case 'Div': { + return [ops.div(getParamValue('a', node, tensorMap, context), getParamValue('b', node, tensorMap, context))]; + } + case 'DivNoNan': { + return [ops.divNoNan(getParamValue('a', node, tensorMap, context), getParamValue('b', node, tensorMap, context))]; + } + case 'FloorDiv': { + return [ops.floorDiv(getParamValue('a', node, tensorMap, context), getParamValue('b', node, tensorMap, context))]; + } + case 'Sub': { + return [ops.sub(getParamValue('a', node, tensorMap, context), getParamValue('b', node, tensorMap, context))]; + } + case 'Minimum': { + return [ops.minimum(getParamValue('a', node, tensorMap, context), getParamValue('b', node, tensorMap, context))]; + } + case 'Maximum': { + return [ops.maximum(getParamValue('a', node, tensorMap, context), getParamValue('b', node, tensorMap, context))]; + } + case 'Pow': { + return [ops.pow(getParamValue('a', node, tensorMap, context), getParamValue('b', node, tensorMap, context))]; + } + case 'SquaredDifference': { + return [ops.squaredDifference(getParamValue('a', node, tensorMap, context), getParamValue('b', node, tensorMap, context))]; + } + default: + throw TypeError(`Node type ${node.op} is not implemented`); + } + }; + const CATEGORY$j = 'arithmetic'; + + /** + * @license + * Copyright 
2018 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + const executeOp$j = (node, tensorMap, context, ops = tfOps) => { + switch (node.op) { + case 'Abs': + case 'ComplexAbs': + return [ops.abs(getParamValue('x', node, tensorMap, context))]; + case 'Acos': + return [ops.acos(getParamValue('x', node, tensorMap, context))]; + case 'Acosh': + return [ops.acosh(getParamValue('x', node, tensorMap, context))]; + case 'Asin': + return [ops.asin(getParamValue('x', node, tensorMap, context))]; + case 'Asinh': + return [ops.asinh(getParamValue('x', node, tensorMap, context))]; + case 'Atan': + return [ops.atan(getParamValue('x', node, tensorMap, context))]; + case 'Atan2': + return [ops.atan2(getParamValue('x', node, tensorMap, context), getParamValue('y', node, tensorMap, context))]; + case 'Atanh': + return [ops.atanh(getParamValue('x', node, tensorMap, context))]; + case 'Ceil': + return [ops.ceil(getParamValue('x', node, tensorMap, context))]; + case 'Complex': + return [ops.complex(getParamValue('real', node, tensorMap, context), getParamValue('imag', node, tensorMap, context))]; + case 'Cos': + return [ops.cos(getParamValue('x', node, tensorMap, context))]; + case 'Cosh': + return [ops.cosh(getParamValue('x', node, tensorMap, context))]; + case 'Elu': + return [ops.elu(getParamValue('x', node, tensorMap, context))]; + case 'Erf': + return 
[ops.erf(getParamValue('x', node, tensorMap, context))]; + case 'Exp': + return [ops.exp(getParamValue('x', node, tensorMap, context))]; + case 'Expm1': { + return [ops.expm1(getParamValue('x', node, tensorMap, context))]; + } + case 'Floor': + return [ops.floor(getParamValue('x', node, tensorMap, context))]; + case 'Log': + return [ops.log(getParamValue('x', node, tensorMap, context))]; + case 'Log1p': { + return [ops.log1p(getParamValue('x', node, tensorMap, context))]; + } + case 'Imag': + return [ops.imag(getParamValue('x', node, tensorMap, context))]; + case 'Neg': + return [ops.neg(getParamValue('x', node, tensorMap, context))]; + case 'Reciprocal': { + return [ops.reciprocal(getParamValue('x', node, tensorMap, context))]; + } + case 'Real': + return [ops.real(getParamValue('x', node, tensorMap, context))]; + case 'Relu': + return [ops.relu(getParamValue('x', node, tensorMap, context))]; + case 'Round': { + return [ops.round(getParamValue('x', node, tensorMap, context))]; + } + case 'Selu': + return [ops.selu(getParamValue('x', node, tensorMap, context))]; + case 'Sigmoid': + return [ops.sigmoid(getParamValue('x', node, tensorMap, context))]; + case 'Sin': + return [ops.sin(getParamValue('x', node, tensorMap, context))]; + case 'Sign': { + return [ops.sign(getParamValue('x', node, tensorMap, context))]; + } + case 'Sinh': { + return [ops.sinh(getParamValue('x', node, tensorMap, context))]; + } + case 'Softplus': { + return [ops.softplus(getParamValue('x', node, tensorMap, context))]; + } + case 'Sqrt': { + return [ops.sqrt(getParamValue('x', node, tensorMap, context))]; + } + case 'Square': { + return [ops.square(getParamValue('x', node, tensorMap, context))]; + } + case 'Tanh': { + return [ops.tanh(getParamValue('x', node, tensorMap, context))]; + } + case 'Tan': + return [ops.tan(getParamValue('x', node, tensorMap, context))]; + case 'ClipByValue': + return [ops.clipByValue(getParamValue('x', node, tensorMap, context), getParamValue('clipValueMin', node, 
tensorMap, context), getParamValue('clipValueMax', node, tensorMap, context))]; + case 'Relu6': + return [ops.relu6(getParamValue('x', node, tensorMap, context))]; + case 'Rsqrt': + return [ops.rsqrt(getTensor(node.inputNames[0], tensorMap, context))]; + case 'LeakyRelu': + return [ops.leakyRelu(getParamValue('x', node, tensorMap, context), getParamValue('alpha', node, tensorMap, context))]; + case 'Prelu': + return [ops.prelu(getParamValue('x', node, tensorMap, context), getParamValue('alpha', node, tensorMap, context))]; + case 'IsNan': + return [ops.isNaN(getTensor(node.inputNames[0], tensorMap, context))]; + case 'IsInf': + return [ops.isInf(getTensor(node.inputNames[0], tensorMap, context))]; + case 'IsFinite': + return [ops.isFinite(getTensor(node.inputNames[0], tensorMap, context))]; + default: + throw TypeError(`Node type ${node.op} is not implemented`); + } + }; + const CATEGORY$i = 'basic_math'; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + /** + * Used by TensorList and TensorArray to verify if elementShape matches, support + * negative value as the dim shape. 
+ * @param shapeA + * @param shapeB + * @param errorMessagePrefix + */ + function assertShapesMatchAllowUndefinedSize(shapeA, shapeB, errorMessagePrefix = '') { + // constant shape means unknown rank + if (typeof shapeA === 'number' || typeof shapeB === 'number') { + return; + } + assert$1(shapeA.length === shapeB.length, () => errorMessagePrefix + ` Shapes ${shapeA} and ${shapeB} must match`); + for (let i = 0; i < shapeA.length; i++) { + const dim0 = shapeA[i]; + const dim1 = shapeB[i]; + assert$1(dim0 < 0 || dim1 < 0 || dim0 === dim1, () => errorMessagePrefix + ` Shapes ${shapeA} and ${shapeB} must match`); + } + } + function fullDefinedShape(elementShape) { + if (typeof elementShape === 'number' || elementShape.some(dim => dim < 0)) { + return false; + } + return true; + } + /** + * Generate the output element shape from the list elementShape, list tensors + * and input param. + * @param listElementShape + * @param tensors + * @param elementShape + */ + function inferElementShape(listElementShape, tensors, elementShape) { + let partialShape = mergeElementShape(listElementShape, elementShape); + const notfullDefinedShape = !fullDefinedShape(partialShape); + if (notfullDefinedShape && tensors.length === 0) { + throw new Error(`Tried to calculate elements of an empty list` + + ` with non-fully-defined elementShape: ${partialShape}`); + } + if (notfullDefinedShape) { + tensors.forEach(tensor => { + partialShape = mergeElementShape(tensor.shape, partialShape); + }); + } + if (!fullDefinedShape(partialShape)) { + throw new Error(`Non-fully-defined elementShape: ${partialShape}`); + } + return partialShape; + } + function mergeElementShape(elementShapeA, elementShapeB) { + if (typeof elementShapeA === 'number') { + return elementShapeB; + } + if (typeof elementShapeB === 'number') { + return elementShapeA; + } + if (elementShapeA.length !== elementShapeB.length) { + throw new Error(`Incompatible ranks during merge: ${elementShapeA} vs. 
${elementShapeB}`); + } + const result = []; + for (let i = 0; i < elementShapeA.length; ++i) { + const dim0 = elementShapeA[i]; + const dim1 = elementShapeB[i]; + if (dim0 >= 0 && dim1 >= 0 && dim0 !== dim1) { + throw new Error(`Incompatible shape during merge: ${elementShapeA} vs. ${elementShapeB}`); + } + result[i] = dim0 >= 0 ? dim0 : dim1; + } + return result; + } + + /** + * @license + * Copyright 2018 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + /** + * The TensorArray object keeps an array of Tensors. It + * allows reading from the array and writing to the array. + */ + class TensorArray { + constructor(name, dtype, maxSize, elementShape, identicalElementShapes, dynamicSize, clearAfterRead) { + this.name = name; + this.dtype = dtype; + this.maxSize = maxSize; + this.elementShape = elementShape; + this.identicalElementShapes = identicalElementShapes; + this.dynamicSize = dynamicSize; + this.clearAfterRead = clearAfterRead; + this.tensors = []; + this.closed_ = false; + this.idTensor = scalar(0); + keep(this.idTensor); + } + get id() { + return this.idTensor.id; + } + get closed() { + return this.closed_; + } + /** + * Dispose the tensors and idTensor and mark the TensoryArray as closed. 
+ */ + clearAndClose(keepIds) { + this.tensors.forEach(tensor => { + if (keepIds == null || !keepIds.has(tensor.tensor.id)) { + tensor.tensor.dispose(); + } + }); + this.tensors = []; + this.closed_ = true; + this.idTensor.dispose(); + } + size() { + return this.tensors.length; + } + /** + * Read the value at location index in the TensorArray. + * @param index Number the index to read from. + */ + read(index) { + if (this.closed_) { + throw new Error(`TensorArray ${this.name} has already been closed.`); + } + if (index < 0 || index >= this.size()) { + throw new Error(`Tried to read from index ${index}, but array size is: ${this.size()}`); + } + const tensorWithState = this.tensors[index]; + if (tensorWithState.cleared) { + throw new Error(`TensorArray ${this.name}: Could not read index ${index} twice because it was cleared after a previous read ` + + `(perhaps try setting clear_after_read = false?).`); + } + if (this.clearAfterRead) { + tensorWithState.cleared = true; + } + tensorWithState.read = true; + return tensorWithState.tensor; + } + /** + * Helper method to read multiple tensors from the specified indices. + */ + readMany(indices) { + return indices.map(index => this.read(index)); + } + /** + * Write value into the index of the TensorArray. + * @param index number the index to write to. 
+ * @param tensor + */ + write(index, tensor) { + if (this.closed_) { + throw new Error(`TensorArray ${this.name} has already been closed.`); + } + if (index < 0 || !this.dynamicSize && index >= this.maxSize) { + throw new Error(`Tried to write to index ${index}, but array is not resizeable and size is: ${this.maxSize}`); + } + const t = this.tensors[index] || {}; + if (tensor.dtype !== this.dtype) { + throw new Error(`TensorArray ${this.name}: Could not write to TensorArray index ${index}, + because the value dtype is ${tensor.dtype}, but TensorArray dtype is ${this.dtype}.`); + } + // Set the shape for the first time write to unknow shape tensor array + if (this.size() === 0 && + (this.elementShape == null || this.elementShape.length === 0)) { + this.elementShape = tensor.shape; + } + assertShapesMatchAllowUndefinedSize(this.elementShape, tensor.shape, `TensorArray ${this.name}: Could not write to TensorArray index ${index}.`); + if (t.read) { + throw new Error(`TensorArray ${this.name}: Could not write to TensorArray index ${index}, because it has already been read.`); + } + if (t.written) { + throw new Error(`TensorArray ${this.name}: Could not write to TensorArray index ${index}, because it has already been written.`); + } + t.tensor = tensor; + keep(tensor); + t.written = true; + this.tensors[index] = t; + } + /** + * Helper method to write multiple tensors to the specified indices. + */ + writeMany(indices, tensors) { + if (indices.length !== tensors.length) { + throw new Error(`TensorArray ${this.name}: could not write multiple tensors,` + + `because the index size: ${indices.length} is not the same as tensors size: ${tensors.length}.`); + } + indices.forEach((i, index) => this.write(i, tensors[index])); + } + /** + * Return selected values in the TensorArray as a packed Tensor. All of + * selected values must have been written and their shapes must all match. + * @param [indices] number[] Optional. Taking values in [0, max_value). 
If the + * TensorArray is not dynamic, max_value=size(). If not specified returns + * all tensors in the original order. + * @param [dtype] + */ + gather(indices, dtype) { + if (!!dtype && dtype !== this.dtype) { + throw new Error(`TensorArray dtype is ${this.dtype} but gather requested dtype ${dtype}`); + } + if (!indices) { + indices = []; + for (let i = 0; i < this.size(); i++) { + indices.push(i); + } + } + else { + indices = indices.slice(0, this.size()); + } + if (indices.length === 0) { + return tensor([], [0].concat(this.elementShape)); + } + // Read all the PersistentTensors into a vector to keep track of + // their memory. + const tensors = this.readMany(indices); + assertShapesMatchAllowUndefinedSize(this.elementShape, tensors[0].shape, 'TensorArray shape mismatch: '); + return stack(tensors, 0); + } + /** + * Return the values in the TensorArray as a concatenated Tensor. + */ + concat(dtype) { + if (!!dtype && dtype !== this.dtype) { + throw new Error(`TensorArray dtype is ${this.dtype} but concat requested dtype ${dtype}`); + } + if (this.size() === 0) { + return tensor([], [0].concat(this.elementShape)); + } + const indices = []; + for (let i = 0; i < this.size(); i++) { + indices.push(i); + } + // Collect all the tensors from the tensors array. + const tensors = this.readMany(indices); + assertShapesMatchAllowUndefinedSize(this.elementShape, tensors[0].shape, `TensorArray shape mismatch: tensor array shape (${this.elementShape}) vs first tensor shape (${tensors[0].shape})`); + return concat$2(tensors, 0); + } + /** + * Scatter the values of a Tensor in specific indices of a TensorArray. + * @param indices number[] values in [0, max_value). If the + * TensorArray is not dynamic, max_value=size(). + * @param tensor Tensor input tensor. 
+ */ + scatter(indices, tensor) { + if (tensor.dtype !== this.dtype) { + throw new Error(`TensorArray dtype is ${this.dtype} but tensor has dtype ${tensor.dtype}`); + } + if (indices.length !== tensor.shape[0]) { + throw new Error(`Expected len(indices) == tensor.shape[0], but saw: ${indices.length} vs. ${tensor.shape[0]}`); + } + const maxIndex = Math.max(...indices); + if (!this.dynamicSize && maxIndex >= this.maxSize) { + throw new Error(`Max index must be < array size (${maxIndex} vs. ${this.maxSize})`); + } + this.writeMany(indices, unstack(tensor, 0)); + } + /** + * Split the values of a Tensor into the TensorArray. + * @param length number[] with the lengths to use when splitting value along + * its first dimension. + * @param tensor Tensor, the tensor to split. + */ + split(length, tensor) { + if (tensor.dtype !== this.dtype) { + throw new Error(`TensorArray dtype is ${this.dtype} but tensor has dtype ${tensor.dtype}`); + } + let totalLength = 0; + const cumulativeLengths = length.map(len => { + totalLength += len; + return totalLength; + }); + if (totalLength !== tensor.shape[0]) { + throw new Error(`Expected sum of lengths to be equal to + tensor.shape[0], but sum of lengths is + ${totalLength}, and tensor's shape is: ${tensor.shape}`); + } + if (!this.dynamicSize && length.length !== this.maxSize) { + throw new Error(`TensorArray's size is not equal to the size of lengths (${this.maxSize} vs. ${length.length}), ` + + 'and the TensorArray is not marked as dynamically resizeable'); + } + const elementPerRow = totalLength === 0 ? 0 : tensor.size / totalLength; + const tensors = []; + tidy(() => { + tensor = reshape$3(tensor, [1, totalLength, elementPerRow]); + for (let i = 0; i < length.length; ++i) { + const previousLength = (i === 0) ? 
0 : cumulativeLengths[i - 1]; + const indices = [0, previousLength, 0]; + const sizes = [1, length[i], elementPerRow]; + tensors[i] = reshape$3(slice$2(tensor, indices, sizes), this.elementShape); + } + return tensors; + }); + const indices = []; + for (let i = 0; i < length.length; i++) { + indices[i] = i; + } + this.writeMany(indices, tensors); + } + } + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + /** + * TensorList stores a container of `tf.Tensor` objects, which are accessible + * via tensors field. + * + * In order to get a copy of the underlying list, use the copy method: + * ``` + * TensorList b = a.copy(); + * b.tensors().pushBack(t); // This does not modify a.tensors(). + * ``` + * + * Note that this is not a deep copy: the memory locations of the underlying + * tensors will still point to the same locations of the corresponding tensors + * in the original. + */ + class TensorList { + get id() { + return this.idTensor.id; + } + /** + * + * @param tensors list of tensors + * @param elementShape shape of each tensor, this can be a single number (any + * shape is allowed) or partial shape (dim = -1). + * @param elementDtype data type of each tensor + * @param maxNumElements The maximum allowed size of `tensors`. Defaults to -1 + * meaning that the size of `tensors` is unbounded. 
+ */ + constructor(tensors, elementShape, elementDtype, maxNumElements = -1) { + this.tensors = tensors; + this.elementShape = elementShape; + this.elementDtype = elementDtype; + if (tensors != null) { + tensors.forEach(tensor => { + if (elementDtype !== tensor.dtype) { + throw new Error(`Invalid data types; op elements ${elementDtype}, but list elements ${tensor.dtype}`); + } + assertShapesMatchAllowUndefinedSize(elementShape, tensor.shape, 'TensorList shape mismatch: '); + keep(tensor); + }); + } + this.idTensor = scalar(0); + this.maxNumElements = maxNumElements; + keep(this.idTensor); + } + /** + * Get a new TensorList containing a copy of the underlying tensor container. + */ + copy() { + return new TensorList([...this.tensors], this.elementShape, this.elementDtype); + } + /** + * Dispose the tensors and idTensor and clear the tensor list. + */ + clearAndClose(keepIds) { + this.tensors.forEach(tensor => { + if (keepIds == null || !keepIds.has(tensor.id)) { + tensor.dispose(); + } + }); + this.tensors.length = 0; + this.idTensor.dispose(); + } + /** + * The size of the tensors in the tensor list. + */ + size() { + return this.tensors.length; + } + /** + * Return a tensor that stacks a list of rank-R tf.Tensors into one rank-(R+1) + * tf.Tensor. 
+ * @param elementShape shape of each tensor + * @param elementDtype data type of each tensor + * @param numElements the number of elements to stack + */ + stack(elementShape, elementDtype, numElements = -1) { + if (elementDtype !== this.elementDtype) { + throw new Error(`Invalid data types; op elements ${elementDtype}, but list elements ${this.elementDtype}`); + } + if (numElements !== -1 && this.tensors.length !== numElements) { + throw new Error(`Operation expected a list with ${numElements} elements but got a list with ${this.tensors.length} elements.`); + } + assertShapesMatchAllowUndefinedSize(elementShape, this.elementShape, 'TensorList shape mismatch: '); + const outputElementShape = inferElementShape(this.elementShape, this.tensors, elementShape); + return tidy(() => { + const reshapedTensors = this.tensors.map(tensor => reshape$3(tensor, outputElementShape)); + return stack(reshapedTensors, 0); + }); + } + /** + * Pop a tensor from the end of the list. + * @param elementShape shape of the tensor + * @param elementDtype data type of the tensor + */ + popBack(elementShape, elementDtype) { + if (elementDtype !== this.elementDtype) { + throw new Error(`Invalid data types; op elements ${elementDtype}, but list elements ${this.elementDtype}`); + } + if (this.size() === 0) { + throw new Error('Trying to pop from an empty list.'); + } + const outputElementShape = inferElementShape(this.elementShape, this.tensors, elementShape); + const tensor = this.tensors.pop(); + tensor.kept = false; + assertShapesMatchAllowUndefinedSize(tensor.shape, elementShape, 'TensorList shape mismatch: '); + return reshape$3(tensor, outputElementShape); + } + /** + * Push a tensor to the end of the list. + * @param tensor Tensor to be pushed. 
+ */ + pushBack(tensor) { + if (tensor.dtype !== this.elementDtype) { + throw new Error(`Invalid data types; op elements ${tensor.dtype}, but list elements ${this.elementDtype}`); + } + assertShapesMatchAllowUndefinedSize(tensor.shape, this.elementShape, 'TensorList shape mismatch: '); + if (this.maxNumElements === this.size()) { + throw new Error(`Trying to push element into a full list.`); + } + keep(tensor); + this.tensors.push(tensor); + } + /** + * Update the size of the list. + * @param size the new size of the list. + */ + resize(size) { + if (size < 0) { + throw new Error(`TensorListResize expects size to be non-negative. Got: ${size}`); + } + if (this.maxNumElements !== -1 && size > this.maxNumElements) { + throw new Error(`TensorListResize input size ${size} is greater maxNumElement ${this.maxNumElements}.`); + } + const destTensorList = new TensorList([], this.elementShape, this.elementDtype, this.maxNumElements); + destTensorList.tensors.length = size; + for (let i = 0; i < Math.min(this.tensors.length, size); ++i) { + destTensorList.tensors[i] = this.tensors[i]; + } + return destTensorList; + } + /** + * Retrieve the element at the provided index + * @param elementShape shape of the tensor + * @param elementDtype dtype of the tensor + * @param elementIndex index of the tensor + */ + getItem(elementIndex, elementShape, elementDtype) { + if (elementDtype !== this.elementDtype) { + throw new Error(`Invalid data types; op elements ${elementDtype}, but list elements ${this.elementDtype}`); + } + if (elementIndex < 0 || elementIndex > this.tensors.length) { + throw new Error(`Trying to access element ${elementIndex} in a list with ${this.tensors.length} elements.`); + } + if (this.tensors[elementIndex] == null) { + throw new Error(`element at index ${elementIndex} is null.`); + } + assertShapesMatchAllowUndefinedSize(this.tensors[elementIndex].shape, elementShape, 'TensorList shape mismatch: '); + const outputElementShape = 
inferElementShape(this.elementShape, this.tensors, elementShape); + return reshape$3(this.tensors[elementIndex], outputElementShape); + } + /** + * Set the tensor at the index + * @param elementIndex index of the tensor + * @param tensor the tensor to be inserted into the list + */ + setItem(elementIndex, tensor) { + if (tensor.dtype !== this.elementDtype) { + throw new Error(`Invalid data types; op elements ${tensor.dtype}, but list elements ${this.elementDtype}`); + } + if (elementIndex < 0 || + this.maxNumElements !== -1 && elementIndex >= this.maxNumElements) { + throw new Error(`Trying to set element ${elementIndex} in a list with max ${this.maxNumElements} elements.`); + } + assertShapesMatchAllowUndefinedSize(this.elementShape, tensor.shape, 'TensorList shape mismatch: '); + keep(tensor); + // dispose the previous value if it is replacing. + if (this.tensors[elementIndex] != null) { + this.tensors[elementIndex].kept = false; + } + this.tensors[elementIndex] = tensor; + } + /** + * Return selected values in the TensorList as a stacked Tensor. All of + * selected values must have been written and their shapes must all match. + * @param indices indices of tensors to gather + * @param elementDtype output tensor dtype + * @param elementShape output tensor element shape + */ + gather(indices, elementDtype, elementShape) { + if (elementDtype !== this.elementDtype) { + throw new Error(`Invalid data types; op elements ${elementDtype}, but list elements ${this.elementDtype}`); + } + assertShapesMatchAllowUndefinedSize(this.elementShape, elementShape, 'TensorList shape mismatch: '); + // When indices is greater than the size of the list, indices beyond the + // size of the list are ignored. 
+ indices = indices.slice(0, this.size()); + const outputElementShape = inferElementShape(this.elementShape, this.tensors, elementShape); + if (indices.length === 0) { + return tensor([], [0].concat(outputElementShape)); + } + return tidy(() => { + const tensors = indices.map(i => reshape$3(this.tensors[i], outputElementShape)); + return stack(tensors, 0); + }); + } + /** + * Return the values in the TensorList as a concatenated Tensor. + * @param elementDtype output tensor dtype + * @param elementShape output tensor element shape + */ + concat(elementDtype, elementShape) { + if (!!elementDtype && elementDtype !== this.elementDtype) { + throw new Error(`TensorList dtype is ${this.elementDtype} but concat requested dtype ${elementDtype}`); + } + assertShapesMatchAllowUndefinedSize(this.elementShape, elementShape, 'TensorList shape mismatch: '); + const outputElementShape = inferElementShape(this.elementShape, this.tensors, elementShape); + if (this.size() === 0) { + return tensor([], [0].concat(outputElementShape)); + } + return tidy(() => { + const tensors = this.tensors.map(t => reshape$3(t, outputElementShape)); + return concat$2(tensors, 0); + }); + } + } + /** + * Creates a TensorList which, when stacked, has the value of tensor. 
+ * @param tensor from tensor + * @param elementShape output tensor element shape + */ + function fromTensor(tensor, elementShape, elementDtype) { + const dtype = tensor.dtype; + if (tensor.shape.length < 1) { + throw new Error(`Tensor must be at least a vector, but saw shape: ${tensor.shape}`); + } + if (tensor.dtype !== elementDtype) { + throw new Error(`Invalid data types; op elements ${tensor.dtype}, but list elements ${elementDtype}`); + } + const tensorElementShape = tensor.shape.slice(1); + assertShapesMatchAllowUndefinedSize(tensorElementShape, elementShape, 'TensorList shape mismatch: '); + const tensorList = unstack(tensor); + return new TensorList(tensorList, elementShape, dtype); + } + /** + * Return a TensorList of the given size with empty elements. + * @param elementShape the shape of the future elements of the list + * @param elementDtype the desired type of elements in the list + * @param numElements the number of elements to reserve + * @param maxNumElements the maximum number of elements in th list + */ + function reserve(elementShape, elementDtype, numElements, maxNumElements) { + return new TensorList([], elementShape, elementDtype, maxNumElements); + } + /** + * Put tensors at specific indices of a stacked tensor into a TensorList. + * @param indices list of indices on how to scatter the tensor. + * @param tensor input tensor. + * @param elementShape the shape of the future elements of the list + * @param numElements the number of elements to scatter + */ + function scatter(tensor, indices, elementShape, numElements) { + if (indices.length !== tensor.shape[0]) { + throw new Error(`Expected len(indices) == tensor.shape[0], but saw: ${indices.length} vs. ${tensor.shape[0]}`); + } + const maxIndex = Math.max(...indices); + if (numElements != null && numElements !== -1 && maxIndex >= numElements) { + throw new Error(`Max index must be < array size (${maxIndex} vs. 
${numElements})`); + } + const list = new TensorList([], elementShape, tensor.dtype, numElements); + const tensors = unstack(tensor, 0); + indices.forEach((value, index) => { + list.setItem(value, tensors[index]); + }); + return list; + } + /** + * Split the values of a Tensor into a TensorList. + * @param length the lengths to use when splitting value along + * its first dimension. + * @param tensor the tensor to split. + * @param elementShape the shape of the future elements of the list + */ + function split$1(tensor, length, elementShape) { + let totalLength = 0; + const cumulativeLengths = length.map(len => { + totalLength += len; + return totalLength; + }); + if (totalLength !== tensor.shape[0]) { + throw new Error(`Expected sum of lengths to be equal to + tensor.shape[0], but sum of lengths is + ${totalLength}, and tensor's shape is: ${tensor.shape}`); + } + const shapeWithoutFirstDim = tensor.shape.slice(1); + const outputElementShape = mergeElementShape(shapeWithoutFirstDim, elementShape); + const elementPerRow = totalLength === 0 ? 0 : tensor.size / totalLength; + const tensors = tidy(() => { + const tensors = []; + tensor = reshape$3(tensor, [1, totalLength, elementPerRow]); + for (let i = 0; i < length.length; ++i) { + const previousLength = (i === 0) ? 0 : cumulativeLengths[i - 1]; + const indices = [0, previousLength, 0]; + const sizes = [1, length[i], elementPerRow]; + tensors[i] = reshape$3(slice$2(tensor, indices, sizes), outputElementShape); + } + tensor.dispose(); + return tensors; + }); + const list = new TensorList([], elementShape, tensor.dtype, length.length); + for (let i = 0; i < tensors.length; i++) { + list.setItem(i, tensors[i]); + } + return list; + } + + /** + * @license + * Copyright 2018 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + const executeOp$i = async (node, tensorMap, context) => { + switch (node.op) { + case 'If': + case 'StatelessIf': { + const thenFunc = getParamValue('thenBranch', node, tensorMap, context); + const elseFunc = getParamValue('elseBranch', node, tensorMap, context); + const cond = getParamValue('cond', node, tensorMap, context); + const args = getParamValue('args', node, tensorMap, context); + const condValue = await cond.data(); + if (condValue[0]) { + return context.functionMap[thenFunc].executeFunctionAsync(args, context.tensorArrayMap, context.tensorListMap); + } + else { + return context.functionMap[elseFunc].executeFunctionAsync(args, context.tensorArrayMap, context.tensorListMap); + } + } + case 'While': + case 'StatelessWhile': { + const bodyFunc = getParamValue('body', node, tensorMap, context); + const condFunc = getParamValue('cond', node, tensorMap, context); + const args = getParamValue('args', node, tensorMap, context); + // Calculate the condition of the loop + const condResult = (await context.functionMap[condFunc].executeFunctionAsync(args, context.tensorArrayMap, context.tensorListMap)); + const argIds = args.map(tensor => tensor.id); + let condValue = await condResult[0].data(); + // Dispose the intermediate tensors for condition function + condResult.forEach(tensor => { + if (!tensor.kept && argIds.indexOf(tensor.id) === -1) { + tensor.dispose(); + } + }); + let result = args; + while (condValue[0]) { + // Record the previous result for 
intermediate tensor tracking + const origResult = result; + // Execution the body of the loop + result = await context.functionMap[bodyFunc].executeFunctionAsync(result, context.tensorArrayMap, context.tensorListMap); + const resultIds = result.map(tensor => tensor.id); + // Dispose the intermediate tensor for body function that is not global + // kept, not input/output of the body function + origResult.forEach(tensor => { + if (!tensor.kept && argIds.indexOf(tensor.id) === -1 && + resultIds.indexOf(tensor.id) === -1) { + tensor.dispose(); + } + }); + // Recalcuate the condition of the loop using the latest results. + const condResult = (await context.functionMap[condFunc].executeFunctionAsync(result, context.tensorArrayMap, context.tensorListMap)); + condValue = await condResult[0].data(); + // Dispose the intermediate tensors for condition function + condResult.forEach(tensor => { + if (!tensor.kept && argIds.indexOf(tensor.id) === -1 && + resultIds.indexOf(tensor.id) === -1) { + tensor.dispose(); + } + }); + } + return result; + } + case 'LoopCond': { + const pred = getParamValue('pred', node, tensorMap, context); + return [cloneTensor(pred)]; + } + case 'Switch': { + const pred = getParamValue('pred', node, tensorMap, context); + let data = getParamValue('data', node, tensorMap, context); + if (!data.kept) { + data = cloneTensor(data); + } + // Outputs nodes :0 => false, :1 => true + return (await pred.data())[0] ? 
[undefined, data] : [data, undefined]; + } + case 'Merge': { + const inputName = node.inputNames.find(name => getTensor(name, tensorMap, context) !== undefined); + if (inputName) { + const data = getTensor(inputName, tensorMap, context); + return [cloneTensor(data)]; + } + return undefined; + } + case 'Enter': { + const frameId = getParamValue('frameName', node, tensorMap, context); + const data = getParamValue('tensor', node, tensorMap, context); + context.enterFrame(frameId); + return [cloneTensor(data)]; + } + case 'Exit': { + const data = getParamValue('tensor', node, tensorMap, context); + context.exitFrame(); + return [cloneTensor(data)]; + } + case 'NextIteration': { + const data = getParamValue('tensor', node, tensorMap, context); + context.nextIteration(); + return [cloneTensor(data)]; + } + case 'TensorArrayV3': { + const size = getParamValue('size', node, tensorMap, context); + const dtype = getParamValue('dtype', node, tensorMap, context); + const elementShape = getParamValue('elementShape', node, tensorMap, context); + const dynamicSize = getParamValue('dynamicSize', node, tensorMap, context); + const clearAfterRead = getParamValue('clearAfterRead', node, tensorMap, context); + const identicalElementShapes = getParamValue('identicalElementShapes', node, tensorMap, context); + const name = getParamValue('name', node, tensorMap, context); + const tensorArray = new TensorArray(name, dtype, size, elementShape, identicalElementShapes, dynamicSize, clearAfterRead); + context.addTensorArray(tensorArray); + return [tensorArray.idTensor, scalar(1.0)]; + } + case 'TensorArrayWriteV3': { + const id = getParamValue('tensorArrayId', node, tensorMap, context); + const index = getParamValue('index', node, tensorMap, context); + const writeTensor = getParamValue('tensor', node, tensorMap, context); + const writeTensorArray = context.getTensorArray(id.id); + writeTensorArray.write(index, writeTensor); + return [writeTensorArray.idTensor]; + } + case 
'TensorArrayReadV3': { + const readId = getParamValue('tensorArrayId', node, tensorMap, context); + const readIndex = getParamValue('index', node, tensorMap, context); + const readTensorArray = context.getTensorArray(readId.id); + return [readTensorArray.read(readIndex)]; + } + case 'TensorArrayGatherV3': { + const gatherId = getParamValue('tensorArrayId', node, tensorMap, context); + const gatherIndices = getParamValue('indices', node, tensorMap, context); + const gatherDtype = getParamValue('dtype', node, tensorMap, context); + const gatherTensorArray = context.getTensorArray(gatherId.id); + return [gatherTensorArray.gather(gatherIndices, gatherDtype)]; + } + case 'TensorArrayScatterV3': { + const scatterId = getParamValue('tensorArrayId', node, tensorMap, context); + const scatterIndices = getParamValue('indices', node, tensorMap, context); + const scatterTensor = getParamValue('tensor', node, tensorMap, context); + const scatterTensorArray = context.getTensorArray(scatterId.id); + scatterTensorArray.scatter(scatterIndices, scatterTensor); + return [scatterTensorArray.idTensor]; + } + case 'TensorArrayConcatV3': { + const concatId = getParamValue('tensorArrayId', node, tensorMap, context); + const concatTensorArray = context.getTensorArray(concatId.id); + const concatDtype = getParamValue('dtype', node, tensorMap, context); + return [concatTensorArray.concat(concatDtype)]; + } + case 'TensorArraySplitV3': { + const splitId = getParamValue('tensorArrayId', node, tensorMap, context); + const splitTensor = getParamValue('tensor', node, tensorMap, context); + const lengths = getParamValue('lengths', node, tensorMap, context); + const splitTensorArray = context.getTensorArray(splitId.id); + splitTensorArray.split(lengths, splitTensor); + return [splitTensorArray.idTensor]; + } + case 'TensorArraySizeV3': { + const sizeId = getParamValue('tensorArrayId', node, tensorMap, context); + const sizeTensorArray = context.getTensorArray(sizeId.id); + return 
[scalar(sizeTensorArray.size(), 'int32')]; + } + case 'TensorArrayCloseV3': { + const closeId = getParamValue('tensorArrayId', node, tensorMap, context); + const closeTensorArray = context.getTensorArray(closeId.id); + closeTensorArray.clearAndClose(); + return [closeTensorArray.idTensor]; + } + case 'TensorListSetItem': { + const idTensor = getParamValue('tensorListId', node, tensorMap, context); + const index = getParamValue('index', node, tensorMap, context); + const writeTensor = getParamValue('tensor', node, tensorMap, context); + const tensorList = context.getTensorList(idTensor.id); + tensorList.setItem(index, writeTensor); + return [tensorList.idTensor]; + } + case 'TensorListGetItem': { + const idTensor = getParamValue('tensorListId', node, tensorMap, context); + const readIndex = getParamValue('index', node, tensorMap, context); + const elementShape = getParamValue('elementShape', node, tensorMap, context); + const elementDType = getParamValue('elementDType', node, tensorMap, context); + const tensorList = context.getTensorList(idTensor.id); + return [tensorList.getItem(readIndex, elementShape, elementDType)]; + } + case 'TensorListScatterV2': + case 'TensorListScatter': { + const scatterIndices = getParamValue('indices', node, tensorMap, context); + const scatterTensor = getParamValue('tensor', node, tensorMap, context); + const elementShape = getParamValue('elementShape', node, tensorMap, context); + const numElements = getParamValue('numElements', node, tensorMap, context); + const tensorList = scatter(scatterTensor, scatterIndices, elementShape, numElements); + context.addTensorList(tensorList); + return [tensorList.idTensor]; + } + case 'TensorListReserve': + case 'EmptyTensorList': { + const elementShape = getParamValue('elementShape', node, tensorMap, context); + const elementDtype = getParamValue('elementDType', node, tensorMap, context); + let numElementsParam; + if (node.op === 'TensorListReserve') { + numElementsParam = 'numElements'; + } + 
else { + numElementsParam = 'maxNumElements'; + } + const numElements = getParamValue(numElementsParam, node, tensorMap, context); + const maxNumElements = node.op === 'TensorListReserve' ? -1 : numElements; + const tensorList = reserve(elementShape, elementDtype, numElements, maxNumElements); + context.addTensorList(tensorList); + return [tensorList.idTensor]; + } + case 'TensorListGather': { + const gatherId = getParamValue('tensorListId', node, tensorMap, context); + const gatherIndices = getParamValue('indices', node, tensorMap, context); + const elementShape = getParamValue('elementShape', node, tensorMap, context); + const elementDtype = getParamValue('elementDType', node, tensorMap, context); + const tensorList = context.getTensorList(gatherId.id); + return [tensorList.gather(gatherIndices, elementDtype, elementShape)]; + } + case 'TensorListStack': { + const idTensor = getParamValue('tensorListId', node, tensorMap, context); + const elementShape = getParamValue('elementShape', node, tensorMap, context); + const elementDtype = getParamValue('elementDType', node, tensorMap, context); + const numElements = getParamValue('numElements', node, tensorMap, context); + const tensorList = context.getTensorList(idTensor.id); + return [tensorList.stack(elementShape, elementDtype, numElements)]; + } + case 'TensorListFromTensor': { + const tensor = getParamValue('tensor', node, tensorMap, context); + const elementShape = getParamValue('elementShape', node, tensorMap, context); + const elementDtype = getParamValue('elementDType', node, tensorMap, context); + const tensorList = fromTensor(tensor, elementShape, elementDtype); + context.addTensorList(tensorList); + return [tensorList.idTensor]; + } + case 'TensorListConcat': + case 'TensorListConcatV2': { + const concatId = getParamValue('tensorListId', node, tensorMap, context); + const tensorList = context.getTensorList(concatId.id); + const concatDtype = getParamValue('dtype', node, tensorMap, context); + const 
elementShape = getParamValue('elementShape', node, tensorMap, context); + return [tensorList.concat(concatDtype, elementShape)]; + } + case 'TensorListPushBack': { + const idTensor = getParamValue('tensorListId', node, tensorMap, context); + const writeTensor = getParamValue('tensor', node, tensorMap, context); + const tensorList = context.getTensorList(idTensor.id); + tensorList.pushBack(writeTensor); + return [tensorList.idTensor]; + } + case 'TensorListPopBack': { + const idTensor = getParamValue('tensorListId', node, tensorMap, context); + const elementShape = getParamValue('elementShape', node, tensorMap, context); + const elementDType = getParamValue('elementDType', node, tensorMap, context); + const tensorList = context.getTensorList(idTensor.id); + return [tensorList.popBack(elementShape, elementDType)]; + } + case 'TensorListSplit': { + const splitTensor = getParamValue('tensor', node, tensorMap, context); + const elementShape = getParamValue('elementShape', node, tensorMap, context); + const lengths = getParamValue('lengths', node, tensorMap, context); + const tensorList = split$1(splitTensor, lengths, elementShape); + context.addTensorList(tensorList); + return [tensorList.idTensor]; + } + case 'TensorListLength': { + const idTensor = getParamValue('tensorListId', node, tensorMap, context); + const tensorList = context.getTensorList(idTensor.id); + return [scalar(tensorList.size(), 'int32')]; + } + case 'TensorListResize': { + const idTensor = getParamValue('tensorListId', node, tensorMap, context); + const size = getParamValue('size', node, tensorMap, context); + const srcTensorList = context.getTensorList(idTensor.id); + const destTensorList = srcTensorList.resize(size); + context.addTensorList(destTensorList); + return [destTensorList.idTensor]; + } + default: + throw TypeError(`Node type ${node.op} is not implemented`); + } + }; + const CATEGORY$h = 'control'; + + /** + * @license + * Copyright 2018 Google LLC. All Rights Reserved. 
+ * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + function fusedConvAndDepthWiseParams(node, tensorMap, context) { + const [extraOp, activationFunc] = getParamValue('fusedOps', node, tensorMap, context); + const isBiasAdd = extraOp === 'biasadd'; + const noBiasAdd = !isBiasAdd; + const isPrelu = activationFunc === 'prelu'; + const isBatchNorm = extraOp === 'fusedbatchnorm'; + const numArgs = getParamValue('numArgs', node, tensorMap, context); + if (isBiasAdd) { + if (isPrelu && numArgs !== 2) { + throw new Error('FusedConv2d and DepthwiseConv2d with BiasAdd and Prelu ' + + 'must have two extra arguments: bias and alpha.'); + } + if (!isPrelu && isBiasAdd && numArgs !== 1) { + throw new Error('FusedConv2d and DepthwiseConv2d with BiasAdd must have ' + + 'one extra argument: bias.'); + } + } + if (isBatchNorm) { + throw new Error('FusedConv2d and DepthwiseConv2d with FusedBatchNorm is not supported'); + } + const stride = getParamValue('strides', node, tensorMap, context); + const pad = getPadding(node, tensorMap, context); + const dataFormat = getParamValue('dataFormat', node, tensorMap, context) + .toUpperCase(); + const dilations = getParamValue('dilations', node, tensorMap, context); + let [biasArg, preluArg] = getParamValue('args', node, tensorMap, context); + if (noBiasAdd) { + preluArg = biasArg; + biasArg = undefined; + } + const leakyreluAlpha = 
getParamValue('leakyreluAlpha', node, tensorMap, context); + return { + stride, + pad, + dataFormat, + dilations, + biasArg, + preluArg, + activationFunc, + leakyreluAlpha + }; + } + const executeOp$h = (node, tensorMap, context, ops = tfOps) => { + switch (node.op) { + case 'Conv1D': { + const stride = getParamValue('stride', node, tensorMap, context); + const pad = getParamValue('pad', node, tensorMap, context); + const dataFormat = getParamValue('dataFormat', node, tensorMap, context) + .toUpperCase(); + const dilation = getParamValue('dilation', node, tensorMap, context); + return [ops.conv1d(getParamValue('x', node, tensorMap, context), getParamValue('filter', node, tensorMap, context), stride, pad, dataFormat, dilation)]; + } + case 'Conv2D': { + const stride = getParamValue('strides', node, tensorMap, context); + const pad = getPadding(node, tensorMap, context); + const dataFormat = getParamValue('dataFormat', node, tensorMap, context) + .toUpperCase(); + const dilations = getParamValue('dilations', node, tensorMap, context); + return [ops.conv2d(getParamValue('x', node, tensorMap, context), getParamValue('filter', node, tensorMap, context), [stride[1], stride[2]], pad, dataFormat, [dilations[1], dilations[2]])]; + } + case '_FusedConv2D': { + const { stride, pad, dataFormat, dilations, biasArg, preluArg, activationFunc, leakyreluAlpha } = fusedConvAndDepthWiseParams(node, tensorMap, context); + return [ops.fused.conv2d({ + x: getParamValue('x', node, tensorMap, context), + filter: getParamValue('filter', node, tensorMap, context), + strides: [stride[1], stride[2]], + pad: pad, + dataFormat: dataFormat, + dilations: [dilations[1], dilations[2]], + bias: biasArg, + activation: activationFunc, + preluActivationWeights: preluArg, + leakyreluAlpha + })]; + } + case 'FusedDepthwiseConv2dNative': { + const { stride, pad, dataFormat, dilations, biasArg, preluArg, activationFunc, leakyreluAlpha, } = fusedConvAndDepthWiseParams(node, tensorMap, context); + return 
[ops.fused.depthwiseConv2d({ + x: getParamValue('x', node, tensorMap, context), + filter: getParamValue('filter', node, tensorMap, context), + strides: [stride[1], stride[2]], + pad: pad, + dataFormat: dataFormat, + dilations: [dilations[1], dilations[2]], + bias: biasArg, + activation: activationFunc, + preluActivationWeights: preluArg, + leakyreluAlpha + })]; + } + case 'Conv2DBackpropInput': + case 'Conv2dTranspose': { + const shape = getParamValue('outputShape', node, tensorMap, context); + const stride = getParamValue('strides', node, tensorMap, context); + const pad = getPadding(node, tensorMap, context); + return [ops.conv2dTranspose(getParamValue('x', node, tensorMap, context), getParamValue('filter', node, tensorMap, context), shape, [stride[1], stride[2]], pad)]; + } + case 'DepthwiseConv2dNative': + case 'DepthwiseConv2d': { + const stride = getParamValue('strides', node, tensorMap, context); + const pad = getPadding(node, tensorMap, context); + const dilations = getParamValue('dilations', node, tensorMap, context); + const dataFormat = getParamValue('dataFormat', node, tensorMap, context) + .toUpperCase(); + return [ops.depthwiseConv2d(getParamValue('input', node, tensorMap, context), getParamValue('filter', node, tensorMap, context), [stride[1], stride[2]], pad, dataFormat, [dilations[1], dilations[2]])]; + } + case 'Conv3D': { + const stride = getParamValue('strides', node, tensorMap, context); + const pad = getParamValue('pad', node, tensorMap, context); + const dataFormat = getParamValue('dataFormat', node, tensorMap, context) + .toUpperCase(); + const dilations = getParamValue('dilations', node, tensorMap, context); + return [ops.conv3d(getParamValue('x', node, tensorMap, context), getParamValue('filter', node, tensorMap, context), [stride[1], stride[2], stride[3]], pad, dataFormat, [dilations[1], dilations[2], dilations[3]])]; + } + case 'AvgPool': { + const stride = getParamValue('strides', node, tensorMap, context); + const pad = 
getParamValue('pad', node, tensorMap, context); + const kernelSize = getParamValue('kernelSize', node, tensorMap, context); + return [ops.avgPool(getParamValue('x', node, tensorMap, context), [kernelSize[1], kernelSize[2]], [stride[1], stride[2]], pad)]; + } + case 'MaxPool': { + const stride = getParamValue('strides', node, tensorMap, context); + const pad = getParamValue('pad', node, tensorMap, context); + const kernelSize = getParamValue('kernelSize', node, tensorMap, context); + return [ops.maxPool(getParamValue('x', node, tensorMap, context), [kernelSize[1], kernelSize[2]], [stride[1], stride[2]], pad)]; + } + case 'MaxPoolWithArgmax': { + const stride = getParamValue('strides', node, tensorMap, context); + const pad = getParamValue('pad', node, tensorMap, context); + const kernelSize = getParamValue('kernelSize', node, tensorMap, context); + const includeBatchInIndex = getParamValue('includeBatchInIndex', node, tensorMap, context); + const { result, indexes } = ops.maxPoolWithArgmax(getParamValue('x', node, tensorMap, context), [kernelSize[1], kernelSize[2]], [stride[1], stride[2]], pad, includeBatchInIndex); + return [result, indexes]; + } + case 'AvgPool3D': { + const stride = getParamValue('strides', node, tensorMap, context); + const pad = getParamValue('pad', node, tensorMap, context); + const kernelSize = getParamValue('kernelSize', node, tensorMap, context); + return [ops.avgPool3d(getParamValue('x', node, tensorMap, context), [kernelSize[1], kernelSize[2], kernelSize[3]], [stride[1], stride[2], stride[3]], pad)]; + } + case 'MaxPool3D': { + const stride = getParamValue('strides', node, tensorMap, context); + const pad = getParamValue('pad', node, tensorMap, context); + const kernelSize = getParamValue('kernelSize', node, tensorMap, context); + return [ops.maxPool3d(getParamValue('x', node, tensorMap, context), [kernelSize[1], kernelSize[2], kernelSize[3]], [stride[1], stride[2], stride[3]], pad)]; + } + case 'Dilation2D': { + const strides = 
getParamValue('strides', node, tensorMap, context); + const pad = getParamValue('pad', node, tensorMap, context); + const dilations = getParamValue('dilations', node, tensorMap, context); + // strides: [1, stride_height, stride_width, 1]. + const strideHeight = strides[1]; + const strideWidth = strides[2]; + // dilations: [1, dilation_height, dilation_width, 1]. + const dilationHeight = dilations[1]; + const dilationWidth = dilations[2]; + return [ops.dilation2d(getParamValue('x', node, tensorMap, context), getParamValue('filter', node, tensorMap, context), [strideHeight, strideWidth], pad, [dilationHeight, dilationWidth], 'NHWC' /* dataFormat */)]; + } + default: + throw TypeError(`Node type ${node.op} is not implemented`); + } + }; + const CATEGORY$g = 'convolution'; + + /** + * @license + * Copyright 2018 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ + const executeOp$g = (node, tensorMap, context, ops = tfOps) => { + switch (node.op) { + case 'Fill': { + const shape = getParamValue('shape', node, tensorMap, context); + const dtype = getParamValue('dtype', node, tensorMap, context); + const value = getParamValue('value', node, tensorMap, context); + return [ops.fill(shape, value, dtype)]; + } + case 'LinSpace': { + const start = getParamValue('start', node, tensorMap, context); + const stop = getParamValue('stop', node, tensorMap, context); + const num = getParamValue('num', node, tensorMap, context); + return [ops.linspace(start, stop, num)]; + } + case 'Multinomial': { + const logits = getParamValue('logits', node, tensorMap, context); + const numSamples = getParamValue('numSamples', node, tensorMap, context); + const seed = getParamValue('seed', node, tensorMap, context); + return [ops.multinomial(logits, numSamples, seed)]; + } + case 'OneHot': { + const indices = getParamValue('indices', node, tensorMap, context); + const depth = getParamValue('depth', node, tensorMap, context); + const onValue = getParamValue('onValue', node, tensorMap, context); + const offValue = getParamValue('offValue', node, tensorMap, context); + const dtype = getParamValue('dtype', node, tensorMap, context); + return [ops.oneHot(indices, depth, onValue, offValue, dtype)]; + } + case 'Ones': { + return [ops.ones(getParamValue('shape', node, tensorMap, context), getParamValue('dtype', node, tensorMap, context))]; + } + case 'OnesLike': { + return [ops.onesLike(getParamValue('x', node, tensorMap, context))]; + } + case 'RandomStandardNormal': { + return [ops.randomStandardNormal(getParamValue('shape', node, tensorMap, context), getParamValue('dtype', node, tensorMap, context), getParamValue('seed', node, tensorMap, context))]; + } + case 'RandomUniform': { + return [ops.randomUniform( + // tslint:disable-next-line:no-any + getParamValue('shape', 
node, tensorMap, context), getParamValue('minval', node, tensorMap, context), getParamValue('maxval', node, tensorMap, context), getParamValue('dtype', node, tensorMap, context))]; + } + case 'RandomUniformInt': { + return [ops.randomUniformInt(getParamValue('shape', node, tensorMap, context), getParamValue('minval', node, tensorMap, context), getParamValue('maxval', node, tensorMap, context), getParamValue('seed', node, tensorMap, context))]; + } + case 'Range': { + const start = getParamValue('start', node, tensorMap, context); + const stop = getParamValue('stop', node, tensorMap, context); + const step = getParamValue('step', node, tensorMap, context); + return [ops.range(start, stop, step, getParamValue('dtype', node, tensorMap, context))]; + } + case 'TruncatedNormal': { + const shape = getParamValue('shape', node, tensorMap, context); + const mean = getParamValue('mean', node, tensorMap, context); + const stdDev = getParamValue('stdDev', node, tensorMap, context); + const seed = getParamValue('seed', node, tensorMap, context); + return [ops.truncatedNormal(shape, mean, stdDev, getParamValue('dtype', node, tensorMap, context), seed)]; + } + case 'Zeros': { + return [ops.zeros(getParamValue('shape', node, tensorMap, context), getParamValue('dtype', node, tensorMap, context))]; + } + case 'ZerosLike': { + return [ops.zerosLike(getParamValue('x', node, tensorMap, context))]; + } + default: + throw TypeError(`Node type ${node.op} is not implemented`); + } + }; + const CATEGORY$f = 'creation'; + + /** + * @license + * Copyright 2018 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + function nmsParams(node, tensorMap, context) { + const boxes = getParamValue('boxes', node, tensorMap, context); + const scores = getParamValue('scores', node, tensorMap, context); + const maxOutputSize = getParamValue('maxOutputSize', node, tensorMap, context); + const iouThreshold = getParamValue('iouThreshold', node, tensorMap, context); + const scoreThreshold = getParamValue('scoreThreshold', node, tensorMap, context); + const softNmsSigma = getParamValue('softNmsSigma', node, tensorMap, context); + return { + boxes, + scores, + maxOutputSize, + iouThreshold, + scoreThreshold, + softNmsSigma + }; + } + const executeOp$f = async (node, tensorMap, context, resourceManager, ops = tfOps) => { + switch (node.op) { + case 'NonMaxSuppressionV5': { + const { boxes, scores, maxOutputSize, iouThreshold, scoreThreshold, softNmsSigma } = nmsParams(node, tensorMap, context); + const result = await ops.image.nonMaxSuppressionWithScoreAsync(boxes, scores, maxOutputSize, iouThreshold, scoreThreshold, softNmsSigma); + return [result.selectedIndices, result.selectedScores]; + } + case 'NonMaxSuppressionV4': { + const { boxes, scores, maxOutputSize, iouThreshold, scoreThreshold } = nmsParams(node, tensorMap, context); + const padToMaxOutputSize = getParamValue('padToMaxOutputSize', node, tensorMap, context); + const result = await ops.image.nonMaxSuppressionPaddedAsync(boxes, scores, maxOutputSize, iouThreshold, scoreThreshold, padToMaxOutputSize); + return 
[result.selectedIndices, result.validOutputs]; + } + case 'NonMaxSuppressionV3': + case 'NonMaxSuppressionV2': { + const { boxes, scores, maxOutputSize, iouThreshold, scoreThreshold } = nmsParams(node, tensorMap, context); + return [await ops.image.nonMaxSuppressionAsync(boxes, scores, maxOutputSize, iouThreshold, scoreThreshold)]; + } + case 'Where': { + const condition = ops.cast(getParamValue('condition', node, tensorMap, context), 'bool'); + const result = [await ops.whereAsync(condition)]; + condition.dispose(); + return result; + } + case 'ListDiff': { + return ops.setdiff1dAsync(getParamValue('x', node, tensorMap, context), getParamValue('y', node, tensorMap, context)); + } + default: + throw TypeError(`Node type ${node.op} is not implemented`); + } + }; + const CATEGORY$e = 'dynamic'; + + /** + * @license + * Copyright 2018 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ + const executeOp$e = (node, tensorMap, context, ops = tfOps) => { + switch (node.op) { + case 'LowerBound': { + const sortedSequence = getParamValue('sortedSequence', node, tensorMap, context); + const values = getParamValue('values', node, tensorMap, context); + return [ops.lowerBound(sortedSequence, values)]; + } + case 'TopKV2': { + const x = getParamValue('x', node, tensorMap, context); + const k = getParamValue('k', node, tensorMap, context); + const sorted = getParamValue('sorted', node, tensorMap, context); + const result = ops.topk(x, k, sorted); + return [result.values, result.indices]; + } + case 'UpperBound': { + const sortedSequence = getParamValue('sortedSequence', node, tensorMap, context); + const values = getParamValue('values', node, tensorMap, context); + return [ops.upperBound(sortedSequence, values)]; + } + case 'Unique': { + const x = getParamValue('x', node, tensorMap, context); + const result = ops.unique(x); + return [result.values, result.indices]; + } + case 'UniqueV2': { + const x = getParamValue('x', node, tensorMap, context); + const axis = getParamValue('axis', node, tensorMap, context); + const result = ops.unique(x, axis); + return [result.values, result.indices]; + } + default: + throw TypeError(`Node type ${node.op} is not implemented`); + } + }; + const CATEGORY$d = 'evaluation'; + + /** + * @license + * Copyright 2018 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + const executeOp$d = (node, tensorMap, context, ops = tfOps) => { + switch (node.op) { + case 'Const': { + return tensorMap[node.name]; + } + case 'PlaceholderWithDefault': + const def = getParamValue('default', node, tensorMap, context); + return [getTensor(node.name, tensorMap, context) || def]; + case 'Placeholder': + return [getTensor(node.name, tensorMap, context)]; + case 'Identity': + case 'StopGradient': + case 'FakeQuantWithMinMaxVars': { // This op is currently ignored. + const data = getParamValue('x', node, tensorMap, context); + return [cloneTensor(data)]; + } + case 'IdentityN': + return getParamValue('x', node, tensorMap, context) + .map((t) => cloneTensor(t)); + case 'Snapshot': + const snapshot = getParamValue('x', node, tensorMap, context); + return [cloneTensor(snapshot)]; + case 'Shape': + return [ops.tensor1d(getParamValue('x', node, tensorMap, context).shape, 'int32')]; + case 'ShapeN': + return getParamValue('x', node, tensorMap, context) + .map((t) => ops.tensor1d(t.shape)); + case 'Size': + return [ops.scalar(getParamValue('x', node, tensorMap, context).size, 'int32')]; + case 'Rank': + return [ops.scalar(getParamValue('x', node, tensorMap, context).rank, 'int32')]; + case 'NoOp': + return [ops.scalar(1)]; + case 'Print': + const input = getParamValue('x', node, tensorMap, context); + const data = getParamValue('data', node, tensorMap, context); + const message = getParamValue('message', node, tensorMap, context); + const summarize = getParamValue('summarize', node, tensorMap, context); + console.warn('The graph has a tf.print() operation,' + + 'usually used for debugging, which slows down performance.'); + console.log(message); + for (let i = 0; i < data.length; i++) { + console.log(Array.prototype.slice.call(data[i].dataSync()) + .slice(0, 
summarize)); + } + return [input]; + default: + throw TypeError(`Node type ${node.op} is not implemented`); + } + }; + const CATEGORY$c = 'graph'; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + /** + * Hashtable contains a set of tensors, which can be accessed by key. + */ + class HashTable { + get id() { + return this.handle.id; + } + /** + * Constructor of HashTable. Creates a hash table. + * + * @param keyDType `dtype` of the table keys. + * @param valueDType `dtype` of the table values. + */ + constructor(keyDType, valueDType) { + this.keyDType = keyDType; + this.valueDType = valueDType; + this.handle = scalar(0); + // tslint:disable-next-line: no-any + this.tensorMap = new Map(); + keep(this.handle); + } + /** + * Dispose the tensors and handle and clear the hashtable. + */ + clearAndClose() { + this.tensorMap.forEach(value => value.dispose()); + this.tensorMap.clear(); + this.handle.dispose(); + } + /** + * The number of items in the hash table. + */ + size() { + return this.tensorMap.size; + } + /** + * The number of items in the hash table as a rank-0 tensor. + */ + tensorSize() { + return scalar(this.size(), 'int32'); + } + /** + * Replaces the contents of the table with the specified keys and values. + * @param keys Keys to store in the hashtable. 
+ * @param values Values to store in the hashtable. + */ + async import(keys, values) { + this.checkKeyAndValueTensor(keys, values); + // We only store the primitive values of the keys, this allows lookup + // to be O(1). + const $keys = await keys.data(); + // Clear the hashTable before inserting new values. + this.tensorMap.forEach(value => value.dispose()); + this.tensorMap.clear(); + return tidy(() => { + const $values = unstack(values); + const keysLength = $keys.length; + const valuesLength = $values.length; + assert$1(keysLength === valuesLength, () => `The number of elements doesn't match, keys has ` + + `${keysLength} elements, the values has ${valuesLength} ` + + `elements.`); + for (let i = 0; i < keysLength; i++) { + const key = $keys[i]; + const value = $values[i]; + keep(value); + this.tensorMap.set(key, value); + } + return this.handle; + }); + } + /** + * Looks up keys in a hash table, outputs the corresponding values. + * + * Performs batch lookups, for every element in the key tensor, `find` + * stacks the corresponding value into the return tensor. + * + * If an element is not present in the table, the given `defaultValue` is + * used. + * + * @param keys Keys to look up. Must have the same type as the keys of the + * table. + * @param defaultValue The scalar `defaultValue` is the value output for keys + * not present in the table. It must also be of the same type as the + * table values. + */ + async find(keys, defaultValue) { + this.checkKeyAndValueTensor(keys, defaultValue); + const $keys = await keys.data(); + return tidy(() => { + const result = []; + for (let i = 0; i < $keys.length; i++) { + const key = $keys[i]; + const value = this.findWithDefault(key, defaultValue); + result.push(value); + } + return stack(result); + }); + } + // tslint:disable-next-line: no-any + findWithDefault(key, defaultValue) { + const result = this.tensorMap.get(key); + return result != null ? 
result : defaultValue; + } + checkKeyAndValueTensor(key, value) { + if (key.dtype !== this.keyDType) { + throw new Error(`Expect key dtype ${this.keyDType}, but got ` + + `${key.dtype}`); + } + if (value.dtype !== this.valueDType) { + throw new Error(`Expect value dtype ${this.valueDType}, but got ` + + `${value.dtype}`); + } + } + } + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + const executeOp$c = async (node, tensorMap, context, resourceManager) => { + switch (node.op) { + case 'HashTable': + case 'HashTableV2': { + const existingTableHandle = resourceManager.getHashTableHandleByName(node.name); + // Table is shared with initializer. 
+ if (existingTableHandle != null) { + return [existingTableHandle]; + } + else { + const keyDType = getParamValue('keyDType', node, tensorMap, context); + const valueDType = getParamValue('valueDType', node, tensorMap, context); + const hashTable = new HashTable(keyDType, valueDType); + resourceManager.addHashTable(node.name, hashTable); + return [hashTable.handle]; + } + } + case 'InitializeTable': + case 'InitializeTableV2': + case 'LookupTableImport': + case 'LookupTableImportV2': { + const handle = getParamValue('tableHandle', node, tensorMap, context, resourceManager); + const keys = getParamValue('keys', node, tensorMap, context); + const values = getParamValue('values', node, tensorMap, context); + const hashTable = resourceManager.getHashTableById(handle.id); + return [await hashTable.import(keys, values)]; + } + case 'LookupTableFind': + case 'LookupTableFindV2': { + const handle = getParamValue('tableHandle', node, tensorMap, context, resourceManager); + const keys = getParamValue('keys', node, tensorMap, context); + const defaultValue = getParamValue('defaultValue', node, tensorMap, context); + const hashTable = resourceManager.getHashTableById(handle.id); + return [await hashTable.find(keys, defaultValue)]; + } + case 'LookupTableSize': + case 'LookupTableSizeV2': { + const handle = getParamValue('tableHandle', node, tensorMap, context, resourceManager); + const hashTable = resourceManager.getHashTableById(handle.id); + return [hashTable.tensorSize()]; + } + default: + throw TypeError(`Node type ${node.op} is not implemented`); + } + }; + const CATEGORY$b = 'hash_table'; + + /** + * @license + * Copyright 2018 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + const executeOp$b = (node, tensorMap, context, ops = tfOps) => { + switch (node.op) { + case 'ResizeBilinear': { + const images = getParamValue('images', node, tensorMap, context); + const size = getParamValue('size', node, tensorMap, context); + const alignCorners = getParamValue('alignCorners', node, tensorMap, context); + const halfPixelCenters = getParamValue('halfPixelCenters', node, tensorMap, context); + return [ops.image.resizeBilinear(images, [size[0], size[1]], alignCorners, halfPixelCenters)]; + } + case 'ResizeNearestNeighbor': { + const images = getParamValue('images', node, tensorMap, context); + const size = getParamValue('size', node, tensorMap, context); + const alignCorners = getParamValue('alignCorners', node, tensorMap, context); + const halfPixelCenters = getParamValue('halfPixelCenters', node, tensorMap, context); + return [ops.image.resizeNearestNeighbor(images, [size[0], size[1]], alignCorners, halfPixelCenters)]; + } + case 'CropAndResize': { + const image = getParamValue('image', node, tensorMap, context); + const boxes = getParamValue('boxes', node, tensorMap, context); + const boxInd = getParamValue('boxInd', node, tensorMap, context); + const cropSize = getParamValue('cropSize', node, tensorMap, context); + const method = getParamValue('method', node, tensorMap, context); + const extrapolationValue = getParamValue('extrapolationValue', node, tensorMap, context); + return [ops.image.cropAndResize(image, boxes, boxInd, cropSize, 
method, extrapolationValue)]; + } + case 'ImageProjectiveTransformV3': { + const images = getParamValue('images', node, tensorMap, context); + const transforms = getParamValue('transforms', node, tensorMap, context); + const outputShape = getParamValue('outputShape', node, tensorMap, context); + const fillValue = getParamValue('fillValue', node, tensorMap, context); + const interpolation = getParamValue('interpolation', node, tensorMap, context); + const fillMode = getParamValue('fillMode', node, tensorMap, context); + return [ops.image.transform(images, transforms, interpolation.toLowerCase(), fillMode.toLowerCase(), fillValue, outputShape)]; + } + default: + throw TypeError(`Node type ${node.op} is not implemented`); + } + }; + const CATEGORY$a = 'image'; + + /** + * @license + * Copyright 2018 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ + const executeOp$a = (node, tensorMap, context, ops = tfOps) => { + switch (node.op) { + case 'Equal': { + return [ops.equal(getParamValue('a', node, tensorMap, context), getParamValue('b', node, tensorMap, context))]; + } + case 'NotEqual': { + return [ops.notEqual(getParamValue('a', node, tensorMap, context), getParamValue('b', node, tensorMap, context))]; + } + case 'Greater': { + return [ops.greater(getParamValue('a', node, tensorMap, context), getParamValue('b', node, tensorMap, context))]; + } + case 'GreaterEqual': { + return [ops.greaterEqual(getParamValue('a', node, tensorMap, context), getParamValue('b', node, tensorMap, context))]; + } + case 'Less': { + return [ops.less(getParamValue('a', node, tensorMap, context), getParamValue('b', node, tensorMap, context))]; + } + case 'LessEqual': { + return [ops.lessEqual(getParamValue('a', node, tensorMap, context), getParamValue('b', node, tensorMap, context))]; + } + case 'LogicalAnd': { + return [ops.logicalAnd(getParamValue('a', node, tensorMap, context), getParamValue('b', node, tensorMap, context))]; + } + case 'LogicalNot': { + return [ops.logicalNot(getParamValue('a', node, tensorMap, context))]; + } + case 'LogicalOr': { + return [ops.logicalOr(getParamValue('a', node, tensorMap, context), getParamValue('b', node, tensorMap, context))]; + } + case 'Select': + case 'SelectV2': { + return [ops.where(getParamValue('condition', node, tensorMap, context), getParamValue('a', node, tensorMap, context), getParamValue('b', node, tensorMap, context))]; + } + case 'BitwiseAnd': { + return [ops.bitwiseAnd(getParamValue('a', node, tensorMap, context), getParamValue('b', node, tensorMap, context))]; + } + default: + throw TypeError(`Node type ${node.op} is not implemented`); + } + }; + const CATEGORY$9 = 'logical'; + + /** + * @license + * Copyright 2018 Google LLC. All Rights Reserved. 
+ * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + const executeOp$9 = (node, tensorMap, context, ops = tfOps) => { + switch (node.op) { + case 'BatchMatMul': + case 'BatchMatMulV2': + case 'MatMul': + return [ops.matMul(getParamValue('a', node, tensorMap, context), getParamValue('b', node, tensorMap, context), getParamValue('transposeA', node, tensorMap, context), getParamValue('transposeB', node, tensorMap, context))]; + case 'Einsum': + return [ops.einsum(getParamValue('equation', node, tensorMap, context), ...getParamValue('tensors', node, tensorMap, context))]; + case 'Transpose': + return [ops.transpose(getParamValue('x', node, tensorMap, context), getParamValue('perm', node, tensorMap, context))]; + case '_FusedMatMul': + const [extraOp, activationFunc] = getParamValue('fusedOps', node, tensorMap, context); + const isBiasAdd = extraOp === 'biasadd'; + const isPrelu = activationFunc === 'prelu'; + const numArgs = getParamValue('numArgs', node, tensorMap, context); + const leakyreluAlpha = getParamValue('leakyreluAlpha', node, tensorMap, context); + if (isBiasAdd) { + if (isPrelu && numArgs !== 2) { + throw new Error('Fused MatMul with BiasAdd and Prelu must have two ' + + 'extra arguments: bias and alpha.'); + } + if (!isPrelu && numArgs !== 1) { + throw new Error('Fused MatMul with BiasAdd must have one extra argument: bias.'); + } + } + const [biasArg, preluArg] = 
getParamValue('args', node, tensorMap, context); + return [ops.fused.matMul({ + a: getParamValue('a', node, tensorMap, context), + b: getParamValue('b', node, tensorMap, context), + transposeA: getParamValue('transposeA', node, tensorMap, context), + transposeB: getParamValue('transposeB', node, tensorMap, context), + bias: biasArg, + activation: activationFunc, + preluActivationWeights: preluArg, + leakyreluAlpha + })]; + case 'MatrixBandPart': + return [ops.linalg.bandPart(getParamValue('a', node, tensorMap, context), getParamValue('numLower', node, tensorMap, context), getParamValue('numUpper', node, tensorMap, context))]; + default: + throw TypeError(`Node type ${node.op} is not implemented`); + } + }; + const CATEGORY$8 = 'matrices'; + + /** + * @license + * Copyright 2018 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ + const executeOp$8 = (node, tensorMap, context, ops = tfOps) => { + switch (node.op) { + case 'EuclideanNorm': + return [ops.euclideanNorm(getParamValue('x', node, tensorMap, context), getParamValue('axis', node, tensorMap, context), getParamValue('keepDims', node, tensorMap, context))]; + case 'FusedBatchNorm': + case 'FusedBatchNormV2': { + return [ops.batchNorm(getParamValue('x', node, tensorMap, context), getParamValue('mean', node, tensorMap, context), getParamValue('variance', node, tensorMap, context), getParamValue('offset', node, tensorMap, context), getParamValue('scale', node, tensorMap, context), getParamValue('epsilon', node, tensorMap, context))]; + } + case 'FusedBatchNormV3': { + return [ops.batchNorm(getParamValue('x', node, tensorMap, context), getParamValue('mean', node, tensorMap, context), getParamValue('variance', node, tensorMap, context), getParamValue('offset', node, tensorMap, context), getParamValue('scale', node, tensorMap, context), getParamValue('epsilon', node, tensorMap, context))]; + } + case 'LRN': { + return [ops.localResponseNormalization(getParamValue('x', node, tensorMap, context), getParamValue('radius', node, tensorMap, context), getParamValue('bias', node, tensorMap, context), getParamValue('alpha', node, tensorMap, context), getParamValue('beta', node, tensorMap, context))]; + } + case 'Softmax': { + return [ops.softmax(getParamValue('x', node, tensorMap, context))]; + } + case 'LogSoftmax': { + return [ops.logSoftmax(getParamValue('x', node, tensorMap, context))]; + } + default: + throw TypeError(`Node type ${node.op} is not implemented`); + } + }; + const CATEGORY$7 = 'normalization'; + + /** + * @license + * Copyright 2022 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + const executeOp$7 = (node, tensorMap, context, ops = tfOps) => { + switch (node.op) { + case 'RaggedGather': { + const { outputNestedSplits, outputDenseValues, } = ops.raggedGather(getParamValue('paramsNestedSplits', node, tensorMap, context), getParamValue('paramsDenseValues', node, tensorMap, context), getParamValue('indices', node, tensorMap, context), getParamValue('outputRaggedRank', node, tensorMap, context)); + return outputNestedSplits.concat(outputDenseValues); + } + case 'RaggedRange': { + const { rtNestedSplits, rtDenseValues } = ops.raggedRange(getParamValue('starts', node, tensorMap, context), getParamValue('limits', node, tensorMap, context), getParamValue('splits', node, tensorMap, context)); + return [rtNestedSplits, rtDenseValues]; + } + case 'RaggedTensorToTensor': { + return [ops.raggedTensorToTensor(getParamValue('shape', node, tensorMap, context), getParamValue('values', node, tensorMap, context), getParamValue('defaultValue', node, tensorMap, context), getParamValue('rowPartitionTensors', node, tensorMap, context), getParamValue('rowPartitionTypes', node, tensorMap, context))]; + } + default: + throw TypeError(`Node type ${node.op} is not implemented`); + } + }; + const CATEGORY$6 = 'ragged'; + + /** + * @license + * Copyright 2018 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + const executeOp$6 = (node, tensorMap, context, ops = tfOps) => { + switch (node.op) { + case 'Max': { + const axis = getParamValue('axis', node, tensorMap, context); + const keepDims = getParamValue('keepDims', node, tensorMap, context); + return [ops.max(getParamValue('x', node, tensorMap, context), axis, keepDims)]; + } + case 'Mean': { + const axis = getParamValue('axis', node, tensorMap, context); + const keepDims = getParamValue('keepDims', node, tensorMap, context); + return [ops.mean(getParamValue('x', node, tensorMap, context), axis, keepDims)]; + } + case 'Min': { + const axis = getParamValue('axis', node, tensorMap, context); + const keepDims = getParamValue('keepDims', node, tensorMap, context); + return [ops.min(getParamValue('x', node, tensorMap, context), axis, keepDims)]; + } + case 'Sum': { + const axis = getParamValue('axis', node, tensorMap, context); + const keepDims = getParamValue('keepDims', node, tensorMap, context); + return [ops.sum(getParamValue('x', node, tensorMap, context), axis, keepDims)]; + } + case 'All': { + const axis = getParamValue('axis', node, tensorMap, context); + const keepDims = getParamValue('keepDims', node, tensorMap, context); + return [ops.all(getParamValue('x', node, tensorMap, context), axis, keepDims)]; + } + case 'Any': { + const axis = getParamValue('axis', node, tensorMap, context); + const keepDims = getParamValue('keepDims', node, tensorMap, context); + return [ops.any(getParamValue('x', node, tensorMap, 
context), axis, keepDims)]; + } + case 'ArgMax': { + const axis = getParamValue('axis', node, tensorMap, context); + return [ops.argMax(getParamValue('x', node, tensorMap, context), axis)]; + } + case 'ArgMin': { + const axis = getParamValue('axis', node, tensorMap, context); + return [ops.argMin(getParamValue('x', node, tensorMap, context), axis)]; + } + case 'Prod': { + const axis = getParamValue('axis', node, tensorMap, context); + const keepDims = getParamValue('keepDims', node, tensorMap, context); + return [ops.prod(getParamValue('x', node, tensorMap, context), axis, keepDims)]; + } + case 'Cumprod': { + const axis = getParamValue('axis', node, tensorMap, context); + const exclusive = getParamValue('exclusive', node, tensorMap, context); + const reverse = getParamValue('reverse', node, tensorMap, context); + return [ops.cumprod(getParamValue('x', node, tensorMap, context), axis, exclusive, reverse)]; + } + case 'Cumsum': { + const axis = getParamValue('axis', node, tensorMap, context); + const exclusive = getParamValue('exclusive', node, tensorMap, context); + const reverse = getParamValue('reverse', node, tensorMap, context); + return [ops.cumsum(getParamValue('x', node, tensorMap, context), axis, exclusive, reverse)]; + } + case 'Bincount': + const x = getParamValue('x', node, tensorMap, context); + const weights = getParamValue('weights', node, tensorMap, context); + const size = getParamValue('size', node, tensorMap, context); + return [ops.bincount(x, weights, size)]; + case 'DenseBincount': { + const x = getParamValue('x', node, tensorMap, context); + const weights = getParamValue('weights', node, tensorMap, context); + const size = getParamValue('size', node, tensorMap, context); + const binaryOutput = getParamValue('binaryOutput', node, tensorMap, context); + return [ops.denseBincount(x, weights, size, binaryOutput)]; + } + default: + throw TypeError(`Node type ${node.op} is not implemented`); + } + }; + const CATEGORY$5 = 'reduction'; + + /** + * 
@license + * Copyright 2018 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + const executeOp$5 = (node, tensorMap, context, ops = tfOps) => { + switch (node.op) { + case 'ConcatV2': + case 'Concat': { + const n = getParamValue('n', node, tensorMap, context); + const axis = getParamValue('axis', node, tensorMap, context); + let inputs = getParamValue('tensors', node, tensorMap, context); + inputs = inputs.slice(0, n); + return [ops.concat(inputs, axis)]; + } + case 'Gather': { + const input = getParamValue('x', node, tensorMap, context); + const indices = getParamValue('indices', node, tensorMap, context); + return [ops.gather(input, ops.cast(indices, 'int32'), 0)]; + } + case 'GatherV2': { + const axis = getParamValue('axis', node, tensorMap, context); + const batchDims = getParamValue('batchDims', node, tensorMap, context); + const input = getParamValue('x', node, tensorMap, context); + const indices = getParamValue('indices', node, tensorMap, context); + return [ops.gather(input, ops.cast(indices, 'int32'), axis, batchDims)]; + } + case 'Reverse': { + const dims = getParamValue('dims', node, tensorMap, context); + const axis = []; + for (let i = 0; i < dims.length; i++) { + if (dims[i]) { + axis.push(i); + } + } + const input = getParamValue('x', node, tensorMap, context); + return [ops.reverse(input, axis)]; + } + case 'ReverseV2': { + 
const axis = getParamValue('axis', node, tensorMap, context); + const input = getParamValue('x', node, tensorMap, context); + return [ops.reverse(input, axis)]; + } + case 'Slice': { + // tslint:disable-next-line:no-any + const begin = getParamValue('begin', node, tensorMap, context); + // tslint:disable-next-line:no-any + const size = getParamValue('size', node, tensorMap, context); + return [ops.slice(getParamValue('x', node, tensorMap, context), begin, size)]; + } + case 'StridedSlice': { + const begin = getParamValue('begin', node, tensorMap, context); + const end = getParamValue('end', node, tensorMap, context); + const strides = getParamValue('strides', node, tensorMap, context); + const beginMask = getParamValue('beginMask', node, tensorMap, context); + const endMask = getParamValue('endMask', node, tensorMap, context); + const ellipsisMask = getParamValue('ellipsisMask', node, tensorMap, context); + const newAxisMask = getParamValue('newAxisMask', node, tensorMap, context); + const shrinkAxisMask = getParamValue('shrinkAxisMask', node, tensorMap, context); + const tensor = getParamValue('x', node, tensorMap, context); + return [ops.stridedSlice(tensor, begin, end, strides, beginMask, endMask, ellipsisMask, newAxisMask, shrinkAxisMask)]; + } + case 'Pack': { + return tidy(() => { + const axis = getParamValue('axis', node, tensorMap, context); + const tensors = getParamValue('tensors', node, tensorMap, context); + // Reshape the tensors to the first tensor's shape if they don't + // match. + const shape = tensors[0].shape; + const squeezedShape = ops.squeeze(tensors[0]).shape; + const mapped = tensors.map(tensor => { + const sameShape = arraysEqual(tensor.shape, shape); + if (!sameShape && + !arraysEqual(ops.squeeze(tensor).shape, squeezedShape)) { + throw new Error('the input tensors shape does not match'); + } + return sameShape ? 
tensor : ops.reshape(tensor, shape); + }); + return [ops.stack(mapped, axis)]; + }); + } + case 'Unpack': { + const axis = getParamValue('axis', node, tensorMap, context); + const tensor = getParamValue('tensor', node, tensorMap, context); + return ops.unstack(tensor, axis); + } + case 'Tile': { + const reps = getParamValue('reps', node, tensorMap, context); + return [ops.tile(getParamValue('x', node, tensorMap, context), reps)]; + } + case 'Split': + case 'SplitV': { + const axis = getParamValue('axis', node, tensorMap, context); + const numOrSizeSplits = getParamValue('numOrSizeSplits', node, tensorMap, context); + const tensor = getParamValue('x', node, tensorMap, context); + return ops.split(tensor, numOrSizeSplits, axis); + } + case 'ScatterNd': { + const indices = getParamValue('indices', node, tensorMap, context); + const values = getParamValue('values', node, tensorMap, context); + const shape = getParamValue('shape', node, tensorMap, context); + return [ops.scatterND(indices, values, shape)]; + } + case 'GatherNd': { + const x = getParamValue('x', node, tensorMap, context); + const indices = getParamValue('indices', node, tensorMap, context); + return [ops.gatherND(x, indices)]; + } + case 'SparseToDense': { + const indices = getParamValue('sparseIndices', node, tensorMap, context); + const shape = getParamValue('outputShape', node, tensorMap, context); + const sparseValues = getParamValue('sparseValues', node, tensorMap, context); + const defaultValue = getParamValue('defaultValue', node, tensorMap, context); + return [ops.sparseToDense(indices, sparseValues, shape, sparseValues.dtype === defaultValue.dtype ? 
+ defaultValue : + ops.cast(defaultValue, sparseValues.dtype))]; + } + case 'TensorScatterUpdate': { + const indices = getParamValue('indices', node, tensorMap, context); + const values = getParamValue('values', node, tensorMap, context); + const tensor = getParamValue('tensor', node, tensorMap, context); + return [ops.tensorScatterUpdate(tensor, indices, values)]; + } + default: + throw TypeError(`Node type ${node.op} is not implemented`); + } + }; + const CATEGORY$4 = 'slice_join'; + + /** + * @license + * Copyright 2021 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ + const executeOp$4 = (node, tensorMap, context, ops = tfOps) => { + switch (node.op) { + case 'SparseFillEmptyRows': { + const { outputIndices, outputValues, emptyRowIndicator, reverseIndexMap } = ops.sparse.sparseFillEmptyRows(getParamValue('indices', node, tensorMap, context), getParamValue('values', node, tensorMap, context), getParamValue('denseShape', node, tensorMap, context), getParamValue('defaultValue', node, tensorMap, context)); + return [ + outputIndices, outputValues, emptyRowIndicator, reverseIndexMap + ]; + } + case 'SparseReshape': { + const { outputIndices, outputShape } = ops.sparse.sparseReshape(getParamValue('inputIndices', node, tensorMap, context), getParamValue('inputShape', node, tensorMap, context), getParamValue('newShape', node, tensorMap, context)); + return [outputIndices, outputShape]; + } + case 'SparseSegmentMean': { + const outputData = ops.sparse.sparseSegmentMean(getParamValue('data', node, tensorMap, context), getParamValue('indices', node, tensorMap, context), getParamValue('segmentIds', node, tensorMap, context)); + return [outputData]; + } + case 'SparseSegmentSum': { + const outputData = ops.sparse.sparseSegmentSum(getParamValue('data', node, tensorMap, context), getParamValue('indices', node, tensorMap, context), getParamValue('segmentIds', node, tensorMap, context)); + return [outputData]; + } + default: + throw TypeError(`Node type ${node.op} is not implemented`); + } + }; + const CATEGORY$3 = 'sparse'; + + /** + * @license + * Copyright 2018 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + const executeOp$3 = (node, tensorMap, context, ops = tfOps) => { + switch (node.op) { + case 'FFT': { + return [ops.fft(getParamValue('x', node, tensorMap, context))]; + } + case 'IFFT': { + return [ops.ifft(getParamValue('x', node, tensorMap, context))]; + } + case 'RFFT': { + return [ops.rfft(getParamValue('x', node, tensorMap, context))]; + } + case 'IRFFT': { + return [ops.irfft(getParamValue('x', node, tensorMap, context))]; + } + default: + throw TypeError(`Node type ${node.op} is not implemented`); + } + }; + const CATEGORY$2 = 'spectral'; + + /** + * @license + * Copyright 2021 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ + const executeOp$2 = (node, tensorMap, context, ops = tfOps) => { + switch (node.op) { + case 'StaticRegexReplace': { + return [ops.string.staticRegexReplace(getParamValue('input', node, tensorMap, context), getParamValue('pattern', node, tensorMap, context), getParamValue('rewrite', node, tensorMap, context), getParamValue('replaceGlobal', node, tensorMap, context))]; + } + case 'StringNGrams': { + const { nGrams, nGramsSplits } = ops.string.stringNGrams(getParamValue('data', node, tensorMap, context), getParamValue('dataSplits', node, tensorMap, context), getParamValue('separator', node, tensorMap, context), getParamValue('nGramWidths', node, tensorMap, context), getParamValue('leftPad', node, tensorMap, context), getParamValue('rightPad', node, tensorMap, context), getParamValue('padWidth', node, tensorMap, context), getParamValue('preserveShortSequences', node, tensorMap, context)); + return [nGrams, nGramsSplits]; + } + case 'StringSplit': { + const { indices, values, shape } = ops.string.stringSplit(getParamValue('input', node, tensorMap, context), getParamValue('delimiter', node, tensorMap, context), getParamValue('skipEmpty', node, tensorMap, context)); + return [indices, values, shape]; + } + case 'StringToHashBucketFast': { + const output = ops.string.stringToHashBucketFast(getParamValue('input', node, tensorMap, context), getParamValue('numBuckets', node, tensorMap, context)); + return [output]; + } + default: + throw TypeError(`Node type ${node.op} is not implemented`); + } + }; + const CATEGORY$1 = 'string'; + + /** + * @license + * Copyright 2018 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + const executeOp$1 = (node, tensorMap, context, ops = tfOps) => { + switch (node.op) { + case 'Cast': { + return [ops.cast(getParamValue('x', node, tensorMap, context), getParamValue('dtype', node, tensorMap, context))]; + } + case 'ExpandDims': { + const axis = getParamValue('axis', node, tensorMap, context); + return [ops.expandDims(getParamValue('x', node, tensorMap, context), axis)]; + } + case 'Squeeze': { + const axis = getParamValue('axis', node, tensorMap, context); + return [ops.squeeze(getParamValue('x', node, tensorMap, context), axis)]; + } + case 'Reshape': { + return [ops.reshape(getParamValue('x', node, tensorMap, context), getParamValue('shape', node, tensorMap, context))]; + } + case 'EnsureShape': { + return [ops.ensureShape(getParamValue('x', node, tensorMap, context), getParamValue('shape', node, tensorMap, context))]; + } + case 'MirrorPad': { + return [ops.mirrorPad(getParamValue('x', node, tensorMap, context), getParamValue('padding', node, tensorMap, context), getParamValue('mode', node, tensorMap, context))]; + } + case 'PadV2': + case 'Pad': { + return [ops.pad(getParamValue('x', node, tensorMap, context), getParamValue('padding', node, tensorMap, context), getParamValue('constantValue', node, tensorMap, context))]; + } + case 'SpaceToBatchND': { + const blockShape = getParamValue('blockShape', node, tensorMap, context); + const paddings = getParamValue('paddings', node, tensorMap, context); + return 
[ops.spaceToBatchND(getParamValue('x', node, tensorMap, context), blockShape, paddings)]; + } + case 'BatchToSpaceND': { + const blockShape = getParamValue('blockShape', node, tensorMap, context); + const crops = getParamValue('crops', node, tensorMap, context); + return [ops.batchToSpaceND(getParamValue('x', node, tensorMap, context), blockShape, crops)]; + } + case 'DepthToSpace': { + const blockSize = getParamValue('blockSize', node, tensorMap, context); + const dataFormat = getParamValue('dataFormat', node, tensorMap, context).toUpperCase(); + return [ops.depthToSpace(getParamValue('x', node, tensorMap, context), blockSize, dataFormat)]; + } + case 'BroadcastTo': { + return [ops.broadcastTo(getParamValue('x', node, tensorMap, context), getParamValue('shape', node, tensorMap, context))]; + } + case 'BroadcastArgs': { + return [ops.broadcastArgs(getParamValue('s0', node, tensorMap, context), getParamValue('s1', node, tensorMap, context))]; + } + default: + throw TypeError(`Node type ${node.op} is not implemented`); + } + }; + const CATEGORY = 'transformation'; + + /** + * @license + * Copyright 2018 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + /** + * Executes the op defined by the node object. 
+ * @param node + * @param tensorMap contains tensors for executed nodes and weights + * @param context contains tensors and information for running the current node. + * @param resourceManager Optional. Contains global resources of the model. + */ + function executeOp(node, tensorMap, context, resourceManager, tidy$1 = tidy) { + const value = ((node, tensorMap, context) => { + switch (node.category) { + case 'arithmetic': + return tidy$1(() => executeOp$k(node, tensorMap, context)); + case 'basic_math': + return tidy$1(() => executeOp$j(node, tensorMap, context)); + case 'control': + return executeOp$i(node, tensorMap, context); + case 'convolution': + return tidy$1(() => executeOp$h(node, tensorMap, context)); + case 'creation': + return tidy$1(() => executeOp$g(node, tensorMap, context)); + case 'dynamic': + return executeOp$f(node, tensorMap, context); + case 'evaluation': + return tidy$1(() => executeOp$e(node, tensorMap, context)); + case 'image': + return tidy$1(() => executeOp$b(node, tensorMap, context)); + case 'graph': + return tidy$1(() => executeOp$d(node, tensorMap, context)); + case 'logical': + return tidy$1(() => executeOp$a(node, tensorMap, context)); + case 'matrices': + return tidy$1(() => executeOp$9(node, tensorMap, context)); + case 'normalization': + return tidy$1(() => executeOp$8(node, tensorMap, context)); + case 'ragged': + return tidy$1(() => executeOp$7(node, tensorMap, context)); + case 'reduction': + return tidy$1(() => executeOp$6(node, tensorMap, context)); + case 'slice_join': + return tidy$1(() => executeOp$5(node, tensorMap, context)); + case 'sparse': + return tidy$1(() => executeOp$4(node, tensorMap, context)); + case 'spectral': + return tidy$1(() => executeOp$3(node, tensorMap, context)); + case 'string': + return tidy$1(() => executeOp$2(node, tensorMap, context)); + case 'transformation': + return tidy$1(() => executeOp$1(node, tensorMap, context)); + case 'hash_table': + return executeOp$c(node, tensorMap, context, 
resourceManager); + case 'custom': + const opMapper = getRegisteredOp(node.op); + if (opMapper && opMapper.customExecutor) { + return opMapper.customExecutor(new NodeValueImpl(node, tensorMap, context)); + } + else { + throw TypeError(`Custom op ${node.op} is not registered.`); + } + default: + throw TypeError(`Unknown op '${node.op}'. File an issue at ` + + `https://github.com/tensorflow/tfjs/issues so we can add it` + + `, or register a custom execution with tf.registerOp()`); + } + })(node, tensorMap, context); + if (isPromise(value)) { + return value.then((data) => [].concat(data)); + } + return [].concat(value); + } + + /** + * ExecutionContext captures the runtime environment of the node. It keeps + * track of the current frame and iteration for the control flow ops. + * + * For example, typical Dynamic RNN model may contain loops, for which + * TensorFlow will generate graphs with Enter/Exit nodes to control the + * current execution frame, and NextIteration Nodes for iteration id increment. + * For model with branch logic, TensorFLow will generate Switch/Merge ops. 
+ */ + class ExecutionContext { + constructor(weightMap = {}, tensorArrayMap = {}, tensorListMap = {}, functionMap = {}, parseNodeNameCache) { + this.weightMap = weightMap; + this.tensorArrayMap = tensorArrayMap; + this.tensorListMap = tensorListMap; + this.functionMap = functionMap; + this.parseNodeNameCache = parseNodeNameCache; + this.rootContext = { id: 0, frameName: '', iterationId: 0 }; + this.contexts = [this.rootContext]; + this.lastId = 0; + this.generateCurrentContextIds(); + } + newFrame(id, frameName) { + return { id, frameName, iterationId: 0 }; + } + /** + * Set the current context + * @param contexts: ExecutionContextInfo[] the current path of execution + * frames + */ + set currentContext(contexts) { + if (this.contexts !== contexts) { + this.contexts = contexts; + this.generateCurrentContextIds(); + } + } + get currentContext() { + return this.contexts; + } + /** + * Returns the current context in string format. + */ + get currentContextId() { + return this._currentContextIds[0]; + } + /** + * Returns the current context and all parent contexts in string format. + * This allow access to the nodes in the current and parent frames. + */ + get currentContextIds() { + return this._currentContextIds; + } + generateCurrentContextIds() { + const names = []; + for (let i = 0; i < this.contexts.length - 1; i++) { + const contexts = this.contexts.slice(0, this.contexts.length - i); + names.push(this.contextIdforContexts(contexts)); + } + names.push(''); + this._currentContextIds = names; + } + contextIdforContexts(contexts) { + return contexts ? + contexts + .map(context => (context.id === 0 && context.iterationId === 0) ? + '' : + `${context.frameName}-${context.iterationId}`) + .join('/') : + ''; + } + /** + * Enter a new frame, a new context is pushed on the current context list. 
+ * @param frameId new frame id + */ + enterFrame(frameId) { + if (this.contexts) { + this.lastId++; + this.contexts = this.contexts.slice(); + this.contexts.push(this.newFrame(this.lastId, frameId)); + this._currentContextIds.unshift(this.contextIdforContexts(this.contexts)); + } + } + /** + * Exit the current frame, the last context is removed from the current + * context list. + */ + exitFrame() { + if (this.contexts && this.contexts.length > 1) { + this.contexts = this.contexts.slice(); + this.contexts.splice(-1); + this.currentContextIds.shift(); + } + else { + throw new Error('Cannot exit frame, the context is empty'); + } + } + /** + * Enter the next iteration of a loop, the iteration id of last context is + * increased. + */ + nextIteration() { + if (this.contexts && this.contexts.length > 0) { + this.contexts = this.contexts.slice(); + this.lastId++; + const context = Object.assign({}, this.contexts[this.contexts.length - 1]); + context.iterationId += 1; + context.id = this.lastId; + this.contexts.splice(-1, 1, context); + this._currentContextIds.splice(0, 1, this.contextIdforContexts(this.contexts)); + } + else { + throw new Error('Cannot increase frame iteration, the context is empty'); + } + } + getWeight(name) { + return this.weightMap[name]; + } + addTensorArray(tensorArray) { + this.tensorArrayMap[tensorArray.id] = tensorArray; + } + getTensorArray(id) { + return this.tensorArrayMap[id]; + } + addTensorList(tensorList) { + this.tensorListMap[tensorList.id] = tensorList; + } + getTensorList(id) { + return this.tensorListMap[id]; + } + dispose(keepIds) { + for (const key in this.tensorArrayMap) { + this.tensorArrayMap[key].clearAndClose(keepIds); + } + for (const key in this.tensorListMap) { + this.tensorListMap[key].clearAndClose(keepIds); + } + } + } + + /** + * @license + * Copyright 2019 Google LLC. All Rights Reserved. 
+ * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + /** + * Given graph inputs and desired outputs, find the minimal set of nodes + * to execute in order to compute the outputs. In addition return other useful + * info such: + * - Missing inputs needed to compute the output. + * - Whether the subgraph contains dynamic ops (control flow, dynamic shape). + * - Alternative inputs in order to avoid async (dynamic op) execution. + */ + function getExecutionSubgraph(inputs, outputs, weightMap, initNodes) { + const usedNodes = new Set(); + const missingInputs = []; + let dynamicNode = null; + let syncInputs = null; + // Start with the outputs, going backwards and find all the nodes that are + // needed to compute those outputs. + const seen = new Set(); + const inputNodeNames = new Set(Object.keys(inputs).map((name) => parseNodeName(name)[0])); + initNodes = initNodes || []; + const initNodeNames = new Set(initNodes.map((node) => parseNodeName(node.name)[0])); + const frontier = [...outputs]; + while (frontier.length > 0) { + const node = frontier.pop(); + if (isControlFlow(node) || isDynamicShape(node) || isHashTable(node)) { + if (dynamicNode == null) { + dynamicNode = node; + syncInputs = dynamicNode.children.map(child => child.name) + .filter(name => usedNodes.has(name)); + } + } + usedNodes.add(node.name); + // Weights are dead end since we already have their values. 
+ if (weightMap[node.name] != null) { + continue; + } + // This node is a dead end since it's one of the user-provided inputs. + if (inputNodeNames.has(node.name)) { + continue; + } + // This node is a dead end since it doesn't have any inputs. + if (initNodeNames.has(node.name)) { + continue; + } + if (node.inputs.length === 0) { + missingInputs.push(node.name); + continue; + } + node.inputs.forEach(input => { + // Don't add to the frontier if it is already there. + if (seen.has(input.name)) { + return; + } + seen.add(input.name); + frontier.push(input); + }); + } + return { inputs, outputs, usedNodes, missingInputs, dynamicNode, syncInputs }; + } + /** + * Given the execution info, return a list of nodes in topological order that + * need to be executed to compute the output. + */ + function getNodesInTopologicalOrder(graph, executionInfo) { + const { usedNodes, inputs } = executionInfo; + const inputNodes = Object.keys(inputs) + .map(name => parseNodeName(name)[0]) + .map(name => graph.nodes[name]); + const initNodes = graph.initNodes || []; + const isUsed = (node) => usedNodes.has(typeof node === 'string' ? node : node.name); + function unique(nodes) { + return [...new Map(nodes.map((node) => [node.name, node])).values()]; + } + const predefinedNodes = unique([ + ...inputNodes, + ...graph.weights, + ...initNodes, + ]).filter(isUsed); + const allNodes = unique([ + ...predefinedNodes, + ...Object.values(graph.nodes), + ]).filter(isUsed); + const nameToNode = new Map(allNodes.map((node) => [node.name, node])); + const inCounts = {}; + for (const node of allNodes) { + inCounts[node.name] = inCounts[node.name] || 0; + for (const child of node.children) { + // When the child is unused, set in counts to infinity so that it will + // never be decreased to 0 and added to the execution list. 
+ if (!isUsed(child)) { + inCounts[child.name] = Number.POSITIVE_INFINITY; + } + inCounts[child.name] = (inCounts[child.name] || 0) + 1; + } + } + // Build execution order for all used nodes regardless whether they are + // predefined or not. + const frontier = Object.entries(inCounts) + .filter(([, inCount]) => inCount === 0) + .map(([name]) => name); + const orderedNodeNames = [...frontier]; + while (frontier.length > 0) { + const nodeName = frontier.pop(); + const node = nameToNode.get(nodeName); + for (const child of node.children.filter(isUsed)) { + if (--inCounts[child.name] === 0) { + orderedNodeNames.push(child.name); + frontier.push(child.name); + } + } + } + const orderedNodes = orderedNodeNames.map((name) => nameToNode.get(name)); + const filteredOrderedNodes = filterPredefinedReachableNodes(orderedNodes, predefinedNodes); + // TODO: Turn validation on/off with tf env flag. + validateNodesExecutionOrder(filteredOrderedNodes, predefinedNodes); + return filteredOrderedNodes; + } + /** + * This is a helper function of `getNodesInTopologicalOrder`. + * Returns ordered nodes reachable by at least one predefined node. + * This can help us filter out redundant nodes from the returned node list. + * For example: + * If we have four nodes with dependencies like this: + * a --> b --> c --> d + * when node `c` is predefined (e.g. given as an input tensor), we can + * skip node `a` and `b` since their outputs will never be used. + * + * @param orderedNodes Graph nodes in execution order. + * @param predefinedNodes Graph inputs, weights, and init nodes. Nodes in this + * list must have distinct names. + */ + function filterPredefinedReachableNodes(orderedNodes, predefinedNodes) { + const nameToNode = new Map(orderedNodes.map((node) => [node.name, node])); + // TODO: Filter out more nodes when >=2 nodes are predefined in a path. 
+ const stack = predefinedNodes.map((node) => node.name); + const predefinedReachableNodeNames = new Set(stack); + // Perform a DFS starting from the set of all predefined nodes + // to find the set of all nodes reachable from the predefined nodes. + while (stack.length > 0) { + const nodeName = stack.pop(); + const node = nameToNode.get(nodeName); + for (const child of node.children) { + if (!nameToNode.has(child.name) || + predefinedReachableNodeNames.has(child.name)) { + continue; + } + predefinedReachableNodeNames.add(child.name); + stack.push(child.name); + } + } + // Filter out unreachable nodes and build the ordered node list. + const filteredOrderedNodes = orderedNodes.filter((node) => predefinedReachableNodeNames.has(node.name)); + return filteredOrderedNodes; + } + class NodesExecutionOrderError extends Error { + constructor(message) { + super(`NodesExecutionOrderError: ${message}`); + } + } + /** + * This is a helper function of `getNodesInTopologicalOrder`. + * Validates property: given nodes `a` and `b`, Order(a) > Order(b) if `a` + * is a child of `b`. This function throws an error if validation fails. + * + * @param orderedNodes Graph nodes in execution order. + * @param predefinedNodes Graph inputs, weights, and init nodes. Nodes in this + * list must have distinct names. + */ + function validateNodesExecutionOrder(orderedNodes, predefinedNodes) { + const nodeNameToOrder = new Map(orderedNodes.map((node, order) => [node.name, order])); + const predefinedNodeNames = new Set(predefinedNodes.map((node) => node.name)); + const isPredefined = (node) => predefinedNodeNames.has(typeof node === 'string' ? node : node.name); + const willBeExecutedNodeNames = new Set(orderedNodes.map((node) => node.name)); + const willBeExecuted = (node) => willBeExecutedNodeNames.has(typeof node === 'string' ? 
node : node.name); + for (const node of orderedNodes) { + for (const child of node.children.filter(willBeExecuted)) { + if (!nodeNameToOrder.has(child.name)) { + throw new NodesExecutionOrderError(`Child ${child.name} of node ${node.name} is unreachable.`); + } + if (nodeNameToOrder.get(node.name) > nodeNameToOrder.get(child.name)) { + throw new NodesExecutionOrderError(`Node ${node.name} is scheduled to run after its child ${child.name}.`); + } + } + if (!isPredefined(node)) { + for (const input of node.inputs) { + if (!nodeNameToOrder.has(input.name)) { + throw new NodesExecutionOrderError(`Input ${input.name} of node ${node.name} is unreachable.`); + } + if (nodeNameToOrder.get(input.name) > nodeNameToOrder.get(node.name)) { + throw new NodesExecutionOrderError(`Node ${node.name} is scheduled to run before its input ${input.name}.`); + } + } + } + } + } + /** + * Given the execution info, return a map from node name to the disposable + * node name list after its execution. + * + * @returns A map from node name to disposable nodes after its + * execution. That is, for a node `x`, `nodeLiveUntilMap[x]` indicates + * all nodes which their intermediate tensors should be disposed after `x` + * being executed. + */ + function getNodeLiveUntilMap(orderedNodes) { + const nodeNameToOrder = new Map(orderedNodes.map((node, order) => [node.name, order])); + const INF_LIFE = Number.MAX_SAFE_INTEGER; + // Make control flow nodes (and consequently their direct parents) + // live forever since they're tricky to track correctly. + const selfLifespans = orderedNodes.map((node, nodeOrder) => isControlFlow(node) ? INF_LIFE : nodeOrder); + const getSelfLifeSpan = (node) => { + const selfLife = selfLifespans[nodeNameToOrder.get(node.name)]; + if (selfLife == null) { + // If nodeToOrder does not contain the node, it is unused or + // unreachable in graph. 
+ return -1; + } + return selfLife; + }; + // `liveUntil[i]` points to the last node in the `orderedNodes` array that + // may depend on tensors from node `i`. It indicates that all the + // intermediate tensors from `orderedNodes[i]` should be disposed after + // `orderedNodes[liveUntil[i]]` is executed. + // A node lives long enough to pass on its tensors to its children. + // It lives until at least `max(node's position, children's positions)`. + const liveUntilOrders = orderedNodes.map((node, nodeOrder) => { + return node.children.map(getSelfLifeSpan) + .reduce((a, b) => Math.max(a, b), selfLifespans[nodeOrder]); + }); + // liveUntilMap: + // - Key: Name of a node `x` + // - Values: All nodes whose intermediate tensors should be disposed + // after `x` is executed. + const liveUntilMap = new Map(); + for (let nodeOrder = 0; nodeOrder < orderedNodes.length; ++nodeOrder) { + const liveUntilOrder = liveUntilOrders[nodeOrder]; + if (liveUntilOrder === INF_LIFE) { + continue; + } + const node = orderedNodes[nodeOrder]; + const liveUntilNode = orderedNodes[liveUntilOrder]; + if (!liveUntilMap.has(liveUntilNode.name)) { + liveUntilMap.set(liveUntilNode.name, []); + } + liveUntilMap.get(liveUntilNode.name).push(node); + } + return liveUntilMap; + } + const CONTROL_FLOW_OPS = new Set([ + 'Switch', 'Merge', 'Enter', 'Exit', 'NextIteration', 'StatelessIf', + 'StatelessWhile', 'if', 'While' + ]); + const DYNAMIC_SHAPE_OPS = new Set([ + 'NonMaxSuppressionV2', 'NonMaxSuppressionV3', 'NonMaxSuppressionV5', 'Where' + ]); + const HASH_TABLE_OPS = new Set([ + 'HashTable', 'HashTableV2', 'LookupTableImport', 'LookupTableImportV2', + 'LookupTableFind', 'LookupTableFindV2', 'LookupTableSize', 'LookupTableSizeV2' + ]); + function isControlFlow(node) { + return CONTROL_FLOW_OPS.has(node.op); + } + function isDynamicShape(node) { + return DYNAMIC_SHAPE_OPS.has(node.op); + } + function isHashTable(node) { + return HASH_TABLE_OPS.has(node.op); + } + + /** + * @license + * Copyright 2018 
Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + class GraphExecutor { + get weightIds() { + return this.parent ? this.parent.weightIds : this._weightIds; + } + get functionExecutorMap() { + return this.parent ? this.parent.functionExecutorMap : + this._functionExecutorMap; + } + get weightMap() { + return this.parent ? this.parent.weightMap : this._weightMap; + } + set weightMap(weightMap) { + const weightIds = Object.keys(weightMap).map(key => weightMap[key].map(tensor => tensor.id)); + this._weightIds = [].concat(...weightIds); + this._weightMap = weightMap; + } + /** + * Set `ResourceManager` shared by executors of a model. + * @param resourceManager: `ResourceManager` of the `GraphModel`. + */ + set resourceManager(resourceManager) { + this._resourceManager = resourceManager; + } + get inputs() { + return this._inputs.map(node => { + return { + name: node.name, + shape: node.attrParams['shape'] ? + node.attrParams['shape'].value : + undefined, + dtype: node.attrParams['dtype'] ? + node.attrParams['dtype'].value : + undefined + }; + }); + } + get outputs() { + return this._outputs.map(node => { + return { + name: node.name, + shape: node.attrParams['shape'] ? + node.attrParams['shape'].value : + undefined, + dtype: node.attrParams['dtype'] ? 
+ node.attrParams['dtype'].value : + undefined + }; + }); + } + get inputNodes() { + return this._inputs.map(node => node.signatureKey || node.name); + } + get outputNodes() { + return this._outputs.map((node) => { + const name = node.signatureKey || node.name; + return node.defaultOutput ? (`${name}:${node.defaultOutput}`) : name; + }); + } + get functions() { + return Object.keys(this._functions).reduce((map, key) => { + map[key] = this._functions[key].signature; + return map; + }, {}); + } + /** + * + * @param graph Graph the model or function graph to be executed. + * @param parent When building function exector you need to set the parent + * executor. Since the weights and function executor maps are set at parant + * level, that function executor can access the function maps and weight maps + * through the parent. + */ + constructor(graph, parent) { + this.graph = graph; + this.parent = parent; + this.compiledMap = new Map(); + this.parseNodeNameCache = new Map(); + this._weightMap = {}; + this.SEPARATOR = ','; + this._functions = {}; + this._functionExecutorMap = {}; + this.keepIntermediateTensors = false; + this._outputs = graph.outputs; + this._inputs = graph.inputs; + this._initNodes = graph.initNodes; + this._signature = graph.signature; + this._functions = graph.functions; + // create sub-graph executors + if (graph.functions != null) { + Object.keys(graph.functions).forEach(name => { + this._functionExecutorMap[name] = + new GraphExecutor(graph.functions[name], this); + }); + } + } + getCompilationKey(inputs, outputs) { + const sortedInputs = inputs.map(node => node.name).sort(); + const sortedOutputs = outputs.map(node => node.name).sort(); + return sortedInputs.join(this.SEPARATOR) + '--' + + sortedOutputs.join(this.SEPARATOR); + } + /** + * Compiles the inference graph and returns the minimal set of nodes that are + * required for execution, in the correct execution order. + * @returns {Object} compilation The compile result. 
+ * @returns {Node[]} compilation.orderedNodes Nodes in the correct execution + * order. + * @returns {Map} compilation.nodeLiveUntilMap A map from node + * to disposable nodes after its execution. That is, for a node `x`, + * `nodeLiveUntilMap[x]` indicates all nodes whose intermediate + * tensors should be disposed after `x` is executed. + */ + compile(inputs, outputs) { + const executionInfo = getExecutionSubgraph(inputs, outputs, this.weightMap, this._initNodes); + const { missingInputs, dynamicNode, syncInputs } = executionInfo; + if (dynamicNode != null) { + throw new Error(`This execution contains the node '${dynamicNode.name}', which has ` + + `the dynamic op '${dynamicNode.op}'. Please use ` + + `model.executeAsync() instead. Alternatively, to avoid the ` + + `dynamic ops, specify the inputs [${syncInputs}]`); + } + if (missingInputs.length > 0) { + const outNames = outputs.map(n => n.name); + const inNames = Object.keys(inputs); + throw new Error(`Cannot compute the outputs [${outNames}] from the provided inputs ` + + `[${inNames}]. Missing the following inputs: [${missingInputs}]`); + } + const orderedNodes = getNodesInTopologicalOrder(this.graph, executionInfo); + const nodeLiveUntilMap = getNodeLiveUntilMap(orderedNodes); + return { orderedNodes, nodeLiveUntilMap }; + } + cloneAndKeepTensor(tensor) { + if (tensor == null) { + return null; + } + const clone = tensor.clone(); + // Keep the clone because`model.execute()` may be called within + // a `tidy()`, but the user may inspect these tensors after the + // tidy. 
+ keep(clone); + return clone; + } + cloneTensorList(tensors) { + if (!tensors) { + return null; + } + const clonedTensor = tensors.map(tensor => { + return this.cloneAndKeepTensor(tensor); + }); + return clonedTensor; + } + cloneTensorMap(tensorsMap) { + return Object.fromEntries(Object.entries(tensorsMap).map(([name, tensorsList]) => { + return [name, this.cloneTensorList(tensorsList)]; + })); + } + /** + * Executes the inference for given input tensors. + * @param inputs Tensor map for the model inputs, keyed by the input node + * names. + * @param outputs Optional. output node name from the Tensorflow model, if + * no outputs are specified, the default outputs of the model would be used. + * You can inspect intermediate nodes of the model by adding them to the + * outputs array. + */ + execute(inputs, outputs) { + // Dispose any tensors from a prior run to avoid leaking them. + this.disposeIntermediateTensors(); + inputs = this.mapInputs(inputs); + const names = Object.keys(inputs).sort(); + this.checkInputs(inputs); + this.checkInputShapeAndType(inputs); + outputs = this.mapOutputs(outputs); + this.checkOutputs(outputs); + const inputNodes = names.map(name => this.graph.nodes[parseNodeName(name)[0]]); + const outputNodeNames = outputs.map(name => parseNodeName(name)[0]); + const outputNodeNameSet = new Set(outputNodeNames); + let outputNodes = outputNodeNames.map(name => this.graph.nodes[name]); + // If no outputs are specified, then use the default outputs of the model. + if (outputNodes.length === 0) { + outputNodes = this._outputs; + } + const compilationKey = this.getCompilationKey(inputNodes, outputNodes); + // Do nothing if the compiled graph cache contains the input. + let compilation = this.compiledMap.get(compilationKey); + if (compilation == null) { + compilation = this.compile(inputs, outputNodes); + this.compiledMap.set(compilationKey, compilation); + } + // Keep tensors if KEEP_INTERMEDIATE_TENSORS is on. 
+ try { + this.keepIntermediateTensors = env().getBool('KEEP_INTERMEDIATE_TENSORS'); + } + catch (e) { + this.keepIntermediateTensors = false; + console.warn(e.message); + } + const tensorArrayMap = {}; + const tensorListMap = {}; + return tidy(() => { + const context = new ExecutionContext(this.weightMap, tensorArrayMap, tensorListMap, this.functionExecutorMap, this.parseNodeNameCache); + const tensorsMap = Object.assign({}, this.weightMap); + if (this.keepIntermediateTensors) { + this.clonedTensorsMap = this.cloneTensorMap(this.weightMap); + } + Object.keys(inputs).forEach(name => { + const [nodeName, index] = parseNodeName(name, context); + const tensors = []; + tensors[index] = inputs[name]; + tensorsMap[nodeName] = tensors; + if (this.keepIntermediateTensors) { + this.clonedTensorsMap[nodeName] = this.cloneTensorList(tensors); + } + }); + const tensorsToKeep = this.getFrozenTensorIds(tensorsMap); + const { orderedNodes, nodeLiveUntilMap } = compilation; + for (const node of orderedNodes) { + if (tensorsMap[node.name]) { + continue; + } + const tensors = executeOp(node, tensorsMap, context, this._resourceManager); + if (isPromise(tensors)) { + throw new Error(`The execution of the op '${node.op}' returned a promise. 
` + + `Please use model.executeAsync() instead.`); + } + tensorsMap[node.name] = tensors; + if (this.keepIntermediateTensors) { + this.clonedTensorsMap[node.name] = this.cloneTensorList(tensors); + } + this.checkTensorForDisposalWithNodeLiveUntilInfo(node, tensorsMap, context, tensorsToKeep, outputNodeNameSet, nodeLiveUntilMap.get(node.name)); + } + // dispose the context for the root executor + if (this.parent == null) { + context.dispose(tensorsToKeep); + } + return outputs.map(name => getTensor(name, tensorsMap, context)); + }); + } + getFrozenTensorIds(tensorMap) { + const ids = [].concat.apply([], Object.keys(tensorMap) + .map(key => tensorMap[key]) + .map(tensors => tensors.map(tensor => tensor.id))); + return new Set(ids); + } + checkTensorForDisposal(nodeName, node, tensorMap, context, tensorsToKeep, outputNodeNameSet, intermediateTensorConsumerCount) { + // Skip output nodes and any control flow nodes, since its dependency is + // tricky to track correctly. + if (isControlFlow(node) || outputNodeNameSet.has(nodeName)) { + return; + } + for (const tensor of tensorMap[nodeName]) { + if (tensor == null) { + continue; + } + intermediateTensorConsumerCount[tensor.id] = + (intermediateTensorConsumerCount[tensor.id] || 0) + + node.children.length; + } + for (const input of node.inputs) { + // Skip any control flow nodes, since its dependency is tricky to track + // correctly. + if (isControlFlow(input)) { + continue; + } + const tensors = getTensorsForCurrentContext(input.name, tensorMap, context); + if (tensors == null) { + continue; + } + for (const tensor of tensors) { + if (!tensor || tensor.kept || tensorsToKeep.has(tensor.id)) { + continue; + } + // Only intermediate nodes' tensors have counts set, not marked as + // kept, and not in `tensorsToKeep`. + // Input and weight nodes' tensors should exist in `tensorsToKeep`. + // Output and control flow nodes' tensors should never have count set. 
+ const count = intermediateTensorConsumerCount[tensor.id]; + if (count === 1) { + tensor.dispose(); + delete intermediateTensorConsumerCount[tensor.id]; + } + else if (count != null) { + intermediateTensorConsumerCount[tensor.id]--; + } + } + } + } + checkTensorForDisposalWithNodeLiveUntilInfo(node, tensorMap, context, tensorsToKeep, outputNodeNameSet, liveUntilNodes) { + function isNonDisposableNode(node) { + // Skip output nodes and any control flow nodes, since its dependency is + // tricky to track correctly. + return isControlFlow(node) || outputNodeNameSet.has(node.name); + } + if (isControlFlow(node) || liveUntilNodes == null) { + return; + } + for (const nodeToDispose of liveUntilNodes) { + if (isNonDisposableNode(nodeToDispose)) { + continue; + } + const tensors = getTensorsForCurrentContext(nodeToDispose.name, tensorMap, context); + for (const tensor of tensors) { + if (!tensor || tensor.kept || tensorsToKeep.has(tensor.id)) { + continue; + } + tensor.dispose(); + } + } + } + /** + * Executes the inference for given input tensors in Async fashion. + * @param inputs Tensor map for the model inputs, keyed by the input node + * names. + * @param outputs output node name from the Tensorflow model, if no outputs + * are specified, the default outputs of the model would be used. You can + * inspect intermediate nodes of the model by adding them to the outputs + * array. + */ + async executeAsync(inputs, outputs) { + return this._executeAsync(inputs, outputs); + } + disposeIntermediateTensors() { + if (!this.clonedTensorsMap) { + return; + } + Object.values(this.clonedTensorsMap).forEach(tensorsList => { + for (const tensor of tensorsList) { + if (tensor && !tensor.isDisposed) { + tensor.dispose(); + } + } + }); + this.clonedTensorsMap = null; + } + getIntermediateTensors() { + return this.clonedTensorsMap; + } + /** + * Executes the inference for given input tensors in Async fashion. 
+ * @param inputs Tensor map for the model inputs, keyed by the input node + * names. + * @param outputs Optional. output node name from the Tensorflow model, + * if no outputs are specified, the default outputs of the model would be + * used. You can inspect intermediate nodes of the model by adding them to + * the outputs array. + * @param isFunctionExecution Optional. Flag for executing a function. + * @param tensorArrayMap Optional, global TensorArray map by id. Used for + * function execution. + * @param tensorArrayMap Optional global TensorList map by id. Used for + * function execution. + */ + async _executeAsync(inputs, outputs, isFunctionExecution = false, tensorArrayMap = {}, tensorListMap = {}) { + // Dispose any tensors from a prior run to avoid leaking them. + this.disposeIntermediateTensors(); + if (!isFunctionExecution) { + inputs = this.mapInputs(inputs); + this.checkInputs(inputs); + this.checkInputShapeAndType(inputs); + outputs = this.mapOutputs(outputs); + this.checkOutputs(outputs); + } + // Keep tensors if KEEP_INTERMEDIATE_TENSORS is on. + try { + this.keepIntermediateTensors = env().getBool('KEEP_INTERMEDIATE_TENSORS'); + } + catch (e) { + this.keepIntermediateTensors = false; + console.warn(e.message); + } + const context = new ExecutionContext(this.weightMap, tensorArrayMap, tensorListMap, this.functionExecutorMap, this.parseNodeNameCache); + if (this.keepIntermediateTensors) { + this.clonedTensorsMap = this.cloneTensorMap(this.weightMap); + } + // Graph with control flow op requires runtime evaluation of the execution + // order, while without control flow the execution order is pre-determined + // in the compile method. 
+ const tensorsMap = await this.executeWithControlFlow(inputs, context, outputs, isFunctionExecution); + const results = outputs.map(name => getTensor(name, tensorsMap, context)); + // dispose all the intermediate tensors + const outputIds = results.map(t => t.id); + const inputIds = Object.keys(inputs).map(name => inputs[name].id); + const keepIds = new Set([...outputIds, ...inputIds, ...this.weightIds]); + Object.values(tensorsMap).forEach(tensorsList => { + tensorsList.forEach(tensor => { + if (tensor && !tensor.isDisposed && !keepIds.has(tensor.id)) { + tensor.dispose(); + } + }); + }); + // dispose the context for the root executor + if (this.parent == null) { + context.dispose(keepIds); + } + return results; + } + async executeFunctionAsync(inputs, tensorArrayMap, tensorListMap) { + const mappedInputs = inputs.reduce((map, tensor, index) => { + map[this.inputs[index].name] = tensor; + return map; + }, {}); + return this._executeAsync(mappedInputs, this.outputNodes, true, tensorArrayMap, tensorListMap); + } + /** + * When there are control flow nodes in the graph, the graph execution use + * ExecutionContext to keep track of the frames and loop iterators. + * @param inputs placeholder tensors for the graph. + * @param context the execution context object for current execution. + * @param outputNames Optional. output node name from the Tensorflow model, + * if no outputs are specified, the default outputs of the model would be + * used. You can inspect intermediate nodes of the model by adding them to + * the outputs array. + * @param isFunctionExecution Flag for executing a function. 
+ */ + async executeWithControlFlow(inputs, context, outputNames, isFunctionExecution) { + const names = Object.keys(inputs); + const inputNodes = names.map(name => this.graph.nodes[parseNodeName(name)[0]]); + const outputNodeNames = outputNames.map(name => parseNodeName(name)[0]); + const outputNodeNameSet = new Set(outputNodeNames); + let outputNodes = outputNodeNames.map(name => this.graph.nodes[name]); + // If no outputs are specified, then use the default outputs of the model. + if (outputNodes.length === 0) { + outputNodes = this._outputs; + } + const { usedNodes, missingInputs, dynamicNode, syncInputs } = getExecutionSubgraph(inputs, outputNodes, this.weightMap, this._initNodes); + // First nodes to execute include inputNodes, weights, and initNodes. + const stack = [ + ...inputNodes, ...this.graph.weights, ...(this._initNodes || []) + ].map(node => { + return { node, contexts: context.currentContext }; + }); + const tensorsMap = Object.assign({}, this.weightMap); + Object.keys(inputs).forEach(name => { + const [nodeName, index] = parseNodeName(name); + const tensors = []; + tensors[index] = inputs[name]; + tensorsMap[nodeName] = tensors; + }); + const intermediateTensorConsumerCount = {}; + const tensorsToKeep = this.getFrozenTensorIds(tensorsMap); + const added = {}; + while (stack.length > 0) { + const promises = this.processStack(inputNodes, stack, context, tensorsMap, added, tensorsToKeep, outputNodeNameSet, intermediateTensorConsumerCount, usedNodes); + await Promise.all(promises); + } + if (dynamicNode == null && !isFunctionExecution) { + console.warn(`This model execution did not contain any nodes with control flow ` + + `or dynamic output shapes. 
You can use model.execute() instead.`); + } + const missingOutputs = outputNodes + .filter(node => !isControlFlow(node) && + !getTensor(node.name, tensorsMap, context)) + .map(node => node.name); + if (missingOutputs.length > 0) { + let alternativeMsg = ''; + if (dynamicNode != null) { + alternativeMsg = + `Alternatively, to avoid the dynamic ops, use model.execute() ` + + `and specify the inputs [${syncInputs}]`; + } + throw new Error(`Cannot compute the outputs [${missingOutputs}] from the provided ` + + `inputs [${names}]. Consider providing the following inputs: ` + + `[${missingInputs}]. ${alternativeMsg}`); + } + return tensorsMap; + } + processStack(inputNodes, stack, context, tensorMap, added, tensorsToKeep, outputNodeNameSet, intermediateTensorConsumerCount, usedNodes) { + const promises = []; + while (stack.length > 0) { + const item = stack.pop(); + context.currentContext = item.contexts; + let nodeName = ''; + // The tensor of the Enter op with isConstant set should be set + // in the parent scope, so it will be available as constant for the + // whole loop. + if (item.node.op === 'Enter' && + getParamValue('isConstant', item.node, tensorMap, context)) { + [nodeName] = getNodeNameAndIndex(item.node.name, context); + } + // only process nodes that are not in the tensorMap yet, this include + // inputNodes and internal initNodes. 
+ if (tensorMap[item.node.name] == null) { + const tensors = executeOp(item.node, tensorMap, context, this._resourceManager); + if (!nodeName) { + [nodeName] = getNodeNameAndIndex(item.node.name, context); + } + const currentContext = context.currentContext; + if (isPromise(tensors)) { + promises.push(tensors.then(t => { + tensorMap[nodeName] = t; + if (this.keepIntermediateTensors) { + this.clonedTensorsMap[nodeName] = this.cloneTensorList(t); + } + context.currentContext = currentContext; + this.checkTensorForDisposal(nodeName, item.node, tensorMap, context, tensorsToKeep, outputNodeNameSet, intermediateTensorConsumerCount); + this.processChildNodes(item.node, stack, context, tensorMap, added, usedNodes); + return t; + })); + } + else { + tensorMap[nodeName] = tensors; + if (this.keepIntermediateTensors) { + this.clonedTensorsMap[nodeName] = this.cloneTensorList(tensors); + } + this.checkTensorForDisposal(nodeName, item.node, tensorMap, context, tensorsToKeep, outputNodeNameSet, intermediateTensorConsumerCount); + this.processChildNodes(item.node, stack, context, tensorMap, added, usedNodes); + } + } + else { + this.processChildNodes(item.node, stack, context, tensorMap, added, usedNodes); + } + } + return promises; + } + processChildNodes(node, stack, context, tensorMap, added, usedNodes) { + node.children.forEach((childNode) => { + const [nodeName,] = getNodeNameAndIndex(childNode.name, context); + if (added[nodeName] || !usedNodes.has(childNode.name)) { + return; + } + // Merge op can be pushed if any of its inputs has value. + if (childNode.op === 'Merge') { + if (childNode.inputNames.some(name => { + return !!getTensor(name, tensorMap, context); + })) { + added[nodeName] = true; + stack.push({ contexts: context.currentContext, node: childNode }); + } + } + else // Otherwise all inputs must to have value. 
+ if (childNode.inputNames.every(name => { + return !!getTensor(name, tensorMap, context); + })) { + added[nodeName] = true; + stack.push({ contexts: context.currentContext, node: childNode }); + } + }); + } + /** + * Releases the memory used by the weight tensors. + */ + dispose() { + Object.keys(this.weightMap) + .forEach(key => this.weightMap[key].forEach(tensor => tensor.dispose())); + } + checkInputShapeAndType(inputs) { + Object.keys(inputs).forEach(name => { + const input = inputs[name]; + const [nodeName,] = parseNodeName(name); + const node = this.graph.nodes[nodeName]; + if (node.attrParams['shape'] && node.attrParams['shape'].value) { + const shape = node.attrParams['shape'].value; + const match = shape.length === input.shape.length && + input.shape.every((dim, index) => shape[index] === -1 || shape[index] === dim); + assert$1(match, () => `The shape of dict['${node.name}'] provided in ` + + `model.execute(dict) must be [${shape}], but was ` + + `[${input.shape}]`); + } + if (node.attrParams['dtype'] && node.attrParams['dtype'].value) { + assert$1(input.dtype === node.attrParams['dtype'].value, () => `The dtype of dict['${node.name}'] provided in ` + + `model.execute(dict) must be ` + + `${node.attrParams['dtype'].value}, but was ${input.dtype}`); + } + }); + } + mapInputs(inputs) { + var _a, _b; + const result = {}; + for (const inputName in inputs) { + const tensor = (_b = (_a = this._signature) === null || _a === void 0 ? void 0 : _a.inputs) === null || _b === void 0 ? 
void 0 : _b[inputName]; + if (tensor != null) { + result[tensor.name] = inputs[inputName]; + } + else { + result[inputName] = inputs[inputName]; + } + } + return result; + } + checkInputs(inputs) { + const notInGraph = Object.keys(inputs).filter(name => { + const [nodeName] = parseNodeName(name); + return this.graph.nodes[nodeName] == null; + }); + if (notInGraph.length > 0) { + throw new Error(`The dict provided in model.execute(dict) has ` + + `keys: [${notInGraph}] that are not part of graph`); + } + } + mapOutputs(outputs) { + return outputs.map(name => { + var _a, _b; + const tensor = (_b = (_a = this._signature) === null || _a === void 0 ? void 0 : _a.outputs) === null || _b === void 0 ? void 0 : _b[name]; + if (tensor != null) { + return tensor.name; + } + return name; + }, {}); + } + checkOutputs(outputs) { + outputs.forEach(name => { + const [normalizedName] = parseNodeName(name); + if (!this.graph.nodes[normalizedName]) { + throw new Error(`The output '${name}' is not found in the graph`); + } + }); + } + } + + /** + * Contains global resources of a model. + */ + class ResourceManager { + constructor(hashTableNameToHandle = {}, hashTableMap = {}) { + this.hashTableNameToHandle = hashTableNameToHandle; + this.hashTableMap = hashTableMap; + } + /** + * Register a `HashTable` in the resource manager. + * + * The `HashTable` can be retrieved by `resourceManager.getHashTableById`, + * where id is the table handle tensor's id. + * + * @param name Op node name that creates the `HashTable`. + * @param hashTable The `HashTable` to be added to resource manager. + */ + addHashTable(name, hashTable) { + this.hashTableNameToHandle[name] = hashTable.handle; + this.hashTableMap[hashTable.id] = hashTable; + } + /** + * Get the table handle by node name. + * @param name Op node name that creates the `HashTable`. This name is also + * used in the inputs list of lookup and import `HashTable` ops. 
+ */ + getHashTableHandleByName(name) { + return this.hashTableNameToHandle[name]; + } + /** + * Get the actual `HashTable` by its handle tensor's id. + * @param id The id of the handle tensor. + */ + getHashTableById(id) { + return this.hashTableMap[id]; + } + /** + * Dispose `ResourceManager`, including its hashTables and tensors in them. + */ + dispose() { + for (const key in this.hashTableMap) { + this.hashTableMap[key].clearAndClose(); + delete this.hashTableMap[key]; + } + for (const name in this.hashTableNameToHandle) { + this.hashTableNameToHandle[name].dispose(); + delete this.hashTableNameToHandle[name]; + } + } + } + + /** + * @license + * Copyright 2018 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + const TFHUB_SEARCH_PARAM = '?tfjs-format=file'; + const DEFAULT_MODEL_NAME = 'model.json'; + /** + * A `tf.GraphModel` is a directed, acyclic graph built from a + * SavedModel GraphDef and allows inference execution. + * + * A `tf.GraphModel` can only be created by loading from a model converted from + * a [TensorFlow SavedModel](https://www.tensorflow.org/guide/saved_model) using + * the command line converter tool and loaded via `tf.loadGraphModel`. + * + * @doc {heading: 'Models', subheading: 'Classes'} + */ + class GraphModel { + // Returns the version information for the tensorflow model GraphDef. 
+ get modelVersion() { + return this.version; + } + get inputNodes() { + return this.executor.inputNodes; + } + get outputNodes() { + return this.executor.outputNodes; + } + get inputs() { + return this.executor.inputs; + } + get outputs() { + return this.executor.outputs; + } + get weights() { + return this.executor.weightMap; + } + get metadata() { + return this.artifacts.userDefinedMetadata; + } + get modelSignature() { + return this.signature; + } + get modelStructuredOutputKeys() { + return this.structuredOutputKeys; + } + /** + * @param modelUrl url for the model, or an `io.IOHandler`. + * @param weightManifestUrl url for the weight file generated by + * scripts/convert.py script. + * @param requestOption options for Request, which allows to send credentials + * and custom headers. + * @param onProgress Optional, progress callback function, fired periodically + * before the load is completed. + */ + constructor(modelUrl, loadOptions = {}, tfio = io) { + this.modelUrl = modelUrl; + this.loadOptions = loadOptions; + this.version = 'n/a'; + this.io = tfio; + if (loadOptions == null) { + this.loadOptions = {}; + } + this.resourceManager = new ResourceManager(); + } + findIOHandler() { + const path = this.modelUrl; + if (path.load != null) { + // Path is an IO Handler. + this.handler = path; + } + else if (this.loadOptions.requestInit != null) { + this.handler = this.io.browserHTTPRequest(path, this.loadOptions); + } + else { + const handlers = this.io.getLoadHandlers(path, this.loadOptions); + if (handlers.length === 0) { + // For backward compatibility: if no load handler can be found, + // assume it is a relative http path. 
+ handlers.push(this.io.browserHTTPRequest(path, this.loadOptions)); + } + else if (handlers.length > 1) { + throw new Error(`Found more than one (${handlers.length}) load handlers for ` + + `URL '${[path]}'`); + } + this.handler = handlers[0]; + } + } + /** + * Loads the model and weight files, construct the in memory weight map and + * compile the inference graph. + */ + load() { + this.findIOHandler(); + if (this.handler.load == null) { + throw new Error('Cannot proceed with model loading because the IOHandler provided ' + + 'does not have the `load` method implemented.'); + } + const loadResult = this.handler.load(); + if (isPromise(loadResult)) { + return loadResult.then(artifacts => { + if (artifacts.getWeightStream == null) { + return this.loadSync(artifacts); + } + return this.loadStreaming(artifacts); + }); + } + return this.loadSync(loadResult); + } + /** + * Synchronously construct the in memory weight map and + * compile the inference graph. + * + * @doc {heading: 'Models', subheading: 'Classes', ignoreCI: true} + */ + loadSync(artifacts) { + const weightMap = this.io.decodeWeights(artifacts.weightData, artifacts.weightSpecs); + return this.loadWithWeightMap(artifacts, weightMap); + } + async loadStreaming(artifacts) { + if (artifacts.getWeightStream == null) { + throw new Error('Model artifacts missing streamWeights function'); + } + const weightMap = await decodeWeightsStream(artifacts.getWeightStream(), artifacts.weightSpecs); + return this.loadWithWeightMap(artifacts, weightMap); + } + loadWithWeightMap(artifacts, weightMap) { + this.artifacts = artifacts; + const graph = this.artifacts.modelTopology; + let signature = this.artifacts.signature; + if (this.artifacts.userDefinedMetadata != null) { + const metadata = this.artifacts.userDefinedMetadata; + if (metadata.signature != null) { + signature = metadata.signature; + } + if (metadata.structuredOutputKeys != null) { + this.structuredOutputKeys = metadata.structuredOutputKeys; + } + } + 
this.signature = signature; + this.version = `${graph.versions.producer}.${graph.versions.minConsumer}`; + this.executor = new GraphExecutor(OperationMapper.Instance.transformGraph(graph, this.signature)); + this.executor.weightMap = this.convertTensorMapToTensorsMap(weightMap); + // Attach a model-level resourceManager to each executor to share resources, + // such as `HashTable`. + this.executor.resourceManager = this.resourceManager; + if (artifacts.modelInitializer != null && + artifacts.modelInitializer.node != null) { + const initializer = OperationMapper.Instance.transformGraph(artifacts.modelInitializer); + this.initializer = new GraphExecutor(initializer); + this.initializer.weightMap = this.executor.weightMap; + // Attach a model-level resourceManager to the initializer, the + // hashTables created from when executing the initializer will be stored + // in the resourceManager. + this.initializer.resourceManager = this.resourceManager; + this.initializerSignature = artifacts.initializerSignature; + } + return true; + } + /** + * Save the configuration and/or weights of the GraphModel. + * + * An `IOHandler` is an object that has a `save` method of the proper + * signature defined. The `save` method manages the storing or + * transmission of serialized data ("artifacts") that represent the + * model's topology and weights onto or via a specific medium, such as + * file downloads, local storage, IndexedDB in the web browser and HTTP + * requests to a server. TensorFlow.js provides `IOHandler` + * implementations for a number of frequently used saving mediums, such as + * `tf.io.browserDownloads` and `tf.io.browserLocalStorage`. See `tf.io` + * for more details. + * + * This method also allows you to refer to certain types of `IOHandler`s + * as URL-like string shortcuts, such as 'localstorage://' and + * 'indexeddb://'. 
+ * + * Example 1: Save `model`'s topology and weights to browser [local + * storage](https://developer.mozilla.org/en-US/docs/Web/API/Window/localStorage); + * then load it back. + * + * ```js + * const modelUrl = + * 'https://storage.googleapis.com/tfjs-models/savedmodel/mobilenet_v2_1.0_224/model.json'; + * const model = await tf.loadGraphModel(modelUrl); + * const zeros = tf.zeros([1, 224, 224, 3]); + * model.predict(zeros).print(); + * + * const saveResults = await model.save('localstorage://my-model-1'); + * + * const loadedModel = await tf.loadGraphModel('localstorage://my-model-1'); + * console.log('Prediction from loaded model:'); + * model.predict(zeros).print(); + * ``` + * + * @param handlerOrURL An instance of `IOHandler` or a URL-like, + * scheme-based string shortcut for `IOHandler`. + * @param config Options for saving the model. + * @returns A `Promise` of `SaveResult`, which summarizes the result of + * the saving, such as byte sizes of the saved artifacts for the model's + * topology and weight values. + * + * @doc {heading: 'Models', subheading: 'Classes', ignoreCI: true} + */ + async save(handlerOrURL, config) { + if (typeof handlerOrURL === 'string') { + const handlers = this.io.getSaveHandlers(handlerOrURL); + if (handlers.length === 0) { + throw new Error(`Cannot find any save handlers for URL '${handlerOrURL}'`); + } + else if (handlers.length > 1) { + throw new Error(`Found more than one (${handlers.length}) save handlers for ` + + `URL '${handlerOrURL}'`); + } + handlerOrURL = handlers[0]; + } + if (handlerOrURL.save == null) { + throw new Error('GraphModel.save() cannot proceed because the IOHandler ' + + 'provided does not have the `save` attribute defined.'); + } + return handlerOrURL.save(this.artifacts); + } + addStructuredOutputNames(outputTensors) { + if (this.structuredOutputKeys) { + const outputTensorsArray = outputTensors instanceof Tensor ? 
[outputTensors] : outputTensors; + const outputTensorMap = {}; + outputTensorsArray.forEach((outputTensor, i) => outputTensorMap[this.structuredOutputKeys[i]] = + outputTensor); + return outputTensorMap; + } + return outputTensors; + } + /** + * Execute the inference for the input tensors. + * + * @param input The input tensors, when there is single input for the model, + * inputs param should be a `tf.Tensor`. For models with multiple inputs, + * inputs params should be in either `tf.Tensor`[] if the input order is + * fixed, or otherwise NamedTensorMap format. + * + * For model with multiple inputs, we recommend you use NamedTensorMap as the + * input type, if you use `tf.Tensor`[], the order of the array needs to + * follow the + * order of inputNodes array. @see {@link GraphModel.inputNodes} + * + * You can also feed any intermediate nodes using the NamedTensorMap as the + * input type. For example, given the graph + * InputNode => Intermediate => OutputNode, + * you can execute the subgraph Intermediate => OutputNode by calling + * model.execute('IntermediateNode' : tf.tensor(...)); + * + * This is useful for models that uses tf.dynamic_rnn, where the intermediate + * state needs to be fed manually. + * + * For batch inference execution, the tensors for each input need to be + * concatenated together. For example with mobilenet, the required input shape + * is [1, 244, 244, 3], which represents the [batch, height, width, channel]. + * If we are provide a batched data of 100 images, the input tensor should be + * in the shape of [100, 244, 244, 3]. + * + * @param config Prediction configuration for specifying the batch size. + * Currently the batch size option is ignored for graph model. + * + * @returns Inference result tensors. If the model is converted and it + * originally had structured_outputs in tensorflow, then a NamedTensorMap + * will be returned matching the structured_outputs. 
If no structured_outputs + * are present, the output will be single `tf.Tensor` if the model has single + * output node, otherwise Tensor[]. + * + * @doc {heading: 'Models', subheading: 'Classes'} + */ + predict(inputs, config) { + const outputTensors = this.execute(inputs, this.outputNodes); + return this.addStructuredOutputNames(outputTensors); + } + /** + * Execute the inference for the input tensors in async fashion, use this + * method when your model contains control flow ops. + * + * @param input The input tensors, when there is single input for the model, + * inputs param should be a `tf.Tensor`. For models with mutliple inputs, + * inputs params should be in either `tf.Tensor`[] if the input order is + * fixed, or otherwise NamedTensorMap format. + * + * For model with multiple inputs, we recommend you use NamedTensorMap as the + * input type, if you use `tf.Tensor`[], the order of the array needs to + * follow the + * order of inputNodes array. @see {@link GraphModel.inputNodes} + * + * You can also feed any intermediate nodes using the NamedTensorMap as the + * input type. For example, given the graph + * InputNode => Intermediate => OutputNode, + * you can execute the subgraph Intermediate => OutputNode by calling + * model.execute('IntermediateNode' : tf.tensor(...)); + * + * This is useful for models that uses tf.dynamic_rnn, where the intermediate + * state needs to be fed manually. + * + * For batch inference execution, the tensors for each input need to be + * concatenated together. For example with mobilenet, the required input shape + * is [1, 244, 244, 3], which represents the [batch, height, width, channel]. + * If we are provide a batched data of 100 images, the input tensor should be + * in the shape of [100, 244, 244, 3]. + * + * @param config Prediction configuration for specifying the batch size. + * Currently the batch size option is ignored for graph model. + * + * @returns A Promise of inference result tensors. 
If the model is converted + * and it originally had structured_outputs in tensorflow, then a + * NamedTensorMap will be returned matching the structured_outputs. If no + * structured_outputs are present, the output will be single `tf.Tensor` if + * the model has single output node, otherwise Tensor[]. + * + * @doc {heading: 'Models', subheading: 'Classes'} + */ + async predictAsync(inputs, config) { + const outputTensors = await this.executeAsync(inputs, this.outputNodes); + return this.addStructuredOutputNames(outputTensors); + } + normalizeInputs(inputs) { + var _a; + if (!(inputs instanceof Tensor) && !Array.isArray(inputs)) { + // The input is already a NamedTensorMap. + const signatureInputs = (_a = this.signature) === null || _a === void 0 ? void 0 : _a.inputs; + if (signatureInputs != null) { + for (const input in signatureInputs) { + const tensor = signatureInputs[input]; + if (tensor.resourceId != null) { + inputs[input] = this.resourceIdToCapturedInput[tensor.resourceId]; + } + } + } + return inputs; + } + inputs = Array.isArray(inputs) ? inputs : [inputs]; + const numCapturedInputs = Object.keys(this.resourceIdToCapturedInput).length; + if (inputs.length + numCapturedInputs !== this.inputNodes.length) { + throw new Error(`Input tensor count mismatch, the graph model has ${this.inputNodes.length - + numCapturedInputs} non-resource placeholders, while there are ${inputs.length} input tensors provided.`); + } + let inputIndex = 0; + return this.inputNodes.reduce((map, inputName) => { + var _a, _b, _c; + const resourceId = (_c = (_b = (_a = this.signature) === null || _a === void 0 ? void 0 : _a.inputs) === null || _b === void 0 ? void 0 : _b[inputName]) === null || _c === void 0 ? 
void 0 : _c.resourceId; + if (resourceId != null) { + map[inputName] = this.resourceIdToCapturedInput[resourceId]; + } + else { + map[inputName] = inputs[inputIndex++]; + } + return map; + }, {}); + } + normalizeOutputs(outputs) { + outputs = outputs || this.outputNodes; + return !Array.isArray(outputs) ? [outputs] : outputs; + } + executeInitializerGraph() { + if (this.initializer == null) { + return []; + } + if (this.initializerSignature == null) { + return this.initializer.execute({}, []); + } + else { + return this.initializer.execute({}, Object.keys(this.initializerSignature.outputs)); + } + } + async executeInitializerGraphAsync() { + if (this.initializer == null) { + return []; + } + if (this.initializerSignature == null) { + return this.initializer.executeAsync({}, []); + } + else { + return this.initializer.executeAsync({}, Object.keys(this.initializerSignature.outputs)); + } + } + setResourceIdToCapturedInput(outputs) { + this.resourceIdToCapturedInput = {}; + if (this.initializerSignature) { + const signatureOutputs = this.initializerSignature.outputs; + const outputNames = Object.keys(signatureOutputs); + for (let i = 0; i < outputNames.length; i++) { + const outputName = outputNames[i]; + const tensorInfo = signatureOutputs[outputName]; + this.resourceIdToCapturedInput[tensorInfo.resourceId] = outputs[i]; + } + } + } + /** + * Executes inference for the model for given input tensors. + * @param inputs tensor, tensor array or tensor map of the inputs for the + * model, keyed by the input node names. + * @param outputs output node name from the TensorFlow model, if no + * outputs are specified, the default outputs of the model would be used. + * You can inspect intermediate nodes of the model by adding them to the + * outputs array. + * + * @returns A single tensor if provided with a single output or no outputs + * are provided and there is only one default output, otherwise return a + * tensor array. 
The order of the tensor array is the same as the outputs + * if provided, otherwise the order of outputNodes attribute of the model. + * + * @doc {heading: 'Models', subheading: 'Classes'} + */ + execute(inputs, outputs) { + if (this.resourceIdToCapturedInput == null) { + this.setResourceIdToCapturedInput(this.executeInitializerGraph()); + } + inputs = this.normalizeInputs(inputs); + outputs = this.normalizeOutputs(outputs); + const result = this.executor.execute(inputs, outputs); + return result.length > 1 ? result : result[0]; + } + /** + * Executes inference for the model for given input tensors in async + * fashion, use this method when your model contains control flow ops. + * @param inputs tensor, tensor array or tensor map of the inputs for the + * model, keyed by the input node names. + * @param outputs output node name from the TensorFlow model, if no outputs + * are specified, the default outputs of the model would be used. You can + * inspect intermediate nodes of the model by adding them to the outputs + * array. + * + * @returns A Promise of single tensor if provided with a single output or + * no outputs are provided and there is only one default output, otherwise + * return a tensor map. + * + * @doc {heading: 'Models', subheading: 'Classes'} + */ + async executeAsync(inputs, outputs) { + if (this.resourceIdToCapturedInput == null) { + this.setResourceIdToCapturedInput(await this.executeInitializerGraphAsync()); + } + inputs = this.normalizeInputs(inputs); + outputs = this.normalizeOutputs(outputs); + const result = await this.executor.executeAsync(inputs, outputs); + return result.length > 1 ? result : result[0]; + } + /** + * Get intermediate tensors for model debugging mode (flag + * KEEP_INTERMEDIATE_TENSORS is true). 
+ * + * @doc {heading: 'Models', subheading: 'Classes'} + */ + getIntermediateTensors() { + return this.executor.getIntermediateTensors(); + } + /** + * Dispose intermediate tensors for model debugging mode (flag + * KEEP_INTERMEDIATE_TENSORS is true). + * + * @doc {heading: 'Models', subheading: 'Classes'} + */ + disposeIntermediateTensors() { + this.executor.disposeIntermediateTensors(); + } + convertTensorMapToTensorsMap(map) { + return Object.keys(map).reduce((newMap, key) => { + newMap[key] = [map[key]]; + return newMap; + }, {}); + } + /** + * Releases the memory used by the weight tensors and resourceManager. + * + * @doc {heading: 'Models', subheading: 'Classes'} + */ + dispose() { + this.executor.dispose(); + if (this.initializer) { + this.initializer.dispose(); + if (this.resourceIdToCapturedInput) { + dispose(this.resourceIdToCapturedInput); + } + } + this.resourceManager.dispose(); + } + } + /** + * Load a graph model given a URL to the model definition. + * + * Example of loading MobileNetV2 from a URL and making a prediction with a + * zeros input: + * + * ```js + * const modelUrl = + * 'https://storage.googleapis.com/tfjs-models/savedmodel/mobilenet_v2_1.0_224/model.json'; + * const model = await tf.loadGraphModel(modelUrl); + * const zeros = tf.zeros([1, 224, 224, 3]); + * model.predict(zeros).print(); + * ``` + * + * Example of loading MobileNetV2 from a TF Hub URL and making a prediction + * with a zeros input: + * + * ```js + * const modelUrl = + * 'https://tfhub.dev/google/imagenet/mobilenet_v2_140_224/classification/2'; + * const model = await tf.loadGraphModel(modelUrl, {fromTFHub: true}); + * const zeros = tf.zeros([1, 224, 224, 3]); + * model.predict(zeros).print(); + * ``` + * @param modelUrl The url or an `io.IOHandler` that loads the model. + * @param options Options for the HTTP request, which allows to send + * credentials + * and custom headers. 
+ * + * @doc {heading: 'Models', subheading: 'Loading'} + */ + async function loadGraphModel(modelUrl, options = {}, tfio = io) { + if (modelUrl == null) { + throw new Error('modelUrl in loadGraphModel() cannot be null. Please provide a url ' + + 'or an IOHandler that loads the model'); + } + if (options == null) { + options = {}; + } + if (options.fromTFHub && typeof modelUrl === 'string') { + modelUrl = getTFHubUrl(modelUrl); + } + const model = new GraphModel(modelUrl, options, tfio); + await model.load(); + return model; + } + /** + * Load a graph model given a synchronous IO handler with a 'load' method. + * + * @param modelSource The `io.IOHandlerSync` that loads the model, or the + * `io.ModelArtifacts` that encode the model, or a tuple of + * `[io.ModelJSON, ArrayBuffer]` of which the first element encodes the + * model and the second contains the weights. + * + * @doc {heading: 'Models', subheading: 'Loading'} + */ + function loadGraphModelSync(modelSource) { + if (modelSource == null) { + throw new Error('modelUrl in loadGraphModelSync() cannot be null. 
Please provide ' + + 'model artifacts or an IOHandler that loads the model'); + } + let ioHandler; + if (modelSource instanceof Array) { + const [modelJSON, weights] = modelSource; + if (!modelJSON) { + throw new Error('modelJSON must be the first element of the array'); + } + if (!weights || !(weights instanceof ArrayBuffer)) { + throw new Error('An ArrayBuffer of weights must be the second element of' + + ' the array'); + } + if (!('modelTopology' in modelJSON)) { + throw new Error('Model JSON is missing \'modelTopology\''); + } + if (!('weightsManifest' in modelJSON)) { + throw new Error('Model JSON is missing \'weightsManifest\''); + } + const weightSpecs = getWeightSpecs(modelJSON.weightsManifest); + const modelArtifacts = getModelArtifactsForJSONSync(modelJSON, weightSpecs, weights); + ioHandler = fromMemorySync(modelArtifacts); + } + else if ('load' in modelSource) { + // Then modelSource is already an IOHandlerSync. + ioHandler = modelSource; + } + else if ('modelTopology' in modelSource && 'weightSpecs' in modelSource && + 'weightData' in modelSource) { + // modelSource is of type ModelArtifacts. + ioHandler = fromMemorySync(modelSource); + } + else { + throw new Error('Unknown model format'); + } + const model = new GraphModel(ioHandler); + model.load(); + return model; + } + function getTFHubUrl(modelUrl) { + if (!modelUrl.endsWith('/')) { + modelUrl = (modelUrl) + '/'; + } + return `${modelUrl}${DEFAULT_MODEL_NAME}${TFHUB_SEARCH_PARAM}`; + } + + /** @license See the LICENSE file. */ + // This code is auto-generated, do not modify this file! + const version$5 = '4.22.0'; + + /** + * @license + * Copyright 2018 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + + /** + * @license + * Copyright 2018 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * ============================================================================= + */ + /** + * Apply a mapping function to a nested structure in a recursive manner. + * + * The result of the mapping is an object with the same nested structure (i.e., + * of arrays and dicts) as the input, except that some subtrees are replaced, + * according to the results of the mapping function. + * + * Mappings are memoized. Thus, if the nested structure contains the same + * object in multiple positions, the output will contain the same mapped object + * in those positions. Cycles are not supported, however. + * + * @param input: The object to which to apply the mapping function. + * @param mapFn: A function that expects a single node of the object tree, and + * returns a `DeepMapResult`. 
The `DeepMapResult` either provides a + * replacement value for that node (i.e., replacing the subtree), or indicates + * that the node should be processed recursively. + */ + function deepMap(input, mapFn) { + return deepMapInternal(input, mapFn); + } + /** + * @param seen: A Map of known object mappings (i.e., memoized results of + * `mapFn()`) + * @param containedIn: An set containing objects on the reference path currently + * being processed (used to detect cycles). + */ + function deepMapInternal(input, mapFn, seen = new Map(), containedIn = new Set()) { + if (input == null) { + return null; + } + if (typeof Blob === 'function' && input instanceof Blob) { + return input.slice(); + } + if (containedIn.has(input)) { + throw new Error('Circular references are not supported.'); + } + if (seen.has(input)) { + return seen.get(input); + } + const result = mapFn(input); + if (result.recurse && result.value !== null) { + throw new Error('A deep map function may not return both a value and recurse=true.'); + } + if (!result.recurse) { + seen.set(input, result.value); + return result.value; + } + else if (isIterable(input)) { + // tslint:disable-next-line:no-any + const mappedIterable = Array.isArray(input) ? [] : {}; + containedIn.add(input); + for (const k in input) { + const child = input[k]; + const childResult = deepMapInternal(child, mapFn, seen, containedIn); + mappedIterable[k] = childResult; + } + containedIn.delete(input); + if (input.__proto__) { + mappedIterable.__proto__ = input.__proto__; + } + return mappedIterable; + } + else { + throw new Error(`Can't recurse into non-iterable type: ${input}`); + } + } + // TODO(soergel, kangyizhang) Reconsider naming of deepZip() to avoid confusion + // with zip() + /** + * Zip nested structures together in a recursive manner. + * + * This has the effect of transposing or pivoting data, e.g. converting it from + * a row-major representation to a column-major representation. 
+ * + * For example, `deepZip([{a: 1, b: 2}, {a: 3, b: 4}])` returns + * `{a: [1, 3], b: [2, 4]}`. + * + * The inputs should all have the same nested structure (i.e., of arrays and + * dicts). The result is a single object with the same nested structure, where + * the leaves are arrays collecting the values of the inputs at that location + * (or, optionally, the result of a custom function applied to those arrays). + * + * @param inputs: An array of the objects to zip together. + * @param zipFn: (optional) A function that expects an array of elements at a + * single node of the object tree, and returns a `DeepMapResult`. The + * `DeepMapResult` either provides a result value for that node (i.e., + * representing the subtree), or indicates that the node should be processed + * recursively. The default zipFn recurses as far as possible and places + * arrays at the leaves. + */ + function deepZip(inputs, zipFn = zipToList) { + return deepZipInternal(inputs, zipFn); + } + /** + * @param containedIn: An set containing objects on the reference path currently + * being processed (used to detect cycles). + */ + function deepZipInternal(inputs, zipFn, containedIn = new Set()) { + // The recursion follows the structure of input 0; it's assumed that all the + // other inputs have the same structure. + const input = inputs[0]; + if (containedIn.has(input)) { + throw new Error('Circular references are not supported.'); + } + const result = zipFn(inputs); + if (result.recurse && result.value !== null) { + throw new Error('A deep zip function may not return both a value and recurse=true.'); + } + if (!result.recurse) { + return result.value; + } + else if (isIterable(input)) { + // tslint:disable-next-line:no-any + const mappedIterable = Array.isArray(input) ? 
[] : {}; + containedIn.add(input); + for (const k in input) { + const children = inputs.map(x => x[k]); + const childResult = deepZipInternal(children, zipFn, containedIn); + mappedIterable[k] = childResult; + } + containedIn.delete(input); + return mappedIterable; + } + else { + throw new Error(`Can't recurse into non-iterable type: ${input}`); + } + } + // tslint:disable-next-line:no-any + function zipToList(x) { + if (x === null) { + return null; + } + // TODO(soergel): validate array type? + if (isIterable(x[0])) { + return { value: null, recurse: true }; + } + else { + return { value: x, recurse: false }; + } + } + /** + * Apply an async mapping function to a nested structure in a recursive manner. + * + * This first creates a nested structure of Promises, and then awaits all of + * those, resulting in a single Promise for a resolved nested structure. + * + * The result of the mapping is an object with the same nested structure (i.e., + * of arrays and dicts) as the input, except that some subtrees are replaced, + * according to the results of the mapping function. + * + * Mappings are memoized. Thus, if the nested structure contains the same + * object in multiple positions, the output will contain the same mapped object + * in those positions. Cycles are not supported, however. + * + * @param input: The object to which to apply the mapping function. + * @param mapFn: A function that expects a single node of the object tree, and + * returns a `DeepMapAsyncResult`. The `DeepMapAsyncResult` either provides + * a `Promise` for a replacement value for that node (i.e., replacing the + * subtree), or indicates that the node should be processed recursively. Note + * that the decision whether or not to recurse must be made immediately; only + * the mapped value may be promised. + */ + async function deepMapAndAwaitAll(input, mapFn) { + const seen = new Map(); + // First do a normal deepMap, collecting Promises in 'seen' as a side effect. 
+ deepMapInternal(input, mapFn, seen); + // Replace the Promises in 'seen' in place. + // Note TypeScript provides no async map iteration, and regular map iteration + // is broken too, so sadly we have to do Array.from() to make it work. + // (There's no advantage to Promise.all(), and that would be tricky anyway.) + for (const key of Array.from(seen.keys())) { + const value = seen.get(key); + if (isPromise(value)) { + const mappedValue = await value; + seen.set(key, mappedValue); + } + } + // Normal deepMap again, this time filling in the resolved values. + // It's unfortunate that we have to do two passes. + // TODO(soergel): test performance and think harder about a fast solution. + const result = deepMapInternal(input, mapFn, seen); + return result; + } + /** + * Determine whether the argument is iterable. + * + * @returns true if the argument is an array or any non-Tensor object. + */ + // tslint:disable-next-line:no-any + function isIterable(obj) { + let isTextDecoder = false; + if (env().get('IS_BROWSER')) { + isTextDecoder = obj instanceof TextDecoder; + } + else { + // tslint:disable-next-line:no-require-imports + const { StringDecoder } = require('string_decoder'); + isTextDecoder = obj instanceof StringDecoder; + } + return obj != null && (!ArrayBuffer.isView(obj)) && + (Array.isArray(obj) || + (typeof obj === 'object' && !(obj instanceof Tensor) && + !(obj instanceof Promise) && !isTextDecoder)); + } + /** + * Determine whether the argument can be converted to Tensor. + * + * Tensors, primitives, arrays, and TypedArrays all qualify; anything else does + * not. + * + * @returns true if the argument can be converted to Tensor. + */ + // tslint:disable-next-line:no-any + function canTensorify(obj) { + return obj == null || isPrimitive(obj) || Array.isArray(obj) || + (typeof obj === 'object' && (obj instanceof Tensor)) || + isTypedArray(obj); + } + /** + * Returns true if the given `value` is a primitive type. Otherwise returns + * false. 
This is equivalant to node util.isPrimitive + */ + function isPrimitive(value) { + return (value === null || + (typeof value !== 'object' && typeof value !== 'function')); + } + + /** + * @license + * Copyright 2018 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * ============================================================================= + */ + function deepClone(container) { + return deepMap(container, cloneIfTensor); + } + // tslint:disable-next-line: no-any + function cloneIfTensor(item) { + if (item instanceof Tensor) { + return ({ value: item.clone(), recurse: false }); + } + else if (isIterable(item)) { + return { value: null, recurse: true }; + } + else { + return { value: item, recurse: false }; + } + } + + /** + * @license + * Copyright 2018 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * ============================================================================= + */ + /** + * A ring buffer, providing O(1) FIFO, LIFO, and related operations. + */ + class RingBuffer { + /** + * Constructs a `RingBuffer`. + * @param capacity The number of items that the buffer can accomodate. + */ + constructor(capacity) { + this.capacity = capacity; + // Note we store the indices in the range 0 <= index < 2*capacity. + // This allows us to distinguish the full from the empty case. + // See https://www.snellman.net/blog/archive/2016-12-13-ring-buffers/ + this.begin = 0; // inclusive + this.end = 0; // exclusive + if (capacity == null) { + throw new RangeError('Can\'t create a ring buffer of unknown capacity.'); + } + if (capacity < 1) { + throw new RangeError('Can\'t create ring buffer of capacity < 1.'); + } + this.data = new Array(capacity); + this.doubledCapacity = 2 * capacity; + } + /** + * Map any index into the range 0 <= index < 2*capacity. + */ + wrap(index) { + // don't trust % on negative numbers + while (index < 0) { + index += this.doubledCapacity; + } + return index % this.doubledCapacity; + } + get(index) { + if (index < 0) { + throw new RangeError('Can\'t get item at a negative index.'); + } + return this.data[index % this.capacity]; + } + set(index, value) { + if (index < 0) { + throw new RangeError('Can\'t set item at a negative index.'); + } + this.data[index % this.capacity] = value; + } + /** + * Returns the current number of items in the buffer. + */ + length() { + let length = this.end - this.begin; + if (length < 0) { + length = this.doubledCapacity + length; + } + return length; + } + /** + * Reports whether the buffer is full. + * @returns true if the number of items in the buffer equals its capacity, and + * false otherwise. + */ + isFull() { + return this.length() === this.capacity; + } + /** + * Reports whether the buffer is empty. + * @returns true if the number of items in the buffer equals zero, and + * false otherwise. 
+ */ + isEmpty() { + return this.length() === 0; + } + /** + * Adds an item to the end of the buffer. + */ + push(value) { + if (this.isFull()) { + throw new RangeError('Ring buffer is full.'); + } + this.set(this.end, value); + this.end = this.wrap(this.end + 1); + } + /** + * Adds many items to the end of the buffer, in order. + */ + pushAll(values) { + for (const value of values) { + this.push(value); + } + } + /** + * Removes and returns the last item in the buffer. + */ + pop() { + if (this.isEmpty()) { + throw new RangeError('Ring buffer is empty.'); + } + this.end = this.wrap(this.end - 1); + const result = this.get(this.end); + this.set(this.end, undefined); + return result; + } + /** + * Adds an item to the beginning of the buffer. + */ + unshift(value) { + if (this.isFull()) { + throw new RangeError('Ring buffer is full.'); + } + this.begin = this.wrap(this.begin - 1); + this.set(this.begin, value); + } + /** + * Removes and returns the first item in the buffer. + */ + shift() { + if (this.isEmpty()) { + throw new RangeError('Ring buffer is empty.'); + } + const result = this.get(this.begin); + this.set(this.begin, undefined); + this.begin = this.wrap(this.begin + 1); + return result; + } + /** + * Removes and returns a specific item in the buffer, and moves the last item + * to the vacated slot. This is useful for implementing a shuffling stream. + * Note that this operation necessarily scrambles the original order. + * + * @param relativeIndex: the index of the item to remove, relative to the + * first item in the buffer (e.g., hiding the ring nature of the underlying + * storage). + */ + shuffleExcise(relativeIndex) { + if (this.isEmpty()) { + throw new RangeError('Ring buffer is empty.'); + } + const index = this.wrap(this.begin + relativeIndex); + const result = this.get(index); + this.set(index, this.pop()); + return result; + } + } + + /** + * @license + * Copyright 2018 Google LLC. All Rights Reserved. 
+ * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * ============================================================================= + */ + class GrowingRingBuffer extends RingBuffer { + /** + * Constructs a `GrowingRingBuffer`. + */ + constructor() { + super(GrowingRingBuffer.INITIAL_CAPACITY); + } + isFull() { + return false; + } + push(value) { + if (super.isFull()) { + this.expand(); + } + super.push(value); + } + unshift(value) { + if (super.isFull()) { + this.expand(); + } + super.unshift(value); + } + /** + * Doubles the capacity of the buffer. + */ + expand() { + const newCapacity = this.capacity * 2; + const newData = new Array(newCapacity); + const len = this.length(); + // Rotate the buffer to start at index 0 again, since we can't just + // allocate more space at the end. + for (let i = 0; i < len; i++) { + newData[i] = this.get(this.wrap(this.begin + i)); + } + this.data = newData; + this.capacity = newCapacity; + this.doubledCapacity = 2 * this.capacity; + this.begin = 0; + this.end = len; + } + } + GrowingRingBuffer.INITIAL_CAPACITY = 32; + + /** + * @license + * Copyright 2018 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * ============================================================================= + */ + // Here we implement a simple asynchronous iterator. + // This lets us avoid using either third-party stream libraries or + // recent TypeScript language support requiring polyfills. + /** + * Create a `LazyIterator` from an array of items. + */ + function iteratorFromItems(items) { + return new ArrayIterator(items); + } + /** + * Create a `LazyIterator` of incrementing integers. + */ + function iteratorFromIncrementing(start) { + let i = start; + return iteratorFromFunction(() => ({ value: i++, done: false })); + } + /** + * Create a `LazyIterator` from a function. + * + * ```js + * let i = -1; + * const func = () => + * ++i < 5 ? {value: i, done: false} : {value: null, done: true}; + * const iter = tf.data.iteratorFromFunction(func); + * await iter.forEachAsync(e => console.log(e)); + * ``` + * + * @param func A function that produces data on each call. + */ + function iteratorFromFunction(func) { + return new FunctionCallIterator(func); + } + /** + * Create a `LazyIterator` by concatenating underlying streams, which are + * themselves provided as a stream. + * + * This can also be thought of as a "stream flatten" operation. + * + * @param baseIterators A stream of streams to be concatenated. + * @param baseErrorHandler An optional function that can intercept `Error`s + * raised during a `next()` call on the base stream. 
This function can decide + * whether the error should be propagated, whether the error should be + * ignored, or whether the base stream should be terminated. + */ + function iteratorFromConcatenated(baseIterators, baseErrorHandler) { + return new ChainedIterator(baseIterators, baseErrorHandler); + } + /** + * Create a `LazyIterator` by concatenating streams produced by calling a + * stream-generating function a given number of times. + * + * Since a `LazyIterator` is read-once, it cannot be repeated, but this + * function can be used to achieve a similar effect: + * + * LazyIterator.ofConcatenatedFunction(() => new MyIterator(), 6); + * + * @param iteratorFunc: A function that produces a new stream on each call. + * @param count: The number of times to call the function. + * @param baseErrorHandler An optional function that can intercept `Error`s + * raised during a `next()` call on the base stream. This function can decide + * whether the error should be propagated, whether the error should be + * ignored, or whether the base stream should be terminated. + */ + function iteratorFromConcatenatedFunction(iteratorFunc, count, baseErrorHandler) { + return iteratorFromConcatenated(iteratorFromFunction(iteratorFunc).take(count), baseErrorHandler); + } + /** + * Create a `LazyIterator` by zipping together an array, dict, or nested + * structure of `LazyIterator`s (and perhaps additional constants). + * + * The underlying streams must provide elements in a consistent order such + * that they correspond. + * + * Typically, the underlying streams should have the same number of + * elements. If they do not, the behavior is determined by the + * `mismatchMode` argument. + * + * The nested structure of the `iterators` argument determines the + * structure of elements in the resulting iterator. + * + * @param iterators: An array or object containing LazyIterators at the + * leaves. 
+ * @param mismatchMode: Determines what to do when one underlying iterator + * is exhausted before the others. `ZipMismatchMode.FAIL` (the default) + * causes an error to be thrown in this case. `ZipMismatchMode.SHORTEST` + * causes the zipped iterator to terminate with the furst underlying + * streams, so elements remaining on the longer streams are ignored. + * `ZipMismatchMode.LONGEST` causes the zipped stream to continue, filling + * in nulls for the exhausted streams, until all streams are exhausted. + */ + function iteratorFromZipped(iterators, mismatchMode = ZipMismatchMode.FAIL) { + return new ZipIterator(iterators, mismatchMode); + } + /** + * An asynchronous iterator, providing lazy access to a potentially + * unbounded stream of elements. + * + * Iterator can be obtained from a dataset: + * `const iter = await dataset.iterator();` + */ + class LazyIterator { + /** + * Collect all remaining elements of a bounded stream into an array. + * Obviously this will succeed only for small streams that fit in memory. + * Useful for testing. + * + * @returns A Promise for an array of stream elements, which will resolve + * when the stream is exhausted. + */ + async toArray() { + const result = []; + let x = await this.next(); + while (!x.done) { + result.push(x.value); + x = await this.next(); + } + return result; + } + /** + * Collect all elements of this dataset into an array with prefetching 100 + * elements. This is useful for testing, because the prefetch changes the + * order in which the Promises are resolved along the processing pipeline. + * This may help expose bugs where results are dependent on the order of + * Promise resolution rather than on the logical order of the stream (i.e., + * due to hidden mutable state). + * + * @returns A Promise for an array of stream elements, which will resolve + * when the stream is exhausted. 
+ */ + async toArrayForTest() { + const stream = this.prefetch(100); + const result = []; + let x = await stream.next(); + while (!x.done) { + result.push(x.value); + x = await stream.next(); + } + return result; + } + /** + * Draw items from the stream until it is exhausted. + * + * This can be useful when the stream has side effects but no output. In + * that case, calling this function guarantees that the stream will be + * fully processed. + */ + async resolveFully() { + let x = await this.next(); + while (!x.done) { + x = await this.next(); + } + } + /** + * Draw items from the stream until it is exhausted, or a predicate fails. + * + * This can be useful when the stream has side effects but no output. In + * that case, calling this function guarantees that the stream will be + * fully processed. + */ + async resolveWhile(predicate) { + let x = await this.next(); + let shouldContinue = predicate(x.value); + while ((!x.done) && shouldContinue) { + x = await this.next(); + shouldContinue = predicate(x.value); + } + } + /** + * Handles errors thrown on this stream using a provided handler function. + * + * @param handler A function that handles any `Error` thrown during a `next()` + * call and returns true if the stream should continue (dropping the failed + * call) or false if the stream should quietly terminate. If the handler + * itself throws (or rethrows) an `Error`, that will be propagated. + * + * @returns A `LazyIterator` of elements passed through from upstream, + * possibly filtering or terminating on upstream `next()` calls that + * throw an `Error`. + */ + handleErrors(handler) { + return new ErrorHandlingLazyIterator(this, handler); + } + // TODO(soergel): Implement reduce() etc. + /** + * Filters this stream according to `predicate`. + * + * @param predicate A function mapping a stream element to a boolean or a + * `Promise` for one. + * + * @returns A `LazyIterator` of elements for which the predicate was true. 
+ */ + filter(predicate) { + return new FilterIterator(this, predicate); + } + /** + * Maps this stream through a 1-to-1 transform. + * + * @param transform A function mapping a stream element to a transformed + * element. + * + * @returns A `LazyIterator` of transformed elements. + */ + map(transform) { + return new MapIterator(this, transform); + } + /** + * Maps this stream through an async 1-to-1 transform. + * + * @param transform A function mapping a stream element to a `Promise` for a + * transformed stream element. + * + * @returns A `LazyIterator` of transformed elements. + */ + mapAsync(transform) { + return new AsyncMapIterator(this, transform); + } + /** + * Maps this stream through a 1-to-1 transform, forcing serial execution. + * + * @param transform A function mapping a stream element to a transformed + * element. + * + * @returns A `LazyIterator` of transformed elements. + */ + serialMapAsync(transform) { + return new AsyncMapIterator(this, transform).serial(); + } + /** + * Maps this stream through a 1-to-many transform. + * + * @param transform A function mapping a stream element to an array of + * transformed elements. + * + * @returns A `DataStream` of transformed elements. + */ + flatmap(transform) { + return new FlatmapIterator(this, transform); + } + /** + * Apply a function to every element of the stream. + * + * @param f A function to apply to each stream element. + */ + async forEachAsync(f) { + return this.map(f).resolveFully(); + } + /** + * Apply a function to every element of the stream, forcing serial execution. + * + * @param f A function to apply to each stream element. Should return 'true' + * to indicate that the stream should continue, or 'false' to cause it to + * terminate. + */ + async serialForEach(f) { + return this.serialMapAsync(f).resolveWhile(x => (x === true)); + } + /** + * Groups elements into batches, represented as arrays of elements. 
+ * + * We can think of the elements of this iterator as 'rows' (even if they are + * nested structures). By the same token, consecutive values for a given + * key within the elements form a 'column'. This matches the usual sense of + * 'row' and 'column' when processing tabular data (e.g., parsing a CSV). + * + * Thus, "Row-major" means that the resulting batch is simply a collection of + * rows: `[row1, row2, row3, ...]`. This is contrast to the column-major + * form, which is needed for vectorized computation. + * + * @param batchSize The number of elements desired per batch. + * @param smallLastBatch Whether to emit the final batch when it has fewer + * than batchSize elements. Default true. + * @returns A `LazyIterator` of batches of elements, represented as arrays + * of the original element type. + */ + rowMajorBatch(batchSize, smallLastBatch = true) { + return new RowMajorBatchIterator(this, batchSize, smallLastBatch); + } + /** + * Groups elements into batches, represented in column-major form. + * + * We can think of the elements of this iterator as 'rows' (even if they are + * nested structures). By the same token, consecutive values for a given + * key within the elements form a 'column'. This matches the usual sense of + * 'row' and 'column' when processing tabular data (e.g., parsing a CSV). + * + * Thus, "column-major" means that the resulting batch is a (potentially + * nested) structure representing the columns. Each column entry, then, + * contains a collection of the values found in that column for a range of + * input elements. This representation allows for vectorized computation, in + * contrast to the row-major form. + * + * The inputs should all have the same nested structure (i.e., of arrays and + * dicts). The result is a single object with the same nested structure, + * where the leaves are arrays collecting the values of the inputs at that + * location (or, optionally, the result of a custom function applied to those + * arrays). 
+ * + * @param batchSize The number of elements desired per batch. + * @param smallLastBatch Whether to emit the final batch when it has fewer + * than batchSize elements. Default true. + * @param zipFn: (optional) A function that expects an array of elements at a + * single node of the object tree, and returns a `DeepMapResult`. The + * `DeepMapResult` either provides a result value for that node (i.e., + * representing the subtree), or indicates that the node should be processed + * recursively. The default zipFn recurses as far as possible and places + * arrays at the leaves. + * @returns A `LazyIterator` of batches of elements, represented as an object + * with collections at the leaves. + */ + columnMajorBatch(batchSize, smallLastBatch = true, + // tslint:disable-next-line:no-any + zipFn = zipToList) { + // First collect the desired number of input elements as a row-major batch. + const rowBatches = this.rowMajorBatch(batchSize, smallLastBatch); + // Now 'rotate' or 'pivot' the data, collecting all values from each column + // in the batch (i.e., for each key within the elements) into an array. + return rowBatches.map(x => deepZip(x, zipFn)); + } + /** + * Concatenate this `LazyIterator` with another. + * + * @param iterator A `LazyIterator` to be concatenated onto this one. + * @param baseErrorHandler An optional function that can intercept `Error`s + * raised during a `next()` call on the base stream. This function can + * decide whether the error should be propagated, whether the error should + * be ignored, or whether the base stream should be terminated. + * @returns A `LazyIterator`. + */ + concatenate(iterator, baseErrorHandler) { + return new ChainedIterator(iteratorFromItems([this, iterator]), baseErrorHandler); + } + /** + * Limits this stream to return at most `count` items. + * + * @param count The maximum number of items to provide from the stream. If + * a negative or undefined value is given, the entire stream is returned + * unaltered. 
+ */ + take(count) { + if (count < 0 || count == null) { + return this; + } + return new TakeIterator(this, count); + } + /** + * Skips the first `count` items in this stream. + * + * @param count The number of items to skip. If a negative or undefined + * value is given, the entire stream is returned unaltered. + */ + skip(count) { + if (count < 0 || count == null) { + return this; + } + return new SkipIterator(this, count); + } + /** + * Prefetch the first `bufferSize` items in this stream. + * + * Note this prefetches Promises, but makes no guarantees about when those + * Promises resolve. + * + * @param bufferSize: An integer specifying the number of elements to be + * prefetched. + */ + prefetch(bufferSize) { + return new PrefetchIterator(this, bufferSize); + } + // TODO(soergel): deep sharded shuffle, where supported + /** + * Randomly shuffles the elements of this stream. + * + * @param bufferSize: An integer specifying the number of elements from + * this stream from which the new stream will sample. + * @param seed: (Optional.) An integer specifying the random seed that + * will be used to create the distribution. + */ + shuffle(windowSize, seed) { + return new ShuffleIterator(this, windowSize, seed); + } + /** + * Force an iterator to execute serially: each next() call will await the + * prior one, so that they cannot execute concurrently. + */ + serial() { + return new SerialIterator(this); + } + } + // ============================================================================ + // The following private classes serve to implement the chainable methods + // on LazyIterator. Unfortunately they can't be placed in separate files, + // due to resulting trouble with circular imports. 
+ // ============================================================================ + // Iterators that just extend LazyIterator directly + // ============================================================================ + class ArrayIterator extends LazyIterator { + constructor(items) { + super(); + this.items = items; + this.trav = 0; + } + summary() { + return `Array of ${this.items.length} items`; + } + async next() { + if (this.trav >= this.items.length) { + return { value: null, done: true }; + } + const item = this.items[this.trav]; + this.trav++; + return { value: deepClone(item), done: false }; + } + } + class FunctionCallIterator extends LazyIterator { + constructor(nextFn) { + super(); + this.nextFn = nextFn; + } + summary() { + return `Function call`; + } + async next() { + try { + return this.nextFn(); + } + catch (e) { + // Modify the error message but leave the stack trace intact + e.message = + `Error thrown while iterating through a dataset: ${e.message}`; + throw e; + } + } + } + class SerialIterator extends LazyIterator { + constructor(upstream) { + super(); + this.upstream = upstream; + this.lastRead = Promise.resolve({ value: null, done: false }); + } + summary() { + return `${this.upstream.summary()} -> Serial`; + } + async next() { + // This sets this.lastRead to a new Promise right away, as opposed to + // saying `await this.lastRead; this.lastRead = this.serialNext();` which + // would not work because this.nextRead would be updated only after the + // promise resolves. + this.lastRead = this.lastRead.then(() => this.serialNext()); + return this.lastRead; + } + async serialNext() { + return this.upstream.next(); + } + } + class SkipIterator extends LazyIterator { + constructor(upstream, maxCount) { + super(); + this.upstream = upstream; + this.maxCount = maxCount; + // Local state that should not be clobbered by out-of-order execution. 
+ this.count = 0; + this.lastRead = Promise.resolve({ value: null, done: false }); + } + summary() { + return `${this.upstream.summary()} -> Skip`; + } + async next() { + // This sets this.lastRead to a new Promise right away, as opposed to + // saying `await this.lastRead; this.lastRead = this.serialNext();` which + // would not work because this.nextRead would be updated only after the + // promise resolves. + this.lastRead = this.lastRead.then(() => this.serialNext()); + return this.lastRead; + } + async serialNext() { + // TODO(soergel): consider tradeoffs of reading in parallel, eg. + // collecting next() promises in an Array and then waiting for + // Promise.all() of those. Benefit: pseudo-parallel execution. Drawback: + // maybe delayed GC. + while (this.count++ < this.maxCount) { + const skipped = await this.upstream.next(); + // short-circuit if upstream is already empty + if (skipped.done) { + return skipped; + } + dispose(skipped.value); + } + return this.upstream.next(); + } + } + class TakeIterator extends LazyIterator { + constructor(upstream, maxCount) { + super(); + this.upstream = upstream; + this.maxCount = maxCount; + this.count = 0; + } + summary() { + return `${this.upstream.summary()} -> Take`; + } + async next() { + if (this.count++ >= this.maxCount) { + return { value: null, done: true }; + } + return this.upstream.next(); + } + } + // Note this batch just groups items into row-wise element arrays. + // Rotating these to a column-wise representation happens only at the dataset + // level. 
+ class RowMajorBatchIterator extends LazyIterator { + constructor(upstream, batchSize, enableSmallLastBatch = true) { + super(); + this.upstream = upstream; + this.batchSize = batchSize; + this.enableSmallLastBatch = enableSmallLastBatch; + this.lastRead = Promise.resolve({ value: null, done: false }); + } + summary() { + return `${this.upstream.summary()} -> RowMajorBatch`; + } + async next() { + // This sets this.lastRead to a new Promise right away, as opposed to + // saying `await this.lastRead; this.lastRead = this.serialNext();` which + // would not work because this.nextRead would be updated only after the + // promise resolves. + this.lastRead = this.lastRead.then(() => this.serialNext()); + return this.lastRead; + } + async serialNext() { + const batch = []; + while (batch.length < this.batchSize) { + const item = await this.upstream.next(); + if (item.done) { + if (this.enableSmallLastBatch && batch.length > 0) { + return { value: batch, done: false }; + } + return { value: null, done: true }; + } + batch.push(item.value); + } + return { value: batch, done: false }; + } + } + class FilterIterator extends LazyIterator { + constructor(upstream, predicate) { + super(); + this.upstream = upstream; + this.predicate = predicate; + this.lastRead = Promise.resolve({ value: null, done: false }); + } + summary() { + return `${this.upstream.summary()} -> Filter`; + } + async next() { + // This sets this.lastRead to a new Promise right away, as opposed to + // saying `await this.lastRead; this.lastRead = this.serialNext();` which + // would not work because this.nextRead would be updated only after the + // promise resolves. 
+ this.lastRead = this.lastRead.then(() => this.serialNext()); + return this.lastRead; + } + async serialNext() { + while (true) { + const item = await this.upstream.next(); + if (item.done || this.predicate(item.value)) { + return item; + } + dispose(item.value); + } + } + } + class MapIterator extends LazyIterator { + constructor(upstream, transform) { + super(); + this.upstream = upstream; + this.transform = transform; + } + summary() { + return `${this.upstream.summary()} -> Map`; + } + async next() { + const item = await this.upstream.next(); + if (item.done) { + return { value: null, done: true }; + } + const inputTensors = getTensorsInContainer(item.value); + // Careful: the transform may mutate the item in place. + // That's why we have to remember the input Tensors above, and then + // below dispose only those that were not passed through to the output. + // Note too that the transform function is responsible for tidying + // any intermediate Tensors. Here we are concerned only about the + // inputs. + const mapped = this.transform(item.value); + const outputTensors = getTensorsInContainer(mapped); + // TODO(soergel) faster intersection + // TODO(soergel) move to tf.disposeExcept(in, out)? + for (const t of inputTensors) { + if (!isTensorInList(t, outputTensors)) { + t.dispose(); + } + } + return { value: mapped, done: false }; + } + } + class ErrorHandlingLazyIterator extends LazyIterator { + constructor(upstream, handler) { + super(); + this.upstream = upstream; + this.handler = handler; + this.count = 0; + this.lastRead = Promise.resolve({ value: null, done: false }); + } + summary() { + return `${this.upstream.summary()} -> handleErrors`; + } + async next() { + // This sets this.lastRead to a new Promise right away, as opposed to + // saying `await this.lastRead; this.lastRead = this.serialNext();` which + // would not work because this.nextRead would be updated only after the + // promise resolves. 
+ this.lastRead = this.lastRead.then(() => this.serialNext()); + return this.lastRead; + } + async serialNext() { + while (true) { + try { + return await this.upstream.next(); + } + catch (e) { + if (!this.handler(e)) { + return { value: null, done: true }; + } + // If the handler returns true, loop and fetch the next upstream item. + // If the upstream iterator throws an endless stream of errors, and if + // the handler says to ignore them, then we loop forever here. That is + // the correct behavior-- it's up to the handler to decide when to stop. + } + } + } + } + class AsyncMapIterator extends LazyIterator { + constructor(upstream, transform) { + super(); + this.upstream = upstream; + this.transform = transform; + } + summary() { + return `${this.upstream.summary()} -> AsyncMap`; + } + async next() { + const item = await this.upstream.next(); + if (item.done) { + return { value: null, done: true }; + } + const inputTensors = getTensorsInContainer(item.value); + // Careful: the transform may mutate the item in place. + // That's why we have to remember the input Tensors above, and then + // below dispose only those that were not passed through to the output. + // Note too that the transform function is responsible for tidying + // any intermediate Tensors. Here we are concerned only about the + // inputs. + const mapped = await this.transform(item.value); + const outputTensors = getTensorsInContainer(mapped); + // TODO(soergel) faster intersection + // TODO(soergel) move to tf.disposeExcept(in, out)? + for (const t of inputTensors) { + if (!isTensorInList(t, outputTensors)) { + t.dispose(); + } + } + return { value: mapped, done: false }; + } + } + // Iterators that maintain a queue of pending items + // ============================================================================ + /** + * A base class for transforming streams that operate by maintaining an + * output queue of elements that are ready to return via next(). 
This is + * commonly required when the transformation is 1-to-many: A call to next() + * may trigger a call to the underlying stream, which will produce many + * mapped elements of this stream-- of which we need to return only one, so + * we have to queue the rest. + */ + class OneToManyIterator extends LazyIterator { + constructor() { + super(); + this.outputQueue = new GrowingRingBuffer(); + this.lastRead = Promise.resolve({ value: null, done: false }); + } + async next() { + // This sets this.lastRead to a new Promise right away, as opposed to + // saying `await this.lastRead; this.lastRead = this.serialNext();` which + // would not work because this.nextRead would be updated only after the + // promise resolves. + this.lastRead = this.lastRead.then(() => this.serialNext()); + return this.lastRead; + } + async serialNext() { + // Fetch so that the queue contains at least one item if possible. + // If the upstream source is exhausted, AND there are no items left in + // the output queue, then this stream is also exhausted. + while (this.outputQueue.length() === 0) { + // TODO(soergel): consider parallel reads. + if (!await this.pump()) { + return { value: null, done: true }; + } + } + return { value: this.outputQueue.shift(), done: false }; + } + } + class FlatmapIterator extends OneToManyIterator { + constructor(upstream, transform) { + super(); + this.upstream = upstream; + this.transform = transform; + } + summary() { + return `${this.upstream.summary()} -> Flatmap`; + } + async pump() { + const item = await this.upstream.next(); + if (item.done) { + return false; + } + const inputTensors = getTensorsInContainer(item.value); + // Careful: the transform may mutate the item in place. + // that's why we have to remember the input Tensors above, and then + // below dispose only those that were not passed through to the output. + // Note too that the transform function is responsible for tidying any + // intermediate Tensors. 
Here we are concerned only about the inputs. + const mappedArray = this.transform(item.value); + const outputTensors = getTensorsInContainer(mappedArray); + this.outputQueue.pushAll(mappedArray); + // TODO(soergel) faster intersection, and deduplicate outputTensors + // TODO(soergel) move to tf.disposeExcept(in, out)? + for (const t of inputTensors) { + if (!isTensorInList(t, outputTensors)) { + t.dispose(); + } + } + return true; + } + } + /** + * Provides a `LazyIterator` that concatenates a stream of underlying + * streams. + * + * Doing this in a concurrency-safe way requires some trickery. In + * particular, we want this stream to return the elements from the + * underlying streams in the correct order according to when next() was + * called, even if the resulting Promises resolve in a different order. + */ + class ChainedIterator extends LazyIterator { + constructor(iterators, baseErrorHandler) { + super(); + this.baseErrorHandler = baseErrorHandler; + // Strict Promise execution order: + // a next() call may not even begin until the previous one completes. + this.lastRead = null; + // Local state that should not be clobbered by out-of-order execution. + this.iterator = null; + this.moreIterators = iterators; + } + summary() { + const upstreamSummaries = 'TODO: fill in upstream of chained summaries'; + return `${upstreamSummaries} -> Chained`; + } + async next() { + this.lastRead = this.readFromChain(this.lastRead); + return this.lastRead; + } + async readFromChain(lastRead) { + // Must await on the previous read since the previous read may have advanced + // the stream of streams, from which we need to read. + // This is unfortunate since we can't parallelize reads. Which means + // prefetching of chained streams is a no-op. + // One solution is to prefetch immediately upstream of this. + await lastRead; + if (this.iterator == null) { + const iteratorResult = await this.moreIterators.next(); + if (iteratorResult.done) { + // No more streams to stream from. 
+ return { value: null, done: true }; + } + this.iterator = iteratorResult.value; + if (this.baseErrorHandler != null) { + this.iterator = this.iterator.handleErrors(this.baseErrorHandler); + } + } + const itemResult = await this.iterator.next(); + if (itemResult.done) { + this.iterator = null; + return this.readFromChain(lastRead); + } + return itemResult; + } + } + var ZipMismatchMode; + (function (ZipMismatchMode) { + ZipMismatchMode[ZipMismatchMode["FAIL"] = 0] = "FAIL"; + ZipMismatchMode[ZipMismatchMode["SHORTEST"] = 1] = "SHORTEST"; + ZipMismatchMode[ZipMismatchMode["LONGEST"] = 2] = "LONGEST"; // use nulls for exhausted streams; use up the longest stream. + })(ZipMismatchMode || (ZipMismatchMode = {})); + /** + * Provides a `LazyIterator` that zips together an array, dict, or nested + * structure of `LazyIterator`s (and perhaps additional constants). + * + * The underlying streams must provide elements in a consistent order such + * that they correspond. + * + * Typically, the underlying streams should have the same number of + * elements. If they do not, the behavior is determined by the + * `mismatchMode` argument. + * + * The nested structure of the `iterators` argument determines the + * structure of elements in the resulting iterator. + * + * Doing this in a concurrency-safe way requires some trickery. In + * particular, we want this stream to return the elements from the + * underlying streams in the correct order according to when next() was + * called, even if the resulting Promises resolve in a different order. + * + * @param iterators: An array or object containing LazyIterators at the + * leaves. + * @param mismatchMode: Determines what to do when one underlying iterator + * is exhausted before the others. `ZipMismatchMode.FAIL` (the default) + * causes an error to be thrown in this case. 
`ZipMismatchMode.SHORTEST` + * causes the zipped iterator to terminate with the furst underlying + * streams, so elements remaining on the longer streams are ignored. + * `ZipMismatchMode.LONGEST` causes the zipped stream to continue, filling + * in nulls for the exhausted streams, until all streams are exhausted. + */ + class ZipIterator extends LazyIterator { + constructor(iterators, mismatchMode = ZipMismatchMode.FAIL) { + super(); + this.iterators = iterators; + this.mismatchMode = mismatchMode; + this.count = 0; + this.currentPromise = null; + } + summary() { + const upstreamSummaries = 'TODO: fill in upstream of zip summaries'; + return `{${upstreamSummaries}} -> Zip`; + } + async nextState(afterState) { + // This chaining ensures that the underlying next() are not even called + // before the previous ones have resolved. + await afterState; + // Collect underlying iterator "done" signals as a side effect in + // getNext() + let numIterators = 0; + let iteratorsDone = 0; + function getNext(container) { + if (container instanceof LazyIterator) { + const result = container.next(); + return { + value: result.then(x => { + numIterators++; + if (x.done) { + iteratorsDone++; + } + return x.value; + }), + recurse: false + }; + } + else { + return { value: null, recurse: true }; + } + } + const mapped = await deepMapAndAwaitAll(this.iterators, getNext); + if (numIterators === iteratorsDone) { + // The streams have all ended. + return { value: null, done: true }; + } + if (iteratorsDone > 0) { + switch (this.mismatchMode) { + case ZipMismatchMode.FAIL: + throw new Error('Zipped streams should have the same length. ' + + `Mismatched at element ${this.count}.`); + case ZipMismatchMode.SHORTEST: + return { value: null, done: true }; + case ZipMismatchMode.LONGEST: + default: + // Continue. The exhausted streams already produced value: null. 
+ } + } + this.count++; + return { value: mapped, done: false }; + } + async next() { + this.currentPromise = this.nextState(this.currentPromise); + return this.currentPromise; + } + } + // Iterators that maintain a ring buffer of pending promises + // ============================================================================ + /** + * A stream that prefetches a given number of items from an upstream source, + * returning them in FIFO order. + * + * Note this prefetches Promises, but makes no guarantees about when those + * Promises resolve. + */ + class PrefetchIterator extends LazyIterator { + constructor(upstream, bufferSize) { + super(); + this.upstream = upstream; + this.bufferSize = bufferSize; + this.buffer = new RingBuffer(bufferSize); + } + summary() { + return `${this.upstream.summary()} -> Prefetch`; + } + /** + * Refill the prefetch buffer. Returns only after the buffer is full, or + * the upstream source is exhausted. + */ + refill() { + while (!this.buffer.isFull()) { + const v = this.upstream.next(); + this.buffer.push(v); + } + } + next() { + this.refill(); + // This shift will never throw an error because the buffer is always + // full after a refill. If the stream is exhausted, the buffer will be + // full of Promises that will resolve to the end-of-stream signal. + return this.buffer.shift(); + } + } + /** + * A stream that performs a sliding-window random shuffle on an upstream + * source. This is like a `PrefetchIterator` except that the items are + * returned in randomized order. Mixing naturally improves as the buffer + * size increases. + */ + class ShuffleIterator extends PrefetchIterator { + constructor(upstream, windowSize, seed) { + super(upstream, windowSize); + this.upstream = upstream; + this.windowSize = windowSize; + // Local state that should not be clobbered by out-of-order execution. 
+ this.upstreamExhausted = false; + this.random = seedrandom.alea(seed || now().toString()); + this.lastRead = Promise.resolve({ value: null, done: false }); + } + async next() { + // This sets this.lastRead to a new Promise right away, as opposed to + // saying `await this.lastRead; this.lastRead = this.serialNext();` which + // would not work because this.nextRead would be updated only after the + // promise resolves. + this.lastRead = this.lastRead.then(() => this.serialNext()); + return this.lastRead; + } + randomInt(max) { + return Math.floor(this.random() * max); + } + chooseIndex() { + return this.randomInt(this.buffer.length()); + } + async serialNext() { + // TODO(soergel): consider performance + if (!this.upstreamExhausted) { + this.refill(); + } + while (!this.buffer.isEmpty()) { + const chosenIndex = this.chooseIndex(); + const result = await this.buffer.shuffleExcise(chosenIndex); + if (result.done) { + this.upstreamExhausted = true; + } + else { + this.refill(); + return result; + } + } + return { value: null, done: true }; + } + } + + /** + * @license + * Copyright 2018 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * ============================================================================= + */ + // TODO(soergel): consider vectorized operations within the pipeline. + /** + * Represents a potentially large list of independent data elements (typically + * 'samples' or 'examples'). 
+ * + * A 'data example' may be a primitive, an array, a map from string keys to + * values, or any nested structure of these. + * + * A `Dataset` represents an ordered collection of elements, together with a + * chain of transformations to be performed on those elements. Each + * transformation is a method of `Dataset` that returns another `Dataset`, so + * these may be chained, e.g. + * `const processedDataset = rawDataset.filter(...).map(...).batch(...)`. + * + * Data loading and transformation is done in a lazy, streaming fashion. The + * dataset may be iterated over multiple times; each iteration starts the data + * loading anew and recapitulates the transformations. + * + * A `Dataset` is typically processed as a stream of unbatched examples -- i.e., + * its transformations are applied one example at a time. Batching produces a + * new `Dataset` where each element is a batch. Batching should usually come + * last in a pipeline, because data transformations are easier to express on a + * per-example basis than on a per-batch basis. + * + * The following code examples are calling `await dataset.forEachAsync(...)` to + * iterate once over the entire dataset in order to print out the data. + * + * @doc {heading: 'Data', subheading: 'Classes', namespace: 'data'} + */ + class Dataset { + constructor() { + this.size = null; + } + // TODO(soergel): Make Datasets report whether repeated iterator() calls + // produce the same result (e.g., reading from a file) or different results + // (e.g., from the webcam). Currently we don't make this distinction but it + // could be important for the user to know. + // abstract isDeterministic(): boolean; + /** + * Groups elements into batches. + * + * It is assumed that each of the incoming dataset elements has the same + * structure -- i.e. the same set of keys at each location in an object + * hierarchy. For each key, the resulting `Dataset` provides a batched + * element collecting all of the incoming values for that key. 
+ * + * * Incoming primitives are grouped into a 1-D Tensor. + * * Incoming Tensors are grouped into a new Tensor where the 0th axis is + * the batch dimension. + * * Incoming arrays are converted to Tensor and then batched. + * * A nested array is interpreted as an n-D Tensor, so the batched result + * has n+1 dimensions. + * * An array that cannot be converted to Tensor produces an error. + * + * If an array should not be batched as a unit, it should first be converted + * to an object with integer keys. + * + * Here are a few examples: + * + * Batch a dataset of numbers: + * ```js + * const a = tf.data.array([1, 2, 3, 4, 5, 6, 7, 8]).batch(4); + * await a.forEachAsync(e => e.print()); + * ``` + * + * Batch a dataset of arrays: + * ```js + * const b = tf.data.array([[1], [2], [3], [4], [5], [6], [7], [8]]).batch(4); + * await b.forEachAsync(e => e.print()); + * ``` + * + * Batch a dataset of objects: + * ```js + * const c = tf.data.array([{a: 1, b: 11}, {a: 2, b: 12}, {a: 3, b: 13}, + * {a: 4, b: 14}, {a: 5, b: 15}, {a: 6, b: 16}, {a: 7, b: 17}, + * {a: 8, b: 18}]).batch(4); + * await c.forEachAsync(e => { + * console.log('{'); + * for(var key in e) { + * console.log(key+':'); + * e[key].print(); + * } + * console.log('}'); + * }) + * ``` + * + * @param batchSize The number of elements desired per batch. + * @param smallLastBatch Whether to emit the final batch when it has fewer + * than batchSize elements. Default true. + * @returns A `Dataset`, from which a stream of batches can be obtained. + * + * @doc {heading: 'Data', subheading: 'Classes'} + */ + batch(batchSize, smallLastBatch = true) { + const base = this; + assert$1(batchSize > 0, () => `batchSize needs to be positive, but it is + ${batchSize}`); + let size; + if (this.size === Infinity || this.size == null) { + // If the size of this dataset is infinity or null, the new size keeps the + // same. 
+ size = this.size; + } + else if (smallLastBatch) { + // If the size of this dataset is known and include small last batch, the + // new size is full batch count plus last batch. + size = Math.ceil(this.size / batchSize); + } + else { + // If the size of this dataset is known and not include small last batch, + // the new size is full batch count. + size = Math.floor(this.size / batchSize); + } + return datasetFromIteratorFn(async () => { + return (await base.iterator()) + .columnMajorBatch(batchSize, smallLastBatch, deepBatchConcat); + }, size); + } + /** + * Concatenates this `Dataset` with another. + * + * ```js + * const a = tf.data.array([1, 2, 3]); + * const b = tf.data.array([4, 5, 6]); + * const c = a.concatenate(b); + * await c.forEachAsync(e => console.log(e)); + * ``` + * + * @param dataset A `Dataset` to be concatenated onto this one. + * @returns A `Dataset`. + * + * @doc {heading: 'Data', subheading: 'Classes'} + */ + concatenate(dataset) { + const base = this; + let size; + if (this.size === Infinity || dataset.size === Infinity) { + // If the size of any of these two dataset is infinity, new size is + // infinity. + size = Infinity; + } + else if (this.size != null && dataset.size != null) { + // If the size of both datasets are known and not infinity, new size is + // sum the size of these two datasets. + size = this.size + dataset.size; + } + else { + // If neither of these two datasets has infinite size and any of these two + // datasets' size is null, the new size is null. + size = null; + } + return datasetFromIteratorFn(async () => (await base.iterator()).concatenate(await dataset.iterator()), size); + } + /** + * Filters this dataset according to `predicate`. + * + * ```js + * const a = tf.data.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10]) + * .filter(x => x%2 === 0); + * await a.forEachAsync(e => console.log(e)); + * ``` + * + * @param predicate A function mapping a dataset element to a boolean or a + * `Promise` for one. 
+ * + * @returns A `Dataset` of elements for which the predicate was true. + * + * @doc {heading: 'Data', subheading: 'Classes'} + */ + filter(predicate) { + const base = this; + let size; + if (this.size === Infinity) { + // If the size of this dataset is infinity, new size is infinity + size = Infinity; + } + else { + // If this dataset has limited elements, new size is null because it might + // exhausted randomly. + size = null; + } + return datasetFromIteratorFn(async () => { + return (await base.iterator()).filter(x => tidy(() => predicate(x))); + }, size); + } + /** + * Apply a function to every element of the dataset. + * + * After the function is applied to a dataset element, any Tensors contained + * within that element are disposed. + * + * ```js + * const a = tf.data.array([1, 2, 3]); + * await a.forEachAsync(e => console.log(e)); + * ``` + * + * @param f A function to apply to each dataset element. + * @returns A `Promise` that resolves after all elements have been processed. + * + * @doc {heading: 'Data', subheading: 'Classes'} + */ + async forEachAsync(f) { + return (await this.iterator()).forEachAsync(f); + } + /** + * Maps this dataset through a 1-to-1 transform. + * + * ```js + * const a = tf.data.array([1, 2, 3]).map(x => x*x); + * await a.forEachAsync(e => console.log(e)); + * ``` + * + * @param transform A function mapping a dataset element to a transformed + * dataset element. + * + * @returns A `Dataset` of transformed elements. + * + * @doc {heading: 'Data', subheading: 'Classes'} + */ + map(transform) { + const base = this; + return datasetFromIteratorFn(async () => { + return (await base.iterator()).map(x => tidy(() => transform(x))); + }, this.size); + } + /** + * Maps this dataset through an async 1-to-1 transform. 
+ * + * ```js + * const a = + * tf.data.array([1, 2, 3]).mapAsync(x => new Promise(function(resolve){ + * setTimeout(() => { + * resolve(x * x); + * }, Math.random()*1000 + 500); + * })); + * console.log(await a.toArray()); + * ``` + * + * @param transform A function mapping a dataset element to a `Promise` for a + * transformed dataset element. This transform is responsible for disposing + * any intermediate `Tensor`s, i.e. by wrapping its computation in + * `tf.tidy()`; that cannot be automated here (as it is in the synchronous + * `map()` case). + * + * @returns A `Dataset` of transformed elements. + * + * @doc {heading: 'Data', subheading: 'Classes'} + */ + mapAsync(transform) { + const base = this; + return datasetFromIteratorFn(async () => { + return (await base.iterator()).mapAsync(transform); + }, this.size); + } + /** + * Creates a `Dataset` that prefetches elements from this dataset. + * + * @param bufferSize: An integer specifying the number of elements to be + * prefetched. + * @returns A `Dataset`. + * + * @doc {heading: 'Data', subheading: 'Classes'} + */ + prefetch(bufferSize) { + if (bufferSize == null) { + throw new RangeError('`Dataset.prefetch()` requires bufferSize to be specified.'); + } + const base = this; + return datasetFromIteratorFn(async () => (await base.iterator()).prefetch(bufferSize), this.size); + } + /** + * Repeats this dataset `count` times. + * + * NOTE: If this dataset is a function of global state (e.g. a random number + * generator), then different repetitions may produce different elements. + * + * ```js + * const a = tf.data.array([1, 2, 3]).repeat(3); + * await a.forEachAsync(e => console.log(e)); + * ``` + * + * @param count: (Optional) An integer, representing the number of times + * the dataset should be repeated. The default behavior (if `count` is + * `undefined` or negative) is for the dataset be repeated indefinitely. + * @returns A `Dataset`. 
+ * + * @doc {heading: 'Data', subheading: 'Classes'} + */ + repeat(count) { + const base = this; + let size; + if (this.size != null && count > 0) { + // If this dataset has size and count is positive, new size is current + // size multiply count. This also covers the case that current size is + // infinity. + size = this.size * count; + } + else if (count === 0) { + // If count is 0, new size is 0. + size = 0; + } + else if (this.size != null && (count === undefined || count < 0)) { + // If this dataset has size and count is undefined or negative, the + // dataset will be repeated indefinitely and new size is infinity. + size = Infinity; + } + else { + // If the size of this dataset is null, the new dataset's size is null. + size = null; + } + return datasetFromIteratorFn(async () => { + const iteratorIterator = iteratorFromFunction(async () => ({ value: await base.iterator(), done: false })); + return iteratorFromConcatenated(iteratorIterator.take(count)); + }, size); + } + /** + * Creates a `Dataset` that skips `count` initial elements from this dataset. + * + * ```js + * const a = tf.data.array([1, 2, 3, 4, 5, 6]).skip(3); + * await a.forEachAsync(e => console.log(e)); + * ``` + * + * @param count: The number of elements of this dataset that should be skipped + * to form the new dataset. If `count` is greater than the size of this + * dataset, the new dataset will contain no elements. If `count` + * is `undefined` or negative, skips the entire dataset. + * + * @returns A `Dataset`. + * + * @doc {heading: 'Data', subheading: 'Classes'} + */ + skip(count) { + const base = this; + let size; + if (this.size != null && count >= 0 && this.size >= count) { + // If the size of this dataset is greater than count, the new dataset's + // size is current size minus skipped size.This also covers the case that + // current size is infinity. 
+ size = this.size - count; + } + else if (this.size != null && + (this.size < count || count === undefined || count < 0)) { + // If the size of this dataset is smaller than count, or count is + // undefined or negative, skips the entire dataset and the new size is 0. + size = 0; + } + else { + // If the size of this dataset is null, the new dataset's size is null. + size = null; + } + return datasetFromIteratorFn(async () => (await base.iterator()).skip(count), size); + } + /** + * Pseudorandomly shuffles the elements of this dataset. This is done in a + * streaming manner, by sampling from a given number of prefetched elements. + * + * ```js + * const a = tf.data.array([1, 2, 3, 4, 5, 6]).shuffle(3); + * await a.forEachAsync(e => console.log(e)); + * ``` + * + * @param bufferSize: An integer specifying the number of elements from this + * dataset from which the new dataset will sample. + * @param seed: (Optional) An integer specifying the random seed that will + * be used to create the distribution. + * @param reshuffleEachIteration: (Optional) A boolean, which if true + * indicates that the dataset should be pseudorandomly reshuffled each time + * it is iterated over. If false, elements will be returned in the same + * shuffled order on each iteration. (Defaults to `true`.) + * @returns A `Dataset`. + * + * @doc {heading: 'Data', subheading: 'Classes'} + */ + shuffle(bufferSize, seed, reshuffleEachIteration = true) { + if (bufferSize == null || bufferSize < 0) { + if (this.size == null) { + throw new RangeError('`Dataset.shuffle()` requires bufferSize to be specified.'); + } + else { + throw new RangeError('`Dataset.shuffle()` requires bufferSize to be specified. 
' + + 'If your data fits in main memory (for regular JS objects), ' + + 'and/or GPU memory (for `tf.Tensor`s), consider setting ' + + `bufferSize to the dataset size (${this.size} elements)`); + } + } + const base = this; + const random = seedrandom.alea(seed || now().toString()); + return datasetFromIteratorFn(async () => { + let seed2 = random.int32(); + if (reshuffleEachIteration) { + seed2 += random.int32(); + } + return (await base.iterator()).shuffle(bufferSize, seed2.toString()); + }, this.size); + } + /** + * Creates a `Dataset` with at most `count` initial elements from this + * dataset. + * + * ```js + * const a = tf.data.array([1, 2, 3, 4, 5, 6]).take(3); + * await a.forEachAsync(e => console.log(e)); + * ``` + * + * @param count: The number of elements of this dataset that should be taken + * to form the new dataset. If `count` is `undefined` or negative, or if + * `count` is greater than the size of this dataset, the new dataset will + * contain all elements of this dataset. + * @returns A `Dataset`. + * + * @doc {heading: 'Data', subheading: 'Classes'} + */ + take(count) { + const base = this; + let size; + if (this.size != null && this.size > count) { + // If the size of this dataset is greater than count, the new dataset's + // size is count. + size = count; + } + else if (this.size != null && this.size <= count) { + // If the size of this dataset is equal or smaller than count, the new + // dataset's size is the size of this dataset. + size = this.size; + } + else { + // If the size of this dataset is null, the new dataset's size is null. + size = null; + } + return datasetFromIteratorFn(async () => (await base.iterator()).take(count), size); + } + /** + * Collect all elements of this dataset into an array. + * + * Obviously this will succeed only for small datasets that fit in memory. + * Useful for testing and generally should be avoided if possible. 
+ * + * ```js + * const a = tf.data.array([1, 2, 3, 4, 5, 6]); + * console.log(await a.toArray()); + * ``` + * + * @returns A Promise for an array of elements, which will resolve + * when a new stream has been obtained and fully consumed. + * + * @doc {heading: 'Data', subheading: 'Classes'} + */ + async toArray() { + if (this.size === Infinity) { + throw new Error('Can not convert infinite data stream to array.'); + } + return (await this.iterator()).toArray(); + } + /** + * Collect all elements of this dataset into an array with prefetching 100 + * elements. This is useful for testing, because the prefetch changes the + * order in which the Promises are resolved along the processing pipeline. + * This may help expose bugs where results are dependent on the order of + * Promise resolution rather than on the logical order of the stream (i.e., + * due to hidden mutable state). + * + * @returns A Promise for an array of elements, which will resolve + * when a new stream has been obtained and fully consumed. + */ + async toArrayForTest() { + if (this.size === Infinity) { + throw new Error('Can not convert infinite data stream to array.'); + } + return (await this.iterator()).toArrayForTest(); + } + } + // TODO(soergel): deep sharded shuffle, where supported + Dataset.MAX_BUFFER_SIZE = 10000; + /** + * Create a `Dataset` defined by a provided iterator() function. + * + * ```js + * let i = -1; + * const func = () => + * ++i < 5 ? {value: i, done: false} : {value: null, done: true}; + * const iter = tf.data.iteratorFromFunction(func); + * const ds = tf.data.datasetFromIteratorFn(iter); + * await ds.forEachAsync(e => console.log(e)); + * ``` + */ + function datasetFromIteratorFn(iteratorFn, size = null) { + return new class extends Dataset { + constructor() { + super(...arguments); + this.size = size; + } + /* + * Provide a new stream of elements. Note this will also start new streams + * from any underlying `Dataset`s. 
+ */ + async iterator() { + return iteratorFn(); + } + }(); + } + /** + * Create a `Dataset` from an array of elements. + * + * Create a Dataset from an array of objects: + * ```js + * const a = tf.data.array([{'item': 1}, {'item': 2}, {'item': 3}]); + * await a.forEachAsync(e => console.log(e)); + * ``` + * + * Create a Dataset from an array of numbers: + * ```js + * const a = tf.data.array([4, 5, 6]); + * await a.forEachAsync(e => console.log(e)); + * ``` + * @param items An array of elements that will be parsed as items in a dataset. + * + * @doc {heading: 'Data', subheading: 'Creation', namespace: 'data'} + */ + function array(items) { + return datasetFromIteratorFn(async () => iteratorFromItems(items), items.length); + } + /** + * Create a `Dataset` by zipping together an array, dict, or nested + * structure of `Dataset`s (and perhaps additional constants). + * The underlying datasets must provide elements in a consistent order such that + * they correspond. + * + * The number of elements in the resulting dataset is the same as the size of + * the smallest dataset in datasets. + * + * The nested structure of the `datasets` argument determines the + * structure of elements in the resulting iterator. 
+ * + * Note this means that, given an array of two datasets that produce dict + * elements, the result is a dataset that produces elements that are arrays + * of two dicts: + * + * Zip an array of datasets: + * ```js + * console.log('Zip two datasets of objects:'); + * const ds1 = tf.data.array([{a: 1}, {a: 2}, {a: 3}]); + * const ds2 = tf.data.array([{b: 4}, {b: 5}, {b: 6}]); + * const ds3 = tf.data.zip([ds1, ds2]); + * await ds3.forEachAsync(e => console.log(JSON.stringify(e))); + * + * // If the goal is to merge the dicts in order to produce elements like + * // {a: ..., b: ...}, this requires a second step such as: + * console.log('Merge the objects:'); + * const ds4 = ds3.map(x => {return {a: x[0].a, b: x[1].b}}); + * await ds4.forEachAsync(e => console.log(e)); + * ``` + * + * Zip a dict of datasets: + * ```js + * const a = tf.data.array([{a: 1}, {a: 2}, {a: 3}]); + * const b = tf.data.array([{b: 4}, {b: 5}, {b: 6}]); + * const c = tf.data.zip({c: a, d: b}); + * await c.forEachAsync(e => console.log(JSON.stringify(e))); + * ``` + * + * @doc {heading: 'Data', subheading: 'Operations', namespace: 'data'} + */ + function zip(datasets) { + // manually type-check the argument for JS users + if (!isIterable(datasets)) { + throw new Error('The argument to zip() must be an object or array.'); + } + let size; + if (Array.isArray(datasets)) { + for (let i = 0; i < datasets.length; i++) { + size = size == null ? datasets[i].size : + Math.min(size, datasets[i].size); + } + } + else if (datasets instanceof Object) { + for (const ds in datasets) { + size = size == null ? 
datasets[ds].size : + Math.min(size, datasets[ds].size); + } + } + return datasetFromIteratorFn(async () => { + const streams = await deepMapAndAwaitAll(datasets, d => { + if (d instanceof Dataset) { + return { value: d.iterator(), recurse: false }; + } + else if (isIterable(d)) { + return { value: null, recurse: true }; + } + else { + throw new Error('Leaves of the structure passed to zip() must be Datasets, ' + + 'not primitives.'); + } + }); + return iteratorFromZipped(streams, ZipMismatchMode.SHORTEST); + }, size); + } + /** + * A zip function for use with deepZip, passed via the columnMajorBatch call. + * + * Accepts an array of identically-structured nested elements and either batches + * them (if they are primitives, numeric arrays, or Tensors) or requests + * recursion (if not). + */ + // tslint:disable-next-line:no-any + function deepBatchConcat(rows) { + if (rows === null) { + return null; + } + // use the first item to decide whether to recurse or batch here. + const exampleRow = rows[0]; + if (canTensorify(exampleRow)) { + // rows is an array of primitives, Tensors, or arrays. Batch them. + const value = batchConcat(rows); + return { value, recurse: false }; + } + // the example row is an object, so recurse into it. + return { value: null, recurse: true }; + } + /** + * Assembles a list of same-shaped numbers, number arrays, or Tensors + * into a single new Tensor where axis 0 is the batch dimension. + */ + function batchConcat(arrays) { + if (arrays.length === 0) { + // We can't return an empty Tensor because we don't know the element shape. + throw new Error('Can\'t make a batch of zero elements.'); + } + if (arrays[0] instanceof Tensor) { + // Input is an array of Tensors + return stack(arrays); + } + else { + // Input is a possibly-nested array of numbers. + return tensor(arrays); + } + } + + /** + * @license + * Copyright 2018 Google LLC. All Rights Reserved. 
+ * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * ============================================================================= + */ + /** + * Represents a potentially large collection of text lines. + * + * The results are not batched. + */ + class TextLineDataset extends Dataset { + /** + * Create a `TextLineDataset`. + * + * @param input A `DataSource` providing a chunked, UTF8-encoded byte stream. + */ + constructor(input) { + super(); + this.input = input; + } + async iterator() { + const inputIterator = await this.input.iterator(); + const utf8Iterator = inputIterator.decodeUTF8(); + const lineIterator = utf8Iterator.split('\n').map(line => { + // Windows/DOS format text file has extra line breaker at the end of line. + if (line.endsWith('\r')) { + line = line.slice(0, -1); + } + return line; + }); + return lineIterator; + } + } + + /** + * @license + * Copyright 2018 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + * + * ============================================================================= + */ + const CODE_QUOTE = '"'; + const STATE_OUT = Symbol('out'); + const STATE_FIELD = Symbol('field'); + const STATE_QUOTE = Symbol('quote'); + const STATE_QUOTE_AFTER_QUOTE = Symbol('quoteafterquote'); + const STATE_WITHIN_QUOTE_IN_QUOTE = Symbol('quoteinquote'); + /** + * Represents a potentially large collection of delimited text records. + * + * The produced `TensorContainer`s each contain one key-value pair for + * every column of the table. When a field is empty in the incoming data, the + * resulting value is `undefined`, or throw error if it is required. Values + * that can be parsed as numbers are emitted as type `number`, other values + * are parsed as `string`. + * + * The results are not batched. + * + * @doc {heading: 'Data', subheading: 'Classes', namespace: 'data'} + */ + class CSVDataset extends Dataset { + /** + * Returns column names of the csv dataset. If `configuredColumnsOnly` is + * true, return column names in `columnConfigs`. If `configuredColumnsOnly` is + * false and `columnNames` is provided, `columnNames`. If + * `configuredColumnsOnly` is false and `columnNames` is not provided, return + * all column names parsed from the csv file. For example usage please go to + * `tf.data.csv`. + * + * @doc {heading: 'Data', subheading: 'Classes'} + */ + async columnNames() { + if (!this.columnNamesValidated) { + await this.setColumnNames(); + } + return this.configuredColumnsOnly ? Object.keys(this.columnConfigs) : + this.fullColumnNames; + } + /* 1) If `columnNames` is provided as string[], use this string[] as output + * keys in corresponding order. The length must match the number of inferred + * columns if `hasHeader` is true . + * 2) If `columnNames` is not provided, parse header line as `columnNames` if + * hasHeader is true. 
If `hasHeader` is false, throw an error. + * 3) If `columnConfigs` is provided, all the keys in `columnConfigs` must + * exist in parsed `columnNames`. + */ + async setColumnNames() { + const columnNamesFromFile = await this.maybeReadHeaderLine(); + if (!this.fullColumnNames && !columnNamesFromFile) { + // Throw an error if columnNames is not provided and no header line. + throw new Error('Column names must be provided if there is no header line.'); + } + else if (this.fullColumnNames && columnNamesFromFile) { + // Check provided columnNames match header line. + assert$1(columnNamesFromFile.length === this.fullColumnNames.length, () => 'The length of provided columnNames (' + + this.fullColumnNames.length.toString() + + ') does not match the length of the header line read from ' + + 'file (' + columnNamesFromFile.length.toString() + ').'); + } + if (!this.fullColumnNames) { + this.fullColumnNames = columnNamesFromFile; + } + // Check if there are duplicate column names. + const counts = this.fullColumnNames.reduce((countAcc, name) => { + countAcc[name] = (countAcc[name] + 1) || 1; + return countAcc; + }, {}); + const duplicateNames = Object.keys(counts).filter((name) => (counts[name] > 1)); + assert$1(duplicateNames.length === 0, () => 'Duplicate column names found: ' + duplicateNames.toString()); + // Check if keys in columnConfigs match columnNames. 
+ if (this.columnConfigs) { + for (const key of Object.keys(this.columnConfigs)) { + const index = this.fullColumnNames.indexOf(key); + if (index === -1) { + throw new Error('The key "' + key + + '" provided in columnConfigs does not match any of the column ' + + 'names (' + this.fullColumnNames.toString() + ').'); + } + } + } + this.columnNamesValidated = true; + } + async maybeReadHeaderLine() { + if (this.hasHeader) { + const iter = await this.base.iterator(); + const firstElement = await iter.next(); + if (firstElement.done) { + throw new Error('No data was found for CSV parsing.'); + } + const firstLine = firstElement.value; + const headers = this.parseRow(firstLine, false); + return headers; + } + else { + return null; + } + } + /** + * Create a `CSVDataset`. + * + * @param input A `DataSource` providing a chunked, UTF8-encoded byte stream. + * @param csvConfig (Optional) A CSVConfig object that contains configurations + * of reading and decoding from CSV file(s). + * + * hasHeader: (Optional) A boolean value that indicates whether the first + * row of provided CSV file is a header line with column names, and should + * not be included in the data. Defaults to `true`. + * + * columnNames: (Optional) A list of strings that corresponds to + * the CSV column names, in order. If provided, it ignores the column + * names inferred from the header row. If not provided, infers the column + * names from the first row of the records. If hasHeader is false and + * columnNames is not provided, this method throws an error. + * + * columnConfigs: (Optional) A dictionary whose key is column names, value + * is an object stating if this column is required, column's data type, + * default value, and if this column is label. If provided, keys must + * correspond to names provided in columnNames or inferred from the file + * header lines. 
If isLabel is true any column, returns an array of two + * items: the first item is a dict of features key/value pairs, the second + * item is a dict of labels key/value pairs. If no feature is marked as + * label, returns a dict of features only. + * + * configuredColumnsOnly (Optional) If true, only columns provided in + * columnConfigs will be parsed and provided during iteration. + * + * delimiter (Optional) The string used to parse each line of the input + * file. Defaults to `,`. + */ + constructor(input, csvConfig) { + super(); + this.input = input; + this.hasHeader = true; + this.fullColumnNames = null; + this.columnNamesValidated = false; + this.columnConfigs = null; + this.configuredColumnsOnly = false; + this.delimiter = ','; + this.delimWhitespace = false; + this.base = new TextLineDataset(input); + if (!csvConfig) { + csvConfig = {}; + } + this.hasHeader = csvConfig.hasHeader === false ? false : true; + this.fullColumnNames = csvConfig.columnNames; + this.columnConfigs = csvConfig.columnConfigs; + this.configuredColumnsOnly = csvConfig.configuredColumnsOnly; + if (csvConfig.delimWhitespace) { + assert$1(csvConfig.delimiter == null, () => 'Delimiter should not be provided when delimWhitespace is true.'); + this.delimWhitespace = true; + this.delimiter = ' '; + } + else { + this.delimiter = csvConfig.delimiter ? csvConfig.delimiter : ','; + } + } + async iterator() { + if (!this.columnNamesValidated) { + await this.setColumnNames(); + } + let lines = await this.base.iterator(); + if (this.hasHeader) { + // We previously read the first line to get the columnNames. + // Now that we're providing data, skip it. + lines = lines.skip(1); + } + return lines.map(x => this.makeDataElement(x)); + } + makeDataElement(line) { + const values = this.parseRow(line); + const features = {}; + const labels = {}; + for (let i = 0; i < this.fullColumnNames.length; i++) { + const key = this.fullColumnNames[i]; + const config = this.columnConfigs ? 
this.columnConfigs[key] : null; + if (this.configuredColumnsOnly && !config) { + // This column is not selected. + continue; + } + else { + const value = values[i]; + let parsedValue = null; + if (value === '') { + // If default value is provided, use it. If default value is not + // provided, set as undefined. + if (config && config.default !== undefined) { + parsedValue = config.default; + } + else if (config && (config.required || config.isLabel)) { + throw new Error(`Required column ${key} is empty in this line: ${line}`); + } + else { + parsedValue = undefined; + } + } + else { + // A value is present, so parse it based on type + const valueAsNum = Number(value); + if (isNaN(valueAsNum)) { + // The value is a string and this column is declared as boolean + // in config, parse it as boolean. + if (config && config.dtype === 'bool') { + parsedValue = this.getBoolean(value); + } + else { + // Set value as string + parsedValue = value; + } + } + else if (!config || !config.dtype) { + // If this value is a number and no type config is provided, return + // it as number. + parsedValue = valueAsNum; + } + else { + // If this value is a number and data type is provided, parse it + // according to provided data type. + switch (config.dtype) { + case 'float32': + parsedValue = valueAsNum; + break; + case 'int32': + parsedValue = Math.floor(valueAsNum); + break; + case 'bool': + parsedValue = this.getBoolean(value); + break; + default: + parsedValue = valueAsNum; + } + } + } + // Check if this column is label. + (config && config.isLabel) ? labels[key] = parsedValue : + features[key] = parsedValue; + } + } + // If label exists, return an object of features and labels as {xs:features, + // ys:labels}, otherwise return features only. 
+ if (Object.keys(labels).length === 0) { + return features; + } + else { + return { xs: features, ys: labels }; + } + } + getBoolean(value) { + if (value === '1' || value.toLowerCase() === 'true') { + return 1; + } + else { + return 0; + } + } + // adapted from https://beta.observablehq.com/@mbostock/streaming-csv + parseRow(line, validateElementCount = true) { + const result = []; + let readOffset = 0; + const readLength = line.length; + let currentState = STATE_OUT; + // Goes through the line to parse quote. + for (let i = 0; i < readLength; i++) { + switch (currentState) { + // Before enter a new field + case STATE_OUT: + switch (line.charAt(i)) { + // Enter a quoted field + case CODE_QUOTE: + readOffset = i + 1; + currentState = STATE_QUOTE; + break; + // Read an empty field + case this.delimiter: + readOffset = i + 1; + // If delimiter is white space and configured to collapse + // multiple white spaces, ignore this white space. + if (this.delimiter === ' ' && this.delimWhitespace) { + break; + } + result.push(''); + currentState = STATE_OUT; + break; + // Enter an unquoted field + default: + currentState = STATE_FIELD; + readOffset = i; + break; + } + break; + // In an unquoted field + case STATE_FIELD: + switch (line.charAt(i)) { + // Exit an unquoted field, add it to result + case this.delimiter: + result.push(line.substring(readOffset, i)); + currentState = STATE_OUT; + readOffset = i + 1; + break; + default: + } + break; + // In a quoted field + case STATE_QUOTE: + switch (line.charAt(i)) { + // Read a quote after a quote + case CODE_QUOTE: + currentState = STATE_QUOTE_AFTER_QUOTE; + break; + default: + } + break; + // This state means it's right after a second quote in a field + case STATE_QUOTE_AFTER_QUOTE: + switch (line.charAt(i)) { + // Finished a quoted field + case this.delimiter: + result.push(line.substring(readOffset, i - 1)); + currentState = STATE_OUT; + readOffset = i + 1; + break; + // Finished a quoted part in a quoted field + case 
CODE_QUOTE: + currentState = STATE_QUOTE; + break; + // In a quoted part in a quoted field + default: + currentState = STATE_WITHIN_QUOTE_IN_QUOTE; + break; + } + break; + case STATE_WITHIN_QUOTE_IN_QUOTE: + switch (line.charAt(i)) { + // Exit a quoted part in a quoted field + case CODE_QUOTE: + currentState = STATE_QUOTE; + break; + default: + } + break; + default: + } + } + // Adds last item based on if it is quoted. + if (currentState === STATE_QUOTE_AFTER_QUOTE) { + result.push(line.substring(readOffset, readLength - 1)); + } + else { + result.push(line.substring(readOffset)); + } + // Check if each row has the same number of elements as column names. + if (validateElementCount && result.length !== this.fullColumnNames.length) { + throw new Error(`Invalid row in csv file. Should have ${this.fullColumnNames.length} elements in a row, but got ${result}`); + } + return result; + } + } + // TODO(soergel): add more basic datasets for parity with tf.data + // tf.data.FixedLengthRecordDataset() + // tf.data.TFRecordDataset() + + /** + * @license + * Copyright 2019 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * ============================================================================= + */ + /** + * Provide a stream of tensors from microphone audio stream. The tensors are + * representing audio data as frequency-domain spectrogram generated with + * browser's native FFT. 
Tensors representing time-domain waveform is available + * based on configuration. Only works in browser environment. + */ + class MicrophoneIterator extends LazyIterator { + constructor(microphoneConfig) { + super(); + this.microphoneConfig = microphoneConfig; + this.isClosed = false; + this.fftSize = microphoneConfig.fftSize || 1024; + const fftSizeLog2 = Math.log2(this.fftSize); + if (this.fftSize < 0 || fftSizeLog2 < 4 || fftSizeLog2 > 14 || + !Number.isInteger(fftSizeLog2)) { + throw new Error(`Invalid fftSize: it must be a power of 2 between ` + + `2 to 4 and 2 to 14, but got ${this.fftSize}`); + } + this.numFrames = microphoneConfig.numFramesPerSpectrogram || 43; + this.sampleRateHz = microphoneConfig.sampleRateHz; + this.columnTruncateLength = + microphoneConfig.columnTruncateLength || this.fftSize; + this.audioTrackConstraints = microphoneConfig.audioTrackConstraints; + this.smoothingTimeConstant = microphoneConfig.smoothingTimeConstant || 0; + this.includeSpectrogram = + microphoneConfig.includeSpectrogram === false ? false : true; + this.includeWaveform = + microphoneConfig.includeWaveform === true ? true : false; + if (!this.includeSpectrogram && !this.includeWaveform) { + throw new Error('Both includeSpectrogram and includeWaveform are false. ' + + 'At least one type of data should be returned.'); + } + } + summary() { + return `microphone`; + } + // Construct a MicrophoneIterator and start the audio stream. + static async create(microphoneConfig = {}) { + if (!env().get('IS_BROWSER')) { + throw new Error('microphone API is only supported in browser environment.'); + } + const microphoneIterator = new MicrophoneIterator(microphoneConfig); + // Call async function start() to initialize the audio stream. + await microphoneIterator.start(); + return microphoneIterator; + } + // Start the audio stream and FFT. + async start() { + try { + this.stream = await navigator.mediaDevices.getUserMedia({ + audio: this.audioTrackConstraints == null ? 
true : + this.audioTrackConstraints, + video: false + }); + } + catch (e) { + throw new Error(`Error thrown while initializing video stream: ${e.message}`); + } + if (!this.stream) { + throw new Error('Could not obtain audio from microphone.'); + } + const ctxConstructor = + // tslint:disable-next-line:no-any + window.AudioContext || window.webkitAudioContext; + this.audioContext = new ctxConstructor(); + if (!this.sampleRateHz) { + // If sample rate is not provided, use the available sample rate on + // device. + this.sampleRateHz = this.audioContext.sampleRate; + } + else if (this.audioContext.sampleRate !== this.sampleRateHz) { + throw new Error(`Mismatch in sampling rate: ` + + `Expected: ${this.sampleRateHz}; ` + + `Actual: ${this.audioContext.sampleRate}`); + } + const streamSource = this.audioContext.createMediaStreamSource(this.stream); + this.analyser = this.audioContext.createAnalyser(); + this.analyser.fftSize = this.fftSize * 2; + this.analyser.smoothingTimeConstant = this.smoothingTimeConstant; + streamSource.connect(this.analyser); + this.freqData = new Float32Array(this.fftSize); + this.timeData = new Float32Array(this.fftSize); + return; + } + async next() { + if (this.isClosed) { + return { value: null, done: true }; + } + let spectrogramTensor; + let waveformTensor; + const audioDataQueue = await this.getAudioData(); + if (this.includeSpectrogram) { + const freqData = this.flattenQueue(audioDataQueue.freqDataQueue); + spectrogramTensor = this.getTensorFromAudioDataArray(freqData, [this.numFrames, this.columnTruncateLength, 1]); + } + if (this.includeWaveform) { + const timeData = this.flattenQueue(audioDataQueue.timeDataQueue); + waveformTensor = this.getTensorFromAudioDataArray(timeData, [this.numFrames * this.fftSize, 1]); + } + return { + value: { 'spectrogram': spectrogramTensor, 'waveform': waveformTensor }, + done: false + }; + } + // Capture one result from the audio stream, and extract the value from + // iterator.next() result. 
+ async capture() { + return (await this.next()).value; + } + async getAudioData() { + const freqDataQueue = []; + const timeDataQueue = []; + let currentFrames = 0; + return new Promise(resolve => { + const intervalID = setInterval(() => { + if (this.includeSpectrogram) { + this.analyser.getFloatFrequencyData(this.freqData); + // If the audio stream is initializing, return empty queue. + if (this.freqData[0] === -Infinity) { + resolve({ freqDataQueue, timeDataQueue }); + } + freqDataQueue.push(this.freqData.slice(0, this.columnTruncateLength)); + } + if (this.includeWaveform) { + this.analyser.getFloatTimeDomainData(this.timeData); + timeDataQueue.push(this.timeData.slice()); + } + // Clean interval and return when all frames have been collected + if (++currentFrames === this.numFrames) { + clearInterval(intervalID); + resolve({ freqDataQueue, timeDataQueue }); + } + }, this.fftSize / this.sampleRateHz * 1e3); + }); + } + // Stop the audio stream and pause the iterator. + stop() { + if (!this.isClosed) { + this.isClosed = true; + this.analyser.disconnect(); + this.audioContext.close(); + if (this.stream != null && this.stream.getTracks().length > 0) { + this.stream.getTracks()[0].stop(); + } + } + } + // Override toArray() function to prevent collecting. + toArray() { + throw new Error('Can not convert infinite audio stream to array.'); + } + // Return audio sampling rate in Hz + getSampleRate() { + return this.sampleRateHz; + } + flattenQueue(queue) { + const frameSize = queue[0].length; + const freqData = new Float32Array(queue.length * frameSize); + queue.forEach((data, i) => freqData.set(data, i * frameSize)); + return freqData; + } + getTensorFromAudioDataArray(freqData, shape) { + const vals = new Float32Array(sizeFromShape(shape)); + // If the data is less than the output shape, the rest is padded with zeros. + vals.set(freqData, vals.length - freqData.length); + return tensor(vals, shape); + } + } + + /** + * @license + * Copyright 2018 Google LLC. 
All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * ============================================================================= + */ + /** + * Provide a stream of image tensors from webcam video stream. Only works in + * browser environment. + */ + class WebcamIterator extends LazyIterator { + constructor(webcamVideoElement, webcamConfig) { + super(); + this.webcamVideoElement = webcamVideoElement; + this.webcamConfig = webcamConfig; + this.isClosed = true; + this.resize = false; + if (this.needToResize()) { + this.resize = true; + this.cropSize = + [this.webcamConfig.resizeHeight, this.webcamConfig.resizeWidth]; + this.cropBoxInd = tensor1d([0], 'int32'); + if (this.webcamConfig.centerCrop) { + // Calculate the box based on resizing shape. + const widthCroppingRatio = this.webcamConfig.resizeWidth * 1.0 / this.webcamVideoElement.width; + const heightCroppingRatio = this.webcamConfig.resizeHeight * 1.0 / + this.webcamVideoElement.height; + const widthCropStart = (1 - widthCroppingRatio) / 2; + const heightCropStart = (1 - heightCroppingRatio) / 2; + const widthCropEnd = widthCropStart + widthCroppingRatio; + const heightCropEnd = heightCroppingRatio + heightCropStart; + this.cropBox = tensor2d([heightCropStart, widthCropStart, heightCropEnd, widthCropEnd], [1, 4]); + } + else { + this.cropBox = tensor2d([0, 0, 1, 1], [1, 4]); + } + } + } + summary() { + return `webcam`; + } + // Construct a WebcamIterator and start it's video stream. 
+ static async create(webcamVideoElement, webcamConfig = {}) { + if (!env().get('IS_BROWSER')) { + throw new Error('tf.data.webcam is only supported in browser environment.'); + } + if (!webcamVideoElement) { + // If webcam video element is not provided, create a hidden video element + // with provided width and height. + webcamVideoElement = document.createElement('video'); + if (!webcamConfig.resizeWidth || !webcamConfig.resizeHeight) { + throw new Error('Please provide webcam video element, or resizeWidth and ' + + 'resizeHeight to create a hidden video element.'); + } + webcamVideoElement.width = webcamConfig.resizeWidth; + webcamVideoElement.height = webcamConfig.resizeHeight; + } + const webcamIterator = new WebcamIterator(webcamVideoElement, webcamConfig); + // Call async function to initialize the video stream. + await webcamIterator.start(); + return webcamIterator; + } + // Async function to start video stream. + async start() { + if (this.webcamConfig.facingMode) { + assert$1((this.webcamConfig.facingMode === 'user') || + (this.webcamConfig.facingMode === 'environment'), () => `Invalid webcam facing mode: ${this.webcamConfig.facingMode}. ` + + `Please provide 'user' or 'environment'`); + } + try { + this.stream = await navigator.mediaDevices.getUserMedia({ + video: { + deviceId: this.webcamConfig.deviceId, + facingMode: this.webcamConfig.facingMode ? 
+ this.webcamConfig.facingMode : + 'user', + width: this.webcamVideoElement.width, + height: this.webcamVideoElement.height + } + }); + } + catch (e) { + // Modify the error message but leave the stack trace intact + e.message = `Error thrown while initializing video stream: ${e.message}`; + throw e; + } + if (!this.stream) { + throw new Error('Could not obtain video from webcam.'); + } + // Older browsers may not have srcObject + try { + this.webcamVideoElement.srcObject = this.stream; + } + catch (error) { + console.log(error); + this.webcamVideoElement.src = window.URL.createObjectURL(this.stream); + } + // Start the webcam video stream + this.webcamVideoElement.play(); + this.isClosed = false; + return new Promise(resolve => { + // Add event listener to make sure the webcam has been fully initialized. + this.webcamVideoElement.onloadedmetadata = () => { + resolve(); + }; + }); + } + async next() { + if (this.isClosed) { + return { value: null, done: true }; + } + let img; + try { + img = fromPixels$1(this.webcamVideoElement); + } + catch (e) { + throw new Error(`Error thrown converting video to pixels: ${JSON.stringify(e)}`); + } + if (this.resize) { + try { + return { value: this.cropAndResizeFrame(img), done: false }; + } + catch (e) { + throw new Error(`Error thrown cropping the video: ${e.message}`); + } + finally { + img.dispose(); + } + } + else { + return { value: img, done: false }; + } + } + needToResize() { + // If resizeWidth and resizeHeight are provided, and different from the + // width and height of original HTMLVideoElement, then resizing and cropping + // is required. 
+ if (this.webcamConfig.resizeWidth && this.webcamConfig.resizeHeight && + (this.webcamVideoElement.width !== this.webcamConfig.resizeWidth || + this.webcamVideoElement.height !== this.webcamConfig.resizeHeight)) { + return true; + } + return false; + } + // Cropping and resizing each frame based on config + cropAndResizeFrame(img) { + return tidy(() => { + const expandedImage = expandDims$3(cast$3(img, 'float32'), (0)); + let resizedImage; + resizedImage = image$1.cropAndResize(expandedImage, this.cropBox, this.cropBoxInd, this.cropSize, 'bilinear'); + // Extract image from batch cropping. + const shape = resizedImage.shape; + return reshape$3(resizedImage, shape.slice(1)); + }); + } + // Capture one frame from the video stream, and extract the value from + // iterator.next() result. + async capture() { + return (await this.next()).value; + } + // Stop the video stream and pause webcam iterator. + stop() { + const tracks = this.stream.getTracks(); + tracks.forEach(track => track.stop()); + try { + this.webcamVideoElement.srcObject = null; + } + catch (error) { + console.log(error); + this.webcamVideoElement.src = null; + } + this.isClosed = true; + } + // Override toArray() function to prevent collecting. + toArray() { + throw new Error('Can not convert infinite video stream to array.'); + } + } + + /** + * @license + * Copyright 2018 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * ============================================================================= + */ + /** + * Represents a data source readable as a stream of binary data chunks. + * + * Because `Dataset`s can be read repeatedly (via `Dataset.iterator()`), this + * provides a means to repeatedly create streams from the underlying data + * sources. + */ + class DataSource { + } + // TODO(soergel): consider convenience factory functions here + // in combination with chainable source->dataset above, e.g.: + // tf.data.url(...).asCsvDataset().shuffle().batch() + + /** + * @license + * Copyright 2018 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * ============================================================================= + */ + class StringIterator extends LazyIterator { + /** + * Splits a string stream on a given separator. + * + * It is assumed that the incoming chunk boundaries have no semantic meaning, + * so conceptually the incoming stream is treated simply as the concatenation + * of its elements. + * + * The outgoing stream provides chunks corresponding to the results of the + * standard string split() operation (even if such a chunk spanned incoming + * chunks). The separators are not included. + * + * A typical usage is to split a text file (represented as a stream with + * arbitrary chunk boundaries) into lines. + * + * @param upstream A readable stream of strings that can be treated as + * concatenated. 
+ * @param separator A character to split on. + */ + split(separator) { + return new SplitIterator(this, separator); + } + } + // ============================================================================ + // The following private classes serve to implement the chainable methods + // on StringIterator. Unfortunately they can't be placed in separate files, due + // to resulting trouble with circular imports. + // ============================================================================ + // We wanted multiple inheritance, e.g. + // class SplitIterator extends QueueIterator, StringIterator + // but the TypeScript mixin approach is a bit hacky, so we take this adapter + // approach instead. + class SplitIterator extends StringIterator { + constructor(upstream, separator) { + super(); + this.upstream = upstream; + this.impl = new SplitIteratorImpl(upstream, separator); + } + summary() { + return this.impl.summary(); + } + async next() { + return this.impl.next(); + } + } + class SplitIteratorImpl extends OneToManyIterator { + constructor(upstream, separator) { + super(); + this.upstream = upstream; + this.separator = separator; + // A partial string at the end of an upstream chunk + this.carryover = ''; + } + summary() { + return `${this.upstream.summary()} -> Split('${this.separator}')`; + } + async pump() { + const chunkResult = await this.upstream.next(); + if (chunkResult.done) { + if (this.carryover === '') { + return false; + } + // Pretend that the pump succeeded in order to emit the small last batch. + // The next pump() call will actually fail. + this.outputQueue.push(this.carryover); + this.carryover = ''; + return true; + } + const lines = chunkResult.value.split(this.separator); + // Note the behavior: " ab ".split(' ') === ['', 'ab', ''] + // Thus the carryover may be '' if the separator falls on a chunk + // boundary; this produces the correct result. 
+ lines[0] = this.carryover + lines[0]; + for (const line of lines.slice(0, -1)) { + this.outputQueue.push(line); + } + this.carryover = lines[lines.length - 1]; + return true; + } + } + + /** + * @license + * Copyright 2018 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * ============================================================================= + */ + class ByteChunkIterator extends LazyIterator { + /** + * Decode a stream of UTF8-encoded byte arrays to a stream of strings. + * + * The byte arrays producetd from the ByteChunkIterator on which this is + * called will be interpreted as concatenated. No assumptions are made about + * the boundaries of the incoming chunks, so a multi-byte UTF8 encoding of a + * character may span the boundary between chunks. This naturally happens, + * for instance, when reading fixed-size byte arrays from a file. + */ + decodeUTF8() { + return new Utf8Iterator(this); + } + } + // ============================================================================ + // The following private classes serve to implement the chainable methods + // on ByteChunkIterator. Unfortunately they can't be placed in separate files, + // due to resulting trouble with circular imports. + // ============================================================================ + // We wanted multiple inheritance, e.g. 
+ // class Utf8Iterator extends QueueIterator, StringIterator + // but the TypeScript mixin approach is a bit hacky, so we take this adapter + // approach instead. + class Utf8Iterator extends StringIterator { + constructor(upstream) { + super(); + this.upstream = upstream; + this.impl = new Utf8IteratorImpl(upstream); + } + summary() { + return this.impl.summary(); + } + async next() { + return this.impl.next(); + } + } + /** + * Decode a stream of UTF8-encoded byte arrays to a stream of strings. + * + * This is tricky because the incoming byte array boundaries may disrupt a + * multi-byte UTF8 character. Thus any incomplete character data at the end of + * a chunk must be carried over and prepended to the next chunk before + * decoding. Luckily with native decoder, TextDecoder in browser and + * string_decoder in node, byte array boundaries are handled automatically. + * + * In the context of an input pipeline for machine learning, UTF8 decoding is + * needed to parse text files containing training examples or prediction + * requests (e.g., formatted as CSV or JSON). We cannot use the built-in + * decoding provided by FileReader.readAsText() because here we are in a + * streaming context, which FileReader does not support. + * + * @param upstream A `LazyIterator` of `Uint8Arrays` containing UTF8-encoded + * text, which should be interpreted as concatenated. No assumptions are + * made about the boundaries of the incoming chunks, so a multi-byte UTF8 + * encoding of a character may span the boundary between chunks. This + * naturally happens, for instance, when reading fixed-size byte arrays from a + * file. 
+ */ + class Utf8IteratorImpl extends OneToManyIterator { + constructor(upstream) { + super(); + this.upstream = upstream; + if (env().get('IS_BROWSER')) { + this.decoder = new TextDecoder('utf-8'); + } + else { + // tslint:disable-next-line:no-require-imports + const { StringDecoder } = require('string_decoder'); + this.decoder = new StringDecoder('utf8'); + } + } + summary() { + return `${this.upstream.summary()} -> Utf8`; + } + async pump() { + const chunkResult = await this.upstream.next(); + let chunk; + if (chunkResult.done) { + return false; + } + else { + chunk = chunkResult.value; + } + let text; + if (env().get('IS_BROWSER')) { + text = this.decoder.decode(chunk, { stream: true }); + } + else { + text = this.decoder.write(Buffer.from(chunk.buffer)); + } + this.outputQueue.push(text); + return true; + } + } + + /** + * @license + * Copyright 2018 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * ============================================================================= + */ + /** + * Provide a stream of chunks from a File, Blob, or Uint8Array. + * @param file The source File, Blob or Uint8Array. + * @param options Optional settings controlling file reading. + * @returns a lazy Iterator of Uint8Arrays containing sequential chunks of the + * input File, Blob or Uint8Array. 
+ */ + class FileChunkIterator extends ByteChunkIterator { + constructor(file, options = {}) { + super(); + this.file = file; + this.options = options; + assert$1((file instanceof Uint8Array) || + (env().get('IS_BROWSER') ? + (file instanceof File || file instanceof Blob) : + false), () => 'FileChunkIterator only supports File, Blob and Uint8Array ' + + 'right now.'); + this.offset = options.offset || 0; + // default 1MB chunk has tolerable perf on large files + this.chunkSize = options.chunkSize || 1024 * 1024; + } + summary() { + return `FileChunks ${this.file}`; + } + async next() { + if (this.offset >= ((this.file instanceof Uint8Array) ? + this.file.byteLength : + this.file.size)) { + return { value: null, done: true }; + } + const chunk = new Promise((resolve, reject) => { + const end = this.offset + this.chunkSize; + if (this.file instanceof Uint8Array) { + // Note if end > this.uint8Array.byteLength, we just get a small last + // chunk. + resolve(new Uint8Array(this.file.slice(this.offset, end))); + } + else { + // This branch assumes that this.file type is File or Blob, which + // means it is in the browser environment. + // TODO(soergel): is this a performance issue? + const fileReader = new FileReader(); + fileReader.onload = (event) => { + let data = fileReader.result; + // Not sure we can trust the return type of + // FileReader.readAsArrayBuffer See e.g. + // https://github.com/node-file-api/FileReader/issues/2 + if (data instanceof ArrayBuffer) { + data = new Uint8Array(data); + } + if (!(data instanceof Uint8Array)) { + return reject(new TypeError('FileReader returned unknown type.')); + } + resolve(data); + }; + fileReader.onabort = (event) => { + return reject(new Error('Aborted')); + }; + fileReader.onerror = (event) => { + return reject(new Error(event.type)); + }; + // TODO(soergel): better handle onabort, onerror + // Note if end > this.file.size, we just get a small last chunk. 
+ const slice = this.file.slice(this.offset, end); + // We can't use readAsText here (even if we know the file is text) + // because the slice boundary may fall within a multi-byte character. + fileReader.readAsArrayBuffer(slice); + } + this.offset = end; + }); + return { value: (await chunk), done: false }; + } + } + + /** + * @license + * Copyright 2018 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * ============================================================================= + */ + /** + * Provide a stream of chunks from a URL. + * + * Note this class first downloads the entire file into memory before providing + * the first element from the stream. This is because the Fetch API does not + * yet reliably provide a reader stream for the response body. + */ + async function urlChunkIterator(url, options = {}, fetchFunc) { + let urlString; + let requestInit; + if ((typeof url) === 'string') { + urlString = url; + } + else { + urlString = url.url; + requestInit = getRequestInitFromRequest(url); + } + const response = await (fetchFunc || fetch$1)(urlString, requestInit); + if (response.ok) { + const uint8Array = new Uint8Array(await response.arrayBuffer()); + return new FileChunkIterator(uint8Array, options); + } + else { + throw new Error(response.statusText); + } + } + // Generate RequestInit from Request to match tf.util.fetch signature. 
+ const getRequestInitFromRequest = (request) => { + const init = { + method: request.method, + headers: request.headers, + body: request.body, + mode: request.mode, + credentials: request.credentials, + cache: request.cache, + redirect: request.redirect, + referrer: request.referrer, + integrity: request.integrity, + }; + return init; + }; + + /** + * @license + * Copyright 2018 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * ============================================================================= + */ + // Skip tslint any type check cause this method is aiming to check type of + // input. + // tslint:disable-next-line:no-any + function isLocalPath(source) { + return (typeof source === 'string') && source.slice(0, 7) === 'file://'; + } + + /** + * @license + * Copyright 2018 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * ============================================================================= + */ + /** + * Represents a file, blob, or Uint8Array readable as a stream of binary data + * chunks. + */ + class FileDataSource extends DataSource { + /** + * Create a `FileDataSource`. + * + * @param input Local file path, or `File`/`Blob`/`Uint8Array` object to + * read. Local file only works in node environment. + * @param options Options passed to the underlying `FileChunkIterator`s, + * such as {chunksize: 1024}. + */ + constructor(input, options = {}) { + super(); + this.input = input; + this.options = options; + } + async iterator() { + if (isLocalPath(this.input) && env().get('IS_NODE')) { + // tslint:disable-next-line:no-require-imports + const fs = require('fs'); + this.input = fs.readFileSync(this.input.slice(7)); + } + // TODO(kangyizhang): Add LocalFileChunkIterator to split local streaming + // with file in browser. + return new FileChunkIterator(this.input, this.options); + } + } + + /** + * @license + * Copyright 2018 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * ============================================================================= + */ + /* + * Represents a URL readable as a stream of binary data chunks. + */ + class URLDataSource extends DataSource { + /** + * Create a `URLDataSource`. + * + * @param url A source URL string, or a `Request` object. 
+ * @param options Options passed to the underlying `FileChunkIterator`s, + * such as {chunksize: 1024}. + */ + constructor(url, fileOptions = {}) { + super(); + this.url = url; + this.fileOptions = fileOptions; + } + // TODO(soergel): provide appropriate caching options. Currently this + // will download the URL anew for each call to iterator(). Since we have + // to treat the downloaded file as a blob/buffer anyway, we may as well retain + // it-- but that raises GC issues. Also we may want a persistent disk cache. + async iterator() { + if (isLocalPath(this.url)) { + return (new FileDataSource(this.url, this.fileOptions)) + .iterator(); + } + else { + return urlChunkIterator(this.url, this.fileOptions); + } + } + } + + /** + * @license + * Copyright 2018 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * ============================================================================= + */ + /** + * Create a `CSVDataset` by reading and decoding CSV file(s) from provided URL + * or local path if it's in Node environment. + * + * Note: If isLabel in columnConfigs is `true` for at least one column, the + * element in returned `CSVDataset` will be an object of + * `{xs:features, ys:labels}`: xs is a dict of features key/value pairs, ys + * is a dict of labels key/value pairs. If no column is marked as label, + * returns a dict of features only. 
+ * + * ```js + * const csvUrl = + * 'https://storage.googleapis.com/tfjs-examples/multivariate-linear-regression/data/boston-housing-train.csv'; + * + * async function run() { + * // We want to predict the column "medv", which represents a median value of + * // a home (in $1000s), so we mark it as a label. + * const csvDataset = tf.data.csv( + * csvUrl, { + * columnConfigs: { + * medv: { + * isLabel: true + * } + * } + * }); + * + * // Number of features is the number of column names minus one for the label + * // column. + * const numOfFeatures = (await csvDataset.columnNames()).length - 1; + * + * // Prepare the Dataset for training. + * const flattenedDataset = + * csvDataset + * .map(({xs, ys}) => + * { + * // Convert xs(features) and ys(labels) from object form (keyed by + * // column name) to array form. + * return {xs:Object.values(xs), ys:Object.values(ys)}; + * }) + * .batch(10); + * + * // Define the model. + * const model = tf.sequential(); + * model.add(tf.layers.dense({ + * inputShape: [numOfFeatures], + * units: 1 + * })); + * model.compile({ + * optimizer: tf.train.sgd(0.000001), + * loss: 'meanSquaredError' + * }); + * + * // Fit the model using the prepared Dataset + * return model.fitDataset(flattenedDataset, { + * epochs: 10, + * callbacks: { + * onEpochEnd: async (epoch, logs) => { + * console.log(epoch + ':' + logs.loss); + * } + * } + * }); + * } + * + * await run(); + * ``` + * + * @param source URL or local path to get CSV file. If it's a local path, it + * must have prefix `file://` and it only works in node environment. + * @param csvConfig (Optional) A CSVConfig object that contains configurations + * of reading and decoding from CSV file(s). 
+ * + * @doc { + * heading: 'Data', + * subheading: 'Creation', + * namespace: 'data', + * configParamIndices: [1] + * } + */ + function csv(source, csvConfig = {}) { + return new CSVDataset(new URLDataSource(source), csvConfig); + } + /** + * Create a `Dataset` that produces each element by calling a provided function. + * + * Note that repeated iterations over this `Dataset` may produce different + * results, because the function will be called anew for each element of each + * iteration. + * + * Also, beware that the sequence of calls to this function may be out of order + * in time with respect to the logical order of the Dataset. This is due to the + * asynchronous lazy nature of stream processing, and depends on downstream + * transformations (e.g. .shuffle()). If the provided function is pure, this is + * no problem, but if it is a closure over a mutable state (e.g., a traversal + * pointer), then the order of the produced elements may be scrambled. + * + * ```js + * let i = -1; + * const func = () => + * ++i < 5 ? {value: i, done: false} : {value: null, done: true}; + * const ds = tf.data.func(func); + * await ds.forEachAsync(e => console.log(e)); + * ``` + * + * @param f A function that produces one data element on each call. + */ + function func(f) { + const iter = iteratorFromFunction(f); + return datasetFromIteratorFn(async () => iter); + } + /** + * Create a `Dataset` that produces each element from provided JavaScript + * generator, which is a function that returns a (potentially async) iterator. + * + * For more information on iterators and generators, see + * https://developer.mozilla.org/en-US/docs/Web/JavaScript/Guide/Iterators_and_Generators . + * For the iterator protocol, see + * https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Iteration_protocols . 
+ * + * Example of creating a dataset from an iterator factory: + * ```js + * function makeIterator() { + * const numElements = 10; + * let index = 0; + * + * const iterator = { + * next: () => { + * let result; + * if (index < numElements) { + * result = {value: index, done: false}; + * index++; + * return result; + * } + * return {value: index, done: true}; + * } + * }; + * return iterator; + * } + * const ds = tf.data.generator(makeIterator); + * await ds.forEachAsync(e => console.log(e)); + * ``` + * + * Example of creating a dataset from a generator: + * ```js + * function* dataGenerator() { + * const numElements = 10; + * let index = 0; + * while (index < numElements) { + * const x = index; + * index++; + * yield x; + * } + * } + * + * const ds = tf.data.generator(dataGenerator); + * await ds.forEachAsync(e => console.log(e)); + * ``` + * + * @param generator A JavaScript function that returns + * a (potentially async) JavaScript iterator. + * + * @doc { + * heading: 'Data', + * subheading: 'Creation', + * namespace: 'data', + * configParamIndices: [1] + * } + */ + function generator(generator) { + return datasetFromIteratorFn(async () => { + const gen = await generator(); + return iteratorFromFunction(() => gen.next()); + }); + } + /** + * Create an iterator that generates `Tensor`s from webcam video stream. This + * API only works in Browser environment when the device has webcam. + * + * Note: this code snippet only works when the device has a webcam. It will + * request permission to open the webcam when running. + * ```js + * const videoElement = document.createElement('video'); + * videoElement.width = 100; + * videoElement.height = 100; + * const cam = await tf.data.webcam(videoElement); + * const img = await cam.capture(); + * img.print(); + * cam.stop(); + * ``` + * + * @param webcamVideoElement A `HTMLVideoElement` used to play video from + * webcam. If this element is not provided, a hidden `HTMLVideoElement` will + * be created. 
In that case, `resizeWidth` and `resizeHeight` must be + * provided to set the generated tensor shape. + * @param webcamConfig A `WebcamConfig` object that contains configurations of + * reading and manipulating data from webcam video stream. + * + * @doc { + * heading: 'Data', + * subheading: 'Creation', + * namespace: 'data', + * ignoreCI: true + * } + */ + async function webcam(webcamVideoElement, webcamConfig) { + return WebcamIterator.create(webcamVideoElement, webcamConfig); + } + /** + * Create an iterator that generates frequency-domain spectrogram `Tensor`s from + * microphone audio stream with browser's native FFT. This API only works in + * browser environment when the device has microphone. + * + * Note: this code snippet only works when the device has a microphone. It will + * request permission to open the microphone when running. + * ```js + * const mic = await tf.data.microphone({ + * fftSize: 1024, + * columnTruncateLength: 232, + * numFramesPerSpectrogram: 43, + * sampleRateHz:44100, + * includeSpectrogram: true, + * includeWaveform: true + * }); + * const audioData = await mic.capture(); + * const spectrogramTensor = audioData.spectrogram; + * spectrogramTensor.print(); + * const waveformTensor = audioData.waveform; + * waveformTensor.print(); + * mic.stop(); + * ``` + * + * @param microphoneConfig A `MicrophoneConfig` object that contains + * configurations of reading audio data from microphone. + * + * @doc { + * heading: 'Data', + * subheading: 'Creation', + * namespace: 'data', + * ignoreCI: true + * } + */ + async function microphone(microphoneConfig) { + return MicrophoneIterator.create(microphoneConfig); + } + + /** @license See the LICENSE file. */ + // This code is auto-generated, do not modify this file! + const version$4 = '4.22.0'; + + /** + * @license + * Copyright 2018 Google LLC. All Rights Reserved. 
+ * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + + var index = /*#__PURE__*/Object.freeze({ + __proto__: null, + CSVDataset: CSVDataset, + Dataset: Dataset, + FileDataSource: FileDataSource, + TextLineDataset: TextLineDataset, + URLDataSource: URLDataSource, + array: array, + csv: csv, + func: func, + generator: generator, + microphone: microphone, + version_data: version$4, + webcam: webcam, + zip: zip + }); + + /** + * @license + * Copyright 2019 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ + function assertNotComplex$1(tensor, opName) { + if (!Array.isArray(tensor)) { + tensor = [tensor]; + } + tensor.forEach(t => { + if (t != null) { + assert$1(t.dtype !== 'complex64', () => `${opName} does not support complex64 tensors in the CPU backend.`); + } + }); + } + + /** + * @license + * Copyright 2021 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + const whereImpl$1 = whereImpl$2; + class MathBackendCPU extends KernelBackend { + nextDataId() { + return MathBackendCPU.nextDataId++; + } + constructor() { + super(); + this.blockSize = 48; + this.firstUse = true; + this.data = new DataStorage(this, engine()); + } + write(values, shape, dtype) { + if (this.firstUse) { + this.firstUse = false; + if (env().get('IS_NODE')) { + warn('\n============================\n' + + 'Hi, looks like you are running TensorFlow.js in ' + + 'Node.js. To speed things up dramatically, install our node ' + + 'backend, visit https://github.com/tensorflow/tfjs-node for more details. ' + + '\n============================'); + } + } + const dataId = { id: this.nextDataId() }; + this.data.set(dataId, { values, dtype, refCount: 1 }); + return dataId; + } + /** + * Create a data bucket in cpu backend. + * @param shape Shape of the `TensorInfo`. 
+ * @param dtype DType of the `TensorInfo`. + * @param values The value of the `TensorInfo` stored as a flattened array. + */ + makeTensorInfo(shape, dtype, values) { + let outId; + if (dtype === 'string' && values != null && values.length > 0 && + isString(values[0])) { + const encodedValues = values.map(d => encodeString(d)); + outId = this.write(encodedValues, shape, dtype); + } + else { + outId = this.write(values, shape, dtype); + } + return { dataId: outId, shape, dtype }; + } + /** Return refCount of a `TensorData`. */ + refCount(dataId) { + if (this.data.has(dataId)) { + const tensorData = this.data.get(dataId); + return tensorData.refCount; + } + return 0; + } + /** Increase refCount of a `TensorData`. */ + incRef(dataId) { + const tensorData = this.data.get(dataId); + tensorData.refCount++; + } + /** Decrease refCount of a `TensorData`. */ + decRef(dataId) { + if (this.data.has(dataId)) { + const tensorData = this.data.get(dataId); + tensorData.refCount--; + } + } + move(dataId, values, shape, dtype, refCount) { + this.data.set(dataId, { values, dtype, refCount }); + } + numDataIds() { + return this.data.numDataIds(); + } + async read(dataId) { + return this.readSync(dataId); + } + readSync(dataId) { + const { dtype, complexTensorInfos } = this.data.get(dataId); + if (dtype === 'complex64') { + const realValues = this.readSync(complexTensorInfos.real.dataId); + const imagValues = this.readSync(complexTensorInfos.imag.dataId); + return mergeRealAndImagArrays(realValues, imagValues); + } + return convertBackendValuesAndArrayBuffer(this.data.get(dataId).values, dtype); + } + bufferSync(t) { + const data = this.readSync(t.dataId); + if (t.dtype === 'string') { + try { + // Decode the bytes into string. 
+ const strings = data.map(d => decodeString(d)); + return buffer(t.shape, t.dtype, strings); + } + catch (_a) { + throw new Error('Failed to decode encoded string bytes into utf-8'); + } + } + return buffer(t.shape, t.dtype, data); + } + makeOutput(values, shape, dtype) { + return engine().makeTensorFromTensorInfo(this.makeTensorInfo(shape, dtype, values), this); + } + /** + * Dispose the memory if the dataId has 0 refCount. Return true if the memory + * is released or memory is not managed in this backend, false if memory is + * not cleared. + * @param dataId + * @oaram force Optional, remove the data regardless of refCount + */ + disposeData(dataId, force = false) { + if (this.data.has(dataId)) { + this.data.get(dataId).refCount--; + if (!force && this.data.get(dataId).refCount > 0) { + return false; + } + const { complexTensorInfos } = this.data.get(dataId); + if (complexTensorInfos != null) { + this.disposeData(complexTensorInfos.real.dataId, true); + this.disposeData(complexTensorInfos.imag.dataId, true); + } + this.data.delete(dataId); + } + return true; + } + disposeIntermediateTensorInfo(tensorInfo) { + this.disposeData(tensorInfo.dataId); + } + async time(f) { + const start = now(); + f(); + const kernelMs = now() - start; + return { kernelMs }; + } + memory() { + return { + // Unreliable due to automatic gc. The numbers above are cumulative. + unreliable: true, + reasons: ['The reported memory is an upper bound. Due to automatic garbage ' + + 'collection, the true allocated memory may be less.'] + }; + } + where(condition) { + assertNotComplex$1([condition], 'where'); + const condVals = this.readSync(condition.dataId); + return whereImpl$1(condition.shape, condVals); + } + dispose() { } + floatPrecision() { + return 32; + } + /** Returns the smallest representable number. */ + epsilon() { + return super.epsilon(); + } + } + MathBackendCPU.nextDataId = 0; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. 
+ * Licensed under the Apache License, Version 2.0 (the License); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + function simpleAbsImpl(vals) { + const resultValues = new Float32Array(vals.length); + for (let i = 0; i < vals.length; ++i) { + resultValues[i] = Math.abs(vals[i]); + } + return resultValues; + } + const abs$1 = (args) => { + const { x } = args.inputs; + const cpuBackend = args.backend; + assertNotComplex$1(x, 'abs'); + let resultValues = new Float32Array(sizeFromShape(x.shape)); + const values = cpuBackend.data.get(x.dataId).values; + resultValues = simpleAbsImpl(values); + return cpuBackend.makeOutput(resultValues, x.shape, x.dtype); + }; + const absConfig$1 = { + kernelName: Abs, + backendName: 'cpu', + kernelFunc: abs$1, + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ + /** + * Template that creates implementation for binary ops. Supports broadcast. + */ + function createSimpleBinaryKernelImpl(op) { + return (aShape, bShape, aVals, bVals, dtype) => { + const newShape = assertAndGetBroadcastShape(aShape, bShape); + const resultRank = newShape.length; + const resultStrides = computeStrides(newShape); + const resultSize = sizeFromShape(newShape); + const result = getTypedArrayFromDType(dtype, resultSize); + const aRank = aShape.length; + const bRank = bShape.length; + const aStrides = computeStrides(aShape); + const bStrides = computeStrides(bShape); + const aBroadcastDims = getBroadcastDims$1(aShape, newShape); + const bBroadcastDims = getBroadcastDims$1(bShape, newShape); + if (aBroadcastDims.length + bBroadcastDims.length === 0) { + for (let i = 0; i < result.length; ++i) { + result[i] = op(aVals[i % aVals.length], bVals[i % bVals.length]); + } + } + else { + for (let i = 0; i < result.length; ++i) { + const loc = indexToLoc(i, resultRank, resultStrides); + const aLoc = loc.slice(-aRank); + aBroadcastDims.forEach(d => aLoc[d] = 0); + const aIndex = locToIndex(aLoc, aRank, aStrides); + const bLoc = loc.slice(-bRank); + bBroadcastDims.forEach(d => bLoc[d] = 0); + const bIndex = locToIndex(bLoc, bRank, bStrides); + result[i] = op(aVals[aIndex], bVals[bIndex]); + } + } + return [result, newShape]; + }; + } + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + function complex$1(args) { + const { inputs, backend } = args; + const { real, imag } = inputs; + const realVals = backend.data.get(real.dataId).values; + const imagVals = backend.data.get(imag.dataId).values; + const complexInfo = backend.makeTensorInfo(real.shape, 'complex64'); + const complex = backend.data.get(complexInfo.dataId); + // The complex tensor owns the underlying real and imag tensorInfos, only the + // complex tensor tracks refCount, when complexData is disposed the + // underlying tensorData will be disposed. + complex.complexTensorInfos = { + real: backend.makeTensorInfo(real.shape, 'float32', realVals), + imag: backend.makeTensorInfo(imag.shape, 'float32', imagVals) + }; + return complexInfo; + } + const complexConfig$1 = { + kernelName: Complex, + backendName: 'cpu', + kernelFunc: complex$1 + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + /** + * Generates a tensorInfo with all zeros value. + * @param backend cpu backend. + * @param shape Shape for the zeros tensor. + * @param dtype Optional. If set, the result has this dtype. 
+ */ + function zeros(backend, shape, dtype = 'float32') { + if (dtype === 'complex64') { + const real = zeros(backend, shape, 'float32'); + const imag = zeros(backend, shape, 'float32'); + return complex$1({ inputs: { real, imag }, backend }); + } + const values = makeZerosTypedArray(sizeFromShape(shape), dtype); + return backend.makeTensorInfo(shape, dtype, values); + } + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + function identity$1(args) { + const { inputs, backend } = args; + const { x } = inputs; + backend.incRef(x.dataId); + return { dataId: x.dataId, shape: x.shape, dtype: x.dtype }; + } + const identityConfig$1 = { + kernelName: Identity$1, + backendName: 'cpu', + kernelFunc: identity$1 + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + function real$1(args) { + const { inputs, backend } = args; + const { input } = inputs; + const real = backend.data.get(input.dataId).complexTensorInfos.real; + const realVal = backend.data.get(real.dataId).values; + // When complex tensor is disposed, its underlying parts will be disposed too. + // Make new tensor out of the real value of the complex. This makes sure the + // value is still accessible even if complex tensor is disposed. + return backend.makeTensorInfo(real.shape, real.dtype, realVal); + } + const realConfig$1 = { + kernelName: Real, + backendName: 'cpu', + kernelFunc: real$1 + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + function castImpl(values, shape, inputType, dtype) { + if (dtype === 'int32') { + const resultValues = Int32Array.from(values); + return [shape, 'int32', resultValues]; + } + if (dtype === 'bool') { + // This is essentially the result of notEqual(x, 0). We avoid using + // kernel notEqual to avoid circular dependency, i.e. binary_utils -> + // cast -> notEqual -> binary_utils. 
+ const zero = toTypedArray([0], inputType); + const [resultData, resultShape] = createSimpleBinaryKernelImpl((a, b) => (a !== b) ? 1 : 0)(shape, [], values, zero, 'bool'); + return [resultShape, 'bool', resultData]; + } + throw new Error(`Error in Cast: failed to cast ${inputType} to ${dtype}`); + } + function cast$1(args) { + const { inputs, backend, attrs } = args; + const { x } = inputs; + const { dtype } = attrs; + // Casting to complex64. + if (dtype === 'complex64') { + if (x.dtype === 'complex64') { + return identity$1({ inputs: { x }, backend }); + } + const zerosTensorInfo = zeros(backend, x.shape, x.dtype); + const floatX = cast$1({ inputs: { x }, backend, attrs: { dtype: 'float32' } }); + const result = complex$1({ inputs: { real: floatX, imag: zerosTensorInfo }, backend }); + backend.disposeIntermediateTensorInfo(zerosTensorInfo); + backend.disposeIntermediateTensorInfo(floatX); + return result; + } + // Casting from complex64 + if (x.dtype === 'complex64') { + const realPart = real$1({ inputs: { input: x }, backend }); + const result = cast$1({ inputs: { x: realPart }, backend, attrs: { dtype } }); + backend.disposeIntermediateTensorInfo(realPart); + return result; + } + if (!hasEncodingLoss(x.dtype, dtype)) { + // We don't change the underlying data, since we cast to higher + // precision. + const result = identity$1({ inputs: { x }, backend }); + return { dataId: result.dataId, shape: result.shape, dtype }; + } + const values = backend.data.get(x.dataId).values; + const [resultShape, resultType, resultData] = castImpl(values, x.shape, x.dtype, dtype); + return backend.makeTensorInfo(resultShape, resultType, resultData); + } + const castConfig$1 = { + kernelName: Cast, + backendName: 'cpu', + kernelFunc: cast$1 + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + /** + * Template that creates a `KernelFunc` for binary ops. + * @param name Kernel name. + * @param binaryKernelImpl A `SimpleBinaryKernelImpl` for the kernel. + * @param binaryKernelComplexImpl Optional. If exists, represents a + * `ComplexBinaryKernelImpl` for the kernel, will be used when input dtype + * is `complex64`. + * @param dtype Optional. If set, the result has this dtype. Otherwise, the + * result has the same dtype as the first input. This is mainly used in + * comparison kernels, such as Equal, Less, Greater, etc. + */ + function binaryKernelFunc$1(name, simpleImpl, complexImpl, dtype) { + if (complexImpl == null) { + return ({ inputs, backend }) => { + const { a, b } = inputs; + const cpuBackend = backend; + assertNotComplex$1([a, b], name); + const aVals = cpuBackend.data.get(a.dataId).values; + const bVals = cpuBackend.data.get(b.dataId).values; + const decodedAVals = a.dtype === 'string' ? + // tslint:disable-next-line: no-any + fromUint8ToStringArray(aVals) : + aVals; + const decodedBVals = a.dtype === 'string' ? 
+ // tslint:disable-next-line: no-any + fromUint8ToStringArray(bVals) : + bVals; + const $dtype = dtype || a.dtype; + const [resultData, resultShape] = simpleImpl(a.shape, b.shape, decodedAVals, decodedBVals, $dtype); + return cpuBackend.makeTensorInfo(resultShape, $dtype, resultData); + }; + } + return ({ inputs, backend }) => { + const { a, b } = inputs; + const cpuBackend = backend; + if (a.dtype === 'complex64' || b.dtype === 'complex64') { + const $aComplex = cast$1({ inputs: { x: a }, backend: cpuBackend, attrs: { dtype: 'complex64' } }); + const $aComplexVals = cpuBackend.data.get($aComplex.dataId); + const aReal = $aComplexVals.complexTensorInfos.real; + const aImag = $aComplexVals.complexTensorInfos.imag; + const aRealVals = cpuBackend.data.get(aReal.dataId).values; + const aImagVals = cpuBackend.data.get(aImag.dataId).values; + const $bComplex = cast$1({ inputs: { x: b }, backend: cpuBackend, attrs: { dtype: 'complex64' } }); + const $bComplexVals = cpuBackend.data.get($bComplex.dataId); + const bReal = $bComplexVals.complexTensorInfos.real; + const bImag = $bComplexVals.complexTensorInfos.imag; + const bRealVals = cpuBackend.data.get(bReal.dataId).values; + const bImagVals = cpuBackend.data.get(bImag.dataId).values; + const [resultRealData, resultImagData, resultShape] = complexImpl(a.shape, b.shape, aRealVals, aImagVals, bRealVals, bImagVals); + const resultReal = cpuBackend.makeTensorInfo(resultShape, 'float32', resultRealData); + const resultImag = cpuBackend.makeTensorInfo(resultShape, 'float32', resultImagData); + const result = complex$1({ inputs: { real: resultReal, imag: resultImag }, backend: cpuBackend }); + cpuBackend.disposeIntermediateTensorInfo($aComplex); + cpuBackend.disposeIntermediateTensorInfo($bComplex); + cpuBackend.disposeIntermediateTensorInfo(resultReal); + cpuBackend.disposeIntermediateTensorInfo(resultImag); + return result; + } + else { + const aVals = cpuBackend.data.get(a.dataId).values; + const bVals = 
cpuBackend.data.get(b.dataId).values; + const $dtype = dtype || a.dtype; + const [resultData, resultShape] = simpleImpl(a.shape, b.shape, aVals, bVals, $dtype); + return cpuBackend.makeTensorInfo(resultShape, $dtype, resultData); + } + }; + } + /** + * Template that creates the complex type implementation for binary ops. + * Supports broadcast. + */ + function createComplexBinaryKernelImpl(op) { + return (aShape, bShape, aRealVals, aImagVals, bRealVals, bImagVals) => { + const resultShape = assertAndGetBroadcastShape(aShape, bShape); + const resultSize = sizeFromShape(resultShape); + const resultRank = resultShape.length; + const resultStrides = computeStrides(resultShape); + const resultRealVals = getTypedArrayFromDType('float32', resultSize); + const resultImagVals = getTypedArrayFromDType('float32', resultSize); + const aBroadcastDims = getBroadcastDims$1(aShape, resultShape); + const bBroadcastDims = getBroadcastDims$1(bShape, resultShape); + const aVals = mergeRealAndImagArrays(aRealVals, aImagVals); + const bVals = mergeRealAndImagArrays(bRealVals, bImagVals); + const aRank = aShape.length; + const aStrides = computeStrides(aShape); + const bRank = bShape.length; + const bStrides = computeStrides(bShape); + if (aBroadcastDims.length + bBroadcastDims.length === 0) { + for (let i = 0; i < resultRealVals.length; i++) { + const aIdx = i % aVals.length; + const bIdx = i % bVals.length; + const result = op(aVals[aIdx * 2], aVals[aIdx * 2 + 1], bVals[bIdx * 2], bVals[bIdx * 2 + 1]); + resultRealVals[i] = result.real; + resultImagVals[i] = result.imag; + } + } + else { + for (let i = 0; i < resultRealVals.length; i++) { + const loc = indexToLoc(i, resultRank, resultStrides); + const aLoc = loc.slice(-aRank); + aBroadcastDims.forEach(d => aLoc[d] = 0); + const aIndex = locToIndex(aLoc, aRank, aStrides); + const bLoc = loc.slice(-bRank); + bBroadcastDims.forEach(d => bLoc[d] = 0); + const bIndex = locToIndex(bLoc, bRank, bStrides); + const opResult = op(aVals[aIndex * 
2], aVals[aIndex * 2 + 1], bVals[bIndex * 2], bVals[bIndex * 2 + 1]); + resultRealVals[i] = opResult.real; + resultImagVals[i] = opResult.imag; + } + } + return [resultRealVals, resultImagVals, resultShape]; + }; + } + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + const addImpl = createSimpleBinaryKernelImpl(((a, b) => a + b)); + const addComplexImpl = createComplexBinaryKernelImpl(((aReal, aImag, bReal, bImag) => { + return { real: aReal + bReal, imag: aImag + bImag }; + })); + const add = binaryKernelFunc$1(Add$1, addImpl, addComplexImpl); + const addConfig$1 = { + kernelName: Add$1, + backendName: 'cpu', + kernelFunc: add + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ + function bincountImpl(xVals, weightsVals, weightsDtype, weightsShape, size) { + const weightsSize = sizeFromShape(weightsShape); + const outVals = makeZerosTypedArray(size, weightsDtype); + for (let i = 0; i < xVals.length; i++) { + const value = xVals[i]; + if (value < 0) { + throw new Error('Input x must be non-negative!'); + } + if (value >= size) { + continue; + } + if (weightsSize > 0) { + outVals[value] += weightsVals[i]; + } + else { + outVals[value] += 1; + } + } + return outVals; + } + function bincountReduceImpl(xBuf, weightsBuf, size, binaryOutput = false) { + const numRows = xBuf.shape[0]; + const numCols = xBuf.shape[1]; + const outBuf = buffer([numRows, size], weightsBuf.dtype); + for (let i = 0; i < numRows; i++) { + for (let j = 0; j < numCols; j++) { + const value = xBuf.get(i, j); + if (value < 0) { + throw new Error('Input x must be non-negative!'); + } + if (value >= size) { + continue; + } + if (binaryOutput) { + outBuf.set(1, i, value); + } + else { + if (weightsBuf.size > 0) { + outBuf.set(outBuf.get(i, value) + weightsBuf.get(i, j), i, value); + } + else { + outBuf.set(outBuf.get(i, value) + 1, i, value); + } + } + } + } + return outBuf; + } + + /** + * @license + * Copyright 2023 Google LLC. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ + const bitwiseAndImpl = createSimpleBinaryKernelImpl(((a, b) => a & b)); + const bitwiseAnd$1 = binaryKernelFunc$1(BitwiseAnd, bitwiseAndImpl); + const bitwiseAndConfig$1 = { + kernelName: BitwiseAnd, + backendName: 'cpu', + kernelFunc: bitwiseAnd$1 + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + /** + * Template that creates implementation for unary op. + */ + function createSimpleUnaryImpl(op) { + return (values, dtype, attrs) => { + const newValues = getArrayFromDType(dtype, values.length); + for (let i = 0; i < values.length; ++i) { + newValues[i] = op(values[i], attrs); + } + return newValues; + }; + } + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + /** + * Template that creates a `KernelFunc` for unary ops. + * @param name Kernel name. + * @param op A `SimpleUnaryOperation` for the kernel. + * @param dtype Optional. If set, the result has this dtype. Otherwise, the + * result has the same dtype as the input. This is mainly used in certain + * kernels that return bool type, such as isFinite, isInf, etc. + */ + function unaryKernelFunc$1(name, op, dtype) { + const impl = createSimpleUnaryImpl(op); + return unaryKernelFuncFromImpl(name, impl, dtype); + } + /** + * Template that creates a `KernelFunc` for unary ops from the given + * `SimpleUnaryImpl`.. + * @param name Kernel name. + * @param unaryImpl A `SimpleUnaryImpl` that implements the op. + * @param dtype Optional. If set, the result has this dtype. Otherwise, the + * result has the same dtype as the input. This is mainly used in certain + * kernels that return bool type, such as isFinite, isInf, etc. + */ + function unaryKernelFuncFromImpl(name, unaryImpl, dtype) { + return ({ inputs, attrs, backend }) => { + const { x } = inputs; + assertNotComplex$1(x, name); + const cpuBackend = backend; + const values = cpuBackend.data.get(x.dataId).values; + let decoded; + if (x.dtype === 'string') { + if (!Array.isArray(values)) { + throw new Error('String tensor\'s value was not an instance of Array'); + } + decoded = fromUint8ToStringArray(values); + } + else { + decoded = values; + } + const $dtype = dtype || x.dtype; + const newValues = unaryImpl(decoded, $dtype, attrs); + return cpuBackend.makeTensorInfo(x.shape, $dtype, newValues); + }; + } + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the License); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + const ceilImpl = createSimpleUnaryImpl((xi) => Math.ceil(xi)); + const ceil$1 = unaryKernelFuncFromImpl(Ceil, ceilImpl); + const ceilConfig$1 = { + kernelName: Ceil, + backendName: 'cpu', + kernelFunc: ceil$1, + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + function concatImpl$1(inputs, outShape, dtype, simplyConcat) { + const outVals = getArrayFromDType(dtype, sizeFromShape(outShape)); + if (simplyConcat && dtype !== 'string') { + // Use built-in TypedArray.set() method for speed. + let offset = 0; + inputs.forEach(input => { + const size = sizeFromShape(input.shape); + outVals.set(input.vals, offset); + offset += size; + }); + } + else { + let colOffset = 0; + inputs.forEach(input => { + const decodedData = dtype === 'string' ? 
+ fromUint8ToStringArray(input.vals) : + input.vals; + let tIdx = 0; + for (let row = 0; row < input.shape[0]; ++row) { + const resIdx = row * outShape[1] + colOffset; + for (let col = 0; col < input.shape[1]; ++col) { + outVals[resIdx + col] = decodedData[tIdx++]; + } + } + colOffset += input.shape[1]; + }); + } + return outVals; + } + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + const equalImpl = createSimpleBinaryKernelImpl((a, b) => (a === b) ? 1 : 0); + const equal$1 = binaryKernelFunc$1(Equal, equalImpl, null /* complexImpl */, 'bool'); + const equalConfig$1 = { + kernelName: Equal, + backendName: 'cpu', + kernelFunc: equal$1 + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the License); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ + const expImpl = createSimpleUnaryImpl((xi) => Math.exp(xi)); + const exp$1 = unaryKernelFuncFromImpl(Exp, expImpl, 'float32'); + const expConfig$1 = { + kernelName: Exp, + backendName: 'cpu', + kernelFunc: exp$1, + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the License); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + const expm1Impl = createSimpleUnaryImpl((xi) => Math.expm1(xi)); + const expm1$1 = unaryKernelFuncFromImpl(Expm1, expm1Impl); + const expm1Config$1 = { + kernelName: Expm1, + backendName: 'cpu', + kernelFunc: expm1$1, + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the License); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ + const floorImpl = createSimpleUnaryImpl((xi) => Math.floor(xi)); + const floor$1 = unaryKernelFuncFromImpl(Floor, floorImpl); + const floorConfig$1 = { + kernelName: Floor, + backendName: 'cpu', + kernelFunc: floor$1, + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + const floorDivImpl = createSimpleBinaryKernelImpl((a, b) => Math.floor(a / b)); + const floorDiv$1 = binaryKernelFunc$1(FloorDiv, floorDivImpl, null /* complexImpl */, 'int32'); + const floorDivConfig$1 = { + kernelName: FloorDiv, + backendName: 'cpu', + kernelFunc: floorDiv$1 + }; + + /** + * @license + * Copyright 2021 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ + function gatherNdImpl(indicesData, paramsBuf, dtype, numSlices, sliceRank, sliceSize, strides, paramsShape, paramsSize) { + const outBuf = buffer([numSlices, sliceSize], dtype); + for (let i = 0; i < numSlices; i++) { + const index = []; + let flattenIndex = 0; + for (let j = 0; j < sliceRank; j++) { + const dim = indicesData[i * sliceRank + j]; + flattenIndex += dim * strides[j]; + index.push(dim); + } + if (flattenIndex < 0 || flattenIndex >= paramsSize / sliceSize) { + throw new Error(`Invalid indices: ${index} does not index into ${paramsShape}`); + } + for (let k = 0; k < sliceSize; k++) { + outBuf.values[i * sliceSize + k] = + paramsBuf.get(...paramsBuf.indexToLoc(flattenIndex * sliceSize + k)); + } + } + return outBuf; + } + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ + function gatherV2Impl(xBuf, indicesBuf, flattenOutputShape) { + const outBuf = buffer(flattenOutputShape, xBuf.dtype); + for (let i = 0; i < outBuf.size; ++i) { + const newLoc = outBuf.indexToLoc(i); + const originalLoc = newLoc.slice(); + const batchIdx = originalLoc[0]; + const indicesIdx = originalLoc[2]; + const indicesIndex = indicesBuf.locToIndex([batchIdx, indicesIdx]); + originalLoc[2] = indicesBuf.values[indicesIndex]; + const originalIndex = xBuf.locToIndex(originalLoc); + if (0 <= originalIndex && originalIndex < xBuf.values.length) { + outBuf.values[i] = xBuf.values[originalIndex]; + } // Else, index is out of bounds, so leave the default zero val in outBuf. + } + return outBuf; + } + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + const greaterImpl = createSimpleBinaryKernelImpl((a, b) => (a > b) ? 1 : 0); + const greater$1 = binaryKernelFunc$1(Greater, greaterImpl, null /* complexImpl */, 'bool'); + const greaterConfig$1 = { + kernelName: Greater, + backendName: 'cpu', + kernelFunc: greater$1 + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + const greaterEqualImpl = createSimpleBinaryKernelImpl((a, b) => (a >= b) ? 1 : 0); + const greaterEqual$1 = binaryKernelFunc$1(GreaterEqual, greaterEqualImpl, null /* complexImpl */, 'bool'); + const greaterEqualConfig$1 = { + kernelName: GreaterEqual, + backendName: 'cpu', + kernelFunc: greaterEqual$1 + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + const lessImpl = createSimpleBinaryKernelImpl((a, b) => (a < b) ? 1 : 0); + const less$1 = binaryKernelFunc$1(Less, lessImpl, null /* complexImpl */, 'bool'); + const lessConfig$1 = { + kernelName: Less, + backendName: 'cpu', + kernelFunc: less$1 + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. 
+ * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + const lessEqualImpl = createSimpleBinaryKernelImpl((a, b) => (a <= b) ? 1 : 0); + const lessEqual$1 = binaryKernelFunc$1(LessEqual, lessEqualImpl, null /* complexImpl */, 'bool'); + const lessEqualConfig$1 = { + kernelName: LessEqual, + backendName: 'cpu', + kernelFunc: lessEqual$1 + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + function linSpaceImpl(start, stop, num) { + const step = (stop - start) / (num - 1); + const values = makeZerosTypedArray(num, 'float32'); + values[0] = start; + for (let i = 1; i < values.length; i++) { + values[i] = values[i - 1] + step; + } + return values; + } + + /** + * @license + * Copyright 2020 Google LLC. 
All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the License); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + const logImpl = createSimpleUnaryImpl((xi) => Math.log(xi)); + const log$1 = unaryKernelFuncFromImpl(Log, logImpl); + const logConfig$1 = { + kernelName: Log, + backendName: 'cpu', + kernelFunc: log$1, + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ + function maxImpl$1(aVals, reduceSize, outShape, dtype) { + const vals = getTypedArrayFromDType(dtype, sizeFromShape(outShape)); + for (let i = 0; i < vals.length; ++i) { + const offset = i * reduceSize; + let max = aVals[offset]; + for (let j = 0; j < reduceSize; ++j) { + const value = aVals[offset + j]; + if (Number.isNaN(value) || + value > max) { // comparison with NaN always return false + max = value; + } + } + vals[i] = max; + } + return vals; + } + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + const maximumImpl = createSimpleBinaryKernelImpl(((aValue, bValue) => Math.max(aValue, bValue))); + const maximum$1 = binaryKernelFunc$1(Maximum$1, maximumImpl); + const maximumConfig$1 = { + kernelName: Maximum$1, + backendName: 'cpu', + kernelFunc: maximum$1 + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + const minimumImpl = createSimpleBinaryKernelImpl(((aValue, bValue) => Math.min(aValue, bValue))); + const minimum$1 = binaryKernelFunc$1(Minimum$1, minimumImpl); + const minimumConfig$1 = { + kernelName: Minimum$1, + backendName: 'cpu', + kernelFunc: minimum$1 + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ + const multiplyImpl = createSimpleBinaryKernelImpl(((aValue, bValue) => aValue * bValue)); + const multiplyComplexImpl = createComplexBinaryKernelImpl(((aReal, aImag, bReal, bImag) => { + return { + real: aReal * bReal - aImag * bImag, + imag: aReal * bImag + aImag * bReal + }; + })); + const multiply$1 = binaryKernelFunc$1(Multiply$1, multiplyImpl, multiplyComplexImpl); + const multiplyConfig$1 = { + kernelName: Multiply$1, + backendName: 'cpu', + kernelFunc: multiply$1 + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + function negImpl(xVals, xShape, xDtype) { + const minusOne = createScalarValue(-1, xDtype); + return multiplyImpl([], xShape, minusOne, xVals, xDtype); + } + function neg$1(args) { + const { inputs, backend } = args; + const { x } = inputs; + assertNotComplex$1(x, 'neg'); + const xVals = backend.data.get(x.dataId).values; + const [res, newShape] = negImpl(xVals, x.shape, x.dtype); + return backend.makeTensorInfo(newShape, x.dtype, res); + } + const negConfig$1 = { + kernelName: Neg, + backendName: 'cpu', + kernelFunc: neg$1 + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. 
+ * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + const notEqualImpl = createSimpleBinaryKernelImpl(((a, b) => (a !== b) ? 1 : 0)); + const notEqual$1 = binaryKernelFunc$1(NotEqual, notEqualImpl, null /* complexOp */, 'bool'); + const notEqualConfig$1 = { + kernelName: NotEqual, + backendName: 'cpu', + kernelFunc: notEqual$1 + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ + function transposeImpl$1(xVals, xShape, dtype, perm, newShape) { + const xRank = xShape.length; + const xSize = sizeFromShape(xShape); + const xStrides = computeStrides(xShape); + const newStrides = computeStrides(newShape); + const result = getTypedArrayFromDType(dtype, sizeFromShape(newShape)); + for (let i = 0; i < xSize; ++i) { + const loc = indexToLoc(i, xRank, xStrides); + // Permute location. + const newLoc = new Array(loc.length); + for (let i = 0; i < newLoc.length; i++) { + newLoc[i] = loc[perm[i]]; + } + const newIndex = locToIndex(newLoc, xRank, newStrides); + result[newIndex] = xVals[i]; + } + return result; + } + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ + function transpose$1(args) { + const { inputs, attrs, backend } = args; + const { x } = inputs; + const { perm } = attrs; + assertNotComplex$1(x, 'transpose'); + const xRank = x.shape.length; + const newShape = new Array(xRank); + for (let i = 0; i < newShape.length; i++) { + newShape[i] = x.shape[perm[i]]; + } + const values = backend.data.get(x.dataId).values; + const result = transposeImpl$1(values, x.shape, x.dtype, perm, newShape); + const dataId = backend.write(result, newShape, x.dtype); + return { dataId, shape: newShape, dtype: x.dtype }; + } + const transposeConfig$1 = { + kernelName: Transpose, + backendName: 'cpu', + kernelFunc: transpose$1 + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ + function prodImpl(xShape, xDtype, xVals, reductionAxes) { + const [outShape, reduceShape] = computeOutAndReduceShapes(xShape, reductionAxes); + const outDtype = upcastType(xDtype, 'int32'); + const outVals = makeZerosTypedArray(sizeFromShape(outShape), outDtype); + const reduceSize = sizeFromShape(reduceShape); + for (let i = 0; i < outVals.length; ++i) { + const offset = i * reduceSize; + let prod = 1; + for (let j = 0; j < reduceSize; ++j) { + prod *= xVals[offset + j]; + } + outVals[i] = prod; + } + return { outVals, outShape, outDtype }; + } + function prod$1(args) { + const { inputs, backend, attrs } = args; + const { x } = inputs; + const { axis, keepDims } = attrs; + assertNotComplex$1(x, 'prod'); + const xRank = x.shape.length; + const axes = parseAxisParam(axis, x.shape); + const permutation = getAxesPermutation(axes, xRank); + let reductionAxes = axes; + let permutedX = x; + const intermediateTensorInfos = []; + if (permutation != null) { + permutedX = transpose$1({ inputs: { x }, backend, attrs: { perm: permutation } }); + intermediateTensorInfos.push(permutedX); + reductionAxes = getInnerMostAxes(reductionAxes.length, xRank); + } + const xVals = backend.data.get(permutedX.dataId).values; + const { outVals, outShape, outDtype } = prodImpl(permutedX.shape, permutedX.dtype, xVals, reductionAxes); + let resultShape = outShape; + if (keepDims) { + resultShape = expandShapeToKeepDim(outShape, axes); + } + intermediateTensorInfos.forEach(t => backend.disposeIntermediateTensorInfo(t)); + return backend.makeTensorInfo(resultShape, outDtype, outVals); + } + const prodConfig$1 = { + kernelName: Prod, + backendName: 'cpu', + kernelFunc: prod$1 + }; + + /** + * @license + * Copyright 2022 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + function validateIndices(indices, indicesShape, numParams) { + indices.forEach((index, i) => { + if (index < 0 || index >= numParams) { + const locString = indexToLoc(i, indicesShape.length, computeStrides(indicesShape)) + .join(','); + throw new Error(`indices[${locString}] = ${index} is not in [0, ${numParams})`); + } + }); + } + function validateSplits(paramsNestedSplits, numParamsDenseValues) { + // Validate + for (let dim = 0; dim < paramsNestedSplits.length; ++dim) { + const splits = paramsNestedSplits[dim]; + const lastSplit = (dim === paramsNestedSplits.length - 1) ? + numParamsDenseValues : + paramsNestedSplits[dim + 1].length; + if (splits.length === 0) { + throw new Error('Ragged splits may not be empty'); + } + if (splits[0] < 0) { + throw new Error('Ragged splits must be non-negative'); + } + if (splits[splits.length - 1] > lastSplit) { + throw new Error('Ragged splits must not point past values'); + } + for (let i = 1; i < splits.length; ++i) { + if (splits[i - 1] > splits[i]) { + throw new Error('Ragged splits must be sorted in ascending order'); + } + } + } + } + // Construct the `splits` output tensors, encoded using a nested vector. + // Also find the slices of values that need to be copied, and store them + // in `valueSlices`. The total number of values that will be copied (which + // we need for allocating the output values tensor) is stored in `numValues`. 
+ function makeSplits(indices, indicesShape, paramsNestedSplits, numParamsDenseValues) { + const valueSlices = []; + let numValues = 0; + const numSplits = indicesShape.length - 1 + paramsNestedSplits.length; + const outSplits = new Array(numSplits).fill(null).map(() => [0]); + validateSplits(paramsNestedSplits, numParamsDenseValues); + // Add `splits` that come from all but the last dimension of the dense + // Tensor `indices`. In particular, for each dimension D, we add a + // splits tensor whose values are: + // range(reduceProd(splits.shape[:D]) + 1) * splits.shape[D+1] + // E.g., if indices.shape=[2, 3, 4] then we will add splits tensors: + // [0, 3, 6] # length=2+1, stride=3 + // [0, 4, 8, 12, 16, 20, 24] # length=2*3+1, stride=4 + let nrows = 1; + for (let dim = 0; dim < indicesShape.length - 1; ++dim) { + nrows *= indicesShape[dim]; + const rowLength = indicesShape[dim + 1]; + for (let i = 1; i < nrows + 1; ++i) { + outSplits[dim].push(i * rowLength); + } + } + // Add `splits` that come from `paramsNestedSplits`. Starting with the + // outermost ragged dimension (i.e., the first `splits` tensor), we work + // our way in, finding the range of values that should be copied. As we + // go, we update the output `splits` for each dimension with the appropriate + // values. In particular, the *lengths* of the slices from `param_splits` + // should be copied to generate corresponding slice lengths in the output + // splits. E.g., if we are copying a ragged row with length 4, then we + // should add a new split point to outSplits that is 4 greater than the + // previous split point in outSplits. + for (let i = 0; i < indices.length; ++i) { + let start = indices[i]; + let limit = indices[i] + 1; + // Copy splits. 
+ for (let dim = 0; dim < paramsNestedSplits.length; ++dim) { + const splits = paramsNestedSplits[dim]; + const outDim = dim + indicesShape.length - 1; + if (outDim >= 0) { + const outSplitsOutDim = outSplits[outDim]; + const delta = outSplitsOutDim[outSplitsOutDim.length - 1] - splits[start]; + for (let j = start; j < limit; ++j) { + outSplits[outDim].push(splits[j + 1] + delta); + } + } + start = splits[start]; + limit = splits[limit]; + } + if (limit !== start) { + valueSlices.push([start, limit]); + numValues += limit - start; + } + } + return { outSplits, valueSlices, numValues }; + } + function getSplits(outSplits) { + const splitsOut = []; + for (let i = 0; i < outSplits.length; ++i) { + const numSplits = outSplits[i].length; + const splits = getArrayFromDType('int32', numSplits); + splitsOut.push(splits); + outSplits[i].forEach((value, j) => splits[j] = value); + } + return splitsOut; + } + function computeFlatOuterDims(orig, numOutDims) { + const outDims = orig.slice(0, numOutDims); + while (outDims.length < numOutDims) { + outDims.push(1); + } + for (let inDim = numOutDims; inDim < orig.length; inDim++) { + outDims[numOutDims - 1] *= orig[inDim]; + } + return outDims; + } + // For each slice in `(start, limit)` in `valueSlices`, append + // `paramsDenseValues[start,...,limit] to `values`. `valueSize` indicates + // the number of scalars contained in each value paramsDenseValues[i]. 
+ function writeValueSlices(paramsDenseValues, paramsDenseValuesShape, valueSlices, valueSize, values, valuesShape) { + const denseM = computeFlatOuterDims(paramsDenseValuesShape, 2)[1]; + const valuesM = computeFlatOuterDims(valuesShape, 2)[1]; + let outPos = 0; + for (const slice of valueSlices) { + for (let i = slice[0]; i < slice[1]; ++i) { + for (let j = 0; j < valueSize; ++j) { + values[outPos * valuesM + j] = paramsDenseValues[i * denseM + j]; + } + ++outPos; + } + } + } + function getValues(paramsDenseValues, paramsDenseValuesShape, paramsDenseValuesDType, valueSlices, numValues) { + const valuesShape = paramsDenseValuesShape.slice(); + valuesShape[0] = numValues; + const valuesOut = getArrayFromDType(paramsDenseValuesDType, sizeFromShape(valuesShape)); + const numElements = paramsDenseValues.length; + const valueSize = numElements === 0 ? 0 : (numElements / paramsDenseValuesShape[0]); + writeValueSlices(paramsDenseValues, paramsDenseValuesShape, valueSlices, valueSize, valuesOut, valuesShape); + return [valuesOut, valuesShape]; + } + function raggedGatherImpl(paramsNestedSplits, paramsNestedSplitsShapes, paramsDenseValues, paramsDenseValuesShape, paramsDenseValuesDType, indices, indicesShape, outputRaggedRank) { + if (paramsNestedSplits.length === 0) { + throw new Error('paramsNestedSplits must be non empty'); + } + if (paramsNestedSplitsShapes[0].length === 0) { + throw new Error('Split tensors must not be scalars'); + } + const numParams = paramsNestedSplitsShapes[0][0] - 1; + validateIndices(indices, indicesShape, numParams); + if (paramsDenseValuesShape.length === 0) { + throw new Error('params.rank must be nonzero'); + } + const numParamsDenseValues = paramsDenseValuesShape[0]; + // Calculate the `splits`, and store the value slices that we need to + // copy in `valueSlices`. + const { outSplits, valueSlices, numValues } = makeSplits(indices, indicesShape, paramsNestedSplits, numParamsDenseValues); + // Write the output tensors. 
+ const outputNestedSplits = getSplits(outSplits); + const outputDenseValues = getValues(paramsDenseValues, paramsDenseValuesShape, paramsDenseValuesDType, valueSlices, numValues); + return [outputNestedSplits, outputDenseValues[0], outputDenseValues[1]]; + } + + /** + * @license + * Copyright 2022 Google LLC. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + const INT32_MAX = 2147483647; + function raggedRangeImpl(starts, startsShape, startsDType, limits, limitsShape, deltas, deltasShape) { + // Check input tensor shapes. + if (startsShape.length > 1) { + throw new Error('starts must be a scalar or vector'); + } + if (limitsShape.length > 1) { + throw new Error('limits must be a scalar or vector'); + } + if (deltasShape.length > 1) { + throw new Error('deltas must be a scalar or vector'); + } + // Determine which tensors we need to broadcast. + const broadcastStarts = startsShape.length === 0; + const broadcastLimits = limitsShape.length === 0; + const broadcastDeltas = deltasShape.length === 0; + // nRows (number of output rows) is the size of the non-broadcast inputs, + // or 1 if all inputs are scalars. 
+ const inSizes = []; + if (!broadcastStarts) { + inSizes.push(startsShape[0]); + } + if (!broadcastLimits) { + inSizes.push(limitsShape[0]); + } + if (!broadcastDeltas) { + inSizes.push(deltasShape[0]); + } + for (let i = 1; i < inSizes.length; ++i) { + if (inSizes[i] !== inSizes[i - 1]) { + throw new Error('starts, limits, and deltas must have the same shape'); + } + } + const nRows = inSizes.length === 0 ? 1 : inSizes[0]; + // Construct the rtNestedSplits tensor. + const rtNestedSplits = getArrayFromDType('int32', nRows + 1); + rtNestedSplits[0] = 0; + for (let row = 0; row < nRows; ++row) { + const start = broadcastStarts ? starts[0] : starts[row]; + const limit = broadcastLimits ? limits[0] : limits[row]; + const delta = broadcastDeltas ? deltas[0] : deltas[row]; + if (delta === 0) { + throw new Error('Requires delta != 0'); + } + let size; // The number of elements in the specified range. + if (((delta > 0) && (limit < start)) || ((delta < 0) && (limit > start))) { + size = 0; + } + else { + size = Math.ceil(Math.abs((limit - start) / delta)); + if (size > INT32_MAX) { + throw new Error(`Requires ((limit - start) / delta) <= ${INT32_MAX}`); + } + } + rtNestedSplits[row + 1] = rtNestedSplits[row] + size; + } + const nVals = rtNestedSplits[nRows]; + // Construct the rtDenseValues tensor. + const rtDenseValues = getArrayFromDType(startsDType, nVals); + let valueIndex = 0; + for (let row = 0; row < nRows; ++row) { + const rowSize = rtNestedSplits[row + 1] - rtNestedSplits[row]; + let value = broadcastStarts ? starts[0] : starts[row]; + const delta = broadcastDeltas ? deltas[0] : deltas[row]; + for (let i = 0; i < rowSize; ++i) { + rtDenseValues[valueIndex++] = value; + value += delta; + } + } + return [rtNestedSplits, rtDenseValues]; + } + + /** + * @license + * Copyright 2022 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + var RowPartitionType = RowPartitionType$1; + // Based on + // https://github.com/tensorflow/tensorflow/blob/master/tensorflow/core/kernels/ragged_tensor_to_tensor_op.cc + class RaggedTensorToTensorOp { + constructor(shape, shapeShape, values, valuesShape, valuesDType, defaultValue, defaultValueShape, rowPartitionValues, rowPartitionValuesShapes, rowPartitionTypeStrings) { + this.shape = shape; + this.shapeShape = shapeShape; + this.values = values; + this.valuesShape = valuesShape; + this.valuesDType = valuesDType; + this.defaultValue = defaultValue; + this.defaultValueShape = defaultValueShape; + this.rowPartitionValues = rowPartitionValues; + this.rowPartitionValuesShapes = rowPartitionValuesShapes; + this.rowPartitionTypes = + getRowPartitionTypesHelper(rowPartitionTypeStrings); + this.raggedRank = getRaggedRank(this.rowPartitionTypes); + } + getRowPartitionTypeByDimension(dimension) { + if (this.rowPartitionTypes[0] === RowPartitionType.FIRST_DIM_SIZE) { + return this.rowPartitionTypes[dimension + 1]; + } + else { + return this.rowPartitionTypes[dimension]; + } + } + // Returns the relationship between dimension and dimension + 1. 
+ getRowPartitionTensor(dimension) { + if (this.rowPartitionTypes[0] === RowPartitionType.FIRST_DIM_SIZE) { + return this.rowPartitionValues[dimension + 1]; + } + else { + return this.rowPartitionValues[dimension]; + } + } + getMaxWidth(dimension) { + const rowPartitionTensor = this.getRowPartitionTensor(dimension - 1); + switch (this.getRowPartitionTypeByDimension(dimension - 1)) { + case RowPartitionType.VALUE_ROWIDS: + return RaggedTensorToTensorOp.getMaxWidthValueRowID(rowPartitionTensor); + case RowPartitionType.ROW_SPLITS: + return RaggedTensorToTensorOp.getMaxWidthRowSplit(rowPartitionTensor); + default: + throw new Error(`Cannot handle partition type ${RowPartitionType[this.getRowPartitionTypeByDimension(dimension - 1)]}`); + } + } + static getMaxWidthRowSplit(rowSplit) { + const tensorLength = rowSplit.length; + if (tensorLength === 0 || tensorLength === 1) { + return 0; + } + let maxWidth = 0; + for (let i = 0; i < tensorLength - 1; ++i) { + const currentWidth = rowSplit[i + 1] - rowSplit[i]; + if (currentWidth > maxWidth) { + maxWidth = currentWidth; + } + } + return maxWidth; + } + static getMaxWidthValueRowID(valueRowIds) { + const indexLength = valueRowIds.length; + if (indexLength === 0) { + return 0; + } + let firstEqualIndex = 0; + let firstEqualIndexValue = valueRowIds[0]; + let maxWidth = 0; + for (let i = 1; i < indexLength; ++i) { + const value = valueRowIds[i]; + if (value !== firstEqualIndexValue) { + firstEqualIndexValue = value; + maxWidth = Math.max(i - firstEqualIndex, maxWidth); + firstEqualIndex = i; + } + } + return Math.max(indexLength - firstEqualIndex, maxWidth); + } + tensorShapeFromTensor(t, tShape, isPartial = true) { + if (tShape.length === 0) { + if (t[0] === -1) { + return []; + } + throw new Error(`The only valid scalar shape tensor is the fully unknown shape specified as -1.`); + } + // MakePartialShape/MakeShapeHelper. 
+ return makeShape(t, isPartial); + } + calculateOutputSize(firstDim) { + const valueShape = this.valuesShape; + const defaultValueShape = this.defaultValueShape; + validateDefaultValueShape(defaultValueShape, valueShape); + const shape = this.tensorShapeFromTensor(this.shape, this.shapeShape); + const outputShape = combineRaggedTensorToTensorShapes(this.raggedRank, shape, valueShape); + const result = outputShape; + if (result[0] < 0) { + result[0] = firstDim; + } + for (let i = 1; i <= this.raggedRank; ++i) { + if (result[i] < 0) { + result[i] = this.getMaxWidth(i); + } + } + return result; + } + /** + * The outputIndex represents the index in the output tensor + * where the first element of a particular dimension would be written. + * If it is -1, it indicates that the index is out of scope. + * Example, given firstDimension = 10, firstDimensionOutput = 6, + * and outputIndexMultiplier = 100: + * result = [0 100 200 300 400 500 -1 -1 -1 -1] + * If firstDimensionOutput = 11 instead, then: + * result = [0 100 200 300 400 500 600 700 800 900] + */ + calculateFirstParentOutputIndex(firstDimension, outputIndexMultiplier, firstDimensionOutput) { + const minDimension = Math.min(firstDimension, firstDimensionOutput); + const result = []; + let currentOutputIndex = 0; + for (let i = 0; i < minDimension; ++i, currentOutputIndex += outputIndexMultiplier) { + result.push(currentOutputIndex); + } + for (let i = minDimension; i < firstDimension; ++i) { + result.push(-1); + } + assert$1(result.length === firstDimension, () => 'Final length of result must be equal to firstDimension.'); + return result; + } + calculateOutputIndexRowSplit(rowSplit, parentOutputIndex, outputIndexMultiplier, outputSize) { + const rowSplitSize = rowSplit.length; + const result = []; + for (let i = 0; i < rowSplitSize - 1; ++i) { + const rowLength = rowSplit[i + 1] - rowSplit[i]; + let realLength = Math.min(outputSize, rowLength); + let parentOutputIndexCurrent = parentOutputIndex[i]; + if 
(parentOutputIndexCurrent === -1) { + realLength = 0; + } + for (let j = 0; j < realLength; ++j) { + result.push(parentOutputIndexCurrent); + parentOutputIndexCurrent += outputIndexMultiplier; + } + for (let j = 0; j < rowLength - realLength; ++j) { + result.push(-1); + } + } + if (rowSplitSize > 0 && result.length !== rowSplit[rowSplitSize - 1]) { + throw new Error('Invalid row split size.'); + } + return result; + } + // Calculate the output index of the first element of a list. + // The parentOutputIndex is the same computation for the previous list. + // -1 indicates an element or list that is out of range. + // The outputIndexMultiplier is the number of output indices one moves + // forward for each column. + // E.g., given: + // valueRowIds:[0 1 2 2 2 3 5 5 6] + // parentOutputIndex:[1000 1100 2000 2100 -1 3000 4000] + // outputIndexMultiplier: 10 + // outputSize: 2 + // You get: + // result = [1000 1100 2000 2010 -1 2100 -1 -1 3000] + // result[0] = parentOutputIndex[valueRowIds[0]] + // result[1] = parentOutputIndex[valueRowIds[1]] + // result[2] = parentOutputIndex[valueRowIds[2]] + // result[3] = parentOutputIndex[valueRowIds[2] + 10] + // result[4] = -1 because it is the third element the size is 2. 
+ // result[5] = parentOutputIndex[valueRowIds[3]] + // result[6] = -1 because parentOutputIndex[valueRowIds[6]] == -1 + // result[7] = -1 because parentOutputIndex[valueRowIds[6]] == -1 + // result[8] = parentOutputIndex[valueRowIds[7]] + calculateOutputIndexValueRowID(valueRowIds, parentOutputIndex, outputIndexMultiplier, outputSize) { + const indexSize = valueRowIds.length; + const result = []; + if (indexSize === 0) { + return []; + } + let currentOutputColumn = 0; + let currentValueRowId = valueRowIds[0]; + if (currentValueRowId >= parentOutputIndex.length) { + throw new Error(`Got currentValueRowId=${currentValueRowId}, which is not less than ${parentOutputIndex.length}`); + } + let currentOutputIndex = parentOutputIndex[currentValueRowId]; + result.push(currentOutputIndex); + for (let i = 1; i < indexSize; ++i) { + const nextValueRowId = valueRowIds[i]; + if (nextValueRowId === currentValueRowId) { + if (currentOutputIndex >= 0) { + ++currentOutputColumn; + if (currentOutputColumn < outputSize) { + currentOutputIndex += outputIndexMultiplier; + } + else { + currentOutputIndex = -1; + } + } + } + else { + currentOutputColumn = 0; + currentValueRowId = nextValueRowId; + if (nextValueRowId >= parentOutputIndex.length) { + throw new Error(`Got nextValueRowId=${nextValueRowId} which is not less than ${parentOutputIndex.length}`); + } + currentOutputIndex = parentOutputIndex[nextValueRowId]; + } + result.push(currentOutputIndex); + } + if (result.length !== valueRowIds.length) { + throw new Error('Invalid row ids.'); + } + return result; + } + calculateOutputIndex(dimension, parentOutputIndex, outputIndexMultiplier, outputSize) { + const rowPartitionTensor = this.getRowPartitionTensor(dimension); + const partitionType = this.getRowPartitionTypeByDimension(dimension); + switch (partitionType) { + case RowPartitionType.VALUE_ROWIDS: + return this.calculateOutputIndexValueRowID(rowPartitionTensor, parentOutputIndex, outputIndexMultiplier, outputSize); + case 
RowPartitionType.ROW_SPLITS: + if (rowPartitionTensor.length - 1 > parentOutputIndex.length) { + throw new Error(`Row partition size is greater than output size: ${rowPartitionTensor.length - 1} > ${parentOutputIndex.length}`); + } + return this.calculateOutputIndexRowSplit(rowPartitionTensor, parentOutputIndex, outputIndexMultiplier, outputSize); + default: + throw new Error(`Unsupported partition type: ${RowPartitionType[partitionType]}`); + } + } + getFirstDimensionSize() { + const firstPartitionTensor = this.rowPartitionValues[0]; + if (this.rowPartitionTypes.length === 0) { + throw new Error('No row_partition_types given.'); + } + const firstPartitionType = this.rowPartitionTypes[0]; + switch (firstPartitionType) { + case RowPartitionType.FIRST_DIM_SIZE: + return firstPartitionTensor[0]; + case RowPartitionType.VALUE_ROWIDS: + throw new Error('Cannot handle VALUE_ROWIDS in first dimension.'); + case RowPartitionType.ROW_SPLITS: + return this.rowPartitionValuesShapes[0][0] - 1; + default: + throw new Error(`Cannot handle type ${RowPartitionType[firstPartitionType]}`); + } + } + compute() { + const firstPartitionTensor = this.rowPartitionValues[0]; + if (firstPartitionTensor.length <= 0) { + throw new Error('Invalid first partition input. ' + + 'Tensor requires at least one element.'); + } + const firstDimension = this.getFirstDimensionSize(); + const outputSize = this.calculateOutputSize(firstDimension); + const multiplier = new Array(this.raggedRank + 1); + multiplier[multiplier.length - 1] = 1; + for (let i = multiplier.length - 2; i >= 0; --i) { + multiplier[i] = multiplier[i + 1] * outputSize[i + 1]; + } + // Full size of the tensor. 
+ const outputShape = makeShape(outputSize, false); + const outputTensor = getArrayFromDType(this.valuesDType, sizeFromShape(outputShape)); + const fullSize = multiplier[0] * outputSize[0]; + if (fullSize > 0) { + let outputIndex = this.calculateFirstParentOutputIndex(firstDimension, multiplier[0], outputSize[0]); + for (let i = 1; i <= this.raggedRank; ++i) { + const newOutputIndex = this.calculateOutputIndex(i - 1, outputIndex, multiplier[i], outputSize[i]); + outputIndex = newOutputIndex; + } + this.setOutput(this.raggedRank, outputIndex, outputTensor, outputShape); + } + return [outputShape, outputTensor]; + } + setOutput(raggedRank, outputIndex, outputTensor, outputShape) { + if (outputTensor.length === 0) { + return; + } + const valuesBase = this.values; + const outputBase = outputTensor; + let elementShape = outputShape.slice(); + elementShape = elementShape.slice(raggedRank + 1); + const valueElementSize = sizeFromShape(elementShape); + const outputIndexSize = outputIndex.length; + // Broadcast the default value to value_element_size. (We can skip this + // if defaultValueTensor.size == 1, since we use fill when that's true.) + let defaultValue = this.defaultValue; + if (defaultValue.length !== valueElementSize && defaultValue.length !== 1) { + const srcShape = this.defaultValueShape; + tidy(() => { + const defaultValueTensor = reshape$3(defaultValue, srcShape); + const bCastDefault = broadcastTo(defaultValueTensor, elementShape); + defaultValue = bCastDefault.dataSync(); + }); + } + // Loop through the outputIndex array, finding contiguous regions that + // should be copied. Once we find the end of a contiguous region, copy it + // and add any necessary padding (with defaultValue). 
+ let srcStart = 0; // Start of contiguous region (in values) + let dstStart = 0; // Destination for contiguous region (in output) + let dstEnd = 0; // Destination for contiguous region (in output) + for (let srcI = 0; srcI <= outputIndexSize; ++srcI) { + // dstI is the destination where the value at srcI should be copied. + let dstI = srcI < outputIndexSize ? outputIndex[srcI] : -1; + // If we're still in a contiguous region, then update dstEnd go to the + // next srcI. + if (dstI === dstEnd) { + ++dstEnd; + continue; + } + // We found the end of contiguous region. This can be because we found + // a gap (dstI > dstEnd), or a source value that shouldn't be copied + // because it's out-of-bounds (dstI == -1), or the end of the tensor + // (dstI === -1). + if (dstStart < dstEnd) { + // Copy the contiguous region. + const src = valuesBase.subarray(srcStart * valueElementSize); + const dst = outputBase.subarray(dstStart * valueElementSize); + const nVals = (dstEnd - dstStart) * valueElementSize; + copyArray(dst, src, nVals); + } + // Add any necessary padding (w/ defaultValue). + if (srcI >= outputIndexSize) { + // We reached the end of values: pad to the end of output. + const outputSize = outputTensor.length; + dstI = Math.floor(outputSize / valueElementSize); + } + if (dstI > dstEnd) { + if (this.defaultValue.length === 1) { + outputBase + .subarray(dstEnd * valueElementSize, dstI * valueElementSize) + .fill(this.defaultValue[0]); + dstEnd = dstI; + } + else { + while (dstI > dstEnd) { + const dst = outputBase.slice(dstEnd * valueElementSize); + copyArray(dst, defaultValue, valueElementSize); + ++dstEnd; + } + } + } + // Update indices. + if (dstI < 0) { + // srcI should be skipped -- leave it out of the contiguous region. + srcStart = srcI + 1; + dstStart = dstEnd; + } + else { + // srcI should be copied -- include it in the contiguous region. 
+ srcStart = srcI; + dstStart = dstEnd; + dstEnd = dstStart + 1; + } + } + } + } + function copyArray(dst, src, size) { + for (let i = 0; i < size; i++) { + dst[i] = src[i]; + } + } + function makeShape(shape, isPartial) { + const out = []; + for (let dim of shape) { + if (dim < 0) { + if (!isPartial) { + throw new Error(`Dimension ${dim} must be >= 0`); + } + if (dim < -1) { + throw new Error(`Dimension ${dim} must be >= -1`); + } + dim = -1; + } + out.push(dim); + } + return out; + } + function raggedTensorToTensorImpl(shape, shapesShape, values, valuesShape, valuesDType, defaultValue, defaultValueShape, rowPartitionValues, rowPartitionValuesShapes, rowPartitionTypes) { + return new RaggedTensorToTensorOp(shape, shapesShape, values, valuesShape, valuesDType, defaultValue, defaultValueShape, rowPartitionValues, rowPartitionValuesShapes, rowPartitionTypes) + .compute(); + } + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ + function rangeImpl(start, stop, step, dtype) { + const sameStartStop = start === stop; + const increasingRangeNegativeStep = start < stop && step < 0; + const decreasingRangePositiveStep = stop < start && step > 1; + if (sameStartStop || increasingRangeNegativeStep || + decreasingRangePositiveStep) { + return makeZerosTypedArray(0, dtype); + } + const numElements = Math.abs(Math.ceil((stop - start) / step)); + const values = makeZerosTypedArray(numElements, dtype); + if (stop < start && step === 1) { + // Auto adjust the step's sign if it hasn't been set + // (or was set to 1) + step = -1; + } + values[0] = start; + for (let i = 1; i < values.length; i++) { + values[i] = values[i - 1] + step; + } + return values; + } + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the License); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + const rsqrtImpl = createSimpleUnaryImpl((xi) => 1 / Math.sqrt(xi)); + const rsqrt$1 = unaryKernelFuncFromImpl(Rsqrt, rsqrtImpl); + const rsqrtConfig$1 = { + kernelName: Rsqrt, + backendName: 'cpu', + kernelFunc: rsqrt$1, + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + function scatterImpl(indices, updates, shape, outputSize, sliceSize, numUpdates, sliceRank, strides, defaultValue, sumDupeIndices) { + const flattenShape = [outputSize / sliceSize, sliceSize]; + const indicesData = indices.values; + const updatesData = updates.values; + if (outputSize === 0) { + return buffer(shape, updates.dtype); + } + const outBuf = (defaultValue instanceof TensorBuffer) ? + defaultValue : + buffer(flattenShape, updates.dtype); + if (typeof defaultValue === 'string') { + outBuf.values.fill(defaultValue); + } + else if (typeof defaultValue === 'number') { + outBuf.values.fill(defaultValue); + } + else if (typeof defaultValue === 'boolean') { + outBuf.values.fill(+defaultValue); + } + for (let i = 0; i < numUpdates; i++) { + const index = []; + let flattenIndex = 0; + for (let j = 0; j < sliceRank; j++) { + const dim = indicesData[i * sliceRank + j]; + index.push(dim); + flattenIndex += dim * strides[j]; + } + if (flattenIndex < 0 || flattenIndex >= outputSize / sliceSize) { + throw new Error(`Invalid indices: ${index} does not index into ${shape}`); + } + for (let k = 0; k < sliceSize; k++) { + if (sumDupeIndices) { + outBuf.values[flattenIndex * sliceSize + k] += + updatesData[i * sliceSize + k]; + } + else { + outBuf.values[flattenIndex * sliceSize + k] = updates.rank === 0 ? + updatesData[0] : + updatesData[i * sliceSize + k]; + } + } + } + return outBuf; + } + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. 
+ * Licensed under the Apache License, Version 2.0 (the License); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + const sigmoidImpl = createSimpleUnaryImpl((xi) => 1 / (1 + Math.exp(-xi))); + const sigmoid$1 = unaryKernelFunc$1(Sigmoid$1, (xi) => 1 / (1 + Math.exp(-xi))); + const sigmoidConfig$1 = { + kernelName: Sigmoid$1, + backendName: 'cpu', + kernelFunc: sigmoid$1, + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ + function sliceImpl(vals, begin, size, shape, dtype) { + const isContinous = isSliceContinous(shape, begin, size); + const length = sizeFromShape(size); + const xStrides = computeStrides(shape); + if (isContinous) { + const flatOffset = computeFlatOffset(begin, xStrides); + if (dtype === 'string') { + return vals.slice(flatOffset, flatOffset + length); + } + return vals.subarray(flatOffset, flatOffset + length); + } + const decodedData = dtype === 'string' ? + fromUint8ToStringArray(vals) : + vals; + const inBuf = buffer(shape, dtype, decodedData); + const outBuf = buffer(size, dtype); + for (let i = 0; i < outBuf.size; ++i) { + const outLoc = outBuf.indexToLoc(i); + const inLoc = outLoc.map((idx, j) => idx + begin[j]); + outBuf.set(inBuf.get(...inLoc), ...outLoc); + } + if (dtype === 'string') { + return fromStringArrayToUint8(outBuf.values); + } + return outBuf.values; + } + function slice$1(args) { + const { inputs, backend, attrs } = args; + const { x } = inputs; + const { begin, size } = attrs; + assertNotComplex$1(x, 'slice'); + const [$begin, $size] = parseSliceParams(x, begin, size); + assertParamsValid(x, $begin, $size); + const vals = backend.data.get(x.dataId).values; + const outVals = sliceImpl(vals, $begin, $size, x.shape, x.dtype); + return backend.makeTensorInfo($size, x.dtype, outVals); + } + const sliceConfig$1 = { + kernelName: Slice, + backendName: 'cpu', + kernelFunc: slice$1 + }; + + /** + * @license + * Copyright 2021 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + function sparseFillEmptyRowsImpl(indices, indicesShape, indicesDType, values, valuesDType, denseShape, defaultValue) { + const indicesCount = indicesShape[0]; + const denseRows = denseShape[0]; + const emptyRowIndicator = new Array(denseRows); + const reverseIndexMap = new Array(indicesCount); + const rank = indicesShape[1]; + if (denseRows === 0) { + if (indicesCount !== 0) { + throw new Error(getSparseFillEmptyRowsIndicesDenseShapeMismatch(indicesCount)); + } + const outputIndices = getArrayFromDType(indicesDType, 0); + const outputValues = getArrayFromDType(valuesDType, 0); + return [ + outputIndices, [0, rank], outputValues, emptyRowIndicator, reverseIndexMap + ]; + } + let rowsAreOrdered = true; + let lastIndicesRow = 0; + const csrOffset = new Array(denseRows).fill(0); + for (let i = 0; i < indicesCount; ++i) { + // indices is a 2d tensor with shape of [N, rank] + const row = indices[i * rank]; + if (row < 0) { + throw new Error(getSparseFillEmptyRowsNegativeIndexErrorMessage(i, row)); + } + if (row >= denseRows) { + throw new Error(getSparseFillEmptyRowsOutOfRangeIndexErrorMessage(i, row, denseRows)); + } + ++csrOffset[row]; + rowsAreOrdered = rowsAreOrdered && (row >= lastIndicesRow); + lastIndicesRow = row; + } + let allRowsFull = true; + for (let row = 0; row < denseRows; ++row) { + // csrOffset here describes the number of elements in this dense row + const rowEmpty = (csrOffset[row] === 0); + emptyRowIndicator[row] = rowEmpty; + allRowsFull = 
allRowsFull && !rowEmpty; + // In filled version, each row has at least one element. + csrOffset[row] = Math.max(csrOffset[row], 1); + // Update csrOffset to represent the number of elements up to and + // including denseRows + 1: + // csrOffset[0] == #{elements of row 0} + // csrOffset[1] == #{elements of row 1} + #{elements of row 0} + // .. + // csrOffset[i] == starting index for elements in row i + 1. + if (row > 0) { + csrOffset[row] += csrOffset[row - 1]; + } + } + if (allRowsFull && rowsAreOrdered) { + const outputIndices = indices; + const outputValues = values; + for (let i = 0; i < indicesCount; ++i) { + reverseIndexMap[i] = i; + } + return [ + outputIndices, [indicesCount, rank], outputValues, emptyRowIndicator, + reverseIndexMap + ]; + } + else { + const fullIndicesCount = csrOffset[denseRows - 1]; + const outputIndices = getArrayFromDType(indicesDType, fullIndicesCount * rank); + const outputValues = getArrayFromDType(valuesDType, fullIndicesCount); + const filledCount = new Array(denseRows).fill(0); + // Fill in values for rows that are not missing + for (let i = 0; i < indicesCount; ++i) { + // indices is a 2d tensor with shape of [N, rank] + const row = indices[i * rank]; + const offset = filledCount[row]; + const outputI = ((row === 0) ? 0 : csrOffset[row - 1]) + offset; + filledCount[row]++; // Increment the filled count for this row. + for (let j = 0; j < rank; ++j) { + // indices and outputIndices are 2d tensors with shape of [N, rank] + outputIndices[outputI * rank + j] = indices[i * rank + j]; + } + outputValues[outputI] = values[i]; + // We'll need this reverse index map to backprop correctly. + reverseIndexMap[i] = outputI; + } + // Fill in values for rows that are missing + for (let row = 0; row < denseRows; ++row) { + const rowCount = filledCount[row]; + if (rowCount === 0) { // We haven't filled this row + const startingIndex = (row === 0) ? 0 : csrOffset[row - 1]; + // Remaining index values were set to zero already. 
+ // Just need to set the row index in the right location. + // outputIndices is a 2d tensor with shape of [N, rank] + outputIndices[startingIndex * rank + 0] = row; + for (let col = 1; col < rank; ++col) { + outputIndices[startingIndex * rank + col] = 0; + } + outputValues[startingIndex] = defaultValue; + } + } + return [ + outputIndices, [fullIndicesCount, rank], outputValues, emptyRowIndicator, + reverseIndexMap + ]; + } + } + + /** + * @license + * Copyright 2021 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + function sparseReshapeImpl(inputIndices, inputIndicesShape, inputDType, inputShape, targetShape) { + const denseSize = sizeFromShape(inputShape); + const nnz = inputIndicesShape[0]; + const outputRank = targetShape.length; + // Compute the output shape. Determine product of specified dimensions, and + // find the index of the unspecified one. 
+ const outputShape = []; + let product = 1; + let unknownIndex = -1; + for (let d = 0; d < outputRank; ++d) { + const size = targetShape[d]; + if (size === -1) { + if (unknownIndex !== -1) { + throw new Error(getSparseReshapeMultipleNegativeOneOutputDimErrorMessage(unknownIndex, d)); + } + unknownIndex = d; + outputShape.push(1); + } + else { + if (size < 0) { + throw new Error(getSparseReshapeNegativeOutputDimErrorMessage(d, size)); + } + product *= size; + outputShape.push(size); + } + } + if (unknownIndex !== -1) { + if (product <= 0) { + throw new Error(getSparseReshapeEmptyTensorZeroOutputDimErrorMessage()); + } + const missing = Math.trunc(denseSize / product); + if (product * missing !== denseSize) { + throw new Error(getSparseReshapeInputOutputMultipleErrorMessage(inputShape, outputShape)); + } + outputShape[unknownIndex] = missing; + } + const outputSize = sizeFromShape(outputShape); + if (outputSize !== denseSize) { + throw new Error(getSparseReshapeInputOutputMismatchErrorMessage(inputShape, outputShape)); + } + const inputRank = inputShape.length; + const inputStrides = []; + if (inputRank > 0) { + inputStrides[inputRank - 1] = 1; + for (let d = inputRank - 2; d >= 0; --d) { + inputStrides[d] = inputStrides[d + 1] * inputShape[d + 1]; + } + } + const outputStrides = []; + if (outputRank > 0) { + outputStrides[outputRank - 1] = 1; + for (let d = outputRank - 2; d >= 0; --d) { + outputStrides[d] = outputStrides[d + 1] * outputShape[d + 1]; + } + } + const newIndices = getArrayFromDType(inputDType, nnz * outputRank); + for (let i = 0; i < nnz; ++i) { + let id = 0; + for (let j = 0; j < inputRank; ++j) { + // inputIndices is a 2d tensor with shape of [nnz, inputRank] + id += inputIndices[i * inputRank + j] * inputStrides[j]; + } + for (let j = 0; j < outputRank; ++j) { + // newIndices is a 2d tensor with shape of [nnz, outputRank] + newIndices[i * outputRank + j] = Math.trunc(id / outputStrides[j]); + id %= outputStrides[j]; + } + } + return [newIndices, 
[nnz, outputRank], outputShape]; + } + + /** + * @license + * Copyright 2021 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + function sparseSegmentReductionImpl(input, inputShape, inputDType, indices, segmentIds, isMean = false, defaultValue = 0) { + const numIndices = indices.length; + // Flatten the array to two dimensions + const inputFlat = [inputShape[0], input.length / inputShape[0]]; + const numCol = inputFlat[1]; + // Note that the current implementation assumes that segmentIds values are + // sorted. + const lastSegmentIdPlusOne = numIndices > 0 ? segmentIds[numIndices - 1] + 1 : 0; + const outputRows = lastSegmentIdPlusOne; + if (outputRows < 0) { + throw new Error(getSparseSegmentReductionNegativeSegmentIdsErrorMessage()); + } + const outputShape = inputShape.slice(); + outputShape[0] = outputRows; + const outputLength = outputShape.reduce((product, value) => product * value, 1); + // Output array is initialized with the value 0 by default. + const output = getArrayFromDType(inputDType, outputLength); + // Note that we do not initialize the output buffer with a default value, so + // we need to explicitly set missing indices to the default value. 
+ if (numIndices === 0) { + if (outputRows > 0) { + output.fill(defaultValue); + } + return [output, outputShape]; + } + if (outputRows <= 0) { + throw new Error(getSparseSegmentReductionNegativeSegmentIdsErrorMessage()); + } + let start = 0, end = 1; + // Index from which the output is not initialized. + let uninitializedIndex = 0; + let outIndex = segmentIds[start]; + while (true) { + // We initialize nextIndex to 0 to avoid may be uninitialized warning + let nextIndex = 0; + if (end < numIndices) { + nextIndex = segmentIds[end]; + if (outIndex === nextIndex) { + ++end; + continue; + } + // We have a new segment here. Verify that the segment ids are growing. + if (outIndex >= nextIndex) { + throw new Error(getSparseSegmentReductionNonIncreasingSegmentIdsErrorMessage()); + } + } + if (outIndex < 0 || outIndex >= outputRows) { + throw new Error(getSparseSegmentReductionSegmentIdOutOfRangeErrorMessage(outIndex, outputRows)); + } + // If there is a gap between two indices, we need to set that gap to the + // default value. + if (outIndex > uninitializedIndex) { + output.fill(defaultValue, uninitializedIndex * numCol, outIndex * numCol); + } + for (let i = start; i < end; ++i) { + const index = indices[i]; + if (index < 0 || index >= inputFlat[0]) { + throw new Error(getSparseSegmentReductionIndicesOutOfRangeErrorMessage(i, indices[i], inputFlat[0])); + } + for (let j = 0; j < numCol; j++) { + output[outIndex * numCol + j] += input[index * numCol + j]; + } + } + if (isMean) { + for (let j = 0; j < numCol; j++) { + output[outIndex * numCol + j] /= end - start; + } + } + start = end; + ++end; + uninitializedIndex = outIndex + 1; + outIndex = nextIndex; + if (end > numIndices) { + break; + } + } + // Fill the gap at the end with the default value. + if (uninitializedIndex < outputRows) { + output.fill(defaultValue, uninitializedIndex * numCol, outputRows * numCol); + } + return [output, outputShape]; + } + + /** + * @license + * Copyright 2020 Google LLC. 
All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the License); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + const sqrtImpl = createSimpleUnaryImpl((xi) => Math.sqrt(xi)); + const sqrt$1 = unaryKernelFunc$1(Sqrt, (xi) => Math.sqrt(xi)); + const sqrtConfig$1 = { + kernelName: Sqrt, + backendName: 'cpu', + kernelFunc: sqrt$1, + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ + const squaredDifferenceImpl = createSimpleBinaryKernelImpl(((a, b) => { + const diff = a - b; + return diff * diff; + })); + const squaredDifference$1 = binaryKernelFunc$1(SquaredDifference, squaredDifferenceImpl); + const squaredDifferenceConfig$1 = { + kernelName: SquaredDifference, + backendName: 'cpu', + kernelFunc: squaredDifference$1 + }; + + /** + * @license + * Copyright 2023 Google LLC. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + const staticRegexReplaceImpl = createSimpleUnaryImpl((x, attrs) => { + const { pattern, replaceGlobal, rewrite } = attrs; + // TODO(mattSoulanille): Don't create a regex each time. + return x.replace(new RegExp(pattern, replaceGlobal ? 'g' : ''), rewrite); + }); + const staticRegexReplace$1 = unaryKernelFuncFromImpl(StaticRegexReplace, staticRegexReplaceImpl); + const staticRegexReplaceConfig$1 = { + kernelName: StaticRegexReplace, + backendName: 'cpu', + kernelFunc: staticRegexReplace$1, + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + function stridedSliceImpl(outShape, xBuf, strides, begin) { + const outBuf = buffer(outShape, xBuf.dtype); + for (let i = 0; i < outBuf.size; i++) { + const loc = outBuf.indexToLoc(i); + const newLoc = new Array(loc.length); + for (let j = 0; j < newLoc.length; j++) { + newLoc[j] = loc[j] * strides[j] + begin[j]; + } + outBuf.set(xBuf.get(...newLoc), ...loc); + } + return outBuf; + } + + /** + * @license + * Copyright 2021 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + /** + * The StringNGramsOp class creates ngrams from ragged string data. + * The constructor contains all attributes related to the operation such as + * padding widths and strings, and the compute function can be used to + * compute the ngrams for different ragged tensor inputs. 
+ */ + class StringNGramsOp { + constructor(separator, nGramWidths, leftPad, rightPad, padWidth, preserveShortSequences) { + this.separator = encodeString(separator); + this.nGramWidths = nGramWidths; + this.leftPad = encodeString(leftPad); + this.rightPad = encodeString(rightPad); + this.padWidth = padWidth; + this.preserveShort = preserveShortSequences; + } + getPadWidth(nGramWidth) { + // Ngrams can be padded with either a fixed pad width or a dynamic pad + // width depending on the 'padWidth' arg, but in no case should the padding + // ever be wider than 'nGramWidth' - 1. + return Math.min(this.padWidth < 0 ? nGramWidth - 1 : this.padWidth, nGramWidth - 1); + } + getNumNGrams(length, nGramWidth) { + const padWidth = this.getPadWidth(nGramWidth); + return Math.max(0, ((length + 2 * padWidth) - nGramWidth) + 1); + } + createNGrams(data, splitIndex, output, outputStartIndex, numNGrams, nGramWidth) { + for (let nGramIndex = 0; nGramIndex < numNGrams; ++nGramIndex) { + const padWidth = this.getPadWidth(nGramWidth); + const leftPadding = Math.max(0, padWidth - nGramIndex); + const rightPadding = Math.max(0, padWidth - (numNGrams - (nGramIndex + 1))); + const numTokens = nGramWidth - (leftPadding + rightPadding); + const dataStartIndex = splitIndex + (leftPadding > 0 ? 0 : nGramIndex - padWidth); + // Calculate the total expected size of the nGram so we can reserve the + // correct amount of space in the string. + let nGramSize = 0; + // Size of the left padding. + nGramSize += leftPadding * this.leftPad.length; + // Size of the tokens. + for (let n = 0; n < numTokens; ++n) { + nGramSize += data[dataStartIndex + n].length; + } + // Size of the right padding. + nGramSize += rightPadding * this.rightPad.length; + // Size of the separators. + const numSeparators = leftPadding + rightPadding + numTokens - 1; + nGramSize += numSeparators * this.separator.length; + // Build the nGram. 
+ output[outputStartIndex + nGramIndex] = new Uint8Array(nGramSize); + const nGram = output[outputStartIndex + nGramIndex]; + let nextNGramIndex = 0; + const appendToNGram = (str) => str.forEach((value) => nGram[nextNGramIndex++] = value); + for (let n = 0; n < leftPadding; ++n) { + appendToNGram(this.leftPad); + appendToNGram(this.separator); + } + // Only output first numTokens - 1 pairs of data and separator + for (let n = 0; n < numTokens - 1; ++n) { + appendToNGram(data[dataStartIndex + n]); + appendToNGram(this.separator); + } + // Handle case when there are no tokens or no right padding as these + // can result in consecutive separators. + if (numTokens > 0) { + // If we have tokens, then output last and then pair each separator + // with the right padding that follows, to ensure nGram ends either with + // the token or with the right pad. + appendToNGram(data[dataStartIndex + numTokens - 1]); + for (let n = 0; n < rightPadding; ++n) { + appendToNGram(this.separator); + appendToNGram(this.rightPad); + } + } + else { + // If we don't have tokens, then the last item inserted into the nGram + // has been the separator from the left padding loop above. Hence, + // output right pad and separator and make sure to finish with a + // padding, not a separator. + for (let n = 0; n < rightPadding - 1; ++n) { + appendToNGram(this.rightPad); + appendToNGram(this.separator); + } + appendToNGram(this.rightPad); + } + } + } + // Data and splits together form the definition of the ragged tensor, + // where data is 1 dimensional and contains the values of the tensor + // and splits denotes the indices at which each row starts. + compute(data, splits) { + // Validate that the splits are valid indices into data, only if there are + // splits specified. 
+ const inputDataSize = data.length; + const splitsSize = splits.length; + if (splitsSize > 0) { + let prevSplit = splits[0]; + if (prevSplit !== 0) { + throw new Error(`First split value must be 0, got ${prevSplit}`); + } + for (let i = 1; i < splitsSize; ++i) { + let validSplits = splits[i] >= prevSplit; + validSplits = validSplits && (splits[i] <= inputDataSize); + if (!validSplits) { + throw new Error(`Invalid split value ${splits[i]}, must be in [${prevSplit}, ${inputDataSize}]`); + } + prevSplit = splits[i]; + } + if (prevSplit !== inputDataSize) { + throw new Error(`Last split value must be data size. Expected ${inputDataSize}, got ${prevSplit}`); + } + } + const numBatchItems = splitsSize - 1; + const nGramsSplits = getArrayFromDType('int32', splitsSize); + // If there is no data or size, return an empty ragged tensor. + if (inputDataSize === 0 || splitsSize === 0) { + const empty = new Array(inputDataSize); + for (let i = 0; i <= numBatchItems; ++i) { + nGramsSplits[i] = 0; + } + return [empty, nGramsSplits]; + } + nGramsSplits[0] = 0; + for (let i = 1; i <= numBatchItems; ++i) { + const length = splits[i] - splits[i - 1]; + let numNGrams = 0; + this.nGramWidths.forEach((nGramWidth) => { + numNGrams += this.getNumNGrams(length, nGramWidth); + }); + if (this.preserveShort && length > 0 && numNGrams === 0) { + numNGrams = 1; + } + nGramsSplits[i] = nGramsSplits[i - 1] + numNGrams; + } + const nGrams = new Array(nGramsSplits[numBatchItems]); + for (let i = 0; i < numBatchItems; ++i) { + const splitIndex = splits[i]; + let outputStartIdx = nGramsSplits[i]; + this.nGramWidths.forEach((nGramWidth) => { + const length = splits[i + 1] - splits[i]; + const numNGrams = this.getNumNGrams(length, nGramWidth); + this.createNGrams(data, splitIndex, nGrams, outputStartIdx, numNGrams, nGramWidth); + outputStartIdx += numNGrams; + }); + // If we're preserving short sequences, check to see if no sequence was + // generated by comparing the current output start idx to the 
original + // one (nGramSplitsdata). If no ngrams were generated, then they will + // be equal (since we increment outputStartIdx by numNGrams every + // time we create a set of ngrams.) + if (this.preserveShort && outputStartIdx === nGramsSplits[i]) { + const dataLength = splits[i + 1] - splits[i]; + // One legitimate reason to not have any ngrams when this.preserveShort + // is true is if the sequence itself is empty. In that case, move on. + if (dataLength === 0) { + continue; + } + // We don't have to worry about dynamic padding sizes here: if padding + // was dynamic, every sequence would have had sufficient padding to + // generate at least one nGram. + const nGramWidth = dataLength + 2 * this.padWidth; + const numNGrams = 1; + this.createNGrams(data, splitIndex, nGrams, outputStartIdx, numNGrams, nGramWidth); + } + } + return [nGrams, nGramsSplits]; + } + } + function stringNGramsImpl(data, dataSplits, separator, nGramWidths, leftPad, rightPad, padWidth, preserveShortSequences) { + return new StringNGramsOp(separator, nGramWidths, leftPad, rightPad, padWidth, preserveShortSequences) + .compute(data, dataSplits); + } + + /** + * @license + * Copyright 2021 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ + function split(str, delimiters, skipEmpty, result) { + if (!str.length) { + return; + } + // When the delimiter is empty, the input is split into individual characters. + if (delimiters.length === 0) { + for (let i = 0; i < str.length; ++i) { + result.push(str.subarray(i, i + 1)); + } + return; + } + // When there is one delimiter, the input is split only at that delimiter. + if (delimiters.length === 1) { + const delimiter = delimiters[0]; + let f = str.indexOf(delimiter); + while (f !== -1) { + const token = str.subarray(0, f); + if (!skipEmpty || token.length !== 0) { + result.push(token); + } + str = str.subarray(f + 1); + f = str.indexOf(delimiter); + } + if (!skipEmpty || str.length !== 0) { + result.push(str); + } + return; + } + // When there are multiple delimiters, the input is split at every instance + // one of the delimiters appears. + let tokenStart = 0; + for (let i = 0; i < str.length + 1; i++) { + if ((i === str.length) || (delimiters.indexOf(str[i]) !== -1)) { + const token = str.subarray(tokenStart, i); + if (!skipEmpty || token.length !== 0) { + result.push(token); + } + tokenStart = i + 1; + } + } + } + function stringSplitImpl(input, delimiter, skipEmpty) { + const batchSize = input.length; + // Empty delimiter means split the input character by character. 
+ const tokens = []; + let outputSize = 0; + let maxNumEntries = 0; + const numIndices = new Array(batchSize); + for (let i = 0; i < batchSize; ++i) { + const prevTokensLength = tokens.length; + split(input[i], delimiter, skipEmpty, tokens); + const nEntries = tokens.length - prevTokensLength; + numIndices[i] = nEntries; + outputSize += nEntries; + maxNumEntries = Math.max(maxNumEntries, nEntries); + } + const indices = getArrayFromDType('int32', outputSize * 2); + const values = new Array(outputSize); + const shape = [batchSize, maxNumEntries]; + let c = 0; + for (let i = 0; i < batchSize; ++i) { + for (let j = 0; j < numIndices[i]; ++j) { + // indices is a 2d tensor with shape of [outputSize, 2] + indices[c * 2] = i; + indices[c * 2 + 1] = j; + values[c] = tokens[c]; + ++c; + } + } + return [indices, values, shape]; + } + + /** + * @license + * Copyright 2021 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + function stringToHashBucketFastImpl(input, numBuckets) { + const output = getArrayFromDType('int32', input.length); + for (let i = 0; i < input.length; ++i) { + output[i] = + fingerPrint64(input[i]).modulo(numBuckets).getLowBitsUnsigned(); + } + return output; + } + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. 
+ * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + const subImpl = createSimpleBinaryKernelImpl(((aValue, bValue) => aValue - bValue)); + const subComplexImpl = createComplexBinaryKernelImpl(((aReal, aImag, bReal, bImag) => { + return { real: aReal - bReal, imag: aImag - bImag }; + })); + const sub$1 = binaryKernelFunc$1(Sub, subImpl, subComplexImpl); + const subConfig$1 = { + kernelName: Sub, + backendName: 'cpu', + kernelFunc: sub$1 + }; + + /** + * @license + * Copyright 2019 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + /** + * An implementation of the tile kernel shared between webgl and cpu for string + * tensors only. 
+ */ + function tileImpl(xBuf, reps) { + const newShape = new Array(xBuf.rank); + for (let i = 0; i < newShape.length; i++) { + newShape[i] = xBuf.shape[i] * reps[i]; + } + const result = buffer(newShape, xBuf.dtype); + for (let i = 0; i < result.values.length; ++i) { + const newLoc = result.indexToLoc(i); + const originalLoc = new Array(xBuf.rank); + for (let j = 0; j < originalLoc.length; j++) { + originalLoc[j] = newLoc[j] % xBuf.shape[j]; + } + const originalIndex = xBuf.locToIndex(originalLoc); + result.values[i] = xBuf.values[originalIndex]; + } + return result; + } + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + const comparePair = (a, b) => { + const valueDiff = b.value - a.value; + return valueDiff === 0 ? a.index - b.index : valueDiff; + }; + /** + * Partitions array where all elements smaller than the (k+1) smallest element + * are found to the left of it, and all larger to the right of it. 
+ * Based on the Floyd-Rivest Algorithm, ref: + * https://en.wikipedia.org/wiki/Floyd%E2%80%93Rivest_algorithm + * @param array: Array to partition + * @param left: Left index for the interval + * @param right: Right index for the interval + * @param k: Desired index value, where array[k] is the (k+1)th smallest element + * when left = 0 + */ + function select$2(array, k, left = 0, right = array.length - 1) { + while (right > left) { + // Use select recursively to sample a smaller set of size s + // the arbitrary constants 600 and 0.5 are used in the original + // version to minimize execution time. + if (right - left > 600) { + const n = right - left + 1; + const i = k - left + 1; + const z = Math.log(n); + const s = 0.5 * Math.exp(2 * z / 3); + const sd = 0.5 * Math.sqrt(z * s * (n - s) / n) * Math.sign(i - n / 2); + const newLeft = Math.max(left, Math.floor(k - i * s / n + sd)); + const newRight = Math.min(right, Math.floor(k + (n - i) * s / n + sd)); + select$2(array, k, newLeft, newRight); + } + // partition the elements between left and right around t + const t = array[k]; + let i = left; + let j = right; + swap(array, left, k); + if (comparePair(array[right], t) > 0) { + swap(array, left, right); + } + while (i < j) { + swap(array, i, j); + i++; + j--; + while (comparePair(array[i], t) < 0) { + i = i + 1; + } + while (comparePair(array[j], t) > 0) { + j = j - 1; + } + } + if (comparePair(array[left], t) === 0) { + swap(array, left, j); + } + else { + j = j + 1; + swap(array, j, right); + } + // Adjust left and right towards the boundaries of the subset + // containing the (k - left + 1)th smallest element. + if (j <= k) { + left = j + 1; + } + if (k <= j) { + right = j - 1; + } + } + } + function topKImpl(x, xShape, xDtype, k, sorted) { + // Reshape into a 2d tensor [batch, lastDim] and compute topk along lastDim. 
+ const lastDim = xShape[xShape.length - 1]; + const [batch, size] = [x.length / lastDim, lastDim]; + const allTopKVals = getTypedArrayFromDType(xDtype, batch * k); + const allTopKIndices = getTypedArrayFromDType('int32', batch * k); + for (let b = 0; b < batch; b++) { + const offset = b * size; + const vals = x.subarray(offset, offset + size); + let valAndInd = new Array(vals.length); + vals.forEach((value, index) => valAndInd[index] = { value, index }); + if (k < valAndInd.length) { + select$2(valAndInd, k); + valAndInd = valAndInd.slice(0, k); + } + if (sorted) { + valAndInd.sort(comparePair); + } + const outOffset = b * k; + const topKVals = allTopKVals.subarray(outOffset, outOffset + k); + const topKIndices = allTopKIndices.subarray(outOffset, outOffset + k); + for (let i = 0; i < k; i++) { + topKVals[i] = valAndInd[i].value; + topKIndices[i] = valAndInd[i].index; + } + } + // Reshape back to the original input shape, except that the last + // dimension is k. + const outputShape = xShape.slice(); + outputShape[outputShape.length - 1] = k; + return [ + buffer(outputShape, xDtype, allTopKVals), + buffer(outputShape, 'int32', allTopKIndices) + ]; + } + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + function uniqueImpl(values, axis, shape, dtype) { + // Normalize and validate axis. 
+ const $axis = parseAxisParam(axis, shape)[0]; + // Calculate the new shape that is suitable for extracting data along the + // given axis. + // + // The rank is 3. + // The size of the 1st dimension is the size of all the axes < the given axis. + // The size of the 2nd dimension is the same as the size of the given axis. + // The size of the 3rd dimension is the size of all the axes > the given axis. + // + // For example, for a 4D tensor with shape=[2, 3, 5, 4] and axis=2, the + // newShape would be: [2*3, 5, 4]. + // + // Note that this is not the final output shape. This will be the shape for an + // intermediate TensorBuffer (see inputBuffer below) to allow us to extract + // values along the given axis. To demonstrate how it works, consider the + // following example: + // + // Input: a 3D tensor, with shape [1, 2, 3] + // [ + // [ + // [1,2,3], + // [4,5,6] + // ] + // ] + // Axis: 2 (the last axis). + // Along axis 2, we expect to extract 3 tensors: [1,4], [2,5], [3,6]. + // + // For this example, newShape would be: [2, 3, 1], where 2 is calculated from + // 1*2. The re-shaped data would look like: + // + // [ + // [ + // [1], [2], [3] + // ], + // [ + // [4], [5], [6] + // ] + // ] + // + // Then, we can construct a 3-level nested loop by the following dimension + // order to extract the values along the axis (dimension1): + // i: dimension1 // 0,1,2 (newShape[1]) + // m: dimension0 // 0,1 (newShape[0]) + // n: dimension2 // 0 (newShape[2]) + // + // m, i, n + // --------- + // Iteration 0: data at [0, 0, 0] => "1" + // Iteration 1: data at [1, 0, 0] => "4" + // We got [1,4]. + // Iteration 2: data at [0, 1, 0] => "2" + // Iteration 3: data at [1, 1, 0] => "5" + // We got [2,5]. + // Iteration 4: data at [0, 2, 0] => "3" + // Iteration 5: data at [1, 2, 0] => "6" + // We got [3,6]. 
+ const newShape = [1, shape[0], 1]; + for (let i = 0; i < $axis; i++) { + newShape[0] *= shape[i]; + } + newShape[1] = shape[$axis]; + for (let i = $axis + 1; i < shape.length; i++) { + newShape[2] *= shape[i]; + } + // A map from unique elements (their string representations) to their values + // in "indices" (below). + const uniqueElements = new Map(); + // The indices of each unique element in the original tensor along the given + // axis. It is 1D and has the same size as the given axis. + const indices = new Int32Array(shape[$axis]); + // Create a buffer so we can easily extract value at a given location. + const inputBuffer = new TensorBuffer(newShape, dtype, values); + // The indices along the given axis that have unique elements. This is a + // de-duped version of "indices" above. + const uniqueIndices = []; + const is1DTensor = newShape[0] === 1 && newShape[2] === 1; + for (let i = 0; i < shape[$axis]; i++) { + // Extract values along the axis. + let element; + if (is1DTensor) { + // Fast path for 1D tensor input. + element = values[i].toString(); + } + else { + const axisValues = []; + for (let m = 0; m < newShape[0]; m++) { + for (let n = 0; n < newShape[2]; n++) { + axisValues.push(inputBuffer.get(m, i, n)); + } + } + element = axisValues.join(','); + } + // Dedup and update various indices. + const existingIndex = uniqueElements.get(element); + if (existingIndex != null) { + indices[i] = existingIndex; + } + else { + const uniqueIndex = uniqueElements.size; + uniqueElements.set(element, uniqueIndex); + indices[i] = uniqueIndex; + uniqueIndices.push(i); + } + } + // Now we know where each of the unique elements are located along the axis + // (uniqueIndices). Extract them from input buffer and store them in the + // output buffer. 
+ const outputTmpShape = newShape.slice(); + outputTmpShape[1] = uniqueElements.size; + const outputBuffer = new TensorBuffer(outputTmpShape, dtype); + uniqueIndices.forEach((uniqueElementIndex, i) => { + for (let m = 0; m < newShape[0]; m++) { + for (let n = 0; n < newShape[2]; n++) { + outputBuffer.set(inputBuffer.get(m, uniqueElementIndex, n), m, i, n); + } + } + }); + // The output shape can be calculated from the input shape with the size of + // the given axis replaced by the number of unique elements along that axis. + const outputShape = shape.slice(); + outputShape[$axis] = outputTmpShape[1]; + return { + outputValues: outputBuffer.values, + outputShape, + indices, + }; + } + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ + + var shared = /*#__PURE__*/Object.freeze({ + __proto__: null, + addImpl: addImpl, + bincountImpl: bincountImpl, + bincountReduceImpl: bincountReduceImpl, + bitwiseAndImpl: bitwiseAndImpl, + castImpl: castImpl, + ceilImpl: ceilImpl, + concatImpl: concatImpl$1, + equalImpl: equalImpl, + expImpl: expImpl, + expm1Impl: expm1Impl, + floorDivImpl: floorDivImpl, + floorImpl: floorImpl, + gatherNdImpl: gatherNdImpl, + gatherV2Impl: gatherV2Impl, + greaterEqualImpl: greaterEqualImpl, + greaterImpl: greaterImpl, + lessEqualImpl: lessEqualImpl, + lessImpl: lessImpl, + linSpaceImpl: linSpaceImpl, + logImpl: logImpl, + maxImpl: maxImpl$1, + maximumImpl: maximumImpl, + minimumImpl: minimumImpl, + multiplyImpl: multiplyImpl, + negImpl: negImpl, + notEqualImpl: notEqualImpl, + prodImpl: prodImpl, + raggedGatherImpl: raggedGatherImpl, + raggedRangeImpl: raggedRangeImpl, + raggedTensorToTensorImpl: raggedTensorToTensorImpl, + rangeImpl: rangeImpl, + rsqrtImpl: rsqrtImpl, + scatterImpl: scatterImpl, + sigmoidImpl: sigmoidImpl, + simpleAbsImpl: simpleAbsImpl, + sliceImpl: sliceImpl, + sparseFillEmptyRowsImpl: sparseFillEmptyRowsImpl, + sparseReshapeImpl: sparseReshapeImpl, + sparseSegmentReductionImpl: sparseSegmentReductionImpl, + sqrtImpl: sqrtImpl, + squaredDifferenceImpl: squaredDifferenceImpl, + staticRegexReplaceImpl: staticRegexReplaceImpl, + stridedSliceImpl: stridedSliceImpl, + stringNGramsImpl: stringNGramsImpl, + stringSplitImpl: stringSplitImpl, + stringToHashBucketFastImpl: stringToHashBucketFastImpl, + subImpl: subImpl, + tileImpl: tileImpl, + topKImpl: topKImpl, + transposeImpl: transposeImpl$1, + uniqueImpl: uniqueImpl + }); + + /** @license See the LICENSE file. */ + // This code is auto-generated, do not modify this file! + const version$3 = '4.22.0'; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. 
+ * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + // Side effects for default initialization of MathBackendCPU + registerBackend('cpu', () => new MathBackendCPU(), 1 /* priority */); + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the License); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + const elu$1 = unaryKernelFunc$1(Elu$1, (xi) => xi >= 0 ? xi : (Math.exp(xi) - 1)); + const eluConfig$1 = { + kernelName: Elu$1, + backendName: 'cpu', + kernelFunc: elu$1, + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + function leakyRelu$1(args) { + const { inputs, backend, attrs } = args; + const { x } = inputs; + const { alpha } = attrs; + assertNotComplex$1([x], 'leakyRelu'); + const xSize = sizeFromShape(x.shape); + const xVals = backend.data.get(x.dataId).values; + const outVals = getTypedArrayFromDType('float32', xSize); + for (let i = 0; i < xVals.length; i++) { + outVals[i] = xVals[i] < 0 ? alpha * xVals[i] : xVals[i]; + } + return backend.makeTensorInfo(x.shape, 'float32', outVals); + } + const leakyReluConfig$1 = { + kernelName: LeakyRelu, + backendName: 'cpu', + kernelFunc: leakyRelu$1 + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the License); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + const preluImpl = createSimpleBinaryKernelImpl((xValue, aValue) => xValue < 0 ? 
aValue * xValue : xValue); + function prelu$1(args) { + const { inputs, backend } = args; + const { x, alpha } = inputs; + assertNotComplex$1([x, alpha], 'prelu'); + const aVals = backend.data.get(x.dataId).values; + const bVals = backend.data.get(alpha.dataId).values; + const [resultData, resultShape] = preluImpl(x.shape, alpha.shape, aVals, bVals, 'float32'); + return backend.makeTensorInfo(resultShape, 'float32', resultData); + } + const preluConfig$1 = { + kernelName: Prelu, + backendName: 'cpu', + kernelFunc: prelu$1, + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the License); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + const relu$1 = unaryKernelFunc$1(Relu$1, (xi) => Math.max(0, xi)); + const reluConfig$1 = { + kernelName: Relu$1, + backendName: 'cpu', + kernelFunc: relu$1, + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the License); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + const relu6$1 = unaryKernelFunc$1(Relu6$1, (xi) => Math.min(Math.max(0, xi), 6)); + const relu6Config$1 = { + kernelName: Relu6$1, + backendName: 'cpu', + kernelFunc: relu6$1, + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ + function applyActivation(backend, x, activation, preluActivationWeights, leakyreluAlpha) { + if (activation === 'linear') { + return identity$1({ inputs: { x }, backend }); + } + else if (activation === 'relu') { + return relu$1({ inputs: { x }, backend }); + } + else if (activation === 'elu') { + return elu$1({ inputs: { x }, backend }); + } + else if (activation === 'relu6') { + return relu6$1({ inputs: { x }, backend }); + } + else if (activation === 'prelu') { + return prelu$1({ inputs: { x, alpha: preluActivationWeights }, backend }); + } + else if (activation === 'leakyrelu') { + return leakyRelu$1({ inputs: { x }, backend, attrs: { alpha: leakyreluAlpha } }); + } + else if (activation === 'sigmoid') { + return sigmoid$1({ inputs: { x }, backend }); + } + throw new Error(`Activation ${activation} has not been implemented for the CPU backend.`); + } + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ + function reshape$1(args) { + const { inputs, backend, attrs } = args; + const { x } = inputs; + const { shape } = attrs; + const xSize = sizeFromShape(x.shape); + const $shape = inferFromImplicitShape(shape, xSize); + const $xSize = sizeFromShape($shape); + assert$1(xSize === $xSize, () => `The new shape (${$shape}) has ${$xSize} elements and the old ` + + `shape (${x.shape}) has ${xSize} elements. The new shape and old ` + + `shape must have the same number of elements.`); + backend.incRef(x.dataId); + const xData = backend.data.get(x.dataId); + if (xData.complexTensorInfos != null) { + const real = xData.complexTensorInfos.real; + const imag = xData.complexTensorInfos.imag; + real.shape = $shape; + imag.shape = $shape; + } + return { dataId: x.dataId, shape: $shape, dtype: x.dtype }; + } + const reshapeConfig$1 = { + kernelName: Reshape$1, + backendName: 'cpu', + kernelFunc: reshape$1 + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the License); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + function batchMatMul$1(args) { + const { inputs, backend, attrs } = args; + const { a, b } = inputs; + const { transposeA, transposeB } = attrs; + assertNotComplex$1([a, b], 'matMul'); + const aRank = a.shape.length; + const bRank = b.shape.length; + const innerShapeA = transposeA ? 
a.shape[aRank - 2] : a.shape[aRank - 1]; + const innerShapeB = transposeB ? b.shape[bRank - 1] : b.shape[bRank - 2]; + const outerShapeA = transposeA ? a.shape[aRank - 1] : a.shape[aRank - 2]; + const outerShapeB = transposeB ? b.shape[bRank - 2] : b.shape[bRank - 1]; + const outerDimsA = a.shape.slice(0, -2); + const outerDimsB = b.shape.slice(0, -2); + const batchDimA = sizeFromShape(outerDimsA); + const batchDimB = sizeFromShape(outerDimsB); + const outShapeOuterDims = assertAndGetBroadcastShape(a.shape.slice(0, -2), b.shape.slice(0, -2)); + const outShape = outShapeOuterDims.concat([outerShapeA, outerShapeB]); + assert$1(innerShapeA === innerShapeB, () => `Error in matMul: inner shapes (${innerShapeA}) and (` + + `${innerShapeB}) of Tensors with shapes ${a.shape} and ` + + `${b.shape} and transposeA=${transposeA}` + + ` and transposeB=${transposeB} must match.`); + const a3dShape = transposeA ? [batchDimA, innerShapeA, outerShapeA] : + [batchDimA, outerShapeA, innerShapeA]; + const b3dShape = transposeB ? [batchDimB, outerShapeB, innerShapeB] : + [batchDimB, innerShapeB, outerShapeB]; + // The rest of the implementation is designed to operate on rank-3 tensors + const a3d = reshape$1({ inputs: { x: a }, backend, attrs: { shape: a3dShape } }); + const b3d = reshape$1({ inputs: { x: b }, backend, attrs: { shape: b3dShape } }); + const sharedDim = transposeA ? a3d.shape[1] : a3d.shape[2]; + const leftDim = transposeA ? a3d.shape[2] : a3d.shape[1]; + const rightDim = transposeB ? b3d.shape[1] : b3d.shape[2]; + const batchDim = Math.max(batchDimA, batchDimB); + const a3dValues = backend.data.get(a3d.dataId).values; + const b3dValues = backend.data.get(b3d.dataId).values; + const a3dStrides = computeStrides(a3d.shape); + const b3dStrides = computeStrides(b3d.shape); + const [aBatch, aOuterStep, aInnerStep] = transposeA ? + [a3dStrides[0], 1, a3dStrides[1]] : + [a3dStrides[0], a3dStrides[1], 1]; + const [bInnerStep, bOuterStep, bBatch] = transposeB ? 
+ [1, b3dStrides[1], b3dStrides[0]] : + [b3dStrides[1], 1, b3dStrides[0]]; + const size = leftDim * rightDim; + const result = buffer([batchDim, leftDim, rightDim], a3d.dtype); + const resVals = result.values; + const blockSize = backend.blockSize; + for (let bi = 0; bi < batchDim; bi++) { + const batchIndexA = bi % batchDimA; + const batchIndexB = bi % batchDimB; + for (let i0 = 0; i0 < leftDim; i0 += blockSize) { + // for when blockSize doesn't evenly divide the input + const iBlock = Math.min(i0 + blockSize, leftDim); + for (let j0 = 0; j0 < rightDim; j0 += blockSize) { + const jBlock = Math.min(j0 + blockSize, rightDim); + for (let k0 = 0; k0 < sharedDim; k0 += blockSize) { + const kBlock = Math.min(k0 + blockSize, sharedDim); + for (let i = i0; i < iBlock; i++) { + for (let j = j0; j < jBlock; j++) { + let sum = 0.0; + for (let k = k0; k < kBlock; k++) { + const aVal = + // tslint:disable-next-line: max-line-length + a3dValues[batchIndexA * aBatch + i * aOuterStep + k * aInnerStep]; + const bVal = + // tslint:disable-next-line: max-line-length + b3dValues[k * bInnerStep + j * bOuterStep + batchIndexB * bBatch]; + sum += aVal * bVal; + } + resVals[bi * size + (i * rightDim + j)] += sum; + } + } + } + } + } + } + backend.disposeIntermediateTensorInfo(a3d); + backend.disposeIntermediateTensorInfo(b3d); + // set correct shape on output. + return backend.makeTensorInfo(outShape, result.dtype, result.values); + } + const batchMatMulConfig$1 = { + kernelName: BatchMatMul, + backendName: 'cpu', + kernelFunc: batchMatMul$1, + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the License); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + function _fusedMatMul$1(args) { + const { inputs, backend, attrs } = args; + const { a, b, bias, preluActivationWeights } = inputs; + const { transposeA, transposeB, activation, leakyreluAlpha } = attrs; + let current; + let addRes; + let activationRes; + const intermediates = []; + const matMulRes = batchMatMul$1({ inputs: { a, b }, attrs: { transposeA, transposeB }, backend }); + current = matMulRes; + if (bias) { + addRes = add({ inputs: { a: current, b: bias }, backend }); + intermediates.push(current); + current = addRes; + } + if (activation) { + activationRes = applyActivation(backend, current, activation, preluActivationWeights, leakyreluAlpha); + intermediates.push(current); + current = activationRes; + } + for (const i of intermediates) { + backend.disposeIntermediateTensorInfo(i); + } + return current; + } + const _fusedMatMulConfig$1 = { + kernelName: _FusedMatMul, + backendName: 'cpu', + kernelFunc: _fusedMatMul$1, + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the License); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + const acos$1 = unaryKernelFunc$1(Acos, (xi) => Math.acos(xi)); + const acosConfig$1 = { + kernelName: Acos, + backendName: 'cpu', + kernelFunc: acos$1, + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the License); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + const acosh$1 = unaryKernelFunc$1(Acosh, (xi) => Math.acosh(xi)); + const acoshConfig$1 = { + kernelName: Acosh, + backendName: 'cpu', + kernelFunc: acosh$1, + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ + function addN$1(args) { + const { inputs, backend } = args; + const tensors = inputs; + assertNotComplex$1(inputs, 'addN'); + const vals = tensors.map(t => backend.data.get(t.dataId).values); + const outBuf = buffer(tensors[0].shape, tensors[0].dtype); + const outVals = outBuf.values; + for (let i = 0; i < tensors.length; i++) { + const currVals = vals[i]; + for (let j = 0; j < outVals.length; j++) { + outVals[j] += currVals[j]; + } + } + return backend.makeTensorInfo(outBuf.shape, outBuf.dtype, outBuf.values); + } + const addNConfig$1 = { + kernelName: AddN, + backendName: 'cpu', + kernelFunc: addN$1 + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ + function all$1(args) { + const { inputs, backend, attrs } = args; + const { x } = inputs; + const { axis, keepDims } = attrs; + assertNotComplex$1(x, 'all'); + const origAxes = parseAxisParam(axis, x.shape); + let axes = origAxes; + const permutedAxes = getAxesPermutation(axes, x.shape.length); + let $x = x; + if (permutedAxes != null) { + $x = transpose$1({ inputs: { x }, backend, attrs: { perm: permutedAxes } }); + axes = getInnerMostAxes(axes.length, x.shape.length); + } + assertAxesAreInnerMostDims('all', axes, $x.shape.length); + const [outShape, reduceShape] = computeOutAndReduceShapes($x.shape, axes); + const reduceSize = sizeFromShape(reduceShape); + const vals = makeZerosTypedArray(sizeFromShape(outShape), $x.dtype); + const aVals = backend.data.get($x.dataId).values; + for (let i = 0; i < vals.length; ++i) { + const offset = i * reduceSize; + let all = aVals[offset]; + for (let j = 0; j < reduceSize; ++j) { + const value = aVals[offset + j]; + all = all && value; + } + vals[i] = all; + } + if (permutedAxes != null) { + backend.disposeIntermediateTensorInfo($x); + } + const result = backend.makeTensorInfo(outShape, $x.dtype, vals); + if (keepDims) { + const expandedShape = expandShapeToKeepDim(outShape, origAxes); + const reshapedResult = reshape$1({ inputs: { x: result }, backend, attrs: { shape: expandedShape } }); + backend.disposeIntermediateTensorInfo(result); + return reshapedResult; + } + return result; + } + const allConfig$1 = { + kernelName: All, + backendName: 'cpu', + kernelFunc: all$1 + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + function any$1(args) { + const { inputs, backend, attrs } = args; + const { x } = inputs; + const { axis, keepDims } = attrs; + assertNotComplex$1(x, 'any'); + const origAxes = parseAxisParam(axis, x.shape); + let axes = origAxes; + const permutedAxes = getAxesPermutation(axes, x.shape.length); + let $x = x; + if (permutedAxes != null) { + $x = transpose$1({ inputs: { x }, backend, attrs: { perm: permutedAxes } }); + axes = getInnerMostAxes(axes.length, x.shape.length); + } + assertAxesAreInnerMostDims('any', axes, $x.shape.length); + const [outShape, reduceShape] = computeOutAndReduceShapes($x.shape, axes); + const reduceSize = sizeFromShape(reduceShape); + const vals = makeZerosTypedArray(sizeFromShape(outShape), $x.dtype); + const aVals = backend.data.get($x.dataId).values; + for (let i = 0; i < vals.length; ++i) { + const offset = i * reduceSize; + let anyVal = aVals[offset]; + for (let j = 0; j < reduceSize; ++j) { + const value = aVals[offset + j]; + anyVal = anyVal || value; + } + vals[i] = anyVal; + } + if (permutedAxes != null) { + backend.disposeIntermediateTensorInfo($x); + } + const result = backend.makeTensorInfo(outShape, $x.dtype, vals); + if (keepDims) { + const expandedShape = expandShapeToKeepDim(outShape, origAxes); + const reshapedResult = reshape$1({ inputs: { x: result }, backend, attrs: { shape: expandedShape } }); + backend.disposeIntermediateTensorInfo(result); + return reshapedResult; + } + return result; + } + const anyConfig$1 = { + 
kernelName: Any, + backendName: 'cpu', + kernelFunc: any$1 + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + function argMax$1(args) { + const { inputs, backend, attrs } = args; + const { x } = inputs; + const { axis } = attrs; + assertNotComplex$1(x, 'argMax'); + let axes = parseAxisParam(axis, x.shape); + const permutedAxes = getAxesPermutation(axes, x.shape.length); + let $x = x; + const intermediateTensorInfos = []; + if (permutedAxes != null) { + $x = transpose$1({ inputs: { x }, backend, attrs: { perm: permutedAxes } }); + intermediateTensorInfos.push($x); + axes = getInnerMostAxes(axes.length, $x.shape.length); + } + axes = [axes[0]]; + assertAxesAreInnerMostDims('argMax', axes, $x.shape.length); + const [outShape, reduceShape] = computeOutAndReduceShapes($x.shape, axes); + const outSize = sizeFromShape(outShape); + const vals = makeZerosTypedArray(outSize, 'int32'); + const reduceSize = sizeFromShape(reduceShape); + const aVals = backend.data.get($x.dataId).values; + for (let i = 0; i < vals.length; ++i) { + const offset = i * reduceSize; + let max = aVals[offset]; + let maxIndex = 0; + for (let j = 0; j < reduceSize; ++j) { + const value = aVals[offset + j]; + if (value > max) { + max = value; + maxIndex = j; + } + } + vals[i] = maxIndex; + } + intermediateTensorInfos.forEach(t => 
backend.disposeIntermediateTensorInfo(t)); + return backend.makeTensorInfo(outShape, 'int32', vals); + } + const argMaxConfig$1 = { + kernelName: ArgMax, + backendName: 'cpu', + kernelFunc: argMax$1 + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + function argMin$1(args) { + const { inputs, backend, attrs } = args; + const { x } = inputs; + const { axis } = attrs; + assertNotComplex$1(x, 'argMin'); + let axes = parseAxisParam(axis, x.shape); + const permutedAxes = getAxesPermutation(axes, x.shape.length); + let $x = x; + const intermediateTensorInfos = []; + if (permutedAxes != null) { + $x = transpose$1({ inputs: { x }, backend, attrs: { perm: permutedAxes } }); + intermediateTensorInfos.push($x); + axes = getInnerMostAxes(axes.length, $x.shape.length); + } + axes = [axes[0]]; + assertAxesAreInnerMostDims('argMin', axes, $x.shape.length); + const [outShape, reduceShape] = computeOutAndReduceShapes($x.shape, axes); + const outSize = sizeFromShape(outShape); + const vals = makeZerosTypedArray(outSize, 'int32'); + const reduceSize = sizeFromShape(reduceShape); + const aVals = backend.data.get($x.dataId).values; + for (let i = 0; i < vals.length; ++i) { + const offset = i * reduceSize; + let min = aVals[offset]; + let minIndex = 0; + for (let j = 0; j < reduceSize; ++j) { + const value = aVals[offset 
+ j]; + if (value < min) { + min = value; + minIndex = j; + } + } + vals[i] = minIndex; + } + intermediateTensorInfos.forEach(t => backend.disposeIntermediateTensorInfo(t)); + return backend.makeTensorInfo(outShape, 'int32', vals); + } + const argMinConfig$1 = { + kernelName: ArgMin, + backendName: 'cpu', + kernelFunc: argMin$1 + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the License); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + const asin$1 = unaryKernelFunc$1(Asin, (xi) => Math.asin(xi)); + const asinConfig$1 = { + kernelName: Asin, + backendName: 'cpu', + kernelFunc: asin$1, + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the License); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ + const asinh$1 = unaryKernelFunc$1(Asinh, (xi) => Math.asinh(xi)); + const asinhConfig$1 = { + kernelName: Asinh, + backendName: 'cpu', + kernelFunc: asinh$1, + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the License); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + const atan$1 = unaryKernelFunc$1(Atan, (xi) => Math.atan(xi)); + const atanConfig$1 = { + kernelName: Atan, + backendName: 'cpu', + kernelFunc: atan$1, + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the License); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ + const atan2Impl = createSimpleBinaryKernelImpl((aValue, bValue) => Math.atan2(aValue, bValue)); + const atan2$1 = binaryKernelFunc$1(Atan2, atan2Impl); + const atan2Config$1 = { + kernelName: Atan2, + backendName: 'cpu', + kernelFunc: atan2$1, + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the License); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + const atanh$1 = unaryKernelFunc$1(Atanh, (xi) => Math.atanh(xi)); + const atanhConfig$1 = { + kernelName: Atanh, + backendName: 'cpu', + kernelFunc: atanh$1, + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ + function pool(xValues, xShape, dtype, strides, convInfo, poolType) { + const strideHeight = convInfo.strideHeight; + const strideWidth = convInfo.strideWidth; + const dilationHeight = convInfo.dilationHeight; + const dilationWidth = convInfo.dilationWidth; + const effectiveFilterHeight = convInfo.effectiveFilterHeight; + const effectiveFilterWidth = convInfo.effectiveFilterWidth; + const padTop = convInfo.padInfo.top; + const padLeft = convInfo.padInfo.left; + const initialValue = (poolType === 'max' ? Number.NEGATIVE_INFINITY : + Number.POSITIVE_INFINITY); + const output = buffer(convInfo.outShape, dtype); + const outputVals = output.values; + const outputBatchStrides = convInfo.outShape[1] * convInfo.outShape[2] * convInfo.outShape[3]; + const outputRowStrides = convInfo.outShape[2] * convInfo.outShape[3]; + const outputColStrides = convInfo.outShape[3]; + for (let b = 0; b < convInfo.batchSize; ++b) { + const outputBatchOffset = b * outputBatchStrides; + const inputBatchOffset = b * strides[0]; + for (let d = 0; d < convInfo.inChannels; ++d) { + for (let yR = 0; yR < convInfo.outHeight; ++yR) { + const xRCorner = yR * strideHeight - padTop; + const xRMin = Math.max(0, xRCorner); + const xRMax = Math.min(convInfo.inHeight, effectiveFilterHeight + xRCorner); + const outputRowOffset = outputBatchOffset + yR * outputRowStrides; + for (let yC = 0; yC < convInfo.outWidth; ++yC) { + const xCCorner = yC * strideWidth - padLeft; + const xCMin = Math.max(0, xCCorner); + const xCMax = Math.min(convInfo.inWidth, effectiveFilterWidth + xCCorner); + let minMaxValue = initialValue; + let avgValue = 0; + let count = 0; + for (let xR = xRMin; xR < xRMax; xR += dilationHeight) { + const xROffset = inputBatchOffset + xR * strides[1]; + for (let xC = xCMin; xC < xCMax; xC += dilationWidth) { + const xCOffset = xROffset + xC * strides[2]; + const pixel = xValues[xCOffset + d]; + if ((poolType 
=== 'max' && pixel > minMaxValue)) { + minMaxValue = pixel; + } + else if (poolType === 'avg') { + avgValue += pixel; + count++; + } + } + if (isNaN(minMaxValue)) { + break; + } + } + const outputOffset = outputRowOffset + yC * outputColStrides + d; + outputVals[outputOffset] = + poolType === 'avg' ? avgValue / count : minMaxValue; + } + } + } + } + return output; + } + function maxPoolPositions(xValues, xShape, dtype, convInfo, flattenPositions = false, includeBatchInIndex = false) { + const maxPositions = buffer(convInfo.outShape, 'int32'); + const strideHeight = convInfo.strideHeight; + const strideWidth = convInfo.strideWidth; + const dilationHeight = convInfo.dilationHeight; + const dilationWidth = convInfo.dilationWidth; + const effectiveFilterHeight = convInfo.effectiveFilterHeight; + const effectiveFilterWidth = convInfo.effectiveFilterWidth; + const padTop = convInfo.padInfo.top; + const padLeft = convInfo.padInfo.left; + const xBuf = buffer(xShape, dtype, xValues); + for (let b = 0; b < convInfo.batchSize; ++b) { + for (let d = 0; d < convInfo.inChannels; ++d) { + for (let yR = 0; yR < convInfo.outHeight; ++yR) { + const xRCorner = yR * strideHeight - padTop; + let xRMin = xRCorner; + while (xRMin < 0) { + xRMin += dilationHeight; + } + // const xRMin = Math.max(0, xRCorner); + const xRMax = Math.min(convInfo.inHeight, effectiveFilterHeight + xRCorner); + for (let yC = 0; yC < convInfo.outWidth; ++yC) { + const xCCorner = yC * strideWidth - padLeft; + let xCMin = xCCorner; + while (xCMin < 0) { + xCMin += dilationWidth; + } + const xCMax = Math.min(convInfo.inWidth, effectiveFilterWidth + xCCorner); + let maxValue = Number.NEGATIVE_INFINITY; + let maxPosition = -1; + for (let xR = xRMin; xR < xRMax; xR += dilationHeight) { + const wR = xR - xRCorner; + for (let xC = xCMin; xC < xCMax; xC += dilationWidth) { + const wC = xC - xCCorner; + // For some reason, disable-next-line is not working + // TODO(mattsoulanille): Remove this when switching to TS5. 
+ /* tslint:disable: no-unnecessary-type-assertion */ + const pixel = xBuf.get(b, xR, xC, d); + if (pixel > maxValue) { + maxValue = pixel; + if (flattenPositions) { + maxPosition = includeBatchInIndex ? + ((b * convInfo.inHeight + xR) * convInfo.inWidth + xC) * + convInfo.inChannels + + d : + (xR * convInfo.inWidth + xC) * convInfo.inChannels + d; + } + else { + maxPosition = wR * effectiveFilterWidth + wC; + } + } + } + } + maxPositions.set(maxPosition, b, yR, yC, d); + } + } + } + } + return maxPositions; + } + function pool3d(xValues, xShape, dtype, strides, convInfo, poolType) { + const strideDepth = convInfo.strideDepth; + const strideHeight = convInfo.strideHeight; + const strideWidth = convInfo.strideWidth; + const dilationDepth = convInfo.dilationDepth; + const dilationHeight = convInfo.dilationHeight; + const dilationWidth = convInfo.dilationWidth; + const effectiveFilterDepth = convInfo.effectiveFilterDepth; + const effectiveFilterHeight = convInfo.effectiveFilterHeight; + const effectiveFilterWidth = convInfo.effectiveFilterWidth; + const padFront = convInfo.padInfo.front; + const padTop = convInfo.padInfo.top; + const padLeft = convInfo.padInfo.left; + const initialValue = (poolType === 'max' ? 
Number.NEGATIVE_INFINITY : + Number.POSITIVE_INFINITY); + const output = buffer(convInfo.outShape, dtype); + const outputVals = output.values; + const outputBatchStrides = convInfo.outShape[1] * convInfo.outShape[2] * + convInfo.outShape[3] * convInfo.outShape[4]; + const outputDepthStrides = convInfo.outShape[2] * convInfo.outShape[3] * convInfo.outShape[4]; + const outputRowStrides = convInfo.outShape[3] * convInfo.outShape[4]; + const outputColStrides = convInfo.outShape[4]; + for (let batch = 0; batch < convInfo.batchSize; ++batch) { + const outputBatchOffset = batch * outputBatchStrides; + const inputBatchOffset = batch * strides[0]; + for (let channel = 0; channel < convInfo.inChannels; ++channel) { + for (let yDepth = 0; yDepth < convInfo.outDepth; ++yDepth) { + const xDepthCorner = yDepth * strideDepth - padFront; + let xDepthMin = xDepthCorner; + while (xDepthMin < 0) { + xDepthMin += dilationDepth; + } + const xDepthMax = Math.min(convInfo.inDepth, effectiveFilterDepth + xDepthCorner); + const outputDepthOffset = outputBatchOffset + yDepth * outputDepthStrides; + for (let yRow = 0; yRow < convInfo.outHeight; ++yRow) { + const xRowCorner = yRow * strideHeight - padTop; + let xRowMin = xRowCorner; + while (xRowMin < 0) { + xRowMin += dilationHeight; + } + const xRowMax = Math.min(convInfo.inHeight, effectiveFilterHeight + xRowCorner); + const outputRowOffset = outputDepthOffset + yRow * outputRowStrides; + for (let yCol = 0; yCol < convInfo.outWidth; ++yCol) { + const xColCorner = yCol * strideWidth - padLeft; + let xColMin = xColCorner; + while (xColMin < 0) { + xColMin += dilationWidth; + } + const xColMax = Math.min(convInfo.inWidth, effectiveFilterWidth + xColCorner); + // Shader code begins + const outputColOffset = outputRowOffset + yCol * outputColStrides; + let minMaxValue = initialValue; + let avgValue = 0; + let count = 0; + for (let xDepth = xDepthMin; xDepth < xDepthMax; xDepth += dilationDepth) { + const xDepthOffset = inputBatchOffset + xDepth 
* strides[1]; + for (let xRow = xRowMin; xRow < xRowMax; xRow += dilationHeight) { + const xRowOffset = xDepthOffset + xRow * strides[2]; + for (let xCol = xColMin; xCol < xColMax; xCol += dilationWidth) { + const xColOffset = xRowOffset + xCol * strides[3]; + const pixel = xValues[xColOffset + channel]; + if ((poolType === 'max' && pixel > minMaxValue)) { + minMaxValue = pixel; + } + else if (poolType === 'avg') { + avgValue += pixel; + count++; + } + if (isNaN(minMaxValue)) { + break; + } + } + if (isNaN(minMaxValue)) { + break; + } + } + if (isNaN(minMaxValue)) { + break; + } + } + const outputOffset = outputColOffset + channel; + outputVals[outputOffset] = poolType === 'avg' ? + avgValue / Math.max(count, 1) : + minMaxValue; + } + } + } + } + } + return output; + } + function maxPool3dPositions(xBuf, convInfo) { + const maxPositions = buffer(convInfo.outShape, 'int32'); + const strideDepth = convInfo.strideDepth; + const strideHeight = convInfo.strideHeight; + const strideWidth = convInfo.strideWidth; + const dilationDepth = convInfo.dilationDepth; + const dilationHeight = convInfo.dilationHeight; + const dilationWidth = convInfo.dilationWidth; + const effectiveFilterDepth = convInfo.effectiveFilterDepth; + const effectiveFilterHeight = convInfo.effectiveFilterHeight; + const effectiveFilterWidth = convInfo.effectiveFilterWidth; + const padFront = convInfo.padInfo.front; + const padTop = convInfo.padInfo.top; + const padLeft = convInfo.padInfo.left; + for (let batch = 0; batch < convInfo.batchSize; ++batch) { + for (let channel = 0; channel < convInfo.inChannels; ++channel) { + for (let yDepth = 0; yDepth < convInfo.outDepth; ++yDepth) { + const xDepthCorner = yDepth * strideDepth - padFront; + let xDepthMin = xDepthCorner; + while (xDepthMin < 0) { + xDepthMin += dilationDepth; + } + const xDepthMax = Math.min(convInfo.inDepth, effectiveFilterDepth + xDepthCorner); + for (let yRow = 0; yRow < convInfo.outHeight; ++yRow) { + const xRowCorner = yRow * 
strideHeight - padTop; + let xRowMin = xRowCorner; + while (xRowMin < 0) { + xRowMin += dilationHeight; + } + const xRowMax = Math.min(convInfo.inHeight, effectiveFilterHeight + xRowCorner); + for (let yCol = 0; yCol < convInfo.outWidth; ++yCol) { + const xColCorner = yCol * strideWidth - padLeft; + let xColMin = xColCorner; + while (xColMin < 0) { + xColMin += dilationWidth; + } + const xColMax = Math.min(convInfo.inWidth, effectiveFilterWidth + xColCorner); + // Shader code begins + let maxValue = Number.NEGATIVE_INFINITY; + let maxPosition = -1; + for (let xDepth = xDepthMin; xDepth < xDepthMax; xDepth += dilationDepth) { + const wDepth = xDepth - xDepthCorner; + for (let xRow = xRowMin; xRow < xRowMax; xRow += dilationHeight) { + const wRow = xRow - xRowCorner; + for (let xCol = xColMin; xCol < xColMax; xCol += dilationWidth) { + const wCol = xCol - xColCorner; + const pixel = xBuf.get(batch, xDepth, xRow, xCol, channel); + if (pixel >= maxValue) { + maxValue = pixel; + maxPosition = + wDepth * effectiveFilterHeight * effectiveFilterWidth + + wRow * effectiveFilterHeight + wCol; + } + } + } + } + maxPositions.set(maxPosition, batch, yDepth, yRow, yCol, channel); + } + } + } + } + } + return maxPositions; + } + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ + function avgPool$1(args) { + const { inputs, backend, attrs } = args; + const { x } = inputs; + assertNotComplex$1(x, 'avgPool'); + const { filterSize, strides, pad, dimRoundingMode } = attrs; + const dilations = 1; + assert$1(eitherStridesOrDilationsAreOne(strides, dilations), () => 'Error in avgPool: Either strides or dilations must be 1. ' + + `Got strides ${strides} and dilations '${dilations}'`); + const convInfo = computePool2DInfo(x.shape, filterSize, strides, dilations, pad, dimRoundingMode); + let res; + if (convInfo.filterWidth === 1 && convInfo.filterHeight === 1 && + arraysEqual(convInfo.inShape, convInfo.outShape)) { + res = identity$1({ inputs: { x }, backend }); + } + else { + const xValues = backend.data.get(x.dataId).values; + const strides = computeStrides(x.shape); + const buffer = pool(xValues, x.shape, x.dtype, strides, convInfo, 'avg'); + res = backend.makeTensorInfo(convInfo.outShape, x.dtype, buffer.values); + } + return res; + } + const avgPoolConfig$1 = { + kernelName: AvgPool, + backendName: 'cpu', + kernelFunc: avgPool$1 + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ + function avgPool3D$1(args) { + const { inputs, backend, attrs } = args; + const { x } = inputs; + const { filterSize, strides, pad, dimRoundingMode, dataFormat } = attrs; + assertNotComplex$1(x, 'avgPool3d'); + const convInfo = computePool3DInfo(x.shape, filterSize, strides, 1 /* dilations */, pad, dimRoundingMode, dataFormat); + const xValues = backend.data.get(x.dataId).values; + const outBuf = pool3d(xValues, x.shape, x.dtype, computeStrides(x.shape), convInfo, 'avg'); + return backend.makeTensorInfo(outBuf.shape, 'float32', outBuf.values); + } + const avgPool3DConfig$1 = { + kernelName: AvgPool3D, + backendName: 'cpu', + kernelFunc: avgPool3D$1 + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ + function avgPool3DGrad$1(args) { + const { inputs, backend, attrs } = args; + const { dy, input } = inputs; + const { filterSize, strides, pad, dimRoundingMode } = attrs; + assertNotComplex$1([dy, input], 'avgPool3DGrad'); + const convInfo = computePool3DInfo(input.shape, filterSize, strides, 1 /* dilations */, pad, dimRoundingMode); + const strideDepth = convInfo.strideDepth; + const strideHeight = convInfo.strideHeight; + const strideWidth = convInfo.strideWidth; + const filterDepth = convInfo.filterDepth; + const filterHeight = convInfo.filterHeight; + const filterWidth = convInfo.filterWidth; + const dilationDepth = convInfo.dilationDepth; + const dilationHeight = convInfo.dilationHeight; + const dilationWidth = convInfo.dilationWidth; + const effectiveFilterDepth = convInfo.effectiveFilterDepth; + const effectiveFilterHeight = convInfo.effectiveFilterHeight; + const effectiveFilterWidth = convInfo.effectiveFilterWidth; + const padFront = effectiveFilterDepth - 1 - convInfo.padInfo.front; + const padLeft = effectiveFilterWidth - 1 - convInfo.padInfo.left; + const padTop = effectiveFilterHeight - 1 - convInfo.padInfo.top; + const dx = buffer(input.shape, 'float32'); + const avgMultiplier = 1 / (filterDepth * filterHeight * filterWidth); + const dyBuf = backend.bufferSync(dy); + for (let batch = 0; batch < convInfo.batchSize; ++batch) { + for (let channel = 0; channel < convInfo.inChannels; ++channel) { + for (let dxDepth = 0; dxDepth < convInfo.inDepth; ++dxDepth) { + for (let dxRow = 0; dxRow < convInfo.inHeight; ++dxRow) { + for (let dxCol = 0; dxCol < convInfo.inWidth; ++dxCol) { + // Shader code begins. 
+ const dyDepthCorner = dxDepth - padFront; + const dyRowCorner = dxRow - padTop; + const dyColCorner = dxCol - padLeft; + let dotProd = 0; + for (let wDepth = 0; wDepth < effectiveFilterDepth; wDepth += dilationDepth) { + const dyDepth = (dyDepthCorner + wDepth) / strideDepth; + if (dyDepth < 0 || dyDepth >= convInfo.outDepth || + Math.floor(dyDepth) !== dyDepth) { + continue; + } + for (let wRow = 0; wRow < effectiveFilterHeight; wRow += dilationHeight) { + const dyRow = (dyRowCorner + wRow) / strideHeight; + if (dyRow < 0 || dyRow >= convInfo.outHeight || + Math.floor(dyRow) !== dyRow) { + continue; + } + for (let wCol = 0; wCol < effectiveFilterWidth; wCol += dilationWidth) { + const dyCol = (dyColCorner + wCol) / strideWidth; + if (dyCol < 0 || dyCol >= convInfo.outWidth || + Math.floor(dyCol) !== dyCol) { + continue; + } + const pixel = dyBuf.get(batch, dyDepth, dyRow, dyCol, channel); + dotProd += pixel; + } + } + } + dx.set(dotProd * avgMultiplier, batch, dxDepth, dxRow, dxCol, channel); + } + } + } + } + } + return backend.makeTensorInfo(dx.shape, dx.dtype, dx.values); + } + const avgPool3DGradConfig$1 = { + kernelName: AvgPool3DGrad, + backendName: 'cpu', + kernelFunc: avgPool3DGrad$1 + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ + function avgPoolGrad$1(args) { + const { inputs, backend, attrs } = args; + const { dy, input } = inputs; + const x = input; + assertNotComplex$1([dy, input], 'avgPoolGrad'); + const { filterSize, strides, pad } = attrs; + const convInfo = computePool2DInfo(x.shape, filterSize, strides, 1 /* dilations */, pad); + const strideHeight = convInfo.strideHeight; + const strideWidth = convInfo.strideWidth; + const filterHeight = convInfo.filterHeight; + const filterWidth = convInfo.filterWidth; + const dilationHeight = convInfo.dilationHeight; + const dilationWidth = convInfo.dilationWidth; + const effectiveFilterHeight = convInfo.effectiveFilterHeight; + const effectiveFilterWidth = convInfo.effectiveFilterWidth; + const padLeft = effectiveFilterWidth - 1 - convInfo.padInfo.left; + const padTop = effectiveFilterHeight - 1 - convInfo.padInfo.top; + const dx = buffer(x.shape, 'float32'); + const avgMultiplier = 1 / (filterHeight * filterWidth); + const dyData = backend.data.get(dy.dataId).values; + const dyBuf = buffer(dy.shape, 'float32', dyData); + for (let b = 0; b < convInfo.batchSize; ++b) { + for (let d = 0; d < convInfo.inChannels; ++d) { + for (let dxR = 0; dxR < convInfo.inHeight; ++dxR) { + for (let dxC = 0; dxC < convInfo.inWidth; ++dxC) { + // Shader code begins. 
+ const dyRCorner = dxR - padTop; + const dyCCorner = dxC - padLeft; + let dotProd = 0; + for (let wR = 0; wR < effectiveFilterHeight; wR += dilationHeight) { + const dyR = (dyRCorner + wR) / strideHeight; + if (dyR < 0 || dyR >= convInfo.outHeight || + Math.floor(dyR) !== dyR) { + continue; + } + for (let wC = 0; wC < effectiveFilterWidth; wC += dilationWidth) { + const dyC = (dyCCorner + wC) / strideWidth; + if (dyC < 0 || dyC >= convInfo.outWidth || + Math.floor(dyC) !== dyC) { + continue; + } + const pixel = dyBuf.get(b, dyR, dyC, d); + dotProd += pixel; + } + } + dx.set(dotProd * avgMultiplier, b, dxR, dxC, d); + } + } + } + } + return backend.makeTensorInfo(dx.shape, dx.dtype, dx.values); + } + const avgPoolGradConfig$1 = { + kernelName: AvgPoolGrad, + backendName: 'cpu', + kernelFunc: avgPoolGrad$1 + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ + function batchNorm$1(args) { + const { inputs, backend, attrs } = args; + const { x, scale, offset, mean, variance } = inputs; + assert$1(mean.shape.length === variance.shape.length, () => 'Batch normalization gradient requires mean and variance to have ' + + 'equal ranks.'); + assert$1(offset == null || mean.shape.length === offset.shape.length, () => 'Batch normalization gradient requires mean and offset to have ' + + 'equal ranks.'); + assert$1(scale == null || mean.shape.length === scale.shape.length, () => 'Batch normalization gradient requires mean and scale to have ' + + 'equal ranks.'); + assertNotComplex$1([x, mean, variance, scale, offset], 'batchNorm'); + let { varianceEpsilon } = attrs; + if (varianceEpsilon == null) { + varianceEpsilon = 0.001; + } + const xVals = backend.data.get(x.dataId).values; + const mVals = backend.data.get(mean.dataId).values; + const varVals = backend.data.get(variance.dataId).values; + const sVals = scale ? backend.data.get(scale.dataId).values : + new Float32Array([1]); + const offVals = offset ? 
+ backend.data.get(offset.dataId).values : + new Float32Array([0]); + const outVals = new Float32Array(xVals.length); + const offValsLength = offVals.length; + const sValsLength = sVals.length; + const varValsLength = varVals.length; + const mValsLength = mVals.length; + let offi = 0; + let mi = 0; + let si = 0; + let vi = 0; + for (let i = 0; i < xVals.length; ++i) { + outVals[i] = offVals[offi++] + + (xVals[i] - mVals[mi++]) * sVals[si++] / + Math.sqrt(varVals[vi++] + varianceEpsilon); + if (offi >= offValsLength) { + offi = 0; + } + if (mi >= mValsLength) { + mi = 0; + } + if (si >= sValsLength) { + si = 0; + } + if (vi >= varValsLength) { + vi = 0; + } + } + return backend.makeTensorInfo(x.shape, x.dtype, outVals); + } + const batchNormConfig$1 = { + kernelName: FusedBatchNorm, + backendName: 'cpu', + kernelFunc: batchNorm$1, + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ + function batchToSpaceND$1(args) { + const { inputs, backend, attrs } = args; + const { x } = inputs; + const { blockShape, crops } = attrs; + assertNotComplex$1([x], 'batchToSpaceND'); + const prod = blockShape.reduce((a, b) => a * b); + const reshaped = getReshaped(x.shape, blockShape, prod); + const permuted = getPermuted(reshaped.length, blockShape.length); + const reshapedPermuted = getReshapedPermuted(x.shape, blockShape, prod); + const sliceBeginCoords = getSliceBeginCoords(crops, blockShape.length); + const sliceSize = getSliceSize(reshapedPermuted, crops, blockShape.length); + const xReshaped = reshape$1({ inputs: { x }, backend, attrs: { shape: reshaped } }); + const xTransposed = transpose$1({ inputs: { x: xReshaped }, backend, attrs: { perm: permuted } }); + const xTransposedReshaped = reshape$1({ inputs: { x: xTransposed }, backend, attrs: { shape: reshapedPermuted } }); + const result = slice$1({ + inputs: { x: xTransposedReshaped }, + backend, + attrs: { begin: sliceBeginCoords, size: sliceSize } + }); + backend.disposeIntermediateTensorInfo(xReshaped); + backend.disposeIntermediateTensorInfo(xTransposed); + backend.disposeIntermediateTensorInfo(xTransposedReshaped); + return result; + } + const batchToSpaceNDConfig$1 = { + kernelName: BatchToSpaceND, + backendName: 'cpu', + kernelFunc: batchToSpaceND$1 + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + function bincount$1(args) { + const { inputs, backend, attrs } = args; + const { x, weights } = inputs; + const { size } = attrs; + const xVals = backend.data.get(x.dataId).values; + const weightsVals = backend.data.get(weights.dataId).values; + const outVals = bincountImpl(xVals, weightsVals, weights.dtype, weights.shape, size); + return backend.makeTensorInfo([size], weights.dtype, outVals); + } + const bincountConfig$1 = { + kernelName: Bincount, + backendName: 'cpu', + kernelFunc: bincount$1 + }; + + /** + * @license + * Copyright 2021 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + function broadcastArgs$1(args) { + const { inputs, backend } = args; + const { s0, s1 } = inputs; + const s0Vals = backend.data.get(s0.dataId).values; + const s1Vals = backend.data.get(s1.dataId).values; + const broadcastShape = assertAndGetBroadcastShape(Array.from(s0Vals), Array.from(s1Vals)); + return backend.makeTensorInfo([broadcastShape.length], 'int32', Int32Array.from(broadcastShape)); + } + const broadcastArgsConfig$1 = { + kernelName: BroadcastArgs, + backendName: 'cpu', + kernelFunc: broadcastArgs$1 + }; + + /** + * @license + * Copyright 2020 Google LLC. 
All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the License); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + const clipByValue$1 = unaryKernelFunc$1(ClipByValue, (xi, attrs) => { + const clipAttrs = attrs; + if (xi > clipAttrs.clipValueMax) { + return clipAttrs.clipValueMax; + } + return xi < clipAttrs.clipValueMin ? clipAttrs.clipValueMin : xi; + }); + const clipByValueConfig$1 = { + kernelName: ClipByValue, + backendName: 'cpu', + kernelFunc: clipByValue$1, + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the License); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ + const complexAbs$1 = (args) => { + const { x } = args.inputs; + const cpuBackend = args.backend; + const resultValues = new Float32Array(sizeFromShape(x.shape)); + const complexVals = cpuBackend.data.get(x.dataId); + const real = complexVals.complexTensorInfos.real; + const imag = complexVals.complexTensorInfos.imag; + const realVals = cpuBackend.data.get(real.dataId).values; + const imagVals = cpuBackend.data.get(imag.dataId).values; + for (let i = 0; i < realVals.length; i++) { + const real = realVals[i]; + const imag = imagVals[i]; + resultValues[i] = Math.hypot(real, imag); + } + return cpuBackend.makeOutput(resultValues, x.shape, 'float32'); + }; + const complexAbsConfig$1 = { + kernelName: ComplexAbs, + backendName: 'cpu', + kernelFunc: complexAbs$1, + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + function imag$1(args) { + const { inputs, backend } = args; + const { input } = inputs; + const imag = backend.data.get(input.dataId).complexTensorInfos.imag; + const imagVal = backend.data.get(imag.dataId).values; + // When complex tensor is disposed, its underlying parts will be disposed too. + // Make new tensor out of the imag value of the complex. 
This makes sure the + // value is still accessible even if complex tensor is disposed. + return backend.makeTensorInfo(imag.shape, imag.dtype, imagVal); + } + const imagConfig$1 = { + kernelName: Imag, + backendName: 'cpu', + kernelFunc: imag$1 + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + function concat$1(args) { + const { inputs, backend, attrs } = args; + const { axis } = attrs; + const $axis = parseAxisParam(axis, inputs[0].shape)[0]; + const shapes = inputs.map(t => t.shape); + assertParamsConsistent(shapes, $axis); + let outShape = computeOutShape$1(inputs.map(t => t.shape), $axis); + if (sizeFromShape(outShape) === 0) { + return backend.makeTensorInfo(outShape, inputs[0].dtype, []); + } + // Keep only non-empty tensors (ignore tensors with 0 in their shape). 
+ const $inputs = inputs.filter(t => sizeFromShape(t.shape) > 0); + if ($inputs.length === 1) { + return identity$1({ inputs: { x: $inputs[0] }, backend }); + } + if ($inputs[0].dtype === 'complex64') { + const reals = $inputs.map((t) => real$1({ inputs: { input: t }, backend })); + const imags = $inputs.map((t) => imag$1({ inputs: { input: t }, backend })); + const realConcated = concat$1({ inputs: reals, backend, attrs: { axis: $axis } }); + const imagConcated = concat$1({ inputs: imags, backend, attrs: { axis: $axis } }); + const result = complex$1({ inputs: { real: realConcated, imag: imagConcated }, backend }); + reals.forEach(r => backend.disposeIntermediateTensorInfo(r)); + imags.forEach(i => backend.disposeIntermediateTensorInfo(i)); + backend.disposeIntermediateTensorInfo(realConcated); + backend.disposeIntermediateTensorInfo(imagConcated); + return result; + } + // Any concat of n-dimensional tensors across any axis can be reduced to + // a concatenation of two-dimensional tensors across the axis 1 by first + // partitioning the axes of the original tensors into those less than the + // axis to be concatenated and the rest. Then reshape the tensors + // into a two-dimensional tensor by collapsing these two sets of axes and + // concatenate the resulting matrices across the axis 1, finally reshaping + // the result to have the proper shape. + const inputs2D = $inputs.map(t => { + const innerSize = sizeFromShape(t.shape.slice($axis)); + const shape = [-1, innerSize]; + return reshape$1({ inputs: { x: t }, backend, attrs: { shape } }); + }); + const inputsValShapes = inputs2D.map(t => { + return { vals: backend.data.get(t.dataId).values, shape: t.shape }; + }); + // Concats 2d tensors along axis=1. 
+ outShape = + computeOutShape$1(inputs2D.map(t => t.shape), 1 /* axis */); + const simplyConcat = inputs2D[0].shape[0] === 1; + const outVals = concatImpl$1(inputsValShapes, outShape, inputs[0].dtype, simplyConcat); + const finalOutShape = computeOutShape$1($inputs.map(t => t.shape), $axis); + const outInfo = backend.makeTensorInfo(finalOutShape, inputs[0].dtype, outVals); + inputs2D.forEach(t => backend.disposeIntermediateTensorInfo(t)); + return outInfo; + } + const concatConfig$1 = { + kernelName: Concat, + backendName: 'cpu', + kernelFunc: concat$1 + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ + function conv2D(args) { + const { inputs, backend, attrs } = args; + const { x, filter } = inputs; + const { strides, pad, dataFormat, dilations, dimRoundingMode } = attrs; + assertNotComplex$1([x, filter], 'conv2d'); + const $dataFormat = convertConv2DDataFormat(dataFormat); + const convInfo = computeConv2DInfo(x.shape, filter.shape, strides, dilations, pad, dimRoundingMode, false /* depthwise */, $dataFormat); + const filterHeight = convInfo.filterHeight; + const filterWidth = convInfo.filterWidth; + const dilationHeight = convInfo.dilationHeight; + const dilationWidth = convInfo.dilationWidth; + const padLeft = convInfo.padInfo.left; + const padTop = convInfo.padInfo.top; + const isChannelsLast = convInfo.dataFormat === 'channelsLast'; + const y = new TensorBuffer(convInfo.outShape, x.dtype); + const xStrides = computeStrides(x.shape); + const filterStrides = computeStrides(filter.shape); + const xBatchStride = xStrides[0]; + const xRowStride = isChannelsLast ? xStrides[1] : xStrides[2]; + const xColStride = isChannelsLast ? xStrides[2] : 1; + const xChannelStride = isChannelsLast ? 1 : xStrides[1]; + const yBatchStride = y.strides[0]; + const yRowStride = isChannelsLast ? y.strides[1] : y.strides[2]; + const yColStride = isChannelsLast ? y.strides[2] : 1; + const yChannelStride = isChannelsLast ? 
1 : y.strides[1]; + const xVals = backend.data.get(x.dataId).values; + const wVals = backend.data.get(filter.dataId).values; + const yVals = y.values; + for (let b = 0; b < convInfo.batchSize; ++b) { + const xOffset1 = b * xBatchStride; + const yOffset1 = b * yBatchStride; + for (let yR = 0; yR < convInfo.outHeight; ++yR) { + const yOffset2 = yOffset1 + yR * yRowStride; + const xRCorner = yR * convInfo.strideHeight - padTop; + for (let wR = 0; wR < filterHeight; ++wR) { + const xR = xRCorner + wR * dilationHeight; + if (xR < 0 || xR >= convInfo.inHeight) { + continue; + } + const wOffset1 = wR * filterStrides[0]; + const xOffset2 = xOffset1 + xR * xRowStride; + for (let yC = 0; yC < convInfo.outWidth; ++yC) { + const yOffset3 = yOffset2 + yC * yColStride; + const xCCorner = yC * convInfo.strideWidth - padLeft; + for (let wC = 0; wC < filterWidth; ++wC) { + const xC = xCCorner + wC * dilationWidth; + if (xC < 0 || xC >= convInfo.inWidth) { + continue; + } + const wOffset2 = wOffset1 + wC * filterStrides[1]; + const xOffset3 = xOffset2 + xC * xColStride; + let wOffset3 = wOffset2; + for (let d1 = 0; d1 < convInfo.inChannels; ++d1) { + const xVal = xVals[xOffset3 + d1 * xChannelStride]; + for (let d2 = 0; d2 < convInfo.outChannels; ++d2) { + yVals[yOffset3 + d2 * yChannelStride] += + xVal * wVals[wOffset3 + d2]; + } + wOffset3 += convInfo.outChannels; + } + } + } + } + } + } + return backend.makeTensorInfo(y.shape, y.dtype, yVals); + } + const conv2DConfig$1 = { + kernelName: Conv2D$1, + backendName: 'cpu', + kernelFunc: conv2D + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + function conv2DBackpropFilter$1(args) { + const { inputs, backend, attrs } = args; + const { x, dy } = inputs; + const { strides, pad, dataFormat, dimRoundingMode, filterShape } = attrs; + assertNotComplex$1([x, dy], 'conv2dBackpropFilter'); + const $dataFormat = convertConv2DDataFormat(dataFormat); + const convInfo = computeConv2DInfo(x.shape, filterShape, strides, 1 /* dilations */, pad, dimRoundingMode, false /* depthwise */, $dataFormat); + const { strideHeight, strideWidth, filterHeight, filterWidth } = convInfo; + const isChannelsLast = convInfo.dataFormat === 'channelsLast'; + const dW = new TensorBuffer(convInfo.filterShape, 'float32'); + const leftPad = convInfo.padInfo.left; + const topPad = convInfo.padInfo.top; + const xVals = backend.data.get(x.dataId).values; + const dyVals = backend.data.get(dy.dataId).values; + const xBuf = new TensorBuffer(x.shape, x.dtype, xVals); + const dyBuf = new TensorBuffer(dy.shape, dy.dtype, dyVals); + for (let wR = 0; wR < filterHeight; ++wR) { + const yRMin = Math.max(0, Math.ceil((topPad - wR) / strideHeight)); + const yRMax = Math.min(convInfo.outHeight, (convInfo.inHeight + topPad - wR) / strideHeight); + for (let wC = 0; wC < filterWidth; ++wC) { + const yCMin = Math.max(0, Math.ceil((leftPad - wC) / strideWidth)); + const yCMax = Math.min(convInfo.outWidth, (convInfo.inWidth + leftPad - wC) / strideWidth); + for (let d1 = 0; d1 < convInfo.inChannels; ++d1) { + for (let d2 = 0; d2 < convInfo.outChannels; ++d2) { 
+ let dotProd = 0; + for (let b = 0; b < convInfo.batchSize; ++b) { + for (let yR = yRMin; yR < yRMax; ++yR) { + const xR = wR + yR * strideHeight - topPad; + for (let yC = yCMin; yC < yCMax; ++yC) { + const xC = wC + yC * strideWidth - leftPad; + if (isChannelsLast) { + dotProd += xBuf.get(b, xR, xC, d1) * + dyBuf.get(b, yR, yC, d2); + } + else { + dotProd += xBuf.get(b, d1, xR, xC) * + dyBuf.get(b, d2, yR, yC); + } + } + } + } + dW.set(dotProd, wR, wC, d1, d2); + } + } + } + } + return backend.makeTensorInfo(dW.shape, dW.dtype, dW.values); + } + const conv2DBackpropFilterConfig$1 = { + kernelName: Conv2DBackpropFilter, + backendName: 'cpu', + kernelFunc: conv2DBackpropFilter$1 + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ + function conv2DBackpropInput$1(args) { + const { inputs, backend, attrs } = args; + const { dy, filter } = inputs; + const { inputShape, strides, pad, dataFormat, dimRoundingMode } = attrs; + assertNotComplex$1([dy, filter], 'conv2dBackpropInput'); + const filterStrides = computeStrides(filter.shape); + const dyStrides = computeStrides(dy.shape); + let $dataFormat = convertConv2DDataFormat(dataFormat); + const convInfo = computeConv2DInfo(inputShape, filter.shape, strides, 1 /* dilations */, pad, dimRoundingMode, false, $dataFormat); + const dx = new TensorBuffer(convInfo.inShape, 'float32'); + const dxValues = dx.values; + const dyValues = backend.data.get(dy.dataId).values; + const fltValues = backend.data.get(filter.dataId).values; + const [fltS0, fltS1, fltS2] = filterStrides; + const { batchSize, filterHeight, filterWidth, inChannels, inHeight, inWidth, outChannels, outHeight, outWidth, strideHeight, strideWidth } = convInfo; + $dataFormat = convInfo.dataFormat; + const topPad = filterHeight - 1 - convInfo.padInfo.top; + const leftPad = filterWidth - 1 - convInfo.padInfo.left; + const isChannelsLast = $dataFormat === 'channelsLast'; + const xBatchStride = dx.strides[0]; + const xRowStride = isChannelsLast ? dx.strides[1] : dx.strides[2]; + const xColStride = isChannelsLast ? dx.strides[2] : 1; + const xChannelStride = isChannelsLast ? 1 : dx.strides[1]; + const yBatchStride = dyStrides[0]; + const yRowStride = isChannelsLast ? dyStrides[1] : dyStrides[2]; + const yColStride = isChannelsLast ? dyStrides[2] : 1; + const yChannelStride = isChannelsLast ? 
1 : dyStrides[1]; + for (let b = 0; b < batchSize; ++b) { + for (let d1 = 0; d1 < inChannels; ++d1) { + for (let xR = 0; xR < inHeight; ++xR) { + const xRCorner = xR - topPad; + const xRMin = Math.max(0, Math.ceil(xRCorner / strideHeight)); + const yRMax = Math.min(outHeight, (filterHeight + xRCorner) / strideHeight); + for (let xC = 0; xC < inWidth; ++xC) { + const xCCorner = xC - leftPad; + const xCMin = Math.max(0, Math.ceil(xCCorner / strideWidth)); + const yCMax = Math.min(outWidth, (filterWidth + xCCorner) / strideWidth); + let dotProd = 0; + for (let yR = xRMin; yR < yRMax; ++yR) { + const wR = yR * strideHeight - xRCorner; + for (let yC = xCMin; yC < yCMax; ++yC) { + const wC = yC * strideWidth - xCCorner; + const dyOffset = yBatchStride * b + yRowStride * yR + yColStride * yC; + const fltOffset = fltS0 * (filterHeight - 1 - wR) + + fltS1 * (filterWidth - 1 - wC) + fltS2 * d1; + for (let d2 = 0; d2 < outChannels; ++d2) { + const pixel = dyValues[dyOffset + yChannelStride * d2]; + const weight = fltValues[fltOffset + d2]; + dotProd += pixel * weight; + } + } + } + const dxOffset = xBatchStride * b + xRowStride * xR + + xColStride * xC + xChannelStride * d1; + dxValues[dxOffset] = dotProd; + } + } + } + } + return backend.makeTensorInfo(dx.shape, dx.dtype, dx.values); + } + const conv2DBackpropInputConfig$1 = { + kernelName: Conv2DBackpropInput, + backendName: 'cpu', + kernelFunc: conv2DBackpropInput$1 + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + function conv3D$1(args) { + const { inputs, backend, attrs } = args; + const { x, filter } = inputs; + const { strides, pad, dilations } = attrs; + assertNotComplex$1([x, filter], 'conv3d'); + const convInfo = computeConv3DInfo(x.shape, filter.shape, strides, dilations, pad); + const { filterDepth, filterHeight, filterWidth, dilationDepth, dilationHeight, dilationWidth, padInfo } = convInfo; + const padFront = padInfo.front; + const padLeft = padInfo.left; + const padTop = padInfo.top; + const y = new TensorBuffer(convInfo.outShape, x.dtype); + const xVals = backend.data.get(x.dataId).values; + const wVals = backend.data.get(filter.dataId).values; + const yVals = y.values; + const xStrides = computeStrides(x.shape); + const filterStrides = computeStrides(filter.shape); + for (let b = 0; b < convInfo.batchSize; ++b) { + const xOffset1 = b * xStrides[0]; + const yOffset1 = b * y.strides[0]; + for (let yF = 0; yF < convInfo.outDepth; ++yF) { + const yOffset2 = yOffset1 + yF * y.strides[1]; + const xFCorner = yF * convInfo.strideDepth - padFront; + for (let wF = 0; wF < filterDepth; ++wF) { + const xF = xFCorner + wF * dilationDepth; + if (xF < 0 || xF >= convInfo.inDepth) { + continue; + } + const wOffset1 = wF * filterStrides[0]; + const xOffset2 = xOffset1 + xF * xStrides[1]; + for (let yR = 0; yR < convInfo.outHeight; ++yR) { + const yOffset3 = yOffset2 + yR * y.strides[2]; + const xRCorner = yR * convInfo.strideHeight - padTop; + for (let wR = 0; wR < filterHeight; ++wR) { + const xR = xRCorner + wR * dilationHeight; + if (xR < 0 || xR >= convInfo.inHeight) { + continue; + } + const wOffset2 = wOffset1 + wR * filterStrides[1]; + const xOffset3 = xOffset2 + xR * xStrides[2]; + for (let yC = 0; yC < convInfo.outWidth; ++yC) { + const yOffset4 = yOffset3 + yC * 
convInfo.outChannels; + const xCCorner = yC * convInfo.strideWidth - padLeft; + for (let wC = 0; wC < filterWidth; ++wC) { + const xC = xCCorner + wC * dilationWidth; + if (xC < 0 || xC >= convInfo.inWidth) { + continue; + } + const wOffset3 = wOffset2 + wC * filterStrides[2]; + const xOffset4 = xOffset3 + xC * convInfo.inChannels; + let wOffset4 = wOffset3; + for (let d1 = 0; d1 < convInfo.inChannels; ++d1) { + const xVal = xVals[xOffset4 + d1]; + for (let d2 = 0; d2 < convInfo.outChannels; ++d2) { + yVals[yOffset4 + d2] += xVal * wVals[wOffset4 + d2]; + } + wOffset4 += convInfo.outChannels; + } + } + } + } + } + } + } + } + return backend.makeTensorInfo(y.shape, y.dtype, y.values); + } + const conv3DConfig$1 = { + kernelName: Conv3D$1, + backendName: 'cpu', + kernelFunc: conv3D$1 + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ + function conv3DBackpropFilterV2$1(args) { + const { inputs, backend, attrs } = args; + const { x, dy } = inputs; + const { strides, pad, filterShape } = attrs; + assertNotComplex$1([x, dy], 'conv3dBackpropFilterV2'); + const xStrides = computeStrides(x.shape); + const dyStrides = computeStrides(dy.shape); + const convInfo = computeConv3DInfo(x.shape, filterShape, strides, 1 /* dilations */, pad); + const strideDepth = convInfo.strideDepth; + const strideHeight = convInfo.strideHeight; + const strideWidth = convInfo.strideWidth; + const filterDepth = convInfo.filterDepth; + const filterHeight = convInfo.filterHeight; + const filterWidth = convInfo.filterWidth; + const dw = new TensorBuffer(convInfo.filterShape, 'float32'); + const dwValues = dw.values; + const [dwS0, dwS1, dwS2, dwS3] = dw.strides; + const dyValues = backend.data.get(dy.dataId).values; + const [dyS0, dyS1, dyS2, dyS3] = dyStrides; + const xValues = backend.data.get(x.dataId).values; + const [xS0, xS1, xS2, xS3] = xStrides; + const frontPad = convInfo.padInfo.front; + const leftPad = convInfo.padInfo.left; + const topPad = convInfo.padInfo.top; + for (let wF = 0; wF < filterDepth; ++wF) { + const yFMin = Math.max(0, Math.ceil((frontPad - wF) / strideDepth)); + const yFMax = Math.min(convInfo.outDepth, (convInfo.inDepth + frontPad - wF) / strideDepth); + const wOffset1 = wF * dwS0; + for (let wR = 0; wR < filterHeight; ++wR) { + const yRMin = Math.max(0, Math.ceil((topPad - wR) / strideHeight)); + const yRMax = Math.min(convInfo.outHeight, (convInfo.inHeight + topPad - wR) / strideHeight); + const wOffset2 = wR * dwS1 + wOffset1; + for (let wC = 0; wC < filterWidth; ++wC) { + const yCMin = Math.max(0, Math.ceil((leftPad - wC) / strideWidth)); + const yCMax = Math.min(convInfo.outWidth, (convInfo.inWidth + leftPad - wC) / strideWidth); + const wOffset3 = wC * dwS2 + wOffset2; + for (let d1 = 0; d1 < 
convInfo.inChannels; ++d1) { + const wOffset4 = d1 * dwS3 + wOffset3; + for (let d2 = 0; d2 < convInfo.outChannels; ++d2) { + let dotProd = 0; + for (let b = 0; b < convInfo.batchSize; ++b) { + const xOffset1 = b * xS0; + const yOffset1 = b * dyS0; + for (let yF = yFMin; yF < yFMax; ++yF) { + const xF = wF + yF * strideDepth - frontPad; + const xOffset2 = xF * xS1 + xOffset1; + const yOffset2 = yF * dyS1 + yOffset1; + for (let yR = yRMin; yR < yRMax; ++yR) { + const xR = wR + yR * strideHeight - topPad; + const xOffset3 = xR * xS2 + xOffset2; + const yOffset3 = yR * dyS2 + yOffset2; + for (let yC = yCMin; yC < yCMax; ++yC) { + const xC = wC + yC * strideWidth - leftPad; + const xOffset4 = xC * xS3 + xOffset3; + const yOffset4 = yC * dyS3 + yOffset3; + dotProd += xValues[xOffset4 + d1] * dyValues[yOffset4 + d2]; + } + } + } + } + dwValues[wOffset4 + d2] = dotProd; + } + } + } + } + } + return backend.makeTensorInfo(dw.shape, dw.dtype, dw.values); + } + const conv3DBackpropFilterV2Config$1 = { + kernelName: Conv3DBackpropFilterV2, + backendName: 'cpu', + kernelFunc: conv3DBackpropFilterV2$1 + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ + function conv3DBackpropInputV2(args) { + const { inputs, backend, attrs } = args; + const { dy, filter } = inputs; + const { pad, strides, inputShape } = attrs; + assertNotComplex$1([dy], 'conv3dBackpropInputV2'); + const dyStrides = computeStrides(dy.shape); + const filterStrides = computeStrides(filter.shape); + const convInfo = computeConv3DInfo(inputShape, filter.shape, strides, 1 /* dilations */, pad); + const dx = new TensorBuffer(convInfo.inShape, 'float32'); + const dxValues = dx.values; + const [dxS0, dxS1, dxS2, dxS3] = dx.strides; + const dyValues = backend.data.get(dy.dataId).values; + const [dyS0, dyS1, dyS2, dyS3] = dyStrides; + const fltValues = backend.data.get(filter.dataId).values; + const [fltS0, fltS1, fltS2, fltS3] = filterStrides; + const { batchSize, filterDepth, filterHeight, filterWidth, inChannels, inDepth, inHeight, inWidth, outChannels, outDepth, outHeight, outWidth, strideDepth, strideHeight, strideWidth } = convInfo; + const frontPad = filterDepth - 1 - convInfo.padInfo.front; + const topPad = filterHeight - 1 - convInfo.padInfo.top; + const leftPad = filterWidth - 1 - convInfo.padInfo.left; + for (let b = 0; b < batchSize; ++b) { + for (let d1 = 0; d1 < inChannels; ++d1) { + // Frames of depth + for (let xF = 0; xF < inDepth; ++xF) { + const xFCorner = xF - frontPad; + const xFMin = Math.max(0, Math.ceil(xFCorner / strideDepth)); + const yFMax = Math.min(outDepth, (filterDepth + xFCorner) / strideDepth); + // Rows as per standard 2d matrix notation + for (let xR = 0; xR < inHeight; ++xR) { + const xRCorner = xR - topPad; + const xRMin = Math.max(0, Math.ceil(xRCorner / strideHeight)); + const yRMax = Math.min(outHeight, (filterHeight + xRCorner) / strideHeight); + // Columns as per standard 2d matrix notation + for (let xC = 0; xC < inWidth; ++xC) { + const xCCorner = xC - leftPad; + const xCMin = Math.max(0, Math.ceil(xCCorner / strideWidth)); + 
const yCMax = Math.min(outWidth, (filterWidth + xCCorner) / strideWidth); + let dotProd = 0; + for (let yF = xFMin; yF < yFMax; ++yF) { + const wF = yF * strideDepth - xFCorner; + for (let yR = xRMin; yR < yRMax; ++yR) { + const wR = yR * strideHeight - xRCorner; + for (let yC = xCMin; yC < yCMax; ++yC) { + const wC = yC * strideWidth - xCCorner; + const dyOffset = dyS0 * b + dyS1 * yF + dyS2 * yR + dyS3 * yC; + const fltOffset = fltS0 * (filterDepth - 1 - wF) + + fltS1 * (filterHeight - 1 - wR) + + fltS2 * (filterWidth - 1 - wC) + fltS3 * d1; + for (let d2 = 0; d2 < outChannels; ++d2) { + const pixel = dyValues[dyOffset + d2]; + const weight = fltValues[fltOffset + d2]; + dotProd += pixel * weight; + } + } + } + } + dxValues[dxS0 * b + dxS1 * xF + dxS2 * xR + dxS3 * xC + d1] = + dotProd; + } + } + } + } + } + return backend.makeTensorInfo(dx.shape, dx.dtype, dx.values); + } + const conv3DBackpropInputV2Config = { + kernelName: Conv3DBackpropInputV2, + backendName: 'cpu', + kernelFunc: conv3DBackpropInputV2 + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + const cos$1 = unaryKernelFunc$1(Cos, (xi) => Math.cos(xi)); + const cosConfig$1 = { + kernelName: Cos, + backendName: 'cpu', + kernelFunc: cos$1, + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. 
+ * Licensed under the Apache License, Version 2.0 (the License); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + const cosh$1 = unaryKernelFunc$1(Cosh, (xi) => Math.cosh(xi)); + const coshConfig$1 = { + kernelName: Cosh, + backendName: 'cpu', + kernelFunc: cosh$1, + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ + function cropAndResize$1(args) { + const { inputs, backend, attrs } = args; + const { image, boxes, boxInd } = inputs; + const { cropSize, method, extrapolationValue } = attrs; + const [batch, imageHeight, imageWidth, numChannels] = image.shape; + const numBoxes = boxes.shape[0]; + const [cropHeight, cropWidth] = cropSize; + const output = buffer([numBoxes, cropHeight, cropWidth, numChannels], 'float32'); + const boxVals = backend.data.get(boxes.dataId).values; + const boxIndVals = backend.data.get(boxInd.dataId).values; + const imageVals = backend.data.get(image.dataId).values; + const inStride = computeStrides(image.shape); // to calculate flat indexes into image + const outStride = computeStrides(output.shape); // to calculate flat indexes into output + // Reference implementation + // tslint:disable-next-line:max-line-length + // https://github.com/tensorflow/tensorflow/blob/master/tensorflow/core/kernels/crop_and_resize_op.cc + for (let b = 0; b < numBoxes; b++) { + const startInd = b * 4; + const y1 = boxVals[startInd]; + const x1 = boxVals[startInd + 1]; + const y2 = boxVals[startInd + 2]; + const x2 = boxVals[startInd + 3]; + const bInd = boxIndVals[b]; + if (bInd >= batch) { + continue; + } + const heightScale = (cropHeight > 1) ? (y2 - y1) * (imageHeight - 1) / (cropHeight - 1) : 0; + const widthScale = (cropWidth > 1) ? (x2 - x1) * (imageWidth - 1) / (cropWidth - 1) : 0; + for (let y = 0; y < cropHeight; y++) { + const yInd = (cropHeight > 1) ? 
+ y1 * (imageHeight - 1) + y * (heightScale) : + 0.5 * (y1 + y2) * (imageHeight - 1); + if (yInd < 0 || yInd > imageHeight - 1) { + for (let x = 0; x < cropWidth; x++) { + for (let c = 0; c < numChannels; c++) { + const ind = c + x * outStride[2] + y * outStride[1] + b * outStride[0]; + output.values[ind] = extrapolationValue; + } + } + continue; + } + if (method === 'bilinear') { + const topInd = Math.floor(yInd); + const bottomInd = Math.ceil(yInd); + const yLerp = yInd - topInd; + for (let x = 0; x < cropWidth; x++) { + const xInd = (cropWidth > 1) ? + x1 * (imageWidth - 1) + x * widthScale : + 0.5 * (x1 + x2) * (imageWidth - 1); + if (xInd < 0 || xInd > imageWidth - 1) { + for (let c = 0; c < numChannels; c++) { + const ind = c + x * outStride[2] + y * outStride[1] + b * outStride[0]; + output.values[ind] = extrapolationValue; + } + continue; + } + const leftInd = Math.floor(xInd); + const rightInd = Math.ceil(xInd); + const xLerp = xInd - leftInd; + for (let c = 0; c < numChannels; c++) { + let ind = c + leftInd * inStride[2] + topInd * inStride[1] + + bInd * inStride[0]; + const topLeft = imageVals[ind]; + ind = c + rightInd * inStride[2] + topInd * inStride[1] + + bInd * inStride[0]; + const topRight = imageVals[ind]; + ind = c + leftInd * inStride[2] + bottomInd * inStride[1] + + bInd * inStride[0]; + const bottomLeft = imageVals[ind]; + ind = c + rightInd * inStride[2] + bottomInd * inStride[1] + + bInd * inStride[0]; + const bottomRight = imageVals[ind]; + const top = topLeft + (topRight - topLeft) * xLerp; + const bottom = bottomLeft + (bottomRight - bottomLeft) * xLerp; + ind = c + x * outStride[2] + y * outStride[1] + b * outStride[0]; + output.values[ind] = top + ((bottom - top) * yLerp); + } + } + } + else { // method == "nearest" + for (let x = 0; x < cropWidth; ++x) { + const xInd = (cropWidth > 1) ? 
+ x1 * (imageWidth - 1) + x * widthScale : + 0.5 * (x1 + x2) * (imageWidth - 1); + if (xInd < 0 || xInd > imageWidth - 1) { + for (let c = 0; c < numChannels; c++) { + const ind = c + x * outStride[2] + y * outStride[1] + b * outStride[0]; + output.values[ind] = extrapolationValue; + } + continue; + } + const closestX = Math.round(xInd); + const closestY = Math.round(yInd); + for (let c = 0; c < numChannels; c++) { + const inInd = c + closestX * inStride[2] + closestY * inStride[1] + + bInd * inStride[0]; + const outInd = c + x * outStride[2] + y * outStride[1] + b * outStride[0]; + output.values[outInd] = imageVals[inInd]; + } + } + } + } + } + return backend.makeTensorInfo(output.shape, output.dtype, output.values); + } + const cropAndResizeConfig$1 = { + kernelName: CropAndResize, + backendName: 'cpu', + kernelFunc: cropAndResize$1 + }; + + /** + * @license + * Copyright 2022 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ + function cumprod$1(args) { + const { inputs, backend, attrs } = args; + const { x } = inputs; + const { axis, exclusive, reverse } = attrs; + assertNotComplex$1(x, 'cumprod'); + const permutation = getAxesPermutation([axis], x.shape.length); + let $x = x; + if (permutation != null) { + $x = transpose$1({ inputs: { x }, backend, attrs: { perm: permutation } }); + } + const permutedAxis = getInnerMostAxes(1, x.shape.length)[0]; + if (permutedAxis !== $x.shape.length - 1) { + throw new Error(`backend.cumprod in CPU expects an inner-most ` + + `axis=${$x.shape.length - 1} but got axis=${permutedAxis}`); + } + const resultDtype = upcastType($x.dtype, 'int32'); + const vals = makeOnesTypedArray(sizeFromShape($x.shape), resultDtype); + const aVals = backend.data.get($x.dataId).values; + const finalDim = $x.shape[$x.shape.length - 1]; + const indexAdjuster = reverse ? + (i, j) => i + finalDim - j - 1 : + (i, j) => i + j; + for (let i = 0; i < aVals.length; i += finalDim) { + for (let j = 0; j < finalDim; j++) { + const idx = indexAdjuster(i, j); + if (j === 0) { + vals[idx] = exclusive ? 1 : aVals[idx]; + } + else { + const prevIdx = indexAdjuster(i, j - 1); + vals[idx] = exclusive ? aVals[prevIdx] * vals[prevIdx] : + aVals[idx] * vals[prevIdx]; + } + } + } + const result = backend.makeTensorInfo($x.shape, resultDtype, vals); + if (permutation != null) { + const reversePermutation = getUndoAxesPermutation(permutation); + const reverseTransposedResult = transpose$1({ inputs: { x: result }, backend, attrs: { perm: reversePermutation } }); + backend.disposeIntermediateTensorInfo(result); + backend.disposeIntermediateTensorInfo($x); + return reverseTransposedResult; + } + return result; + } + const cumprodConfig$1 = { + kernelName: Cumprod, + backendName: 'cpu', + kernelFunc: cumprod$1 + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. 
+ * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + function cumsum$1(args) { + const { inputs, backend, attrs } = args; + const { x } = inputs; + const { axis, exclusive, reverse } = attrs; + assertNotComplex$1(x, 'cumsum'); + const permutation = getAxesPermutation([axis], x.shape.length); + let $x = x; + if (permutation != null) { + $x = transpose$1({ inputs: { x }, backend, attrs: { perm: permutation } }); + } + const permutedAxis = getInnerMostAxes(1, x.shape.length)[0]; + if (permutedAxis !== $x.shape.length - 1) { + throw new Error(`backend.cumsum in CPU expects an inner-most ` + + `axis=${$x.shape.length - 1} but got axis=${permutedAxis}`); + } + const resultDtype = upcastType($x.dtype, 'int32'); + const vals = makeZerosTypedArray(sizeFromShape($x.shape), resultDtype); + const aVals = backend.data.get($x.dataId).values; + const finalDim = $x.shape[$x.shape.length - 1]; + const indexAdjuster = reverse ? + (i, j) => i + finalDim - j - 1 : + (i, j) => i + j; + for (let i = 0; i < aVals.length; i += finalDim) { + for (let j = 0; j < finalDim; j++) { + const idx = indexAdjuster(i, j); + if (j === 0) { + vals[idx] = exclusive ? 0 : aVals[idx]; + } + else { + const prevIdx = indexAdjuster(i, j - 1); + vals[idx] = exclusive ? 
aVals[prevIdx] + vals[prevIdx] : + aVals[idx] + vals[prevIdx]; + } + } + } + const result = backend.makeTensorInfo($x.shape, resultDtype, vals); + if (permutation != null) { + const reversePermutation = getUndoAxesPermutation(permutation); + const reverseTransposedResult = transpose$1({ inputs: { x: result }, backend, attrs: { perm: reversePermutation } }); + backend.disposeIntermediateTensorInfo(result); + backend.disposeIntermediateTensorInfo($x); + return reverseTransposedResult; + } + return result; + } + const cumsumConfig$1 = { + kernelName: Cumsum, + backendName: 'cpu', + kernelFunc: cumsum$1 + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ + function denseBincount$1(args) { + const { inputs, backend, attrs } = args; + const { x, weights } = inputs; + const { size, binaryOutput } = attrs; + if (x.shape.length === 1) { + const xVals = backend.data.get(x.dataId).values; + const weightsVals = backend.data.get(weights.dataId).values; + const outVals = bincountImpl(xVals, weightsVals, weights.dtype, weights.shape, size); + return backend.makeTensorInfo([size], weights.dtype, outVals); + } + else if (x.shape.length === 2) { + const xBuf = backend.bufferSync(x); + const weightsBuf = backend.bufferSync(weights); + const outBuf = bincountReduceImpl(xBuf, weightsBuf, size, binaryOutput); + return backend.makeTensorInfo(outBuf.shape, weights.dtype, outBuf.values); + } + throw new Error(`Error in denseBincount: input must be at most rank 2, but got rank` + + `${x.shape.length}.`); + } + const denseBincountConfig$1 = { + kernelName: DenseBincount, + backendName: 'cpu', + kernelFunc: denseBincount$1 + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ + function depthToSpace$1(args) { + const { inputs, backend, attrs } = args; + const { x } = inputs; + const { blockSize, dataFormat } = attrs; + assert$1(dataFormat === 'NHWC', () => `Only NHWC dataFormat supported on CPU for depthToSpace. Got ${dataFormat}`); + const batchSize = x.shape[0]; + const inputHeight = x.shape[1]; + const inputWidth = x.shape[2]; + const inputDepth = x.shape[3]; + const outputHeight = inputHeight * blockSize; + const outputWidth = inputWidth * blockSize; + const outputDepth = inputDepth / (blockSize * blockSize); + const xValues = backend.data.get(x.dataId).values; + const result = new Float32Array(batchSize * outputHeight * outputWidth * outputDepth); + let outputIdx = 0; + for (let b = 0; b < batchSize; ++b) { + for (let h = 0; h < outputHeight; ++h) { + const inH = Math.floor(h / blockSize); + const offsetH = (h % blockSize); + for (let w = 0; w < outputWidth; ++w) { + const inW = Math.floor(w / blockSize); + const offsetW = (w % blockSize); + const offsetD = (offsetH * blockSize + offsetW) * outputDepth; + for (let d = 0; d < outputDepth; ++d) { + const inD = d + offsetD; + const inputIdx = inD + inputDepth * (inW + inputWidth * (inH + inputHeight * b)); + result[outputIdx++] = xValues[inputIdx]; + } + } + } + } + return backend.makeTensorInfo([batchSize, outputHeight, outputWidth, outputDepth], x.dtype, result); + } + const depthToSpaceConfig$1 = { + kernelName: DepthToSpace, + backendName: 'cpu', + kernelFunc: depthToSpace$1 + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + function depthwiseConv2dNative$1(args) { + const { inputs, backend, attrs } = args; + const { x, filter } = inputs; + const { strides, pad, dilations, dimRoundingMode } = attrs; + assertNotComplex$1([x, filter], 'depthwiseConv2DNative'); + const xStrides = computeStrides(x.shape); + const filterStrides = computeStrides(filter.shape); + let $dilations = dilations; + if ($dilations == null) { + $dilations = [1, 1]; + } + assert$1(eitherStridesOrDilationsAreOne(strides, $dilations), () => 'Error in depthwiseConv2d: Either strides or dilations must be ' + + `1. 
Got strides ${strides} and dilations '${$dilations}'`); + const convInfo = computeConv2DInfo(x.shape, filter.shape, strides, $dilations, pad, dimRoundingMode, true /* depthwise */); + const { filterHeight, filterWidth, dilationHeight, dilationWidth, padInfo } = convInfo; + const padLeft = padInfo.left; + const padTop = padInfo.top; + const chMul = convInfo.outChannels / convInfo.inChannels; + const y = new TensorBuffer(convInfo.outShape, x.dtype); + const xVals = backend.data.get(x.dataId).values; + const wVals = backend.data.get(filter.dataId).values; + const yVals = y.values; + for (let b = 0; b < convInfo.batchSize; ++b) { + const xOffset1 = b * xStrides[0]; + const yOffset1 = b * y.strides[0]; + for (let yR = 0; yR < convInfo.outHeight; ++yR) { + const yOffset2 = yOffset1 + yR * y.strides[1]; + const xRCorner = yR * convInfo.strideHeight - padTop; + for (let wR = 0; wR < filterHeight; ++wR) { + const xR = xRCorner + wR * dilationHeight; + if (xR < 0 || xR >= convInfo.inHeight) { + continue; + } + const wOffset1 = wR * filterStrides[0]; + const xOffset2 = xOffset1 + xR * xStrides[1]; + for (let yC = 0; yC < convInfo.outWidth; ++yC) { + const yOffset3 = yOffset2 + yC * y.strides[2]; + const xCCorner = yC * convInfo.strideWidth - padLeft; + for (let wC = 0; wC < filterWidth; ++wC) { + const xC = xCCorner + wC * dilationWidth; + if (xC < 0 || xC >= convInfo.inWidth) { + continue; + } + const wOffset2 = wOffset1 + wC * filterStrides[1]; + const xOffset3 = xOffset2 + xC * convInfo.inChannels; + let yOffset4 = yOffset3; + let wOffset3 = wOffset2; + for (let d1 = 0; d1 < convInfo.inChannels; ++d1) { + const xVal = xVals[xOffset3 + d1]; + for (let q = 0; q < chMul; ++q) { + yVals[yOffset4 + q] += xVal * wVals[wOffset3 + q]; + } + yOffset4 += chMul; + wOffset3 += chMul; + } + } + } + } + } + } + return backend.makeTensorInfo(y.shape, y.dtype, y.values); + } + const depthwiseConv2dNativeConfig$1 = { + kernelName: DepthwiseConv2dNative, + backendName: 'cpu', + kernelFunc: 
depthwiseConv2dNative$1 + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + function depthwiseConv2dNativeBackpropFilter$1(args) { + const { inputs, backend, attrs } = args; + const { x, dy } = inputs; + const { strides, dilations, pad, dimRoundingMode, filterShape } = attrs; + assertNotComplex$1([x, dy], 'depthwiseConv2dNativeBackpropFilter'); + const convInfo = computeConv2DInfo(x.shape, filterShape, strides, dilations, pad, dimRoundingMode, true /* depthwise */); + const { strideHeight, strideWidth, filterHeight, filterWidth } = convInfo; + const dW = new TensorBuffer(convInfo.filterShape, 'float32'); + const leftPad = convInfo.padInfo.left; + const topPad = convInfo.padInfo.top; + const chMul = convInfo.outChannels / convInfo.inChannels; + const xVals = backend.data.get(x.dataId).values; + const xBuf = new TensorBuffer(x.shape, x.dtype, xVals); + const dyVals = backend.data.get(dy.dataId).values; + const dyBuf = new TensorBuffer(dy.shape, dy.dtype, dyVals); + for (let wR = 0; wR < filterHeight; ++wR) { + const yRMin = Math.max(0, Math.ceil((topPad - wR) / strideHeight)); + const yRMax = Math.min(convInfo.outHeight, (convInfo.inHeight + topPad - wR) / strideHeight); + for (let wC = 0; wC < filterWidth; ++wC) { + const yCMin = Math.max(0, Math.ceil((leftPad - wC) / strideWidth)); + const yCMax = 
Math.min(convInfo.outWidth, (convInfo.inWidth + leftPad - wC) / strideWidth); + for (let d2 = 0; d2 < convInfo.outChannels; ++d2) { + const d1 = Math.trunc(d2 / chMul); + const dm = d2 % chMul; + let dotProd = 0; + for (let b = 0; b < convInfo.batchSize; ++b) { + for (let yR = yRMin; yR < yRMax; ++yR) { + const xR = wR + yR * strideHeight - topPad; + for (let yC = yCMin; yC < yCMax; ++yC) { + const xC = wC + yC * strideWidth - leftPad; + dotProd += xBuf.get(b, xR, xC, d1) * + dyBuf.get(b, yR, yC, d2); + } + } + } + dW.set(dotProd, wR, wC, d1, dm); + } + } + } + return backend.makeTensorInfo(dW.shape, dW.dtype, dW.values); + } + const depthwiseConv2dNativeBackpropFilterConfig$1 = { + kernelName: DepthwiseConv2dNativeBackpropFilter, + backendName: 'cpu', + kernelFunc: depthwiseConv2dNativeBackpropFilter$1 + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ + function depthwiseConv2dNativeBackpropInput$1(args) { + const { inputs, backend, attrs } = args; + const { dy, filter } = inputs; + const { strides, dilations, pad, dimRoundingMode, inputShape } = attrs; + assertNotComplex$1([dy, filter], 'depthwiseConv2DNativeBackpropInput'); + const dyStrides = computeStrides(dy.shape); + const filterStrides = computeStrides(filter.shape); + const convInfo = computeConv2DInfo(inputShape, filter.shape, strides, dilations, pad, dimRoundingMode, true /* depthwise */); + const dx = new TensorBuffer(convInfo.inShape, 'float32'); + const dxValues = dx.values; + const [dxS0, dxS1, dxS2] = dx.strides; + const dyValues = backend.data.get(dy.dataId).values; + const [dyS0, dyS1, dyS2] = dyStrides; + const fltValues = backend.data.get(filter.dataId).values; + const [fltS0, fltS1, fltS2] = filterStrides; + const { batchSize, filterHeight, filterWidth, inChannels, inHeight, inWidth, outChannels, outHeight, outWidth, strideHeight, strideWidth } = convInfo; + const topPad = filterHeight - 1 - convInfo.padInfo.top; + const leftPad = filterWidth - 1 - convInfo.padInfo.left; + const chMul = outChannels / inChannels; + for (let b = 0; b < batchSize; ++b) { + for (let d1 = 0; d1 < inChannels; ++d1) { + for (let xR = 0; xR < inHeight; ++xR) { + const xRCorner = xR - topPad; + const xRMin = Math.max(0, Math.ceil(xRCorner / strideHeight)); + const yRMax = Math.min(outHeight, (filterHeight + xRCorner) / strideHeight); + for (let xC = 0; xC < inWidth; ++xC) { + const xCCorner = xC - leftPad; + const xCMin = Math.max(0, Math.ceil(xCCorner / strideWidth)); + const yCMax = Math.min(outWidth, (filterWidth + xCCorner) / strideWidth); + let dotProd = 0; + for (let yR = xRMin; yR < yRMax; ++yR) { + const wR = yR * strideHeight - xRCorner; + for (let yC = xCMin; yC < yCMax; ++yC) { + const wC = yC * strideWidth - xCCorner; + const dyOffset = dyS0 * b + dyS1 * yR + dyS2 * yC; 
+ const fltOffset = fltS0 * (filterHeight - 1 - wR) + + fltS1 * (filterWidth - 1 - wC) + fltS2 * d1; + for (let dm = 0; dm < chMul; ++dm) { + const d2 = d1 * chMul + dm; + const pixel = dyValues[dyOffset + d2]; + const weight = fltValues[fltOffset + dm]; + dotProd += pixel * weight; + } + } + } + dxValues[dxS0 * b + dxS1 * xR + dxS2 * xC + d1] = dotProd; + } + } + } + } + return backend.makeTensorInfo(dx.shape, dx.dtype, dx.values); + } + const depthwiseConv2dNativeBackpropInputConfig$1 = { + kernelName: DepthwiseConv2dNativeBackpropInput, + backendName: 'cpu', + kernelFunc: depthwiseConv2dNativeBackpropInput$1 + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + function diag$1(args) { + const { inputs, backend } = args; + const { x } = inputs; + const xSize = sizeFromShape(x.shape); + const xVals = backend.data.get(x.dataId).values; + const outBuf = buffer([xSize, xSize], x.dtype); + const vals = outBuf.values; + for (let i = 0; i < xVals.length; i++) { + vals[i * xSize + i] = xVals[i]; + } + const outShape = [...x.shape, ...x.shape]; + return backend.makeTensorInfo(outShape, outBuf.dtype, outBuf.values); + } + const diagConfig$1 = { + kernelName: Diag, + backendName: 'cpu', + kernelFunc: diag$1 + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. 
+ * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + const dilation2DConfig$1 = { + kernelName: Dilation2D, + backendName: 'cpu', + kernelFunc: ({ inputs, backend, attrs }) => { + const { x, filter } = inputs; + const { strides, pad, dilations } = attrs; + const cpuBackend = backend; + const xVals = cpuBackend.data.get(x.dataId).values; + const xRank = x.shape.length; + const filterVals = cpuBackend.data.get(filter.dataId).values; + const filterRank = filter.shape.length; + const { batchSize, inHeight, inWidth, inChannels, outHeight, outWidth, padInfo, strideHeight, strideWidth, filterHeight, filterWidth, dilationHeight, dilationWidth, outShape } = computeDilation2DInfo(x.shape, filter.shape, strides, pad, 'NHWC' /* dataFormat */, dilations); + const outSize = sizeFromShape(outShape); + const outRank = outShape.length; + const outputVals = getArrayFromDType(x.dtype, outSize); + // Upsampling the input by fill in `dilation size - 1` values between each + // input value. 
+ // This implementation follows the TF c++ implementation: + // https://github.com/tensorflow/tensorflow/blob/d9a3a849edc198e90172bc58eb293de457f9d986/tensorflow/core/kernels/dilation_ops.cc + for (let b = 0; b < batchSize; ++b) { + for (let hOut = 0; hOut < outHeight; ++hOut) { + const hBeg = hOut * strideHeight - padInfo.top; + for (let wOut = 0; wOut < outWidth; ++wOut) { + const wBeg = wOut * strideWidth - padInfo.left; + for (let d = 0; d < inChannels; ++d) { + let curVal = Number.MIN_SAFE_INTEGER; + for (let h = 0; h < filterHeight; ++h) { + const hIn = hBeg + h * dilationHeight; + if (hIn >= 0 && hIn < inHeight) { + for (let w = 0; w < filterWidth; ++w) { + const wIn = wBeg + w * dilationWidth; + if (wIn >= 0 && wIn < inWidth) { + const xIndex = locToIndex([b, hIn, wIn, d], xRank, computeStrides(x.shape)); + const filterIndex = locToIndex([h, w, d], filterRank, computeStrides(filter.shape)); + const val = xVals[xIndex] + filterVals[filterIndex]; + if (val > curVal) { + curVal = val; + } + } + } + } + } + const outputIndex = locToIndex([b, hOut, wOut, d], outRank, computeStrides(outShape)); + outputVals[outputIndex] = curVal; + } + } + } + } + const dataId = cpuBackend.write(toTypedArray(outputVals, x.dtype), outShape, x.dtype); + return { dataId, shape: outShape, dtype: x.dtype }; + } + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ + const dilation2DBackpropFilterConfig = { + kernelName: Dilation2DBackpropFilter, + backendName: 'cpu', + kernelFunc: ({ inputs, backend, attrs }) => { + const { x, filter, dy } = inputs; + const { strides, pad, dilations } = attrs; + const cpuBackend = backend; + const $x = toNestedArray(x.shape, cpuBackend.data.get(x.dataId).values); + const $filter = toNestedArray(filter.shape, cpuBackend.data.get(filter.dataId).values); + const { batchSize, inHeight, inWidth, inChannels, outHeight, outWidth, padInfo, strideHeight, strideWidth, filterHeight, filterWidth, dilationHeight, dilationWidth, outShape } = computeDilation2DInfo(x.shape, filter.shape, strides, pad, 'NHWC' /* dataFormat */, dilations); + assert$1(dy.rank === outShape.length, () => `Error in ${Dilation2DBackpropFilter}, dy ` + + `must have the same rank as output ${outShape.length}, but got ` + + `${dy.rank}`); + const $dy = toNestedArray(outShape, cpuBackend.data.get(dy.dataId).values); + // The computed filter gradients has the same dimensions as the filter: + // [filterHeight, filterWidth, depth] + const gradients = makeZerosNestedTypedArray(filter.shape, filter.dtype); + // In the case of multiple argmax branches, we only back-propagate along the + // last branch, i.e., the one with largest value of `h * filter_cols + w`, + // similarly to the max-pooling backward routines. 
+ // This implementation follows the TF c++ implementation: + // https://github.com/tensorflow/tensorflow/blob/d9a3a849edc198e90172bc58eb293de457f9d986/tensorflow/core/kernels/dilation_ops.cc + for (let b = 0; b < batchSize; ++b) { + for (let hOut = 0; hOut < outHeight; ++hOut) { + const hBeg = hOut * strideHeight - padInfo.top; + for (let wOut = 0; wOut < outWidth; ++wOut) { + const wBeg = wOut * strideWidth - padInfo.left; + for (let d = 0; d < inChannels; ++d) { + let curVal = Number.MIN_SAFE_INTEGER; + let hMax = 0; + let wMax = 0; + for (let h = 0; h < filterHeight; ++h) { + const hIn = hBeg + h * dilationHeight; + if (hIn >= 0 && hIn < inHeight) { + for (let w = 0; w < filterWidth; ++w) { + const wIn = wBeg + w * dilationWidth; + if (wIn >= 0 && wIn < inWidth) { + const val = $x[b][hIn][wIn][d] + $filter[h][w][d]; + if (val > curVal) { + curVal = val; + hMax = h; + wMax = w; + } + } + } + } + } + gradients[hMax][wMax][d] += $dy[b][hOut][wOut][d]; + } + } + } + } + const dataId = cpuBackend.write(toTypedArray(gradients, x.dtype), filter.shape, filter.dtype); + return { dataId, shape: filter.shape, dtype: filter.dtype }; + } + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ + const dilation2DBackpropInputConfig = { + kernelName: Dilation2DBackpropInput, + backendName: 'cpu', + kernelFunc: ({ inputs, backend, attrs }) => { + const { x, filter, dy } = inputs; + const { strides, pad, dilations } = attrs; + const cpuBackend = backend; + const $x = toNestedArray(x.shape, cpuBackend.data.get(x.dataId).values); + const $filter = toNestedArray(filter.shape, cpuBackend.data.get(filter.dataId).values); + const { batchSize, inHeight, inWidth, inChannels, outHeight, outWidth, padInfo, strideHeight, strideWidth, filterHeight, filterWidth, dilationHeight, dilationWidth, outShape } = computeDilation2DInfo(x.shape, filter.shape, strides, pad, 'NHWC' /* dataFormat */, dilations); + assert$1(dy.rank === outShape.length, () => `Error in ${Dilation2DBackpropInput}, dy ` + + `must have the same rank as output ${outShape.length}, but got ` + + `${dy.rank}`); + const $dy = toNestedArray(outShape, cpuBackend.data.get(dy.dataId).values); + // The computed gradients has the same dimensions as the input: + // [batch, inputHeight, inputCols, inChannel] + const gradients = makeZerosNestedTypedArray(x.shape, x.dtype); + // In the case of multiple argmax branches, we only back-propagate along the + // last branch, i.e., the one with largest value of `h * filter_cols + w`, + // similarly to the max-pooling backward routines. + // This implementation follows the TF c++ implementation: + // https://github.com/tensorflow/tensorflow/blob/d9a3a849edc198e90172bc58eb293de457f9d986/tensorflow/core/kernels/dilation_ops.cc + for (let b = 0; b < batchSize; ++b) { + for (let hOut = 0; hOut < outHeight; ++hOut) { + const hBeg = hOut * strideHeight - padInfo.top; + for (let wOut = 0; wOut < outWidth; ++wOut) { + const wBeg = wOut * strideWidth - padInfo.left; + for (let d = 0; d < inChannels; ++d) { + let curVal = Number.MIN_SAFE_INTEGER; + let hInMax = (hBeg < 0) ? 
0 : hBeg; + let wInMax = (wBeg < 0) ? 0 : wBeg; + for (let h = 0; h < filterHeight; ++h) { + const hIn = hBeg + h * dilationHeight; + if (hIn >= 0 && hIn < inHeight) { + for (let w = 0; w < filterWidth; ++w) { + const wIn = wBeg + w * dilationWidth; + if (wIn >= 0 && wIn < inWidth) { + const val = $x[b][hIn][wIn][d] + $filter[h][w][d]; + if (val > curVal) { + curVal = val; + hInMax = hIn; + wInMax = wIn; + } + } + } + } + } + gradients[b][hInMax][wInMax][d] += $dy[b][hOut][wOut][d]; + } + } + } + } + const dataId = cpuBackend.write(toTypedArray(gradients, x.dtype), x.shape, x.dtype); + return { dataId, shape: x.shape, dtype: x.dtype }; + } + }; + + /** + * @license + * Copyright 2023 Google LLC. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + function draw(args) { + const { inputs, backend, attrs } = args; + const { image } = inputs; + const { canvas, options } = attrs; + const { contextOptions, imageOptions } = options || {}; + const alpha = (imageOptions === null || imageOptions === void 0 ? void 0 : imageOptions.alpha) || 1; + const contextType = (contextOptions === null || contextOptions === void 0 ? 
void 0 : contextOptions.contextType) || '2d'; + if (contextType !== '2d') { + throw new Error(`Context type ${contextOptions.contextType} is not supported by the CPU backend.`); + } + const ctx = canvas.getContext(contextType, (contextOptions === null || contextOptions === void 0 ? void 0 : contextOptions.contextAttributes) || {}); + if (ctx == null) { + throw new Error(`Could not get the context with ${contextType} type.`); + } + const [height, width] = image.shape.slice(0, 2); + const depth = image.shape.length === 2 ? 1 : image.shape[2]; + const data = backend.data.get(image.dataId).values; + const multiplier = image.dtype === 'float32' ? 255 : 1; + const bytes = new Uint8ClampedArray(width * height * 4); + for (let i = 0; i < height * width; ++i) { + const rgba = [0, 0, 0, 255 * alpha]; + for (let d = 0; d < depth; d++) { + const value = data[i * depth + d]; + if (image.dtype === 'float32') { + if (value < 0 || value > 1) { + throw new Error(`Tensor values for a float32 Tensor must be in the ` + + `range [0 - 1] but encountered ${value}.`); + } + } + else if (image.dtype === 'int32') { + if (value < 0 || value > 255) { + throw new Error(`Tensor values for a int32 Tensor must be in the ` + + `range [0 - 255] but encountered ${value}.`); + } + } + if (depth === 1) { + rgba[0] = value * multiplier; + rgba[1] = value * multiplier; + rgba[2] = value * multiplier; + } + else { + rgba[d] = value * multiplier; + } + } + const j = i * 4; + bytes[j + 0] = Math.round(rgba[0]); + bytes[j + 1] = Math.round(rgba[1]); + bytes[j + 2] = Math.round(rgba[2]); + bytes[j + 3] = Math.round(rgba[3]); + } + canvas.width = width; + canvas.height = height; + const imageData = new ImageData(bytes, width, height); + ctx.putImageData(imageData, 0, 0); + return image; + } + const drawConfig = { + kernelName: Draw, + backendName: 'cpu', + kernelFunc: draw + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. 
+ * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + function sum$1(args) { + const { inputs, backend, attrs } = args; + const { x } = inputs; + const { axis, keepDims } = attrs; + assertNotComplex$1(x, 'sum'); + let $x; + if (x.dtype === 'bool') { + $x = cast$1({ inputs: { x }, backend, attrs: { dtype: 'int32' } }); + } + else { + $x = identity$1({ inputs: { x }, backend }); + } + const xRank = $x.shape.length; + const axes = parseAxisParam(axis, $x.shape); + const permutation = getAxesPermutation(axes, xRank); + let reductionAxes = axes; + let permutedX = $x; + if (permutation != null) { + permutedX = + transpose$1({ inputs: { x: $x }, backend, attrs: { perm: permutation } }); + reductionAxes = getInnerMostAxes(reductionAxes.length, xRank); + } + assertAxesAreInnerMostDims('sum', reductionAxes, permutedX.shape.length); + const [outShape, reduceShape] = computeOutAndReduceShapes(permutedX.shape, reductionAxes); + const resultDtype = upcastType(permutedX.dtype, 'int32'); + let result = zeros(backend, outShape, resultDtype); + const reduceSize = sizeFromShape(reduceShape); + const vals = backend.data.get(result.dataId).values; + const aVals = backend.data.get(permutedX.dataId).values; + for (let i = 0; i < vals.length; ++i) { + const offset = i * reduceSize; + let sum = 0; + for (let j = 0; j < reduceSize; ++j) { + sum += aVals[offset + j]; + } + vals[i] = sum; + } + if 
(keepDims) { + const newShape = expandShapeToKeepDim(result.shape, axes); + const oldResult = result; + result = reshape$1({ inputs: { x: result }, backend, attrs: { shape: newShape } }); + backend.disposeIntermediateTensorInfo(oldResult); + } + backend.disposeIntermediateTensorInfo($x); + if (permutation != null) { + backend.disposeIntermediateTensorInfo(permutedX); + } + return result; + } + const sumConfig$1 = { + kernelName: Sum, + backendName: 'cpu', + kernelFunc: sum$1 + }; + + /** + * @license + * Copyright 2021 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ + function einsum$1(args) { + const { inputs, backend, attrs } = args; + const { equation } = attrs; + const tensors = inputs; + const { allDims, summedDims, idDims } = decodeEinsumEquation(equation, tensors.length); + checkEinsumDimSizes(allDims.length, idDims, tensors); + const { path, steps } = getEinsumComputePath(summedDims, idDims); + const nSteps = steps.length; + let out = null; + let numDimsRemaining = allDims.length; + const tensorsToDispose = []; + for (let i = 0; i < nSteps; ++i) { + for (const idTerm of steps[i]) { + const { permutationIndices: perm, expandDims: dimsToExpand } = getEinsumPermutation(numDimsRemaining, idDims[idTerm]); + let x; + if (isIdentityPermutation(perm)) { + x = tensors[idTerm]; + } + else { + x = transpose$1({ inputs: { x: tensors[idTerm] }, backend, attrs: { perm } }); + tensorsToDispose.push(x); + } + const targetShape = x.shape.slice(); + for (let k = 0; k < dimsToExpand.length; ++k) { + targetShape.splice(dimsToExpand[k], 0, 1); + } + if (!arraysEqual(x.shape, targetShape)) { + x = reshape$1({ inputs: { x }, backend, attrs: { shape: targetShape } }); + tensorsToDispose.push(x); + } + if (out === null) { + out = x; + } + else { + // tslint:disable-next-line: no-unnecessary-type-assertion + out = multiply$1({ inputs: { a: x, b: out }, backend }); + tensorsToDispose.push(out); + } + } + if (i < nSteps - 1) { + if (path[i] >= 0) { + out = sum$1({ + inputs: { x: out }, + backend, + attrs: { + axis: path[i] - (allDims.length - numDimsRemaining), + keepDims: false + } + }); + tensorsToDispose.push(out); + } + numDimsRemaining--; + } + } + // Clean up intermediate tensors. 
+ for (const tensorInfo of tensorsToDispose) { + if (tensorInfo === out) { + continue; + } + backend.disposeIntermediateTensorInfo(tensorInfo); + } + return out; + } + const einsumConfig$1 = { + kernelName: Einsum, + backendName: 'cpu', + kernelFunc: einsum$1 + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + function eluGrad$1(args) { + const { inputs, backend } = args; + const { dy, y } = inputs; + assertNotComplex$1([dy, y], 'eluGrad'); + const resultValues = new Float32Array(sizeFromShape(y.shape)); + const values = backend.data.get(y.dataId).values; + const dyValues = backend.data.get(dy.dataId).values; + for (let i = 0; i < values.length; ++i) { + const v = values[i]; + if (v >= 0) { + resultValues[i] = dyValues[i]; + } + else { + resultValues[i] = dyValues[i] * (v + 1); + } + } + return backend.makeTensorInfo(y.shape, 'float32', resultValues); + } + const eluGradConfig$1 = { + kernelName: EluGrad, + backendName: 'cpu', + kernelFunc: eluGrad$1 + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the License); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + const p = ERF_P; + const a1 = ERF_A1; + const a2 = ERF_A2; + const a3 = ERF_A3; + const a4 = ERF_A4; + const a5 = ERF_A5; + const erf$1 = unaryKernelFunc$1(Erf, (xi) => { + const sign = Math.sign(xi); + const v = Math.abs(xi); + const t = 1.0 / (1.0 + p * v); + return sign * + (1.0 - + (((((a5 * t + a4) * t) + a3) * t + a2) * t + a1) * t * + Math.exp(-v * v)); + }); + const erfConfig$1 = { + kernelName: Erf, + backendName: 'cpu', + kernelFunc: erf$1, + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + function expandDims$1(args) { + const { inputs, backend, attrs } = args; + const { input } = inputs; + const { dim } = attrs; + const inputRank = input.shape.length; + const newShape = input.shape.slice(); + let $dim = dim; + if (dim < 0) { + // Negative value is counted from the tail of rank. 
+ assert$1(-(inputRank + 1) <= dim, () => `Axis must be in the interval [${-(inputRank + 1)}, ${inputRank}]`); + $dim = inputRank + dim + 1; + } + newShape.splice($dim, 0, 1); + return reshape$1({ inputs: { x: input }, backend, attrs: { shape: newShape } }); + } + const expandDimsConfig$1 = { + kernelName: ExpandDims, + backendName: 'cpu', + kernelFunc: expandDims$1 + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + const realDivImpl = createSimpleBinaryKernelImpl((a, b) => a / b); + const div = binaryKernelFunc$1(RealDiv, realDivImpl); + const realDivConfig$1 = { + kernelName: RealDiv, + backendName: 'cpu', + kernelFunc: div + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ + /** + * Calculate FFT of inner most elements of batch tensor. + */ + function fftBatch(input, inverse, cpuBackend) { + const inputShape = input.shape; + const batch = inputShape[0]; + const innerDim = inputShape[1]; + const inputVals = cpuBackend.data.get(input.dataId); + const real2D = inputVals.complexTensorInfos.real; + const imag2D = inputVals.complexTensorInfos.imag; + // Collects real and imaginary values separately. + const resultShape = [batch, innerDim]; + const resultSize = sizeFromShape(resultShape); + const resultReal = getTypedArrayFromDType('float32', resultSize); + const resultImag = getTypedArrayFromDType('float32', resultSize); + for (let b = 0; b < batch; b++) { + // TODO: Support slice ops for complex type. + const r = slice$1({ + inputs: { x: real2D }, + backend: cpuBackend, + attrs: { begin: [b, 0], size: [1, innerDim] } + }); + const i = slice$1({ + inputs: { x: imag2D }, + backend: cpuBackend, + attrs: { begin: [b, 0], size: [1, innerDim] } + }); + const input = complex$1({ inputs: { real: r, imag: i }, backend: cpuBackend }); + // Run FFT by batch element. 
+ const { real, imag } = fftImpl$1(input, inverse, cpuBackend); + const res = mergeRealAndImagArrays(real, imag); + for (let d = 0; d < innerDim; d++) { + const c = getComplexWithIndex(res, d); + resultReal[b * innerDim + d] = c.real; + resultImag[b * innerDim + d] = c.imag; + } + cpuBackend.disposeIntermediateTensorInfo(r); + cpuBackend.disposeIntermediateTensorInfo(i); + cpuBackend.disposeIntermediateTensorInfo(input); + } + const $realInfo = cpuBackend.makeTensorInfo(resultShape, 'float32', resultReal); + const $imagInfo = cpuBackend.makeTensorInfo(resultShape, 'float32', resultImag); + const result = complex$1({ inputs: { real: $realInfo, imag: $imagInfo }, backend: cpuBackend }); + cpuBackend.disposeIntermediateTensorInfo($realInfo); + cpuBackend.disposeIntermediateTensorInfo($imagInfo); + return result; + } + function fftImpl$1(input, inverse, cpuBackend) { + const inputSize = sizeFromShape(input.shape); + const inputVals = cpuBackend.data.get(input.dataId); + const realVals = cpuBackend.data.get(inputVals.complexTensorInfos.real.dataId).values; + const imagVals = cpuBackend.data.get(inputVals.complexTensorInfos.imag.dataId).values; + if (isExponentOf2(inputSize)) { + const result = fftRadix2(realVals, imagVals, inputSize, inverse, cpuBackend); + const resultShape = [input.shape[0], input.shape[1]]; + if (inverse) { + const realInfo = cpuBackend.makeTensorInfo(resultShape, 'float32', result.real); + const imagInfo = cpuBackend.makeTensorInfo(resultShape, 'float32', result.imag); + const sizeInfo = cpuBackend.makeTensorInfo([], 'float32', createScalarValue(inputSize, 'float32')); + const sizeInfoCopy = identity$1({ inputs: { x: sizeInfo }, backend: cpuBackend }); + const divRealInfo = realDivConfig$1.kernelFunc({ inputs: { a: realInfo, b: sizeInfo }, backend: cpuBackend }); + const divImagInfo = realDivConfig$1.kernelFunc({ inputs: { a: imagInfo, b: sizeInfoCopy }, backend: cpuBackend }); + const divRealVals = cpuBackend.data.get(divRealInfo.dataId).values; + 
const divImagVals = cpuBackend.data.get(divImagInfo.dataId).values; + cpuBackend.disposeIntermediateTensorInfo(realInfo); + cpuBackend.disposeIntermediateTensorInfo(imagInfo); + cpuBackend.disposeIntermediateTensorInfo(sizeInfo); + cpuBackend.disposeIntermediateTensorInfo(sizeInfoCopy); + cpuBackend.disposeIntermediateTensorInfo(divRealInfo); + cpuBackend.disposeIntermediateTensorInfo(divImagInfo); + return { real: divRealVals, imag: divImagVals }; + } + return result; + } + else { + const data = mergeRealAndImagArrays(realVals, imagVals); + const rawOutput = fourierTransformByMatmul(data, inputSize, inverse); + return splitRealAndImagArrays(rawOutput); + } + } + function isExponentOf2(size) { + return (size & size - 1) === 0; + } + // FFT using Cooley-Tukey algorithm on radix 2 dimensional input. + function fftRadix2(realVals, imagVals, size, inverse, cpuBackend) { + if (size === 1) { + return { real: realVals, imag: imagVals }; + } + const data = mergeRealAndImagArrays(realVals, imagVals); + const half = size / 2; + const evenComplex = complexWithEvenIndex(data); + const evenRealVals = evenComplex.real; + const evenImagVals = evenComplex.imag; + const evenShape = [evenRealVals.length]; + const evenRealInfo = cpuBackend.makeTensorInfo(evenShape, 'float32', evenRealVals); + const evenImagInfo = cpuBackend.makeTensorInfo(evenShape, 'float32', evenImagVals); + const evenTensorInfo = complex$1({ inputs: { real: evenRealInfo, imag: evenImagInfo }, backend: cpuBackend }); + const oddComplex = complexWithOddIndex(data); + const oddRealVals = oddComplex.real; + const oddImagVals = oddComplex.imag; + const oddShape = [oddRealVals.length]; + const oddRealInfo = cpuBackend.makeTensorInfo(oddShape, 'float32', oddRealVals); + const oddImagInfo = cpuBackend.makeTensorInfo(oddShape, 'float32', oddImagVals); + const oddTensorInfo = complex$1({ inputs: { real: oddRealInfo, imag: oddImagInfo }, backend: cpuBackend }); + // Recursive call for half part of original input. 
+ const $evenComplex = fftRadix2(evenRealVals, evenImagVals, half, inverse, cpuBackend); + const $evenRealVals = $evenComplex.real; + const $evenImagVals = $evenComplex.imag; + const $evenShape = [$evenRealVals.length]; + const $evenRealInfo = cpuBackend.makeTensorInfo($evenShape, 'float32', $evenRealVals); + const $evenImagInfo = cpuBackend.makeTensorInfo($evenShape, 'float32', $evenImagVals); + const $evenTensorInfo = complex$1({ + inputs: { real: $evenRealInfo, imag: $evenImagInfo }, + backend: cpuBackend + }); + const $oddComplex = fftRadix2(oddRealVals, oddImagVals, half, inverse, cpuBackend); + const $oddRealVals = $oddComplex.real; + const $oddImagVals = $oddComplex.imag; + const $oddShape = [$oddRealVals.length]; + const $oddRealInfo = cpuBackend.makeTensorInfo($oddShape, 'float32', $oddRealVals); + const $oddImagInfo = cpuBackend.makeTensorInfo($oddShape, 'float32', $oddImagVals); + const $oddTensorInfo = complex$1({ inputs: { real: $oddRealInfo, imag: $oddImagInfo }, backend: cpuBackend }); + const e = exponents(size, inverse); + const eShape = [e.real.length]; + const eRealInfo = cpuBackend.makeTensorInfo(eShape, 'float32', e.real); + const eImagInfo = cpuBackend.makeTensorInfo(eShape, 'float32', e.imag); + const complexInfo = complex$1({ inputs: { real: eRealInfo, imag: eImagInfo }, backend: cpuBackend }); + const exponentInfo = multiply$1({ inputs: { a: complexInfo, b: $oddTensorInfo }, backend: cpuBackend }); + const addPart = add({ + inputs: { a: $evenTensorInfo, b: exponentInfo }, + backend: cpuBackend + }); + const subPart = sub$1({ + inputs: { a: $evenTensorInfo, b: exponentInfo }, + backend: cpuBackend + }); + const addPartReal = real$1({ inputs: { input: addPart }, backend: cpuBackend }); + const subPartReal = real$1({ inputs: { input: subPart }, backend: cpuBackend }); + const addPartImag = imag$1({ inputs: { input: addPart }, backend: cpuBackend }); + const subPartImag = imag$1({ inputs: { input: subPart }, backend: cpuBackend }); + const 
$real = concat$1({ + inputs: [addPartReal, subPartReal], + backend: cpuBackend, + attrs: { axis: 0 } + }); + const $imag = concat$1({ + inputs: [addPartImag, subPartImag], + backend: cpuBackend, + attrs: { axis: 0 } + }); + const $realVals = cpuBackend.data.get($real.dataId).values; + const $imagVals = cpuBackend.data.get($imag.dataId).values; + cpuBackend.disposeIntermediateTensorInfo(evenRealInfo); + cpuBackend.disposeIntermediateTensorInfo(evenImagInfo); + cpuBackend.disposeIntermediateTensorInfo(evenTensorInfo); + cpuBackend.disposeIntermediateTensorInfo(oddRealInfo); + cpuBackend.disposeIntermediateTensorInfo(oddImagInfo); + cpuBackend.disposeIntermediateTensorInfo(oddTensorInfo); + cpuBackend.disposeIntermediateTensorInfo($evenRealInfo); + cpuBackend.disposeIntermediateTensorInfo($evenImagInfo); + cpuBackend.disposeIntermediateTensorInfo($evenTensorInfo); + cpuBackend.disposeIntermediateTensorInfo($oddRealInfo); + cpuBackend.disposeIntermediateTensorInfo($oddImagInfo); + cpuBackend.disposeIntermediateTensorInfo($oddTensorInfo); + cpuBackend.disposeIntermediateTensorInfo(eRealInfo); + cpuBackend.disposeIntermediateTensorInfo(eImagInfo); + cpuBackend.disposeIntermediateTensorInfo(complexInfo); + cpuBackend.disposeIntermediateTensorInfo(exponentInfo); + cpuBackend.disposeIntermediateTensorInfo(addPart); + cpuBackend.disposeIntermediateTensorInfo(subPart); + cpuBackend.disposeIntermediateTensorInfo(addPartReal); + cpuBackend.disposeIntermediateTensorInfo(addPartImag); + cpuBackend.disposeIntermediateTensorInfo(subPartReal); + cpuBackend.disposeIntermediateTensorInfo(subPartImag); + cpuBackend.disposeIntermediateTensorInfo($real); + cpuBackend.disposeIntermediateTensorInfo($imag); + return { real: $realVals, imag: $imagVals }; + } + // Calculate fourier transform by multplying sinusoid matrix. + function fourierTransformByMatmul(data, size, inverse) { + const ret = new Float32Array(size * 2); + // TODO: Use matmul instead once it supports complex64 type. 
+ for (let r = 0; r < size; r++) { + let real = 0.0; + let imag = 0.0; + for (let c = 0; c < size; c++) { + const e = exponent(r * c, size, inverse); + const term = getComplexWithIndex(data, c); + real += term.real * e.real - term.imag * e.imag; + imag += term.real * e.imag + term.imag * e.real; + } + if (inverse) { + real /= size; + imag /= size; + } + assignToTypedArray(ret, real, imag, r); + } + return ret; + } + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + function fft$1(args) { + const { inputs, backend } = args; + const { input } = inputs; + const inputSize = sizeFromShape(input.shape); + // Collapse all outer dimensions to a single batch dimension. + const innerDimensionSize = input.shape[input.shape.length - 1]; + const batch = inputSize / innerDimensionSize; + const input2D = reshape$1({ + inputs: { x: input }, + backend, + attrs: { shape: [batch, innerDimensionSize] } + }); + const result = fftBatch(input2D, false, backend); + const resultReshaped = reshape$1({ inputs: { x: result }, backend, attrs: { shape: input.shape } }); + backend.disposeIntermediateTensorInfo(input2D); + backend.disposeIntermediateTensorInfo(result); + return resultReshaped; + } + const fftConfig$1 = { + kernelName: FFT, + backendName: 'cpu', + kernelFunc: fft$1 + }; + + /** + * @license + * Copyright 2020 Google LLC. 
All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + function fill$1(args) { + const { backend, attrs } = args; + const { shape, value, dtype } = attrs; + const $dtype = dtype || inferDtype(value); + const values = getArrayFromDType($dtype, sizeFromShape(shape)); + fillValues(values, value, $dtype); + return backend.makeTensorInfo(shape, $dtype, values); + } + const fillConfig$1 = { + kernelName: Fill, + backendName: 'cpu', + kernelFunc: fill$1 + }; + function fillValues(values, value, dtype) { + if (dtype === 'string') { + values.fill(value); + } + else { + values.fill(value); + } + } + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ + const flipLeftRightConfig$1 = { + kernelName: FlipLeftRight, + backendName: 'cpu', + kernelFunc: ({ inputs, attrs, backend }) => { + const { image } = inputs; + const cpuBackend = backend; + const output = getTypedArrayFromDType(image.dtype, sizeFromShape(image.shape)); + const [batch, imageHeight, imageWidth, numChannels] = image.shape; + const imageVals = cpuBackend.data.get(image.dataId).values; + for (let batchIdx = 0; batchIdx < batch; batchIdx++) { + const batchOffset = batchIdx * imageWidth * imageHeight * numChannels; + for (let row = 0; row < imageHeight; row++) { + const rowOffset = row * (imageWidth * numChannels); + for (let col = 0; col < imageWidth; col++) { + const colOffset = col * numChannels; + for (let channel = 0; channel < numChannels; channel++) { + const coordX = Math.round(imageWidth - col - 1); + const outIdx = batchOffset + rowOffset + colOffset + channel; + let outputValue = imageVals[outIdx]; + // If the coordinate position falls within the image boundaries... + if (coordX >= 0 && coordX < imageWidth) { + // set the output to the image value at the coordinate position. + const rotatedColOffset = coordX * numChannels; + const imageIdx = batchOffset + rowOffset + rotatedColOffset + channel; + outputValue = imageVals[imageIdx]; + } + output[outIdx] = outputValue; + } + } + } + } + const dataId = cpuBackend.write(output, image.shape, image.dtype); + return { dataId, shape: image.shape, dtype: image.dtype }; + } + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + function fusedConv2D(args) { + const { inputs, backend, attrs } = args; + const { x, filter, bias, preluActivationWeights } = inputs; + const { strides, pad, dataFormat, dilations, dimRoundingMode, activation, leakyreluAlpha } = attrs; + let result = conv2D({ + inputs: { x, filter }, + backend, + attrs: { strides, pad, dataFormat, dilations, dimRoundingMode } + }); + if (bias) { + const resultOld = result; + // For NCHW format, if bias is a 1-D tensor, it is supposed to be aligned + // to the channel of the conv2d's result; if the bias is a scalar, the + // bias_add is computed as if the bias was broadcasted to the shape of the + // conv2d's result. + if (dataFormat === 'NCHW' && bias.shape.length === 1 && + bias.shape[0] !== 1) { + const reshapedBias = reshape$1({ inputs: { x: bias }, backend, attrs: { shape: [bias.shape[0], 1, 1] } }); + result = + add({ inputs: { a: result, b: reshapedBias }, backend }); + backend.disposeIntermediateTensorInfo(reshapedBias); + } + else { + // This condition handles NHWC and NCHW (scalar case). The only other case + // for NCHW (1D case) is handled above. + result = add({ inputs: { a: result, b: bias }, backend }); + } + backend.disposeIntermediateTensorInfo(resultOld); + } + if (activation) { + const resultOld = result; + // For NCHW format, if PReLu activation weights is a 1-D tensor, it is + // supposed to be aligned with the channel of the conv2d's result. 
For other + // cases, whether NCHW or NHWC data format, the conv2d result is + // already aligned with the activation weights. + if (dataFormat === 'NCHW' && activation === 'prelu' && + preluActivationWeights.shape.length === 1 && + preluActivationWeights.shape[0] !== 1) { + const reshapedAlpha = reshape$1({ + inputs: { x: preluActivationWeights }, + backend, + attrs: { shape: [preluActivationWeights.shape[0], 1, 1] } + }); + result = applyActivation(backend, result, activation, reshapedAlpha, leakyreluAlpha); + backend.disposeIntermediateTensorInfo(reshapedAlpha); + } + else { + result = applyActivation(backend, result, activation, preluActivationWeights, leakyreluAlpha); + } + backend.disposeIntermediateTensorInfo(resultOld); + } + return result; + } + const fusedConv2DConfig$1 = { + kernelName: FusedConv2D, + backendName: 'cpu', + kernelFunc: fusedConv2D + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ + function fusedDepthwiseConv2D$1(args) { + const { inputs, backend, attrs } = args; + const { x, filter, bias, preluActivationWeights } = inputs; + const { strides, pad, dataFormat, dilations, dimRoundingMode, activation, leakyreluAlpha } = attrs; + let result = depthwiseConv2dNative$1({ + inputs: { x, filter }, + backend, + attrs: { strides, pad, dataFormat, dilations, dimRoundingMode } + }); + if (bias) { + const oldResult = result; + result = add({ inputs: { a: result, b: bias }, backend }); + backend.disposeIntermediateTensorInfo(oldResult); + } + if (activation) { + const oldResult = result; + result = applyActivation(backend, result, activation, preluActivationWeights, leakyreluAlpha); + backend.disposeIntermediateTensorInfo(oldResult); + } + return result; + } + const fusedDepthwiseConv2DConfig$1 = { + kernelName: FusedDepthwiseConv2D, + backendName: 'cpu', + kernelFunc: fusedDepthwiseConv2D$1 + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ + function gatherNd$1(args) { + const { inputs, backend } = args; + const { params, indices } = inputs; + const paramsSize = sizeFromShape(params.shape); + const indicesShape = indices.shape; + const sliceRank = indicesShape[indicesShape.length - 1]; + const [resultShape, numSlices, sliceSize, strides] = prepareAndValidate(params, indices); + if (numSlices === 0) { + return backend.makeTensorInfo(resultShape, params.dtype, []); + } + const indicesData = backend.data.get(indices.dataId).values; + const paramsBuf = backend.bufferSync(params); + const outBuf = gatherNdImpl(indicesData, paramsBuf, params.dtype, numSlices, sliceRank, sliceSize, strides, params.shape, paramsSize); + return backend.makeTensorInfo(resultShape, params.dtype, outBuf.values); + } + const gatherNdConfig$1 = { + kernelName: GatherNd, + backendName: 'cpu', + kernelFunc: gatherNd$1 + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + function gatherV2$1(args) { + const { inputs, backend, attrs } = args; + const { x, indices } = inputs; + const { axis, batchDims } = attrs; + assertNotComplex$1([x, indices], 'gatherV2'); + // Throw error when any index is out of bound. 
+ const parsedAxis = parseAxisParam(axis, x.shape)[0]; + const indicesVals = backend.data.get(indices.dataId).values; + const axisDim = x.shape[parsedAxis]; + for (let i = 0; i < indicesVals.length; ++i) { + const index = indicesVals[i]; + assert$1(index <= axisDim - 1 && index >= 0, () => `GatherV2: the index value ${index} is not in [0, ${axisDim - 1}]`); + } + let $batchDims = batchDims; + if (batchDims == null) { + $batchDims = 0; + } + const indicesSize = sizeFromShape(indices.shape); + const shapeInfo = collectGatherOpShapeInfo(x, indices, parsedAxis, $batchDims); + const flattenX = reshape$1({ + inputs: { x }, + backend, + attrs: { + shape: [ + shapeInfo.batchSize, shapeInfo.outerSize, shapeInfo.dimSize, + shapeInfo.sliceSize + ] + } + }); + const flattenIndex = reshape$1({ + inputs: { x: indices }, + backend, + attrs: { shape: [shapeInfo.batchSize, indicesSize / shapeInfo.batchSize] } + }); + const flattenOutputShape = [ + shapeInfo.batchSize, shapeInfo.outerSize, indicesSize / shapeInfo.batchSize, + shapeInfo.sliceSize + ]; + const indicesBuf = backend.bufferSync(flattenIndex); + const xBuf = backend.bufferSync(flattenX); + const outBuf = gatherV2Impl(xBuf, indicesBuf, flattenOutputShape); + backend.disposeIntermediateTensorInfo(flattenX); + backend.disposeIntermediateTensorInfo(flattenIndex); + return backend.makeTensorInfo(shapeInfo.outputShape, outBuf.dtype, outBuf.values); + } + const gatherV2Config$1 = { + kernelName: GatherV2, + backendName: 'cpu', + kernelFunc: gatherV2$1 + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + function ifft$1(args) { + const { inputs, backend } = args; + const { input } = inputs; + const inputSize = sizeFromShape(input.shape); + // Collapse all outer dimensions to a single batch dimension. + const innerDimensionSize = input.shape[input.shape.length - 1]; + const batch = inputSize / innerDimensionSize; + const input2D = reshape$1({ + inputs: { x: input }, + backend, + attrs: { shape: [batch, innerDimensionSize] } + }); + const result = fftBatch(input2D, true, backend); + const resultReshaped = reshape$1({ inputs: { x: result }, backend, attrs: { shape: input.shape } }); + backend.disposeIntermediateTensorInfo(input2D); + backend.disposeIntermediateTensorInfo(result); + return resultReshaped; + } + const ifftConfig$1 = { + kernelName: IFFT, + backendName: 'cpu', + kernelFunc: ifft$1 + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the License); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ + const isFinite$2 = unaryKernelFunc$1(IsFinite, (xi) => Number.isFinite(xi) ? 1 : 0, 'bool'); + const isFiniteConfig$1 = { + kernelName: IsFinite, + backendName: 'cpu', + kernelFunc: isFinite$2, + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the License); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + const isInf$1 = unaryKernelFunc$1(IsInf, (xi) => Math.abs(xi) === Infinity ? 1 : 0, 'bool'); + const isInfConfig$1 = { + kernelName: IsInf, + backendName: 'cpu', + kernelFunc: isInf$1, + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the License); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + const isNaN$2 = unaryKernelFunc$1(IsNan, (xi) => Number.isNaN(xi) ? 
1 : 0, 'bool'); + const isNaNConfig$1 = { + kernelName: IsNan, + backendName: 'cpu', + kernelFunc: isNaN$2, + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + function linSpace$1(args) { + const { backend, attrs } = args; + const { start, stop, num } = attrs; + const outVals = linSpaceImpl(start, stop, num); + return backend.makeTensorInfo([outVals.length], 'float32', outVals); + } + const linSpaceConfig$1 = { + kernelName: LinSpace, + backendName: 'cpu', + kernelFunc: linSpace$1 + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the License); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ + const log1p$1 = unaryKernelFunc$1(Log1p, (xi) => Math.log1p(xi)); + const log1pConfig$1 = { + kernelName: Log1p, + backendName: 'cpu', + kernelFunc: log1p$1, + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + const logicalAndImpl = createSimpleBinaryKernelImpl((a, b) => a && b); + const logicalAnd$1 = binaryKernelFunc$1(LogicalAnd, logicalAndImpl, null /* complexImpl */, 'bool'); + const logicalAndConfig$1 = { + kernelName: LogicalAnd, + backendName: 'cpu', + kernelFunc: logicalAnd$1 + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the License); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ + const logicalNot$1 = unaryKernelFunc$1(LogicalNot, (xi) => xi ? 0 : 1, 'bool'); + const logicalNotConfig$1 = { + kernelName: LogicalNot, + backendName: 'cpu', + kernelFunc: logicalNot$1, + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + const logicalOrImpl = createSimpleBinaryKernelImpl((a, b) => a || b); + const logicalOr$1 = binaryKernelFunc$1(LogicalOr, logicalOrImpl, null /* complexImpl */, 'bool'); + const logicalOrConfig$1 = { + kernelName: LogicalOr, + backendName: 'cpu', + kernelFunc: logicalOr$1 + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ + function lRN(args) { + const { inputs, backend, attrs } = args; + const { x } = inputs; + const { depthRadius, bias, alpha, beta } = attrs; + assertNotComplex$1(x, 'LRN'); + const channels = x.shape[3]; + const maxD = channels - 1; + const xValues = backend.data.get(x.dataId).values; + const size = sizeFromShape(x.shape); + const result = new Float32Array(size); + function sumAcrossChannels(offset) { + const currentChannel = offset % channels; + let beginSumOffset = offset - currentChannel + Math.max(0, currentChannel - depthRadius); + const endSumOffset = offset - currentChannel + Math.min(currentChannel + depthRadius, maxD); + let sum = 0.0; + for (; beginSumOffset <= endSumOffset; beginSumOffset++) { + const z = xValues[beginSumOffset]; + sum += z * z; + } + return sum; + } + for (let offset = 0; offset < size; offset++) { + const sum = sumAcrossChannels(offset); + const val = xValues[offset] * Math.pow(bias + alpha * sum, -beta); + result[offset] = val; + } + return backend.makeTensorInfo(x.shape, x.dtype, result); + } + // tslint:disable-next-line: variable-name + const LRNConfig$1 = { + kernelName: LRN, + backendName: 'cpu', + kernelFunc: lRN + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ + function lRNGrad(args) { + const { inputs, backend, attrs } = args; + const { x, y, dy } = inputs; + const { depthRadius, bias, alpha, beta } = attrs; + assertNotComplex$1(dy, 'LRNGrad'); + const dySize = sizeFromShape(dy.shape); + const channels = dy.shape[3]; + const dyValues = backend.data.get(dy.dataId).values; + const xValues = backend.data.get(x.dataId).values; + const yValues = backend.data.get(y.dataId).values; + const result = new Float32Array(dySize); + const size = dySize; + for (let offset = 0; offset < size; offset++) { + const currentChannel = offset % channels; + const depthBegin = (offset - currentChannel) + Math.max(0, currentChannel - depthRadius); + const depthEnd = (offset - currentChannel) + + Math.min(channels, currentChannel + depthRadius + 1); + let norm = 0; + for (let k = depthBegin; k < depthEnd; k++) { + norm += Math.pow(xValues[k], 2); + } + norm = alpha * norm + bias; + for (let k = depthBegin; k < depthEnd; k++) { + let dyi = -2 * alpha * beta * xValues[k] * yValues[offset] / norm; + if (offset === k) { + dyi += Math.pow(norm, -beta); + } + dyi *= dyValues[offset]; + result[k] += dyi; + } + } + return backend.makeTensorInfo(dy.shape, x.dtype, result); + } + // tslint:disable-next-line: variable-name + const LRNGradConfig$1 = { + kernelName: LRNGrad, + backendName: 'cpu', + kernelFunc: lRNGrad + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + function max$1(args) { + const { inputs, backend, attrs } = args; + const { x } = inputs; + const { reductionIndices, keepDims } = attrs; + const cpuBackend = backend; + let xShape = x.shape; + const xRank = xShape.length; + const origAxes = parseAxisParam(reductionIndices, xShape); + let axes = origAxes; + const permutedAxes = getAxesPermutation(axes, xRank); + let xVals = cpuBackend.data.get(x.dataId).values; + if (permutedAxes != null) { + const newShape = new Array(xRank); + for (let i = 0; i < newShape.length; i++) { + newShape[i] = xShape[permutedAxes[i]]; + } + xVals = transposeImpl$1(xVals, xShape, x.dtype, permutedAxes, newShape); + axes = getInnerMostAxes(axes.length, xRank); + xShape = newShape; + } + assertNotComplex$1(x, 'max'); + assertAxesAreInnerMostDims('max', axes, xRank); + const [maxOutShape, reduceShape] = computeOutAndReduceShapes(xShape, axes); + const reduceSize = sizeFromShape(reduceShape); + const result = maxImpl$1(xVals, reduceSize, maxOutShape, x.dtype); + const dataId = cpuBackend.write(result, maxOutShape, x.dtype); + let outShape = maxOutShape; + if (keepDims) { + // reshape + const newShape = expandShapeToKeepDim(maxOutShape, origAxes); + outShape = newShape; + } + return { dataId, shape: outShape, dtype: x.dtype }; + } + const maxConfig$1 = { + kernelName: Max, + backendName: 'cpu', + kernelFunc: max$1 + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + function maxPool$1(args) { + const { inputs, backend, attrs } = args; + const { x } = inputs; + assertNotComplex$1(x, 'maxPool'); + const { filterSize, strides, pad, dimRoundingMode } = attrs; + const dilations = 1; + assert$1(eitherStridesOrDilationsAreOne(strides, dilations), () => 'Error in maxPool: Either strides or dilations must be 1. ' + + `Got strides ${strides} and dilations '${dilations}'`); + const convInfo = computePool2DInfo(x.shape, filterSize, strides, dilations, pad, dimRoundingMode); + let res; + if (convInfo.filterWidth === 1 && convInfo.filterHeight === 1 && + arraysEqual(convInfo.inShape, convInfo.outShape)) { + res = identity$1({ inputs: { x }, backend }); + } + else { + const xValues = backend.data.get(x.dataId).values; + const strides = computeStrides(x.shape); + const buffer = pool(xValues, x.shape, x.dtype, strides, convInfo, 'max'); + res = backend.makeTensorInfo(convInfo.outShape, x.dtype, buffer.values); + } + return res; + } + const maxPoolConfig$1 = { + kernelName: MaxPool, + backendName: 'cpu', + kernelFunc: maxPool$1 + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + function maxPool3D(args) { + const { inputs, backend, attrs } = args; + const { x } = inputs; + const { filterSize, strides, pad, dimRoundingMode, dataFormat } = attrs; + assertNotComplex$1(x, 'maxPool3d'); + const convInfo = computePool3DInfo(x.shape, filterSize, strides, 1 /* dilations */, pad, dimRoundingMode, dataFormat); + const xValues = backend.data.get(x.dataId).values; + const outBuf = pool3d(xValues, x.shape, x.dtype, computeStrides(x.shape), convInfo, 'max'); + return backend.makeTensorInfo(outBuf.shape, 'float32', outBuf.values); + } + const maxPool3DConfig$1 = { + kernelName: MaxPool3D, + backendName: 'cpu', + kernelFunc: maxPool3D + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ + function maxPool3DGrad$1(args) { + const { inputs, backend, attrs } = args; + const { dy, input } = inputs; + const { filterSize, strides, pad, dimRoundingMode } = attrs; + assertNotComplex$1([dy, input], 'maxPool3DGrad'); + const convInfo = computePool3DInfo(input.shape, filterSize, strides, 1 /* dilations */, pad, dimRoundingMode); + const inputBuf = backend.bufferSync(input); + const maxPosBuf = maxPool3dPositions(inputBuf, convInfo); + const strideDepth = convInfo.strideDepth; + const strideHeight = convInfo.strideHeight; + const strideWidth = convInfo.strideWidth; + const dilationDepth = convInfo.dilationDepth; + const dilationHeight = convInfo.dilationHeight; + const dilationWidth = convInfo.dilationWidth; + const effectiveFilterDepth = convInfo.effectiveFilterDepth; + const effectiveFilterHeight = convInfo.effectiveFilterHeight; + const effectiveFilterWidth = convInfo.effectiveFilterWidth; + const padFront = effectiveFilterDepth - 1 - convInfo.padInfo.front; + const padLeft = effectiveFilterWidth - 1 - convInfo.padInfo.left; + const padTop = effectiveFilterHeight - 1 - convInfo.padInfo.top; + const dx = buffer(input.shape, 'float32'); + const dyBuf = backend.bufferSync(dy); + for (let batch = 0; batch < convInfo.batchSize; ++batch) { + for (let channel = 0; channel < convInfo.inChannels; ++channel) { + for (let dxDepth = 0; dxDepth < convInfo.inDepth; ++dxDepth) { + for (let dxRow = 0; dxRow < convInfo.inHeight; ++dxRow) { + for (let dxCol = 0; dxCol < convInfo.inWidth; ++dxCol) { + // Shader code begins + const dyDepthCorner = dxDepth - padFront; + const dyRowCorner = dxRow - padTop; + const dyColCorner = dxCol - padLeft; + let dotProd = 0; + for (let wDepth = 0; wDepth < effectiveFilterDepth; wDepth += dilationDepth) { + const dyDepth = (dyDepthCorner + wDepth) / strideDepth; + if (dyDepth < 0 || dyDepth >= convInfo.outDepth || + Math.floor(dyDepth) !== dyDepth) { + 
continue; + } + for (let wRow = 0; wRow < effectiveFilterHeight; wRow += dilationHeight) { + const dyRow = (dyRowCorner + wRow) / strideHeight; + if (dyRow < 0 || dyRow >= convInfo.outHeight || + Math.floor(dyRow) !== dyRow) { + continue; + } + for (let wCol = 0; wCol < effectiveFilterWidth; wCol += dilationWidth) { + const dyCol = (dyColCorner + wCol) / strideWidth; + if (dyCol < 0 || dyCol >= convInfo.outWidth || + Math.floor(dyCol) !== dyCol) { + continue; + } + const maxPos = effectiveFilterDepth * effectiveFilterHeight * + effectiveFilterWidth - + 1 - + maxPosBuf.get(batch, dyDepth, dyRow, dyCol, channel); + const curPos = wDepth * effectiveFilterHeight * effectiveFilterWidth + + wRow * effectiveFilterWidth + wCol; + const mask = maxPos === curPos ? 1 : 0; + if (mask === 0) { + continue; + } + const pixel = dyBuf.get(batch, dyDepth, dyRow, dyCol, channel); + dotProd += pixel * mask; + } + } + } + dx.set(dotProd, batch, dxDepth, dxRow, dxCol, channel); + } + } + } + } + } + return backend.makeTensorInfo(dx.shape, dx.dtype, dx.values); + } + const maxPool3DGradConfig$1 = { + kernelName: MaxPool3DGrad, + backendName: 'cpu', + kernelFunc: maxPool3DGrad$1 + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ + function maxPoolGrad$1(args) { + const { inputs, backend, attrs } = args; + const { dy, input, output } = inputs; + const x = input; + assertNotComplex$1([input, output], 'maxPoolGrad'); + const { filterSize, strides, pad, dimRoundingMode } = attrs; + const convInfo = computePool2DInfo(x.shape, filterSize, strides, 1 /* dilations */, pad, dimRoundingMode); + const xValues = backend.data.get(x.dataId).values; + const maxPosBuf = buffer(convInfo.outShape, x.dtype, maxPoolPositions(xValues, x.shape, x.dtype, convInfo).values); + const strideHeight = convInfo.strideHeight; + const strideWidth = convInfo.strideWidth; + const dilationHeight = convInfo.dilationHeight; + const dilationWidth = convInfo.dilationWidth; + const effectiveFilterHeight = convInfo.effectiveFilterHeight; + const effectiveFilterWidth = convInfo.effectiveFilterWidth; + const padLeft = effectiveFilterWidth - 1 - convInfo.padInfo.left; + const padTop = effectiveFilterHeight - 1 - convInfo.padInfo.top; + const dx = buffer(x.shape, 'float32'); + const dyData = backend.data.get(dy.dataId).values; + const dyBuf = buffer(dy.shape, 'float32', dyData); + for (let b = 0; b < convInfo.batchSize; ++b) { + for (let d = 0; d < convInfo.inChannels; ++d) { + for (let dxR = 0; dxR < convInfo.inHeight; ++dxR) { + for (let dxC = 0; dxC < convInfo.inWidth; ++dxC) { + // Shader code begins. 
+ const dyRCorner = dxR - padTop; + const dyCCorner = dxC - padLeft; + let dotProd = 0; + for (let wR = 0; wR < effectiveFilterHeight; wR += dilationHeight) { + const dyR = (dyRCorner + wR) / strideHeight; + if (dyR < 0 || dyR >= convInfo.outHeight || + Math.floor(dyR) !== dyR) { + continue; + } + for (let wC = 0; wC < effectiveFilterWidth; wC += dilationWidth) { + const dyC = (dyCCorner + wC) / strideWidth; + if (dyC < 0 || dyC >= convInfo.outWidth || + Math.floor(dyC) !== dyC) { + continue; + } + const maxPos = effectiveFilterHeight * effectiveFilterWidth - 1 - + maxPosBuf.get(b, dyR, dyC, d); + const curPos = wR * effectiveFilterWidth + wC; + const mask = maxPos === curPos ? 1 : 0; + if (mask === 0) { + continue; + } + const pixel = dyBuf.get(b, dyR, dyC, d); + dotProd += pixel * mask; + } + } + dx.set(dotProd, b, dxR, dxC, d); + } + } + } + } + return backend.makeTensorInfo(dx.shape, dx.dtype, dx.values); + } + const maxPoolGradConfig$1 = { + kernelName: MaxPoolGrad, + backendName: 'cpu', + kernelFunc: maxPoolGrad$1 + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ + function maxPoolWithArgmaxImpl$1(xValues, xShape, dtype, includeBatchInIndex, convInfo) { + const strides = computeStrides(xShape); + const maxPools = pool(xValues, xShape, dtype, strides, convInfo, 'max'); + const maxPositions = maxPoolPositions(xValues, xShape, dtype, convInfo, true, includeBatchInIndex); + return [maxPools.values, maxPositions.values]; + } + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ + const maxPoolWithArgmaxConfig$1 = { + kernelName: MaxPoolWithArgmax, + backendName: 'cpu', + kernelFunc: ({ inputs, attrs, backend }) => { + const { x } = inputs; + const { filterSize, strides, pad, includeBatchInIndex } = attrs; + const cpuBackend = backend; + assertNotComplex$1(x, 'MaxPoolWithArgmax'); + const values = cpuBackend.data.get(x.dataId).values; + const convInfo = computePool2DInfo(x.shape, filterSize, strides, [1, 1], pad); + const [pooled, indexes] = maxPoolWithArgmaxImpl$1(values, x.shape, x.dtype, includeBatchInIndex, convInfo); + const pooledDataId = cpuBackend.write(pooled, convInfo.outShape, x.dtype); + const indexesDataId = cpuBackend.write(indexes, convInfo.outShape, x.dtype); + return [ + { dataId: pooledDataId, shape: convInfo.outShape, dtype: x.dtype }, + { dataId: indexesDataId, shape: convInfo.outShape, dtype: 'int32' } + ]; + } + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ + function mean(args) { + const { inputs, backend, attrs } = args; + const { x } = inputs; + const { axis, keepDims } = attrs; + const axes = parseAxisParam(axis, x.shape); + const shapes = computeOutAndReduceShapes(x.shape, axes); + const reduceShape = shapes[1]; + const reduceSize = sizeFromShape(reduceShape); + const toDispose = []; + const reduceSizeScalar = backend.makeTensorInfo([], 'float32', new Float32Array([reduceSize])); + toDispose.push(reduceSizeScalar); + const $x = cast$1({ inputs: { x }, backend, attrs: { dtype: 'float32' } }); + toDispose.push($x); + const res = div({ inputs: { a: $x, b: reduceSizeScalar }, backend }); + toDispose.push(res); + const result = sum$1({ inputs: { x: res }, backend, attrs: { axis, keepDims } }); + toDispose.forEach(t => backend.disposeIntermediateTensorInfo(t)); + return result; + } + const meanConfig$1 = { + kernelName: Mean, + backendName: 'cpu', + kernelFunc: mean + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ + function min$1(args) { + const { inputs, backend, attrs } = args; + const { x } = inputs; + const { axis, keepDims } = attrs; + assertNotComplex$1(x, 'min'); + const origAxes = parseAxisParam(axis, x.shape); + let axes = origAxes; + const permutedAxes = getAxesPermutation(axes, x.shape.length); + let $x = x; + if (permutedAxes != null) { + $x = transpose$1({ inputs: { x }, backend, attrs: { perm: permutedAxes } }); + axes = getInnerMostAxes(axes.length, x.shape.length); + } + assertAxesAreInnerMostDims('min', axes, $x.shape.length); + const [outShape, reduceShape] = computeOutAndReduceShapes($x.shape, axes); + const reduceSize = sizeFromShape(reduceShape); + const vals = makeZerosTypedArray(sizeFromShape(outShape), $x.dtype); + const aVals = backend.data.get($x.dataId).values; + for (let i = 0; i < vals.length; ++i) { + const offset = i * reduceSize; + let min = aVals[offset]; + for (let j = 0; j < reduceSize; ++j) { + const value = aVals[offset + j]; + if (Number.isNaN(value) || + value < min) { // comparison with NaN always return false + min = value; + } + } + vals[i] = min; + } + if (permutedAxes != null) { + backend.disposeIntermediateTensorInfo($x); + } + const result = backend.makeTensorInfo(outShape, $x.dtype, vals); + if (keepDims) { + const expandedShape = expandShapeToKeepDim(outShape, origAxes); + const reshapedResult = reshape$1({ inputs: { x: result }, backend, attrs: { shape: expandedShape } }); + backend.disposeIntermediateTensorInfo(result); + return reshapedResult; + } + return result; + } + const minConfig$1 = { + kernelName: Min, + backendName: 'cpu', + kernelFunc: min$1 + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + function mirrorPad(args) { + const { inputs, backend, attrs } = args; + const { x } = inputs; + const { paddings, mode } = attrs; + assertNotComplex$1(x, 'mirrorPad'); + const outShape = paddings.map((p, i) => p[0] /* beforePad */ + x.shape[i] + p[1] /* afterPad */); + const start = paddings.map(p => p[0]); + const end = paddings.map((p, i) => p[0] + x.shape[i]); + const offset = mode === 'reflect' ? 0 : 1; + const xVals = backend.data.get(x.dataId).values; + const xRank = x.shape.length; + const xStrides = computeStrides(x.shape); + const resultSize = sizeFromShape(outShape); + const resultRank = outShape.length; + const resultStrides = computeStrides(outShape); + const resVals = getTypedArrayFromDType(x.dtype, resultSize); + for (let i = 0; i < resultSize; i++) { + let coords = indexToLoc(i, resultRank, resultStrides); + for (let i = 0; i < resultRank; i++) { + if (coords[i] < start[i]) { + coords[i] = start[i] * 2 - coords[i] - offset; + } + else if (coords[i] >= end[i]) { + coords[i] = (end[i] - 1) * 2 - coords[i] + offset; + } + } + coords = coords.map((c, i) => c - start[i]); + const inIndex = locToIndex(coords, xRank, xStrides); + resVals[i] = xVals[inIndex]; + } + const outId = backend.write(resVals, outShape, x.dtype); + return { dataId: outId, shape: outShape, dtype: x.dtype }; + } + const mirrorPadConfig$1 = { + kernelName: MirrorPad, + backendName: 'cpu', + kernelFunc: mirrorPad + }; + + /** + * @license + * Copyright 2020 Google LLC. 
All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + const modImpl = createSimpleBinaryKernelImpl(((aValue, bValue) => { + const rem = aValue % bValue; + if ((aValue < 0 && bValue < 0) || (aValue >= 0 && bValue >= 0)) { + return rem; + } + else { + return (rem + bValue) % bValue; + } + })); + const mod$1 = binaryKernelFunc$1(Mod, modImpl); + const modConfig$1 = { + kernelName: Mod, + backendName: 'cpu', + kernelFunc: mod$1 + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ + function softmax$1(args) { + const { inputs, backend, attrs } = args; + const { logits } = inputs; + const { dim } = attrs; + const logitsRank = logits.shape.length; + let $dim = dim; + if ($dim === -1) { + $dim = logitsRank - 1; + } + if ($dim !== logitsRank - 1) { + throw Error('Softmax along a non-last dimension is not yet supported. ' + + `Logits was rank ${logitsRank} and dim was ${$dim}`); + } + const axes = parseAxisParam([$dim], logits.shape); + const maxLogit = max$1({ + inputs: { x: logits }, + backend, + attrs: { reductionIndices: axes, keepDims: false } + }); + const expandedShape = expandShapeToKeepDim(maxLogit.shape, axes); + const maxLogitReshaped = reshape$1({ inputs: { x: maxLogit }, backend, attrs: { shape: expandedShape } }); + const a = sub$1({ inputs: { a: logits, b: maxLogitReshaped }, backend }); + const b = exp$1({ inputs: { x: a }, backend }); + const sumExp = sum$1({ inputs: { x: b }, backend, attrs: { axis: axes, keepDims: false } }); + const sumReshaped = reshape$1({ inputs: { x: sumExp }, backend, attrs: { shape: expandedShape } }); + const result = div({ inputs: { a: b, b: sumReshaped }, backend }); + backend.disposeIntermediateTensorInfo(maxLogit); + backend.disposeIntermediateTensorInfo(maxLogitReshaped); + backend.disposeIntermediateTensorInfo(a); + backend.disposeIntermediateTensorInfo(b); + backend.disposeIntermediateTensorInfo(sumExp); + backend.disposeIntermediateTensorInfo(sumReshaped); + return result; + } + const softmaxConfig$1 = { + kernelName: Softmax$2, + backendName: 'cpu', + kernelFunc: softmax$1 + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + function multinomial$1(args) { + const { inputs, backend, attrs } = args; + const { logits } = inputs; + const { numSamples, seed, normalized } = attrs; + assertNotComplex$1(logits, 'multinomial'); + const probabilities = normalized ? + logits : + softmax$1({ inputs: { logits }, backend, attrs: { dim: -1 } }); + const batchSize = probabilities.shape[0]; + const numEvents = probabilities.shape[1]; + const probVals = backend.data.get(probabilities.dataId).values; + const resShape = [batchSize, numSamples]; + const resVals = makeZerosTypedArray(sizeFromShape(resShape), 'int32'); + for (let b = 0; b < batchSize; ++b) { + const offset = b * numEvents; + // The cdf won't include the last event. It will be implicit if no other + // event happened. + const cdf = new Float32Array(numEvents - 1); + cdf[0] = probVals[offset]; + for (let event = 1; event < cdf.length; ++event) { + cdf[event] = cdf[event - 1] + probVals[offset + event]; + } + const random = seedrandom.alea(seed.toString()); + const outOffset = b * numSamples; + for (let sampleId = 0; sampleId < numSamples; ++sampleId) { + const r = random(); + // Assume last event happened by default. 
+ resVals[outOffset + sampleId] = cdf.length; + for (let event = 0; event < cdf.length; event++) { + if (r < cdf[event]) { + resVals[outOffset + sampleId] = event; + break; + } + } + } + } + if (!normalized) { + backend.disposeIntermediateTensorInfo(probabilities); + } + return backend.makeTensorInfo(resShape, 'int32', resVals); + } + const multinomialConfig$1 = { + kernelName: Multinomial, + backendName: 'cpu', + kernelFunc: multinomial$1 + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ + const nonMaxSuppressionV3Impl$1 = nonMaxSuppressionV3Impl$2; + function nonMaxSuppressionV3$1(args) { + const { inputs, backend, attrs } = args; + const { boxes, scores } = inputs; + const { maxOutputSize, iouThreshold, scoreThreshold } = attrs; + assertNotComplex$1(boxes, 'NonMaxSuppression'); + const boxesVals = backend.data.get(boxes.dataId).values; + const scoresVals = backend.data.get(scores.dataId).values; + const { selectedIndices } = nonMaxSuppressionV3Impl$1(boxesVals, scoresVals, maxOutputSize, iouThreshold, scoreThreshold); + return backend.makeTensorInfo([selectedIndices.length], 'int32', new Int32Array(selectedIndices)); + } + const nonMaxSuppressionV3Config$1 = { + kernelName: NonMaxSuppressionV3, + backendName: 'cpu', + kernelFunc: nonMaxSuppressionV3$1 + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ + const nonMaxSuppressionV4Impl$1 = nonMaxSuppressionV4Impl$2; + function nonMaxSuppressionV4$1(args) { + const { inputs, backend, attrs } = args; + const { boxes, scores } = inputs; + const { maxOutputSize, iouThreshold, scoreThreshold, padToMaxOutputSize } = attrs; + assertNotComplex$1(boxes, 'NonMaxSuppressionPadded'); + const boxesVals = backend.data.get(boxes.dataId).values; + const scoresVals = backend.data.get(scores.dataId).values; + const { selectedIndices, validOutputs } = nonMaxSuppressionV4Impl$1(boxesVals, scoresVals, maxOutputSize, iouThreshold, scoreThreshold, padToMaxOutputSize); + return [ + backend.makeTensorInfo([selectedIndices.length], 'int32', new Int32Array(selectedIndices)), + backend.makeTensorInfo([], 'int32', new Int32Array([validOutputs])) + ]; + } + const nonMaxSuppressionV4Config$1 = { + kernelName: NonMaxSuppressionV4, + backendName: 'cpu', + kernelFunc: nonMaxSuppressionV4$1 + }; + + /** + * @license + * Copyright 2019 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ + const nonMaxSuppressionV5Impl$1 = nonMaxSuppressionV5Impl$2; + function nonMaxSuppressionV5$1(args) { + const { inputs, backend, attrs } = args; + const { boxes, scores } = inputs; + const { maxOutputSize, iouThreshold, scoreThreshold, softNmsSigma } = attrs; + assertNotComplex$1(boxes, 'NonMaxSuppressionWithScore'); + const boxesVals = backend.data.get(boxes.dataId).values; + const scoresVals = backend.data.get(scores.dataId).values; + const maxOutputSizeVal = maxOutputSize; + const iouThresholdVal = iouThreshold; + const scoreThresholdVal = scoreThreshold; + const softNmsSigmaVal = softNmsSigma; + const { selectedIndices, selectedScores } = nonMaxSuppressionV5Impl$1(boxesVals, scoresVals, maxOutputSizeVal, iouThresholdVal, scoreThresholdVal, softNmsSigmaVal); + return [ + backend.makeTensorInfo([selectedIndices.length], 'int32', new Int32Array(selectedIndices)), + backend.makeTensorInfo([selectedScores.length], 'float32', new Float32Array(selectedScores)) + ]; + } + const nonMaxSuppressionV5Config$1 = { + kernelName: NonMaxSuppressionV5, + backendName: 'cpu', + kernelFunc: nonMaxSuppressionV5$1 + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ + function oneHot$1(args) { + const { inputs, backend, attrs } = args; + const { indices } = inputs; + const { dtype, depth, onValue, offValue } = attrs; + assertNotComplex$1(indices, 'oneHot'); + const indicesSize = sizeFromShape(indices.shape); + const res = new Float32Array(indicesSize * depth); + res.fill(offValue); + const indicesVal = backend.data.get(indices.dataId).values; + for (let event = 0; event < indicesSize; ++event) { + if (indicesVal[event] >= 0 && indicesVal[event] < depth) { + res[event * depth + indicesVal[event]] = onValue; + } + } + return backend.makeTensorInfo([...indices.shape, depth], dtype, res); + } + const oneHotConfig$1 = { + kernelName: OneHot, + backendName: 'cpu', + kernelFunc: oneHot$1 + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ + function zerosLike$1(args) { + const { inputs, backend } = args; + const { x } = inputs; + if (x.dtype === 'string') { + throw new Error('zerosLike is not supported for string tensors'); + } + else if (x.dtype === 'complex64') { + const realPart = real$1({ inputs: { input: x }, backend }); + const r = zerosLike$1({ inputs: { x: realPart }, backend }); + const imagPart = imag$1({ inputs: { input: x }, backend }); + const i = zerosLike$1({ inputs: { x: imagPart }, backend }); + const result = complex$1({ inputs: { real: r, imag: i }, backend }); + backend.disposeIntermediateTensorInfo(realPart); + backend.disposeIntermediateTensorInfo(r); + backend.disposeIntermediateTensorInfo(imagPart); + backend.disposeIntermediateTensorInfo(i); + return result; + } + else { + return fill$1({ backend, attrs: { shape: x.shape, value: 0, dtype: x.dtype } }); + } + } + const zerosLikeConfig$1 = { + kernelName: ZerosLike, + backendName: 'cpu', + kernelFunc: zerosLike$1 + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ + function onesLike$1(args) { + const { inputs, backend } = args; + const { x } = inputs; + if (x.dtype === 'string') { + throw new Error('onesLike is not supported for string tensors'); + } + else if (x.dtype === 'complex64') { + const realPart = real$1({ inputs: { input: x }, backend }); + const r = onesLike$1({ inputs: { x: realPart }, backend }); + const imagPart = imag$1({ inputs: { input: x }, backend }); + const i = zerosLike$1({ inputs: { x: imagPart }, backend }); + const result = complex$1({ inputs: { real: r, imag: i }, backend }); + backend.disposeIntermediateTensorInfo(realPart); + backend.disposeIntermediateTensorInfo(r); + backend.disposeIntermediateTensorInfo(imagPart); + backend.disposeIntermediateTensorInfo(i); + return result; + } + else { + return fill$1({ backend, attrs: { shape: x.shape, value: 1, dtype: x.dtype } }); + } + } + const onesLikeConfig$1 = { + kernelName: OnesLike, + backendName: 'cpu', + kernelFunc: onesLike$1 + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ + function pack$1(args) { + const { inputs, backend, attrs } = args; + const { axis } = attrs; + if (inputs.length === 1) { + return expandDims$1({ inputs: { input: inputs[0] }, backend, attrs: { dim: axis } }); + } + const shape = inputs[0].shape; + const dtype = inputs[0].dtype; + inputs.forEach(t => { + assertShapesMatch(shape, t.shape, 'All tensors passed to stack must have matching shapes'); + assert$1(dtype === t.dtype, () => 'All tensors passed to stack must have matching dtypes'); + }); + const intermediateTensorInfos = []; + const expandedTensors = inputs.map(t => { + const expandedT = expandDims$1({ inputs: { input: t }, backend, attrs: { dim: axis } }); + intermediateTensorInfos.push(expandedT); + return expandedT; + }); + const result = concat$1({ inputs: expandedTensors, backend, attrs: { axis } }); + intermediateTensorInfos.forEach(t => backend.disposeIntermediateTensorInfo(t)); + return result; + } + const packConfig$1 = { + kernelName: Pack, + backendName: 'cpu', + kernelFunc: pack$1 + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ + function padV2$1(args) { + const { inputs, backend, attrs } = args; + const { x } = inputs; + const { paddings, constantValue } = attrs; + assertNotComplex$1(x, 'pad'); + const outShape = paddings.map((p, i) => p[0] /* beforePad */ + x.shape[i] + p[1] /* afterPad */); + const start = paddings.map(p => p[0]); + const xVals = backend.data.get(x.dataId).values; + const xSize = sizeFromShape(x.shape); + const xRank = x.shape.length; + const xStrides = computeStrides(x.shape); + const resultSize = sizeFromShape(outShape); + const resultRank = outShape.length; + const resultStrides = computeStrides(outShape); + const resVals = getTypedArrayFromDType(x.dtype, resultSize); + if (constantValue !== 0) { + resVals.fill(constantValue); + } + for (let i = 0; i < xSize; i++) { + const coords = indexToLoc(i, xRank, xStrides); + const outCoords = coords.map((c, i) => c + start[i]); + const outIndex = locToIndex(outCoords, resultRank, resultStrides); + resVals[outIndex] = xVals[i]; + } + const outId = backend.write(resVals, outShape, x.dtype); + return { dataId: outId, shape: outShape, dtype: x.dtype }; + } + const padV2Config$1 = { + kernelName: PadV2, + backendName: 'cpu', + kernelFunc: padV2$1 + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ + const powImpl = createSimpleBinaryKernelImpl((a, b) => Math.pow(a, b)); + const pow$1 = binaryKernelFunc$1(Pow, powImpl); + const powConfig$1 = { + kernelName: Pow, + backendName: 'cpu', + kernelFunc: pow$1 + }; + + /** + * @license + * Copyright 2022 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + function raggedGather$1(args) { + const { inputs, backend, attrs } = args; + const { paramsNestedSplits, paramsDenseValues, indices } = inputs; + const { outputRaggedRank } = attrs; + const $paramsNestedSplits = paramsNestedSplits.map(t => backend.data.get(t.dataId).values); + const $paramsNestedSplitsShapes = paramsNestedSplits.map(t => t.shape); + const $paramsDenseValues = backend.data.get(paramsDenseValues.dataId).values; + const $indices = backend.data.get(indices.dataId).values; + const [outputNestedSplits, outputDenseValues, outputDenseValuesShape] = raggedGatherImpl($paramsNestedSplits, $paramsNestedSplitsShapes, $paramsDenseValues, paramsDenseValues.shape, paramsDenseValues.dtype, $indices, indices.shape, outputRaggedRank); + const outputNestedSplitsTensors = outputNestedSplits.map((splits) => backend.makeTensorInfo([splits.length], 'int32', splits)); + const outputDenseValuesTensor = backend.makeTensorInfo(outputDenseValuesShape, 
paramsDenseValues.dtype, outputDenseValues); + return outputNestedSplitsTensors.concat([outputDenseValuesTensor]); + } + const raggedGatherConfig$1 = { + kernelName: RaggedGather, + backendName: 'cpu', + kernelFunc: raggedGather$1, + }; + + /** + * @license + * Copyright 2022 Google LLC. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + function raggedRange$1(args) { + const { inputs, backend } = args; + const { starts, limits, deltas } = inputs; + const $starts = backend.data.get(starts.dataId).values; + const $limits = backend.data.get(limits.dataId).values; + const $deltas = backend.data.get(deltas.dataId).values; + const [rtNestedSplitsData, rtDenseValuesData] = raggedRangeImpl($starts, starts.shape, starts.dtype, $limits, limits.shape, $deltas, deltas.shape); + const rtNestedSplits = backend.makeTensorInfo([rtNestedSplitsData.length], 'int32', rtNestedSplitsData); + const rtDenseValues = backend.makeTensorInfo([rtDenseValuesData.length], starts.dtype, rtDenseValuesData); + return [rtNestedSplits, rtDenseValues]; + } + const raggedRangeConfig$1 = { + kernelName: RaggedRange, + backendName: 'cpu', + kernelFunc: raggedRange$1, + }; + + /** + * @license + * Copyright 2022 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + function raggedTensorToTensor$1(args) { + const { inputs, backend, attrs } = args; + const { shape, values, defaultValue, rowPartitionTensors } = inputs; + const { rowPartitionTypes } = attrs; + const $shape = backend.data.get(shape.dataId).values; + const $values = backend.data.get(values.dataId).values; + const $defaultValue = backend.data.get(defaultValue.dataId).values; + const $rowPartitionValues = rowPartitionTensors.map(t => backend.data.get(t.dataId).values); + const rowPartitionValuesShapes = rowPartitionTensors.map(t => t.shape); + const [outputShape, output] = raggedTensorToTensorImpl($shape, shape.shape, $values, values.shape, values.dtype, $defaultValue, defaultValue.shape, $rowPartitionValues, rowPartitionValuesShapes, rowPartitionTypes); + return backend.makeTensorInfo(outputShape, values.dtype, output); + } + const raggedTensorToTensorConfig$1 = { + kernelName: RaggedTensorToTensor, + backendName: 'cpu', + kernelFunc: raggedTensorToTensor$1, + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + function range$1(args) { + const { backend, attrs } = args; + const { start, stop, dtype, step } = attrs; + const values = rangeImpl(start, stop, step, dtype); + return backend.makeTensorInfo([values.length], dtype, values); + } + const rangeConfig$1 = { + kernelName: Range, + backendName: 'cpu', + kernelFunc: range$1 + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the License); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + const reciprocal$1 = unaryKernelFunc$1(Reciprocal, (xi) => 1 / xi); + const reciprocalConfig$1 = { + kernelName: Reciprocal, + backendName: 'cpu', + kernelFunc: reciprocal$1, + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + function resizeBilinear$1(args) { + const { inputs, backend, attrs } = args; + const { images } = inputs; + const { alignCorners, halfPixelCenters, size } = attrs; + assertNotComplex$1(images, 'resizeBilinear'); + const imagesStrides = computeStrides(images.shape); + const [newHeight, newWidth] = size; + const [batch, oldHeight, oldWidth, numChannels] = images.shape; + const xValues = backend.data.get(images.dataId).values; + const result = new Float32Array(sizeFromShape([batch, newHeight, newWidth, numChannels])); + const effectiveInputSize = [ + (alignCorners && newHeight > 1) ? oldHeight - 1 : oldHeight, + (alignCorners && newWidth > 1) ? oldWidth - 1 : oldWidth + ]; + const effectiveOutputSize = [ + (alignCorners && newHeight > 1) ? newHeight - 1 : newHeight, + (alignCorners && newWidth > 1) ? 
newWidth - 1 : newWidth + ]; + let outputIdx = 0; + const effectiveRowSizeRatio = effectiveInputSize[0] / effectiveOutputSize[0]; + const effectiveColSizeRatio = effectiveInputSize[1] / effectiveOutputSize[1]; + for (let b = 0; b < batch; b++) { + for (let r = 0; r < newHeight; r++) { + let sourceFracRow; + if (halfPixelCenters) { + sourceFracRow = effectiveRowSizeRatio * (r + 0.5) - 0.5; + } + else { + sourceFracRow = effectiveRowSizeRatio * r; + } + const sourceRowFloor = Math.max(0, Math.floor(sourceFracRow)); + const rowFrac = sourceFracRow - sourceRowFloor; + const sourceRowCeil = Math.min(oldHeight - 1, Math.ceil(sourceFracRow)); + const topRowOffset = b * imagesStrides[0] + sourceRowFloor * imagesStrides[1]; + const botRowOffset = b * imagesStrides[0] + sourceRowCeil * imagesStrides[1]; + for (let c = 0; c < newWidth; c++) { + let sourceFracCol; + if (halfPixelCenters) { + sourceFracCol = effectiveColSizeRatio * (c + 0.5) - 0.5; + } + else { + sourceFracCol = effectiveColSizeRatio * c; + } + const sourceColFloor = Math.max(0, Math.floor(sourceFracCol)); + const colFrac = sourceFracCol - sourceColFloor; + const sourceColCeil = Math.min(oldWidth - 1, Math.ceil(sourceFracCol)); + const topLeftOffest = topRowOffset + sourceColFloor * imagesStrides[2]; + const botLeftOffset = botRowOffset + sourceColFloor * imagesStrides[2]; + const topRightOffset = topRowOffset + sourceColCeil * imagesStrides[2]; + const botRightOffest = botRowOffset + sourceColCeil * imagesStrides[2]; + for (let d = 0; d < numChannels; d++) { + // Begin shader. + // Compute the fractional index of the source. 
+ const topLeft = xValues[topLeftOffest + d]; + const bottomLeft = xValues[botLeftOffset + d]; + const topRight = xValues[topRightOffset + d]; + const bottomRight = xValues[botRightOffest + d]; + const top = topLeft + (topRight - topLeft) * colFrac; + const bottom = bottomLeft + (bottomRight - bottomLeft) * colFrac; + const newValue = top + (bottom - top) * rowFrac; + result[outputIdx++] = newValue; + } + } + } + } + return backend.makeTensorInfo([batch, newHeight, newWidth, numChannels], 'float32', result); + } + const resizeBilinearConfig$1 = { + kernelName: ResizeBilinear, + backendName: 'cpu', + kernelFunc: resizeBilinear$1 + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ + function resizeBilinearGrad$1(args) { + const { inputs, backend, attrs } = args; + const { images, dy } = inputs; + const { alignCorners } = attrs; + assertNotComplex$1([dy, images], 'resizeBilinearGrad'); + const imagesStrides = computeStrides(images.shape); + const [batch, xHeight, xWidth, depth] = images.shape; + const [, yHeight, yWidth] = dy.shape; + const output = new Float32Array(batch * xHeight * xWidth * depth); + // In the backwards pass, we want to find the pixels that were generated + // for each pixel in the input image the forward pass and add the + // corresponding coefficient from dy to the gradient (with some + // interpolation). + const effectiveXSize = [ + (alignCorners && yHeight > 1) ? xHeight - 1 : xHeight, + (alignCorners && yWidth > 1) ? xWidth - 1 : xWidth + ]; + const effectiveYSize = [ + (alignCorners && yHeight > 1) ? yHeight - 1 : yHeight, + (alignCorners && yWidth > 1) ? 
yWidth - 1 : yWidth + ]; + const heightScale = effectiveXSize[0] / effectiveYSize[0]; + const widthScale = effectiveXSize[1] / effectiveYSize[1]; + // Reference implementation + // tslint:disable-next-line:max-line-length + // https://github.com/tensorflow/tensorflow/blob/3039375c86a5bbc9610c7725dcaa95d635f87ba2/tensorflow/core/kernels/resize_bilinear_op.cc#L275 + const dyValues = backend.data.get(dy.dataId).values; + let offset = 0; + for (let b = 0; b < batch; b++) { + const bOffset = b * imagesStrides[0]; + for (let r = 0; r < yHeight; r++) { + const dxR = r * heightScale; + const topDxRIndex = Math.floor(dxR); + const bottomDxRIndex = Math.min(Math.ceil(dxR), xHeight - 1); + const topDxROffset = bOffset + topDxRIndex * imagesStrides[1]; + const bottomDxROffset = bOffset + bottomDxRIndex * imagesStrides[1]; + const dxRLerp = dxR - topDxRIndex; + const inverseDxRLerp = 1.0 - dxRLerp; + for (let c = 0; c < yWidth; c++) { + const dxC = c * widthScale; + const leftDxCIndex = Math.floor(dxC); + const rightDxCIndex = Math.min(Math.ceil(dxC), xWidth - 1); + const dxCLerp = dxC - leftDxCIndex; + const inverseDxCLerp = 1.0 - dxCLerp; + const topLeftRCOffset = topDxROffset + leftDxCIndex * imagesStrides[2]; + const topRightRCOffset = topDxROffset + rightDxCIndex * imagesStrides[2]; + const bottomLeftRCOffset = bottomDxROffset + leftDxCIndex * imagesStrides[2]; + const bottomRightRCOffset = bottomDxROffset + rightDxCIndex * imagesStrides[2]; + const inverseDxRLerpTimesInverseDxCLerp = inverseDxRLerp * inverseDxCLerp; + const inverseDxRLerpTimesDxCLerp = inverseDxRLerp * dxCLerp; + const dxRLerpTimesInverseDxCLerp = dxRLerp * inverseDxCLerp; + const dxRLerpTimesDxCLerp = dxRLerp * dxCLerp; + for (let d = 0; d < depth; d++) { + const dyVal = dyValues[offset++]; + output[topLeftRCOffset + d] += + dyVal * inverseDxRLerpTimesInverseDxCLerp; + output[topRightRCOffset + d] += dyVal * inverseDxRLerpTimesDxCLerp; + output[bottomLeftRCOffset + d] += dyVal * 
dxRLerpTimesInverseDxCLerp; + output[bottomRightRCOffset + d] += dyVal * dxRLerpTimesDxCLerp; + } + } + } + } + return backend.makeTensorInfo([batch, xWidth, xHeight, depth], 'float32', output); + } + const resizeBilinearGradConfig$1 = { + kernelName: ResizeBilinearGrad, + backendName: 'cpu', + kernelFunc: resizeBilinearGrad$1 + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + function resizeNearestNeighbor$1(args) { + const { inputs, backend, attrs } = args; + const { images } = inputs; + const { alignCorners, halfPixelCenters, size } = attrs; + assertNotComplex$1(images, 'resizeNearestNeighbor'); + const imagesStrides = computeStrides(images.shape); + const [newHeight, newWidth] = size; + const [batch, oldHeight, oldWidth, numChannels] = images.shape; + const xValues = backend.data.get(images.dataId).values; + const output = new Float32Array(batch * newHeight * newWidth * numChannels); + const effectiveInputSize = [ + (alignCorners && newHeight > 1) ? oldHeight - 1 : oldHeight, + (alignCorners && newWidth > 1) ? oldWidth - 1 : oldWidth + ]; + const effectiveOutputSize = [ + (alignCorners && newHeight > 1) ? newHeight - 1 : newHeight, + (alignCorners && newWidth > 1) ? 
newWidth - 1 : newWidth + ]; + const effectiveRowSizeRatio = effectiveInputSize[0] / effectiveOutputSize[0]; + const effectiveColSizeRatio = effectiveInputSize[1] / effectiveOutputSize[1]; + let outputOffset = 0; + for (let b = 0; b < batch; b++) { + const batchOffset = b * imagesStrides[0]; + for (let r = 0; r < newHeight; r++) { + const sourceFracRow = halfPixelCenters ? + effectiveRowSizeRatio * (r + 0.5) : + effectiveRowSizeRatio * r; + let sourceNearestRow = Math.min(oldHeight - 1, alignCorners ? Math.round(sourceFracRow) : Math.floor(sourceFracRow)); + if (halfPixelCenters) { + sourceNearestRow = Math.max(0, sourceNearestRow); + } + const rowOffset = batchOffset + sourceNearestRow * imagesStrides[1]; + for (let c = 0; c < newWidth; c++) { + const sourceFracCol = halfPixelCenters ? + effectiveColSizeRatio * (c + 0.5) : + effectiveColSizeRatio * c; + let sourceNearestCol = Math.min(oldWidth - 1, alignCorners ? Math.round(sourceFracCol) : + Math.floor(sourceFracCol)); + if (halfPixelCenters) { + sourceNearestCol = Math.max(0, sourceNearestCol); + } + const colOffset = rowOffset + sourceNearestCol * imagesStrides[2]; + for (let d = 0; d < numChannels; d++) { + // Begin shader. + // Compute the fractional index of the source. + const newVal = xValues[colOffset + d]; + output[outputOffset++] = newVal; + } + } + } + } + return backend.makeTensorInfo([batch, newHeight, newWidth, numChannels], images.dtype, output); + } + const resizeNearestNeighborConfig$1 = { + kernelName: ResizeNearestNeighbor, + backendName: 'cpu', + kernelFunc: resizeNearestNeighbor$1 + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + function resizeNearestNeighborGrad$1(args) { + const { inputs, backend, attrs } = args; + const { images, dy } = inputs; + const { alignCorners } = attrs; + assertNotComplex$1([dy, images], 'resizeNearestNeighborGrad'); + const imagesStrides = computeStrides(images.shape); + const dyStrides = computeStrides(dy.shape); + const [batch, xHeight, xWidth, depth] = images.shape; + const [, yHeight, yWidth] = dy.shape; + const output = new Float32Array(batch * xHeight * xWidth * depth); + const dyValues = backend.data.get(dy.dataId).values; + // In the backwards pass, we want to find the pixels that were generated + // for each pixel in the input image the forward pass + const effectiveXSize = [ + (alignCorners && yHeight > 1) ? xHeight - 1 : xHeight, + (alignCorners && yWidth > 1) ? xWidth - 1 : xWidth + ]; + const effectiveYSize = [ + (alignCorners && yHeight > 1) ? yHeight - 1 : yHeight, + (alignCorners && yWidth > 1) ? yWidth - 1 : yWidth + ]; + const heightScale = effectiveXSize[0] / effectiveYSize[0]; + const widthScale = effectiveXSize[1] / effectiveYSize[1]; + const invHeightScale = 1 / heightScale; + const invWidthScale = 1 / widthScale; + // This defines the size of the window of values around a particular + // index in dy that we want to search for contributions to dx. + const winHeight = (Math.ceil(invHeightScale) * 2) + 2; + const winWidth = (Math.ceil(invWidthScale) * 2) + 2; + // Loop over the output space. 
+ for (let b = 0; b < batch; b++) { + const batchOffset = b * imagesStrides[0]; + for (let r = 0; r < xHeight; r++) { + const rowOffset = batchOffset + r * imagesStrides[1]; + // Compute bounds for where in dy we will look + const startRLerp = Math.floor(r * invHeightScale); + const startDyR = Math.floor(startRLerp - (winHeight / 2)); + for (let c = 0; c < xWidth; c++) { + const colOffset = rowOffset + c * imagesStrides[2]; + // Compute bounds for where in dy we will look + const startCLerp = Math.floor(c * invWidthScale); + const startDyC = Math.floor(startCLerp - (winWidth / 2)); + for (let d = 0; d < depth; d++) { + let accum = 0; + // loop over dy + for (let dyRIndex = 0; dyRIndex < winHeight; dyRIndex++) { + const dyR = dyRIndex + startDyR; + // Guard against the window exceeding the bounds of dy + if (dyR < 0 || dyR >= yHeight) { + continue; + } + const dyROffset = batchOffset + dyR * dyStrides[1]; + const sourceFracRow = dyR * heightScale; + const sourceNearestRow = Math.min(xHeight - 1, alignCorners ? Math.round(sourceFracRow) : + Math.floor(sourceFracRow)); + if (r !== sourceNearestRow) { + continue; + } + for (let dyCIndex = 0; dyCIndex < winWidth; dyCIndex++) { + const dyC = dyCIndex + startDyC; + // Guard against the window exceeding the bounds of dy + if (dyC < 0 || dyC >= yWidth) { + continue; + } + const dyCOffset = dyROffset + dyC * dyStrides[2]; + const sourceFracCol = dyC * widthScale; + const sourceNearestCol = Math.min(xWidth - 1, alignCorners ? Math.round(sourceFracCol) : + Math.floor(sourceFracCol)); + if (c === sourceNearestCol) { + accum += dyValues[dyCOffset + d]; + } + } + } + output[colOffset + d] = accum; + } + } + } + } + return backend.makeTensorInfo(images.shape, images.dtype, output); + } + const resizeNearestNeighborGradConfig$1 = { + kernelName: ResizeNearestNeighborGrad, + backendName: 'cpu', + kernelFunc: resizeNearestNeighborGrad$1 + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. 
+ * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + function reverse$1(args) { + const { inputs, backend, attrs } = args; + const { x } = inputs; + const { dims } = attrs; + assertNotComplex$1(x, 'reverse'); + const xRank = x.shape.length; + const $dims = parseAxisParam(dims, x.shape); + if (xRank === 0) { + return identity$1({ inputs: { x }, backend }); + } + const outBuf = new TensorBuffer(x.shape, x.dtype); + const xBuf = backend.bufferSync(x); + for (let i = 0; i < outBuf.size; i++) { + const outLoc = outBuf.indexToLoc(i); + const inLoc = outLoc.slice(); + $dims.forEach(d => inLoc[d] = x.shape[d] - 1 - inLoc[d]); + outBuf.set(xBuf.get(...inLoc), ...outLoc); + } + return backend.makeTensorInfo(outBuf.shape, outBuf.dtype, outBuf.values); + } + const reverseConfig$1 = { + kernelName: Reverse, + backendName: 'cpu', + kernelFunc: reverse$1 + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + const rotateWithOffsetConfig$1 = { + kernelName: RotateWithOffset, + backendName: 'cpu', + kernelFunc: ({ inputs, attrs, backend }) => { + const { image } = inputs; + const { radians, fillValue, center } = attrs; + const cpuBackend = backend; + const output = getTypedArrayFromDType(image.dtype, sizeFromShape(image.shape)); + const [batch, imageHeight, imageWidth, numChannels] = image.shape; + const [centerX, centerY] = getImageCenter(center, imageHeight, imageWidth); + const fullOpacityValue = 255; + const sinFactor = Math.sin(radians); + const cosFactor = Math.cos(radians); + const imageVals = cpuBackend.data.get(image.dataId).values; + for (let batchIdx = 0; batchIdx < batch; batchIdx++) { + const batchOffset = batchIdx * imageWidth * imageHeight * numChannels; + for (let row = 0; row < imageHeight; row++) { + const rowOffset = row * (imageWidth * numChannels); + for (let col = 0; col < imageWidth; col++) { + const colOffset = col * numChannels; + for (let channel = 0; channel < numChannels; channel++) { + const coords = [batch, row, col, channel]; + const x = coords[2]; + const y = coords[1]; + // coordX/coordY are the result of rotating and translating x/y. + let coordX = (x - centerX) * cosFactor - (y - centerY) * sinFactor; + let coordY = (x - centerX) * sinFactor + (y - centerY) * cosFactor; + coordX = Math.round(coordX + centerX); + coordY = Math.round(coordY + centerY); + let outputValue = fillValue; + if (typeof fillValue !== 'number') { + if (channel === 3) { + outputValue = fullOpacityValue; + } + else { + outputValue = fillValue[channel]; + } + } + // If the coordinate position falls within the image boundaries... 
+ if (coordX >= 0 && coordX < imageWidth && coordY >= 0 && + coordY < imageHeight) { + // set the output to the image value at the coordinate position. + const rotatedRowOffset = coordY * (imageWidth * numChannels); + const rotatedColOffset = coordX * numChannels; + const imageIdx = batchOffset + rotatedRowOffset + rotatedColOffset + channel; + outputValue = imageVals[imageIdx]; + } + const outIdx = batchOffset + rowOffset + colOffset + channel; + output[outIdx] = outputValue; + } + } + } + } + const dataId = cpuBackend.write(output, image.shape, image.dtype); + return { dataId, shape: image.shape, dtype: image.dtype }; + } + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the License); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + const round$1 = unaryKernelFunc$1(Round, (xi) => { + // The algorithm is based on banker's rounding. + const base = Math.floor(xi); + if (xi - base < 0.5) { + return Math.floor(xi); + } + else if (xi - base > 0.5) { + return Math.ceil(xi); + } + else { + if (base % 2.0 === 0.0) { + return base; + } + else { + return base + 1.0; + } + } + }); + const roundConfig$1 = { + kernelName: Round, + backendName: 'cpu', + kernelFunc: round$1, + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. 
+ * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + function scatterNd$1(args) { + const { inputs, backend, attrs } = args; + const { indices, updates } = inputs; + const { shape } = attrs; + const { sliceRank, numUpdates, sliceSize, strides, outputSize } = calculateShapes(updates, indices, shape); + const sumDupeIndices = true; + const indicesBuf = backend.bufferSync(indices); + const updatesBuf = backend.bufferSync(updates); + const outBuf = scatterImpl(indicesBuf, updatesBuf, shape, outputSize, sliceSize, numUpdates, sliceRank, strides, 0 /* defaultValue */, sumDupeIndices); + return backend.makeTensorInfo(shape, outBuf.dtype, outBuf.values); + } + const scatterNdConfig$1 = { + kernelName: ScatterNd, + backendName: 'cpu', + kernelFunc: scatterNd$1 + }; + + /** + * @license + * Copyright 2022 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ + function lowerBound(array, value) { + let left = 0; + let right = array.length; + let mid = 0; + while (left < right) { + mid = Math.floor((left + right) / 2); + if (array[mid] < value) { + left = mid + 1; + } + else { + right = mid; + } + } + return right; + } + function upperBound(array, value) { + let left = 0; + let right = array.length; + let mid = 0; + while (left < right) { + mid = Math.floor((left + right) / 2); + if (array[mid] <= value) { + left = mid + 1; + } + else { + right = mid; + } + } + return right; + } + function searchSortedImpl(sortedInputs, values, batchSize, numInputs, numValues, side) { + const output = getArrayFromDType('int32', batchSize * numValues); + for (let b = 0; b < batchSize; ++b) { + const sortedInputsSlice = sortedInputs.slice(b * numInputs, (b + 1) * numInputs); + const outputOffset = b * numValues; + for (let i = 0; i < numValues; ++i) { + output[outputOffset + i] = side === 'left' ? + lowerBound(sortedInputsSlice, values[i + outputOffset]) : + upperBound(sortedInputsSlice, values[i + outputOffset]); + } + } + return output; + } + + /** + * @license + * Copyright 2022 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ + function searchSorted$1(args) { + const { inputs, backend, attrs } = args; + const { sortedSequence, values } = inputs; + const { side } = attrs; + const $sortedSequence = backend.data.get(sortedSequence.dataId).values; + const $values = backend.data.get(values.dataId).values; + const output = searchSortedImpl($sortedSequence, $values, sortedSequence.shape[0], sortedSequence.shape[1], values.shape[1], side); + return backend.makeTensorInfo(values.shape, 'int32', output); + } + const searchSortedConfig$1 = { + kernelName: SearchSorted, + backendName: 'cpu', + kernelFunc: searchSorted$1, + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ + function select$1(args) { + const { inputs, backend } = args; + const { condition, t, e } = inputs; + assertNotComplex$1([condition, t, e], 'select'); + const conditionRank = condition.shape.length; + const values = backend.data.get(condition.dataId).values; + const tValues = backend.data.get(t.dataId).values; + const eValues = backend.data.get(e.dataId).values; + const resultDtype = upcastType(t.dtype, e.dtype); + const newValues = makeZerosTypedArray(sizeFromShape(t.shape), resultDtype); + let index = 0; + const offset = conditionRank === 0 || conditionRank > 1 || t.shape.length === 1 ? + 1 : + sizeFromShape(t.shape.slice(1)); + for (let i = 0; i < values.length; i++) { + for (let j = 0; j < offset; j++) { + if (values[i] === 1) { + newValues[index++] = tValues[i]; + } + else { + newValues[index++] = eValues[i]; + } + } + } + return backend.makeTensorInfo(t.shape, resultDtype, newValues); + } + const selectConfig$1 = { + kernelName: Select, + backendName: 'cpu', + kernelFunc: select$1 + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the License); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ + const scaleAlpha = SELU_SCALEALPHA; + const scale = SELU_SCALE; + const selu$1 = unaryKernelFunc$1(Selu$1, (xi) => { + if (xi >= 0) { + return scale * xi; + } + else { + return scaleAlpha * (Math.exp(xi) - 1); + } + }); + const seluConfig$1 = { + kernelName: Selu$1, + backendName: 'cpu', + kernelFunc: selu$1, + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the License); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + const sign$1 = unaryKernelFunc$1(Sign, (xi) => { + if (xi < 0) { + return -1; + } + else if (xi > 0) { + return 1; + } + else { + return 0; + } + }); + const signConfig$1 = { + kernelName: Sign, + backendName: 'cpu', + kernelFunc: sign$1, + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the License); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ + const sin$1 = unaryKernelFunc$1(Sin, (xi) => Math.sin(xi)); + const sinConfig$1 = { + kernelName: Sin, + backendName: 'cpu', + kernelFunc: sin$1, + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the License); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + const sinh$1 = unaryKernelFunc$1(Sinh, (xi) => Math.sinh(xi)); + const sinhConfig$1 = { + kernelName: Sinh, + backendName: 'cpu', + kernelFunc: sinh$1, + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the License); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + // mirrors the implementation of tf.nn.softplus: https://goo.gl/vkcvwX + // epsilon is the difference between 1.0 and the next representable float. 
+ // For a single precision 32 bit float this should be 2^-23, see: + // https://math.byu.edu/~schow/work/IEEEFloatingPoint.htm + const epsilon = 1.1920928955078125e-7; + const threshold = Math.log(epsilon) + 2.0; + const softplus$1 = unaryKernelFunc$1(Softplus$1, (xi) => { + // Value above which exp(x) may overflow, but softplus(x) == x + // is within machine epsilon. + const tooLarge = xi > -threshold; + // Value below which exp(x) may underflow, but softplus(x) == exp(x) + // is within machine epsilon. + const tooSmall = xi < threshold; + const expX = Math.exp(xi); + let result; + if (tooSmall) { + result = expX; + } + else if (tooLarge) { + result = xi; + } + else { + result = Math.log(1.0 + expX); + } + return result; + }); + const softplusConfig$1 = { + kernelName: Softplus$1, + backendName: 'cpu', + kernelFunc: softplus$1, + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ + function spaceToBatchND$1(args) { + const { inputs, backend, attrs } = args; + const { x } = inputs; + const { blockShape, paddings } = attrs; + assertNotComplex$1([x], 'spaceToBatchND'); + const prod = sizeFromShape(blockShape); + const completePaddings = [[0, 0]]; + completePaddings.push(...paddings); + for (let i = 1 + blockShape.length; i < x.shape.length; ++i) { + completePaddings.push([0, 0]); + } + const paddedX = padV2Config$1.kernelFunc({ + inputs: { x }, + backend, + attrs: { paddings: completePaddings, constantValue: 0 } + }); + const reshapedPaddedShape = getReshaped(paddedX.shape, blockShape, prod, false); + const permutedReshapedPaddedPermutation = getPermuted(reshapedPaddedShape.length, blockShape.length, false); + const flattenShape = getReshapedPermuted(paddedX.shape, blockShape, prod, false); + const reshapeInputs = { x: paddedX }; + const reshapeAttrs = { shape: reshapedPaddedShape }; + const paddedXReshaped = reshape$1({ inputs: reshapeInputs, backend, attrs: reshapeAttrs }); + const transposeInputs = { x: paddedXReshaped }; + const transposeAttrs = { perm: permutedReshapedPaddedPermutation }; + const paddedXT = transpose$1({ inputs: transposeInputs, backend, attrs: transposeAttrs }); + const resultReshapeInputs = { x: paddedXT }; + const resultReshapeAttrs = { shape: flattenShape }; + const result = reshape$1({ inputs: resultReshapeInputs, backend, attrs: resultReshapeAttrs }); + backend.disposeIntermediateTensorInfo(paddedX); + backend.disposeIntermediateTensorInfo(paddedXReshaped); + backend.disposeIntermediateTensorInfo(paddedXT); + return result; + } + const spaceToBatchNDConfig$1 = { + kernelName: SpaceToBatchND, + backendName: 'cpu', + kernelFunc: spaceToBatchND$1 + }; + + /** + * @license + * Copyright 2021 Google LLC. All Rights Reserved. 
+ * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + function sparseFillEmptyRows$1(args) { + const { inputs, backend } = args; + const { indices, values, denseShape, defaultValue } = inputs; + if (denseShape.shape.length !== 1) { + throw new Error(`Dense shape must be a vector, saw: + ${denseShape.shape}`); + } + if (indices.shape.length !== 2) { + throw new Error(`Indices must be a matrix, saw: + ${indices.shape}`); + } + if (values.shape.length !== 1) { + throw new Error(`Values must be a vector, saw: + ${values.shape}`); + } + if (defaultValue.shape.length !== 0) { + throw new Error(`Default value must be a scalar, saw: + ${defaultValue.shape}`); + } + const $indices = backend.data.get(indices.dataId).values; + const $values = backend.data.get(values.dataId).values; + const $denseShape = backend.data.get(denseShape.dataId).values; + const $defaultValue = backend.data.get(defaultValue.dataId).values[0]; + const [outputIndices, outputIndicesShape, outputValues, emptyRowIndicator, reverseIndexMap] = sparseFillEmptyRowsImpl($indices, indices.shape, indices.dtype, $values, values.dtype, $denseShape, $defaultValue); + return [ + backend.makeTensorInfo(outputIndicesShape, indices.dtype, outputIndices), + backend.makeTensorInfo([outputIndicesShape[0]], values.dtype, outputValues), + backend.makeTensorInfo([emptyRowIndicator.length], 'bool', new 
Uint8Array(emptyRowIndicator.map((value) => Number(value)))), + backend.makeTensorInfo([reverseIndexMap.length], indices.dtype, new Int32Array(reverseIndexMap)), + ]; + } + const sparseFillEmptyRowsConfig$1 = { + kernelName: SparseFillEmptyRows, + backendName: 'cpu', + kernelFunc: sparseFillEmptyRows$1, + }; + + /** + * @license + * Copyright 2021 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + function sparseReshape$1(args) { + const { inputs, backend } = args; + const { inputIndices, inputShape, newShape } = inputs; + if (inputIndices.shape.length !== 2) { + throw new Error(`Input indices should be a matrix but received shape + ${inputIndices.shape}`); + } + if (inputShape.shape.length !== 1) { + throw new Error(`Input shape should be a vector but received shape + ${inputShape.shape}`); + } + if (newShape.shape.length !== 1) { + throw new Error(`Target shape should be a vector but received shape ${newShape.shape}`); + } + const $inputShape = Array.from(backend.data.get(inputShape.dataId).values); + const $inputIndices = backend.data.get(inputIndices.dataId).values; + const targetShape = Array.from(backend.data.get(newShape.dataId).values); + const [newIndices, indicesShape, outputShape] = sparseReshapeImpl($inputIndices, inputIndices.shape, inputIndices.dtype, $inputShape, targetShape); + return [ + backend.makeTensorInfo(indicesShape, 
inputIndices.dtype, newIndices), + backend.makeTensorInfo([outputShape.length], newShape.dtype, new Int32Array(outputShape)), + ]; + } + const sparseReshapeConfig$1 = { + kernelName: SparseReshape, + backendName: 'cpu', + kernelFunc: sparseReshape$1, + }; + + /** + * @license + * Copyright 2021 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + function sparseSegmentMean$1(args) { + const { inputs, backend } = args; + const { data, indices, segmentIds } = inputs; + if (data.shape.length < 1) { + throw new Error(`Data should be at least 1 dimensional but received scalar`); + } + if (indices.shape.length !== 1) { + throw new Error(`Indices should be a vector but received shape + ${indices.shape}`); + } + if (segmentIds.shape.length !== 1) { + throw new Error(`Segment ids should be a vector but received shape + ${segmentIds.shape}`); + } + if (indices.shape[0] !== segmentIds.shape[0]) { + throw new Error(`segmentIds and indices should have same size.`); + } + const $data = backend.data.get(data.dataId).values; + const $indices = backend.data.get(indices.dataId).values; + const $segmentIds = backend.data.get(segmentIds.dataId).values; + const [outputData, outputDataShape] = sparseSegmentReductionImpl($data, data.shape, data.dtype, $indices, $segmentIds, true); + return backend.makeTensorInfo(outputDataShape, data.dtype, outputData); + } + const 
sparseSegmentMeanConfig$1 = { + kernelName: SparseSegmentMean, + backendName: 'cpu', + kernelFunc: sparseSegmentMean$1, + }; + + /** + * @license + * Copyright 2021 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + function sparseSegmentSum$1(args) { + const { inputs, backend } = args; + const { data, indices, segmentIds } = inputs; + if (data.shape.length < 1) { + throw new Error(`Data should be at least 1 dimensional but received scalar`); + } + if (indices.shape.length !== 1) { + throw new Error(`Indices should be a vector but received shape + ${indices.shape}`); + } + if (segmentIds.shape.length !== 1) { + throw new Error(`Segment ids should be a vector but received shape + ${segmentIds.shape}`); + } + if (indices.shape[0] !== segmentIds.shape[0]) { + throw new Error(`segmentIds and indices should have same size.`); + } + const $data = backend.data.get(data.dataId).values; + const $indices = backend.data.get(indices.dataId).values; + const $segmentIds = backend.data.get(segmentIds.dataId).values; + const [outputData, outputDataShape] = sparseSegmentReductionImpl($data, data.shape, data.dtype, $indices, $segmentIds); + return backend.makeTensorInfo(outputDataShape, data.dtype, outputData); + } + const sparseSegmentSumConfig$1 = { + kernelName: SparseSegmentSum, + backendName: 'cpu', + kernelFunc: sparseSegmentSum$1, + }; + + /** + * 
@license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + function sparseToDense$1(args) { + const { inputs, backend, attrs } = args; + const { sparseIndices, sparseValues, defaultValue } = inputs; + const { outputShape } = attrs; + const { sliceRank, numUpdates, sliceSize, strides, outputSize } = calculateShapes(sparseValues, sparseIndices, outputShape); + const sumDupeIndices = false; + const indicesBuf = backend.bufferSync(sparseIndices); + let outBuf; + switch (sparseValues.dtype) { + case 'bool': { + const updatesBuf = backend.bufferSync(sparseValues); + const $defaultValue = Boolean(backend.data.get(defaultValue.dataId).values[0]); + outBuf = scatterImpl(indicesBuf, updatesBuf, outputShape, outputSize, sliceSize, numUpdates, sliceRank, strides, $defaultValue, sumDupeIndices); + break; + } + case 'float32': { + const updatesBuf = backend.bufferSync(sparseValues); + const $defaultValue = backend.data.get(defaultValue.dataId).values[0]; + outBuf = scatterImpl(indicesBuf, updatesBuf, outputShape, outputSize, sliceSize, numUpdates, sliceRank, strides, $defaultValue, sumDupeIndices); + break; + } + case 'int32': { + const updatesBuf = backend.bufferSync(sparseValues); + const $defaultValue = backend.data.get(defaultValue.dataId).values[0]; + outBuf = scatterImpl(indicesBuf, updatesBuf, outputShape, outputSize, sliceSize, 
numUpdates, sliceRank, strides, $defaultValue, sumDupeIndices); + break; + } + case 'string': { + const updatesBuf = backend.bufferSync(sparseValues); + const $defaultValue = decodeString(backend.data.get(defaultValue.dataId).values[0]); + outBuf = scatterImpl(indicesBuf, updatesBuf, outputShape, outputSize, sliceSize, numUpdates, sliceRank, strides, $defaultValue, sumDupeIndices); + break; + } + default: + throw new Error(`Unsupported type ${sparseValues.dtype}`); + } + return backend.makeTensorInfo(outputShape, outBuf.dtype, outBuf.values); + } + const sparseToDenseConfig$1 = { + kernelName: SparseToDense, + backendName: 'cpu', + kernelFunc: sparseToDense$1 + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ + function splitV$1(args) { + const { inputs, backend, attrs } = args; + const { x } = inputs; + const { numOrSizeSplits, axis } = attrs; + const $axis = parseAxisParam(axis, x.shape)[0]; + const splitSizes = prepareSplitSize(x, numOrSizeSplits, $axis); + const begin = new Array(x.shape.length).fill(0); + const size = x.shape.slice(); + return splitSizes.map(s => { + const sliceSize = [...size]; + sliceSize[$axis] = s; + const sliceT = slice$1({ inputs: { x }, backend, attrs: { begin, size: sliceSize } }); + begin[$axis] += s; + return sliceT; + }); + } + const splitVConfig$1 = { + kernelName: SplitV, + backendName: 'cpu', + kernelFunc: splitV$1 + }; + + /** + * @license + * Copyright 2019 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ + const squareConfig$1 = { + kernelName: Square, + backendName: 'cpu', + kernelFunc: ({ inputs, backend }) => { + const { x } = inputs; + const cpuBackend = backend; + assertNotComplex$1(x, 'square'); + const values = cpuBackend.data.get(x.dataId).values; + const newValues = new Float32Array(values.length); + for (let i = 0; i < values.length; ++i) { + const value = values[i]; + newValues[i] = value * value; + } + const dataId = cpuBackend.write(newValues, x.shape, x.dtype); + return { dataId, shape: x.shape, dtype: x.dtype }; + } + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the License); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + const step$1 = unaryKernelFunc$1(Step, (xi, attrs) => { + const stepAttrs = attrs; + if (isNaN(xi)) { + return NaN; + } + else { + return xi > 0 ? 1 : stepAttrs.alpha; + } + }); + const stepConfig$1 = { + kernelName: Step, + backendName: 'cpu', + kernelFunc: step$1, + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + function stridedSlice$1(args) { + const { inputs, backend, attrs } = args; + const { x } = inputs; + const { begin, end, strides, beginMask, endMask, ellipsisMask, newAxisMask, shrinkAxisMask } = attrs; + assertNotComplex$1(x, 'stridedSlice'); + const { finalShapeSparse, finalShape, isIdentity, sliceDim0, isSimpleSlice, begin: $begin, end: $end, strides: $strides } = sliceInfo(x.shape, begin, end, strides, beginMask, endMask, ellipsisMask, newAxisMask, shrinkAxisMask); + let result; + // ref: + // https://github.com/tensorflow/tensorflow/blob/master/tensorflow/core/kernels/strided_slice_op.cc + if (isIdentity) { + // Optimization #1, slice is a no-op plus reshape + result = reshape$1({ inputs: { x }, backend, attrs: { shape: finalShape } }); + } + else if (sliceDim0 || isSimpleSlice) { + // Optimization #2, slice is memory contiguous (only occurs in dim 0) + assert$1(x.shape.length >= 1, () => `Input must have rank at least 1, got: ${x.shape.length}`); + const size = computeOutShape$2($begin, $end, $strides); + // To tolerate begin[0] > end[0] (a 0-output slice), we min(begin, end). 
+ const sliced = slice$1({ inputs: { x }, backend, attrs: { begin: $begin, size } }); + result = + reshape$1({ inputs: { x: sliced }, backend, attrs: { shape: finalShape } }); + backend.disposeIntermediateTensorInfo(sliced); + } + else { + const xBuf = backend.bufferSync(x); + const outBuf = stridedSliceImpl(finalShapeSparse, xBuf, $strides, $begin); + result = backend.makeTensorInfo(finalShape, outBuf.dtype, outBuf.values); + } + return result; + } + const stridedSliceConfig$1 = { + kernelName: StridedSlice, + backendName: 'cpu', + kernelFunc: stridedSlice$1 + }; + + /** + * @license + * Copyright 2021 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ + function stringNGrams$1(args) { + const { inputs, backend, attrs } = args; + const { separator, nGramWidths, leftPad, rightPad, padWidth, preserveShortSequences } = attrs; + const { data, dataSplits } = inputs; + const $data = backend.data.get(data.dataId).values; + const $dataSplits = backend.data.get(dataSplits.dataId).values; + const [nGrams, nGramsSplits] = stringNGramsImpl($data, $dataSplits, separator, nGramWidths, leftPad, rightPad, padWidth, preserveShortSequences); + return [ + backend.makeTensorInfo([nGrams.length], 'string', nGrams), + backend.makeTensorInfo(dataSplits.shape, 'int32', nGramsSplits), + ]; + } + const stringNGramsConfig$1 = { + kernelName: StringNGrams, + backendName: 'cpu', + kernelFunc: stringNGrams$1, + }; + + /** + * @license + * Copyright 2021 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ + function stringSplit$1(args) { + const { inputs, backend, attrs } = args; + const { skipEmpty } = attrs; + const { input, delimiter } = inputs; + if (input.dtype !== 'string') { + throw new Error('Input must be of datatype string'); + } + if (input.shape.length !== 1) { + throw new Error(`Input must be a vector, got shape: ${input.shape}`); + } + if (delimiter.shape.length !== 0) { + throw new Error(`Delimiter must be a scalar, got shape: ${delimiter.shape}`); + } + const $input = backend.data.get(input.dataId).values; + const $delimiter = backend.data.get(delimiter.dataId).values[0]; + const [indices, values, shape] = stringSplitImpl($input, $delimiter, skipEmpty); + const outputSize = values.length; + return [ + backend.makeTensorInfo([outputSize, 2], 'int32', indices), + backend.makeTensorInfo([outputSize], 'string', values), + backend.makeTensorInfo([2], 'int32', new Int32Array(shape)) + ]; + } + const stringSplitConfig$1 = { + kernelName: StringSplit, + backendName: 'cpu', + kernelFunc: stringSplit$1, + }; + + /** + * @license + * Copyright 2021 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ + function stringToHashBucketFast$1(args) { + const { inputs, backend, attrs } = args; + const { numBuckets } = attrs; + const { input } = inputs; + if (input.dtype !== 'string') { + throw new Error('Input must be of datatype string'); + } + if (numBuckets <= 0) { + throw new Error(`Number of buckets must be at least 1`); + } + const $input = backend.data.get(input.dataId).values; + const output = stringToHashBucketFastImpl($input, numBuckets); + return backend.makeTensorInfo(input.shape, 'int32', output); + } + const stringToHashBucketFastConfig$1 = { + kernelName: StringToHashBucketFast, + backendName: 'cpu', + kernelFunc: stringToHashBucketFast$1, + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the License); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + const tan$1 = unaryKernelFunc$1(Tan, (xi) => Math.tan(xi)); + const tanConfig$1 = { + kernelName: Tan, + backendName: 'cpu', + kernelFunc: tan$1, + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the License); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + const tanh$1 = unaryKernelFunc$1(Tanh$1, (xi) => Math.tanh(xi)); + const tanhConfig$1 = { + kernelName: Tanh$1, + backendName: 'cpu', + kernelFunc: tanh$1, + }; + + /** + * @license + * Copyright 2022 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ + function tensorScatterUpdate$1(args) { + const { inputs, backend } = args; + const { tensor, indices, updates } = inputs; + const { sliceRank, numUpdates, sliceSize, strides, outputSize } = calculateShapes(updates, indices, tensor.shape); + const sumDupeIndices = false; + const indicesBuf = backend.bufferSync(indices); + const updatesBuf = backend.bufferSync(updates); + const tensorBuf = backend.bufferSync(tensor); + const outBuf = scatterImpl(indicesBuf, updatesBuf, tensor.shape, outputSize, sliceSize, numUpdates, sliceRank, strides, tensorBuf, sumDupeIndices); + return backend.makeTensorInfo(tensor.shape, outBuf.dtype, outBuf.values); + } + const tensorScatterUpdateConfig$1 = { + kernelName: TensorScatterUpdate, + backendName: 'cpu', + kernelFunc: tensorScatterUpdate$1 + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ + function tile$1(args) { + const { inputs, backend, attrs } = args; + const { x } = inputs; + const { reps } = attrs; + assertNotComplex$1(x, 'tile'); + const outBuf = tileImpl(backend.bufferSync(x), reps); + return backend.makeTensorInfo(outBuf.shape, outBuf.dtype, outBuf.values); + } + const tileConfig$1 = { + kernelName: Tile, + backendName: 'cpu', + kernelFunc: tile$1 + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + function topK$1(args) { + const { inputs, backend, attrs } = args; + const { x } = inputs; + const { k, sorted } = attrs; + assertNotComplex$1(x, 'topk'); + const xVals = backend.data.get(x.dataId).values; + const [allTopKVals, allTopKIndices] = topKImpl(xVals, x.shape, x.dtype, k, sorted); + return [ + backend.makeTensorInfo(allTopKVals.shape, allTopKVals.dtype, allTopKVals.values), + backend.makeTensorInfo(allTopKIndices.shape, allTopKIndices.dtype, allTopKIndices.values) + ]; + } + const topKConfig$1 = { + kernelName: TopK, + backendName: 'cpu', + kernelFunc: topK$1 + }; + + /** + * @license + * Copyright 2021 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + function transform$1(args) { + const { inputs, attrs, backend } = args; + const { image, transforms } = inputs; + const { interpolation, fillMode, fillValue, outputShape } = attrs; + const [batch, imageHeight, imageWidth, numChannels] = image.shape; + const [outHeight, outWidth] = outputShape != null ? outputShape : [imageHeight, imageWidth]; + const outShape = [batch, outHeight, outWidth, numChannels]; + const inStrides = computeStrides(image.shape); + const batchInStride = inStrides[0]; + const rowInStride = inStrides[1]; + const colInStride = inStrides[2]; + const outStrides = computeStrides(outShape); + const batchOutStride = outStrides[0]; + const rowOutStride = outStrides[1]; + const colOutStride = outStrides[2]; + const outVals = getTypedArrayFromDType(image.dtype, sizeFromShape(outShape)); + outVals.fill(fillValue); + const imageVals = backend.data.get(image.dataId).values; + const transformVals = backend.data.get(transforms.dataId).values; + // Ref TF implementation: + // https://github.com/tensorflow/tensorflow/blob/master/tensorflow/core/kernels/image/image_ops.h + for (let b = 0; b < batch; ++b) { + const transform = transforms.shape[0] === 1 ? 
+ transformVals : + transformVals.subarray(b * 8, b * 8 + 8); + for (let outY = 0; outY < outHeight; ++outY) { + for (let outX = 0; outX < outWidth; ++outX) { + for (let channel = 0; channel < numChannels; ++channel) { + let val; + const projection = transform[6] * outX + transform[7] * outY + 1; + if (projection === 0) { + // Return the fill value for infinite coordinates, + // which are outside the input image + continue; + } + const inX = (transform[0] * outX + transform[1] * outY + transform[2]) / + projection; + const inY = (transform[3] * outX + transform[4] * outY + transform[5]) / + projection; + const x = mapCoord(inX, imageWidth, fillMode); + const y = mapCoord(inY, imageHeight, fillMode); + switch (interpolation) { + case 'nearest': + val = nearestInterpolation(imageVals, imageHeight, imageWidth, batchInStride, rowInStride, colInStride, b, y, x, channel, fillValue); + break; + case 'bilinear': + val = bilinearInterpolation(imageVals, imageHeight, imageWidth, batchInStride, rowInStride, colInStride, b, y, x, channel, fillValue); + break; + default: + throw new Error(`Error in Transform: Expect 'nearest' or ` + + `'bilinear', but got ${interpolation}`); + } + const ind = b * batchOutStride + outY * rowOutStride + + outX * colOutStride + channel; + outVals[ind] = val; + } + } + } + return backend.makeTensorInfo(outShape, image.dtype, outVals); + } + const dataId = backend.write(outVals, outShape, image.dtype); + return { dataId, shape: image.shape, dtype: image.dtype }; + } + const transformConfig$1 = { + kernelName: Transform, + backendName: 'cpu', + kernelFunc: transform$1 + }; + function mapCoord(outCoord, len, mode) { + switch (mode) { + case 'reflect': + return mapCoordReflect(outCoord, len); + case 'wrap': + return mapCoordWrap(outCoord, len); + case 'nearest': + return mapCoordNearest(outCoord, len); + case 'constant': + default: + return mapCoordConstant(outCoord, len); + } + } + function mapCoordReflect(outCoord, len) { + // Reflect [abcd] to 
[dcba|abcd|dcba]. + let inCoord = outCoord; + if (inCoord < 0) { + if (len <= 1) { + inCoord = 0; + } + else { + const sz2 = 2 * len; + if (inCoord < sz2) { + inCoord = sz2 * Math.trunc(-inCoord / sz2) + inCoord; + } + inCoord = inCoord < -len ? inCoord + sz2 : -inCoord - 1; + } + } + else if (inCoord > len - 1) { + if (len <= 1) { + inCoord = 0; + } + else { + const sz2 = 2 * len; + inCoord -= sz2 * Math.trunc(inCoord / sz2); + if (inCoord >= len) { + inCoord = sz2 - inCoord - 1; + } + } + } + // clamp is necessary because when outCoord = 3.5 and len = 4, + // inCoord = 3.5 and will be rounded to 4 in nearest interpolation. + return clamp(0, inCoord, len - 1); + } + function mapCoordWrap(outCoord, len) { + // Wrap [abcd] to [abcd|abcd|abcd]. + let inCoord = outCoord; + if (inCoord < 0) { + if (len <= 1) { + inCoord = 0; + } + else { + const sz = len - 1; + inCoord += len * (Math.trunc(-inCoord / sz) + 1); + } + } + else if (inCoord > len - 1) { + if (len <= 1) { + inCoord = 0; + } + else { + const sz = len - 1; + inCoord -= len * Math.trunc(inCoord / sz); + } + } + // clamp is necessary because when outCoord = -0.5 and len = 4, + // inCoord = 3.5 and will be rounded to 4 in nearest interpolation. 
+ return clamp(0, inCoord, len - 1); + } + function mapCoordConstant(outCoord, len) { + return outCoord; + } + function mapCoordNearest(outCoord, len) { + return clamp(0, outCoord, len - 1); + } + function readWithFillValue(imageVals, imageHeight, imageWidth, batchStride, rowStride, colStride, batch, y, x, channel, fillValue) { + const ind = batch * batchStride + y * rowStride + x * colStride + channel; + if (0 <= y && y < imageHeight && 0 <= x && x < imageWidth) { + return imageVals[ind]; + } + else { + return fillValue; + } + } + function nearestInterpolation(imageVals, imageHeight, imageWidth, batchStride, rowStride, colStride, batch, y, x, channel, fillValue) { + const $y = Math.round(y); + const $x = Math.round(x); + return readWithFillValue(imageVals, imageHeight, imageWidth, batchStride, rowStride, colStride, batch, $y, $x, channel, fillValue); + } + function bilinearInterpolation(imageVals, imageHeight, imageWidth, batchStride, rowStride, colStride, batch, y, x, channel, fillValue) { + const yFloor = Math.floor(y); + const xFloor = Math.floor(x); + const yCeil = yFloor + 1; + const xCeil = xFloor + 1; + // f(x, yFloor) = (xCeil - x) / (xCeil - xFloor) * f(xFloor, yFloor) + // + (x - xFloor) / (xCeil - xFloor) * f(xCeil, yFloor) + const valueYFloor = (xCeil - x) * + readWithFillValue(imageVals, imageHeight, imageWidth, batchStride, rowStride, colStride, batch, yFloor, xFloor, channel, fillValue) + + (x - xFloor) * + readWithFillValue(imageVals, imageHeight, imageWidth, batchStride, rowStride, colStride, batch, yFloor, xCeil, channel, fillValue); + // f(x, yCeil) = (xCeil - x) / (xCeil - xFloor) * f(xFloor, yCeil) + // + (x - xFloor) / (xCeil - xFloor) * f(xCeil, yCeil) + const valueYCeil = (xCeil - x) * + readWithFillValue(imageVals, imageHeight, imageWidth, batchStride, rowStride, colStride, batch, yCeil, xFloor, channel, fillValue) + + (x - xFloor) * + readWithFillValue(imageVals, imageHeight, imageWidth, batchStride, rowStride, colStride, batch, yCeil, 
xCeil, channel, fillValue); + // f(x, y) = (yCeil - y) / (yCeil - yFloor) * f(x, yFloor) + // + (y - yFloor) / (yCeil - yFloor) * f(x, yCeil) + return (yCeil - y) * valueYFloor + (y - yFloor) * valueYCeil; + } + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the License); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + function unique$1(args) { + const { inputs, attrs, backend } = args; + const { axis } = attrs; + const { x } = inputs; + assertNotComplex$1(x, 'unique'); + const values = backend.data.get(x.dataId).values; + const { outputValues, outputShape, indices } = uniqueImpl(values, axis, x.shape, x.dtype); + return [ + backend.makeTensorInfo(outputShape, x.dtype, outputValues), + backend.makeTensorInfo([indices.length], 'int32', indices), + ]; + } + const uniqueConfig$1 = { + kernelName: Unique, + backendName: 'cpu', + kernelFunc: unique$1, + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + function unpack$1(args) { + const { inputs, backend, attrs } = args; + const { value } = inputs; + let { axis } = attrs; + if (axis < 0) { + axis += value.shape.length; + } + const valueRank = value.shape.length; + const num = value.shape[axis]; + const outShape = new Array(valueRank - 1); + let outIndex = 0; + for (let i = 0; i < valueRank; i++) { + if (i !== axis) { + outShape[outIndex++] = value.shape[i]; + } + } + const begin = new Array(valueRank).fill(0); + const size = value.shape.slice(); + size[axis] = 1; + const res = new Array(num); + for (let i = 0; i < res.length; i++) { + begin[axis] = i; + const tempRes = slice$1({ inputs: { x: value }, backend, attrs: { begin, size } }); + res[i] = reshape$1({ inputs: { x: tempRes }, backend, attrs: { shape: outShape } }); + backend.disposeIntermediateTensorInfo(tempRes); + } + return res; + } + const unpackConfig$1 = { + kernelName: Unpack, + backendName: 'cpu', + kernelFunc: unpack$1 + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + function unsortedSegmentSum$1(args) { + const { inputs, backend, attrs } = args; + const { x, segmentIds } = inputs; + const { numSegments } = attrs; + assertNotComplex$1(x, 'unsortedSegmentSum'); + const xRank = x.shape.length; + const segmentIdsRank = segmentIds.shape.length; + const res = []; + const intermediates = []; + // Reshape the segment id's so that they can be broadcast with + // x. The new shape should be [segmentIds.shape, 1, ..., 1] + const numIters = xRank - segmentIdsRank; + let $segmentIds = segmentIds; + for (let i = 0; i < numIters; ++i) { + const expanded = expandDims$1({ inputs: { input: $segmentIds }, backend, attrs: { dim: i + 1 } }); + $segmentIds = expanded; + intermediates.push(expanded); + } + for (let i = 0; i < numSegments; ++i) { + const scalarValue = createScalarValue(i, 'int32'); + const segmentId = backend.makeTensorInfo([], 'int32', scalarValue); + const mask = equal$1({ inputs: { a: segmentId, b: $segmentIds }, backend }); + const maskCasted = cast$1({ inputs: { x: mask }, backend, attrs: { dtype: 'float32' } }); + const mul = multiply$1({ inputs: { a: maskCasted, b: x }, backend }); + const sumTensorInfo = sum$1({ inputs: { x: mul }, backend, attrs: { axis: 0, keepDims: false } }); + res.push(sumTensorInfo); + intermediates.push(segmentId); + intermediates.push(mask); + intermediates.push(maskCasted); + intermediates.push(mul); + intermediates.push(sumTensorInfo); + } + const result = pack$1({ inputs: res, backend, attrs: { axis: 0 } }); + intermediates.forEach(t => backend.disposeIntermediateTensorInfo(t)); + return result; + } + const unsortedSegmentSumConfig$1 = { + kernelName: UnsortedSegmentSum, + backendName: 'cpu', + kernelFunc: unsortedSegmentSum$1 + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. 
+ * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + // List all kernel configs here + const kernelConfigs$1 = [ + _fusedMatMulConfig$1, + absConfig$1, + acosConfig$1, + acoshConfig$1, + addConfig$1, + addNConfig$1, + allConfig$1, + anyConfig$1, + argMaxConfig$1, + argMinConfig$1, + asinConfig$1, + asinhConfig$1, + atanConfig$1, + atan2Config$1, + atanhConfig$1, + avgPoolConfig$1, + avgPool3DConfig$1, + avgPool3DGradConfig$1, + avgPoolGradConfig$1, + batchMatMulConfig$1, + batchNormConfig$1, + batchToSpaceNDConfig$1, + bincountConfig$1, + bitwiseAndConfig$1, + broadcastArgsConfig$1, + castConfig$1, + ceilConfig$1, + clipByValueConfig$1, + complexConfig$1, + complexAbsConfig$1, + concatConfig$1, + conv2DConfig$1, + conv2DBackpropFilterConfig$1, + conv2DBackpropInputConfig$1, + conv3DConfig$1, + conv3DBackpropFilterV2Config$1, + conv3DBackpropInputV2Config, + cosConfig$1, + coshConfig$1, + cropAndResizeConfig$1, + cumprodConfig$1, + cumsumConfig$1, + denseBincountConfig$1, + depthToSpaceConfig$1, + depthwiseConv2dNativeConfig$1, + depthwiseConv2dNativeBackpropFilterConfig$1, + depthwiseConv2dNativeBackpropInputConfig$1, + diagConfig$1, + dilation2DConfig$1, + dilation2DBackpropFilterConfig, + dilation2DBackpropInputConfig, + drawConfig, + einsumConfig$1, + eluConfig$1, + eluGradConfig$1, + equalConfig$1, + erfConfig$1, + expConfig$1, + expandDimsConfig$1, + expm1Config$1, + 
fftConfig$1, + fillConfig$1, + flipLeftRightConfig$1, + floorConfig$1, + floorDivConfig$1, + fusedConv2DConfig$1, + fusedDepthwiseConv2DConfig$1, + gatherNdConfig$1, + gatherV2Config$1, + greaterConfig$1, + greaterEqualConfig$1, + identityConfig$1, + ifftConfig$1, + imagConfig$1, + isFiniteConfig$1, + isInfConfig$1, + isNaNConfig$1, + leakyReluConfig$1, + lessConfig$1, + lessEqualConfig$1, + linSpaceConfig$1, + logConfig$1, + log1pConfig$1, + logicalAndConfig$1, + logicalNotConfig$1, + logicalOrConfig$1, + LRNConfig$1, + LRNGradConfig$1, + maxConfig$1, + maximumConfig$1, + maxPoolConfig$1, + maxPool3DConfig$1, + maxPool3DGradConfig$1, + maxPoolGradConfig$1, + maxPoolWithArgmaxConfig$1, + meanConfig$1, + minConfig$1, + minimumConfig$1, + mirrorPadConfig$1, + modConfig$1, + multinomialConfig$1, + multiplyConfig$1, + negConfig$1, + nonMaxSuppressionV3Config$1, + nonMaxSuppressionV4Config$1, + nonMaxSuppressionV5Config$1, + notEqualConfig$1, + oneHotConfig$1, + onesLikeConfig$1, + packConfig$1, + padV2Config$1, + powConfig$1, + preluConfig$1, + prodConfig$1, + raggedGatherConfig$1, + raggedRangeConfig$1, + raggedTensorToTensorConfig$1, + rangeConfig$1, + realConfig$1, + realDivConfig$1, + reciprocalConfig$1, + reluConfig$1, + relu6Config$1, + reshapeConfig$1, + resizeBilinearConfig$1, + resizeBilinearGradConfig$1, + resizeNearestNeighborConfig$1, + resizeNearestNeighborGradConfig$1, + reverseConfig$1, + rotateWithOffsetConfig$1, + roundConfig$1, + rsqrtConfig$1, + scatterNdConfig$1, + searchSortedConfig$1, + selectConfig$1, + seluConfig$1, + sigmoidConfig$1, + signConfig$1, + sinConfig$1, + sinhConfig$1, + sliceConfig$1, + softmaxConfig$1, + softplusConfig$1, + spaceToBatchNDConfig$1, + sparseFillEmptyRowsConfig$1, + sparseReshapeConfig$1, + sparseSegmentMeanConfig$1, + sparseSegmentSumConfig$1, + sparseToDenseConfig$1, + splitVConfig$1, + sqrtConfig$1, + squareConfig$1, + squaredDifferenceConfig$1, + staticRegexReplaceConfig$1, + stepConfig$1, + stridedSliceConfig$1, 
+ stringNGramsConfig$1, + stringSplitConfig$1, + stringToHashBucketFastConfig$1, + subConfig$1, + sumConfig$1, + tanConfig$1, + tanhConfig$1, + tensorScatterUpdateConfig$1, + tileConfig$1, + topKConfig$1, + transformConfig$1, + transposeConfig$1, + uniqueConfig$1, + unpackConfig$1, + unsortedSegmentSumConfig$1, + zerosLikeConfig$1 + ]; + for (const kernelConfig of kernelConfigs$1) { + registerKernel(kernelConfig); + } + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + + /** + * @license + * Copyright 2018 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ + const contexts = {}; + const WEBGL_ATTRIBUTES = { + alpha: false, + antialias: false, + premultipliedAlpha: false, + preserveDrawingBuffer: false, + depth: false, + stencil: false, + failIfMajorPerformanceCaveat: true + }; + function clearWebGLContext(webGLVersion) { + delete contexts[webGLVersion]; + } + function setWebGLContext(webGLVersion, gl) { + contexts[webGLVersion] = gl; + } + function getWebGLContext(webGLVersion, customCanvas) { + if (!(webGLVersion in contexts) || customCanvas != null) { + const newCtx = getWebGLRenderingContext(webGLVersion, customCanvas); + if (newCtx !== null) { + contexts[webGLVersion] = newCtx; + } + else { + console.log('Could not get context for WebGL version', webGLVersion); + return null; + } + } + const gl = contexts[webGLVersion]; + if (gl == null || gl.isContextLost()) { + delete contexts[webGLVersion]; + return getWebGLContext(webGLVersion); + } + gl.disable(gl.DEPTH_TEST); + gl.disable(gl.STENCIL_TEST); + gl.disable(gl.BLEND); + gl.disable(gl.DITHER); + gl.disable(gl.POLYGON_OFFSET_FILL); + gl.disable(gl.SAMPLE_COVERAGE); + gl.enable(gl.SCISSOR_TEST); + gl.enable(gl.CULL_FACE); + gl.cullFace(gl.BACK); + return contexts[webGLVersion]; + } + function createCanvas(webGLVersion) { + // Use canvas element for Safari, since its offscreen canvas does not support + // fencing. + if (!env().getBool('IS_SAFARI') && typeof OffscreenCanvas !== 'undefined' && + webGLVersion === 2) { + return new OffscreenCanvas(300, 150); + } + else if (typeof document !== 'undefined') { + return document.createElement('canvas'); + } + else { + throw new Error('Cannot create a canvas in this context'); + } + } + function getWebGLRenderingContext(webGLVersion, customCanvas) { + if (webGLVersion !== 1 && webGLVersion !== 2) { + throw new Error('Cannot get WebGL rendering context, WebGL is disabled.'); + } + const canvas = customCanvas == null ? 
createCanvas(webGLVersion) : customCanvas; + canvas.addEventListener('webglcontextlost', (ev) => { + ev.preventDefault(); + delete contexts[webGLVersion]; + }, false); + if (env().getBool('SOFTWARE_WEBGL_ENABLED')) { + WEBGL_ATTRIBUTES.failIfMajorPerformanceCaveat = false; + } + if (webGLVersion === 1) { + return ( + // tslint:disable-next-line + canvas.getContext('webgl', WEBGL_ATTRIBUTES) || + canvas + .getContext('experimental-webgl', WEBGL_ATTRIBUTES)); + } + return canvas.getContext('webgl2', WEBGL_ATTRIBUTES); + } + + /** + * @license + * Copyright 2017 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + var PackingScheme; + (function (PackingScheme) { + /** + * All values in a single texel are densely packed without any constraints. + * + * This is how the shader encodes a tensor with shape = [2, 3, 4] + * (indices are [batch, row, col]). + * + * 000|001 010|011 020|021 + * ------- ------- ------- + * 002|003 012|013 022|023 + * + * 100|101 110|111 120|121 + * ------- ------- ------- + * 102|103 112|113 122|123 + * + */ + PackingScheme[PackingScheme["DENSE"] = 0] = "DENSE"; + /** + * Single texels contain only values from the same batch, and from adjacent + * rows and columns. + * + * This is how the shader encodes a tensor with shape = [2, 3, 5] + * (indices are [batch, row, col]). 
+ * + * 000|001 002|003 004|xxx 020|021 022|023 024|xxx + * ------- ------- ------- ------- ------- ------- + * 010|011 012|013 014|xxx xxx|xxx xxx|xxx xxx|xxx + * + * 100|101 102|103 104|xxx 120|121 122|123 124|xxx + * ------- ------- ------- ------- ------- ------- + * 110|111 112|113 114|xxx xxx|xxx xxx|xxx xxx|xxx + * + */ + PackingScheme[PackingScheme["SHARED_BATCH"] = 1] = "SHARED_BATCH"; + })(PackingScheme || (PackingScheme = {})); + var TextureUsage; + (function (TextureUsage) { + TextureUsage[TextureUsage["RENDER"] = 0] = "RENDER"; + TextureUsage[TextureUsage["UPLOAD"] = 1] = "UPLOAD"; + TextureUsage[TextureUsage["PIXELS"] = 2] = "PIXELS"; + TextureUsage[TextureUsage["DOWNLOAD"] = 3] = "DOWNLOAD"; + })(TextureUsage || (TextureUsage = {})); + var PhysicalTextureType; + (function (PhysicalTextureType) { + PhysicalTextureType[PhysicalTextureType["UNPACKED_FLOAT16"] = 0] = "UNPACKED_FLOAT16"; + PhysicalTextureType[PhysicalTextureType["UNPACKED_FLOAT32"] = 1] = "UNPACKED_FLOAT32"; + PhysicalTextureType[PhysicalTextureType["PACKED_4X1_UNSIGNED_BYTE"] = 2] = "PACKED_4X1_UNSIGNED_BYTE"; + PhysicalTextureType[PhysicalTextureType["PACKED_2X2_FLOAT32"] = 3] = "PACKED_2X2_FLOAT32"; + PhysicalTextureType[PhysicalTextureType["PACKED_2X2_FLOAT16"] = 4] = "PACKED_2X2_FLOAT16"; + })(PhysicalTextureType || (PhysicalTextureType = {})); + function getUnpackedMatrixTextureShapeWidthHeight(rows, columns) { + return [columns, rows]; + } + function getUnpackedArraySizeFromMatrixSize(matrixSize, channelsPerTexture) { + return matrixSize * channelsPerTexture; + } + function getColorMatrixTextureShapeWidthHeight(rows, columns) { + return [columns * 4, rows]; + } + /** + * Get shape for densely packed RGBA texture. 
+ */ + function getDenseTexShape(shape) { + const size = sizeFromShape(shape); + const texelsNeeded = Math.ceil(size / 4); + return sizeToSquarishShape(texelsNeeded); + } + function getMatrixSizeFromUnpackedArraySize(unpackedSize, channelsPerTexture) { + if (unpackedSize % channelsPerTexture !== 0) { + throw new Error(`unpackedSize (${unpackedSize}) must be a multiple of ` + + `${channelsPerTexture}`); + } + return unpackedSize / channelsPerTexture; + } + function decodeMatrixFromUnpackedColorRGBAArray(unpackedArray, matrix, channels) { + const requiredSize = unpackedArray.length * channels / 4; + if (matrix.length < requiredSize) { + throw new Error(`matrix length (${matrix.length}) must be >= ${requiredSize}`); + } + let dst = 0; + for (let src = 0; src < unpackedArray.length; src += 4) { + for (let c = 0; c < channels; c++) { + matrix[dst++] = unpackedArray[src + c]; + } + } + } + function getPackedMatrixTextureShapeWidthHeight(rows, columns) { + return [ + Math.max(1, Math.ceil(columns / 2)), Math.max(1, Math.ceil(rows / 2)) + ]; + } + function getPackedRGBAArraySizeFromMatrixShape(rows, columns) { + const [w, h] = getPackedMatrixTextureShapeWidthHeight(rows, columns); + return w * h * 4; + } + function getTextureConfig( + // tslint:disable-next-line:no-any + gl, textureHalfFloatExtension) { + // tslint:disable-next-line:no-any + const glany = gl; + let internalFormatFloat; + let internalFormatHalfFloat; + let internalFormatPackedHalfFloat; + let internalFormatPackedFloat; + let textureFormatFloat; + let downloadTextureFormat; + let downloadUnpackNumChannels; + let defaultNumChannels; + let textureTypeHalfFloat; + let textureTypeFloat; + if (env().getNumber('WEBGL_VERSION') === 2) { + internalFormatFloat = glany.R32F; + internalFormatHalfFloat = glany.R16F; + internalFormatPackedHalfFloat = glany.RGBA16F; + internalFormatPackedFloat = glany.RGBA32F; + textureFormatFloat = glany.RED; + downloadUnpackNumChannels = 4; + defaultNumChannels = 1; + 
textureTypeHalfFloat = glany.HALF_FLOAT; + textureTypeFloat = glany.FLOAT; + downloadTextureFormat = glany.RGBA8; + } + else { + internalFormatFloat = gl.RGBA; + internalFormatHalfFloat = gl.RGBA; + internalFormatPackedHalfFloat = gl.RGBA; + internalFormatPackedFloat = glany.RGBA; + textureFormatFloat = gl.RGBA; + downloadUnpackNumChannels = 4; + defaultNumChannels = 4; + textureTypeHalfFloat = textureHalfFloatExtension != null ? + textureHalfFloatExtension.HALF_FLOAT_OES : + null; + textureTypeFloat = gl.FLOAT; + downloadTextureFormat = gl.RGBA; + } + return { + internalFormatFloat, + internalFormatHalfFloat, + internalFormatPackedHalfFloat, + internalFormatPackedFloat, + textureFormatFloat, + downloadTextureFormat, + downloadUnpackNumChannels, + defaultNumChannels, + textureTypeHalfFloat, + textureTypeFloat + }; + } + + /** + * @license + * Copyright 2017 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ + function callAndCheck(gl, func) { + const returnValue = func(); + if (env().getBool('DEBUG')) { + checkWebGLError(gl); + } + return returnValue; + } + function checkWebGLError(gl) { + const error = gl.getError(); + if (error !== gl.NO_ERROR) { + throw new Error('WebGL Error: ' + getWebGLErrorMessage(gl, error)); + } + } + // https://en.wikipedia.org/wiki/Half-precision_floating-point_format + const MIN_FLOAT16 = 5.96e-8; + const MAX_FLOAT16 = 65504; + function canBeRepresented(num) { + if (env().getBool('WEBGL_RENDER_FLOAT32_ENABLED') || num === 0 || + (MIN_FLOAT16 < Math.abs(num) && Math.abs(num) < MAX_FLOAT16)) { + return true; + } + return false; + } + function getWebGLErrorMessage(gl, status) { + switch (status) { + case gl.NO_ERROR: + return 'NO_ERROR'; + case gl.INVALID_ENUM: + return 'INVALID_ENUM'; + case gl.INVALID_VALUE: + return 'INVALID_VALUE'; + case gl.INVALID_OPERATION: + return 'INVALID_OPERATION'; + case gl.INVALID_FRAMEBUFFER_OPERATION: + return 'INVALID_FRAMEBUFFER_OPERATION'; + case gl.OUT_OF_MEMORY: + return 'OUT_OF_MEMORY'; + case gl.CONTEXT_LOST_WEBGL: + return 'CONTEXT_LOST_WEBGL'; + default: + return `Unknown error code ${status}`; + } + } + function getExtensionOrThrow(gl, extensionName) { + return throwIfNull(gl, () => gl.getExtension(extensionName), 'Extension "' + extensionName + '" not supported on this browser.'); + } + function createVertexShader$1(gl, vertexShaderSource) { + const vertexShader = throwIfNull(gl, () => gl.createShader(gl.VERTEX_SHADER), 'Unable to create vertex WebGLShader.'); + callAndCheck(gl, () => gl.shaderSource(vertexShader, vertexShaderSource)); + callAndCheck(gl, () => gl.compileShader(vertexShader)); + if (gl.getShaderParameter(vertexShader, gl.COMPILE_STATUS) === false) { + console.log(gl.getShaderInfoLog(vertexShader)); + throw new Error('Failed to compile vertex shader.'); + } + return vertexShader; + } + function 
createFragmentShader(gl, fragmentShaderSource) { + const fragmentShader = throwIfNull(gl, () => gl.createShader(gl.FRAGMENT_SHADER), 'Unable to create fragment WebGLShader.'); + callAndCheck(gl, () => gl.shaderSource(fragmentShader, fragmentShaderSource)); + callAndCheck(gl, () => gl.compileShader(fragmentShader)); + if (env().get('ENGINE_COMPILE_ONLY')) { + return fragmentShader; + } + if (gl.getShaderParameter(fragmentShader, gl.COMPILE_STATUS) === false) { + logShaderSourceAndInfoLog(fragmentShaderSource, gl.getShaderInfoLog(fragmentShader)); + throw new Error('Failed to compile fragment shader.'); + } + return fragmentShader; + } + const lineNumberRegex = /ERROR: [0-9]+:([0-9]+):/g; + function logShaderSourceAndInfoLog(shaderSource, shaderInfoLog) { + const lineNumberRegexResult = lineNumberRegex.exec(shaderInfoLog); + if (lineNumberRegexResult == null) { + console.log(`Couldn't parse line number in error: ${shaderInfoLog}`); + console.log(shaderSource); + return; + } + const lineNumber = +lineNumberRegexResult[1]; + const shaderLines = shaderSource.split('\n'); + const pad = shaderLines.length.toString().length + 2; + const linesWithLineNumbers = shaderLines.map((line, lineNumber) => rightPad((lineNumber + 1).toString(), pad) + line); + let maxLineLength = 0; + for (let i = 0; i < linesWithLineNumbers.length; i++) { + maxLineLength = Math.max(linesWithLineNumbers[i].length, maxLineLength); + } + const beforeErrorLines = linesWithLineNumbers.slice(0, lineNumber - 1); + const errorLine = linesWithLineNumbers.slice(lineNumber - 1, lineNumber); + const afterErrorLines = linesWithLineNumbers.slice(lineNumber); + console.log(beforeErrorLines.join('\n')); + console.log(shaderInfoLog.split('\n')[0]); + console.log(`%c ${rightPad(errorLine[0], maxLineLength)}`, 'border:1px solid red; background-color:#e3d2d2; color:#a61717'); + console.log(afterErrorLines.join('\n')); + } + function createProgram(gl) { + return throwIfNull(gl, () => gl.createProgram(), 'Unable to 
create WebGLProgram.'); + } + function linkProgram(gl, program) { + callAndCheck(gl, () => gl.linkProgram(program)); + if (env().get('ENGINE_COMPILE_ONLY')) { + return; + } + if (gl.getProgramParameter(program, gl.LINK_STATUS) === false) { + console.log(gl.getProgramInfoLog(program)); + throw new Error('Failed to link vertex and fragment shaders.'); + } + } + /// validateProgram is effectively "If we `useProgram(program); drawArrays();`, + /// give feedback in log about perf/correctness warnings or errors that would + /// occur." + /// So make sure we set up all vertex/texture/sampler/uniform data before + /// calling validateProgram! + function validateProgram(gl, program) { + callAndCheck(gl, () => gl.validateProgram(program)); + if (gl.getProgramParameter(program, gl.VALIDATE_STATUS) === false) { + console.log(gl.getProgramInfoLog(program)); + throw new Error('Shader program validation failed.'); + } + } + function createStaticVertexBuffer(gl, data) { + const buffer = throwIfNull(gl, () => gl.createBuffer(), 'Unable to create WebGLBuffer'); + callAndCheck(gl, () => gl.bindBuffer(gl.ARRAY_BUFFER, buffer)); + callAndCheck(gl, () => gl.bufferData(gl.ARRAY_BUFFER, data, gl.STATIC_DRAW)); + return buffer; + } + function createStaticIndexBuffer(gl, data) { + const buffer = throwIfNull(gl, () => gl.createBuffer(), 'Unable to create WebGLBuffer'); + callAndCheck(gl, () => gl.bindBuffer(gl.ELEMENT_ARRAY_BUFFER, buffer)); + callAndCheck(gl, () => gl.bufferData(gl.ELEMENT_ARRAY_BUFFER, data, gl.STATIC_DRAW)); + return buffer; + } + function getNumChannels() { + if (env().getNumber('WEBGL_VERSION') === 2) { + return 1; + } + return 4; + } + function createTexture(gl) { + return throwIfNull(gl, () => gl.createTexture(), 'Unable to create WebGLTexture.'); + } + function validateTextureSize(width, height) { + const maxTextureSize = env().getNumber('WEBGL_MAX_TEXTURE_SIZE'); + if ((width <= 0) || (height <= 0)) { + const requested = `[${width}x${height}]`; + throw new 
Error('Requested texture size ' + requested + ' is invalid.'); + } + if ((width > maxTextureSize) || (height > maxTextureSize)) { + const requested = `[${width}x${height}]`; + const max = `[${maxTextureSize}x${maxTextureSize}]`; + throw new Error('Requested texture size ' + requested + + ' greater than WebGL maximum on this browser / GPU ' + max + '.'); + } + } + function createFramebuffer(gl) { + return throwIfNull(gl, () => gl.createFramebuffer(), 'Unable to create WebGLFramebuffer.'); + } + function bindVertexBufferToProgramAttribute(gl, program, attribute, buffer, arrayEntriesPerItem, itemStrideInBytes, itemOffsetInBytes) { + const loc = gl.getAttribLocation(program, attribute); + if (loc === -1) { + // The GPU compiler decided to strip out this attribute because it's unused, + // thus no need to bind. + return false; + } + callAndCheck(gl, () => gl.bindBuffer(gl.ARRAY_BUFFER, buffer)); + callAndCheck(gl, () => gl.vertexAttribPointer(loc, arrayEntriesPerItem, gl.FLOAT, false, itemStrideInBytes, itemOffsetInBytes)); + callAndCheck(gl, () => gl.enableVertexAttribArray(loc)); + return true; + } + function bindTextureUnit(gl, texture, textureUnit) { + validateTextureUnit(gl, textureUnit); + callAndCheck(gl, () => gl.activeTexture(gl.TEXTURE0 + textureUnit)); + callAndCheck(gl, () => gl.bindTexture(gl.TEXTURE_2D, texture)); + } + function unbindTextureUnit(gl, textureUnit) { + validateTextureUnit(gl, textureUnit); + callAndCheck(gl, () => gl.activeTexture(gl.TEXTURE0 + textureUnit)); + callAndCheck(gl, () => gl.bindTexture(gl.TEXTURE_2D, null)); + } + function getProgramUniformLocationOrThrow(gl, program, uniformName) { + return throwIfNull(gl, () => gl.getUniformLocation(program, uniformName), 'uniform "' + uniformName + '" not present in program.'); + } + function getProgramUniformLocation(gl, program, uniformName) { + return gl.getUniformLocation(program, uniformName); + } + function bindTextureToProgramUniformSampler(gl, texture, uniformSamplerLocation, 
textureUnit) { + callAndCheck(gl, () => bindTextureUnit(gl, texture, textureUnit)); + callAndCheck(gl, () => gl.uniform1i(uniformSamplerLocation, textureUnit)); + } + function bindCanvasToFramebuffer(gl) { + callAndCheck(gl, () => gl.bindFramebuffer(gl.FRAMEBUFFER, null)); + callAndCheck(gl, () => gl.viewport(0, 0, gl.canvas.width, gl.canvas.height)); + callAndCheck(gl, () => gl.scissor(0, 0, gl.canvas.width, gl.canvas.height)); + } + function bindColorTextureToFramebuffer(gl, texture, framebuffer) { + callAndCheck(gl, () => gl.bindFramebuffer(gl.FRAMEBUFFER, framebuffer)); + callAndCheck(gl, () => gl.framebufferTexture2D(gl.FRAMEBUFFER, gl.COLOR_ATTACHMENT0, gl.TEXTURE_2D, texture, 0)); + } + function unbindColorTextureFromFramebuffer(gl, framebuffer) { + callAndCheck(gl, () => gl.bindFramebuffer(gl.FRAMEBUFFER, framebuffer)); + callAndCheck(gl, () => gl.framebufferTexture2D(gl.FRAMEBUFFER, gl.COLOR_ATTACHMENT0, gl.TEXTURE_2D, null, 0)); + } + function validateFramebuffer(gl) { + const status = gl.checkFramebufferStatus(gl.FRAMEBUFFER); + if (status !== gl.FRAMEBUFFER_COMPLETE) { + throw new Error('Error binding framebuffer: ' + getFramebufferErrorMessage(gl, status)); + } + } + function getFramebufferErrorMessage(gl, status) { + switch (status) { + case gl.FRAMEBUFFER_INCOMPLETE_ATTACHMENT: + return 'FRAMEBUFFER_INCOMPLETE_ATTACHMENT'; + case gl.FRAMEBUFFER_INCOMPLETE_MISSING_ATTACHMENT: + return 'FRAMEBUFFER_INCOMPLETE_MISSING_ATTACHMENT'; + case gl.FRAMEBUFFER_INCOMPLETE_DIMENSIONS: + return 'FRAMEBUFFER_INCOMPLETE_DIMENSIONS'; + case gl.FRAMEBUFFER_UNSUPPORTED: + return 'FRAMEBUFFER_UNSUPPORTED'; + default: + return `unknown error ${status}`; + } + } + function throwIfNull(gl, returnTOrNull, failureMessage) { + const tOrNull = callAndCheck(gl, () => returnTOrNull()); + if (tOrNull == null) { + throw new Error(failureMessage); + } + return tOrNull; + } + function validateTextureUnit(gl, textureUnit) { + const maxTextureUnit = gl.MAX_COMBINED_TEXTURE_IMAGE_UNITS 
- 1; + const glTextureUnit = textureUnit + gl.TEXTURE0; + if (glTextureUnit < gl.TEXTURE0 || glTextureUnit > maxTextureUnit) { + const textureUnitRange = `[gl.TEXTURE0, gl.TEXTURE${maxTextureUnit}]`; + throw new Error(`textureUnit must be in ${textureUnitRange}.`); + } + } + function getBatchDim(shape, dimsToSkip = 2) { + return sizeFromShape(shape.slice(0, shape.length - dimsToSkip)); + } + function getRowsCols(shape) { + if (shape.length === 0) { + throw Error('Cannot get rows and columns of an empty shape array.'); + } + return [ + shape.length > 1 ? shape[shape.length - 2] : 1, shape[shape.length - 1] + ]; + } + function getShapeAs3D(shape) { + let shapeAs3D = [1, 1, 1]; + const isScalar = shape.length === 0 || (shape.length === 1 && shape[0] === 1); + if (!isScalar) { + shapeAs3D = + [getBatchDim(shape), ...getRowsCols(shape)]; + } + return shapeAs3D; + } + function getTextureShapeFromLogicalShape(logShape, isPacked = false) { + let maxTexSize = env().getNumber('WEBGL_MAX_TEXTURE_SIZE'); + let maxSizeForNarrowTex = env().getNumber('WEBGL_MAX_SIZE_FOR_NARROW_TEXTURE'); + if (maxSizeForNarrowTex === Infinity && + env().getBool('WEBGL_AUTO_SQUARIFY_NARROW_TEXTURE_SHAPE')) { + maxSizeForNarrowTex = maxTexSize / 2; + } + if (isPacked) { + maxTexSize = maxTexSize * 2; + maxSizeForNarrowTex = maxSizeForNarrowTex * 2; + // This logic ensures we accurately count the number of packed texels needed + // to accommodate the tensor. We can only pack values in the same texel if + // they are from adjacent pairs of rows/cols within the same batch. So if a + // tensor has 3 rows, we pretend it has 4 rows in order to account for the + // fact that the texels containing the third row are half empty. + logShape = logShape.map((d, i) => i >= logShape.length - 2 ? + nearestLargerEven(logShape[i]) : + logShape[i]); + // Packed texture height is at least 2 (the channel height of a single + // texel). 
+ if (logShape.length === 1) { + logShape = [2, logShape[0]]; + } + } + // If logical shape is 2, we don't squeeze, since we want to match physical. + if (logShape.length !== 2) { + const squeezeResult = squeezeShape(logShape); + logShape = squeezeResult.newShape; + } + let size = sizeFromShape(logShape); + let textureShape = null; + if (logShape.length <= 1 && size <= maxTexSize) { + textureShape = [1, size]; + } + else if (logShape.length === 2 && logShape[0] <= maxTexSize && + logShape[1] <= maxTexSize) { + textureShape = logShape; + } + else if (logShape.length === 3 && logShape[0] * logShape[1] <= maxTexSize && + logShape[2] <= maxTexSize) { + textureShape = [logShape[0] * logShape[1], logShape[2]]; + } + else if (logShape.length === 3 && logShape[0] <= maxTexSize && + logShape[1] * logShape[2] <= maxTexSize) { + textureShape = [logShape[0], logShape[1] * logShape[2]]; + } + else if (logShape.length === 4 && + logShape[0] * logShape[1] * logShape[2] <= maxTexSize && + logShape[3] <= maxTexSize) { + textureShape = [logShape[0] * logShape[1] * logShape[2], logShape[3]]; + } + else if (logShape.length === 4 && logShape[0] <= maxTexSize && + logShape[1] * logShape[2] * logShape[3] <= maxTexSize) { + textureShape = [logShape[0], logShape[1] * logShape[2] * logShape[3]]; + } + // true if one edge length is 1 (1 or 2, if packed), while another edge + // length exceeds maxSizeForNarrowTex. + const isLongNarrowTex = textureShape != null && + Math.max(...textureShape) > maxSizeForNarrowTex && + Math.min(...textureShape) <= (isPacked ? 2 : 1) && + Math.min(...textureShape) > 0; + if (textureShape == null || isLongNarrowTex) { + if (isPacked) { + // For packed textures size equals the number of channels required to + // accommodate the texture data. However in order to squarify such that + // inner dimensions stay even, we rewrite size to equal the number of + // texels. Then in the return statement we rehydrate the squarified + // dimensions to channel units. 
+ const batchDim = getBatchDim(logShape); + let rows = 2, cols = 2; + if (logShape.length) { + [rows, cols] = getRowsCols(logShape); + } + size = batchDim * (rows / 2) * (cols / 2); + textureShape = + sizeToSquarishShape(size).map(d => d * 2); + } + else { + textureShape = sizeToSquarishShape(size); + } + } + return textureShape; + } + function isEven(n) { + return n % 2 === 0; + } + /** + * This determines whether reshaping a packed texture requires rearranging + * the data within the texture, assuming 2x2 packing. + */ + function isReshapeFree(shape1, shape2) { + shape1 = shape1.slice(-2); + shape2 = shape2.slice(-2); + if (arraysEqual(shape1, shape2)) { + return true; + } + if (!shape1.length || !shape2.length) { // One of the shapes is a scalar. + return true; + } + if (shape1[0] === 0 || shape1[1] === 0 || shape2[0] === 0 || + shape2[1] === 0) { + return true; + } + if (shape1.length !== shape2.length) { // One of the shapes is a vector. + const shape1Cols = shape1[shape1.length - 1]; + const shape2Cols = shape2[shape2.length - 1]; + if (shape1Cols === shape2Cols) { + return true; + } + if (isEven(shape1Cols) && isEven(shape2Cols) && + (shape1[0] === 1 || shape2[0] === 1)) { + return true; + } + } + return shape1[1] === shape2[1] && isEven(shape1[0]) && isEven(shape2[0]); + } + // We cache webgl params because the environment gets reset between + // unit tests and we don't want to constantly query the WebGLContext for + // MAX_TEXTURE_SIZE. 
+ let MAX_TEXTURE_SIZE; + let MAX_TEXTURES_IN_SHADER; + function getWebGLMaxTextureSize(webGLVersion) { + if (MAX_TEXTURE_SIZE == null) { + const gl = getWebGLContext(webGLVersion); + MAX_TEXTURE_SIZE = gl.getParameter(gl.MAX_TEXTURE_SIZE); + } + return MAX_TEXTURE_SIZE; + } + function resetMaxTextureSize() { + MAX_TEXTURE_SIZE = null; + } + function resetMaxTexturesInShader() { + MAX_TEXTURES_IN_SHADER = null; + } + function getMaxTexturesInShader(webGLVersion) { + if (MAX_TEXTURES_IN_SHADER == null) { + const gl = getWebGLContext(webGLVersion); + MAX_TEXTURES_IN_SHADER = gl.getParameter(gl.MAX_TEXTURE_IMAGE_UNITS); + } + // We cap at 16 to avoid spurious runtime "memory exhausted" error. + return Math.min(16, MAX_TEXTURES_IN_SHADER); + } + function getWebGLDisjointQueryTimerVersion(webGLVersion) { + if (webGLVersion === 0) { + return 0; + } + let queryTimerVersion; + const gl = getWebGLContext(webGLVersion); + if (hasExtension(gl, 'EXT_disjoint_timer_query_webgl2') && + webGLVersion === 2) { + queryTimerVersion = 2; + } + else if (hasExtension(gl, 'EXT_disjoint_timer_query')) { + queryTimerVersion = 1; + } + else { + queryTimerVersion = 0; + } + return queryTimerVersion; + } + function hasExtension(gl, extensionName) { + const ext = gl.getExtension(extensionName); + return ext != null; + } + function isWebGLVersionEnabled(webGLVersion) { + try { + const gl = getWebGLContext(webGLVersion); + if (gl != null) { + return true; + } + } + catch (e) { + console.log('Error when getting WebGL context: ', e); + return false; + } + return false; + } + function isCapableOfRenderingToFloatTexture(webGLVersion) { + if (webGLVersion === 0) { + return false; + } + const gl = getWebGLContext(webGLVersion); + if (webGLVersion === 1) { + if (!hasExtension(gl, 'OES_texture_float')) { + return false; + } + } + else { + if (!hasExtension(gl, 'EXT_color_buffer_float')) { + return false; + } + } + const isFrameBufferComplete = createFloatTextureAndBindToFramebuffer(gl); + return 
isFrameBufferComplete; + } + /** + * Check if we can download values from a float/half-float texture. + * + * Note that for performance reasons we use binding a texture to a framebuffer + * as a proxy for ability to download float values later using readPixels. The + * texture params of this texture will not match those in readPixels exactly + * but if we are unable to bind some kind of float texture to the frameBuffer + * then we definitely will not be able to read float values from it. + */ + function isDownloadFloatTextureEnabled(webGLVersion) { + if (webGLVersion === 0) { + return false; + } + const gl = getWebGLContext(webGLVersion); + if (webGLVersion === 1) { + if (!hasExtension(gl, 'OES_texture_float')) { + return false; + } + if (!hasExtension(gl, 'WEBGL_color_buffer_float')) { + return false; + } + } + else { + if (hasExtension(gl, 'EXT_color_buffer_float')) { + return createFloatTextureAndBindToFramebuffer(gl); + } + const COLOR_BUFFER_HALF_FLOAT = 'EXT_color_buffer_half_float'; + if (hasExtension(gl, COLOR_BUFFER_HALF_FLOAT)) { + const textureHalfFloatExtension = gl.getExtension(COLOR_BUFFER_HALF_FLOAT); + return createHalfFloatTextureAndBindToFramebuffer(gl, textureHalfFloatExtension); + } + return false; + } + const isFrameBufferComplete = createFloatTextureAndBindToFramebuffer(gl); + return isFrameBufferComplete; + } + function createFloatTextureAndBindToFramebuffer(gl) { + const texConfig = getTextureConfig(gl); + const texture = gl.createTexture(); + gl.bindTexture(gl.TEXTURE_2D, texture); + const width = 1; + const height = 1; + gl.texImage2D(gl.TEXTURE_2D, 0, texConfig.internalFormatFloat, width, height, 0, texConfig.textureFormatFloat, texConfig.textureTypeFloat, null); + const frameBuffer = gl.createFramebuffer(); + gl.bindFramebuffer(gl.FRAMEBUFFER, frameBuffer); + gl.framebufferTexture2D(gl.FRAMEBUFFER, gl.COLOR_ATTACHMENT0, gl.TEXTURE_2D, texture, 0); + const isFrameBufferComplete = gl.checkFramebufferStatus(gl.FRAMEBUFFER) === 
gl.FRAMEBUFFER_COMPLETE; + gl.bindTexture(gl.TEXTURE_2D, null); + gl.bindFramebuffer(gl.FRAMEBUFFER, null); + gl.deleteTexture(texture); + gl.deleteFramebuffer(frameBuffer); + return isFrameBufferComplete; + } + function createHalfFloatTextureAndBindToFramebuffer( + // tslint:disable-next-line:no-any + gl, textureHalfFloatExtension) { + const texConfig = getTextureConfig(gl, textureHalfFloatExtension); + const texture = gl.createTexture(); + gl.bindTexture(gl.TEXTURE_2D, texture); + const width = 1; + const height = 1; + gl.texImage2D(gl.TEXTURE_2D, 0, texConfig.internalFormatHalfFloat, width, height, 0, texConfig.textureFormatFloat, texConfig.textureTypeHalfFloat, null); + const frameBuffer = gl.createFramebuffer(); + gl.bindFramebuffer(gl.FRAMEBUFFER, frameBuffer); + gl.framebufferTexture2D(gl.FRAMEBUFFER, gl.COLOR_ATTACHMENT0, gl.TEXTURE_2D, texture, 0); + const isFrameBufferComplete = gl.checkFramebufferStatus(gl.FRAMEBUFFER) === gl.FRAMEBUFFER_COMPLETE; + gl.bindTexture(gl.TEXTURE_2D, null); + gl.bindFramebuffer(gl.FRAMEBUFFER, null); + gl.deleteTexture(texture); + gl.deleteFramebuffer(frameBuffer); + return isFrameBufferComplete; + } + function isWebGLFenceEnabled(webGLVersion) { + if (webGLVersion !== 2) { + return false; + } + const gl = getWebGLContext(webGLVersion); + // tslint:disable-next-line:no-any + const isEnabled = gl.fenceSync != null; + return isEnabled; + } + function assertNotComplex(tensor, opName) { + if (!Array.isArray(tensor)) { + tensor = [tensor]; + } + tensor.forEach(t => { + if (t != null) { + assert$1(t.dtype !== 'complex64', () => `${opName} does not support complex64 tensors ` + + 'in the WebGL backend.'); + } + }); + } + + var webgl_util = /*#__PURE__*/Object.freeze({ + __proto__: null, + assertNotComplex: assertNotComplex, + bindCanvasToFramebuffer: bindCanvasToFramebuffer, + bindColorTextureToFramebuffer: bindColorTextureToFramebuffer, + bindTextureToProgramUniformSampler: bindTextureToProgramUniformSampler, + bindTextureUnit: 
bindTextureUnit, + bindVertexBufferToProgramAttribute: bindVertexBufferToProgramAttribute, + callAndCheck: callAndCheck, + canBeRepresented: canBeRepresented, + createFragmentShader: createFragmentShader, + createFramebuffer: createFramebuffer, + createProgram: createProgram, + createStaticIndexBuffer: createStaticIndexBuffer, + createStaticVertexBuffer: createStaticVertexBuffer, + createTexture: createTexture, + createVertexShader: createVertexShader$1, + getBatchDim: getBatchDim, + getExtensionOrThrow: getExtensionOrThrow, + getFramebufferErrorMessage: getFramebufferErrorMessage, + getMaxTexturesInShader: getMaxTexturesInShader, + getNumChannels: getNumChannels, + getProgramUniformLocation: getProgramUniformLocation, + getProgramUniformLocationOrThrow: getProgramUniformLocationOrThrow, + getRowsCols: getRowsCols, + getShapeAs3D: getShapeAs3D, + getTextureShapeFromLogicalShape: getTextureShapeFromLogicalShape, + getWebGLDisjointQueryTimerVersion: getWebGLDisjointQueryTimerVersion, + getWebGLErrorMessage: getWebGLErrorMessage, + getWebGLMaxTextureSize: getWebGLMaxTextureSize, + hasExtension: hasExtension, + isCapableOfRenderingToFloatTexture: isCapableOfRenderingToFloatTexture, + isDownloadFloatTextureEnabled: isDownloadFloatTextureEnabled, + isReshapeFree: isReshapeFree, + isWebGLFenceEnabled: isWebGLFenceEnabled, + isWebGLVersionEnabled: isWebGLVersionEnabled, + linkProgram: linkProgram, + logShaderSourceAndInfoLog: logShaderSourceAndInfoLog, + resetMaxTextureSize: resetMaxTextureSize, + resetMaxTexturesInShader: resetMaxTexturesInShader, + unbindColorTextureFromFramebuffer: unbindColorTextureFromFramebuffer, + unbindTextureUnit: unbindTextureUnit, + validateFramebuffer: validateFramebuffer, + validateProgram: validateProgram, + validateTextureSize: validateTextureSize + }); + + /** + * @license + * Copyright 2019 Google LLC. All Rights Reserved. 
+ * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + const ENV = env(); + /** + * This file contains WebGL-specific flag registrations. + */ + /** + * True if WebGL is supported. + */ + ENV.registerFlag('HAS_WEBGL', () => ENV.getNumber('WEBGL_VERSION') > 0); + /** 0: No WebGL, 1: WebGL 1.0, 2: WebGL 2.0. */ + ENV.registerFlag('WEBGL_VERSION', () => { + if (isWebGLVersionEnabled(2)) { + return 2; + } + else if (isWebGLVersionEnabled(1)) { + return 1; + } + return 0; + }); + /** Whether to check for numerical representation problems. */ + ENV.registerFlag('WEBGL_CHECK_NUMERICAL_PROBLEMS', () => false); + ENV.registerFlag('WEBGL_BUFFER_SUPPORTED', () => ENV.get('WEBGL_VERSION') === 2); + /** Whether the WebGL backend will sometimes forward ops to the CPU. */ + ENV.registerFlag('WEBGL_CPU_FORWARD', () => true); + /** Whether the WebGL backend will always use f16 textures for rendering. */ + ENV.registerFlag('WEBGL_FORCE_F16_TEXTURES', () => false); + /** Whether to turn all packing related flags on. */ + ENV.registerFlag('WEBGL_PACK', () => ENV.getBool('HAS_WEBGL')); + /** Whether we will pack the batchnormalization op. */ + ENV.registerFlag('WEBGL_PACK_NORMALIZATION', () => ENV.getBool('WEBGL_PACK')); + /** Whether we will pack the clip op. */ + ENV.registerFlag('WEBGL_PACK_CLIP', () => ENV.getBool('WEBGL_PACK')); + /** Whether we will pack the depthwise conv op. 
*/ + ENV.registerFlag('WEBGL_PACK_DEPTHWISECONV', () => ENV.getBool('WEBGL_PACK')); + /** Whether we will pack binary ops. */ + ENV.registerFlag('WEBGL_PACK_BINARY_OPERATIONS', () => ENV.getBool('WEBGL_PACK')); + /** Whether we will pack unary ops. */ + ENV.registerFlag('WEBGL_PACK_UNARY_OPERATIONS', () => ENV.getBool('WEBGL_PACK')); + /** Whether we will pack array ops. */ + ENV.registerFlag('WEBGL_PACK_ARRAY_OPERATIONS', () => ENV.getBool('WEBGL_PACK')); + /** Whether we will pack image ops. */ + ENV.registerFlag('WEBGL_PACK_IMAGE_OPERATIONS', () => ENV.getBool('WEBGL_PACK')); + /** Whether we will pack reduce ops. */ + ENV.registerFlag('WEBGL_PACK_REDUCE', () => ENV.getBool('WEBGL_PACK')); + /** Whether packed WebGL kernels lazily unpack their outputs. */ + ENV.registerFlag('WEBGL_LAZILY_UNPACK', () => ENV.getBool('WEBGL_PACK')); + /** Whether we will use the im2col algorithm to speed up convolutions. */ + ENV.registerFlag('WEBGL_CONV_IM2COL', () => ENV.getBool('WEBGL_PACK')); + /** Whether we will pack conv2dTranspose op. */ + ENV.registerFlag('WEBGL_PACK_CONV2DTRANSPOSE', () => ENV.getBool('WEBGL_PACK')); + /** The maximum texture dimension. */ + ENV.registerFlag('WEBGL_MAX_TEXTURE_SIZE', () => getWebGLMaxTextureSize(ENV.getNumber('WEBGL_VERSION'))); + /** The maximum texture dimension. */ + ENV.registerFlag('WEBGL_MAX_TEXTURES_IN_SHADER', () => getMaxTexturesInShader(ENV.getNumber('WEBGL_VERSION'))); + /** + * The disjoint_query_timer extension version. + * 0: disabled, 1: EXT_disjoint_timer_query, 2: + * EXT_disjoint_timer_query_webgl2. + * In Firefox with WebGL 2.0, + * EXT_disjoint_timer_query_webgl2 is not available, so we must use the + * WebGL 1.0 extension. 
+ */ + ENV.registerFlag('WEBGL_DISJOINT_QUERY_TIMER_EXTENSION_VERSION', () => { + const webGLVersion = ENV.getNumber('WEBGL_VERSION'); + if (webGLVersion === 0) { + return 0; + } + return getWebGLDisjointQueryTimerVersion(webGLVersion); + }); + /** + * Whether the timer object from the disjoint_query_timer extension gives + * timing information that is reliable. + */ + ENV.registerFlag('WEBGL_DISJOINT_QUERY_TIMER_EXTENSION_RELIABLE', () => ENV.getNumber('WEBGL_DISJOINT_QUERY_TIMER_EXTENSION_VERSION') > 0 && + !isMobile()); + /** + * Whether the device is physically capable of rendering to float32 textures. + */ + ENV.registerFlag('WEBGL_RENDER_FLOAT32_CAPABLE', () => isCapableOfRenderingToFloatTexture(ENV.getNumber('WEBGL_VERSION'))); + /** + * Whether rendering to float32 textures is enabled. If disabled, renders to + * float16 textures. + */ + ENV.registerFlag('WEBGL_RENDER_FLOAT32_ENABLED', () => { + return ENV.getBool('WEBGL_FORCE_F16_TEXTURES') ? + false : + ENV.getBool('WEBGL_RENDER_FLOAT32_CAPABLE'); + }); + /** + * Whether downloading float textures is enabled (16 or 32 bit). If disabled, + * uses IEEE 754 encoding of the float32 values to 4 uint8 when downloading. + */ + ENV.registerFlag('WEBGL_DOWNLOAD_FLOAT_ENABLED', () => isDownloadFloatTextureEnabled(ENV.getNumber('WEBGL_VERSION'))); + /** Whether the fence API is available. */ + ENV.registerFlag('WEBGL_FENCE_API_ENABLED', () => isWebGLFenceEnabled(ENV.getNumber('WEBGL_VERSION'))); + /** + * Tensors with size <= than this will be uploaded as uniforms, not textures. + */ + ENV.registerFlag('WEBGL_SIZE_UPLOAD_UNIFORM', () => { + // Use uniform uploads only when 32bit floats are supported. In + // 16bit + // environments there are problems with comparing a 16bit texture value + // with a 32bit uniform value. + const useUniforms = ENV.getBool('WEBGL_RENDER_FLOAT32_ENABLED'); + return useUniforms ? 
4 : 0; + }); + /** + * If the total number of bytes allocated on the GPU is greater than this + * number, we will aggressively delete textures upon disposal with + * gl.deleteMatrixTexture, rather than making them available for reuse. + * + * Default value -1 indicates that we will never aggressively delete textures. + */ + ENV.registerFlag('WEBGL_DELETE_TEXTURE_THRESHOLD', () => { + return -1; + }, threshold => { + if (!(typeof threshold === 'number')) { + throw new Error('WEBGL_DELETE_TEXTURE_THRESHOLD must be a number but ' + + `got ${threshold}.`); + } + if (threshold < 0 && threshold !== -1) { + throw new Error(`WEBGL_DELETE_TEXTURE_THRESHOLD must be -1 (indicating never ` + + `delete) or at least 0, but got ${threshold}.`); + } + }); + /** + * Trigger a manual GL command flush if the threshold of time has passed since + * previous Kernel execution. This can be useful for Andorid device where GL + * command flush are delayed un til the end of javascript task. This value is + * measured in millisecond. Typically you want to set this value to close to 1. + * + * Default value 1 for mobile chrome, and -1 for rest cases. -1 indicates that + * we will not enforce manual flush and depend on system default flush schedule. + */ + ENV.registerFlag('WEBGL_FLUSH_THRESHOLD', () => { + return isMobile() ? 1 : -1; + }, threshold => { + if (!(typeof threshold === 'number')) { + throw new Error('WEBGL_FLUSH_THRESHOLD must be a number but got ' + + `${threshold}.`); + } + if (threshold < 0 && threshold !== -1) { + throw new Error(`WEBGL_FLUSH_THRESHOLD must be -1 (indicating never ` + + `manual flush) or at least 0, but got ${threshold}.`); + } + }); + /** + * Threshold for input tensor size that determines whether WebGL backend will + * delegate computation to CPU. + * + * Default value is 128. + */ + ENV.registerFlag('CPU_HANDOFF_SIZE_THRESHOLD', () => 128); + /** Whether we will use shapes uniforms. 
*/ + ENV.registerFlag('WEBGL_USE_SHAPES_UNIFORMS', () => false); + /** + * Threshold for last dimension of input tensor that determines whether + * WebGL backend for the Top K op will delegate computation to CPU. If input + * is smaller than threshold then CPU will be used + * + * Default value is 100000. + */ + ENV.registerFlag('TOPK_LAST_DIM_CPU_HANDOFF_SIZE_THRESHOLD', () => 100000); + /** + * Threshold for K that determines whether + * WebGL backend for the Top K op will delegate computation to CPU. If k + * is larger than threshold then CPU will be used + * + * Default value is 128. + */ + ENV.registerFlag('TOPK_K_CPU_HANDOFF_THRESHOLD', () => 128); + /** Whether we will use the experimental conv op. */ + ENV.registerFlag('WEBGL_EXP_CONV', () => false); + /** + * If the device performance is low or if no hardware GPU is available, whether + * software WebGL will be used. + */ + ENV.registerFlag('SOFTWARE_WEBGL_ENABLED', () => ENV.getBool('IS_TEST')); + /** + * For narrow texture (physical height or physical width is 1), if the length of + * any texture edges exceed the threshold, the texture will be reshaped to be + * more squarish. + * + * This flag is used to help some GPUs that could not provide correct + * interpolations for long skinny triangles. We found Mali GPU probably has this + * problem: https://github.com/tensorflow/tfjs/issues/6775. + */ + ENV.registerFlag('WEBGL_MAX_SIZE_FOR_NARROW_TEXTURE', () => Infinity); + /** + * If the flag is set to true, the max size of the narrow texture will be auto + * computed and it will be considerred as a threshold to reshape the narrow + * texture to be more squarish. + * + * This flag is used to help some GPUs that could not provide correct + * interpolations for long skinny triangles. We found Mali GPU probably has this + * problem: https://github.com/tensorflow/tfjs/issues/6775. + */ + ENV.registerFlag('WEBGL_AUTO_SQUARIFY_NARROW_TEXTURE_SHAPE', () => false); + /** + * Whether to use the customized isnan. 
It's only useful for webgl2 since webgl1 + * doesn't have the builtin isnan. + */ + ENV.registerFlag('WEBGL2_ISNAN_CUSTOM', () => false); + /** Experimental flag, whether enter compile only phase. */ + ENV.registerFlag('ENGINE_COMPILE_ONLY', () => false); + + /** + * @license + * Copyright 2018 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + function getGlslDifferences() { + let version; + let attribute; + let varyingVs; + let varyingFs; + let texture2D; + let output; + let defineOutput; + let defineSpecialNaN; + let defineSpecialInf; + let defineRound; + if (env().getNumber('WEBGL_VERSION') === 2) { + version = '#version 300 es'; + attribute = 'in'; + varyingVs = 'out'; + varyingFs = 'in'; + texture2D = 'texture'; + output = 'outputColor'; + defineOutput = 'out vec4 outputColor;'; + // Use custom isnan definition to work across differences between + // implementations on various platforms. While this should happen in ANGLE + // we still see differences between android and windows (on chrome) when + // using isnan directly. Since WebGL2 supports uint type and + // floatBitsToUinT built-in function, we could implment isnan following + // IEEE 754 rules. + // NaN defination in IEEE 754-1985 is : + // - sign = either 0 or 1. + // - biased exponent = all 1 bits. 
+ // - fraction = anything except all 0 bits (since all 0 bits represents + // infinity). + // https://en.wikipedia.org/wiki/IEEE_754-1985#Representation_of_non-numbers + defineSpecialNaN = env().getBool('WEBGL2_ISNAN_CUSTOM') ? ` + bool isnan_custom(float val) { + uint floatToUint = floatBitsToUint(val); + return (floatToUint & 0x7fffffffu) > 0x7f800000u; + } + + bvec4 isnan_custom(vec4 val) { + return bvec4(isnan_custom(val.x), + isnan_custom(val.y), isnan_custom(val.z), isnan_custom(val.w)); + } + + #define isnan(value) isnan_custom(value) + ` : + ''; + // In webgl 2 we do not need to specify a custom isinf so there is no + // need for a special INFINITY constant. + defineSpecialInf = ``; + defineRound = ` + #define round(value) newRound(value) + int newRound(float value) { + return int(floor(value + 0.5)); + } + + ivec4 newRound(vec4 value) { + return ivec4(floor(value + vec4(0.5))); + } + `; + } + else { + version = ''; + attribute = 'attribute'; + varyingVs = 'varying'; + varyingFs = 'varying'; + texture2D = 'texture2D'; + output = 'gl_FragColor'; + defineOutput = ''; + // WebGL1 has no built in isnan so we define one here. + defineSpecialNaN = ` + #define isnan(value) isnan_custom(value) + bool isnan_custom(float val) { + return (val > 0. || val < 1. || val == 0.) ? false : true; + } + bvec4 isnan_custom(vec4 val) { + return bvec4(isnan(val.x), isnan(val.y), isnan(val.z), isnan(val.w)); + } + `; + defineSpecialInf = ` + uniform float INFINITY; + + bool isinf(float val) { + return abs(val) == INFINITY; + } + bvec4 isinf(vec4 val) { + return equal(abs(val), vec4(INFINITY)); + } + `; + defineRound = ` + int round(float value) { + return int(floor(value + 0.5)); + } + + ivec4 round(vec4 value) { + return ivec4(floor(value + vec4(0.5))); + } + `; + } + return { + version, + attribute, + varyingVs, + varyingFs, + texture2D, + output, + defineOutput, + defineSpecialNaN, + defineSpecialInf, + defineRound + }; + } + + /** + * @license + * Copyright 2018 Google LLC. 
All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + /** + * Produces GLSL code that derives logical coordinates from a flat + * index. The code performs integer division with each stride and decrements + * the index until the index equals the final dimension coordinate. + */ + function getLogicalCoordinatesFromFlatIndex(coords, shape, index = 'index') { + const strides = computeStrides(shape); + return strides + .map((stride, i) => { + const line1 = `int ${coords[i]} = ${index} / ${stride}`; + const line2 = i === strides.length - 1 ? + `int ${coords[i + 1]} = ${index} - ${coords[i]} * ${stride}` : + `index -= ${coords[i]} * ${stride}`; + return `${line1}; ${line2};`; + }) + .join(''); + } + function getOutputLogicalCoordinatesFromFlatIndexByUniform(coords, shape, index = 'index') { + const strides = computeStrides(shape); + return strides + .map((_, i) => { + const line1 = `int ${coords[i]} = ${index} / outShapeStrides[${i}]`; + const line2 = i === strides.length - 1 ? + `int ${coords[i + 1]} = ${index} - ${coords[i]} * outShapeStrides[${i}]` : + `index -= ${coords[i]} * outShapeStrides[${i}]`; + return `${line1}; ${line2};`; + }) + .join(''); + } + // Produces GLSL code that computes strides. 
+ function symbolicallyComputeStrides(indicesArr, variableName) { + const numCoords = indicesArr.length; + const shape = indicesArr.map(d => `${variableName}[${d}]`); + const strides = new Array(numCoords - 1); + strides[numCoords - 2] = shape[numCoords - 1]; + for (let i = numCoords - 3; i >= 0; --i) { + strides[i] = `(${strides[i + 1]} * ${shape[i + 1]})`; + } + return strides; + } + function getLogicalCoordinatesFromFlatIndexByUniform(coords, variableName, index = 'index') { + const indicesArray = coords.map((_, i) => i); + const strides = symbolicallyComputeStrides(indicesArray, variableName); + return strides + .map((_, i) => { + const line1 = `int ${coords[i]} = ${index} / ${strides[i]}`; + const line2 = i === strides.length - 1 ? + `int ${coords[i + 1]} = ${index} - ${coords[i]} * ${strides[i]}` : + `index -= ${coords[i]} * ${strides[i]}`; + return `${line1}; ${line2};`; + }) + .join(''); + } + function buildVec(x) { + if (x.length === 1) { + return `${x[0]}`; + } + return `vec${x.length}(${x.join(',')})`; + } + /** + * Produces GLSL code that computes the dot product of the input x and y + * vectors. Handles splitting inputs into increments of vec4s when necessary. 
+ */ + function dotify(x, y) { + if (x.length !== y.length) { + throw new Error(`Vectors to be dotted must be of the same length -` + + `got ${x.length} and ${y.length}`); + } + const slices = []; + const nearestVec4 = Math.floor(x.length / 4); + const nearestVec4Remainder = x.length % 4; + for (let i = 0; i < nearestVec4; i++) { + const xSlice = x.slice(i * 4, i * 4 + 4); + const ySlice = y.slice(i * 4, i * 4 + 4); + slices.push(`${buildVec(xSlice)}, ${buildVec(ySlice)}`); + } + if (nearestVec4Remainder !== 0) { + let xSlice = x.slice(nearestVec4 * 4); + let ySlice = y.slice(nearestVec4 * 4); + if (xSlice.length === 1) { + xSlice = xSlice.map(d => `float(${d})`); + ySlice = ySlice.map(d => `float(${d})`); + } + slices.push(`${buildVec(xSlice)}, ${buildVec(ySlice)}`); + } + return slices.map((d, i) => `dot(${d})`).join('+'); + } + /** + * Produces GLSL that computes the flat index from 3D coordinates. + */ + function getFlatIndexFrom3D(shape) { + const strides = computeStrides(shape).map(d => d.toString()); + return ` + int getFlatIndex(ivec3 coords) { + return coords.x * ${strides[0]} + coords.y * ${strides[1]} + coords.z; + } +`; + } + function getFlatIndexFrom3DOutput() { + return ` + int getFlatIndex(ivec3 coords) { + return coords.x * outShapeStrides[0] + coords.y * outShapeStrides[1] + coords.z; + } +`; + } + const ENCODE_FLOAT_SNIPPET = ` + const float FLOAT_MAX = 1.70141184e38; + const float FLOAT_MIN = 1.17549435e-38; + + lowp vec4 encode_float(highp float v) { + if (isnan(v)) { + return vec4(255, 255, 255, 255); + } + + highp float av = abs(v); + + if(av < FLOAT_MIN) { + return vec4(0.0, 0.0, 0.0, 0.0); + } else if(v > FLOAT_MAX) { + return vec4(0.0, 0.0, 128.0, 127.0) / 255.0; + } else if(v < -FLOAT_MAX) { + return vec4(0.0, 0.0, 128.0, 255.0) / 255.0; + } + + highp vec4 c = vec4(0,0,0,0); + + highp float e = floor(log2(av)); + highp float m = exp2(fract(log2(av))) - 1.0; + + c[2] = floor(128.0 * m); + m -= c[2] / 128.0; + c[1] = floor(32768.0 * m); + m 
-= c[1] / 32768.0; + c[0] = floor(8388608.0 * m); + + highp float ebias = e + 127.0; + c[3] = floor(ebias / 2.0); + ebias -= c[3] * 2.0; + c[2] += floor(ebias) * 128.0; + + c[3] += 128.0 * step(0.0, -v); + + return c / 255.0; + } +`; + + /** + * @license + * Copyright 2017 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + const { getBroadcastDims } = backend_util; + function makeShader(inputsInfo, outputShape, program) { + const prefixSnippets = []; + inputsInfo.forEach(x => { + const size = sizeFromShape(x.shapeInfo.logicalShape); + // Snippet when we decided to upload the values as uniform. + if (x.shapeInfo.isUniform) { + prefixSnippets.push(`uniform float ${x.name}${size > 1 ? 
`[${size}]` : ''};`); + } + else { + prefixSnippets.push(`uniform sampler2D ${x.name};`); + prefixSnippets.push(`uniform int offset${x.name};`); + } + if (program.enableShapeUniforms) { + const { uniformShape } = getUniformInfoFromShape(program.packedInputs, x.shapeInfo.logicalShape, x.shapeInfo.texShape); + switch (uniformShape.length) { + case 1: + prefixSnippets.push(`uniform int ${x.name}Shape;`); + break; + case 2: + prefixSnippets.push(`uniform ivec2 ${x.name}Shape;`); + break; + case 3: + prefixSnippets.push(`uniform ivec3 ${x.name}Shape;`); + break; + case 4: + prefixSnippets.push(`uniform ivec4 ${x.name}Shape;`); + break; + default: + break; + } + prefixSnippets.push(`uniform ivec2 ${x.name}TexShape;`); + } + }); + if (program.enableShapeUniforms) { + switch (outputShape.logicalShape.length) { + case 1: + prefixSnippets.push(`uniform int outShape;`); + break; + case 2: + prefixSnippets.push(`uniform ivec2 outShape;`); + prefixSnippets.push(`uniform int outShapeStrides;`); + break; + case 3: + prefixSnippets.push(`uniform ivec3 outShape;`); + prefixSnippets.push(`uniform ivec2 outShapeStrides;`); + break; + case 4: + prefixSnippets.push(`uniform ivec4 outShape;`); + prefixSnippets.push(`uniform ivec3 outShapeStrides;`); + break; + default: + break; + } + prefixSnippets.push(`uniform ivec2 outTexShape;`); + } + if (program.customUniforms) { + program.customUniforms.forEach((d) => { + prefixSnippets.push(`uniform ${d.type} ${d.name}${d.arrayIndex ? 
`[${d.arrayIndex}]` : ''};`); + }); + } + const inputPrefixSnippet = prefixSnippets.join('\n'); + const inputSamplingSnippet = inputsInfo + .map(x => getInputSamplingSnippet(x, outputShape, program.packedInputs, program.enableShapeUniforms)) + .join('\n'); + const outTexShape = outputShape.texShape; + const glsl = getGlslDifferences(); + const floatTextureSampleSnippet = getFloatTextureSampleSnippet(glsl); + let outputSamplingSnippet; + let floatTextureSetOutputSnippet; + let shaderPrefix = getShaderPrefix(glsl); + if (outputShape.isPacked) { + outputSamplingSnippet = getPackedOutputSamplingSnippet(outputShape.logicalShape, outTexShape, program.enableShapeUniforms); + floatTextureSetOutputSnippet = getFloatTextureSetRGBASnippet(glsl); + } + else { + outputSamplingSnippet = getOutputSamplingSnippet(outputShape.logicalShape, outTexShape, program.enableShapeUniforms); + floatTextureSetOutputSnippet = getFloatTextureSetRSnippet(glsl); + } + if (program.packedInputs) { + shaderPrefix += SHADER_PACKED_PREFIX; + } + const source = [ + shaderPrefix, floatTextureSampleSnippet, floatTextureSetOutputSnippet, + inputPrefixSnippet, outputSamplingSnippet, inputSamplingSnippet, + program.userCode + ].join('\n'); + return source; + } + function getSamplerFromInInfo(inInfo, enableShapeUniforms = false) { + const shape = inInfo.shapeInfo.logicalShape; + switch (shape.length) { + case 0: + return getSamplerScalar(inInfo, enableShapeUniforms); + case 1: + return getSampler1D(inInfo, enableShapeUniforms); + case 2: + return getSampler2D(inInfo, enableShapeUniforms); + case 3: + return getSampler3D(inInfo, enableShapeUniforms); + case 4: + return getSampler4D(inInfo, enableShapeUniforms); + case 5: + return getSampler5D(inInfo); + case 6: + return getSampler6D(inInfo); + default: + throw new Error(`${shape.length}-D input sampling` + + ` is not yet supported`); + } + } + function getPackedSamplerFromInInfo(inInfo, enableShapeUniforms) { + const shape = inInfo.shapeInfo.logicalShape; + 
switch (shape.length) { + case 0: + return getPackedSamplerScalar(inInfo); + case 1: + return getPackedSampler1D(inInfo, enableShapeUniforms); + case 2: + return getPackedSampler2D(inInfo, enableShapeUniforms); + case 3: + return getPackedSampler3D(inInfo, enableShapeUniforms); + default: + return getPackedSamplerND(inInfo, enableShapeUniforms); + } + } + function getInputSamplingSnippet(inInfo, outShapeInfo, usesPackedTextures = false, enableShapeUniforms) { + let res = ''; + if (usesPackedTextures) { + res += getPackedSamplerFromInInfo(inInfo, enableShapeUniforms); + } + else { + res += getSamplerFromInInfo(inInfo, enableShapeUniforms); + } + const inShape = inInfo.shapeInfo.logicalShape; + const outShape = outShapeInfo.logicalShape; + if (inShape.length <= outShape.length) { + if (usesPackedTextures) { + res += getPackedSamplerAtOutputCoords(inInfo, outShapeInfo); + } + else { + res += getSamplerAtOutputCoords(inInfo, outShapeInfo); + } + } + return res; + } + function getPackedOutputSamplingSnippet(outShape, outTexShape, enableShapeUniforms) { + switch (outShape.length) { + case 0: + return getOutputScalarCoords(); + case 1: + return getOutputPacked1DCoords(outShape, outTexShape, enableShapeUniforms); + case 2: + return getOutputPacked2DCoords(outShape, outTexShape, enableShapeUniforms); + case 3: + return getOutputPacked3DCoords(outShape, outTexShape, enableShapeUniforms); + default: + return getOutputPackedNDCoords(outShape, outTexShape, enableShapeUniforms); + } + } + function getOutputSamplingSnippet(outShape, outTexShape, enableShapeUniforms) { + switch (outShape.length) { + case 0: + return getOutputScalarCoords(); + case 1: + return getOutput1DCoords(outShape, outTexShape, enableShapeUniforms); + case 2: + return getOutput2DCoords(outShape, outTexShape, enableShapeUniforms); + case 3: + return getOutput3DCoords(outShape, outTexShape, enableShapeUniforms); + case 4: + return getOutput4DCoords(outShape, outTexShape, enableShapeUniforms); + case 5: + return 
getOutput5DCoords(outShape, outTexShape); + case 6: + return getOutput6DCoords(outShape, outTexShape); + default: + throw new Error(`${outShape.length}-D output sampling is not yet supported`); + } + } + function getFloatTextureSampleSnippet(glsl) { + return ` + float sampleTexture(sampler2D textureSampler, vec2 uv) { + return ${glsl.texture2D}(textureSampler, uv).r; + } + `; + } + function getFloatTextureSetRSnippet(glsl) { + return ` + void setOutput(float val) { + ${glsl.output} = vec4(val, 0, 0, 0); + } + `; + } + function getFloatTextureSetRGBASnippet(glsl) { + return ` + void setOutput(vec4 val) { + ${glsl.output} = val; + } + `; + } + function getShaderPrefix(glsl) { + const SHADER_PREFIX = `${glsl.version} + precision highp float; + precision highp int; + precision highp sampler2D; + ${glsl.varyingFs} vec2 resultUV; + ${glsl.defineOutput} + const vec2 halfCR = vec2(0.5, 0.5); + + struct ivec5 + { + int x; + int y; + int z; + int w; + int u; + }; + + struct ivec6 + { + int x; + int y; + int z; + int w; + int u; + int v; + }; + + uniform float NAN; + ${glsl.defineSpecialNaN} + ${glsl.defineSpecialInf} + ${glsl.defineRound} + + int imod(int x, int y) { + return x - y * (x / y); + } + + int idiv(int a, int b, float sign) { + int res = a / b; + int mod = imod(a, b); + if (sign < 0. 
&& mod != 0) { + res -= 1; + } + return res; + } + + //Based on the work of Dave Hoskins + //https://www.shadertoy.com/view/4djSRW + #define HASHSCALE1 443.8975 + float random(float seed){ + vec2 p = resultUV * seed; + vec3 p3 = fract(vec3(p.xyx) * HASHSCALE1); + p3 += dot(p3, p3.yzx + 19.19); + return fract((p3.x + p3.y) * p3.z); + } + + ${SAMPLE_1D_SNIPPET} + ${SAMPLE_2D_SNIPPET} + ${SAMPLE_3D_SNIPPET} + `; + return SHADER_PREFIX; + } + const SAMPLE_1D_SNIPPET = ` +vec2 uvFromFlat(int texNumR, int texNumC, int index) { + int texR = index / texNumC; + int texC = index - texR * texNumC; + return (vec2(texC, texR) + halfCR) / vec2(texNumC, texNumR); +} +vec2 packedUVfrom1D(int texNumR, int texNumC, int index) { + int texelIndex = index / 2; + int texR = texelIndex / texNumC; + int texC = texelIndex - texR * texNumC; + return (vec2(texC, texR) + halfCR) / vec2(texNumC, texNumR); +} +`; + const SAMPLE_2D_SNIPPET = ` +vec2 packedUVfrom2D(int texelsInLogicalRow, int texNumR, + int texNumC, int row, int col) { + int texelIndex = (row / 2) * texelsInLogicalRow + (col / 2); + int texR = texelIndex / texNumC; + int texC = texelIndex - texR * texNumC; + return (vec2(texC, texR) + halfCR) / vec2(texNumC, texNumR); +} +`; + const SAMPLE_3D_SNIPPET = ` +vec2 packedUVfrom3D(int texNumR, int texNumC, + int texelsInBatch, int texelsInLogicalRow, int b, + int row, int col) { + int index = b * texelsInBatch + (row / 2) * texelsInLogicalRow + (col / 2); + int texR = index / texNumC; + int texC = index - texR * texNumC; + return (vec2(texC, texR) + halfCR) / vec2(texNumC, texNumR); +} +`; + const SHADER_PACKED_PREFIX = ` + float getChannel(vec4 frag, vec2 innerDims) { + vec2 modCoord = mod(innerDims, 2.); + return modCoord.x == 0. ? + (modCoord.y == 0. ? frag.r : frag.g) : + (modCoord.y == 0. ? frag.b : frag.a); + } + float getChannel(vec4 frag, int dim) { + float modCoord = mod(float(dim), 2.); + return modCoord == 0. ? 
frag.r : frag.g; + } +`; + function getOutputScalarCoords() { + return ` + int getOutputCoords() { + return 0; + } + `; + } + function getOutputPacked1DCoords(shape, texShape, enableShapeUniforms) { + const packedTexShape = [Math.ceil(texShape[0] / 2), Math.ceil(texShape[1] / 2)]; + if (packedTexShape[0] === 1) { + if (enableShapeUniforms) { + return ` + int getOutputCoords() { + return 2 * int(resultUV.x * ceil(float(outTexShape[1]) / 2.0)); + } + `; + } + return ` + int getOutputCoords() { + return 2 * int(resultUV.x * ${packedTexShape[1]}.0); + } + `; + } + if (packedTexShape[1] === 1) { + if (enableShapeUniforms) { + return ` + int getOutputCoords() { + return 2 * int(resultUV.y * ceil(float(outTexShape[0]) / 2.0)); + } + `; + } + return ` + int getOutputCoords() { + return 2 * int(resultUV.y * ${packedTexShape[0]}.0); + } + `; + } + if (enableShapeUniforms) { + return ` + int getOutputCoords() { + ivec2 packedTexShape = ivec2(ceil(float(outTexShape[0]) / 2.0), ceil(float(outTexShape[1]) / 2.0)); + ivec2 resTexRC = ivec2(resultUV.yx * + vec2(packedTexShape[0], packedTexShape[1])); + return 2 * (resTexRC.x * packedTexShape[1] + resTexRC.y); + } + `; + } + return ` + int getOutputCoords() { + ivec2 resTexRC = ivec2(resultUV.yx * + vec2(${packedTexShape[0]}, ${packedTexShape[1]})); + return 2 * (resTexRC.x * ${packedTexShape[1]} + resTexRC.y); + } + `; + } + function getOutput1DCoords(shape, texShape, enableShapeUniforms) { + if (texShape[0] === 1) { + if (enableShapeUniforms) { + return ` + int getOutputCoords() { + return int(resultUV.x * float(outTexShape[1])); + } + `; + } + return ` + int getOutputCoords() { + return int(resultUV.x * ${texShape[1]}.0); + } + `; + } + if (texShape[1] === 1) { + if (enableShapeUniforms) { + return ` + int getOutputCoords() { + return int(resultUV.y * float(outTexShape[0])); + } + `; + } + return ` + int getOutputCoords() { + return int(resultUV.y * ${texShape[0]}.0); + } + `; + } + if (enableShapeUniforms) { + return ` + int 
getOutputCoords() { + ivec2 resTexRC = ivec2(resultUV.yx * + vec2(outTexShape[0], outTexShape[1])); + return resTexRC.x * outTexShape[1] + resTexRC.y; + } + `; + } + return ` + int getOutputCoords() { + ivec2 resTexRC = ivec2(resultUV.yx * + vec2(${texShape[0]}, ${texShape[1]})); + return resTexRC.x * ${texShape[1]} + resTexRC.y; + } + `; + } + function getOutputPacked3DCoords(shape, texShape, enableShapeUniforms) { + if (enableShapeUniforms) { + return ` + ivec3 getOutputCoords() { + ivec2 packedTexShape = ivec2(ceil(float(outTexShape[0]) / 2.0), ceil(float(outTexShape[1]) / 2.0)); + int texelsInLogicalRow = int(ceil(float(outShape[2]) / 2.0)); + int texelsInBatch = texelsInLogicalRow * int(ceil(float(outShape[1]) / 2.0)); + ivec2 resTexRC = ivec2(resultUV.yx * + vec2(packedTexShape[0], packedTexShape[1])); + int index = resTexRC.x * packedTexShape[1] + resTexRC.y; + + int b = index / texelsInBatch; + index -= b * texelsInBatch; + + int r = 2 * (index / texelsInLogicalRow); + int c = imod(index, texelsInLogicalRow) * 2; + + return ivec3(b, r, c); + } + `; + } + const packedTexShape = [Math.ceil(texShape[0] / 2), Math.ceil(texShape[1] / 2)]; + const texelsInLogicalRow = Math.ceil(shape[2] / 2); + const texelsInBatch = texelsInLogicalRow * Math.ceil(shape[1] / 2); + return ` + ivec3 getOutputCoords() { + ivec2 resTexRC = ivec2(resultUV.yx * + vec2(${packedTexShape[0]}, ${packedTexShape[1]})); + int index = resTexRC.x * ${packedTexShape[1]} + resTexRC.y; + + int b = index / ${texelsInBatch}; + index -= b * ${texelsInBatch}; + + int r = 2 * (index / ${texelsInLogicalRow}); + int c = imod(index, ${texelsInLogicalRow}) * 2; + + return ivec3(b, r, c); + } + `; + } + function getOutput3DCoords(shape, texShape, enableShapeUniforms) { + if (enableShapeUniforms) { + const coordsFromIndexSnippet = getOutputLogicalCoordinatesFromFlatIndexByUniform(['r', 'c', 'd'], shape); + return ` + ivec3 getOutputCoords() { + ivec2 resTexRC = ivec2(resultUV.yx * + vec2(outTexShape[0], 
outTexShape[1])); + int index = resTexRC.x * outTexShape[1] + resTexRC.y; + ${coordsFromIndexSnippet} + return ivec3(r, c, d); + } +`; + } + const coordsFromIndexSnippet = getLogicalCoordinatesFromFlatIndex(['r', 'c', 'd'], shape); + return ` + ivec3 getOutputCoords() { + ivec2 resTexRC = ivec2(resultUV.yx * + vec2(${texShape[0]}, ${texShape[1]})); + int index = resTexRC.x * ${texShape[1]} + resTexRC.y; + ${coordsFromIndexSnippet} + return ivec3(r, c, d); + } + `; + } + function getOutputPackedNDCoords(shape, texShape, enableShapeUniforms) { + if (enableShapeUniforms) { + // TODO: support 5d and 6d + return ` + ivec4 getOutputCoords() { + ivec2 packedTexShape = ivec2(ceil(float(outTexShape[0]) / 2.0), ceil(float(outTexShape[1]) / 2.0)); + ivec2 resTexRC = ivec2(resultUV.yx * + vec2(packedTexShape[0], packedTexShape[1])); + int index = resTexRC.x * packedTexShape[1] + resTexRC.y; + + int texelsInLogicalRow = int(ceil(float(outShape[3]) / 2.0)); + int texelsInBatch = texelsInLogicalRow * int(ceil(float(outShape[2]) / 2.0)); + int texelsInBatchN = texelsInBatch * outShape[1]; + + int b2 = index / texelsInBatchN; + index -= b2 * texelsInBatchN; + + int b = index / texelsInBatch; + index -= b * texelsInBatch; + + int r = 2 * (index / texelsInLogicalRow); + int c = imod(index, texelsInLogicalRow) * 2; + + return ivec4(b2, b, r, c); + } + `; + } + const packedTexShape = [Math.ceil(texShape[0] / 2), Math.ceil(texShape[1] / 2)]; + const texelsInLogicalRow = Math.ceil(shape[shape.length - 1] / 2); + const texelsInBatch = texelsInLogicalRow * Math.ceil(shape[shape.length - 2] / 2); + let texelsInBatchN = texelsInBatch; + let batches = ``; + let coords = 'b, r, c'; + for (let b = 2; b < shape.length - 1; b++) { + texelsInBatchN *= shape[shape.length - b - 1]; + batches = ` + int b${b} = index / ${texelsInBatchN}; + index -= b${b} * ${texelsInBatchN}; + ` + batches; + coords = `b${b}, ` + coords; + } + return ` + ivec${shape.length} getOutputCoords() { + ivec2 resTexRC = 
ivec2(resultUV.yx * + vec2(${packedTexShape[0]}, ${packedTexShape[1]})); + int index = resTexRC.x * ${packedTexShape[1]} + resTexRC.y; + + ${batches} + + int b = index / ${texelsInBatch}; + index -= b * ${texelsInBatch}; + + int r = 2 * (index / ${texelsInLogicalRow}); + int c = imod(index, ${texelsInLogicalRow}) * 2; + + return ivec${shape.length}(${coords}); + } + `; + } + function getOutput4DCoords(shape, texShape, enableShapeUniforms) { + if (enableShapeUniforms) { + const coordsFromIndexSnippet = getOutputLogicalCoordinatesFromFlatIndexByUniform(['r', 'c', 'd', 'd2'], shape); + return ` + ivec4 getOutputCoords() { + ivec2 resTexRC = ivec2(resultUV.yx * + vec2(outTexShape[0], outTexShape[1])); + int index = resTexRC.x * outTexShape[1] + resTexRC.y; + ${coordsFromIndexSnippet} + return ivec4(r, c, d, d2); + } + `; + } + const coordsFromIndexSnippet = getLogicalCoordinatesFromFlatIndex(['r', 'c', 'd', 'd2'], shape); + return ` + ivec4 getOutputCoords() { + ivec2 resTexRC = ivec2(resultUV.yx * + vec2(${texShape[0]}, ${texShape[1]})); + int index = resTexRC.x * ${texShape[1]} + resTexRC.y; + ${coordsFromIndexSnippet} + return ivec4(r, c, d, d2); + } + `; + } + function getOutput5DCoords(shape, texShape) { + const coordsFromIndexSnippet = getLogicalCoordinatesFromFlatIndex(['r', 'c', 'd', 'd2', 'd3'], shape); + return ` + ivec5 getOutputCoords() { + ivec2 resTexRC = ivec2(resultUV.yx * vec2(${texShape[0]}, + ${texShape[1]})); + + int index = resTexRC.x * ${texShape[1]} + resTexRC.y; + + ${coordsFromIndexSnippet} + + ivec5 outShape = ivec5(r, c, d, d2, d3); + return outShape; + } + `; + } + function getOutput6DCoords(shape, texShape) { + const coordsFromIndexSnippet = getLogicalCoordinatesFromFlatIndex(['r', 'c', 'd', 'd2', 'd3', 'd4'], shape); + return ` + ivec6 getOutputCoords() { + ivec2 resTexRC = ivec2(resultUV.yx * + vec2(${texShape[0]}, ${texShape[1]})); + int index = resTexRC.x * ${texShape[1]} + resTexRC.y; + + ${coordsFromIndexSnippet} + + ivec6 result = 
ivec6(r, c, d, d2, d3, d4); + return result; + } + `; + } + function getOutputPacked2DCoords(shape, texShape, enableShapeUniforms) { + const packedTexShape = [Math.ceil(texShape[0] / 2), Math.ceil(texShape[1] / 2)]; + if (arraysEqual(shape, texShape)) { + if (enableShapeUniforms) { + return ` + ivec2 getOutputCoords() { + ivec2 packedTexShape = ivec2(ceil(float(outTexShape[0]) / 2.0), ceil(float(outTexShape[1]) / 2.0)); + return 2 * ivec2(resultUV.yx * vec2(packedTexShape[0], packedTexShape[1])); + } + `; + } + return ` + ivec2 getOutputCoords() { + return 2 * ivec2(resultUV.yx * vec2(${packedTexShape[0]}, ${packedTexShape[1]})); + } + `; + } + // texels needed to accommodate a logical row + const texelsInLogicalRow = Math.ceil(shape[1] / 2); + /** + * getOutputCoords + * + * resTexRC: The rows and columns of the texels. If you move over one + * texel to the right in the packed texture, you are moving over one column + * (not two). + * + * index: The texel index + */ + if (enableShapeUniforms) { + return ` + ivec2 getOutputCoords() { + ivec2 packedTexShape = ivec2(ceil(float(outTexShape[0]) / 2.0), ceil(float(outTexShape[1]) / 2.0)); + int texelsInLogicalRow = int(ceil(float(outShape[1]) / 2.0)); + ivec2 resTexRC = ivec2(resultUV.yx * + vec2(packedTexShape[0], packedTexShape[1])); + + int index = resTexRC.x * packedTexShape[1] + resTexRC.y; + int r = 2 * (index / texelsInLogicalRow); + int c = imod(index, texelsInLogicalRow) * 2; + + return ivec2(r, c); + } + `; + } + return ` + ivec2 getOutputCoords() { + ivec2 resTexRC = ivec2(resultUV.yx * + vec2(${packedTexShape[0]}, ${packedTexShape[1]})); + + int index = resTexRC.x * ${packedTexShape[1]} + resTexRC.y; + int r = 2 * (index / ${texelsInLogicalRow}); + int c = imod(index, ${texelsInLogicalRow}) * 2; + + return ivec2(r, c); + } + `; + } + function getOutput2DCoords(shape, texShape, enableShapeUniforms) { + if (arraysEqual(shape, texShape)) { + if (enableShapeUniforms) { + return ` + ivec2 getOutputCoords() { + 
return ivec2(resultUV.yx * vec2(outTexShape[0], outTexShape[1])); + } + `; + } + return ` + ivec2 getOutputCoords() { + return ivec2(resultUV.yx * vec2(${texShape[0]}, ${texShape[1]})); + } + `; + } + if (shape[1] === 1) { + if (enableShapeUniforms) { + return ` + ivec2 getOutputCoords() { + ivec2 resTexRC = ivec2(resultUV.yx * + vec2(outTexShape[0], outTexShape[1])); + int index = resTexRC.x * outTexShape[1] + resTexRC.y; + return ivec2(index, 0); + } + `; + } + return ` + ivec2 getOutputCoords() { + ivec2 resTexRC = ivec2(resultUV.yx * + vec2(${texShape[0]}, ${texShape[1]})); + int index = resTexRC.x * ${texShape[1]} + resTexRC.y; + return ivec2(index, 0); + } + `; + } + if (shape[0] === 1) { + if (enableShapeUniforms) { + return ` + ivec2 getOutputCoords() { + ivec2 resTexRC = ivec2(resultUV.yx * + vec2(outTexShape[0], outTexShape[1])); + int index = resTexRC.x * outTexShape[1] + resTexRC.y; + return ivec2(0, index); + } + `; + } + return ` + ivec2 getOutputCoords() { + ivec2 resTexRC = ivec2(resultUV.yx * + vec2(${texShape[0]}, ${texShape[1]})); + int index = resTexRC.x * ${texShape[1]} + resTexRC.y; + return ivec2(0, index); + } + `; + } + if (enableShapeUniforms) { + return ` + ivec2 getOutputCoords() { + ivec2 resTexRC = ivec2(resultUV.yx * + vec2(outTexShape[0], outTexShape[1])); + int index = resTexRC.x * outTexShape[1] + resTexRC.y; + int r = index / outShape[1]; + int c = index - r * outShape[1]; + return ivec2(r, c); + } + `; + } + return ` + ivec2 getOutputCoords() { + ivec2 resTexRC = ivec2(resultUV.yx * + vec2(${texShape[0]}, ${texShape[1]})); + int index = resTexRC.x * ${texShape[1]} + resTexRC.y; + int r = index / ${shape[1]}; + int c = index - r * ${shape[1]}; + return ivec2(r, c); + } + `; + } + function getFlatOffsetUniformName(texName) { + return `offset${texName}`; + } + function getPackedSamplerScalar(inputInfo) { + const texName = inputInfo.name; + const funcName = 'get' + texName.charAt(0).toUpperCase() + texName.slice(1); + const glsl = 
getGlslDifferences(); + return ` + vec4 ${funcName}() { + return ${glsl.texture2D}(${texName}, halfCR); + } + `; + } + function getSamplerScalar(inputInfo, enableShapeUniforms) { + const texName = inputInfo.name; + const funcName = 'get' + texName.charAt(0).toUpperCase() + texName.slice(1); + if (inputInfo.shapeInfo.isUniform) { + return `float ${funcName}() {return ${texName};}`; + } + const [texNumR, texNumC] = inputInfo.shapeInfo.texShape; + if (texNumR === 1 && texNumC === 1) { + return ` + float ${funcName}() { + return sampleTexture(${texName}, halfCR); + } + `; + } + const offset = getFlatOffsetUniformName(texName); + if (enableShapeUniforms) { + return ` + float ${funcName}() { + vec2 uv = uvFromFlat(${texName}TexShape[0], ${texName}TexShape[1], ${offset}); + return sampleTexture(${texName}, uv); + } + `; + } + const [tNumR, tNumC] = inputInfo.shapeInfo.texShape; + return ` + float ${funcName}() { + vec2 uv = uvFromFlat(${tNumR}, ${tNumC}, ${offset}); + return sampleTexture(${texName}, uv); + } + `; + } + function getPackedSampler1D(inputInfo, enableShapeUniforms) { + const texName = inputInfo.name; + const funcName = 'get' + texName.charAt(0).toUpperCase() + texName.slice(1); + const texShape = inputInfo.shapeInfo.texShape; + const glsl = getGlslDifferences(); + if (enableShapeUniforms) { + return ` + vec4 ${funcName}(int index) { + ivec2 packedTexShape = ivec2(ceil(float(${texName}TexShape[0]) / 2.0), ceil(float(${texName}TexShape[1]) / 2.0)); + vec2 uv = packedUVfrom1D( + packedTexShape[0], packedTexShape[1], index); + return ${glsl.texture2D}(${texName}, uv); + } + `; + } + const packedTexShape = [Math.ceil(texShape[0] / 2), Math.ceil(texShape[1] / 2)]; + return ` + vec4 ${funcName}(int index) { + vec2 uv = packedUVfrom1D( + ${packedTexShape[0]}, ${packedTexShape[1]}, index); + return ${glsl.texture2D}(${texName}, uv); + } + `; + } + function getSampler1D(inputInfo, enableShapeUniforms) { + const texName = inputInfo.name; + const funcName = 'get' + 
texName.charAt(0).toUpperCase() + texName.slice(1); + if (inputInfo.shapeInfo.isUniform) { + // Uniform arrays will be less than 65505 (no risk of float16 overflow). + return ` + float ${funcName}(int index) { + ${getUniformSampler(inputInfo)} + } + `; + } + const texShape = inputInfo.shapeInfo.texShape; + const tNumR = texShape[0]; + const tNumC = texShape[1]; + if (tNumC === 1 && tNumR === 1) { + return ` + float ${funcName}(int index) { + return sampleTexture(${texName}, halfCR); + } + `; + } + const offset = getFlatOffsetUniformName(texName); + if (tNumC === 1) { + if (enableShapeUniforms) { + return ` + float ${funcName}(int index) { + vec2 uv = vec2(0.5, (float(index + ${offset}) + 0.5) / float(${texName}TexShape[0])); + return sampleTexture(${texName}, uv); + } + `; + } + return ` + float ${funcName}(int index) { + vec2 uv = vec2(0.5, (float(index + ${offset}) + 0.5) / ${tNumR}.0); + return sampleTexture(${texName}, uv); + } + `; + } + if (tNumR === 1) { + if (enableShapeUniforms) { + return ` + float ${funcName}(int index) { + vec2 uv = vec2((float(index + ${offset}) + 0.5) / float(${texName}TexShape[1]), 0.5); + return sampleTexture(${texName}, uv); + } + `; + } + return ` + float ${funcName}(int index) { + vec2 uv = vec2((float(index + ${offset}) + 0.5) / ${tNumC}.0, 0.5); + return sampleTexture(${texName}, uv); + } + `; + } + if (enableShapeUniforms) { + return ` + float ${funcName}(int index) { + vec2 uv = uvFromFlat(${texName}TexShape[0], ${texName}TexShape[1], index + ${offset}); + return sampleTexture(${texName}, uv); + } + `; + } + return ` + float ${funcName}(int index) { + vec2 uv = uvFromFlat(${tNumR}, ${tNumC}, index + ${offset}); + return sampleTexture(${texName}, uv); + } + `; + } + function getPackedSampler2D(inputInfo, enableShapeUniforms) { + const shape = inputInfo.shapeInfo.logicalShape; + const texName = inputInfo.name; + const funcName = 'get' + texName.charAt(0).toUpperCase() + texName.slice(1); + const texShape = 
inputInfo.shapeInfo.texShape; + const texNumR = texShape[0]; + const texNumC = texShape[1]; + const glsl = getGlslDifferences(); + if (texShape != null && arraysEqual(shape, texShape)) { + if (enableShapeUniforms) { + return ` + vec4 ${funcName}(int row, int col) { + vec2 uv = (vec2(col, row) + halfCR) / vec2(${texName}TexShape[1], ${texName}TexShape[0]); + + return ${glsl.texture2D}(${texName}, uv); + } + `; + } + return ` + vec4 ${funcName}(int row, int col) { + vec2 uv = (vec2(col, row) + halfCR) / vec2(${texNumC}.0, ${texNumR}.0); + + return ${glsl.texture2D}(${texName}, uv); + } + `; + } + if (enableShapeUniforms) { + return ` + vec4 ${funcName}(int row, int col) { + ivec2 packedTexShape = ivec2(ceil(float(${texName}TexShape[0]) / 2.0), ceil(float(${texName}TexShape[1]) / 2.0)); + int valuesPerRow = int(ceil(float(${texName}Shape[1]) / 2.0)); + vec2 uv = packedUVfrom2D(valuesPerRow, packedTexShape[0], packedTexShape[1], row, col); + return ${glsl.texture2D}(${texName}, uv); + } + `; + } + const packedTexShape = [Math.ceil(texShape[0] / 2), Math.ceil(texShape[1] / 2)]; + const valuesPerRow = Math.ceil(shape[1] / 2); + return ` + vec4 ${funcName}(int row, int col) { + vec2 uv = packedUVfrom2D(${valuesPerRow}, ${packedTexShape[0]}, ${packedTexShape[1]}, row, col); + return ${glsl.texture2D}(${texName}, uv); + } + `; + } + function getSampler2D(inputInfo, enableShapeUniforms) { + const shape = inputInfo.shapeInfo.logicalShape; + const texName = inputInfo.name; + const funcName = 'get' + texName.charAt(0).toUpperCase() + texName.slice(1); + const texShape = inputInfo.shapeInfo.texShape; + if (texShape != null && arraysEqual(shape, texShape)) { + if (enableShapeUniforms) { + return ` + float ${funcName}(int row, int col) { + vec2 uv = (vec2(col, row) + halfCR) / vec2(${texName}TexShape[1], ${texName}TexShape[0]); + return sampleTexture(${texName}, uv); + } + `; + } + const texNumR = texShape[0]; + const texNumC = texShape[1]; + return ` + float ${funcName}(int row, 
int col) { + vec2 uv = (vec2(col, row) + halfCR) / vec2(${texNumC}.0, ${texNumR}.0); + return sampleTexture(${texName}, uv); + } + `; + } + const { newShape, keptDims } = squeezeShape(shape); + const squeezedShape = newShape; + if (squeezedShape.length < shape.length) { + const newInputInfo = squeezeInputInfo(inputInfo, squeezedShape); + const params = ['row', 'col']; + return ` + ${getSamplerFromInInfo(newInputInfo, enableShapeUniforms)} + float ${funcName}(int row, int col) { + return ${funcName}(${getSqueezedParams(params, keptDims)}); + } + `; + } + if (inputInfo.shapeInfo.isUniform) { + // Uniform arrays will be less than 65505 (no risk of float16 overflow). + return ` + float ${funcName}(int row, int col) { + int index = round(dot(vec2(row, col), vec2(${shape[1]}, 1))); + ${getUniformSampler(inputInfo)} + } + `; + } + const texNumR = texShape[0]; + const texNumC = texShape[1]; + const offset = getFlatOffsetUniformName(texName); + if (texNumC === 1) { + // index is used directly as physical (no risk of float16 overflow). + if (enableShapeUniforms) { + return ` + float ${funcName}(int row, int col) { + float index = dot(vec3(row, col, ${offset}), vec3(${texName}Shape[1], 1, 1)); + vec2 uv = vec2(0.5, (index + 0.5) / float(${texName}TexShape[0])); + return sampleTexture(${texName}, uv); + } + `; + } + return ` + float ${funcName}(int row, int col) { + float index = dot(vec3(row, col, ${offset}), vec3(${shape[1]}, 1, 1)); + vec2 uv = vec2(0.5, (index + 0.5) / ${texNumR}.0); + return sampleTexture(${texName}, uv); + } + `; + } + if (texNumR === 1) { + // index is used directly as physical (no risk of float16 overflow). 
+ if (enableShapeUniforms) { + return ` + float ${funcName}(int row, int col) { + float index = dot(vec3(row, col, ${offset}), vec3(${texName}Shape[1], 1, 1)); + vec2 uv = vec2((index + 0.5) / float(${texName}TexShape[1]), 0.5); + return sampleTexture(${texName}, uv); + } + `; + } + return ` + float ${funcName}(int row, int col) { + float index = dot(vec3(row, col, ${offset}), vec3(${shape[1]}, 1, 1)); + vec2 uv = vec2((index + 0.5) / ${texNumC}.0, 0.5); + return sampleTexture(${texName}, uv); + } + `; + } + if (enableShapeUniforms) { + return ` + float ${funcName}(int row, int col) { + // Explicitly use integer operations as dot() only works on floats. + int index = row * ${texName}Shape[1] + col + ${offset}; + vec2 uv = uvFromFlat(${texName}TexShape[0], ${texName}TexShape[1], index); + return sampleTexture(${texName}, uv); + } + `; + } + return ` + float ${funcName}(int row, int col) { + // Explicitly use integer operations as dot() only works on floats. + int index = row * ${shape[1]} + col + ${offset}; + vec2 uv = uvFromFlat(${texNumR}, ${texNumC}, index); + return sampleTexture(${texName}, uv); + } +`; + } + function getPackedSampler3D(inputInfo, enableShapeUniforms) { + const shape = inputInfo.shapeInfo.logicalShape; + const texName = inputInfo.name; + const funcName = 'get' + texName.charAt(0).toUpperCase() + texName.slice(1); + const texShape = inputInfo.shapeInfo.texShape; + const packedTexShape = [Math.ceil(texShape[0] / 2), Math.ceil(texShape[1] / 2)]; + if (shape[0] === 1) { + const squeezedShape = shape.slice(1); + const keptDims = [1, 2]; + const newInputInfo = squeezeInputInfo(inputInfo, squeezedShape); + const params = ['b', 'row', 'col']; + return ` + ${getPackedSamplerFromInInfo(newInputInfo, enableShapeUniforms)} + vec4 ${funcName}(int b, int row, int col) { + return ${funcName}(${getSqueezedParams(params, keptDims)}); + } + `; + } + const glsl = getGlslDifferences(); + if (enableShapeUniforms) { + return ` + vec4 ${funcName}(int b, int row, int 
col) { + ivec2 packedTexShape = ivec2(ceil(float(${texName}TexShape[0]) / 2.0), ceil(float(${texName}TexShape[1]) / 2.0)); + int valuesPerRow = int(ceil(float(${texName}Shape[2]) / 2.0)); + int texelsInBatch = valuesPerRow * int(ceil(float(${texName}Shape[1]) / 2.0)); + vec2 uv = packedUVfrom3D( + packedTexShape[0], packedTexShape[1], texelsInBatch, valuesPerRow, b, row, col); + return ${glsl.texture2D}(${texName}, uv); + } + `; + } + const texNumR = packedTexShape[0]; + const texNumC = packedTexShape[1]; + const valuesPerRow = Math.ceil(shape[2] / 2); + const texelsInBatch = valuesPerRow * Math.ceil(shape[1] / 2); + return ` + vec4 ${funcName}(int b, int row, int col) { + vec2 uv = packedUVfrom3D( + ${texNumR}, ${texNumC}, ${texelsInBatch}, ${valuesPerRow}, b, row, col); + return ${glsl.texture2D}(${texName}, uv); + } + `; + } + function getSampler3D(inputInfo, enableShapeUniforms) { + const shape = inputInfo.shapeInfo.logicalShape; + const texName = inputInfo.name; + const funcName = 'get' + texName.charAt(0).toUpperCase() + texName.slice(1); + const stride0 = shape[1] * shape[2]; + const stride1 = shape[2]; + const { newShape, keptDims } = squeezeShape(shape); + const squeezedShape = newShape; + if (squeezedShape.length < shape.length) { + const newInputInfo = squeezeInputInfo(inputInfo, squeezedShape); + const params = ['row', 'col', 'depth']; + return ` + ${getSamplerFromInInfo(newInputInfo, enableShapeUniforms)} + float ${funcName}(int row, int col, int depth) { + return ${funcName}(${getSqueezedParams(params, keptDims)}); + } + `; + } + if (inputInfo.shapeInfo.isUniform) { + // Uniform arrays will be less than 65505 (no risk of float16 overflow). 
+ return ` + float ${funcName}(int row, int col, int depth) { + int index = round(dot(vec3(row, col, depth), + vec3(${stride0}, ${stride1}, 1))); + ${getUniformSampler(inputInfo)} + } + `; + } + const texShape = inputInfo.shapeInfo.texShape; + const texNumR = texShape[0]; + const texNumC = texShape[1]; + const flatOffset = inputInfo.shapeInfo.flatOffset; + if (texNumC === stride0 && flatOffset == null) { + // texC is used directly as physical (no risk of float16 overflow). + if (enableShapeUniforms) { + return ` + float ${funcName}(int row, int col, int depth) { + int stride1 = ${texName}Shape[2]; + float texR = float(row); + float texC = dot(vec2(col, depth), vec2(stride1, 1)); + vec2 uv = (vec2(texC, texR) + halfCR) / + vec2(${texName}TexShape[1], ${texName}TexShape[0]); + return sampleTexture(${texName}, uv); + } + `; + } + return ` + float ${funcName}(int row, int col, int depth) { + float texR = float(row); + float texC = dot(vec2(col, depth), vec2(${stride1}, 1)); + vec2 uv = (vec2(texC, texR) + halfCR) / + vec2(${texNumC}.0, ${texNumR}.0); + return sampleTexture(${texName}, uv); + } + `; + } + if (texNumC === stride1 && flatOffset == null) { + // texR is used directly as physical (no risk of float16 overflow). 
+ if (enableShapeUniforms) { + return ` + float ${funcName}(int row, int col, int depth) { + float texR = dot(vec2(row, col), vec2(${texName}Shape[1], 1)); + float texC = float(depth); + vec2 uv = (vec2(texC, texR) + halfCR) / vec2(${texName}TexShape[1], ${texName}TexShape[0]); + return sampleTexture(${texName}, uv); + } + `; + } + return ` + float ${funcName}(int row, int col, int depth) { + float texR = dot(vec2(row, col), vec2(${shape[1]}, 1)); + float texC = float(depth); + vec2 uv = (vec2(texC, texR) + halfCR) / vec2(${texNumC}.0, ${texNumR}.0); + return sampleTexture(${texName}, uv); + } + `; + } + const offset = getFlatOffsetUniformName(texName); + if (enableShapeUniforms) { + return ` + float ${funcName}(int row, int col, int depth) { + // Explicitly use integer operations as dot() only works on floats. + int stride0 = ${texName}Shape[1] * ${texName}Shape[2]; + int stride1 = ${texName}Shape[2]; + int index = row * stride0 + col * stride1 + depth + ${offset}; + vec2 uv = uvFromFlat(${texName}TexShape[0], ${texName}TexShape[1], index); + return sampleTexture(${texName}, uv); + } + `; + } + return ` + float ${funcName}(int row, int col, int depth) { + // Explicitly use integer operations as dot() only works on floats. 
+ int index = row * ${stride0} + col * ${stride1} + depth + ${offset}; + vec2 uv = uvFromFlat(${texNumR}, ${texNumC}, index); + return sampleTexture(${texName}, uv); + } + `; + } + function getPackedSamplerND(inputInfo, enableShapeUniforms) { + const texName = inputInfo.name; + const funcName = 'get' + texName.charAt(0).toUpperCase() + texName.slice(1); + const glsl = getGlslDifferences(); + if (enableShapeUniforms) { + // TODO: support 5d and 6d + return ` + vec4 ${funcName}(int b2, int b, int row, int col) { + int valuesPerRow = int(ceil(float(${texName}Shape[3]) / 2.0)); + int texelsInBatch = valuesPerRow * int(ceil(float(${texName}Shape[2]) / 2.0)); + int index = b * texelsInBatch + (row / 2) * valuesPerRow + (col / 2); + texelsInBatch *= ${texName}Shape[1]; + index = b2 * texelsInBatch + index; + ivec2 packedTexShape = ivec2(ceil(float(${texName}TexShape[0]) / 2.0), ceil(float(${texName}TexShape[1]) / 2.0)); + int texR = index / packedTexShape[1]; + int texC = index - texR * packedTexShape[1]; + vec2 uv = (vec2(texC, texR) + halfCR) / vec2(packedTexShape[1], packedTexShape[0]); return ${glsl.texture2D}(${texName}, uv); + } + `; + } + const shape = inputInfo.shapeInfo.logicalShape; + const rank = shape.length; + const texShape = inputInfo.shapeInfo.texShape; + const packedTexShape = [Math.ceil(texShape[0] / 2), Math.ceil(texShape[1] / 2)]; + const texNumR = packedTexShape[0]; + const texNumC = packedTexShape[1]; + const valuesPerRow = Math.ceil(shape[rank - 1] / 2); + let texelsInBatch = valuesPerRow * Math.ceil(shape[rank - 2] / 2); + let params = `int b, int row, int col`; + let index = `b * ${texelsInBatch} + (row / 2) * ${valuesPerRow} + (col / 2)`; + for (let b = 2; b < rank - 1; b++) { + params = `int b${b}, ` + params; + texelsInBatch *= shape[rank - b - 1]; + index = `b${b} * ${texelsInBatch} + ` + index; + } + return ` + vec4 ${funcName}(${params}) { + int index = ${index}; + int texR = index / ${texNumC}; + int texC = index - texR * ${texNumC}; + vec2 
uv = (vec2(texC, texR) + halfCR) / vec2(${texNumC}, ${texNumR}); + return ${glsl.texture2D}(${texName}, uv); + } + `; + } + function getSampler4D(inputInfo, enableShapeUniforms) { + const shape = inputInfo.shapeInfo.logicalShape; + const texName = inputInfo.name; + const funcName = 'get' + texName.charAt(0).toUpperCase() + texName.slice(1); + const stride2 = shape[3]; + const stride1 = shape[2] * stride2; + const stride0 = shape[1] * stride1; + const { newShape, keptDims } = squeezeShape(shape); + if (newShape.length < shape.length) { + const newInputInfo = squeezeInputInfo(inputInfo, newShape); + const params = ['row', 'col', 'depth', 'depth2']; + return ` + ${getSamplerFromInInfo(newInputInfo, enableShapeUniforms)} + float ${funcName}(int row, int col, int depth, int depth2) { + return ${funcName}(${getSqueezedParams(params, keptDims)}); + } + `; + } + if (inputInfo.shapeInfo.isUniform) { + // Uniform arrays will be less than 65505 (no risk of float16 overflow). + return ` + float ${funcName}(int row, int col, int depth, int depth2) { + int index = round(dot(vec4(row, col, depth, depth2), + vec4(${stride0}, ${stride1}, ${stride2}, 1))); + ${getUniformSampler(inputInfo)} + } + `; + } + const flatOffset = inputInfo.shapeInfo.flatOffset; + const texShape = inputInfo.shapeInfo.texShape; + const texNumR = texShape[0]; + const texNumC = texShape[1]; + const stride2Str = `int stride2 = ${texName}Shape[3];`; + const stride1Str = `int stride1 = ${texName}Shape[2] * stride2;`; + const stride0Str = `int stride0 = ${texName}Shape[1] * stride1;`; + if (texNumC === stride0 && flatOffset == null) { + // texC is used directly as physical (no risk of float16 overflow). 
+ if (enableShapeUniforms) { + return ` + float ${funcName}(int row, int col, int depth, int depth2) { + ${stride2Str} + ${stride1Str} + float texR = float(row); + float texC = + dot(vec3(col, depth, depth2), + vec3(stride1, stride2, 1)); + vec2 uv = (vec2(texC, texR) + halfCR) / + vec2(${texName}TexShape[1], ${texName}TexShape[0]); + return sampleTexture(${texName}, uv); + } + `; + } + return ` + float ${funcName}(int row, int col, int depth, int depth2) { + float texR = float(row); + float texC = + dot(vec3(col, depth, depth2), + vec3(${stride1}, ${stride2}, 1)); + vec2 uv = (vec2(texC, texR) + halfCR) / + vec2(${texNumC}.0, ${texNumR}.0); + return sampleTexture(${texName}, uv); + } + `; + } + if (texNumC === stride2 && flatOffset == null) { + // texR is used directly as physical (no risk of float16 overflow). + if (enableShapeUniforms) { + return ` + float ${funcName}(int row, int col, int depth, int depth2) { + float texR = dot(vec3(row, col, depth), + vec3(${texName}Shape[1] * ${texName}Shape[2], ${texName}Shape[2], 1)); + float texC = float(depth2); + vec2 uv = (vec2(texC, texR) + halfCR) / + vec2(${texName}TexShape[1], ${texName}TexShape[0]); + return sampleTexture(${texName}, uv); + } + `; + } + return ` + float ${funcName}(int row, int col, int depth, int depth2) { + float texR = dot(vec3(row, col, depth), + vec3(${shape[1] * shape[2]}, ${shape[2]}, 1)); + float texC = float(depth2); + vec2 uv = (vec2(texC, texR) + halfCR) / + vec2(${texNumC}.0, ${texNumR}.0); + return sampleTexture(${texName}, uv); + } + `; + } + const offset = getFlatOffsetUniformName(texName); + if (enableShapeUniforms) { + return ` + float ${funcName}(int row, int col, int depth, int depth2) { + // Explicitly use integer operations as dot() only works on floats. 
+ ${stride2Str} + ${stride1Str} + ${stride0Str} + int index = row * stride0 + col * stride1 + + depth * stride2 + depth2; + vec2 uv = uvFromFlat(${texName}TexShape[0], ${texName}TexShape[1], index + ${offset}); + return sampleTexture(${texName}, uv); + } + `; + } + return ` + float ${funcName}(int row, int col, int depth, int depth2) { + // Explicitly use integer operations as dot() only works on floats. + int index = row * ${stride0} + col * ${stride1} + + depth * ${stride2} + depth2; + vec2 uv = uvFromFlat(${texNumR}, ${texNumC}, index + ${offset}); + return sampleTexture(${texName}, uv); + } + `; + } + function getSampler5D(inputInfo) { + const shape = inputInfo.shapeInfo.logicalShape; + const texName = inputInfo.name; + const funcName = 'get' + texName.charAt(0).toUpperCase() + texName.slice(1); + const stride3 = shape[4]; + const stride2 = shape[3] * stride3; + const stride1 = shape[2] * stride2; + const stride0 = shape[1] * stride1; + const { newShape, keptDims } = squeezeShape(shape); + if (newShape.length < shape.length) { + const newInputInfo = squeezeInputInfo(inputInfo, newShape); + const params = ['row', 'col', 'depth', 'depth2', 'depth3']; + return ` + ${getSamplerFromInInfo(newInputInfo)} + float ${funcName}(int row, int col, int depth, int depth2, int depth3) { + return ${funcName}(${getSqueezedParams(params, keptDims)}); + } + `; + } + if (inputInfo.shapeInfo.isUniform) { + // Uniform arrays will be less than 65505 (no risk of float16 overflow). 
+ return ` + float ${funcName}(int row, int col, int depth, int depth2, int depth3) { + float index = dot( + vec4(row, col, depth, depth2), + vec4(${stride0}, ${stride1}, ${stride2}, ${stride3})) + + depth3; + ${getUniformSampler(inputInfo)} + } + `; + } + const flatOffset = inputInfo.shapeInfo.flatOffset; + const texShape = inputInfo.shapeInfo.texShape; + const texNumR = texShape[0]; + const texNumC = texShape[1]; + if (texNumC === stride0 && flatOffset == null) { + // texC is used directly as physical (no risk of float16 overflow). + return ` + float ${funcName}(int row, int col, int depth, int depth2, int depth3) { + int texR = row; + float texC = dot(vec4(col, depth, depth2, depth3), + vec4(${stride1}, ${stride2}, ${stride3}, 1)); + vec2 uv = (vec2(texC, texR) + halfCR) / + vec2(${texNumC}.0, ${texNumR}.0); + return sampleTexture(${texName}, uv); + } + `; + } + if (texNumC === stride3 && flatOffset == null) { + // texR is used directly as physical (no risk of float16 overflow). + return ` + float ${funcName}(int row, int col, int depth, int depth2, int depth3) { + float texR = dot( + vec4(row, col, depth, depth2), + vec4(${shape[1] * shape[2] * shape[3]}, + ${shape[2] * shape[3]}, ${shape[3]}, 1)); + int texC = depth3; + vec2 uv = (vec2(texC, texR) + halfCR) / + vec2(${texNumC}.0, ${texNumR}.0); + return sampleTexture(${texName}, uv); + } + `; + } + const offset = getFlatOffsetUniformName(texName); + return ` + float ${funcName}(int row, int col, int depth, int depth2, int depth3) { + // Explicitly use integer operations as dot() only works on floats. 
+ int index = row * ${stride0} + col * ${stride1} + depth * ${stride2} + + depth2 * ${stride3} + depth3 + ${offset}; + vec2 uv = uvFromFlat(${texNumR}, ${texNumC}, index); + return sampleTexture(${texName}, uv); + } + `; + } + function getSampler6D(inputInfo) { + const shape = inputInfo.shapeInfo.logicalShape; + const texName = inputInfo.name; + const funcName = 'get' + texName.charAt(0).toUpperCase() + texName.slice(1); + const { newShape, keptDims } = squeezeShape(shape); + if (newShape.length < shape.length) { + const newInputInfo = squeezeInputInfo(inputInfo, newShape); + const params = ['row', 'col', 'depth', 'depth2', 'depth3', 'depth4']; + return ` + ${getSamplerFromInInfo(newInputInfo)} + float ${funcName}(int row, int col, int depth, + int depth2, int depth3, int depth4) { + return ${funcName}(${getSqueezedParams(params, keptDims)}); + } + `; + } + const stride4 = shape[5]; + const stride3 = shape[4] * stride4; + const stride2 = shape[3] * stride3; + const stride1 = shape[2] * stride2; + const stride0 = shape[1] * stride1; + if (inputInfo.shapeInfo.isUniform) { + // Uniform arrays will be less than 65505 (no risk of float16 overflow). + return ` + float ${funcName}(int row, int col, int depth, + int depth2, int depth3, int depth4) { + int index = round(dot( + vec4(row, col, depth, depth2), + vec4(${stride0}, ${stride1}, ${stride2}, ${stride3})) + + dot( + vec2(depth3, depth4), + vec2(${stride4}, 1))); + ${getUniformSampler(inputInfo)} + } + `; + } + const flatOffset = inputInfo.shapeInfo.flatOffset; + const texShape = inputInfo.shapeInfo.texShape; + const texNumR = texShape[0]; + const texNumC = texShape[1]; + if (texNumC === stride0 && flatOffset == null) { + // texC is used directly as physical (no risk of float16 overflow). 
+ return ` + float ${funcName}(int row, int col, int depth, + int depth2, int depth3, int depth4) { + int texR = row; + float texC = dot(vec4(col, depth, depth2, depth3), + vec4(${stride1}, ${stride2}, ${stride3}, ${stride4})) + + float(depth4); + vec2 uv = (vec2(texC, texR) + halfCR) / + vec2(${texNumC}.0, ${texNumR}.0); + return sampleTexture(${texName}, uv); + } + `; + } + if (texNumC === stride4 && flatOffset == null) { + // texR is used directly as physical (no risk of float16 overflow). + return ` + float ${funcName}(int row, int col, int depth, + int depth2, int depth3, int depth4) { + float texR = dot(vec4(row, col, depth, depth2), + vec4(${shape[1] * shape[2] * shape[3] * shape[4]}, + ${shape[2] * shape[3] * shape[4]}, + ${shape[3] * shape[4]}, + ${shape[4]})) + float(depth3); + int texC = depth4; + vec2 uv = (vec2(texC, texR) + halfCR) / + vec2(${texNumC}.0, ${texNumR}.0); + return sampleTexture(${texName}, uv); + } + `; + } + const offset = getFlatOffsetUniformName(texName); + return ` + float ${funcName}(int row, int col, int depth, + int depth2, int depth3, int depth4) { + // Explicitly use integer operations as dot() only works on floats. 
+ int index = row * ${stride0} + col * ${stride1} + depth * ${stride2} + + depth2 * ${stride3} + depth3 * ${stride4} + depth4 + ${offset}; + vec2 uv = uvFromFlat(${texNumR}, ${texNumC}, index); + return sampleTexture(${texName}, uv); + } + `; + } + function getUniformSampler(inputInfo) { + const texName = inputInfo.name; + const inSize = sizeFromShape(inputInfo.shapeInfo.logicalShape); + if (inSize < 2) { + return `return ${texName};`; + } + return ` + for (int i = 0; i < ${inSize}; i++) { + if (i == index) { + return ${texName}[i]; + } + } + `; + } + function getPackedSamplerAtOutputCoords(inputInfo, outShapeInfo) { + const texName = inputInfo.name; + const texFuncSnippet = texName.charAt(0).toUpperCase() + texName.slice(1); + const funcName = 'get' + texFuncSnippet + 'AtOutCoords'; + const inRank = inputInfo.shapeInfo.logicalShape.length; + const outRank = outShapeInfo.logicalShape.length; + const broadcastDims = getBroadcastDims(inputInfo.shapeInfo.logicalShape, outShapeInfo.logicalShape); + const type = getCoordsDataType(outRank); + const rankDiff = outRank - inRank; + let coordsSnippet; + const fields = ['x', 'y', 'z', 'w', 'u', 'v']; + if (inRank === 0) { + coordsSnippet = ''; + } + else if (outRank < 2 && broadcastDims.length >= 1) { + coordsSnippet = 'coords = 0;'; + } + else { + coordsSnippet = + broadcastDims.map(d => `coords.${fields[d + rankDiff]} = 0;`) + .join('\n'); + } + let unpackedCoordsSnippet = ''; + if (outRank < 2 && inRank > 0) { + unpackedCoordsSnippet = 'coords'; + } + else { + unpackedCoordsSnippet = inputInfo.shapeInfo.logicalShape + .map((s, i) => `coords.${fields[i + rankDiff]}`) + .join(', '); + } + let output = `return outputValue;`; + const inSize = sizeFromShape(inputInfo.shapeInfo.logicalShape); + const isInputScalar = inSize === 1; + const outSize = sizeFromShape(outShapeInfo.logicalShape); + const isOutputScalar = outSize === 1; + if (inRank === 1 && !isInputScalar && !isOutputScalar) { + output = ` + return vec4(outputValue.xy, 
outputValue.xy); + `; + } + else if (isInputScalar && !isOutputScalar) { + if (outRank === 1) { + output = ` + return vec4(outputValue.x, outputValue.x, 0., 0.); + `; + } + else { + output = ` + return vec4(outputValue.x); + `; + } + } + else if (broadcastDims.length) { + const rows = inRank - 2; + const cols = inRank - 1; + if (broadcastDims.indexOf(rows) > -1 && broadcastDims.indexOf(cols) > -1) { + output = `return vec4(outputValue.x);`; + } + else if (broadcastDims.indexOf(rows) > -1) { + output = `return vec4(outputValue.x, outputValue.y, ` + + `outputValue.x, outputValue.y);`; + } + else if (broadcastDims.indexOf(cols) > -1) { + output = `return vec4(outputValue.xx, outputValue.zz);`; + } + } + return ` + vec4 ${funcName}() { + ${type} coords = getOutputCoords(); + ${coordsSnippet} + vec4 outputValue = get${texFuncSnippet}(${unpackedCoordsSnippet}); + ${output} + } + `; + } + function getSamplerAtOutputCoords(inputInfo, outShapeInfo) { + const texName = inputInfo.name; + const texFuncSnippet = texName.charAt(0).toUpperCase() + texName.slice(1); + const funcName = 'get' + texFuncSnippet + 'AtOutCoords'; + const outTexShape = outShapeInfo.texShape; + const inTexShape = inputInfo.shapeInfo.texShape; + const inRank = inputInfo.shapeInfo.logicalShape.length; + const outRank = outShapeInfo.logicalShape.length; + if (!inputInfo.shapeInfo.isUniform && inRank === outRank && + inputInfo.shapeInfo.flatOffset == null && + arraysEqual(inTexShape, outTexShape)) { + return ` + float ${funcName}() { + return sampleTexture(${texName}, resultUV); + } + `; + } + const type = getCoordsDataType(outRank); + const broadcastDims = getBroadcastDims(inputInfo.shapeInfo.logicalShape, outShapeInfo.logicalShape); + const rankDiff = outRank - inRank; + let coordsSnippet; + const fields = ['x', 'y', 'z', 'w', 'u', 'v']; + if (inRank === 0) { + coordsSnippet = ''; + } + else if (outRank < 2 && broadcastDims.length >= 1) { + coordsSnippet = 'coords = 0;'; + } + else { + coordsSnippet = + 
broadcastDims.map(d => `coords.${fields[d + rankDiff]} = 0;`) + .join('\n'); + } + let unpackedCoordsSnippet = ''; + if (outRank < 2 && inRank > 0) { + unpackedCoordsSnippet = 'coords'; + } + else { + unpackedCoordsSnippet = inputInfo.shapeInfo.logicalShape + .map((s, i) => `coords.${fields[i + rankDiff]}`) + .join(', '); + } + return ` + float ${funcName}() { + ${type} coords = getOutputCoords(); + ${coordsSnippet} + return get${texFuncSnippet}(${unpackedCoordsSnippet}); + } + `; + } + function getCoordsDataType(rank) { + if (rank <= 1) { + return 'int'; + } + else if (rank === 2) { + return 'ivec2'; + } + else if (rank === 3) { + return 'ivec3'; + } + else if (rank === 4) { + return 'ivec4'; + } + else if (rank === 5) { + return 'ivec5'; + } + else if (rank === 6) { + return 'ivec6'; + } + else { + throw Error(`GPU for rank ${rank} is not yet supported`); + } + } + function getUniformInfoFromShape(isPacked, shape, texShape) { + const { newShape, keptDims } = squeezeShape(shape); + const rank = shape.length; + const useSqueezePackedShape = isPacked && rank === 3 && shape[0] === 1; + const squeezeShape$1 = useSqueezePackedShape ? shape.slice(1) : newShape; + const useSqueezeShape = (!isPacked && rank > 1 && !arraysEqual(shape, texShape) && + newShape.length < rank) || + useSqueezePackedShape; + const uniformShape = useSqueezeShape ? squeezeShape$1 : shape; + return { useSqueezeShape, uniformShape, keptDims }; + } + /** Returns a new input info (a copy) that has a squeezed logical shape. */ + function squeezeInputInfo(inInfo, squeezedShape) { + // Deep copy. + const newInputInfo = JSON.parse(JSON.stringify(inInfo)); + newInputInfo.shapeInfo.logicalShape = squeezedShape; + return newInputInfo; + } + function getSqueezedParams(params, keptDims) { + return keptDims.map(d => params[d]).join(', '); + } + + /** + * @license + * Copyright 2017 Google LLC. All Rights Reserved. 
+ * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + function compileProgram(gpgpu, program, inputs, output) { + const inputInfos = inputs.map((input, i) => { + const shapeInfo = { + logicalShape: input.shape, + texShape: input.isUniform ? null : input.texData.texShape, + isUniform: input.isUniform, + isPacked: input.isUniform ? false : input.texData.isPacked, + flatOffset: null + }; + if (input.texData != null && input.texData.slice != null && + input.texData.slice.flatOffset > 0) { + shapeInfo.flatOffset = input.texData.slice.flatOffset; + } + return { name: program.variableNames[i], shapeInfo }; + }); + const inShapeInfos = inputInfos.map(x => x.shapeInfo); + const outShapeInfo = { + logicalShape: output.shape, + texShape: output.texData.texShape, + isUniform: false, + isPacked: output.texData.isPacked, + flatOffset: null + }; + const source = makeShader(inputInfos, outShapeInfo, program); + const fragmentShader = createFragmentShader(gpgpu.gl, source); + const webGLProgram = gpgpu.createProgram(fragmentShader); + if (!env().get('ENGINE_COMPILE_ONLY')) { + gpgpu.buildVao(webGLProgram); + return Object.assign({ program, + fragmentShader, + source, + webGLProgram, + inShapeInfos, + outShapeInfo }, getUniformLocations(gpgpu, program, webGLProgram)); + } + else { + return { + program, + fragmentShader, + source, + webGLProgram, + inShapeInfos, + outShapeInfo, + 
variablesLocations: null, + customUniformLocations: null, + infLoc: null, + nanLoc: null, + outShapeLocation: null, + outShapeStridesLocation: null, + outTexShapeLocation: null + }; + } + } + function getUniformLocations(gpgpu, program, webGLProgram) { + const variablesLocations = []; + const customUniformLocations = []; + let outShapeLocation; + let outTexShapeLocation; + let outShapeStridesLocation; + let infLoc = null; + let nanLoc = null; + // Add special uniforms (NAN, INFINITY) + nanLoc = gpgpu.getUniformLocation(webGLProgram, 'NAN', false); + if (env().getNumber('WEBGL_VERSION') === 1) { + infLoc = gpgpu.getUniformLocation(webGLProgram, 'INFINITY', false); + } + // Add user-defined uniforms + const shouldThrow = false; + for (const varName of program.variableNames) { + const varLocs = { + name: varName, + uniform: gpgpu.getUniformLocation(webGLProgram, varName, shouldThrow), + offset: gpgpu.getUniformLocation(webGLProgram, `offset${varName}`, shouldThrow), + }; + if (program.enableShapeUniforms) { + varLocs.shape = gpgpu.getUniformLocation(webGLProgram, `${varName}Shape`, shouldThrow); + varLocs.texShape = gpgpu.getUniformLocation(webGLProgram, `${varName}TexShape`, shouldThrow); + } + variablesLocations.push(varLocs); + } + if (program.enableShapeUniforms) { + outShapeLocation = + gpgpu.getUniformLocation(webGLProgram, 'outShape', shouldThrow); + outShapeStridesLocation = + gpgpu.getUniformLocation(webGLProgram, 'outShapeStrides', shouldThrow); + outTexShapeLocation = + gpgpu.getUniformLocation(webGLProgram, 'outTexShape', shouldThrow); + } + if (program.customUniforms) { + for (const d of program.customUniforms) { + customUniformLocations.push(gpgpu.getUniformLocation(webGLProgram, d.name, shouldThrow)); + } + } + return { + variablesLocations, + customUniformLocations, + infLoc, + nanLoc, + outShapeLocation, + outShapeStridesLocation, + outTexShapeLocation + }; + } + function validateBinaryAndProgram(shapeInfos, inputs) { + if (shapeInfos.length !== 
inputs.length) { + throw Error(`Binary was compiled with ${shapeInfos.length} inputs, but ` + + `was executed with ${inputs.length} inputs`); + } + shapeInfos.forEach((s, i) => { + const shapeA = s.logicalShape; + const input = inputs[i]; + const shapeB = input.shape; + if (!arraysEqual(shapeA, shapeB)) { + throw Error(`Binary was compiled with different shapes than ` + + `the current args. Shapes ${shapeA} and ${shapeB} must match`); + } + // The input is uploaded as uniform. + if (s.isUniform && input.isUniform) { + return; + } + const texShapeA = s.texShape; + const texShapeB = input.isUniform ? null : input.texData.texShape; + if (!arraysEqual(texShapeA, texShapeB)) { + throw Error(`Binary was compiled with different texture shapes than the` + + ` current args. Shape ${texShapeA} and ${texShapeB} must match`); + } + }); + } + function runProgram(gpgpu, binary, inputs, output, customUniformValues) { + if (!binary.program.enableShapeUniforms) { + validateBinaryAndProgram(binary.inShapeInfos, inputs); + validateBinaryAndProgram([binary.outShapeInfo], [output]); + } + const outTex = output.texData.texture; + const outTexShape = output.texData.texShape; + if (output.texData.isPacked) { + gpgpu.setOutputPackedMatrixTexture(outTex.texture, outTexShape[0], outTexShape[1]); + } + else { + gpgpu.setOutputMatrixTexture(outTex.texture, outTexShape[0], outTexShape[1]); + } + gpgpu.setProgram(binary.webGLProgram); + gpgpu.bindVertexArray(binary.webGLProgram.vao); + // Set special uniforms (NAN, INFINITY) + if (env().getNumber('WEBGL_VERSION') === 1) { + if (binary.infLoc !== null) { + gpgpu.gl.uniform1f(binary.infLoc, Infinity); + } + } + if (binary.nanLoc !== null) { + gpgpu.gl.uniform1f(binary.nanLoc, NaN); + } + // Set user-defined inputs + for (let i = 0; i < inputs.length; ++i) { + const input = inputs[i]; + const { uniform: varLoc, offset: varOffsetLoc, shape: varShapeLoc, texShape: varTexShapeLoc, } = binary.variablesLocations[i]; + if (varShapeLoc) { + const { 
uniformShape } = getUniformInfoFromShape(binary.program.packedInputs, input.shape, input.texData.texShape); + switch (uniformShape.length) { + case 1: + gpgpu.gl.uniform1iv(varShapeLoc, new Int32Array(uniformShape)); + break; + case 2: + gpgpu.gl.uniform2iv(varShapeLoc, new Int32Array(uniformShape)); + break; + case 3: + gpgpu.gl.uniform3iv(varShapeLoc, new Int32Array(uniformShape)); + break; + case 4: + gpgpu.gl.uniform4iv(varShapeLoc, new Int32Array(uniformShape)); + break; + default: + break; + } + } + if (varTexShapeLoc) { + gpgpu.gl.uniform2i(varTexShapeLoc, input.texData.texShape[0], input.texData.texShape[1]); + } + if (varLoc == null) { + // The compiler inferred that this variable is not used in this shader. + continue; + } + if (input.isUniform) { + // Upload the values of the tensor as uniform. + if (sizeFromShape(input.shape) < 2) { + gpgpu.gl.uniform1f(varLoc, input.uniformValues[0]); + } + else { + let vals = input.uniformValues; + if (!(vals instanceof Float32Array)) { + vals = new Float32Array(vals); + } + gpgpu.gl.uniform1fv(varLoc, vals); + } + continue; + } + // If the input was sliced, upload the flat offset index. 
+ if (input.texData.slice != null && varOffsetLoc != null) { + gpgpu.gl.uniform1i(varOffsetLoc, input.texData.slice.flatOffset); + } + gpgpu.setInputMatrixTexture(input.texData.texture.texture, varLoc, i); + } + const outShapeLoc = binary.outShapeLocation; + if (outShapeLoc) { + switch (output.shape.length) { + case 1: + gpgpu.gl.uniform1iv(outShapeLoc, new Int32Array(output.shape)); + break; + case 2: + gpgpu.gl.uniform2iv(outShapeLoc, new Int32Array(output.shape)); + break; + case 3: + gpgpu.gl.uniform3iv(outShapeLoc, new Int32Array(output.shape)); + break; + case 4: + gpgpu.gl.uniform4iv(outShapeLoc, new Int32Array(output.shape)); + break; + default: + break; + } + } + if (binary.outShapeStridesLocation) { + const strides = computeStrides(output.shape); + switch (output.shape.length) { + case 2: + gpgpu.gl.uniform1iv(binary.outShapeStridesLocation, new Int32Array(strides)); + break; + case 3: + gpgpu.gl.uniform2iv(binary.outShapeStridesLocation, new Int32Array(strides)); + break; + case 4: + gpgpu.gl.uniform3iv(binary.outShapeStridesLocation, new Int32Array(strides)); + break; + default: + break; + } + } + if (binary.outTexShapeLocation) { + gpgpu.gl.uniform2i(binary.outTexShapeLocation, output.texData.texShape[0], output.texData.texShape[1]); + } + if (binary.program.customUniforms && customUniformValues) { + for (let i = 0; i < binary.program.customUniforms.length; ++i) { + const d = binary.program.customUniforms[i]; + const customLoc = binary.customUniformLocations[i]; + const customValue = customUniformValues[i]; + if (d.type === 'float') { + gpgpu.gl.uniform1fv(customLoc, customValue); + } + else if (d.type === 'vec2') { + gpgpu.gl.uniform2fv(customLoc, customValue); + } + else if (d.type === 'vec3') { + gpgpu.gl.uniform3fv(customLoc, customValue); + } + else if (d.type === 'vec4') { + gpgpu.gl.uniform4fv(customLoc, customValue); + } + else if (d.type === 'int') { + gpgpu.gl.uniform1iv(customLoc, customValue); + } + else if (d.type === 'ivec2') { + 
gpgpu.gl.uniform2iv(customLoc, customValue); + } + else if (d.type === 'ivec3') { + gpgpu.gl.uniform3iv(customLoc, customValue); + } + else if (d.type === 'ivec4') { + gpgpu.gl.uniform4iv(customLoc, customValue); + } + else { + throw Error(`uniform type ${d.type} is not supported yet.`); + } + } + } + gpgpu.executeProgram(); + } + function makeShaderKey(program, inputs, output) { + let keyInputs = ''; + inputs.concat(output).forEach(x => { + const hasOffset = x.texData != null && x.texData.slice != null && + x.texData.slice.flatOffset > 0; + // TODO: Remove the condition of !x.isUniform. + if (program.enableShapeUniforms && !x.isUniform) { + const xTexShape = x.texData.texShape; + const { useSqueezeShape, uniformShape, keptDims } = getUniformInfoFromShape(program.packedInputs, x.shape, xTexShape); + let rank1 = '', rank2 = '', rank34 = ''; + if (uniformShape.length === 1 && program.packedInputs) { + const packedTexShape = [Math.ceil(xTexShape[0] / 2), Math.ceil(xTexShape[1] / 2)]; + rank1 = `${packedTexShape[0] > 1}_${packedTexShape[1] > 1}`; + } + else if (uniformShape.length === 2 && !program.packedInputs) { + rank2 = `${uniformShape[0] > 1}_${uniformShape[1] > 1}`; + } + else if (uniformShape.length > 2 && !program.packedInputs) { + const strides = computeStrides(uniformShape); + rank34 = `${strides[0] === xTexShape[1]}_${strides[strides.length - 1] === xTexShape[1]}`; + } + const xRank = x.shape.length; + const isLogicalShapTexShapeEqual = uniformShape.length === 2 && arraysEqual(x.shape, xTexShape); + const isScalar = sizeFromShape(x.shape) === 1; + const broadcastDims = getBroadcastDims$1(x.shape, output.shape); + const isInOutTexShapeEqual = !program.packedInputs && + xRank === output.shape.length && + arraysEqual(xTexShape, output.texData.texShape); + const isTexShapeGreaterThanOne = program.packedInputs || uniformShape.length > 2 ? 
+ '' : + `${xTexShape[0] > 1}_${xTexShape[1] > 1}`; + // These key components are needed due to shader_compiler is embedding + // them in the shader. + // |xRank| is used to determine the coords length. See + // get[Packed]SamplerAtOutputCoords. + // |isInOutTexShapeEqual| is used to determine whether going to an + // optimization path in getSamplerAtOutputCoords. + // |useSqueezeShape| is extracted from squeezeInputInfo of + // getSampler[2|3|4]D/getPackedSampler3D. + // |isScalar| is extracted from isInputScalar/isOutputScalar in + // getPackedSamplerAtOutputCoords. + // |broadcastDims| is extracted from get[Packed]SamplerAtOutputCoords. + // |isLogicalShapTexShapeEqual| is used in + // getOutput[Packed]2DCoords/get[Packed]Sampler2D. + // |rank1| is used in getOutputPacked1DCoords. + // |rank2| is used in getOutput2DCoords. + // |rank34| is used in getSampler3D/getSampler4D. + // |isTexShapeGreaterThanOne| are used in + // getSampler[Scalar|1D|2D]/getOutput1DCoords. + keyInputs += `${xRank}_${isInOutTexShapeEqual}_${useSqueezeShape ? keptDims : ''}_${uniformShape.length}_${isScalar}_${broadcastDims}_${isLogicalShapTexShapeEqual}_${rank1}_${rank2}_${rank34}_${isTexShapeGreaterThanOne}_${hasOffset}`; + } + else { + const texShape = x.isUniform ? 'uniform' : x.texData.texShape; + keyInputs += `${x.shape}_${texShape}_${hasOffset}`; + } + }); + const keyUserCode = program.userCode; + let key = program.constructor.name; + // Fast string concat. See https://jsperf.com/string-concatenation/14. + key += '_' + keyInputs + '_' + keyUserCode + + `${env().getNumber('WEBGL_VERSION')}`; + return key; + } + function useShapeUniforms(rank) { + // TODO: Remove the limitaion of rank <= 4. + return env().getBool('WEBGL_USE_SHAPES_UNIFORMS') && rank <= 4; + } + + /** + * @license + * Copyright 2019 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + class DecodeMatrixProgram { + constructor(outputShape) { + this.variableNames = ['A']; + this.packedInputs = false; + this.packedOutput = true; + this.outPackingScheme = PackingScheme.DENSE; + this.customUniforms = [{ name: 'texShape', type: 'ivec2' }]; + const glsl = getGlslDifferences(); + this.outputShape = outputShape; + this.enableShapeUniforms = useShapeUniforms(this.outputShape.length); + this.userCode = ` + ivec3 outCoordsFromFlatIndex(int index) { + ${this.enableShapeUniforms ? + getOutputLogicalCoordinatesFromFlatIndexByUniform(['r', 'c', 'd'], outputShape) : + getLogicalCoordinatesFromFlatIndex(['r', 'c', 'd'], outputShape)} + return ivec3(r, c, d); + } + + void main() { + ivec2 resTexRC = ivec2(resultUV.yx * vec2(texShape[0], texShape[1])); + int index = 4 * (resTexRC.x * texShape[1] + resTexRC.y); + + vec4 result = vec4(0.); + + for (int i=0; i<4; i++) { + int flatIndex = index + i; + ivec3 rc = outCoordsFromFlatIndex(flatIndex); + result[i] = getA(rc.x, rc.y, rc.z); + } + + ${glsl.output} = result; + } + `; + } + } + + /** + * @license + * Copyright 2019 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + class DecodeMatrixPackedProgram { + constructor(outputShape) { + this.variableNames = ['A']; + this.packedInputs = true; + this.packedOutput = true; + this.outPackingScheme = PackingScheme.DENSE; + this.customUniforms = [{ name: 'texShape', type: 'ivec2' }]; + const glsl = getGlslDifferences(); + this.outputShape = outputShape; + this.enableShapeUniforms = useShapeUniforms(this.outputShape.length); + this.userCode = ` + ivec3 outCoordsFromFlatIndex(int index) { + ${this.enableShapeUniforms ? + getOutputLogicalCoordinatesFromFlatIndexByUniform(['r', 'c', 'd'], outputShape) : + getLogicalCoordinatesFromFlatIndex(['r', 'c', 'd'], outputShape)} + return ivec3(r, c, d); + } + + void main() { + ivec2 resTexRC = ivec2(resultUV.yx * vec2(texShape[0], texShape[1])); + int index = 4 * (resTexRC.x * texShape[1] + resTexRC.y); + + vec4 result = vec4(0.); + + for (int i=0; i<4; i++) { + int flatIndex = index + i; + ivec3 rc = outCoordsFromFlatIndex(flatIndex); + result[i] = getChannel(getA(rc.x, rc.y, rc.z), vec2(rc.y, rc.z)); + } + + ${glsl.output} = result; + } + `; + } + } + + /** + * @license + * Copyright 2018 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + class EncodeFloatProgram { + constructor(outputShape) { + this.variableNames = ['A']; + this.outTexUsage = TextureUsage.DOWNLOAD; + const glsl = getGlslDifferences(); + this.outputShape = outputShape; + this.userCode = ` + ${ENCODE_FLOAT_SNIPPET} + + void main() { + float x = getAAtOutCoords(); + ${glsl.output} = encode_float(x); + } + `; + } + } + + /** + * @license + * Copyright 2018 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ + class EncodeFloatPackedProgram { + constructor(outputShape) { + this.variableNames = ['A']; + this.packedInputs = true; + this.packedOutput = false; + this.outTexUsage = TextureUsage.DOWNLOAD; + const glsl = getGlslDifferences(); + this.outputShape = outputShape; + this.userCode = ` + ${ENCODE_FLOAT_SNIPPET} + + void main() { + ivec3 coords = getOutputCoords(); + float x = getChannel(getAAtOutCoords(), vec2(coords.y, coords.z)); + ${glsl.output} = encode_float(x); + } + `; + } + } + + /** + * @license + * Copyright 2018 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + const CHANNEL_CHAR_TO_INDEX_MAP = { + 'R': 0, + 'G': 1, + 'B': 2, + 'A': 3 + }; + class EncodeMatrixProgram { + constructor(outputShape, inputIsUnsignedByte = false, usedChannels = 'RGBA') { + this.variableNames = ['A']; + this.customUniforms = [{ name: 'texShape', type: 'ivec2' }]; + const glsl = getGlslDifferences(); + this.outputShape = outputShape; + this.enableShapeUniforms = useShapeUniforms(this.outputShape.length); + let output = `result`; + if (inputIsUnsignedByte) { + output = `floor(result * 255. 
+ 0.5)`; + } + let mainLoop = ''; + for (let usedChannelIndex = 0; usedChannelIndex < usedChannels.length; usedChannelIndex++) { + const curChannel = usedChannels[usedChannelIndex]; + mainLoop += ` + if(offset == ${usedChannelIndex}) { + result = values[${CHANNEL_CHAR_TO_INDEX_MAP[curChannel]}]; + }`; + } + this.userCode = ` + ${this.enableShapeUniforms ? getFlatIndexFrom3DOutput() : + getFlatIndexFrom3D(outputShape)} + + void main() { + ivec3 coords = getOutputCoords(); + int flatIndex = getFlatIndex(coords); + float result = 0.; + int offset = imod(flatIndex, ${usedChannels.length}); + + flatIndex = idiv(flatIndex, ${usedChannels.length}, 1.); + + int r = flatIndex / texShape[1]; + if (r < texShape[0]) { + int c = imod(flatIndex, texShape[1]); + vec2 uv = (vec2(c, r) + halfCR) / vec2(texShape[1], texShape[0]); + vec4 values = ${glsl.texture2D}(A, uv); + ${mainLoop} + } + ${glsl.output} = vec4(${output}, 0., 0., 0.); + } + `; + } + } + + /** + * @license + * Copyright 2018 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + /* + This is how the shader encodes a tensor with shape = [2, 3, 5] + (indices are [batch, row, col]). 
+ + 000|001 002|003 004|xxx 020|021 022|023 024|xxx + ------- ------- ------- ------- ------- ------- + 010|011 012|013 014|xxx xxx|xxx xxx|xxx xxx|xxx + + 100|101 102|103 104|xxx 120|121 122|123 124|xxx + ------- ------- ------- ------- ------- ------- + 110|111 112|113 114|xxx xxx|xxx xxx|xxx xxx|xxx + + Single texels contain only values from the same batch, and from adjacent rows + and columns. + */ + class EncodeMatrixPackedProgram { + constructor(outputShape, inputIsUnsignedByte = false) { + this.variableNames = ['A']; + this.packedInputs = false; + this.packedOutput = true; + this.customUniforms = [{ name: 'texShape', type: 'ivec2' }]; + const glsl = getGlslDifferences(); + this.outputShape = outputShape; + this.enableShapeUniforms = useShapeUniforms(this.outputShape.length); + let mainLoop = ''; + let output = 'result'; + if (inputIsUnsignedByte) { + output = 'floor(result * 255. + 0.5)'; + } + for (let row = 0; row <= 1; row++) { + for (let col = 0; col <= 1; col++) { + const channel = row * 2 + col; + mainLoop += ` + localCoords = coords; + if(localCoords[2] + ${col} < ${this.enableShapeUniforms ? 'outShape[2]' : `${outputShape[2]}`}) { + localCoords[2] += ${col}; + if (localCoords[1] + ${row} < ${this.enableShapeUniforms ? 'outShape[1]' : `${outputShape[1]}`}) { + localCoords[1] += ${row}; + + flatIndex = getFlatIndex(localCoords); + offset = imod(flatIndex, 4); + + flatIndex = idiv(flatIndex, 4, 1.); + + int r = flatIndex / texShape[1]; + int c = imod(flatIndex, texShape[1]); + vec2 uv = (vec2(c, r) + halfCR) / vec2(texShape[1], texShape[0]); + values = ${glsl.texture2D}(A, uv); + + if (offset == 0) { + result[${channel}] = values[0]; + } else if (offset == 1) { + result[${channel}] = values[1]; + } else if (offset == 2) { + result[${channel}] = values[2]; + } else { + result[${channel}] = values[3]; + } + } + } + `; + } + } + this.userCode = ` + ${this.enableShapeUniforms ? 
getFlatIndexFrom3DOutput() : + getFlatIndexFrom3D(outputShape)} + + void main() { + ivec3 coords = getOutputCoords(); + + vec4 result = vec4(0.); + int flatIndex, r, c, offset; + ivec3 localCoords; + vec2 uv; + vec4 values; + + ${mainLoop} + + ${glsl.output} = ${output}; + } + `; + } + } + + /** + * @license + * Copyright 2017 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + function createVertexShader(gl) { + const glsl = getGlslDifferences(); + const vertexShaderSource = `${glsl.version} + precision highp float; + ${glsl.attribute} vec3 clipSpacePos; + ${glsl.attribute} vec2 uv; + ${glsl.varyingVs} vec2 resultUV; + + void main() { + gl_Position = vec4(clipSpacePos, 1); + resultUV = uv; + }`; + return createVertexShader$1(gl, vertexShaderSource); + } + function createVertexBuffer(gl) { + // [x y z u v] * [upper-left, lower-left, upper-right, lower-right] + const vertexArray = new Float32Array([-1, 1, 0, 0, 1, -1, -1, 0, 0, 0, 1, 1, 0, 1, 1, 1, -1, 0, 1, 0]); + return createStaticVertexBuffer(gl, vertexArray); + } + function createIndexBuffer(gl) { + // OpenGL (and WebGL) have "CCW == front" winding + const triangleVertexIndices = new Uint16Array([0, 1, 2, 2, 1, 3]); + return createStaticIndexBuffer(gl, triangleVertexIndices); + } + function createAndConfigureTexture(gl, width, height, internalFormat, textureFormat, textureType) { + 
validateTextureSize(width, height); + const texture = createTexture(gl); + const tex2d = gl.TEXTURE_2D; + callAndCheck(gl, () => gl.bindTexture(tex2d, texture)); + callAndCheck(gl, () => gl.texParameteri(tex2d, gl.TEXTURE_WRAP_S, gl.CLAMP_TO_EDGE)); + callAndCheck(gl, () => gl.texParameteri(tex2d, gl.TEXTURE_WRAP_T, gl.CLAMP_TO_EDGE)); + callAndCheck(gl, () => gl.texParameteri(tex2d, gl.TEXTURE_MIN_FILTER, gl.NEAREST)); + callAndCheck(gl, () => gl.texParameteri(tex2d, gl.TEXTURE_MAG_FILTER, gl.NEAREST)); + if (env().getNumber('WEBGL_VERSION') === 1) { + callAndCheck(gl, () => gl.texImage2D(tex2d, 0, internalFormat, width, height, 0, textureFormat, textureType, null)); + } + else { + callAndCheck(gl, () => gl + .texStorage2D(tex2d, 1, internalFormat, width, height)); + } + callAndCheck(gl, () => gl.bindTexture(gl.TEXTURE_2D, null)); + return { texture, texShape: [height, width] }; + } + function getInternalFormatForFloat32MatrixTexture(textureConfig) { + return textureConfig.internalFormatFloat; + } + function createFloat32MatrixTexture(gl, rows, columns, textureConfig) { + const [width, height] = getUnpackedMatrixTextureShapeWidthHeight(rows, columns); + return createAndConfigureTexture(gl, width, height, getInternalFormatForFloat32MatrixTexture(textureConfig), textureConfig.textureFormatFloat, gl.FLOAT); + } + function getInternalFormatForFloat16MatrixTexture(textureConfig) { + return textureConfig.internalFormatHalfFloat; + } + function createFloat16MatrixTexture(gl, rows, columns, textureConfig) { + const [width, height] = getUnpackedMatrixTextureShapeWidthHeight(rows, columns); + return createAndConfigureTexture(gl, width, height, getInternalFormatForFloat16MatrixTexture(textureConfig), textureConfig.textureFormatFloat, textureConfig.textureTypeHalfFloat); + } + function getInternalFormatForUnsignedBytesMatrixTexture(textureConfig) { + return textureConfig.downloadTextureFormat; + } + function createUnsignedBytesMatrixTexture(gl, rows, columns, textureConfig) { 
+ const [width, height] = getUnpackedMatrixTextureShapeWidthHeight(rows, columns); + return createAndConfigureTexture(gl, width, height, getInternalFormatForUnsignedBytesMatrixTexture(textureConfig), gl.RGBA, gl.UNSIGNED_BYTE); + } + function getInternalFormatForPackedMatrixTexture(textureConfig) { + return textureConfig.internalFormatPackedFloat; + } + function createPackedMatrixTexture(gl, rows, columns, textureConfig) { + const [width, height] = getPackedMatrixTextureShapeWidthHeight(rows, columns); + return createAndConfigureTexture(gl, width, height, getInternalFormatForPackedMatrixTexture(textureConfig), gl.RGBA, gl.FLOAT); + } + function getInternalFormatForFloat16PackedMatrixTexture(textureConfig) { + return textureConfig.internalFormatPackedHalfFloat; + } + function createFloat16PackedMatrixTexture(gl, rows, columns, textureConfig) { + const [width, height] = getPackedMatrixTextureShapeWidthHeight(rows, columns); + return createAndConfigureTexture(gl, width, height, getInternalFormatForFloat16PackedMatrixTexture(textureConfig), gl.RGBA, textureConfig.textureTypeHalfFloat); + } + function bindVertexProgramAttributeStreams(gl, program, vertexBuffer) { + const posOffset = 0; // x is the first buffer element + const uvOffset = 3 * 4; // uv comes after [x y z] + const stride = (3 * 4) + (2 * 4); // xyz + uv, each entry is 4-byte float. 
+ callAndCheck(gl, () => gl.bindBuffer(gl.ARRAY_BUFFER, vertexBuffer)); + const success = bindVertexBufferToProgramAttribute(gl, program, 'clipSpacePos', vertexBuffer, 3, stride, posOffset); + return success && + bindVertexBufferToProgramAttribute(gl, program, 'uv', vertexBuffer, 2, stride, uvOffset); + } + function uploadDenseMatrixToTexture(gl, texture, width, height, data, textureConfig) { + callAndCheck(gl, () => gl.bindTexture(gl.TEXTURE_2D, texture)); + let dataForUpload, texelDataType, internalFormat; + if (data instanceof Uint8Array) { + dataForUpload = new Uint8Array(width * height * 4); + texelDataType = gl.UNSIGNED_BYTE; + internalFormat = gl.RGBA; + } + else { + dataForUpload = new Float32Array(width * height * 4); + texelDataType = gl.FLOAT; + internalFormat = textureConfig.internalFormatPackedFloat; + } + dataForUpload.set(data); + if (env().getNumber('WEBGL_VERSION') === 2) { + callAndCheck(gl, () => gl.texSubImage2D(gl.TEXTURE_2D, 0, 0, 0, width, height, gl.RGBA, texelDataType, dataForUpload)); + } + else { + callAndCheck(gl, () => gl.texImage2D(gl.TEXTURE_2D, 0, internalFormat, width, height, 0, gl.RGBA, texelDataType, dataForUpload)); + } + callAndCheck(gl, () => gl.bindTexture(gl.TEXTURE_2D, null)); + } + function uploadPixelDataToTexture(gl, texture, pixels) { + callAndCheck(gl, () => gl.bindTexture(gl.TEXTURE_2D, texture)); + if (pixels.data instanceof Uint8Array) { + if (env().getNumber('WEBGL_VERSION') === 2) { + callAndCheck(gl, () => gl.texSubImage2D(gl.TEXTURE_2D, 0, 0, 0, pixels.width, pixels.height, gl.RGBA, gl.UNSIGNED_BYTE, pixels.data)); + } + else { + callAndCheck(gl, () => gl.texImage2D(gl.TEXTURE_2D, 0, gl.RGBA, pixels.width, pixels.height, 0, gl.RGBA, gl.UNSIGNED_BYTE, pixels.data)); + } + } + else { + if (env().getNumber('WEBGL_VERSION') === 2) { + callAndCheck(gl, () => gl.texSubImage2D(gl.TEXTURE_2D, 0, 0, 0, gl.RGBA, gl.UNSIGNED_BYTE, pixels)); + } + else { + callAndCheck(gl, () => gl.texImage2D(gl.TEXTURE_2D, 0, gl.RGBA, 
gl.RGBA, gl.UNSIGNED_BYTE, pixels)); + } + } + callAndCheck(gl, () => gl.bindTexture(gl.TEXTURE_2D, null)); + } + function createBufferFromOutputTexture(gl2, rows, columns, textureConfig) { + // Create and bind the buffer. + const buffer = gl2.createBuffer(); + callAndCheck(gl2, () => gl2.bindBuffer(gl2.PIXEL_PACK_BUFFER, buffer)); + // Initialize the buffer to the size of the texture in bytes. + const bytesPerFloat = 4; + const valuesPerTexel = 4; + const bufferSizeBytes = bytesPerFloat * valuesPerTexel * rows * columns; + callAndCheck(gl2, () => gl2.bufferData(gl2.PIXEL_PACK_BUFFER, bufferSizeBytes, gl2.STREAM_READ)); + // Enqueue a command on the GPU command queue to copy of texture into the + // buffer. + callAndCheck(gl2, () => gl2.readPixels(0, 0, columns, rows, gl2.RGBA, gl2.FLOAT, 0)); + callAndCheck(gl2, () => gl2.bindBuffer(gl2.PIXEL_PACK_BUFFER, null)); + return buffer; + } + function downloadFloat32MatrixFromBuffer(gl, buffer, size) { + const gl2 = gl; + const downloadTarget = new Float32Array(size); + gl2.bindBuffer(gl2.PIXEL_PACK_BUFFER, buffer); + gl2.getBufferSubData(gl2.PIXEL_PACK_BUFFER, 0, downloadTarget); + gl2.bindBuffer(gl2.PIXEL_PACK_BUFFER, null); + return downloadTarget; + } + function downloadByteEncodedFloatMatrixFromOutputTexture(gl, rows, columns, textureConfig) { + const [w, h] = getUnpackedMatrixTextureShapeWidthHeight(rows, columns); + const numChannels = 4; + const downloadTarget = new Uint8Array(getUnpackedArraySizeFromMatrixSize(rows * columns, numChannels)); + callAndCheck(gl, () => gl.readPixels(0, 0, w, h, textureConfig.downloadTextureFormat, gl.UNSIGNED_BYTE, downloadTarget)); + // By wrapping the buffer in a Float32Array, we use native browser IEEE 754 + // decoding of the 4 bytes that back each 32 bit float. 
+ return new Float32Array(downloadTarget.buffer); + } + function downloadPackedMatrixFromBuffer(gl, buffer, batch, rows, cols, physicalRows, physicalCols, textureConfig) { + const gl2 = gl; + const downloadTarget = new Float32Array(getPackedRGBAArraySizeFromMatrixShape(physicalRows, physicalCols)); + gl2.bindBuffer(gl2.PIXEL_PACK_BUFFER, buffer); + gl2.getBufferSubData(gl2.PIXEL_PACK_BUFFER, 0, downloadTarget); + gl2.bindBuffer(gl2.PIXEL_PACK_BUFFER, null); + return downloadTarget; + } + function downloadMatrixFromPackedOutputTexture(gl, physicalRows, physicalCols) { + const packedRGBA = new Float32Array(physicalRows * physicalCols * 4); + callAndCheck(gl, () => gl.readPixels(0, 0, physicalCols, physicalRows, gl.RGBA, gl.FLOAT, packedRGBA)); + return packedRGBA; + } + + var gpgpu_util = /*#__PURE__*/Object.freeze({ + __proto__: null, + bindVertexProgramAttributeStreams: bindVertexProgramAttributeStreams, + createBufferFromOutputTexture: createBufferFromOutputTexture, + createFloat16MatrixTexture: createFloat16MatrixTexture, + createFloat16PackedMatrixTexture: createFloat16PackedMatrixTexture, + createFloat32MatrixTexture: createFloat32MatrixTexture, + createIndexBuffer: createIndexBuffer, + createPackedMatrixTexture: createPackedMatrixTexture, + createUnsignedBytesMatrixTexture: createUnsignedBytesMatrixTexture, + createVertexBuffer: createVertexBuffer, + createVertexShader: createVertexShader, + downloadByteEncodedFloatMatrixFromOutputTexture: downloadByteEncodedFloatMatrixFromOutputTexture, + downloadFloat32MatrixFromBuffer: downloadFloat32MatrixFromBuffer, + downloadMatrixFromPackedOutputTexture: downloadMatrixFromPackedOutputTexture, + downloadPackedMatrixFromBuffer: downloadPackedMatrixFromBuffer, + getInternalFormatForFloat16MatrixTexture: getInternalFormatForFloat16MatrixTexture, + getInternalFormatForFloat16PackedMatrixTexture: getInternalFormatForFloat16PackedMatrixTexture, + getInternalFormatForFloat32MatrixTexture: 
getInternalFormatForFloat32MatrixTexture, + getInternalFormatForPackedMatrixTexture: getInternalFormatForPackedMatrixTexture, + getInternalFormatForUnsignedBytesMatrixTexture: getInternalFormatForUnsignedBytesMatrixTexture, + uploadDenseMatrixToTexture: uploadDenseMatrixToTexture, + uploadPixelDataToTexture: uploadPixelDataToTexture + }); + + /** + * @license + * Copyright 2017 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + class GPGPUContext { + constructor(gl) { + this.outputTexture = null; + this.program = null; + this.disposed = false; + this.itemsToPoll = []; + const glVersion = env().getNumber('WEBGL_VERSION'); + if (gl != null) { + this.gl = gl; + setWebGLContext(glVersion, gl); + } + else { + this.gl = getWebGLContext(glVersion); + } + gl = this.gl; + if (env().getNumber('WEBGL_VERSION') === 2) { + const gl2 = gl; + this.createVertexArray = () => { + return callAndCheck(gl2, () => gl2.createVertexArray()); + }; + this.bindVertexArray = (vao) => { + return callAndCheck(gl2, () => gl2.bindVertexArray(vao)); + }; + this.deleteVertexArray = (vao) => { + return callAndCheck(gl2, () => gl2.deleteVertexArray(vao)); + }; + this.getVertexArray = () => { + return callAndCheck(gl2, () => gl2.getParameter(gl2.VERTEX_ARRAY_BINDING)); + }; + } + else if (gl != null) { + const ext = gl.getExtension('OES_vertex_array_object'); + if (ext == null) { 
+ throw new Error('All WebGL1 implementations are expected to offer' + + ' OES_vertex_array_object.'); + } + this.createVertexArray = () => { + return callAndCheck(gl, () => ext.createVertexArrayOES()); + }; + this.bindVertexArray = (vao) => { + return callAndCheck(gl, () => ext.bindVertexArrayOES(vao)); + }; + this.deleteVertexArray = (vao) => { + return callAndCheck(gl, () => ext.deleteVertexArrayOES(vao)); + }; + this.getVertexArray = () => { + return callAndCheck(gl, () => gl.getParameter(ext.VERTEX_ARRAY_BINDING_OES)); + }; + } + // WebGL 2.0 enables texture floats without an extension. + let COLOR_BUFFER_FLOAT = 'WEBGL_color_buffer_float'; + const COLOR_BUFFER_HALF_FLOAT = 'EXT_color_buffer_half_float'; + this.parallelCompilationExtension = + this.gl.getExtension('KHR_parallel_shader_compile'); + if (env().getNumber('WEBGL_VERSION') === 1) { + const TEXTURE_FLOAT = 'OES_texture_float'; + const TEXTURE_HALF_FLOAT = 'OES_texture_half_float'; + this.textureFloatExtension = + getExtensionOrThrow(this.gl, TEXTURE_FLOAT); + if (hasExtension(this.gl, TEXTURE_HALF_FLOAT)) { + this.textureHalfFloatExtension = + getExtensionOrThrow(this.gl, TEXTURE_HALF_FLOAT); + } + else if (env().get('WEBGL_FORCE_F16_TEXTURES')) { + throw new Error('GL context does not support half float textures, yet the ' + + 'environment flag WEBGL_FORCE_F16_TEXTURES is set to true.'); + } + this.colorBufferFloatExtension = this.gl.getExtension(COLOR_BUFFER_FLOAT); + if (hasExtension(this.gl, COLOR_BUFFER_HALF_FLOAT)) { + this.colorBufferHalfFloatExtension = + getExtensionOrThrow(this.gl, COLOR_BUFFER_HALF_FLOAT); + } + else if (env().get('WEBGL_FORCE_F16_TEXTURES')) { + throw new Error('GL context does not support color renderable half floats, yet ' + + 'the environment flag WEBGL_FORCE_F16_TEXTURES is set to true.'); + } + } + else { + COLOR_BUFFER_FLOAT = 'EXT_color_buffer_float'; + if (hasExtension(this.gl, COLOR_BUFFER_FLOAT)) { + this.colorBufferFloatExtension = + 
this.gl.getExtension(COLOR_BUFFER_FLOAT); + } + else if (hasExtension(this.gl, COLOR_BUFFER_HALF_FLOAT)) { + this.colorBufferHalfFloatExtension = + this.gl.getExtension(COLOR_BUFFER_HALF_FLOAT); + } + else { + throw new Error('GL context does not support color renderable floats'); + } + } + this.vertexBuffer = createVertexBuffer(this.gl); + this.indexBuffer = createIndexBuffer(this.gl); + this.framebuffer = createFramebuffer(this.gl); + this.textureConfig = + getTextureConfig(this.gl, this.textureHalfFloatExtension); + } + get debug() { + return env().getBool('DEBUG'); + } + dispose() { + if (this.disposed) { + return; + } + if (this.program != null) { + console.warn('Disposing a GPGPUContext that still has a bound WebGLProgram.' + + ' This is probably a resource leak, delete the program with ' + + 'GPGPUContext.deleteProgram before disposing.'); + } + if (this.outputTexture != null) { + console.warn('Disposing a GPGPUContext that still has a bound output matrix ' + + 'texture. This is probably a resource leak, delete the output ' + + 'matrix texture with GPGPUContext.deleteMatrixTexture before ' + + 'disposing.'); + } + const gl = this.gl; + callAndCheck(gl, () => gl.finish()); + callAndCheck(gl, () => gl.bindFramebuffer(gl.FRAMEBUFFER, null)); + callAndCheck(gl, () => gl.deleteFramebuffer(this.framebuffer)); + callAndCheck(gl, () => gl.bindBuffer(gl.ARRAY_BUFFER, null)); + callAndCheck(gl, () => gl.bindBuffer(gl.ELEMENT_ARRAY_BUFFER, null)); + callAndCheck(gl, () => gl.deleteBuffer(this.indexBuffer)); + this.disposed = true; + } + createFloat32MatrixTexture(rows, columns) { + this.throwIfDisposed(); + return createFloat32MatrixTexture(this.gl, rows, columns, this.textureConfig); + } + createFloat16MatrixTexture(rows, columns) { + this.throwIfDisposed(); + return createFloat16MatrixTexture(this.gl, rows, columns, this.textureConfig); + } + createUnsignedBytesMatrixTexture(rows, columns) { + this.throwIfDisposed(); + return createUnsignedBytesMatrixTexture(this.gl, 
rows, columns, this.textureConfig); + } + uploadPixelDataToTexture(texture, pixels) { + this.throwIfDisposed(); + uploadPixelDataToTexture(this.gl, texture, pixels); + } + uploadDenseMatrixToTexture(texture, width, height, data) { + this.throwIfDisposed(); + uploadDenseMatrixToTexture(this.gl, texture, width, height, data, this.textureConfig); + } + createFloat16PackedMatrixTexture(rows, columns) { + this.throwIfDisposed(); + return createFloat16PackedMatrixTexture(this.gl, rows, columns, this.textureConfig); + } + createPackedMatrixTexture(rows, columns) { + this.throwIfDisposed(); + return createPackedMatrixTexture(this.gl, rows, columns, this.textureConfig); + } + deleteMatrixTexture(texture) { + this.throwIfDisposed(); + if (this.outputTexture === texture) { + unbindColorTextureFromFramebuffer(this.gl, this.framebuffer); + this.outputTexture = null; + } + callAndCheck(this.gl, () => this.gl.deleteTexture(texture)); + } + downloadByteEncodedFloatMatrixFromOutputTexture(texture, rows, columns) { + return this.downloadMatrixDriver(texture, () => downloadByteEncodedFloatMatrixFromOutputTexture(this.gl, rows, columns, this.textureConfig)); + } + downloadPackedMatrixFromBuffer(buffer, batch, rows, columns, physicalRows, physicalCols) { + return downloadPackedMatrixFromBuffer(this.gl, buffer, batch, rows, columns, physicalRows, physicalCols, this.textureConfig); + } + downloadFloat32MatrixFromBuffer(buffer, size) { + return downloadFloat32MatrixFromBuffer(this.gl, buffer, size); + } + createBufferFromTexture(texture, rows, columns) { + this.bindTextureToFrameBuffer(texture); + const result = createBufferFromOutputTexture(this.gl, rows, columns, this.textureConfig); + this.unbindTextureToFrameBuffer(); + return result; + } + createAndWaitForFence() { + const fenceContext = this.createFence(this.gl); + return this.pollFence(fenceContext); + } + createFence(gl) { + let query; + let isFencePassed; + if (env().getBool('WEBGL_FENCE_API_ENABLED')) { + const gl2 = gl; + const 
sync = gl2.fenceSync(gl2.SYNC_GPU_COMMANDS_COMPLETE, 0); + gl.flush(); + isFencePassed = () => { + const status = gl2.clientWaitSync(sync, 0, 0); + return status === gl2.ALREADY_SIGNALED || + status === gl2.CONDITION_SATISFIED; + }; + query = sync; + } + else if (env().getNumber('WEBGL_DISJOINT_QUERY_TIMER_EXTENSION_VERSION') > 0) { + query = this.beginQuery(); + this.endQuery(); + isFencePassed = () => this.isQueryAvailable(query, env().getNumber('WEBGL_DISJOINT_QUERY_TIMER_EXTENSION_VERSION')); + } + else { + // If we have no way to fence, return true immediately. This will fire in + // WebGL 1.0 when there is no disjoint query timer. In this case, because + // the fence passes immediately, we'll immediately ask for a download of + // the texture, which will cause the UI thread to hang. + isFencePassed = () => true; + } + return { query, isFencePassed }; + } + downloadMatrixFromPackedTexture(texture, physicalRows, physicalCols) { + return this.downloadMatrixDriver(texture, () => downloadMatrixFromPackedOutputTexture(this.gl, physicalRows, physicalCols)); + } + createProgram(fragmentShader) { + this.throwIfDisposed(); + const gl = this.gl; + if (this.vertexShader == null) { + this.vertexShader = createVertexShader(gl); + } + const program = createProgram(gl); + callAndCheck(gl, () => gl.attachShader(program, this.vertexShader)); + callAndCheck(gl, () => gl.attachShader(program, fragmentShader)); + linkProgram(gl, program); + const program2 = Object.assign(program, { vao: this.createVertexArray() }); + if (this.debug) { + validateProgram(gl, program2); + } + return program2; + } + buildVao(program) { + this.setProgram(program); + this.bindVertexArray(program.vao); + const gl = this.gl; + // Bind index buffer, and vertex buffers based on program attrib + // locations. 
+ callAndCheck(gl, () => gl.bindBuffer(gl.ELEMENT_ARRAY_BUFFER, this.indexBuffer)); + bindVertexProgramAttributeStreams(gl, program, this.vertexBuffer); + } + deleteProgram(program) { + this.throwIfDisposed(); + if (program === this.program) { + this.program = null; + } + if (program != null) { + callAndCheck(this.gl, () => this.gl.deleteProgram(program)); + this.deleteVertexArray(program.vao); + } + } + setProgram(program) { + this.throwIfDisposed(); + this.program = program; + if (this.program != null) { + if (this.debug) { + validateProgram(this.gl, this.program); + } + } + callAndCheck(this.gl, () => this.gl.useProgram(program)); + } + getUniformLocation(program, uniformName, shouldThrow = true) { + this.throwIfDisposed(); + if (shouldThrow) { + return getProgramUniformLocationOrThrow(this.gl, program, uniformName); + } + else { + return getProgramUniformLocation(this.gl, program, uniformName); + } + } + getAttributeLocation(program, attribute) { + this.throwIfDisposed(); + return callAndCheck(this.gl, () => this.gl.getAttribLocation(program, attribute)); + } + getUniformLocationNoThrow(program, uniformName) { + this.throwIfDisposed(); + return this.gl.getUniformLocation(program, uniformName); + } + setInputMatrixTexture(inputMatrixTexture, uniformLocation, textureUnit) { + this.throwIfDisposed(); + this.throwIfNoProgram(); + bindTextureToProgramUniformSampler(this.gl, inputMatrixTexture, uniformLocation, textureUnit); + } + setOutputMatrixTexture(outputMatrixTexture, rows, columns) { + this.setOutputMatrixTextureDriver(outputMatrixTexture, columns, rows); + } + setOutputPackedMatrixTexture(outputPackedMatrixTexture, rows, columns) { + this.throwIfDisposed(); + const [width, height] = getPackedMatrixTextureShapeWidthHeight(rows, columns); + this.setOutputMatrixTextureDriver(outputPackedMatrixTexture, width, height); + } + setOutputMatrixWriteRegion(startRow, numRows, startColumn, numColumns) { + this.setOutputMatrixWriteRegionDriver(startColumn, startRow, 
numColumns, numRows); + } + setOutputPackedMatrixWriteRegion(startRow, numRows, startColumn, numColumns) { + throw new Error('setOutputPackedMatrixWriteRegion not implemented.'); + } + debugValidate() { + if (this.program != null) { + validateProgram(this.gl, this.program); + } + validateFramebuffer(this.gl); + } + executeProgram() { + this.throwIfDisposed(); + this.throwIfNoProgram(); + const gl = this.gl; + if (this.debug) { + const boundVao = this.getVertexArray(); + console.assert(boundVao === this.program.vao, 'VAO changed between setProgram and executeProgram!'); + this.debugValidate(); + } + callAndCheck(gl, () => gl.drawElements(gl.TRIANGLES, 6, gl.UNSIGNED_SHORT, 0)); + } + blockUntilAllProgramsCompleted() { + this.throwIfDisposed(); + callAndCheck(this.gl, () => this.gl.finish()); + } + getQueryTimerExtension() { + if (this.disjointQueryTimerExtension == null) { + this.disjointQueryTimerExtension = + getExtensionOrThrow(this.gl, env().getNumber('WEBGL_DISJOINT_QUERY_TIMER_EXTENSION_VERSION') === 2 ? 
+ 'EXT_disjoint_timer_query_webgl2' : + 'EXT_disjoint_timer_query'); + } + return this.disjointQueryTimerExtension; + } + getQueryTimerExtensionWebGL2() { + return this.getQueryTimerExtension(); + } + getQueryTimerExtensionWebGL1() { + return this.getQueryTimerExtension(); + } + beginQuery() { + if (env().getNumber('WEBGL_DISJOINT_QUERY_TIMER_EXTENSION_VERSION') === 2) { + const gl2 = this.gl; + const ext = this.getQueryTimerExtensionWebGL2(); + const query = gl2.createQuery(); + gl2.beginQuery(ext.TIME_ELAPSED_EXT, query); + return query; + } + const ext = this.getQueryTimerExtensionWebGL1(); + const query = ext.createQueryEXT(); + ext.beginQueryEXT(ext.TIME_ELAPSED_EXT, query); + return query; + } + endQuery() { + if (env().getNumber('WEBGL_DISJOINT_QUERY_TIMER_EXTENSION_VERSION') === 2) { + const gl2 = this.gl; + const ext = this.getQueryTimerExtensionWebGL2(); + gl2.endQuery(ext.TIME_ELAPSED_EXT); + return; + } + const ext = this.getQueryTimerExtensionWebGL1(); + ext.endQueryEXT(ext.TIME_ELAPSED_EXT); + } + async waitForQueryAndGetTime(query) { + await repeatedTry(() => this.disposed || // while testing contexts are created / disposed + // in rapid succession, so without this check we + // may poll for the query timer indefinitely + this.isQueryAvailable(query, env().getNumber('WEBGL_DISJOINT_QUERY_TIMER_EXTENSION_VERSION'))); + return this.getQueryTime(query, env().getNumber('WEBGL_DISJOINT_QUERY_TIMER_EXTENSION_VERSION')); + } + getQueryTime(query, queryTimerVersion) { + if (queryTimerVersion === 0) { + return null; + } + if (queryTimerVersion === 2) { + const gl2 = this.gl; + const timeElapsedNanos = gl2.getQueryParameter(query, gl2.QUERY_RESULT); + // Return milliseconds. + return timeElapsedNanos / 1000000; + } + else { + const ext = this.getQueryTimerExtensionWebGL1(); + const timeElapsedNanos = ext.getQueryObjectEXT(query, ext.QUERY_RESULT_EXT); + // Return milliseconds. 
+ return timeElapsedNanos / 1000000; + } + } + isQueryAvailable(query, queryTimerVersion) { + if (queryTimerVersion === 0) { + return true; + } + if (queryTimerVersion === 2) { + const gl2 = this.gl; + const ext = this.getQueryTimerExtensionWebGL2(); + const available = gl2.getQueryParameter(query, gl2.QUERY_RESULT_AVAILABLE); + if (this.disjoint == null) { + this.disjoint = this.gl.getParameter(ext.GPU_DISJOINT_EXT); + } + return available && !this.disjoint; + } + else { + const ext = this.getQueryTimerExtensionWebGL1(); + const available = ext.getQueryObjectEXT(query, ext.QUERY_RESULT_AVAILABLE_EXT); + if (this.disjoint == null) { + this.disjoint = this.gl.getParameter(ext.GPU_DISJOINT_EXT); + } + return available && !this.disjoint; + } + } + pollFence(fenceContext) { + return new Promise(resolve => { + this.addItemToPoll(() => fenceContext.isFencePassed(), () => resolve()); + }); + } + pollItems() { + // Find the last query that has finished. + const index = linearSearchLastTrue(this.itemsToPoll.map(x => x.isDoneFn)); + for (let i = 0; i <= index; ++i) { + const { resolveFn } = this.itemsToPoll[i]; + resolveFn(); + } + this.itemsToPoll = this.itemsToPoll.slice(index + 1); + } + addItemToPoll(isDoneFn, resolveFn) { + this.itemsToPoll.push({ isDoneFn, resolveFn }); + if (this.itemsToPoll.length > 1) { + // We already have a running loop that polls. + return; + } + // Start a new loop that polls. + let scheduleFn = undefined; + if ('setTimeoutCustom' in env().platform) { + scheduleFn = env().platform.setTimeoutCustom.bind(env().platform); + } + repeatedTry(() => { + this.pollItems(); + // End the loop if no more items to poll. 
+ return this.itemsToPoll.length === 0; + }, () => 0, null, scheduleFn); + } + bindTextureToFrameBuffer(texture) { + this.throwIfDisposed(); + bindColorTextureToFramebuffer(this.gl, texture, this.framebuffer); + if (this.debug) { + validateFramebuffer(this.gl); + } + } + unbindTextureToFrameBuffer() { + if (this.outputTexture != null) { + bindColorTextureToFramebuffer(this.gl, this.outputTexture, this.framebuffer); + if (this.debug) { + validateFramebuffer(this.gl); + } + } + else { + unbindColorTextureFromFramebuffer(this.gl, this.framebuffer); + } + } + downloadMatrixDriver(texture, downloadAndDecode) { + this.bindTextureToFrameBuffer(texture); + const result = downloadAndDecode(); + this.unbindTextureToFrameBuffer(); + return result; + } + setOutputMatrixTextureDriver(outputMatrixTextureMaybePacked, width, height) { + this.throwIfDisposed(); + const gl = this.gl; + bindColorTextureToFramebuffer(gl, outputMatrixTextureMaybePacked, this.framebuffer); + if (this.debug) { + validateFramebuffer(gl); + } + this.outputTexture = outputMatrixTextureMaybePacked; + callAndCheck(gl, () => gl.viewport(0, 0, width, height)); + callAndCheck(gl, () => gl.scissor(0, 0, width, height)); + } + setOutputMatrixWriteRegionDriver(x, y, width, height) { + this.throwIfDisposed(); + callAndCheck(this.gl, () => this.gl.scissor(x, y, width, height)); + } + throwIfDisposed() { + if (this.disposed) { + throw new Error('Attempted to use disposed GPGPUContext.'); + } + } + throwIfNoProgram() { + if (this.program == null) { + throw new Error('No GPU program is currently set.'); + } + } + } + /** + * Finds the index of the last true element using linear search. 
+ * Note: We can't do binary search because Chrome expects us to explicitly + * test all fences before download: + * https://github.com/tensorflow/tfjs/issues/1145 + */ + function linearSearchLastTrue(arr) { + let i = 0; + for (; i < arr.length; ++i) { + const isDone = arr[i](); + if (!isDone) { + break; + } + } + return i - 1; + } + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + const { addImpl: addImplCPU, bincountImpl: bincountImplCPU, bincountReduceImpl: bincountReduceImplCPU, bitwiseAndImpl: bitwiseAndImplCPU, castImpl: castImplCPU, ceilImpl: ceilImplCPU, concatImpl: concatImplCPU, equalImpl: equalImplCPU, expImpl: expImplCPU, expm1Impl: expm1ImplCPU, floorImpl: floorImplCPU, gatherNdImpl: gatherNdImplCPU, gatherV2Impl: gatherV2ImplCPU, greaterImpl: greaterImplCPU, greaterEqualImpl: greaterEqualImplCPU, lessImpl: lessImplCPU, lessEqualImpl: lessEqualImplCPU, linSpaceImpl: linSpaceImplCPU, logImpl: logImplCPU, maxImpl: maxImplCPU, maximumImpl: maximumImplCPU, minimumImpl: minimumImplCPU, multiplyImpl: multiplyImplCPU, negImpl: negImplCPU, notEqualImpl: notEqualImplCPU, prodImpl: prodImplCPU, raggedGatherImpl: raggedGatherImplCPU, raggedRangeImpl: raggedRangeImplCPU, raggedTensorToTensorImpl: raggedTensorToTensorImplCPU, rangeImpl: rangeImplCPU, rsqrtImpl: rsqrtImplCPU, scatterImpl: scatterImplCPU, 
sigmoidImpl: sigmoidImplCPU, simpleAbsImpl: simpleAbsImplCPU, sliceImpl: sliceImplCPU, sparseFillEmptyRowsImpl: sparseFillEmptyRowsImplCPU, sparseReshapeImpl: sparseReshapeImplCPU, sparseSegmentReductionImpl: sparseSegmentReductionImplCPU, sqrtImpl: sqrtImplCPU, staticRegexReplaceImpl: staticRegexReplaceImplCPU, stridedSliceImpl: stridedSliceImplCPU, stringNGramsImpl: stringNGramsImplCPU, stringSplitImpl: stringSplitImplCPU, stringToHashBucketFastImpl: stringToHashBucketFastImplCPU, subImpl: subImplCPU, tileImpl: tileImplCPU, topKImpl: topKImplCPU, transposeImpl: transposeImplCPU, uniqueImpl: uniqueImplCPU, } = shared; + + /** + * @license + * Copyright 2018 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + function getVecChannels(name, rank) { + return ['x', 'y', 'z', 'w', 'u', 'v'].slice(0, rank).map(d => `${name}.${d}`); + } + function getChannels(name, rank) { + if (rank === 1) { + return [name]; + } + return getVecChannels(name, rank); + } + function getSourceCoords$2(rank, dims) { + if (rank === 1) { + return 'rc'; + } + let coords = ''; + for (let i = 0; i < rank; i++) { + coords += dims[i]; + if (i < rank - 1) { + coords += ','; + } + } + return coords; + } + + /** + * @license + * Copyright 2018 Google LLC. All Rights Reserved. 
+ * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + class PackProgram { + constructor(outputShape) { + this.variableNames = ['A']; + this.packedInputs = false; + this.packedOutput = true; + // Only input / output 3D tensors. + this.outputShape = outputShape; + this.rank = outputShape.length; + this.enableShapeUniforms = useShapeUniforms(this.outputShape.length); + if (this.rank === 0) { + this.userCode = ` + void main() { + setOutput(vec4(getA(), 0., 0., 0.)); + } + `; + } + else { + const channels = getChannels('rc', this.rank); + const dtype = getCoordsDataType(this.rank); + const outOfBoundsCondition = this.getOutOfBoundsCondition(channels); + const setup = this.getSetup(channels); + const output = this.getOutput(channels); + this.userCode = ` + void main() { + ${dtype} rc = getOutputCoords(); + + if(${outOfBoundsCondition}) { + setOutput(vec4(0)); + } else { + ${setup} + + setOutput(vec4(${output})); + } + } + `; + } + } + getSourceCoordsArr(dims) { + const coords = []; + for (let row = 0; row <= 1; row++) { + for (let col = 0; col <= 1; col++) { + let coord = `${row === 0 ? 'r' : 'rp1'}, ${col === 0 ? 'c' : 'cp1'}`; + for (let d = 2; d < this.rank; d++) { + coord = `${dims[dims.length - 1 - d]},` + coord; + } + coords.push(coord); + } + } + return coords; + } + getOutOfBoundsCondition(dims) { + if (this.rank === 1) { + return `rc > ${this.enableShapeUniforms ? 
'outShape' : this.outputShape[0]}`; + } + let cond = ''; + for (let i = this.rank - 2; i < this.rank; i++) { + cond += `${dims[i]} >= ${this.enableShapeUniforms ? `outShape[${i}]` : this.outputShape[i]}`; + if (i < this.rank - 1) { + cond += '||'; + } + } + return cond; + } + getSetup(dims) { + if (this.rank === 1) { + return ''; + } + const innerDims = dims.slice(-2); + const col = this.enableShapeUniforms ? `outShape[${this.rank} - 1]` : + this.outputShape[this.rank - 1]; + const row = this.enableShapeUniforms ? `outShape[${this.rank} - 2]` : + this.outputShape[this.rank - 2]; + return ` + int r = ${innerDims[0]}; + int c = ${innerDims[1]}; + int rp1 = r + 1; + int cp1 = c + 1; + + bool cEdge = cp1 >= ${col}; + bool rEdge = rp1 >= ${row}; + `; + } + getOutput(dims) { + const sourceCoords = this.getSourceCoordsArr(dims); + if (this.rank === 1) { + const outShape = this.enableShapeUniforms ? 'outShape' : this.outputShape[0]; + return `getA(rc), (rc + 1 >= ${outShape} ? 0. : getA(rc + 1)), 0, 0`; + } + return `getA(${sourceCoords[0]}), + cEdge ? 0. : getA(${sourceCoords[1]}), + rEdge ? 0. : getA(${sourceCoords[2]}), + rEdge || cEdge ? 0. : getA(${sourceCoords[3]})`; + } + } + + /** + * @license + * Copyright 2018 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ + class ReshapePackedProgram { + constructor(outputShape, inputShape) { + this.variableNames = ['A']; + this.packedInputs = true; + this.packedOutput = true; + this.customUniforms = [{ name: 'inputShape', type: 'ivec3' }]; + this.outputShape = outputShape; + this.enableShapeUniforms = useShapeUniforms(this.outputShape.length); + let mainLoop = ``; + for (let i = 0; i < 4; i++) { + let thisRC = `thisRC = rc;`; + if (i % 2 === 1) { + thisRC += `thisRC.z += 1;`; + } + if (i > 1) { + thisRC += `thisRC.y += 1;`; + } + mainLoop += ` + ${thisRC} + ${i > 0 ? `if(thisRC.y < rows && thisRC.z < cols){` : ''} + int flatIndex = getFlatIndex(thisRC); + + ivec3 inputRC = inputCoordsFromReshapedOutCoords(flatIndex); + vec2 inputRCInnerDims = vec2(float(inputRC.y),float(inputRC.z)); + + result[${i}] = + getChannel(getA(inputRC.x, inputRC.y, inputRC.z), inputRCInnerDims); + ${i > 0 ? '}' : ''} + `; + } + this.userCode = ` + ${getReshapedInputCoords(inputShape, this.enableShapeUniforms)} + ${this.enableShapeUniforms ? getFlatIndexFrom3DOutput() : + getFlatIndexFrom3D(outputShape)} + + void main() { + ivec3 rc = getOutputCoords(); + + vec4 result = vec4(0.); + + ivec3 thisRC; + int rows = ${this.enableShapeUniforms ? 'outShape[1]' : outputShape[1]}; + int cols = ${this.enableShapeUniforms ? 'outShape[2]' : outputShape[2]}; + + ${mainLoop} + + setOutput(result); + } + `; + } + } + function getReshapedInputCoords(shape, enableShapeUniforms) { + const coordsFromIndexSnippet = enableShapeUniforms ? + getLogicalCoordinatesFromFlatIndexByUniform(['r', 'c', 'd'], 'inputShape') : + getLogicalCoordinatesFromFlatIndex(['r', 'c', 'd'], shape); + return ` + ivec3 inputCoordsFromReshapedOutCoords(int index) { + ${coordsFromIndexSnippet} + return ivec3(r, c, d); + } + `; + } + + /** + * @license + * Copyright 2017 Google LLC. All Rights Reserved. 
+ * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + class TextureManager { + constructor(gpgpu) { + this.gpgpu = gpgpu; + this.numUsedTextures = 0; + this.numFreeTextures = 0; + this._numBytesAllocated = 0; + // Number of bytes that have been allocated and available for reuse. + this._numBytesFree = 0; + this.freeTextures = {}; + this.usedTextures = {}; + this.logEnabled = false; + } + acquireTexture(shapeRC, usage, isPacked) { + const physicalTexType = getPhysicalFromLogicalTextureType(usage, isPacked); + const shapeKey = getKeyFromTextureShape(shapeRC, physicalTexType, isPacked); + if (!(shapeKey in this.freeTextures)) { + this.freeTextures[shapeKey] = []; + } + if (!(shapeKey in this.usedTextures)) { + this.usedTextures[shapeKey] = []; + } + const texBytes = computeBytes(shapeRC, physicalTexType, this.gpgpu.gl, this.gpgpu.textureConfig, isPacked); + if (this.freeTextures[shapeKey].length > 0) { + this.numFreeTextures--; + this.numUsedTextures++; + this._numBytesFree -= texBytes; + this.log(); + const newTexture = this.freeTextures[shapeKey].pop(); + this.usedTextures[shapeKey].push(newTexture); + return newTexture; + } + let newTexture; + if (physicalTexType === PhysicalTextureType.PACKED_2X2_FLOAT32) { + newTexture = this.gpgpu.createPackedMatrixTexture(shapeRC[0], shapeRC[1]); + } + else if (physicalTexType === PhysicalTextureType.PACKED_2X2_FLOAT16) { + newTexture = + 
this.gpgpu.createFloat16PackedMatrixTexture(shapeRC[0], shapeRC[1]); + } + else if (physicalTexType === PhysicalTextureType.UNPACKED_FLOAT32) { + newTexture = + this.gpgpu.createFloat32MatrixTexture(shapeRC[0], shapeRC[1]); + } + else if (physicalTexType === PhysicalTextureType.UNPACKED_FLOAT16) { + newTexture = + this.gpgpu.createFloat16MatrixTexture(shapeRC[0], shapeRC[1]); + } + else if (physicalTexType === PhysicalTextureType.PACKED_4X1_UNSIGNED_BYTE) { + newTexture = + this.gpgpu.createUnsignedBytesMatrixTexture(shapeRC[0], shapeRC[1]); + } + this.usedTextures[shapeKey].push(newTexture); + this.numUsedTextures++; + this._numBytesAllocated += texBytes; + this.log(); + return newTexture; + } + releaseTexture(texture, shape, logicalTexType, isPacked) { + if (this.freeTextures == null) { + // Already disposed. + return; + } + const physicalTexType = getPhysicalFromLogicalTextureType(logicalTexType, isPacked); + const shapeKey = getKeyFromTextureShape(shape, physicalTexType, isPacked); + if (!(shapeKey in this.freeTextures)) { + this.freeTextures[shapeKey] = []; + } + const texBytes = computeBytes(shape, physicalTexType, this.gpgpu.gl, this.gpgpu.textureConfig, isPacked); + const deleteTexThreshold = env() + .getNumber('WEBGL_DELETE_TEXTURE_THRESHOLD'); + if (deleteTexThreshold !== -1 && + this._numBytesAllocated > deleteTexThreshold) { + this.gpgpu.deleteMatrixTexture(texture.texture); + this._numBytesAllocated -= texBytes; + } + else { + this.freeTextures[shapeKey].push(texture); + this.numFreeTextures++; + this._numBytesFree += texBytes; + } + this.numUsedTextures--; + const texList = this.usedTextures[shapeKey]; + const texIndex = texList && texList.indexOf(texture); + if (texIndex == null || texIndex < 0) { + throw new Error('Cannot release a texture that was never provided by this ' + + 'texture manager'); + } + texList[texIndex] = texList[texList.length - 1]; + texList.pop(); + this.log(); + } + log() { + if (!this.logEnabled) { + return; + } + const total = 
this.numFreeTextures + this.numUsedTextures; + console.log('Free/Used', `${this.numFreeTextures} / ${this.numUsedTextures}`, `(${total})`); + const freeRatio = this._numBytesFree / this._numBytesAllocated; + console.log(`Bytes allocated: ${this._numBytesAllocated}`); + console.log(`Bytes unused: ${this._numBytesFree} (${Math.round(100 * freeRatio)}%)`); + } + get numBytesAllocated() { + return this._numBytesAllocated; + } + get numBytesFree() { + return this._numBytesFree; + } + getNumUsedTextures() { + return this.numUsedTextures; + } + getNumFreeTextures() { + return this.numFreeTextures; + } + dispose() { + if (this.freeTextures == null) { + // Already disposed. + return; + } + for (const texShape in this.freeTextures) { + this.freeTextures[texShape].forEach(tex => { + this.gpgpu.deleteMatrixTexture(tex.texture); + }); + } + for (const texShape in this.usedTextures) { + this.usedTextures[texShape].forEach(tex => { + this.gpgpu.deleteMatrixTexture(tex.texture); + }); + } + // TODO: Assign non-null value (empty object) to textures after disposed. 
+ this.freeTextures = null; + this.usedTextures = null; + this.numUsedTextures = 0; + this.numFreeTextures = 0; + this._numBytesAllocated = 0; + this._numBytesFree = 0; + } + } + function numBytesForInternalFormat(gl, internalFormat) { + // tslint:disable-next-line:no-any + const glany = gl; + if (internalFormat === glany.R32F) { + return 4; + } + else if (internalFormat === glany.R16F) { + return 2; + } + else if (internalFormat === glany.RGBA32F) { + return 16; + } + else if (internalFormat === gl.RGBA) { + return 16; + } + else if (internalFormat === glany.RGBA16F) { + return 8; + } + else if (internalFormat === glany.RGBA8) { + return 4; + } + throw new Error(`Unknown internal format ${internalFormat}`); + } + function computeBytes(shape, physicalTexType, gl, textureConfig, isPacked) { + // It is not possible to infer packed status from the texture type because + // depending on the textureConfig, different texture types may resolve to the + // same internal format (e.g. in WebGL1, the internal format for + // UNPACKED_FLOAT16 textures is gl.RGBA). Therefore we pass in `isPacked` + // explicitly. 
+ const internalFormat = internalFormatForPhysicalTexType(physicalTexType, textureConfig); + let numElements; + if (isPacked) { + const [packedWidth, packedHeight] = getPackedMatrixTextureShapeWidthHeight(shape[0], shape[1]); + numElements = packedWidth * packedHeight; + } + else { + const [width, height] = getUnpackedMatrixTextureShapeWidthHeight(shape[0], shape[1]); + numElements = width * height; + } + const bytesPerElement = numBytesForInternalFormat(gl, internalFormat); + return numElements * bytesPerElement; + } + function internalFormatForPhysicalTexType(physicalTexType, textureConfig) { + switch (physicalTexType) { + case PhysicalTextureType.PACKED_2X2_FLOAT32: + return getInternalFormatForPackedMatrixTexture(textureConfig); + case PhysicalTextureType.PACKED_2X2_FLOAT16: + return getInternalFormatForFloat16PackedMatrixTexture(textureConfig); + case PhysicalTextureType.UNPACKED_FLOAT32: + return getInternalFormatForFloat32MatrixTexture(textureConfig); + case PhysicalTextureType.UNPACKED_FLOAT16: + return getInternalFormatForFloat16MatrixTexture(textureConfig); + case PhysicalTextureType.PACKED_4X1_UNSIGNED_BYTE: + return getInternalFormatForUnsignedBytesMatrixTexture(textureConfig); + default: + throw new Error(`Unknown physical texture type ${physicalTexType}`); + } + } + function getPhysicalTextureForRendering(isPacked) { + if (env().getBool('WEBGL_RENDER_FLOAT32_ENABLED')) { + if (isPacked) { + return PhysicalTextureType.PACKED_2X2_FLOAT32; + } + return PhysicalTextureType.UNPACKED_FLOAT32; + } + if (isPacked) { + return PhysicalTextureType.PACKED_2X2_FLOAT16; + } + return PhysicalTextureType.UNPACKED_FLOAT16; + } + function getPhysicalFromLogicalTextureType(logicalTexType, isPacked) { + if (logicalTexType === TextureUsage.UPLOAD) { + return PhysicalTextureType.PACKED_2X2_FLOAT32; + } + else if (logicalTexType === TextureUsage.RENDER || logicalTexType == null) { + return getPhysicalTextureForRendering(isPacked); + } + else if (logicalTexType === 
TextureUsage.DOWNLOAD || + logicalTexType === TextureUsage.PIXELS) { + return PhysicalTextureType.PACKED_4X1_UNSIGNED_BYTE; + } + throw new Error(`Unknown logical texture type ${logicalTexType}`); + } + function getKeyFromTextureShape(shapeRowsCol, physicalTexType, isPacked) { + return `${shapeRowsCol[0]}_${shapeRowsCol[1]}_${physicalTexType}_${isPacked}`; + } + + /** + * @license + * Copyright 2017 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + class UnaryOpProgram { + constructor(aShape, opSnippet) { + this.variableNames = ['A']; + this.outputShape = aShape; + this.enableShapeUniforms = useShapeUniforms(this.outputShape.length); + this.userCode = ` + float unaryOperation(float x) { + ${opSnippet} + } + + void main() { + float x = getAAtOutCoords(); + float y = unaryOperation(x); + + setOutput(y); + } + `; + } + } + const CHECK_NAN_SNIPPET$1 = `if (isnan(x)) return x;`; + const LINEAR$1 = `return x;`; + const ABS$1 = `return abs(x);`; + function STEP(alpha = 0.0) { + return CHECK_NAN_SNIPPET$1 + ` + return x > 0.0 ? 1.0 : float(${alpha}); + `; + } + const ELU$2 = `return (x >= 0.0) ? x : (exp(x) - 1.0);`; + const RELU$2 = CHECK_NAN_SNIPPET$1 + ` + return (x < 0.0) ? 0.0 : x; +`; + const RELU6$2 = CHECK_NAN_SNIPPET$1 + ` + return (x < 0.0) ? 
0.0 : min(6.0, x); +`; + const CLONE = 'return x;'; + const SIGMOID$2 = `return 1.0 / (1.0 + exp(-1.0 * x));`; + + /** + * @license + * Copyright 2018 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + const LINEAR = `return x;`; + const ELU$1 = ` + vec4 result; + + result.r = (x.r >= 0.0) ? x.r : (exp(x.r) - 1.0); + result.g = (x.g >= 0.0) ? x.g : (exp(x.g) - 1.0); + result.b = (x.b >= 0.0) ? x.b : (exp(x.b) - 1.0); + result.a = (x.a >= 0.0) ? x.a : (exp(x.a) - 1.0); + + return result; +`; + const RELU$1 = ` + vec4 result = x * vec4(greaterThanEqual(x, vec4(0.0))); + bvec4 isNaN = isnan(x); + + result.r = isNaN.r ? x.r : result.r; + result.g = isNaN.g ? x.g : result.g; + result.b = isNaN.b ? x.b : result.b; + result.a = isNaN.a ? x.a : result.a; + + return result; +`; + const RELU6$1 = ` + vec4 result = min(x, vec4(6.)) * vec4(greaterThanEqual(x, vec4(0.0))); + bvec4 isNaN = isnan(x); + + result.r = isNaN.r ? x.r : result.r; + result.g = isNaN.g ? x.g : result.g; + result.b = isNaN.b ? x.b : result.b; + result.a = isNaN.a ? 
x.a : result.a; + + return result; +`; + const SIGMOID$1 = `return 1.0 / (1.0 + exp(-1.0 * x));`; + class UnaryOpPackedProgram { + constructor(aShape, opSnippet) { + this.variableNames = ['A']; + this.packedInputs = true; + this.packedOutput = true; + this.outputShape = aShape; + this.enableShapeUniforms = useShapeUniforms(this.outputShape.length); + this.userCode = ` + vec4 unaryOperation(vec4 x) { + ${opSnippet} + } + + void main() { + vec4 x = getAAtOutCoords(); + vec4 y = unaryOperation(x); + + setOutput(y); + } + `; + } + } + + /** + * @license + * Copyright 2018 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + class UnpackProgram { + constructor(outputShape) { + this.variableNames = ['A']; + this.packedInputs = true; + this.packedOutput = false; + this.outputShape = outputShape; + this.enableShapeUniforms = useShapeUniforms(this.outputShape.length); + const rank = outputShape.length; + const channels = getChannels('rc', rank); + const dtype = getCoordsDataType(rank); + const sourceCoords = getSourceCoords$2(rank, channels); + const innerDims = channels.slice(-2); + const coords = rank <= 1 ? 
'rc' : `vec2(${innerDims.join(',')})`; + this.userCode = ` + void main() { + ${dtype} rc = getOutputCoords(); + vec4 packedInput = getA(${sourceCoords}); + + setOutput(getChannel(packedInput, ${coords})); + } + `; + } + } + + /** + * @license + * Copyright 2017 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + const whereImpl = whereImpl$2; + const EPSILON_FLOAT32 = 1e-7; + const EPSILON_FLOAT16 = 1e-4; + const binaryCaches = {}; + function getBinaryCache(webGLVersion) { + if (webGLVersion in binaryCaches) { + return binaryCaches[webGLVersion]; + } + binaryCaches[webGLVersion] = {}; + return binaryCaches[webGLVersion]; + } + // Empirically determined constant used to determine size threshold for handing + // off execution to the CPU. + const CPU_HANDOFF_SIZE_THRESHOLD = env().getNumber('CPU_HANDOFF_SIZE_THRESHOLD'); + // Empirically determined constant used to decide the number of MB on GPU + // before we warn about high memory use. The MB are this constant * screen area + // * dpi / 1024 / 1024. + const BEFORE_PAGING_CONSTANT = 600; + function numMBBeforeWarning() { + if (env().global.screen == null) { + return 1024; // 1 GB. 
+ } + return (env().global.screen.height * env().global.screen.width * + window.devicePixelRatio) * + BEFORE_PAGING_CONSTANT / 1024 / 1024; + } + class MathBackendWebGL extends KernelBackend { + nextDataId() { + return MathBackendWebGL.nextDataId++; + } + constructor(gpuResource) { + super(); + // Maps data ids that have a pending read operation, to list of subscribers. + this.pendingRead = new WeakMap(); + // List of data ids that are scheduled for disposal, but are waiting on a + // pending read operation. + this.pendingDisposal = new WeakSet(); + // Used to count the number of 'shallow' sliced tensors that point to the + // same data id. + this.dataRefCount = new WeakMap(); + this.numBytesInGPU = 0; + // Accumulated time spent (including blocking) in uploading data to webgl. + this.uploadWaitMs = 0; + // Accumulated time spent (including blocking in downloading data from webgl. + this.downloadWaitMs = 0; + // record the last manual GL Flush time. + this.lastGlFlushTime = 0; + this.warnedAboutMemory = false; + this.pendingDeletes = 0; + this.disposed = false; + if (!env().getBool('HAS_WEBGL')) { + throw new Error('WebGL is not supported on this device'); + } + let newGPGPU; + if (gpuResource != null) { + if (gpuResource instanceof GPGPUContext) { + newGPGPU = gpuResource; + } + else { + const gl = getWebGLContext(env().getNumber('WEBGL_VERSION'), gpuResource); + newGPGPU = new GPGPUContext(gl); + } + this.binaryCache = {}; + this.gpgpuCreatedLocally = false; + } + else { + const gl = getWebGLContext(env().getNumber('WEBGL_VERSION')); + newGPGPU = new GPGPUContext(gl); + this.binaryCache = getBinaryCache(env().getNumber('WEBGL_VERSION')); + this.gpgpuCreatedLocally = true; + } + this.gpgpu = newGPGPU; + this.canvas = this.gpgpu.gl.canvas; + this.textureManager = new TextureManager(this.gpgpu); + this.numMBBeforeWarning = numMBBeforeWarning(); + this.texData = new DataStorage(this, engine()); + } + numDataIds() { + return this.texData.numDataIds() - 
this.pendingDeletes; + } + // Writes a new entry to the data store with a WebGL texture, and registers it + // to the texture manager. + writeTexture(texture, shape, dtype, texHeight, texWidth, channels) { + // Temporarily create an tensor info to make the texture compatible with + // the runWebGLProgram's input. + const input = this.makeTensorInfo(shape, dtype); + const inData = this.texData.get(input.dataId); + // Even though the input texture could be unpacked or dense packed, it is + // always considered as unpacked for EncodeMatrixProgram. + inData.isPacked = false; + // Bind texture to the input tensor. + inData.texture = { texture, texShape: [texHeight, texWidth] }; + inData.texShape = [texHeight, texWidth]; + const shapeAs3D = getShapeAs3D(shape); + const program = new EncodeMatrixProgram(shapeAs3D, false /* isByteArray */, channels); + const output = this.runWebGLProgram(program, [input], dtype, [[texHeight, texWidth]]); + output.shape = shape; + // Unbind the texture from the input tensor to avoid the texture being + // released. + inData.texture = null; + this.disposeIntermediateTensorInfo(input); + return output.dataId; + } + write(values, shape, dtype) { + if (env().getBool('WEBGL_CHECK_NUMERICAL_PROBLEMS') || + env().getBool('DEBUG')) { + this.checkNumericalProblems(values); + } + if (dtype === 'complex64' && values != null) { + throw new Error(`Cannot write to a complex64 dtype. ` + + `Please use tf.complex(real, imag).`); + } + const dataId = { id: this.nextDataId() }; + this.texData.set(dataId, { shape, dtype, values, usage: TextureUsage.UPLOAD, refCount: 1 }); + return dataId; + } + /** Return refCount of a `TensorData`. */ + refCount(dataId) { + if (this.texData.has(dataId)) { + const tensorData = this.texData.get(dataId); + return tensorData.refCount; + } + return 0; + } + /** Increase refCount of a `TextureData`. 
*/ + incRef(dataId) { + const texData = this.texData.get(dataId); + texData.refCount++; + } + /** Decrease refCount of a `TextureData`. */ + decRef(dataId) { + if (this.texData.has(dataId)) { + const texData = this.texData.get(dataId); + texData.refCount--; + } + } + move(dataId, values, shape, dtype, refCount) { + if (env().getBool('DEBUG')) { + this.checkNumericalProblems(values); + } + if (dtype === 'complex64') { + throw new Error(`Cannot write to a complex64 dtype. ` + + `Please use tf.complex(real, imag).`); + } + this.texData.set(dataId, { shape, dtype, values, usage: TextureUsage.UPLOAD, refCount }); + } + disposeIntermediateTensorInfo(tensorInfo) { + this.disposeData(tensorInfo.dataId); + } + readSync(dataId) { + const texData = this.texData.get(dataId); + const { values, dtype, complexTensorInfos, slice, shape, isPacked } = texData; + // The presence of `slice` indicates this tensor is a shallow slice of a + // different tensor, and is using that original tensor's texture. Run + // `clone` in order to copy that texture and read from it. 
+ if (slice != null) { + let program; + if (isPacked) { + program = new UnaryOpPackedProgram(shape, CLONE); + } + else { + program = new UnaryOpProgram(shape, CLONE); + } + const res = this.runWebGLProgram(program, [{ dataId, shape, dtype }], dtype); + const data = this.readSync(res.dataId); + this.disposeIntermediateTensorInfo(res); + return data; + } + if (values != null) { + return this.convertAndCacheOnCPU(dataId); + } + if (dtype === 'string') { + return values; + } + const shouldTimeProgram = this.activeTimers != null; + let start; + if (shouldTimeProgram) { + start = now(); + } + let result; + if (dtype === 'complex64') { + const realValues = this.readSync(complexTensorInfos.real.dataId); + const imagValues = this.readSync(complexTensorInfos.imag.dataId); + result = mergeRealAndImagArrays(realValues, imagValues); + } + else { + result = this.getValuesFromTexture(dataId); + } + if (shouldTimeProgram) { + this.downloadWaitMs += now() - start; + } + return this.convertAndCacheOnCPU(dataId, result); + } + async read(dataId) { + if (this.pendingRead.has(dataId)) { + const subscribers = this.pendingRead.get(dataId); + return new Promise(resolve => subscribers.push(resolve)); + } + const texData = this.texData.get(dataId); + const { values, shape, slice, dtype, complexTensorInfos, isPacked } = texData; + // The presence of `slice` indicates this tensor is a shallow slice of a + // different tensor, and is using that original tensor's texture. Run + // `clone` in order to copy that texture and read from it. 
+ if (slice != null) { + let program; + if (isPacked) { + program = new UnaryOpPackedProgram(shape, CLONE); + } + else { + program = new UnaryOpProgram(shape, CLONE); + } + const res = this.runWebGLProgram(program, [{ dataId, shape, dtype }], dtype); + const data = this.read(res.dataId); + this.disposeIntermediateTensorInfo(res); + return data; + } + if (values != null) { + return this.convertAndCacheOnCPU(dataId); + } + if (env().getBool('DEBUG')) { + // getBool('WEBGL_DOWNLOAD_FLOAT_ENABLED') caused a blocking GPU call. + // For performance reason, only check it for debugging. In production, + // it doesn't handle this use case anyway, so behavior is not changed. + if (!env().getBool('WEBGL_DOWNLOAD_FLOAT_ENABLED') && + env().getNumber('WEBGL_VERSION') === 2) { + throw new Error(`tensor.data() with WEBGL_DOWNLOAD_FLOAT_ENABLED=false and ` + + `WEBGL_VERSION=2 not yet supported.`); + } + } + let buffer = null; + let tmpDownloadTarget; + if (dtype !== 'complex64' && env().get('WEBGL_BUFFER_SUPPORTED')) { + // Possibly copy the texture into a buffer before inserting a fence. + tmpDownloadTarget = this.decode(dataId); + const tmpData = this.texData.get(tmpDownloadTarget.dataId); + buffer = this.gpgpu.createBufferFromTexture(tmpData.texture.texture, ...getDenseTexShape(shape)); + } + this.pendingRead.set(dataId, []); + if (dtype !== 'complex64') { + // Create a fence and wait for it to resolve. + await this.gpgpu.createAndWaitForFence(); + } + // Download the values from the GPU. 
+ let vals; + if (dtype === 'complex64') { + const ps = await Promise.all([ + this.read(complexTensorInfos.real.dataId), + this.read(complexTensorInfos.imag.dataId) + ]); + const realValues = ps[0]; + const imagValues = ps[1]; + vals = mergeRealAndImagArrays(realValues, imagValues); + } + else if (buffer == null) { + vals = this.getValuesFromTexture(dataId); + } + else { + const size = sizeFromShape(shape); + vals = this.gpgpu.downloadFloat32MatrixFromBuffer(buffer, size); + } + if (tmpDownloadTarget != null) { + this.disposeIntermediateTensorInfo(tmpDownloadTarget); + } + if (buffer != null) { + const gl = this.gpgpu.gl; + callAndCheck(gl, () => gl.deleteBuffer(buffer)); + } + const dTypeVals = this.convertAndCacheOnCPU(dataId, vals); + const subscribers = this.pendingRead.get(dataId); + this.pendingRead.delete(dataId); + // Notify all pending reads. + subscribers.forEach(resolve => resolve(dTypeVals)); + if (this.pendingDisposal.has(dataId)) { + this.pendingDisposal.delete(dataId); + if (this.disposeData(dataId)) { + engine().removeDataId(dataId, this); + } + this.pendingDeletes--; + } + return dTypeVals; + } + /** + * Read tensor to a new texture that is densely packed for ease of use. + * @param dataId The source tensor. + * @param options + * customTexShape: Optional. If set, will use the user defined texture + * shape to create the texture. + */ + readToGPU(dataId, options = {}) { + const texData = this.texData.get(dataId); + const { values, shape, slice, dtype, isPacked, texture } = texData; + if (dtype === 'complex64') { + throw new Error('Does not support reading texture for complex64 dtype.'); + } + // The presence of `slice` indicates this tensor is a shallow slice of a + // different tensor, and is using that original tensor's texture. Run + // `clone` in order to copy that texture and read from it. 
+ if (slice != null) { + let program; + if (isPacked) { + program = new UnaryOpPackedProgram(shape, CLONE); + } + else { + program = new UnaryOpProgram(shape, CLONE); + } + const res = this.runWebGLProgram(program, [{ dataId, shape, dtype }], dtype); + const gpuResouorce = this.readToGPU(res, options); + this.disposeIntermediateTensorInfo(res); + return gpuResouorce; + } + if (texture == null) { + if (values != null) { + throw new Error('Data is not on GPU but on CPU.'); + } + else { + throw new Error('There is no data on GPU or CPU.'); + } + } + // Decode the texture so that it is stored densely (using four channels). + const tmpTarget = this.decode(dataId, options.customTexShape); + // Make engine track this tensor, so that we can dispose it later. + const tensorRef = engine().makeTensorFromTensorInfo(tmpTarget); + const tmpData = this.texData.get(tmpTarget.dataId); + return Object.assign({ tensorRef }, tmpData.texture); + } + bufferSync(t) { + const data = this.readSync(t.dataId); + if (t.dtype === 'string') { + try { + // Decode the bytes into string. + const strings = data.map(d => decodeString(d)); + return buffer(t.shape, t.dtype, strings); + } + catch (_a) { + throw new Error('Failed to decode encoded string bytes into utf-8'); + } + } + return buffer(t.shape, t.dtype, data); + } + checkNumericalProblems(values) { + if (values == null) { + return; + } + for (let i = 0; i < values.length; i++) { + const num = values[i]; + if (!canBeRepresented(num)) { + if (env().getBool('WEBGL_RENDER_FLOAT32_CAPABLE')) { + throw Error(`The value ${num} cannot be represented with your ` + + `current settings. 
Consider enabling float32 rendering: ` + + `'tf.env().set('WEBGL_RENDER_FLOAT32_ENABLED', true);'`); + } + throw Error(`The value ${num} cannot be represented on this device.`); + } + } + } + getValuesFromTexture(dataId) { + const { shape, dtype, isPacked } = this.texData.get(dataId); + const size = sizeFromShape(shape); + if (env().getBool('WEBGL_DOWNLOAD_FLOAT_ENABLED')) { + const tmpTarget = this.decode(dataId); + const tmpData = this.texData.get(tmpTarget.dataId); + const vals = this.gpgpu + .downloadMatrixFromPackedTexture(tmpData.texture.texture, ...getDenseTexShape(shape)) + .subarray(0, size); + this.disposeIntermediateTensorInfo(tmpTarget); + return vals; + } + const shouldUsePackedProgram = env().getBool('WEBGL_PACK') && isPacked === true; + const outputShape = shouldUsePackedProgram ? getShapeAs3D(shape) : shape; + const program = shouldUsePackedProgram ? + new EncodeFloatPackedProgram(outputShape) : + new EncodeFloatProgram(outputShape); + const output = this.runWebGLProgram(program, [{ shape: outputShape, dtype, dataId }], 'float32'); + const tmpData = this.texData.get(output.dataId); + const vals = this.gpgpu + .downloadByteEncodedFloatMatrixFromOutputTexture(tmpData.texture.texture, tmpData.texShape[0], tmpData.texShape[1]) + .subarray(0, size); + this.disposeIntermediateTensorInfo(output); + return vals; + } + timerAvailable() { + return env().getNumber('WEBGL_DISJOINT_QUERY_TIMER_EXTENSION_RELIABLE') > 0; + } + time(f) { + const oldActiveTimers = this.activeTimers; + const newActiveTimers = []; + let outerMostTime = false; + if (this.programTimersStack == null) { + this.programTimersStack = newActiveTimers; + outerMostTime = true; + } + else { + this.activeTimers.push(newActiveTimers); + } + this.activeTimers = newActiveTimers; + f(); + // needing to split these up because util.flatten only accepts certain types + const flattenedActiveTimerQueries = flatten$2(this.activeTimers.map((d) => d.query)) + .filter(d => d != null); + const 
flattenedActiveTimerNames = flatten$2(this.activeTimers.map((d) => d.name)) + .filter(d => d != null); + this.activeTimers = oldActiveTimers; + if (outerMostTime) { + this.programTimersStack = null; + } + const res = { + uploadWaitMs: this.uploadWaitMs, + downloadWaitMs: this.downloadWaitMs, + kernelMs: null, + wallMs: null // will be filled by the engine + }; + return (async () => { + if (env().getNumber('WEBGL_DISJOINT_QUERY_TIMER_EXTENSION_RELIABLE') > + 0) { + const kernelMs = await Promise.all(flattenedActiveTimerQueries); + res['kernelMs'] = sum$4(kernelMs); + res['getExtraProfileInfo'] = () => kernelMs + .map((d, i) => ({ name: flattenedActiveTimerNames[i], ms: d })) + .map(d => `${d.name}: ${d.ms}`) + .join(', '); + } + else { + res['kernelMs'] = { + error: 'WebGL query timers are not supported in this environment.' + }; + } + this.uploadWaitMs = 0; + this.downloadWaitMs = 0; + return res; + })(); + } + memory() { + return { + unreliable: false, + numBytesInGPU: this.numBytesInGPU, + numBytesInGPUAllocated: this.textureManager.numBytesAllocated, + numBytesInGPUFree: this.textureManager.numBytesFree + }; + } + startTimer() { + if (env().getNumber('WEBGL_DISJOINT_QUERY_TIMER_EXTENSION_RELIABLE') > 0) { + return this.gpgpu.beginQuery(); + } + return { startMs: now(), endMs: null }; + } + endTimer(query) { + if (env().getNumber('WEBGL_DISJOINT_QUERY_TIMER_EXTENSION_RELIABLE') > 0) { + this.gpgpu.endQuery(); + return query; + } + query.endMs = now(); + return query; + } + async getQueryTime(query) { + if (env().getNumber('WEBGL_DISJOINT_QUERY_TIMER_EXTENSION_RELIABLE') > 0) { + return this.gpgpu.waitForQueryAndGetTime(query); + } + const timerQuery = query; + return timerQuery.endMs - timerQuery.startMs; + } + /** + * Decrease the RefCount on the dataId and dispose the memory if the dataId + * has 0 refCount. If there are pending read on the data, the disposal would + * added to the pending delete queue. 
Return true if the dataId is removed + * from backend or the backend does not contain the dataId, false if the + * dataId is not removed. Memory may or may not be released even when dataId + * is removed, which also depends on dataRefCount, see `releaseGPU`. + * @param dataId + * @oaram force Optional, remove the data regardless of refCount + */ + disposeData(dataId, force = false) { + if (this.pendingDisposal.has(dataId)) { + return false; + } + // No-op if already disposed. + if (!this.texData.has(dataId)) { + return true; + } + // if force flag is set, change refCount to 0, this would ensure disposal + // when added to the pendingDisposal queue. Memory may or may not be + // released, which also depends on dataRefCount, see `releaseGPU`. + if (force) { + this.texData.get(dataId).refCount = 0; + } + else { + this.texData.get(dataId).refCount--; + } + if (!force && this.texData.get(dataId).refCount > 0) { + return false; + } + if (this.pendingRead.has(dataId)) { + this.pendingDisposal.add(dataId); + this.pendingDeletes++; + return false; + } + this.releaseGPUData(dataId); + const { complexTensorInfos } = this.texData.get(dataId); + if (complexTensorInfos != null) { + this.disposeData(complexTensorInfos.real.dataId, force); + this.disposeData(complexTensorInfos.imag.dataId, force); + } + this.texData.delete(dataId); + return true; + } + releaseGPUData(dataId) { + const { texture, dtype, texShape, usage, isPacked, slice } = this.texData.get(dataId); + const key = slice && slice.origDataId || dataId; + const refCount = this.dataRefCount.get(key); + if (refCount > 1) { + this.dataRefCount.set(key, refCount - 1); + } + else { + this.dataRefCount.delete(key); + if (texture != null) { + this.numBytesInGPU -= this.computeBytes(texShape, dtype); + this.textureManager.releaseTexture(texture, texShape, usage, isPacked); + } + } + const texData = this.texData.get(dataId); + texData.texture = null; + texData.texShape = null; + texData.isPacked = false; + texData.slice = null; 
+ } + getTexture(dataId) { + this.uploadToGPU(dataId); + return this.texData.get(dataId).texture.texture; + } + /** + * Returns internal information for the specific data bucket. Used in unit + * tests. + */ + getDataInfo(dataId) { + return this.texData.get(dataId); + } + /* + Tests whether all the inputs to an op are small and on the CPU. This heuristic + determines when it would be faster to execute a kernel on the CPU. WebGL + kernels opt into running this check and forwarding when appropriate. + TODO(https://github.com/tensorflow/tfjs/issues/872): Develop a more + sustainable strategy for optimizing backend execution of ops. + */ + shouldExecuteOnCPU(inputs, sizeThreshold = CPU_HANDOFF_SIZE_THRESHOLD) { + return env().getBool('WEBGL_CPU_FORWARD') && + inputs.every(input => this.texData.get(input.dataId).texture == null && + sizeFromShape(input.shape) < sizeThreshold); + } + getGPGPUContext() { + return this.gpgpu; + } + where(condition) { + warn('tf.where() in webgl locks the UI thread. ' + + 'Call tf.whereAsync() instead'); + const condVals = condition.dataSync(); + return whereImpl(condition.shape, condVals); + } + packedUnaryOp(x, op, dtype) { + const program = new UnaryOpPackedProgram(x.shape, op); + const outInfo = this.compileAndRun(program, [x], dtype); + return engine().makeTensorFromTensorInfo(outInfo); + } + // TODO(msoulanille) remove this once the backend has been modularized + // a copy is needed here to break a circular dependency. + // Also remove the op from unary_op. + abs(x) { + // TODO: handle cases when x is complex. 
+ if (this.shouldExecuteOnCPU([x]) && x.dtype !== 'complex64') { + const outValues = simpleAbsImplCPU(this.texData.get(x.dataId).values); + return this.makeOutput(x.shape, x.dtype, outValues); + } + if (env().getBool('WEBGL_PACK_UNARY_OPERATIONS')) { + return this.packedUnaryOp(x, ABS$1, x.dtype); + } + const program = new UnaryOpProgram(x.shape, ABS$1); + const outInfo = this.compileAndRun(program, [x]); + return engine().makeTensorFromTensorInfo(outInfo); + } + makeTensorInfo(shape, dtype, values) { + let dataId; + if (dtype === 'string' && values != null && values.length > 0 && + isString(values[0])) { + const encodedValues = values.map(d => encodeString(d)); + dataId = this.write(encodedValues, shape, dtype); + } + else { + dataId = this.write(values, shape, dtype); + } + this.texData.get(dataId).usage = null; + return { dataId, shape, dtype }; + } + makeOutput(shape, dtype, values) { + return engine().makeTensorFromTensorInfo(this.makeTensorInfo(shape, dtype, values), this); + } + unpackTensor(input) { + const program = new UnpackProgram(input.shape); + return this.runWebGLProgram(program, [input], input.dtype); + } + packTensor(input) { + const program = new PackProgram(input.shape); + const preventEagerUnpackingOutput = true; + return this.runWebGLProgram(program, [input], input.dtype, null /* customUniformValues */, preventEagerUnpackingOutput); + } + packedReshape(input, afterShape) { + const input3DShape = [ + getBatchDim(input.shape), + ...getRowsCols(input.shape) + ]; + const input3D = { + dtype: input.dtype, + shape: input3DShape, + dataId: input.dataId + }; + const afterShapeAs3D = [ + getBatchDim(afterShape), ...getRowsCols(afterShape) + ]; + const program = new ReshapePackedProgram(afterShapeAs3D, input3DShape); + const preventEagerUnpackingOfOutput = true; + const customValues = [input3DShape]; + const output = this.runWebGLProgram(program, [input3D], input.dtype, customValues, preventEagerUnpackingOfOutput); + return { dataId: output.dataId, 
shape: afterShape, dtype: output.dtype }; + } + decode(dataId, customTexShape) { + const texData = this.texData.get(dataId); + const { isPacked, shape, dtype } = texData; + if (customTexShape != null) { + const size = sizeFromShape(shape); + const texSize = customTexShape[0] * customTexShape[1] * 4; + assert$1(size <= texSize, () => 'customTexShape is too small. ' + + 'Row * Column * 4 should be equal or larger than the ' + + 'size of the tensor data.'); + } + const shapeAs3D = getShapeAs3D(shape); + let program; + if (isPacked) { + program = new DecodeMatrixPackedProgram(shapeAs3D); + } + else { + program = new DecodeMatrixProgram(shapeAs3D); + } + const preventEagerUnpackingOfOutput = true; + const customValues = [customTexShape != null ? customTexShape : + getDenseTexShape(shapeAs3D)]; + const out = this.runWebGLProgram(program, [{ shape: shapeAs3D, dtype, dataId }], dtype, customValues, preventEagerUnpackingOfOutput, customTexShape); + return { dtype, shape, dataId: out.dataId }; + } + runWebGLProgram(program, inputs, outputDtype, customUniformValues, preventEagerUnpackingOfOutput = false, customTexShape) { + const output = this.makeTensorInfo(program.outputShape, outputDtype); + const outData = this.texData.get(output.dataId); + if (program.packedOutput) { + outData.isPacked = true; + } + if (program.outPackingScheme === PackingScheme.DENSE) { + const texelShape = customTexShape != null ? + customTexShape : + getDenseTexShape(program.outputShape); + // For a densely packed output, we explicitly set texShape + // so it doesn't get assigned later according to our typical packing + // scheme wherein a single texel can only contain values from adjacent + // rows/cols. + outData.texShape = texelShape.map(d => d * 2); + } + if (program.outTexUsage != null) { + outData.usage = program.outTexUsage; + } + if (sizeFromShape(output.shape) === 0) { + // Short-circuit the computation since the result is empty (has 0 in its + // shape). 
+ outData.values = + getTypedArrayFromDType(output.dtype, 0); + return output; + } + const dataToDispose = []; + const inputsData = inputs.map(input => { + if (input.dtype === 'complex64') { + throw new Error(`GPGPUProgram does not support complex64 input. For complex64 ` + + `dtypes, please separate the program into real and imaginary ` + + `parts.`); + } + let texData = this.texData.get(input.dataId); + if (texData.texture == null) { + if (!program.packedInputs && + sizeFromShape(input.shape) <= + env().getNumber('WEBGL_SIZE_UPLOAD_UNIFORM')) { + // Upload small tensors that live on the CPU as uniforms, not as + // textures. Do this only when the environment supports 32bit floats + // due to problems when comparing 16bit floats with 32bit floats. + // TODO(https://github.com/tensorflow/tfjs/issues/821): Make it + // possible for packed shaders to sample from uniforms. + return { + shape: input.shape, + texData: null, + isUniform: true, + uniformValues: texData.values + }; + } + // This ensures that if a packed program's inputs have not yet been + // uploaded to the GPU, they get uploaded as packed right off the bat. + if (program.packedInputs) { + texData.isPacked = true; + texData.shape = input.shape; + } + } + this.uploadToGPU(input.dataId); + if (!!texData.isPacked !== !!program.packedInputs) { + input = texData.isPacked ? this.unpackTensor(input) : + this.packTensor(input); + dataToDispose.push(input); + texData = this.texData.get(input.dataId); + } + else if (texData.isPacked && + !isReshapeFree(texData.shape, input.shape)) { + // This is a special case where a texture exists for a tensor + // but the shapes are incompatible (due to packing constraints) because + // the tensor did not have a chance to go through the packed reshape + // shader. This only happens when we reshape the *same* tensor to form + // *distinct* inputs to an op, e.g. dotting a vector with itself. This + // case will disappear once packed uploading is the default. 
+ const savedInput = input; + const targetShape = input.shape; + input.shape = texData.shape; + input = this.packedReshape(input, targetShape); + dataToDispose.push(input); + texData = this.texData.get(input.dataId); + savedInput.shape = targetShape; + } + return { shape: input.shape, texData, isUniform: false }; + }); + this.uploadToGPU(output.dataId); + const outputData = { shape: output.shape, texData: outData, isUniform: false }; + const key = makeShaderKey(program, inputsData, outputData); + const binary = this.getAndSaveBinary(key, () => { + return compileProgram(this.gpgpu, program, inputsData, outputData); + }); + const shouldTimeProgram = this.activeTimers != null; + let query; + if (shouldTimeProgram) { + query = this.startTimer(); + } + if (!env().get('ENGINE_COMPILE_ONLY')) { + runProgram(this.gpgpu, binary, inputsData, outputData, customUniformValues); + } + dataToDispose.forEach(info => this.disposeIntermediateTensorInfo(info)); + if (shouldTimeProgram) { + query = this.endTimer(query); + this.activeTimers.push({ name: program.constructor.name, query: this.getQueryTime(query) }); + } + const glFlushThreshold = env().getNumber('WEBGL_FLUSH_THRESHOLD'); + // Manually GL flush requested + if (glFlushThreshold > 0) { + const time = now(); + if ((time - this.lastGlFlushTime) > glFlushThreshold) { + this.gpgpu.gl.flush(); + this.lastGlFlushTime = time; + } + } + if (!env().getBool('WEBGL_LAZILY_UNPACK') && outData.isPacked && + preventEagerUnpackingOfOutput === false) { + const unpacked = this.unpackTensor(output); + this.disposeIntermediateTensorInfo(output); + return unpacked; + } + return output; + } + compileAndRun(program, inputs, outputDtype, customUniformValues, preventEagerUnpackingOfOutput = false) { + outputDtype = outputDtype || inputs[0].dtype; + const outInfo = this.runWebGLProgram(program, inputs, outputDtype, customUniformValues, preventEagerUnpackingOfOutput); + return outInfo; + } + getAndSaveBinary(key, getBinary) { + if (!(key in 
this.binaryCache)) { + this.binaryCache[key] = getBinary(); + } + return this.binaryCache[key]; + } + getTextureManager() { + return this.textureManager; + } + dispose() { + if (this.disposed) { + return; + } + // Avoid disposing the compiled webgl programs during unit testing because + // it slows down test execution. + if (!env().getBool('IS_TEST')) { + const allKeys = Object.keys(this.binaryCache); + allKeys.forEach(key => { + this.gpgpu.deleteProgram(this.binaryCache[key].webGLProgram); + delete this.binaryCache[key]; + }); + } + this.textureManager.dispose(); + if (this.canvas != null && + (typeof (HTMLCanvasElement) !== 'undefined' && + this.canvas instanceof HTMLCanvasElement)) { + this.canvas.remove(); + } + else { + this.canvas = null; + } + if (this.gpgpuCreatedLocally) { + this.gpgpu.program = null; + this.gpgpu.dispose(); + } + this.disposed = true; + } + floatPrecision() { + if (this.floatPrecisionValue == null) { + this.floatPrecisionValue = tidy(() => { + if (!env().get('WEBGL_RENDER_FLOAT32_ENABLED')) { + // Momentarily switching DEBUG flag to false so we don't throw an + // error trying to upload a small value. + const debugFlag = env().getBool('DEBUG'); + env().set('DEBUG', false); + const underflowCheckValue = this.abs(scalar(1e-8)).dataSync()[0]; + env().set('DEBUG', debugFlag); + if (underflowCheckValue > 0) { + return 32; + } + } + return 16; + }); + } + return this.floatPrecisionValue; + } + /** Returns the smallest representable number. */ + epsilon() { + return this.floatPrecision() === 32 ? EPSILON_FLOAT32 : EPSILON_FLOAT16; + } + uploadToGPU(dataId) { + const texData = this.texData.get(dataId); + const { shape, dtype, values, texture, usage, isPacked } = texData; + if (texture != null) { + // Array is already on GPU. No-op. 
+ return; + } + const shouldTimeProgram = this.activeTimers != null; + let start; + if (shouldTimeProgram) { + start = now(); + } + let texShape = texData.texShape; + if (texShape == null) { + // This texShape may not be the final texture shape. For packed or dense + // textures, the texShape will be changed when textures are created. + texShape = getTextureShapeFromLogicalShape(shape, isPacked); + texData.texShape = texShape; + } + if (values != null) { + const shapeAs3D = getShapeAs3D(shape); + let program; + let width = texShape[1], height = texShape[0]; + const isByteArray = values instanceof Uint8Array || values instanceof Uint8ClampedArray; + // texture for float array is PhysicalTextureType.PACKED_2X2_FLOAT32, we + // need to make sure the upload uses the same packed size + if (isPacked || !isByteArray) { + [width, height] = getPackedMatrixTextureShapeWidthHeight(texShape[0], texShape[1]); + } + if (isPacked) { + program = new EncodeMatrixPackedProgram(shapeAs3D, isByteArray); + } + else { + program = new EncodeMatrixProgram(shapeAs3D, isByteArray); + } + // TexShape for float array needs to be the original shape, which byte + // array needs to be packed size. This allow the data upload shape to be + // matched with texture creation logic. + const tempDenseInputTexShape = isByteArray ? [height, width] : texShape; + const tempDenseInputHandle = this.makeTensorInfo(tempDenseInputTexShape, dtype); + const tempDenseInputTexData = this.texData.get(tempDenseInputHandle.dataId); + if (isByteArray) { + tempDenseInputTexData.usage = TextureUsage.PIXELS; + } + else { + tempDenseInputTexData.usage = TextureUsage.UPLOAD; + } + tempDenseInputTexData.texShape = tempDenseInputTexShape; + this.gpgpu.uploadDenseMatrixToTexture(this.getTexture(tempDenseInputHandle.dataId), width, height, values); + const customValues = [[height, width]]; + // We want the output to remain packed regardless of the value of + // WEBGL_PACK. 
+ const preventEagerUnpacking = true; + const encodedOutputTarget = this.runWebGLProgram(program, [tempDenseInputHandle], dtype, customValues, preventEagerUnpacking); + // Have the original texture assume the identity of the encoded output. + const outputTexData = this.texData.get(encodedOutputTarget.dataId); + texData.texShape = outputTexData.texShape; + texData.isPacked = outputTexData.isPacked; + texData.usage = outputTexData.usage; + if (!env().get('ENGINE_COMPILE_ONLY')) { + texData.texture = outputTexData.texture; + // Once uploaded, don't store the values on cpu. + texData.values = null; + this.texData.delete(encodedOutputTarget.dataId); + } + else { + this.disposeData(encodedOutputTarget.dataId); + } + this.disposeIntermediateTensorInfo(tempDenseInputHandle); + if (shouldTimeProgram) { + this.uploadWaitMs += now() - start; + } + } + else { + const newTexture = this.acquireTexture(texShape, usage, dtype, isPacked); + texData.texture = newTexture; + } + } + convertAndCacheOnCPU(dataId, float32Values) { + const texData = this.texData.get(dataId); + const { dtype } = texData; + if (float32Values != null) { + texData.values = float32ToTypedArray(float32Values, dtype); + } + return texData.values; + } + acquireTexture(texShape, texType, dtype, isPacked) { + this.numBytesInGPU += this.computeBytes(texShape, dtype); + if (!this.warnedAboutMemory && + this.numBytesInGPU > this.numMBBeforeWarning * 1024 * 1024) { + const mb = (this.numBytesInGPU / 1024 / 1024).toFixed(2); + this.warnedAboutMemory = true; + console.warn(`High memory usage in GPU: ${mb} MB, ` + + `most likely due to a memory leak`); + } + return this.textureManager.acquireTexture(texShape, texType, isPacked); + } + computeBytes(shape, dtype) { + return shape[0] * shape[1] * bytesPerElement(dtype); + } + checkCompileCompletion() { + for (const [, binary] of Object.entries(this.binaryCache)) { + this.checkCompletion_(binary); + } + } + async checkCompileCompletionAsync() { + const ps = []; + if 
(this.gpgpu.parallelCompilationExtension) { + for (const [, binary] of Object.entries(this.binaryCache)) { + ps.push(this.checkCompletionAsync_(binary)); + } + return Promise.all(ps); + } + else { + for (const [, binary] of Object.entries(this.binaryCache)) { + const p = new Promise((resolve) => { + try { + this.checkCompletion_(binary); + resolve(true); + } + catch (error) { + throw error; + } + }); + ps.push(p); + } + return Promise.all(ps); + } + } + async checkCompletionAsync_(binary) { + if (this.gpgpu.gl.getProgramParameter(binary.webGLProgram, this.gpgpu.parallelCompilationExtension.COMPLETION_STATUS_KHR)) { + return this.checkCompletion_(binary); + } + else { + await nextFrame(); + return this.checkCompletionAsync_(binary); + } + } + checkCompletion_(binary) { + if (this.gpgpu.gl.getProgramParameter(binary.webGLProgram, this.gpgpu.gl.LINK_STATUS) === false) { + console.log(this.gpgpu.gl.getProgramInfoLog(binary.webGLProgram)); + if (this.gpgpu.gl.getShaderParameter(binary.fragmentShader, this.gpgpu.gl.COMPILE_STATUS) === false) { + logShaderSourceAndInfoLog(binary.source, this.gpgpu.gl.getShaderInfoLog(binary.fragmentShader)); + throw new Error('Failed to compile fragment shader.'); + } + throw new Error('Failed to link vertex and fragment shaders.'); + } + return true; + } + getUniformLocations() { + for (const binary of Object.values(this.binaryCache)) { + // TODO: Iterating through all binaries to build VAOs is supposed to be in + // a seperate function, like 'setVaos'. However, to avoid breaking changes + // for the users using parallel compile feature now, buildVao is silently + // added here. 
+ this.gpgpu.buildVao(binary.webGLProgram); + const { variablesLocations, customUniformLocations, infLoc, nanLoc, outShapeLocation, outShapeStridesLocation, outTexShapeLocation } = getUniformLocations(this.gpgpu, binary.program, binary.webGLProgram); + binary.variablesLocations = variablesLocations; + binary.customUniformLocations = customUniformLocations; + binary.infLoc = infLoc; + binary.nanLoc = nanLoc; + binary.outShapeLocation = outShapeLocation; + binary.outShapeStridesLocation = outShapeStridesLocation; + binary.outTexShapeLocation = outTexShapeLocation; + } + } + /** + * Create a TF.js tensor out of an existing WebGL texture. A new texture will + * be created. + */ + createTensorFromGPUData(values, shape, dtype) { + values.channels = values.channels || 'RGBA'; + const { texture, height, width, channels } = values; + const backend = engine().backend; + // Have to throw an error, otherwise WebGL just warns and returns wrong + // values. + if (!backend.gpgpu.gl.isTexture(texture)) { + throw new Error(`The texture is invalid. Also, please make sure the texture and ` + + `the TFJS WebGL backend are using the same canvas. If you want to ` + + `use your own custom canvas, you have to create and use the custom ` + + `TFJS WebGL backend created from the canvas through ` + + `'new tf.MathBackendWebGL(customCanvas)'.`); + } + const dataId = backend.writeTexture(texture, shape, dtype, height, width, channels); + return engine().makeTensorFromDataId(dataId, shape, dtype, backend); + } + } + MathBackendWebGL.nextDataId = 0; + function float32ToTypedArray(a, dtype) { + if (dtype === 'float32' || dtype === 'complex64') { + return a; + } + else if (dtype === 'int32' || dtype === 'bool') { + const result = (dtype === 'int32') ? 
new Int32Array(a.length) : + new Uint8Array(a.length); + for (let i = 0; i < result.length; ++i) { + result[i] = Math.round(a[i]); + } + return result; + } + else { + throw new Error(`Unknown dtype ${dtype}`); + } + } + + /** @license See the LICENSE file. */ + // This code is auto-generated, do not modify this file! + const version$2 = '4.22.0'; + + /** + * @license + * Copyright 2019 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + /** + * Enforce use of half precision textures if available on the platform. + * + * @doc {heading: 'Environment', namespace: 'webgl'} + */ + function forceHalfFloat() { + env().set('WEBGL_FORCE_F16_TEXTURES', true); + } + + /** + * @license + * Copyright 2020 Google Inc. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ + if (isBrowser()) { + registerBackend('webgl', () => new MathBackendWebGL(), 2 /* priority */); + } + const webgl = { forceHalfFloat }; + + /** + * @license + * Copyright 2017 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + const CHECK_NAN_SNIPPET = ` + if (isnan(a)) return a; + if (isnan(b)) return b; +`; + const SQUARED_DIFFERENCE$1 = 'return (a - b) * (a - b);'; + class BinaryOpProgram { + constructor(op, aShape, bShape) { + this.variableNames = ['A', 'B']; + this.outputShape = assertAndGetBroadcastShape(aShape, bShape); + this.enableShapeUniforms = useShapeUniforms(this.outputShape.length); + this.userCode = ` + float binaryOperation(float a, float b) { + ${op} + } + + void main() { + float a = getAAtOutCoords(); + float b = getBAtOutCoords(); + setOutput(binaryOperation(a, b)); + } + `; + } + } + + /** + * @license + * Copyright 2018 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + const CHECK_NAN_SNIPPET_PACKED = ` + result.r = isNaN.r ? NAN : result.r; + result.g = isNaN.g ? NAN : result.g; + result.b = isNaN.b ? NAN : result.b; + result.a = isNaN.a ? NAN : result.a; +`; + const ELU_DER$1 = ` + vec4 bGTEZero = vec4(greaterThanEqual(b, vec4(0.))); + return (bGTEZero * a) + ((vec4(1.0) - bGTEZero) * (a * (b + vec4(1.0)))); +`; + const NOT_EQUAL$1 = ` + return vec4(notEqual(a, b)); +`; + class BinaryOpPackedProgram { + constructor(op, aShape, bShape, checkOutOfBounds = false) { + this.variableNames = ['A', 'B']; + this.supportsBroadcasting = true; + this.packedInputs = true; + this.packedOutput = true; + this.outputShape = assertAndGetBroadcastShape(aShape, bShape); + const rank = this.outputShape.length; + this.enableShapeUniforms = useShapeUniforms(rank); + let checkOutOfBoundsString = ''; + if (checkOutOfBounds) { + if (rank === 0 || sizeFromShape(this.outputShape) === 1) { + checkOutOfBoundsString = ` + result.y = 0.; + result.z = 0.; + result.w = 0.; + `; + } + else { + const dtype = getCoordsDataType(rank); + checkOutOfBoundsString = ` + ${dtype} coords = getOutputCoords(); + `; + if (rank === 1) { + if (this.enableShapeUniforms) { + checkOutOfBoundsString += ` + result.y = (coords + 1) >= outShape ? 0. : result.y; + result.z = 0.; + result.w = 0.; + `; + } + else { + checkOutOfBoundsString += ` + result.y = (coords + 1) >= ${this.outputShape[0]} ? 0. 
: result.y; + result.z = 0.; + result.w = 0.; + `; + } + } + else { + const channels = getChannels('coords', rank); + if (this.enableShapeUniforms) { + checkOutOfBoundsString += ` + bool nextRowOutOfBounds = + (${channels[rank - 2]} + 1) >= outShape[${rank} - 2]; + bool nextColOutOfBounds = + (${channels[rank - 1]} + 1) >= outShape[${rank} - 1]; + result.y = nextColOutOfBounds ? 0. : result.y; + result.z = nextRowOutOfBounds ? 0. : result.z; + result.w = nextColOutOfBounds || nextRowOutOfBounds ? 0. : result.w; + `; + } + else { + checkOutOfBoundsString += ` + bool nextRowOutOfBounds = + (${channels[rank - 2]} + 1) >= ${this.outputShape[rank - 2]}; + bool nextColOutOfBounds = + (${channels[rank - 1]} + 1) >= ${this.outputShape[rank - 1]}; + result.y = nextColOutOfBounds ? 0. : result.y; + result.z = nextRowOutOfBounds ? 0. : result.z; + result.w = nextColOutOfBounds || nextRowOutOfBounds ? 0. : result.w; + `; + } + } + } + } + this.userCode = ` + vec4 binaryOperation(vec4 a, vec4 b) { + ${op} + } + + void main() { + vec4 a = getAAtOutCoords(); + vec4 b = getBAtOutCoords(); + + vec4 result = binaryOperation(a, b); + ${checkOutOfBoundsString} + + setOutput(result); + } + `; + } + } + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ + function identity(args) { + const { inputs, backend } = args; + const { x } = inputs; + backend.incRef(x.dataId); + return { dataId: x.dataId, shape: x.shape, dtype: x.dtype }; + } + const identityConfig = { + kernelName: Identity$1, + backendName: 'webgl', + kernelFunc: identity + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + /** + * In WebGL data is stored in GPU textures which can't be efficiently copied, so + * complex tensors share data with their real and imaginary components. Complex + * tensors' reference to the components is tracked by refCount on the individual + * component. The refCounts are increased by the identity call. + * + * When a complex tensor is disposed, it will reduce the refCount on the + * components by calling disposeData on each. 
+ */ + function complex(args) { + const { inputs, backend } = args; + const { real, imag } = inputs; + const complexInfo = backend.makeTensorInfo(real.shape, 'complex64'); + const complex = backend.texData.get(complexInfo.dataId); + const realTensorInfo = identity({ inputs: { x: real }, backend }); + const imagTensorInfo = identity({ inputs: { x: imag }, backend }); + complex.complexTensorInfos = { real: realTensorInfo, imag: imagTensorInfo }; + return complexInfo; + } + const complexConfig = { + kernelName: Complex, + backendName: 'webgl', + kernelFunc: complex + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + const LEAKYRELU = `return (a < 0.) ? b * a : a;`; + const LEAKYRELU_PACKED = ` + vec4 aLessThanZero = vec4(lessThan(a, vec4(0.))); + return (aLessThanZero * (b * a)) + ((vec4(1.0) - aLessThanZero) * a); +`; + function leakyRelu(args) { + const { inputs, backend, attrs } = args; + const { x } = inputs; + const { alpha } = attrs; + const $alpha = backend.makeTensorInfo([], 'float32', createScalarValue(alpha, 'float32')); + const program = env().getBool('WEBGL_PACK_BINARY_OPERATIONS') ? 
+ new BinaryOpPackedProgram(LEAKYRELU_PACKED, x.shape, $alpha.shape) : + new BinaryOpProgram(LEAKYRELU, x.shape, $alpha.shape); + const result = backend.runWebGLProgram(program, [x, $alpha], 'float32'); + backend.disposeIntermediateTensorInfo($alpha); + return result; + } + const leakyReluConfig = { + kernelName: LeakyRelu, + backendName: 'webgl', + kernelFunc: leakyRelu + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + const PRELU = `return (a < 0.) ? b * a : a;`; + const PRELU_PACKED = ` + vec4 aLessThanZero = vec4(lessThan(a, vec4(0.))); + return (aLessThanZero * (b * a)) + ((vec4(1.0) - aLessThanZero) * a); +`; + function prelu(args) { + const { inputs, backend } = args; + const { x, alpha } = inputs; + const program = env().getBool('WEBGL_PACK_BINARY_OPERATIONS') ? + new BinaryOpPackedProgram(PRELU_PACKED, x.shape, alpha.shape) : + new BinaryOpProgram(PRELU, x.shape, alpha.shape); + return backend.runWebGLProgram(program, [x, alpha], 'float32'); + } + const preluConfig = { + kernelName: Prelu, + backendName: 'webgl', + kernelFunc: prelu + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + const CHECK_NAN_SNIPPET_UNARY = `if (isnan(x)) return x;`; + /** + * Template that creates a `KernelFunc` for unary ops. + * @param opSnippet Op snippet to create `UnaryOpProgram`. + * @param packedOpSnippet Op snippet to create `UnaryOpPackedProgram`. + * @param dtype Optional. If set, the result has this dtype. Otherwise, the + * result has the same dtype as the first input. This is mainly used in + * comparison kernels, such as Equal, Less, Greater, etc. + */ + function unaryKernelFunc({ opSnippet, packedOpSnippet, cpuKernelImpl, dtype }) { + return ({ inputs, backend }) => { + const { x } = inputs; + const webglBackend = backend; + const $dtype = dtype || x.dtype; + if (webglBackend.shouldExecuteOnCPU([x]) && cpuKernelImpl != null) { + const xData = webglBackend.texData.get(x.dataId); + const outValues = cpuKernelImpl(xData.values, $dtype); + return webglBackend.makeTensorInfo(x.shape, $dtype, outValues); + } + const shouldUsePackedProgram = env().getBool('WEBGL_PACK_UNARY_OPERATIONS') && packedOpSnippet != null; + let program; + if (shouldUsePackedProgram) { + program = new UnaryOpPackedProgram(x.shape, packedOpSnippet); + } + else { + program = new UnaryOpProgram(x.shape, opSnippet); + } + return webglBackend.runWebGLProgram(program, [x], $dtype); + }; + } + /** + * Template that creates a `KernelFunc` for binary ops. + * @param opSnippet Op snippet to create `BinaryOpProgram`. + * @param packedOpSnippet Op snippet to create `BinaryOpPackedProgram`. 
+ * @param checkOutOfBoundsForPackedProgram Whether to set checkOutOfBounds=true + * when creating BinaryOpPackedProgram. + * @param dtype Optional. If set, the result has this dtype. Otherwise, the + * result has the same dtype as the first input. This is mainly used in + * comparison kernels, such as Equal, Less, Greater, etc. + */ + function binaryKernelFunc({ opSnippet, packedOpSnippet, checkOutOfBounds = false, supportsComplex = false, cpuKernelImpl, dtype }) { + return ({ inputs, backend }) => { + const { a, b } = inputs; + const webglBackend = backend; + if (supportsComplex && a.dtype === 'complex64') { + const aData = webglBackend.texData.get(a.dataId); + const bData = webglBackend.texData.get(b.dataId); + const [real, imag] = [ + [aData.complexTensorInfos.real, bData.complexTensorInfos.real], + [aData.complexTensorInfos.imag, bData.complexTensorInfos.imag] + ].map(complexParts => { + const [aPart, bPart] = complexParts; + const aHandle = { + dataId: aPart.dataId, + dtype: aPart.dtype, + shape: a.shape + }; + const bHandle = { + dataId: bPart.dataId, + dtype: bPart.dtype, + shape: b.shape + }; + const program = new BinaryOpProgram(opSnippet, a.shape, b.shape); + return webglBackend.runWebGLProgram(program, [aHandle, bHandle], upcastType(aPart.dtype, bPart.dtype)); + }); + const complexOutput = complex({ inputs: { real, imag }, backend: webglBackend }); + webglBackend.disposeIntermediateTensorInfo(real); + webglBackend.disposeIntermediateTensorInfo(imag); + // TODO(annxingyuan): Implement CPU forwarding for complex inputs. + return complexOutput; + } + const $dtype = dtype || upcastType(a.dtype, b.dtype); + if ((a.dtype === 'string' || b.dtype === 'string' || + webglBackend.shouldExecuteOnCPU([a, b])) && + cpuKernelImpl != null) { + const aVals = webglBackend.texData.get(a.dataId).values; + const bVals = webglBackend.texData.get(b.dataId).values; + const decodedAVals = a.dtype === 'string' ? 
+ // tslint:disable-next-line: no-any + fromUint8ToStringArray(aVals) : + aVals; + const decodedBVals = a.dtype === 'string' ? + // tslint:disable-next-line: no-any + fromUint8ToStringArray(bVals) : + bVals; + const [outValues, outShape] = cpuKernelImpl(a.shape, b.shape, decodedAVals, decodedBVals, $dtype); + const out = webglBackend.makeTensorInfo(outShape, $dtype); + const outData = webglBackend.texData.get(out.dataId); + outData.values = outValues; + return out; + } + const shouldUsePackedProgram = env().getBool('WEBGL_PACK_BINARY_OPERATIONS') && + packedOpSnippet != null; + let program; + if (shouldUsePackedProgram) { + program = new BinaryOpPackedProgram(packedOpSnippet, a.shape, b.shape, checkOutOfBounds); + } + else { + program = new BinaryOpProgram(opSnippet, a.shape, b.shape); + } + return webglBackend.runWebGLProgram(program, [a, b], $dtype); + }; + } + function mapActivationToShaderProgram(activation, packed = false) { + if (activation === 'linear') { + if (packed) { + return LINEAR; + } + return LINEAR$1; + } + else if (activation === 'relu') { + if (packed) { + return RELU$1; + } + return RELU$2; + } + else if (activation === 'elu') { + if (packed) { + return ELU$1; + } + return ELU$2; + } + else if (activation === 'relu6') { + if (packed) { + return RELU6$1; + } + return RELU6$2; + } + else if (activation === 'prelu') { + if (packed) { + return PRELU_PACKED; + } + return PRELU; + } + else if (activation === 'leakyrelu') { + if (packed) { + return LEAKYRELU_PACKED; + } + return LEAKYRELU; + } + else if (activation === 'sigmoid') { + if (packed) { + return SIGMOID$1; + } + return SIGMOID$2; + } + throw new Error(`Activation ${activation} has not been implemented for the WebGL backend.`); + } + + /** + * @license + * Copyright 2018 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + class MatMulPackedProgram { + constructor(aShape, bShape, outputShape, transposeA = false, transposeB = false, addBias = false, activation = null, hasPreluActivation = false, hasLeakyreluActivation = false) { + this.variableNames = ['matrixA', 'matrixB']; + this.packedInputs = true; + this.packedOutput = true; + this.outputShape = outputShape; + this.enableShapeUniforms = useShapeUniforms(this.outputShape.length); + const sharedDim = transposeA ? aShape[1] : aShape[2]; + const sharedDimensionPacked = Math.ceil(sharedDim / 2); + const aSample = transposeA ? 'i * 2, rc.y' : 'rc.y, i * 2'; + const bSample = transposeB ? 'rc.z, i * 2' : 'i * 2, rc.z'; + const aSwizzle = transposeA ? ['a.xxyy', 'a.zzww'] : ['a.xxzz', 'a.yyww']; + const bSwizzle = transposeB ? ['b.xzxz', 'b.ywyw'] : ['b.xyxy', 'b.zwzw']; + let activationSnippet = '', applyActivationSnippet = ''; + if (activation) { + if (hasPreluActivation) { + activationSnippet = `vec4 activation(vec4 a) { + vec4 b = getPreluActivationWeightsAtOutCoords(); + ${activation} + }`; + } + else if (hasLeakyreluActivation) { + activationSnippet = `vec4 activation(vec4 a) { + vec4 b = getLeakyreluAlphaAtOutCoords(); + ${activation} + }`; + } + else { + activationSnippet = `vec4 activation(vec4 x) { + ${activation} + }`; + } + applyActivationSnippet = `result = activation(result);`; + } + const addBiasSnippet = addBias ? 
'result += getBiasAtOutCoords();' : ''; + if (addBias) { + this.variableNames.push('bias'); + } + if (hasPreluActivation) { + this.variableNames.push('preluActivationWeights'); + } + if (hasLeakyreluActivation) { + this.variableNames.push('leakyreluAlpha'); + } + let batchASnippet = 'rc.x'; + let batchBSnippet = 'rc.x'; + if (aShape[0] < bShape[0]) { + batchASnippet = `imod(rc.x, ${aShape[0]})`; + } + else if (bShape[0] < aShape[0]) { + batchBSnippet = `imod(rc.x, ${bShape[0]})`; + } + this.userCode = ` + ${activationSnippet} + // Don't use uniform for sharedDimensionPacked for performance. + const float sharedDimension = ${sharedDimensionPacked}.0; + + vec4 dot2x2ARowBCol(ivec3 rc) { + vec4 result = vec4(0); + int batchA = ${batchASnippet}; + int batchB = ${batchBSnippet}; + for (int i = 0; i < ${sharedDimensionPacked}; i++) { + vec4 a = getMatrixA(batchA, ${aSample}); + vec4 b = getMatrixB(batchB, ${bSample}); + + // These swizzled products need to be separately added. + // See: https://github.com/tensorflow/tfjs/issues/1735 + result += (${aSwizzle[0]} * ${bSwizzle[0]}); + result += (${aSwizzle[1]} * ${bSwizzle[1]}); + } + return result; + } + + void main() { + ivec3 rc = getOutputCoords(); + vec4 result = dot2x2ARowBCol(rc); + + ${addBiasSnippet} + + ${applyActivationSnippet} + + setOutput(result); + } + `; + } + } + + /** + * @license + * Copyright 2018 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ + // (Ar + Ai)(Br + Bi) = + // ArBr + ArBi + AiBr + AiBi = ArBr - AB + ArBi + AiBr + // Yr = ArBr - AB + // Yi = ArBi + AiBr + const COMPLEX_MULTIPLY = { + REAL: 'return areal * breal - aimag * bimag;', + IMAG: 'return areal * bimag + aimag * breal;' + }; + class BinaryOpComplexProgram { + constructor(op, aShape, bShape) { + this.variableNames = ['AReal', 'AImag', 'BReal', 'BImag']; + this.outputShape = assertAndGetBroadcastShape(aShape, bShape); + this.userCode = ` + float binaryOpComplex( + float areal, float aimag, float breal, float bimag) { + ${op} + } + + void main() { + float areal = getARealAtOutCoords(); + float aimag = getAImagAtOutCoords(); + float breal = getBRealAtOutCoords(); + float bimag = getBImagAtOutCoords(); + setOutput(binaryOpComplex(areal, aimag, breal, bimag)); + } + `; + } + } + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ + const MUL = 'return a * b;'; + function multiply(args) { + const { inputs, backend } = args; + const { a, b } = inputs; + const dtype = upcastType(a.dtype, b.dtype); + if (a.dtype === 'complex64') { + const aData = backend.texData.get(a.dataId); + const bData = backend.texData.get(b.dataId); + const realProgram = new BinaryOpComplexProgram(COMPLEX_MULTIPLY.REAL, a.shape, b.shape); + const imagProgram = new BinaryOpComplexProgram(COMPLEX_MULTIPLY.IMAG, a.shape, b.shape); + const inputs = [ + { + dataId: aData.complexTensorInfos.real.dataId, + dtype: aData.complexTensorInfos.real.dtype, + shape: a.shape + }, + { + dataId: aData.complexTensorInfos.imag.dataId, + dtype: aData.complexTensorInfos.imag.dtype, + shape: a.shape + }, + { + dataId: bData.complexTensorInfos.real.dataId, + dtype: bData.complexTensorInfos.real.dtype, + shape: b.shape + }, + { + dataId: bData.complexTensorInfos.imag.dataId, + dtype: bData.complexTensorInfos.imag.dtype, + shape: b.shape + } + ]; + const realPart = backend.runWebGLProgram(realProgram, inputs, 'float32'); + const imagPart = backend.runWebGLProgram(imagProgram, inputs, 'float32'); + const complexOutput = complex({ inputs: { real: realPart, imag: imagPart }, backend }); + backend.disposeIntermediateTensorInfo(realPart); + backend.disposeIntermediateTensorInfo(imagPart); + // TODO(annxingyuan): CPU forwarding for complex inputs. 
+ return complexOutput; + } + if (backend.shouldExecuteOnCPU([a, b])) { + const aData = backend.texData.get(a.dataId); + const bData = backend.texData.get(b.dataId); + const [outValues, outShape] = multiplyImplCPU(a.shape, b.shape, aData.values, bData.values, dtype); + const out = backend.makeTensorInfo(outShape, dtype); + const outData = backend.texData.get(out.dataId); + outData.values = outValues; + return out; + } + let program; + if (env().getBool('WEBGL_PACK_BINARY_OPERATIONS')) { + program = new BinaryOpPackedProgram(MUL, a.shape, b.shape); + } + else { + program = new BinaryOpProgram(MUL, a.shape, b.shape); + } + return backend.runWebGLProgram(program, [a, b], dtype); + } + const multiplyConfig = { + kernelName: Multiply$1, + backendName: 'webgl', + kernelFunc: multiply + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ + function packedReshape(input, afterShape, backend) { + const input3DShape = [getBatchDim(input.shape), + ...getRowsCols(input.shape)]; + const input3D = { + dtype: input.dtype, + shape: input3DShape, + dataId: input.dataId + }; + const afterShapeAs3D = [getBatchDim(afterShape), + ...getRowsCols(afterShape)]; + const program = new ReshapePackedProgram(afterShapeAs3D, input3DShape); + const preventEagerUnpackingOfOutput = true; + const customValues = [input3DShape]; + const output = backend.runWebGLProgram(program, [input3D], input.dtype, customValues, preventEagerUnpackingOfOutput); + return { dataId: output.dataId, shape: afterShape, dtype: output.dtype }; + } + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + function reshape(args) { + const { inputs, backend, attrs } = args; + const { x } = inputs; + const { shape } = attrs; + const webglBackend = backend; + const xSize = sizeFromShape(x.shape); + const $shape = inferFromImplicitShape(shape, xSize); + const $xSize = sizeFromShape($shape); + assert$1(xSize === $xSize, () => `The new shape (${$shape}) has ${$xSize} elements and the old ` + + `shape (${x.shape}) has ${xSize} elements. 
The new shape and old ` + + `shape must have the same number of elements.`); + const xTexData = webglBackend.texData.get(x.dataId); + if (xTexData.isPacked && !isReshapeFree(x.shape, $shape) && + !(xTexData.texture !== null && isReshapeFree(xTexData.shape, $shape))) { + return packedReshape(x, $shape, webglBackend); + } + webglBackend.incRef(x.dataId); + return { dataId: x.dataId, shape: $shape, dtype: x.dtype }; + } + const reshapeConfig = { + kernelName: Reshape$1, + backendName: 'webgl', + kernelFunc: reshape + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + class MeanProgram { + constructor(reduceInfo, divisor) { + this.variableNames = ['x']; + const { windowSize, batchSize, inSize, outSize } = reduceInfo; + this.outputShape = [batchSize, outSize]; + const windowSizeNearestVec4 = Math.floor(windowSize / 4) * 4; + const windowSizeVec4Remainder = windowSize % 4; + let updateSnippet = `sumValue += dot(values, ones);`; + if (divisor != null) { + const denominator = 1 / divisor; + updateSnippet = `sumValue += dot(values * ${isInt(denominator) ? 
denominator.toPrecision(2) : + denominator}, ones);`; + } + let checkOutOfBounds = ''; + if (inSize % windowSize > 0) { + checkOutOfBounds = ` + if (inIdx < 0 || inIdx >= ${inSize}) { + return 0.0; + } + `; + } + this.userCode = ` + const vec4 ones = vec4(1.0, 1.0, 1.0, 1.0); + + float getValue(int batch, int inIdx) { + ${checkOutOfBounds} + return getX(batch, inIdx); + } + + void main() { + ivec2 coords = getOutputCoords(); + int batch = coords[0]; + int outIdx = coords[1]; + int inOffset = outIdx * ${windowSize}; + + float sumValue = 0.0; + + for (int i = 0; i < ${windowSizeNearestVec4}; i += 4) { + int inIdx = inOffset + i; + vec4 values = vec4( + getValue(batch, inIdx), + getValue(batch, inIdx + 1), + getValue(batch, inIdx + 2), + getValue(batch, inIdx + 3) + ); + + ${updateSnippet} + } + + int inIdx = inOffset + ${windowSizeNearestVec4}; + if (${windowSizeVec4Remainder === 1}) { + vec4 values = vec4(getValue(batch, inIdx), 0.0, 0.0, 0.0); + + ${updateSnippet} + } else if (${windowSizeVec4Remainder === 2}) { + vec4 values = vec4( + getValue(batch, inIdx), + getValue(batch, inIdx + 1), 0.0, 0.0); + + ${updateSnippet} + } else if (${windowSizeVec4Remainder === 3}) { + vec4 values = vec4( + getValue(batch, inIdx), + getValue(batch, inIdx + 1), + getValue(batch, inIdx + 2), 0.0); + + ${updateSnippet} + } + setOutput(sumValue); + } + `; + } + } + + /** + * @license + * Copyright 2017 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + class ReduceProgram { + constructor(reduceInfo, reduceType) { + this.variableNames = ['x']; + const { windowSize, batchSize, inSize, outSize } = reduceInfo; + this.outputShape = [batchSize, outSize]; + let initializationValue = '0.0'; + let compareOp = ``; + if (reduceType === 'prod') { + initializationValue = '1.0'; + } + else if (reduceType === 'min') { + // WebGL on Firefox Linux can't compile 1/0 so we do 1/eps. + initializationValue = '1.0 / 1e-20'; + compareOp = `min`; + } + else if (reduceType === 'max') { + // WebGL on Firefox Linux can't compile 1/0 so we do 1/eps. + initializationValue = '-1.0 / 1e-20'; + compareOp = `max`; + } + let returnValue = `${reduceType}(${reduceType}(${reduceType}(` + + 'minMaxValue[0], minMaxValue[1]), minMaxValue[2]), minMaxValue[3])'; + if (reduceType === 'sum') { + returnValue = `sumValue`; + } + else if (reduceType === 'prod') { + returnValue = `prodValue`; + } + else if (reduceType === 'all') { + returnValue = `allValue`; + } + else if (reduceType === 'any') { + returnValue = `anyValue`; + } + const windowSizeNearestVec4 = Math.floor(windowSize / 4) * 4; + const windowSizeVec4Remainder = windowSize % 4; + let updateSnippet = ` + if (${reduceType === 'sum'}) { + sumValue += dot(values, ones); + } else if (${reduceType === 'prod'}) { + vec2 tmp = vec2(values[0], values[1]) * vec2(values[2], values[3]); + prodValue *= tmp[0] * tmp[1]; + } else { + minMaxValue = ${compareOp}(values, minMaxValue); + if (${reduceType === 'min'} || ${reduceType === 'max'}) { + minMaxValue = ${compareOp}(values, minMaxValue); + bvec4 isNaN = isnan(values); + if (isNaN.r || isNaN.g || isNaN.b || isNaN.a) { + minMaxValue = vec4(NAN); + } + } + } + `; + let vecType = `vec4`; + if (reduceType === 'all') { + initializationValue = '1.0'; + updateSnippet = ` + 
bool reducedAllValue = all(values); + float floatedReducedAllValue = float(reducedAllValue); + allValue = float(allValue >= 1.0 && floatedReducedAllValue >= 1.0); + `; + vecType = `bvec4`; + } + else if (reduceType === 'any') { + initializationValue = '0.0'; + updateSnippet = ` + bool reducedAnyValue = any(values); + float floatedReducedAnyValue = float(reducedAnyValue); + anyValue = float(anyValue >= 1.0 || floatedReducedAnyValue >= 1.0); + `; + vecType = `bvec4`; + } + let checkOutOfBounds = ''; + if (inSize % windowSize > 0) { + checkOutOfBounds = ` + if (inIdx < 0 || inIdx >= ${inSize}) { + return initializationValue; + } + `; + } + this.userCode = ` + const float initializationValue = ${initializationValue}; + const vec4 ones = vec4(1.0, 1.0, 1.0, 1.0); + + float getValue(int batch, int inIdx) { + ${checkOutOfBounds} + return getX(batch, inIdx); + } + + void main() { + ivec2 coords = getOutputCoords(); + int batch = coords[0]; + int outIdx = coords[1]; + int inOffset = outIdx * ${windowSize}; + + vec4 minMaxValue = vec4(${initializationValue}); + float prodValue = 1.0; + float sumValue = 0.0; + float allValue = 1.0; + float anyValue = 0.0; + + for (int i = 0; i < ${windowSizeNearestVec4}; i += 4) { + int inIdx = inOffset + i; + ${vecType} values = ${vecType}( + getValue(batch, inIdx), + getValue(batch, inIdx + 1), + getValue(batch, inIdx + 2), + getValue(batch, inIdx + 3) + ); + + ${updateSnippet} + } + + int inIdx = inOffset + ${windowSizeNearestVec4}; + if (${windowSizeVec4Remainder === 1}) { + ${vecType} values = ${vecType}( + getValue(batch, inIdx), + initializationValue, + initializationValue, + initializationValue + ); + + ${updateSnippet} + } else if (${windowSizeVec4Remainder === 2}) { + ${vecType} values = ${vecType}( + getValue(batch, inIdx), + getValue(batch, inIdx + 1), + initializationValue, + initializationValue + ); + + ${updateSnippet} + } else if (${windowSizeVec4Remainder === 3}) { + ${vecType} values = ${vecType}( + getValue(batch, inIdx), + 
getValue(batch, inIdx + 1), + getValue(batch, inIdx + 2), + initializationValue + ); + + ${updateSnippet} + } + setOutput(${returnValue}); + } + `; + } + } + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + // Returns an array of configuration objects that describe each stage of the + // reduction. + function getReductionStages(inShape) { + const stages = []; + while (stages.length === 0 || stages[stages.length - 1].outSize !== 1) { + const outSize = stages.length ? stages[stages.length - 1].outSize : inShape[1]; + const windowSize = computeOptimalWindowSize(outSize); + stages.push({ + inSize: outSize, + windowSize, + outSize: Math.ceil(outSize / windowSize) + }); + } + return stages; + } + function reduce(x, dtype, reductionType, backend) { + const reductionStages = getReductionStages(x.shape); + let result = x; + for (let i = 0; i < reductionStages.length; i++) { + const { inSize, windowSize, outSize } = reductionStages[i]; + let program; + let previousResult; + if (reductionType === 'mean') { + program = i === 0 ? 
+ new MeanProgram({ windowSize, inSize, batchSize: x.shape[0], outSize }, inSize) : + new MeanProgram({ windowSize, inSize, batchSize: x.shape[0], outSize }); + } + else { + program = new ReduceProgram({ windowSize, inSize, batchSize: x.shape[0], outSize }, reductionType); + } + previousResult = result; + result = backend.runWebGLProgram(program, [result], dtype); + if (previousResult.dataId !== x.dataId) { + backend.disposeIntermediateTensorInfo(previousResult); + } + } + return result; + } + + /** + * @license + * Copyright 2017 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ + class TransposeProgram { + constructor(aShape, newDim) { + this.variableNames = ['A']; + const outputShape = new Array(aShape.length); + for (let i = 0; i < outputShape.length; i++) { + outputShape[i] = aShape[newDim[i]]; + } + this.outputShape = outputShape; + this.rank = outputShape.length; + const dtype = getCoordsDataType(this.rank); + const switched = getSwitchedCoords(newDim); + this.userCode = ` + void main() { + ${dtype} resRC = getOutputCoords(); + setOutput(getA(${switched})); + } + `; + } + } + function getSwitchedCoords(newDim) { + const rank = newDim.length; + if (rank > 6) { + throw Error(`Transpose for rank ${rank} is not yet supported`); + } + const originalOrder = ['resRC.x', 'resRC.y', 'resRC.z', 'resRC.w', 'resRC.u', 'resRC.v']; + const switchedCoords = new Array(rank); + for (let i = 0; i < newDim.length; i++) { + switchedCoords[newDim[i]] = originalOrder[i]; + } + return switchedCoords.join(); + } + + /** + * @license + * Copyright 2019 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ + class TransposePackedProgram { + constructor(aShape, newDim) { + this.variableNames = ['A']; + this.packedInputs = true; + this.packedOutput = true; + const outputShape = new Array(aShape.length); + for (let i = 0; i < outputShape.length; i++) { + outputShape[i] = aShape[newDim[i]]; + } + this.outputShape = outputShape; + this.rank = outputShape.length; + if (this.rank > 6) { + throw Error(`Packed transpose for rank ${this.rank} is not yet supported.`); + } + const dtype = getCoordsDataType(this.rank); + const outputOrder = getVecChannels('rc', this.rank); + const switchedOrder = new Array(this.rank); + for (let i = 0; i < newDim.length; i++) { + switchedOrder[newDim[i]] = outputOrder[i]; + } + const innerDims = `vec2(${switchedOrder.slice(-2).join()})`; + const nextColumn = `++${outputOrder[this.rank - 1]} < ${outputShape[this.rank - 1]}`; + const getc = `getChannel(getA(${switchedOrder.join()}), ${innerDims})`; + this.userCode = ` + void main() { + ${dtype} rc = getOutputCoords(); + vec4 result = vec4(0.); + result[0] = ${getc}; + if(${nextColumn}) { + result[1] = ${getc}; + } + --${outputOrder[this.rank - 1]}; + if(++${outputOrder[this.rank - 2]} < ${outputShape[this.rank - 2]}) { + result[2] = ${getc}; + if(${nextColumn}) { + result[3] = ${getc}; + } + } + setOutput(result); + } + `; + } + } + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + function transposeImpl(x, perm, backend) { + const program = env().getBool('WEBGL_PACK_ARRAY_OPERATIONS') ? + new TransposePackedProgram(x.shape, perm) : + new TransposeProgram(x.shape, perm); + return backend.runWebGLProgram(program, [x], x.dtype); + } + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + function sumImpl(x, axis, keepDims, backend) { + const reductionIndices = axis; + const xRank = x.shape.length; + const origAxes = parseAxisParam(reductionIndices, x.shape); + let axes = origAxes; + const permutedAxes = getAxesPermutation(axes, xRank); + const sumInputIsTransposed = permutedAxes != null; + let sumInput = x; + if (sumInputIsTransposed) { + sumInput = transposeImpl(x, permutedAxes, backend); + axes = getInnerMostAxes(axes.length, xRank); + } + assertAxesAreInnerMostDims('sum', axes, xRank); + const [sumOutShape, reduceShape] = computeOutAndReduceShapes(sumInput.shape, axes); + let outShape = sumOutShape; + if (keepDims) { + // rather than reshape at the end, set the target shape here. 
+ outShape = expandShapeToKeepDim(sumOutShape, origAxes); + } + const inSize = sizeFromShape(reduceShape); + const xSize = sizeFromShape(x.shape); + const batchSize = xSize / inSize; + const reshapedInput = reshape({ inputs: { x: sumInput }, attrs: { shape: [batchSize, inSize] }, backend }); + const outType = sumOutType(x.dtype); + const reduced = reduce(reshapedInput, outType, 'sum', backend); + const out = reshape({ inputs: { x: reduced }, attrs: { shape: outShape }, backend }); + backend.disposeIntermediateTensorInfo(reshapedInput); + backend.disposeIntermediateTensorInfo(reduced); + if (sumInputIsTransposed) { + backend.disposeIntermediateTensorInfo(sumInput); + } + return out; + } + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + function sum(args) { + const { inputs, backend, attrs } = args; + const { x } = inputs; + const { axis, keepDims } = attrs; + return sumImpl(x, axis, keepDims, backend); + } + const sumConfig = { + kernelName: Sum, + backendName: 'webgl', + kernelFunc: sum + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + function transpose(args) { + const { inputs, backend, attrs } = args; + const { x } = inputs; + const { perm } = attrs; + const webglBackend = backend; + const xRank = x.shape.length; + const newShape = new Array(xRank); + for (let i = 0; i < newShape.length; i++) { + newShape[i] = x.shape[perm[i]]; + } + let out; + if (webglBackend.shouldExecuteOnCPU([x])) { + const xTexData = webglBackend.texData.get(x.dataId); + const values = xTexData.values; + const outValues = transposeImplCPU(values, x.shape, x.dtype, perm, newShape); + out = webglBackend.makeTensorInfo(newShape, x.dtype); + const outData = webglBackend.texData.get(out.dataId); + outData.values = outValues; + } + else { + out = transposeImpl(x, perm, webglBackend); + } + return out; + } + const transposeConfig = { + kernelName: Transpose, + backendName: 'webgl', + kernelFunc: transpose + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + // Empirically determined minimal shared dimension in matmul before we forward + // to a.mul(b).sum() in order to take advantage of GPU parallelism. See + // https://github.com/tensorflow/tfjs-core/pull/1379 for benchmarks. + const MATMUL_SHARED_DIM_THRESHOLD = 1000; + function batchMatMulImpl({ a, b, transposeA, transposeB, backend, bias = null, preluActivationWeights = null, leakyreluAlpha = 0, activation = null }) { + const aRank = a.shape.length; + const bRank = b.shape.length; + const innerShapeA = transposeA ? a.shape[aRank - 2] : a.shape[aRank - 1]; + const innerShapeB = transposeB ? b.shape[bRank - 1] : b.shape[bRank - 2]; + const outerShapeA = transposeA ? a.shape[aRank - 1] : a.shape[aRank - 2]; + const outerShapeB = transposeB ? b.shape[bRank - 2] : b.shape[bRank - 1]; + const outerDimsA = a.shape.slice(0, -2); + const outerDimsB = b.shape.slice(0, -2); + const batchDimA = sizeFromShape(outerDimsA); + const batchDimB = sizeFromShape(outerDimsB); + const outShapeOuterDims = assertAndGetBroadcastShape(a.shape.slice(0, -2), b.shape.slice(0, -2)); + const outShape = outShapeOuterDims.concat([outerShapeA, outerShapeB]); + assert$1(innerShapeA === innerShapeB, () => `Error in matMul: inner shapes (${innerShapeA}) and (` + + `${innerShapeB}) of Tensors with shapes ${a.shape} and ` + + `${b.shape} and transposeA=${transposeA}` + + ` and transposeB=${transposeB} must match.`); + const a3dShape = transposeA ? + [batchDimA, innerShapeA, outerShapeA] : + [batchDimA, outerShapeA, innerShapeA]; + const b3dShape = transposeB ? 
+ [batchDimB, outerShapeB, innerShapeB] : + [batchDimB, innerShapeB, outerShapeB]; + // The rest of the implementation is designed to operate on rank-3 tensors + const a3d = reshape({ inputs: { x: a }, backend, attrs: { shape: a3dShape } }); + const b3d = reshape({ inputs: { x: b }, backend, attrs: { shape: b3dShape } }); + const intermediates = [a3d, b3d]; + const batchDim = Math.max(batchDimA, batchDimB); + const sharedDim = transposeA ? a3d.shape[1] : a3d.shape[2]; + const hasBias = bias != null; + const hasPreluActivationWeights = preluActivationWeights != null; + const hasLeakyreluAlpha = activation === 'leakyrelu'; + const fusedActivation = activation != null ? + mapActivationToShaderProgram(activation, true) : + null; + const containsFusedOps = hasBias || hasPreluActivationWeights || + hasLeakyreluAlpha || fusedActivation != null; + let out; + // Since the matrices are vectors, it is faster to call mul().sum() + // because sum() is O(sqrt(N)) due to divide-and-conquer. + if ((outerShapeA === 1 || outerShapeB === 1) && + sharedDim > MATMUL_SHARED_DIM_THRESHOLD && containsFusedOps === false) { + let aVec = a3d; + let bVec = b3d; + if (transposeA) { + aVec = transpose({ inputs: { x: a3d }, backend, attrs: { perm: [0, 2, 1] } }); + intermediates.push(aVec); + } + if (transposeB) { + bVec = transpose({ inputs: { x: b3d }, backend, attrs: { perm: [0, 2, 1] } }); + intermediates.push(bVec); + } + const shouldReshapeA = outerShapeB !== 1; + const shouldReshapeB = outerShapeB === 1; + let aVec3d = aVec; + if (shouldReshapeA) { + aVec3d = reshape({ + inputs: { x: aVec }, + backend, + attrs: { shape: [batchDim, sharedDim, 1] } + }); + intermediates.push(aVec3d); + } + const axis = outerShapeB === 1 ? 
2 : 1; + let bVec3d = bVec; + if (shouldReshapeB) { + bVec3d = reshape({ + inputs: { x: bVec }, + backend, + attrs: { shape: [batchDim, 1, sharedDim] } + }); + intermediates.push(bVec3d); + } + const product = multiply({ inputs: { a: aVec3d, b: bVec3d }, backend }); + out = sum({ inputs: { x: product }, backend, attrs: { axis, keepDims: true } }); + intermediates.push(product); + } + else { + const dtype = upcastType(a.dtype, b.dtype); + const program = new MatMulPackedProgram(a3dShape, b3dShape, [batchDim, outerShapeA, outerShapeB], transposeA, transposeB, hasBias, fusedActivation, hasPreluActivationWeights, hasLeakyreluAlpha); + const inputs = [a3d, b3d]; + if (bias != null) { + inputs.push(bias); + } + if (hasPreluActivationWeights) { + inputs.push(preluActivationWeights); + } + if (hasLeakyreluAlpha) { + const $leakyreluAlpha = backend.makeTensorInfo([], 'float32', createScalarValue(leakyreluAlpha, 'float32')); + inputs.push($leakyreluAlpha); + intermediates.push($leakyreluAlpha); + } + out = backend.runWebGLProgram(program, inputs, dtype); + } + const outReshaped = reshape({ inputs: { x: out }, backend, attrs: { shape: outShape } }); + intermediates.push(out); + for (const i of intermediates) { + backend.disposeIntermediateTensorInfo(i); + } + return outReshaped; + } + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the License); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ + function _fusedMatMul(args) { + const { inputs, backend, attrs } = args; + const { a, b, bias, preluActivationWeights } = inputs; + const { transposeA, transposeB, activation, leakyreluAlpha } = attrs; + return batchMatMulImpl({ + a, + b, + transposeA, + transposeB, + backend, + bias, + preluActivationWeights, + leakyreluAlpha, + activation + }); + } + const _fusedMatMulConfig = { + kernelName: _FusedMatMul, + backendName: 'webgl', + kernelFunc: _fusedMatMul, + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + const ABS = `return abs(x);`; + function abs(args) { + const { inputs, backend } = args; + const { x } = inputs; + // TODO: handle cases when x is complex. Once the cpu implementation + // can handle complex values, refactor to use unaryKernelFunc. 
+ if (backend.shouldExecuteOnCPU([x]) && x.dtype !== 'complex64') { + const xData = backend.texData.get(x.dataId); + const outValues = simpleAbsImplCPU(xData.values); + return backend.makeTensorInfo(x.shape, x.dtype, outValues); + } + let program; + if (env().getBool('WEBGL_PACK_UNARY_OPERATIONS')) { + program = new UnaryOpPackedProgram(x.shape, ABS); + } + else { + program = new UnaryOpProgram(x.shape, ABS); + } + return backend.runWebGLProgram(program, [x], x.dtype); + } + const absConfig = { + kernelName: Abs, + backendName: 'webgl', + kernelFunc: abs + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + const ACOS = CHECK_NAN_SNIPPET$1 + ` + if (abs(x) > 1.) { + return NAN; + } + return acos(x); +`; + const acos = unaryKernelFunc({ opSnippet: ACOS }); + const acosConfig = { + kernelName: Acos, + backendName: 'webgl', + kernelFunc: acos, + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + const ACOSH = CHECK_NAN_SNIPPET$1 + ` + if (x < 1.0) return NAN; +return log(x + sqrt(x * x - 1.0));`; + const acosh = unaryKernelFunc({ opSnippet: ACOSH }); + const acoshConfig = { + kernelName: Acosh, + backendName: 'webgl', + kernelFunc: acosh, + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + const ADD = 'return a + b;'; + const addKernelFunc = binaryKernelFunc({ + opSnippet: ADD, + packedOpSnippet: ADD, + supportsComplex: true, + cpuKernelImpl: addImplCPU + }); + const addConfig = { + kernelName: Add$1, + backendName: 'webgl', + kernelFunc: addKernelFunc + }; + + /** + * @license + * Copyright 2019 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + class AddNProgram { + constructor(outputShape, shapes) { + this.outputShape = []; + this.outputShape = outputShape; + this.variableNames = shapes.map((_, i) => `T${i}`); + const snippets = []; + // Get target elements from every input tensor. + this.variableNames.forEach(variable => { + snippets.push(`float v${variable} = get${variable}AtOutCoords();`); + }); + // Calculate the sum of all elements. + const operation = this.variableNames + .map(variable => { + return `v${variable}`; + }) + .join(' + '); + this.userCode = ` + void main() { + ${snippets.join('\n ')} + + float result = ${operation}; + setOutput(result); + } + `; + } + } + + /** + * @license + * Copyright 2019 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ + class AddNPackedProgram { + constructor(outputShape, shapes) { + this.outputShape = []; + this.packedInputs = true; + this.packedOutput = true; + this.outputShape = outputShape; + this.variableNames = shapes.map((_, i) => `T${i}`); + const snippets = []; + // Get target elements from every input tensor. + this.variableNames.forEach(variable => { + snippets.push(`vec4 v${variable} = get${variable}AtOutCoords();`); + }); + // Calculate the sum of all elements. + const operation = this.variableNames + .map(variable => { + return `v${variable}`; + }) + .join(' + '); + this.userCode = ` + void main() { + ${snippets.join('\n ')} + + vec4 result = ${operation}; + setOutput(result); + } + `; + } + } + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + function addN(args) { + const { inputs, backend } = args; + const tensors = inputs; + if (tensors.length === 1) { + return identity({ inputs: { x: tensors[0] }, backend }); + } + // Limit the number of uploaded textures for optimization. 
+ if (tensors.length > env().getNumber('WEBGL_MAX_TEXTURES_IN_SHADER')) { + const midIndex = Math.floor(tensors.length / 2); + const leftSide = addN({ inputs: tensors.slice(0, midIndex), backend }); + const rightSide = addN({ inputs: tensors.slice(midIndex), backend }); + return addN({ inputs: [leftSide, rightSide], backend }); + } + const dtype = tensors.map(t => t.dtype).reduce((d1, d2) => upcastType(d1, d2)); + const shapes = tensors.map(t => t.shape); + // We can make sure shapes are identical in op level. + const usePackedOp = env().getBool('WEBGL_PACK'); + const program = usePackedOp ? + new AddNPackedProgram(tensors[0].shape, shapes) : + new AddNProgram(tensors[0].shape, shapes); + return backend.runWebGLProgram(program, tensors, dtype); + } + const addNConfig = { + kernelName: AddN, + backendName: 'webgl', + kernelFunc: addN + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ + function all(args) { + const { inputs, backend, attrs } = args; + const { x } = inputs; + const { axis, keepDims } = attrs; + const xRank = x.shape.length; + const origAxes = parseAxisParam(axis, x.shape); + let axes = origAxes; + const permutedAxes = getAxesPermutation(axes, xRank); + let permutedX = x; + if (permutedAxes != null) { + permutedX = transpose({ inputs: { x }, backend, attrs: { perm: permutedAxes } }); + axes = getInnerMostAxes(axes.length, xRank); + } + assertAxesAreInnerMostDims('all', axes, xRank); + const [outShape, reduceShape] = computeOutAndReduceShapes(permutedX.shape, axes); + const inSize = sizeFromShape(reduceShape); + const a2D = reshape({ inputs: { x: permutedX }, backend, attrs: { shape: [-1, inSize] } }); + const reduced = reduce(a2D, a2D.dtype, 'all', backend); + let res; + if (keepDims) { + const newShape = expandShapeToKeepDim(outShape, origAxes); + res = reshape({ inputs: { x: reduced }, backend, attrs: { shape: newShape } }); + } + else { + res = reshape({ inputs: { x: reduced }, backend, attrs: { shape: outShape } }); + } + backend.disposeIntermediateTensorInfo(a2D); + backend.disposeIntermediateTensorInfo(reduced); + if (permutedAxes != null) { + backend.disposeIntermediateTensorInfo(permutedX); + } + return res; + } + const allConfig = { + kernelName: All, + backendName: 'webgl', + kernelFunc: all + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + function any(args) { + const { inputs, backend, attrs } = args; + const { x } = inputs; + const { axis, keepDims } = attrs; + const xRank = x.shape.length; + const origAxes = parseAxisParam(axis, x.shape); + let axes = origAxes; + const permutedAxes = getAxesPermutation(axes, xRank); + let permutedX = x; + if (permutedAxes != null) { + permutedX = transpose({ inputs: { x }, backend, attrs: { perm: permutedAxes } }); + axes = getInnerMostAxes(axes.length, xRank); + } + assertAxesAreInnerMostDims('any', axes, xRank); + const [outShape, reduceShape] = computeOutAndReduceShapes(permutedX.shape, axes); + const inSize = sizeFromShape(reduceShape); + const a2D = reshape({ inputs: { x: permutedX }, backend, attrs: { shape: [-1, inSize] } }); + const reduced = reduce(a2D, a2D.dtype, 'any', backend); + let res; + if (keepDims) { + const newShape = expandShapeToKeepDim(outShape, origAxes); + res = reshape({ inputs: { x: reduced }, backend, attrs: { shape: newShape } }); + } + else { + res = reshape({ inputs: { x: reduced }, backend, attrs: { shape: outShape } }); + } + backend.disposeIntermediateTensorInfo(a2D); + backend.disposeIntermediateTensorInfo(reduced); + if (permutedAxes != null) { + backend.disposeIntermediateTensorInfo(permutedX); + } + return res; + } + const anyConfig = { + kernelName: Any, + backendName: 'webgl', + kernelFunc: any + }; + + /** + * @license + * Copyright 2017 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + class ArgMinMaxProgram { + constructor(reduceInfo, op, firstPass) { + this.variableNames = ['A']; + const { windowSize, batchSize, outSize } = reduceInfo; + if (!firstPass) { + this.variableNames.push('bestIndicesA'); + } + this.outputShape = [batchSize, outSize]; + const compOp = (op === 'max') ? '>' : '<'; + const indexSnippet = firstPass ? + 'inOffset + i;' : + 'round(getBestIndicesA(batch, inOffset + i));'; + this.userCode = ` + void main() { + ivec2 coords = getOutputCoords(); + int batch = coords[0]; + int outIdx = coords[1]; + int inOffset = outIdx * ${windowSize}; + + int bestIndex = inOffset; + float bestValue = getA(batch, bestIndex); + + for (int i = 0; i < ${windowSize}; i++) { + int inIdx = ${indexSnippet}; + float candidate = getA(batch, inIdx); + if (candidate ${compOp} bestValue) { + bestValue = candidate; + bestIndex = inIdx; + } + } + setOutput(float(bestIndex)); + } + `; + } + } + + /** + * @license + * Copyright 2019 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + class ArgMinMaxPackedProgram { + constructor(shape, windowSize, op, firstPass) { + this.variableNames = ['A']; + this.packedInputs = true; + this.packedOutput = true; + assert$1(shape.length > 2, () => `Packed arg${op.charAt(0).toUpperCase() + + op.slice(1)} supports only inputs with rank above 2.`); + const inSize = shape[shape.length - 1]; + const outSize = Math.ceil(inSize / windowSize); + this.outputShape = shape.slice(0, -1); + if (outSize > 1) { + this.outputShape.push(outSize); + } + if (!firstPass) { + this.variableNames.push('bestIndicesA'); + } + const outShape = this.outputShape; + const rank = outShape.length; + const dtype = getCoordsDataType(rank); + const coords = getChannels('coords', rank); + let sourceLocSetup; + let sourceRank; + if (outSize === 1) { + sourceRank = rank + 1; + const sourceLocDType = getCoordsDataType(sourceRank); + sourceLocSetup = ` + ${sourceLocDType} sourceLocR = ${sourceLocDType}(${coords.join()}, 0); + ++${coords[rank - 1]}; + ${sourceLocDType} sourceLocG = ${sourceLocDType}(${coords.join()}, 0); + ++${coords[rank - 2]}; + ${sourceLocDType} sourceLocA = ${sourceLocDType}(${coords.join()}, 0); + --${coords[rank - 1]}; + ${sourceLocDType} sourceLocB = ${sourceLocDType}(${coords.join()}, 0); + --${coords[rank - 2]};`; + } + else { + sourceRank = rank; + sourceLocSetup = ` + ${dtype} sourceLocR = coords; + ++${coords[rank - 1]}; + ${dtype} sourceLocG = coords; + ++${coords[rank - 2]}; + ${dtype} sourceLocA = coords; + --${coords[rank - 1]}; + ${dtype} sourceLocB = coords; + --${coords[rank - 2]};`; + } + const channels = ['x', 'y', 'z', 'w', 'u', 'v'].slice(0, sourceRank); + const inChannel = '.' + channels[sourceRank - 1]; // e.g. ".b" for rank 3. 
+ const intChannels = channels.map(x => 'int ' + x); + const srcRCoords = getChannels('sourceLocR', sourceRank - 1).concat('inIdx.r'); + const srcGCoords = getChannels('sourceLocG', sourceRank - 1).concat('inIdx.g'); + const srcBCoords = getChannels('sourceLocB', sourceRank - 1).concat('inIdx.b'); + const srcACoords = getChannels('sourceLocA', sourceRank - 1).concat('inIdx.a'); + const compOp = (op === 'max') ? 'greaterThan' : 'lessThan'; + const fetchCandidateIdx = firstPass ? '' : ` + inIdx = round(vec4(getBestIndicesAChannel(${srcRCoords.join()}), + getBestIndicesAChannel(${srcGCoords.join()}), + getBestIndicesAChannel(${srcBCoords.join()}), + getBestIndicesAChannel(${srcACoords.join()})));`; + const fetchValue = `vec4( + getAChannel(${srcRCoords.join()}), + hasNextCol ? getAChannel(${srcGCoords.join()}) : 0., + hasNextRow ? getAChannel(${srcBCoords.join()}) : 0., + hasNextRow && hasNextCol ? getAChannel(${srcACoords.join()}) : 0.)`; + const getBestIndicesAChannelSnippet = firstPass ? 
'' : ` + float getBestIndicesAChannel(${intChannels.join()}) { + return getChannel(getBestIndicesA(${channels.join()}), + vec2(${channels.slice(-2).join()})); + }`; + this.userCode = ` + float getAChannel(${intChannels.join()}) { + return getChannel(getA(${channels.join()}), + vec2(${channels.slice(-2).join()})); + } + ${getBestIndicesAChannelSnippet} + void main() { + ${dtype} coords = getOutputCoords(); + bool hasNextCol = ${coords[rank - 1]} < ${outShape[rank - 1] - 1}; + bool hasNextRow = ${coords[rank - 2]} < ${outShape[rank - 2] - 1}; + ${sourceLocSetup} + ivec4 srcIdx = ivec4(sourceLocR${inChannel}, sourceLocG${inChannel}, + sourceLocB${inChannel}, sourceLocA${inChannel}) * ${windowSize}; + ivec4 inIdx = srcIdx; + vec4 bestIndex = vec4(inIdx); + vec4 bestValue = ${fetchValue}; + + for (int i = 0; i < ${windowSize}; i++) { + inIdx = srcIdx; + ${fetchCandidateIdx} + vec4 candidate = ${fetchValue}; + bvec4 nan = isnan(candidate); + bvec4 replace = bvec4( + vec4(${compOp}(candidate, bestValue)) * (vec4(1.0) - vec4(nan))); + + bestValue = vec4(replace.x ? candidate.x : bestValue.x, + replace.y ? candidate.y : bestValue.y, + replace.z ? candidate.z : bestValue.z, + replace.w ? candidate.w : bestValue.w); + bestIndex = mix(bestIndex, vec4(inIdx), vec4(replace)); + srcIdx++; + } + setOutput(bestIndex); + } + `; + } + } + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ + function argReduce(backend, x, reduceType, bestIndicesA = null) { + let batchSize = x.shape[0]; + let inSize = x.shape[1]; + if (bestIndicesA != null) { + batchSize = bestIndicesA.shape[0]; + inSize = bestIndicesA.shape[1]; + } + const windowSize = computeOptimalWindowSize(inSize); + const reduceInfo = { windowSize, inSize, batchSize, outSize: Math.ceil(inSize / windowSize) }; + const program = new ArgMinMaxProgram(reduceInfo, reduceType, bestIndicesA == null); + const inputs = [x]; + if (bestIndicesA != null) { + inputs.push(bestIndicesA); + } + const output = backend.runWebGLProgram(program, inputs, 'int32'); + // No need to run another GPGPU program. + if (output.shape[1] === 1) { + return output; + } + const result = argReduce(backend, x, reduceType, output); + backend.disposeIntermediateTensorInfo(output); + return result; + } + function argReducePacked(backend, x, reduceType, bestIndicesA = null) { + const inShape = bestIndicesA != null ? bestIndicesA.shape : x.shape; + const inSize = inShape[inShape.length - 1]; + const windowSize = computeOptimalWindowSize(inSize); + const program = new ArgMinMaxPackedProgram(inShape, windowSize, reduceType, bestIndicesA == null); + const inputs = bestIndicesA == null ? 
[x] : [x, bestIndicesA]; + const output = backend.runWebGLProgram(program, inputs, 'int32'); + if (output.shape.length === x.shape.length) { + const result = argReducePacked(backend, x, reduceType, output); + backend.disposeIntermediateTensorInfo(output); + return result; + } + return output; + } + function argMinMaxReduce(backend, x, axis, reduceType) { + const axes = [axis]; + assertAxesAreInnerMostDims('arg' + reduceType.charAt(0).toUpperCase() + reduceType.slice(1), axes, x.shape.length); + if (!env().getBool('WEBGL_PACK_REDUCE') || x.shape.length <= 2) { + const intermediateTensorInfos = []; + // Eagerly unpack x input since it is passed in to all the shaders which + // require unpacked inputs. + const xtexData = backend.texData.get(x.dataId); + const xIsPacked = xtexData !== null && xtexData.isPacked; + let xUnPacked = x; + if (xIsPacked) { + xUnPacked = backend.unpackTensor(x); + intermediateTensorInfos.push(xUnPacked); + } + const [outShape, reduceShape] = computeOutAndReduceShapes(xUnPacked.shape, axes); + const inSize = sizeFromShape(reduceShape); + const a2D = reshape({ inputs: { x: xUnPacked }, backend, attrs: { shape: [-1, inSize] } }); + intermediateTensorInfos.push(a2D); + const reduced = argReduce(backend, a2D, reduceType); + intermediateTensorInfos.push(reduced); + const reshaped = reshape({ inputs: { x: reduced }, backend, attrs: { shape: outShape } }); + intermediateTensorInfos.forEach(t => backend.disposeIntermediateTensorInfo(t)); + return reshaped; + } + return argReducePacked(backend, x, reduceType); + } + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + function argMax(args) { + const { inputs, backend, attrs } = args; + const { x } = inputs; + const { axis } = attrs; + let axes = parseAxisParam(axis, x.shape); + const permutedAxes = getAxesPermutation(axes, x.shape.length); + let $x = x; + const intermediateTensorInfos = []; + if (permutedAxes != null) { + $x = transpose({ inputs: { x }, backend, attrs: { perm: permutedAxes } }); + intermediateTensorInfos.push($x); + axes = getInnerMostAxes(axes.length, $x.shape.length); + } + assertAxesAreInnerMostDims('argMax', [axes[0]], $x.shape.length); + const out = argMinMaxReduce(backend, $x, axes[0], 'max'); + intermediateTensorInfos.forEach(t => backend.disposeIntermediateTensorInfo(t)); + return out; + } + const argMaxConfig = { + kernelName: ArgMax, + backendName: 'webgl', + kernelFunc: argMax + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ + function argMin(args) { + const { inputs, backend, attrs } = args; + const { x } = inputs; + const { axis } = attrs; + let axes = parseAxisParam(axis, x.shape); + const permutedAxes = getAxesPermutation(axes, x.shape.length); + let $x = x; + const intermediateTensorInfos = []; + if (permutedAxes != null) { + $x = transpose({ inputs: { x }, backend, attrs: { perm: permutedAxes } }); + intermediateTensorInfos.push($x); + axes = getInnerMostAxes(axes.length, $x.shape.length); + } + assertAxesAreInnerMostDims('argMin', [axes[0]], $x.shape.length); + const out = argMinMaxReduce(backend, $x, axes[0], 'min'); + intermediateTensorInfos.forEach(t => backend.disposeIntermediateTensorInfo(t)); + return out; + } + const argMinConfig = { + kernelName: ArgMin, + backendName: 'webgl', + kernelFunc: argMin + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + const ASIN = CHECK_NAN_SNIPPET$1 + ` + if (abs(x) > 1.) { + return NAN; + } + return asin(x); +`; + const asin = unaryKernelFunc({ opSnippet: ASIN }); + const asinConfig = { + kernelName: Asin, + backendName: 'webgl', + kernelFunc: asin, + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. 
+ * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + const ASINH = CHECK_NAN_SNIPPET$1 + `return log(x + sqrt(x * x + 1.0));`; + const asinh = unaryKernelFunc({ opSnippet: ASINH }); + const asinhConfig = { + kernelName: Asinh, + backendName: 'webgl', + kernelFunc: asinh, + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + const ATAN = CHECK_NAN_SNIPPET$1 + ` + return atan(x); +`; + const atan = unaryKernelFunc({ opSnippet: ATAN }); + const atanConfig = { + kernelName: Atan, + backendName: 'webgl', + kernelFunc: atan, + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. 
+ * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + const ATAN2 = CHECK_NAN_SNIPPET + ` + return atan(a, b); +`; + const ATAN2_PACKED = ` + vec4 result = atan(a, b); + bvec4 isNaNA = isnan(a); + bvec4 isNaNB = isnan(b); + bvec4 isNaN = bvec4(isNaNA.x || isNaNB.x, isNaNA.y || isNaNB.y, isNaNA.z || isNaNB.z, isNaNA.w || isNaNB.w); + ` + + CHECK_NAN_SNIPPET_PACKED + ` + return result; +`; + const atan2 = binaryKernelFunc({ opSnippet: ATAN2, packedOpSnippet: ATAN2_PACKED }); + const atan2Config = { + kernelName: Atan2, + backendName: 'webgl', + kernelFunc: atan2, + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ + const ATANH = CHECK_NAN_SNIPPET$1 + ` + if ((x < -1.0) || (x > 1.0)) return NAN; +return (log(1.0 + x) - log(1.0 - x)) / 2.0;`; + const atanh = unaryKernelFunc({ opSnippet: ATANH }); + const atanhConfig = { + kernelName: Atanh, + backendName: 'webgl', + kernelFunc: atanh, + }; + + /** + * @license + * Copyright 2017 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ + class Pool2DProgram { + constructor(convInfo, poolType, computePositions, flattenPositions = false, includeBatchInIndex = false) { + this.variableNames = ['x']; + if (poolType === 'avg' && computePositions) { + throw new Error('Cannot compute positions for average pool.'); + } + const filterWidth = convInfo.filterWidth; + const strideHeight = convInfo.strideHeight; + const strideWidth = convInfo.strideWidth; + const dilationHeight = convInfo.dilationHeight; + const dilationWidth = convInfo.dilationWidth; + const effectiveFilterHeight = convInfo.effectiveFilterHeight; + const effectiveFilterWidth = convInfo.effectiveFilterWidth; + const padTop = convInfo.padInfo.top; + const padLeft = convInfo.padInfo.left; + this.outputShape = convInfo.outShape; + const isAvgPool = poolType === 'avg'; + const batchFlattenPositionStr = `((batch * ${convInfo.inHeight} + xR) * ${convInfo.inWidth} + xC) * ${convInfo.inChannels} + d`; + const flattenPositionStr = `(xR * ${convInfo.inWidth} + xC) * ${convInfo.inChannels} + d`; + let initializationValue = '0.0'; + if (!isAvgPool) { + // WebGL on Firefox Linux can't compile 1/0 so we do 1/eps. + initializationValue = '-1.0 / 1e-20'; + } + if (computePositions) { + const compareOp = '>='; + this.userCode = ` + const ivec2 strides = ivec2(${strideHeight}, ${strideWidth}); + const ivec2 pads = ivec2(${padTop}, ${padLeft}); + + void main() { + ivec4 coords = getOutputCoords(); + int batch = coords[0]; + int d = coords[3]; + + ivec2 xRCCorner = coords.yz * strides - pads; + int xRCorner = xRCCorner.x; + int xCCorner = xRCCorner.y; + + // max/min x(?, ?, d) to get y(yR, yC, d). + // ? 
= to be determined + float minMaxValue = 0.0; + float minMaxValueFound = 0.0; + int minMaxPosition = 0; + float avgValue = 0.0; + + for (int wR = 0; wR < ${effectiveFilterHeight}; + wR += ${dilationHeight}) { + int xR = xRCorner + wR; + + if (xR < 0 || xR >= ${convInfo.inHeight}) { + continue; + } + + for (int wC = 0; wC < ${effectiveFilterWidth}; + wC += ${dilationWidth}) { + int xC = xCCorner + wC; + + if (xC < 0 || xC >= ${convInfo.inWidth}) { + continue; + } + + float value = getX(batch, xR, xC, d); + + // If a min / max value has already been found, use it. If not, + // use the current value. + float currMinMaxValue = mix( + value, minMaxValue, minMaxValueFound); + if (value ${compareOp} currMinMaxValue) { + minMaxValue = value; + minMaxValueFound = 1.0; + minMaxPosition = ${flattenPositions ? (includeBatchInIndex ? batchFlattenPositionStr : + flattenPositionStr) : + `wR * ${effectiveFilterWidth} + wC`}; + } + } + } + setOutput(float(minMaxPosition)); + } + `; + return; + } + const compareOp = 'max'; + let returnValue = `${poolType}(${poolType}(${poolType}(` + + 'minMaxValue[0], minMaxValue[1]), minMaxValue[2]), minMaxValue[3])'; + if (poolType === 'avg') { + returnValue = `avgValue / max(count, 1.0)`; + } + const filterWidthNearestVec4 = Math.floor(filterWidth / 4) * 4; + const filterWidthVec4Remainder = filterWidth % 4; + const updateSnippet = ` + if (${isAvgPool}) { + avgValue += dot(values, ones); + } else { + minMaxValue = ${compareOp}(values, minMaxValue); + } + `; + this.userCode = ` + const ivec2 strides = ivec2(${strideHeight}, ${strideWidth}); + const ivec2 pads = ivec2(${padTop}, ${padLeft}); + const float initializationValue = ${initializationValue}; + const vec4 ones = vec4(1.0, 1.0, 1.0, 1.0); + + float count = 0.0; + + float getValue(int batch, int xR, int xC, int d) { + if (xC < 0 || xC >= ${convInfo.inWidth}) { + return initializationValue; + } + count += 1.0; + return getX(batch, xR, xC, d); + } + + void main() { + ivec4 coords = 
getOutputCoords(); + int batch = coords[0]; + int d = coords[3]; + + ivec2 xRCCorner = coords.yz * strides - pads; + int xRCorner = xRCCorner.x; + int xCCorner = xRCCorner.y; + + // max/min x(?, ?, d) to get y(yR, yC, d). + // ? = to be determined + vec4 minMaxValue = vec4(${initializationValue}); + float avgValue = 0.0; + count = 0.0; + + for (int wR = 0; wR < ${effectiveFilterHeight}; + wR += ${dilationHeight}) { + int xR = xRCorner + wR; + + if (xR < 0 || xR >= ${convInfo.inHeight}) { + continue; + } + + for (int wC = 0; wC < ${filterWidthNearestVec4}; wC += 4) { + int xC = xCCorner + wC * ${dilationWidth}; + + vec4 values = vec4( + getValue(batch, xR, xC, d), + getValue(batch, xR, xC + ${dilationWidth}, d), + getValue(batch, xR, xC + 2 * ${dilationWidth}, d), + getValue(batch, xR, xC + 3 * ${dilationWidth}, d) + ); + + ${updateSnippet} + } + + int xC = xCCorner + ${filterWidthNearestVec4}; + if (${filterWidthVec4Remainder === 1}) { + vec4 values = vec4( + getValue(batch, xR, xC, d), + initializationValue, + initializationValue, + initializationValue + ); + + ${updateSnippet} + } else if (${filterWidthVec4Remainder === 2}) { + vec4 values = vec4( + getValue(batch, xR, xC, d), + getValue(batch, xR, xC + ${dilationWidth}, d), + initializationValue, + initializationValue + ); + + ${updateSnippet} + } else if (${filterWidthVec4Remainder === 3}) { + vec4 values = vec4( + getValue(batch, xR, xC, d), + getValue(batch, xR, xC + ${dilationWidth}, d), + getValue(batch, xR, xC + 2 * ${dilationWidth}, d), + initializationValue + ); + + ${updateSnippet} + } + } + setOutput(${returnValue}); + } + `; + } + } + class Pool3DProgram { + constructor(convInfo, poolType, computePositions, flattenPositions = false, includeBatchInIndex = false) { + this.variableNames = ['x']; + if (poolType === 'avg' && computePositions) { + throw new Error('Cannot compute positions for average pool.'); + } + const filterWidth = convInfo.filterWidth; + const strideDepth = convInfo.strideDepth; + const 
strideHeight = convInfo.strideHeight; + const strideWidth = convInfo.strideWidth; + const dilationDepth = convInfo.dilationDepth; + const dilationHeight = convInfo.dilationHeight; + const dilationWidth = convInfo.dilationWidth; + const effectiveFilterDepth = convInfo.effectiveFilterDepth; + const effectiveFilterHeight = convInfo.effectiveFilterHeight; + const effectiveFilterWidth = convInfo.effectiveFilterWidth; + const padFront = convInfo.padInfo.front; + const padTop = convInfo.padInfo.top; + const padLeft = convInfo.padInfo.left; + this.outputShape = convInfo.outShape; + const isAvgPool = poolType === 'avg'; + let initializationValue = '0.0'; + if (!isAvgPool) { + // WebGL on Firefox Linux can't compile 1/0 so we do 1/eps. + initializationValue = '-1.0 / 1e-20'; + } + if (computePositions) { + const compareOp = '>='; + this.userCode = ` + const ivec3 strides = + ivec3(${strideDepth}, ${strideHeight}, ${strideWidth}); + const ivec3 pads = ivec3(${padFront}, ${padTop}, ${padLeft}); + + void main() { + ivec5 coords = getOutputCoords(); + int batch = coords.x; + int ch = coords.u; + + ivec3 xCorner = ivec3(coords.y, coords.z, coords.w) * strides - pads; + int xDCorner = xCorner.x; + int xRCorner = xCorner.y; + int xCCorner = xCorner.z; + + // max/min x(?, ?, ?, ch) to get y(yD, yR, yC, ch). + // ? 
= to be determined + float minMaxValue = 0.0; + float minMaxValueFound = 0.0; + int minMaxPosition = 0; + + for (int wD = 0; wD < ${effectiveFilterDepth}; + wD += ${dilationDepth}) { + int xD = xDCorner + wD; + + if (xD < 0 || xD >= ${convInfo.inDepth}) { + continue; + } + + for (int wR = 0; wR < ${effectiveFilterHeight}; + wR += ${dilationHeight}) { + int xR = xRCorner + wR; + + if (xR < 0 || xR >= ${convInfo.inHeight}) { + continue; + } + + for (int wC = 0; wC < ${effectiveFilterWidth}; + wC += ${dilationWidth}) { + int xC = xCCorner + wC; + + if (xC < 0 || xC >= ${convInfo.inWidth}) { + continue; + } + + float value = getX(batch, xD, xR, xC, ch); + + // If a min / max value has already been found, use it. If not, + // use the current value. + float currMinMaxValue = mix( + value, minMaxValue, minMaxValueFound); + if (value ${compareOp} currMinMaxValue) { + minMaxValue = value; + minMaxValueFound = 1.0; + minMaxPosition = ${flattenPositions ? + (includeBatchInIndex ? + `(((batch * ${convInfo.inDepth} + xD) * ${convInfo.inHeight} + xR) * ${convInfo.inWidth} + xC) * ${convInfo.inChannels} + ch` : + `((xD * ${convInfo.inHeight} + xR) * ${convInfo.inWidth} + xC) * ${convInfo.inChannels} + ch`) : + `wD * ${effectiveFilterHeight} * ${effectiveFilterWidth} + + wR * ${effectiveFilterWidth} + wC`}; + } + } + } + } + setOutput(float(minMaxPosition)); + } + `; + return; + } + const compareOp = 'max'; + let returnValue = `${poolType}(${poolType}(${poolType}(` + + 'minMaxValue[0], minMaxValue[1]), minMaxValue[2]), minMaxValue[3])'; + if (poolType === 'avg') { + // Use `max(count, 1.0)` instead of `count` in case count === 0.0. + // If count === 0.0, `avgValue` is always 0.0 and we change `count`'s + // value to avoid dividing zero. 
+ returnValue = `avgValue / max(count, 1.0)`; + } + const filterWidthNearestVec4 = Math.floor(filterWidth / 4) * 4; + const filterWidthVec4Remainder = filterWidth % 4; + const updateSnippet = ` + if (${isAvgPool}) { + avgValue += dot(values, ones); + } else { + minMaxValue = ${compareOp}(values, minMaxValue); + } + `; + this.userCode = ` + const ivec3 strides = + ivec3(${strideDepth}, ${strideHeight}, ${strideWidth}); + const ivec3 pads = ivec3(${padFront}, ${padTop}, ${padLeft}); + const float initializationValue = ${initializationValue}; + const vec4 ones = vec4(1.0, 1.0, 1.0, 1.0); + + float count = 0.0; + + float getValue(int batch, int xD, int xR, int xC, int ch) { + if (xC < 0 || xC >= ${convInfo.inWidth}) { + return initializationValue; + } + count += 1.0; + return getX(batch, xD, xR, xC, ch); + } + + void main() { + ivec5 coords = getOutputCoords(); + int batch = coords.x; + int ch = coords.u; + + ivec3 xCorner = ivec3(coords.y, coords.z, coords.w) * strides - pads; + int xDCorner = xCorner.x; + int xRCorner = xCorner.y; + int xCCorner = xCorner.z; + + // max/min x(?, ?, ?, d) to get y(yD, yR, yC, ch). + // ? 
= to be determined + vec4 minMaxValue = vec4(${initializationValue}); + float avgValue = 0.0; + count = 0.0; + + for (int wD = 0; wD < ${effectiveFilterDepth}; + wD += ${dilationDepth}) { + int xD = xDCorner + wD; + + if (xD < 0 || xD >= ${convInfo.inDepth}) { + continue; + } + + for (int wR = 0; wR < ${effectiveFilterHeight}; + wR += ${dilationHeight}) { + int xR = xRCorner + wR; + + if (xR < 0 || xR >= ${convInfo.inHeight}) { + continue; + } + + for (int wC = 0; wC < ${filterWidthNearestVec4}; wC += 4) { + int xC = xCCorner + wC * ${dilationWidth}; + + vec4 values = vec4( + getValue(batch, xD, xR, xC, ch), + getValue(batch, xD, xR, xC + ${dilationWidth}, ch), + getValue(batch, xD, xR, xC + 2 * ${dilationWidth}, ch), + getValue(batch, xD, xR, xC + 3 * ${dilationWidth}, ch) + ); + + ${updateSnippet} + } + + int xC = xCCorner + ${filterWidthNearestVec4}; + if (${filterWidthVec4Remainder === 1}) { + vec4 values = vec4( + getValue(batch, xD, xR, xC, ch), + initializationValue, + initializationValue, + initializationValue + ); + + ${updateSnippet} + } else if (${filterWidthVec4Remainder === 2}) { + vec4 values = vec4( + getValue(batch, xD, xR, xC, ch), + getValue(batch, xD, xR, xC + ${dilationWidth}, ch), + initializationValue, + initializationValue + ); + + ${updateSnippet} + } else if (${filterWidthVec4Remainder === 3}) { + vec4 values = vec4( + getValue(batch, xD, xR, xC, ch), + getValue(batch, xD, xR, xC + ${dilationWidth}, ch), + getValue(batch, xD, xR, xC + 2 * ${dilationWidth}, ch), + initializationValue + ); + + ${updateSnippet} + } + } + } + setOutput(${returnValue}); + } + `; + } + } + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + function avgPool(args) { + const { inputs, backend, attrs } = args; + const { x } = inputs; + assertNotComplex(x, 'avgPool'); + const { filterSize, strides, pad, dimRoundingMode } = attrs; + const dilations = 1; + assert$1(eitherStridesOrDilationsAreOne(strides, dilations), () => 'Error in avgPool: Either strides or dilations must be 1. ' + + `Got strides ${strides} and dilations '${dilations}'`); + const convInfo = computePool2DInfo(x.shape, filterSize, strides, dilations, pad, dimRoundingMode); + if (convInfo.filterWidth === 1 && convInfo.filterHeight === 1 && + arraysEqual(convInfo.inShape, convInfo.outShape)) { + return identity({ inputs: { x }, backend }); + } + const avgPoolProgram = new Pool2DProgram(convInfo, 'avg', false); + return backend.runWebGLProgram(avgPoolProgram, [x], 'float32'); + } + const avgPoolConfig = { + kernelName: AvgPool, + backendName: 'webgl', + kernelFunc: avgPool + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + function avgPool3D(args) { + const { inputs, backend, attrs } = args; + const { x } = inputs; + const { filterSize, strides, pad, dimRoundingMode, dataFormat } = attrs; + const dilations = [1, 1, 1]; + const convInfo = computePool3DInfo(x.shape, filterSize, strides, dilations, pad, dimRoundingMode, dataFormat); + const avgPoolProgram = new Pool3DProgram(convInfo, 'avg', false); + return backend.runWebGLProgram(avgPoolProgram, [x], 'float32'); + } + const avgPool3DConfig = { + kernelName: AvgPool3D, + backendName: 'webgl', + kernelFunc: avgPool3D + }; + + /** + * @license + * Copyright 2017 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ + class AvgPool2DBackpropProgram { + constructor(convInfo) { + this.variableNames = ['dy']; + this.outputShape = convInfo.inShape; + const filterHeight = convInfo.filterHeight; + const filterWidth = convInfo.filterWidth; + const strideHeight = convInfo.strideHeight; + const strideWidth = convInfo.strideWidth; + const dilationHeight = convInfo.dilationHeight; + const dilationWidth = convInfo.dilationWidth; + const effectiveFilterHeight = convInfo.effectiveFilterHeight; + const effectiveFilterWidth = convInfo.effectiveFilterWidth; + const padTop = effectiveFilterHeight - 1 - convInfo.padInfo.top; + const padLeft = effectiveFilterWidth - 1 - convInfo.padInfo.left; + const avgMultiplier = 1 / (filterHeight * filterWidth); + this.userCode = ` + const ivec2 pads = ivec2(${padTop}, ${padLeft}); + const float avgMultiplier = float(${avgMultiplier}); + + void main() { + ivec4 coords = getOutputCoords(); + int b = coords[0]; + int d = coords[3]; + + ivec2 dyRCCorner = coords.yz - pads; + int dyRCorner = dyRCCorner.x; + int dyCCorner = dyRCCorner.y; + + // Convolve dy(?, ?, d) with pos mask(:, :, d) to get dx(xR, xC, d). + // ? = to be determined. : = across all values in that axis. 
+ float dotProd = 0.0; + for (int wR = 0; wR < ${effectiveFilterHeight}; + wR += ${dilationHeight}) { + float dyR = float(dyRCorner + wR) / ${strideHeight}.0; + + if (dyR < 0.0 || dyR >= ${convInfo.outHeight}.0 || fract(dyR) > 0.0) { + continue; + } + int idyR = int(dyR); + + for (int wC = 0; wC < ${effectiveFilterWidth}; + wC+= ${dilationWidth}) { + float dyC = float(dyCCorner + wC) / ${strideWidth}.0; + + if (dyC < 0.0 || dyC >= ${convInfo.outWidth}.0 || + fract(dyC) > 0.0) { + continue; + } + int idyC = int(dyC); + + float dyValue = getDy(b, idyR, idyC, d); + + dotProd += dyValue * avgMultiplier; + } + } + setOutput(dotProd); + } + `; + } + } + class AvgPool3DBackpropProgram { + constructor(convInfo) { + this.variableNames = ['dy']; + this.outputShape = convInfo.inShape; + const filterDepth = convInfo.filterDepth; + const filterHeight = convInfo.filterHeight; + const filterWidth = convInfo.filterWidth; + const strideDepth = convInfo.strideDepth; + const strideHeight = convInfo.strideHeight; + const strideWidth = convInfo.strideWidth; + const dilationDepth = convInfo.dilationDepth; + const dilationHeight = convInfo.dilationHeight; + const dilationWidth = convInfo.dilationWidth; + const effectiveFilterDepth = convInfo.effectiveFilterDepth; + const effectiveFilterHeight = convInfo.effectiveFilterHeight; + const effectiveFilterWidth = convInfo.effectiveFilterWidth; + const padFront = effectiveFilterDepth - 1 - convInfo.padInfo.front; + const padTop = effectiveFilterHeight - 1 - convInfo.padInfo.top; + const padLeft = effectiveFilterWidth - 1 - convInfo.padInfo.left; + const avgMultiplier = 1 / (filterDepth * filterHeight * filterWidth); + this.userCode = ` + const ivec3 pads = ivec3(${padFront}, ${padTop}, ${padLeft}); + const float avgMultiplier = float(${avgMultiplier}); + + void main() { + ivec5 coords = getOutputCoords(); + int batch = coords.x; + int ch = coords.u; + + ivec3 dyCorner = ivec3(coords.y, coords.z, coords.w) - pads; + int dyDCorner = dyCorner.x; + 
int dyRCorner = dyCorner.y; + int dyCCorner = dyCorner.z; + + // Convolve dy(?, ?, ?, d) with pos mask(:, :, :, ch) to get + // dx(xD, xR, xC, ch). + // ? = to be determined. : = across all values in that axis. + float dotProd = 0.0; + + for (int wD = 0; wD < ${effectiveFilterDepth}; + wD += ${dilationDepth}) { + float dyD = float(dyDCorner + wD) / ${strideDepth}.0; + + if (dyD < 0.0 || dyD >= ${convInfo.outDepth}.0 || fract(dyD) > 0.0) { + continue; + } + int idyD = int(dyD); + + for (int wR = 0; wR < ${effectiveFilterHeight}; + wR += ${dilationHeight}) { + float dyR = float(dyRCorner + wR) / ${strideHeight}.0; + + if (dyR < 0.0 || dyR >= ${convInfo.outHeight}.0 || + fract(dyR) > 0.0) { + continue; + } + int idyR = int(dyR); + + for (int wC = 0; wC < ${effectiveFilterWidth}; + wC += ${dilationWidth}) { + float dyC = float(dyCCorner + wC) / ${strideWidth}.0; + + if (dyC < 0.0 || dyC >= ${convInfo.outWidth}.0 || + fract(dyC) > 0.0) { + continue; + } + int idyC = int(dyC); + + float dyValue = getDy(batch, idyD, idyR, idyC, ch); + + dotProd += dyValue * avgMultiplier; + } + } + } + setOutput(dotProd); + } + `; + } + } + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ + function avgPool3DGrad(args) { + const { inputs, backend, attrs } = args; + const { dy, input } = inputs; + const x = input; + const { filterSize, strides, pad, dimRoundingMode } = attrs; + const dilations = [1, 1, 1]; + const convInfo = computePool3DInfo(x.shape, filterSize, strides, dilations, pad, dimRoundingMode); + const avgPoolBackpropProgram = new AvgPool3DBackpropProgram(convInfo); + return backend.runWebGLProgram(avgPoolBackpropProgram, [dy], x.dtype); + } + const avgPool3DGradConfig = { + kernelName: AvgPool3DGrad, + backendName: 'webgl', + kernelFunc: avgPool3DGrad + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ + function avgPoolGrad(args) { + const { inputs, backend, attrs } = args; + const { dy, input } = inputs; + const x = input; + assertNotComplex([dy, input], 'avgPoolGrad'); + const { filterSize, strides, pad } = attrs; + const convInfo = computePool2DInfo(x.shape, filterSize, strides, 1 /* dilations */, pad); + const avgPoolBackpropProgram = new AvgPool2DBackpropProgram(convInfo); + return backend.runWebGLProgram(avgPoolBackpropProgram, [dy], x.dtype); + } + const avgPoolGradConfig = { + kernelName: AvgPoolGrad, + backendName: 'webgl', + kernelFunc: avgPoolGrad + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + function batchMatMul(args) { + const { inputs, backend, attrs } = args; + const { a, b } = inputs; + const { transposeA, transposeB } = attrs; + return batchMatMulImpl({ a, b, transposeA, transposeB, backend }); + } + const batchMatMulConfig = { + kernelName: BatchMatMul, + backendName: 'webgl', + kernelFunc: batchMatMul, + }; + + /** + * @license + * Copyright 2017 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + class BatchNormProgram { + constructor(xShape, meanShape, varianceShape, offsetShape, scaleShape, varianceEpsilon) { + this.outputShape = []; + this.variableNames = ['x', 'mean', 'variance']; + assertAndGetBroadcastShape(xShape, meanShape); + assertAndGetBroadcastShape(xShape, varianceShape); + let offsetSnippet = '0.0'; + if (offsetShape != null) { + assertAndGetBroadcastShape(xShape, offsetShape); + this.variableNames.push('offset'); + offsetSnippet = 'getOffsetAtOutCoords()'; + } + let scaleSnippet = '1.0'; + if (scaleShape != null) { + assertAndGetBroadcastShape(xShape, scaleShape); + this.variableNames.push('scale'); + scaleSnippet = 'getScaleAtOutCoords()'; + } + this.outputShape = xShape; + this.userCode = ` + void main() { + float x = getXAtOutCoords(); + float mean = getMeanAtOutCoords(); + float variance = getVarianceAtOutCoords(); + float offset = ${offsetSnippet}; + float scale = ${scaleSnippet}; + float inv = scale * inversesqrt(variance + float(${varianceEpsilon})); + setOutput(dot(vec3(x, -mean, offset), vec3(inv, inv, 1))); + } + `; + } + } + + /** + * @license + * Copyright 2018 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + class BatchNormPackedProgram { + constructor(xShape, meanShape, varianceShape, offsetShape, scaleShape, varianceEpsilon) { + this.packedInputs = true; + this.packedOutput = true; + this.variableNames = ['x', 'mean', 'variance']; + assertAndGetBroadcastShape(xShape, meanShape); + assertAndGetBroadcastShape(xShape, varianceShape); + let offsetSnippet = 'vec4(0.0)'; + if (offsetShape != null) { + assertAndGetBroadcastShape(xShape, offsetShape); + this.variableNames.push('offset'); + offsetSnippet = 'getOffsetAtOutCoords()'; + } + let scaleSnippet = 'vec4(1.0)'; + if (scaleShape != null) { + assertAndGetBroadcastShape(xShape, scaleShape); + this.variableNames.push('scale'); + scaleSnippet = 'getScaleAtOutCoords()'; + } + this.outputShape = xShape; + this.userCode = ` + void main() { + vec4 offset = ${offsetSnippet}; + vec4 scale = ${scaleSnippet}; + + vec4 x = getXAtOutCoords(); + vec4 mean = getMeanAtOutCoords(); + vec4 variance = getVarianceAtOutCoords(); + + vec4 inv = scale * inversesqrt(variance + vec4(${varianceEpsilon})); + + setOutput((x - mean) * inv + offset); + } + `; + } + } + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + const batchNorm = ({ inputs, backend, attrs }) => { + const { x, mean, variance, offset, scale } = inputs; + assert$1(mean.shape.length === variance.shape.length, () => 'Batch normalization gradient requires mean and variance to have ' + + 'equal ranks.'); + assert$1(offset == null || mean.shape.length === offset.shape.length, () => 'Batch normalization gradient requires mean and offset to have ' + + 'equal ranks.'); + assert$1(scale == null || mean.shape.length === scale.shape.length, () => 'Batch normalization gradient requires mean and scale to have ' + + 'equal ranks.'); + let { varianceEpsilon } = attrs; + if (varianceEpsilon == null) { + varianceEpsilon = 0.001; + } + const finalInputs = [x, mean, variance]; + let offsetShape = null; + if (offset != null) { + offsetShape = offset.shape; + finalInputs.push(offset); + } + let scaleShape = null; + if (scale != null) { + scaleShape = scale.shape; + finalInputs.push(scale); + } + const program = env().getBool('WEBGL_PACK_NORMALIZATION') ? 
+ new BatchNormPackedProgram(x.shape, mean.shape, variance.shape, offsetShape, scaleShape, varianceEpsilon) : + new BatchNormProgram(x.shape, mean.shape, variance.shape, offsetShape, scaleShape, varianceEpsilon); + const output = backend.runWebGLProgram(program, finalInputs, finalInputs[0].dtype); + return output; + }; + const batchNormConfig = { + kernelName: FusedBatchNorm, + backendName: 'webgl', + kernelFunc: batchNorm, + }; + + /** + * @license + * Copyright 2017 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ + class SliceProgram { + constructor(destSize) { + this.variableNames = ['source']; + this.outputShape = destSize; + this.rank = destSize.length; + const dtype = getCoordsDataType(this.rank); + this.customUniforms = [{ name: 'start', arrayIndex: this.rank, type: 'int' }]; + const sourceCoords = getCoords$1(this.rank); + let body; + const coordSum = destSize.map((_, i) => { + return `sourceLoc.${coords[i]} = start[${i}] + coords.${coords[i]};`; + }); + body = ` + ${dtype} sourceLoc; + ${dtype} coords = getOutputCoords(); + ${coordSum.join('\n')} + `; + this.userCode = ` + void main() { + ${body} + setOutput(getSource(${sourceCoords})); + } + `; + } + } + const coords = ['x', 'y', 'z', 'w', 'u', 'v']; + function getCoords$1(rank) { + if (rank === 1) { + return 'sourceLoc'; + } + else if (rank <= 6) { + return coords.slice(0, rank).map(x => 'sourceLoc.' + x).join(','); + } + else { + throw Error(`Slicing for rank ${rank} is not yet supported`); + } + } + + /** + * @license + * Copyright 2019 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ + class SlicePackedProgram { + constructor(destSize) { + this.variableNames = ['source']; + this.packedInputs = true; + this.packedOutput = true; + this.outputShape = destSize; + this.rank = destSize.length; + this.customUniforms = [{ name: 'start', arrayIndex: this.rank, type: 'int' }]; + const dtype = getCoordsDataType(this.rank); + const coords = getChannels('coords', this.rank); + const sourceLoc = getChannels('sourceLoc', this.rank); + const innerDims = this.rank === 1 ? 'sourceLoc' : `vec2(${sourceLoc.slice(-2).join()})`; + const getChannel = `getChannel(getSource(${sourceLoc.join()}), ${innerDims})`; + const upperRow = ` + result.x = ${getChannel}; + if (++${coords[this.rank - 1]} < ${destSize[this.rank - 1]}) { + ++${sourceLoc[this.rank - 1]}; + result.y = ${getChannel}; + --${sourceLoc[this.rank - 1]}; + } + `; + const lowerRow = this.rank === 1 ? '' : ` + --${coords[this.rank - 1]}; + if (++${coords[this.rank - 2]} < ${destSize[this.rank - 2]}) { + ++${sourceLoc[this.rank - 2]}; + result.z = ${getChannel}; + if (++${coords[this.rank - 1]} < ${destSize[this.rank - 1]}) { + ++${sourceLoc[this.rank - 1]}; + result.w = ${getChannel}; + } + } + `; + const sourceLocSetup = this.rank <= 4 ? + `sourceLoc = coords + + ${dtype}(${destSize.map((_, i) => `start[${i}]`).join()});` : + destSize.map((_, i) => `${sourceLoc[i]} = ${coords[i]} + start[${i}];`) + .join('\n'); + this.userCode = ` + void main() { + ${dtype} coords = getOutputCoords(); + ${dtype} sourceLoc; + ${sourceLocSetup} + vec4 result = vec4(0.); + ${upperRow} + ${lowerRow} + setOutput(result); + } + `; + } + } + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + function shallowSlice(x, begin, size, backend) { + const xTexData = backend.texData.get(x.dataId); + const t = backend.makeTensorInfo(size, x.dtype); + const newTexData = backend.texData.get(t.dataId); + // Copy texture data from the original tensor. + Object.assign(newTexData, xTexData); + newTexData.refCount = 1; + newTexData.shape = size; + newTexData.dtype = x.dtype; + let flatOffset = computeFlatOffset(begin, computeStrides(x.shape)); + if (xTexData.slice) { + // We are slicing an already sliced tensor, so we have to accumulate + // the offset. + flatOffset += xTexData.slice.flatOffset; + } + newTexData.slice = { + flatOffset, + // Point to the original dataId, which is used to do ref counting. + origDataId: xTexData.slice && xTexData.slice.origDataId || x.dataId + }; + // Increase the ref count for that data bucket. + const refCount = backend.dataRefCount.get(newTexData.slice.origDataId) || 1; + backend.dataRefCount.set(newTexData.slice.origDataId, refCount + 1); + return t; + } + function slice(args) { + const { inputs, backend, attrs } = args; + const { x } = inputs; + const { begin, size } = attrs; + const [$begin, $size] = parseSliceParams(x, begin, size); + assertParamsValid(x, $begin, $size); + if (sizeFromShape($size) === 0) { + return backend.makeTensorInfo($size, x.dtype, []); + } + // Run on cpu if dtype is string. For string, the backend represents it + // as Uint8Array[], where each Uint8Array is a character. 
Given that the + // computation is only on the outer array, uploading the whole data onto + // gpu is wasteful. Also, currently webgl doesn't have a design to + // upload and retrieve Uint8Array[] between cpu and gpu. Therefore, we + // just run the kernel on cpu if dtype is string. + if (backend.shouldExecuteOnCPU([x]) || x.dtype === 'string') { + const xTexData = backend.texData.get(x.dataId); + const outValues = sliceImplCPU(xTexData.values, $begin, $size, x.shape, x.dtype); + return backend.makeTensorInfo($size, x.dtype, outValues); + } + const { isPacked } = backend.texData.get(x.dataId); + const isContinous = isSliceContinous(x.shape, $begin, $size); + if (isPacked || !isContinous) { + const program = env().getBool('WEBGL_PACK_ARRAY_OPERATIONS') ? + new SlicePackedProgram($size) : + new SliceProgram($size); + const customValues = [$begin]; + return backend.runWebGLProgram(program, [x], x.dtype, customValues); + } + backend.uploadToGPU(x.dataId); + return shallowSlice(x, $begin, $size, backend); + } + const sliceConfig = { + kernelName: Slice, + backendName: 'webgl', + kernelFunc: slice + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ + const batchToSpaceND = (args) => { + const { inputs, backend, attrs } = args; + const { x } = inputs; + const { blockShape, crops } = attrs; + assert$1(x.shape.length <= 4, () => 'batchToSpaceND for rank > 4 with a WebGL backend not ' + + 'implemented yet'); + const prod = blockShape.reduce((a, b) => a * b); + const reshaped = getReshaped(x.shape, blockShape, prod); + const permuted = getPermuted(reshaped.length, blockShape.length); + const reshapedPermuted = getReshapedPermuted(x.shape, blockShape, prod); + const sliceBeginCoords = getSliceBeginCoords(crops, blockShape.length); + const sliceSize = getSliceSize(reshapedPermuted, crops, blockShape.length); + const toDispose = []; + const reshapedIntermediate = reshape({ inputs: { x }, backend, attrs: { shape: reshaped } }); + const transposedIntermediate = transpose({ inputs: { x: reshapedIntermediate }, backend, attrs: { perm: permuted } }); + const reshapedIntermediate2 = reshape({ + inputs: { x: transposedIntermediate }, + backend, + attrs: { shape: reshapedPermuted } + }); + const sliced = slice({ + inputs: { x: reshapedIntermediate2 }, + backend, + attrs: { begin: sliceBeginCoords, size: sliceSize } + }); + toDispose.push(reshapedIntermediate); + toDispose.push(transposedIntermediate); + toDispose.push(reshapedIntermediate2); + toDispose.forEach(t => backend.disposeIntermediateTensorInfo(t)); + return sliced; + }; + const batchToSpaceNDConfig = { + kernelName: BatchToSpaceND, + backendName: 'webgl', + kernelFunc: batchToSpaceND + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + function bincount(args) { + const { inputs, backend, attrs } = args; + const { x, weights } = inputs; + const { size } = attrs; + const xVals = backend.readSync(x.dataId); + const weightsVals = backend.readSync(weights.dataId); + const outVals = bincountImplCPU(xVals, weightsVals, weights.dtype, weights.shape, size); + return backend.makeTensorInfo([size], weights.dtype, outVals); + } + const bincountConfig = { + kernelName: Bincount, + backendName: 'webgl', + kernelFunc: bincount + }; + + /** + * @license + * Copyright 2023 Google LLC. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ + const BITWISEAND = ` + int r = int(a.r) & int(b.r); + int g = int(a.g) & int(b.g); + int rb = int(a.b) & int(b.b); + int ra = int(a.a) & int(b.a); + return vec4(r, g, rb, ra); +`; + const BITWISEAND_UNPACKED = ` + return float(int(a.r) & int(b.r)); +`; + function bitwiseAnd(args) { + const { inputs, backend } = args; + const { a, b } = inputs; + const shouldUsePackedProgram = env().getBool('WEBGL_PACK_BINARY_OPERATIONS'); + const versionNumber = env().getNumber('WEBGL_VERSION'); + // The type of a and b are ensured to be `int32` in core, therefore no need to + // consider other type situations. + if ((backend.shouldExecuteOnCPU([a, b])) || versionNumber === 1) { + const aVals = backend.texData.get(a.dataId).values; + const bVals = backend.texData.get(b.dataId).values; + const [outValues, outShape] = bitwiseAndImplCPU(a.shape, b.shape, aVals, bVals, a.dtype); + const out = backend.makeTensorInfo(outShape, a.dtype); + const outData = backend.texData.get(out.dataId); + outData.values = outValues; + return out; + } + let program; + if (shouldUsePackedProgram) { + program = new BinaryOpPackedProgram(BITWISEAND, a.shape, b.shape, false); + } + else { + program = new BinaryOpProgram(BITWISEAND_UNPACKED, a.shape, b.shape); + } + return backend.runWebGLProgram(program, [a, b], a.dtype); + } + const bitwiseAndConfig = { + kernelName: BitwiseAnd, + backendName: 'webgl', + kernelFunc: bitwiseAnd + }; + + /** + * @license + * Copyright 2021 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + function broadcastArgs(args) { + const { inputs, backend } = args; + const { s0, s1 } = inputs; + const s0Vals = backend.readSync(s0.dataId); + const s1Vals = backend.readSync(s1.dataId); + const broadcastShape = assertAndGetBroadcastShape(Array.from(s0Vals), Array.from(s1Vals)); + return backend.makeTensorInfo([broadcastShape.length], 'int32', Int32Array.from(broadcastShape)); + } + const broadcastArgsConfig = { + kernelName: BroadcastArgs, + backendName: 'webgl', + kernelFunc: broadcastArgs + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ + const NOT_EQUAL = `return float(a != b);`; + const notEqual = binaryKernelFunc({ opSnippet: NOT_EQUAL, cpuKernelImpl: notEqualImplCPU, dtype: 'bool' }); + const notEqualConfig = { + kernelName: NotEqual, + backendName: 'webgl', + kernelFunc: notEqual, + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + function real(args) { + const { inputs, backend } = args; + const { input } = inputs; + const inputData = backend.texData.get(input.dataId); + return identity({ inputs: { x: inputData.complexTensorInfos.real }, backend }); + } + const realConfig = { + kernelName: Real, + backendName: 'webgl', + kernelFunc: real + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + const TO_INT = `return float(int(x));`; + function int(input, backend) { + const program = new UnaryOpProgram(input.shape, TO_INT); + const output = backend.runWebGLProgram(program, [input], 'int32'); + return { dataId: output.dataId, shape: output.shape, dtype: output.dtype }; + } + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + function cast(args) { + const { inputs, backend, attrs } = args; + const { x } = inputs; + const { dtype } = attrs; + // Casting to complex64. + if (dtype === 'complex64') { + if (x.dtype === 'complex64') { + return identity({ inputs: { x }, backend }); + } + // TODO(annxingyuan): Import kernel function once zeros is modularized. 
+ const zerosTensor = zeros$2(x.shape); + const floatX = cast({ inputs: { x }, backend, attrs: { dtype: 'float32' } }); + const result = complex({ inputs: { real: floatX, imag: zerosTensor }, backend }); + zerosTensor.dispose(); + backend.disposeIntermediateTensorInfo(floatX); + return result; + } + // Casting from complex64 + if (x.dtype === 'complex64') { + const realPart = real({ inputs: { input: x }, backend }); + const result = cast({ inputs: { x: realPart }, backend, attrs: { dtype } }); + backend.disposeIntermediateTensorInfo(realPart); + return result; + } + if (!hasEncodingLoss(x.dtype, dtype)) { + // We don't change the underlying data, since we cast to higher + // precision. + const result = identity({ inputs: { x }, backend }); + return { dataId: result.dataId, shape: result.shape, dtype }; + } + if (backend.shouldExecuteOnCPU([x])) { + const values = backend.texData.get(x.dataId).values; + const [resultShape, resultType, resultData] = castImplCPU(values, x.shape, x.dtype, dtype); + return backend.makeTensorInfo(resultShape, resultType, resultData); + } + if (dtype === 'int32') { + return int(x, backend); + } + if (dtype === 'bool') { + const zerosTensorInfo = backend.makeTensorInfo([], 'bool', getTypedArrayFromDType('bool', 1)); + const binaryInputs = { a: x, b: zerosTensorInfo }; + const result = notEqual({ inputs: binaryInputs, backend }); + backend.disposeIntermediateTensorInfo(zerosTensorInfo); + return result; + } + throw new Error(`Error in Cast: failed to cast ${x.dtype} to ${dtype}`); + } + const castConfig = { + kernelName: Cast, + backendName: 'webgl', + kernelFunc: cast + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + const CEIL = `return ceil(x);`; + const ceil = unaryKernelFunc({ opSnippet: CEIL, packedOpSnippet: CEIL, cpuKernelImpl: ceilImplCPU }); + const ceilConfig = { + kernelName: Ceil, + backendName: 'webgl', + kernelFunc: ceil + }; + + /** + * @license + * Copyright 2017 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + class ClipProgram { + constructor(aShape) { + this.variableNames = ['A']; + this.customUniforms = [ + { name: 'minVal', type: 'float' }, + { name: 'maxVal', type: 'float' } + ]; + this.outputShape = aShape; + this.userCode = ` + + void main() { + float value = getAAtOutCoords(); + if (isnan(value)) { + setOutput(value); + return; + } + + setOutput(clamp(value, minVal, maxVal)); + } + `; + } + } + + /** + * @license + * Copyright 2018 Google LLC. All Rights Reserved. 
+ * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + class ClipPackedProgram { + constructor(aShape) { + this.variableNames = ['A']; + this.packedInputs = true; + this.packedOutput = true; + this.customUniforms = [ + { name: 'minVal', type: 'float' }, + { name: 'maxVal', type: 'float' } + ]; + this.outputShape = aShape; + this.userCode = ` + void main() { + vec4 value = getAAtOutCoords(); + + if (any(isnan(value))) { + setOutput(value); + return; + } + + setOutput(clamp(value, vec4(minVal), vec4(maxVal))); + } + `; + } + } + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ + function clipByValue(args) { + const { inputs, backend, attrs } = args; + const { x } = inputs; + const { clipValueMin, clipValueMax } = attrs; + let program; + if (env().getBool('WEBGL_PACK_CLIP')) { + program = new ClipPackedProgram(x.shape); + } + else { + program = new ClipProgram(x.shape); + } + const customValues = [[clipValueMin], [clipValueMax]]; + return backend.runWebGLProgram(program, [x], x.dtype, customValues); + } + const clipByValueConfig = { + kernelName: ClipByValue, + backendName: 'webgl', + kernelFunc: clipByValue + }; + + /** + * @license + * Copyright 2018 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + class ComplexAbsProgram { + constructor(shape) { + this.variableNames = ['real', 'imag']; + this.outputShape = shape; + this.userCode = ` + void main() { + float re = abs(getRealAtOutCoords()); + float im = abs(getImagAtOutCoords()); + float mx = max(re, im); + + // sadly the length function in glsl is not underflow-safe + // (at least not on Intel GPUs). So the safe solution is + // to ensure underflow-safety in all cases. + setOutput( + mx == 0.0 ? 0.0 : mx * length(vec2(1, min(re, im)/mx)) + ); + } + `; + } + } + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. 
+ * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + // Returns a TensorInfo with the complex shape and the dataId of the + // underlying part. We need to do this because a reshaped complex tensor is + // not reflected in its parts. + function makeComplexComponentTensorInfo(complexTensor, complexPart) { + return { + dataId: complexPart.dataId, + dtype: complexPart.dtype, + shape: complexTensor.shape + }; + } + function complexAbs(args) { + const { inputs, backend } = args; + const { x } = inputs; + const xData = backend.texData.get(x.dataId); + const program = new ComplexAbsProgram(x.shape); + const programInputs = [ + makeComplexComponentTensorInfo(x, xData.complexTensorInfos.real), + makeComplexComponentTensorInfo(x, xData.complexTensorInfos.imag), + ]; + return backend.runWebGLProgram(program, programInputs, programInputs[0].dtype); + } + const complexAbsConfig = { + kernelName: ComplexAbs, + backendName: 'webgl', + kernelFunc: complexAbs + }; + + /** + * @license + * Copyright 2017 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + class ConcatProgram { + // Concats 2d tensors along axis=1. See comments in MathBackendWebGL.concat(). + constructor(shapes) { + this.outputShape = []; + this.outputShape = computeOutShape$1(shapes, 1 /* axis */); + this.variableNames = shapes.map((_, i) => `T${i}`); + const offsets = new Array(shapes.length - 1); + offsets[0] = shapes[0][1]; + for (let i = 1; i < offsets.length; i++) { + offsets[i] = offsets[i - 1] + shapes[i][1]; + } + const snippets = [`if (yC < ${offsets[0]}) setOutput(getT0(yR, yC));`]; + for (let i = 1; i < offsets.length; i++) { + const shift = offsets[i - 1]; + snippets.push(`else if (yC < ${offsets[i]}) ` + + `setOutput(getT${i}(yR, yC-${shift}));`); + } + const lastIndex = offsets.length; + const lastShift = offsets[offsets.length - 1]; + snippets.push(`else setOutput(getT${lastIndex}(yR, yC-${lastShift}));`); + this.userCode = ` + void main() { + ivec2 coords = getOutputCoords(); + int yR = coords.x; + int yC = coords.y; + + ${snippets.join('\n ')} + } + `; + } + } + + /** + * @license + * Copyright 2019 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + class ConcatPackedProgram { + constructor(shapes, axis) { + this.packedInputs = true; + this.packedOutput = true; + this.outputShape = []; + this.outputShape = computeOutShape$1(shapes, axis); + const shape = this.outputShape; + const rank = shape.length; + const dtype = getCoordsDataType(rank); + const coords = getChannels('coords', rank); + const channels = ['x', 'y', 'z', 'w', 'u', 'v'].slice(0, rank); + this.variableNames = shapes.map((_, i) => `T${i}`); + const offsets = new Array(shapes.length - 1); + offsets[0] = shapes[0][axis]; + for (let i = 1; i < offsets.length; i++) { + offsets[i] = offsets[i - 1] + shapes[i][axis]; + } + const channel = channels[axis]; + const lastChannels = channels.slice(-2); + const allChannels = channels.join(); + let getValueSnippet = `if (${channel} < ${offsets[0]}) { + return getChannel( + getT0(${allChannels}), vec2(${lastChannels.join()})); + }`; + for (let i = 1; i < offsets.length; i++) { + const shift = offsets[i - 1]; + // Note: the >= comparison below may seem unnecessary given the check + // above but is needed to workaround branch execution issues on some + // devices. It makes all the conditions exclusive without relying on + // execution order. 
+ getValueSnippet += ` + if (${channel} < ${offsets[i]} && ${channel} >= ${offsets[i - 1]}) { + return getChannel( + getT${i}(${shiftedChannels(channels, channel, shift)}), + vec2(${shiftedChannels(lastChannels, channel, shift)})); + }`; + } + const lastIndex = offsets.length; + const shift = offsets[offsets.length - 1]; + getValueSnippet += ` + return getChannel( + getT${lastIndex}(${shiftedChannels(channels, channel, shift)}), + vec2(${shiftedChannels(lastChannels, channel, shift)}));`; + this.userCode = ` + float getValue(${channels.map(x => 'int ' + x)}) { + ${getValueSnippet} + } + + void main() { + ${dtype} coords = getOutputCoords(); + vec4 result = vec4(getValue(${coords}), 0., 0., 0.); + + ${coords[rank - 1]} = ${coords[rank - 1]} + 1; + if (${coords[rank - 1]} < ${shape[rank - 1]}) { + result.g = getValue(${coords}); + } + + ${coords[rank - 2]} = ${coords[rank - 2]} + 1; + if (${coords[rank - 2]} < ${shape[rank - 2]}) { + result.a = getValue(${coords}); + } + + ${coords[rank - 1]} = ${coords[rank - 1]} - 1; + if (${coords[rank - 2]} < ${shape[rank - 2]} && + ${coords[rank - 1]} < ${shape[rank - 1]}) { + result.b = getValue(${coords}); + } + setOutput(result); + } + `; + } + } + /** + * Return an expression for coordinates into a vector where a given channel + * will be offset by [shift]. + * + * @param channels the channels to consider + * @param channel the channel we want shifted + * @param shift the amount to subtract from the channel. + * + * @returns a string of the form 'x, y-[shift], z' where any one channel can + * have the shift applied. + */ + function shiftedChannels(channels, channel, shift) { + const channelIdx = channels.indexOf(channel); + const res = channels.map((c, idx) => { + if (idx === channelIdx) { + return `${c} - ${shift}`; + } + else { + return c; + } + }); + return res.join(); + } + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. 
+ * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + function imag(args) { + const { inputs, backend } = args; + const { input } = inputs; + const inputData = backend.texData.get(input.dataId); + return identity({ inputs: { x: inputData.complexTensorInfos.imag }, backend }); + } + const imagConfig = { + kernelName: Imag, + backendName: 'webgl', + kernelFunc: imag + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ + function concatImpl(inputs, axis, backend) { + const dtype = inputs[0].dtype; + if (dtype === 'complex64') { + const reals = inputs.map((t) => real({ inputs: { input: t }, backend })); + const imags = inputs.map((t) => imag({ inputs: { input: t }, backend })); + const realConcated = concatImpl(reals, axis, backend); + const imagConcated = concatImpl(imags, axis, backend); + const result = complex({ inputs: { real: realConcated, imag: imagConcated }, backend }); + reals.forEach(r => backend.disposeIntermediateTensorInfo(r)); + imags.forEach(i => backend.disposeIntermediateTensorInfo(i)); + backend.disposeIntermediateTensorInfo(realConcated); + backend.disposeIntermediateTensorInfo(imagConcated); + return result; + } + let runOnCpu = backend.shouldExecuteOnCPU(inputs); + // Run on cpu if dtype is string. For string, the backend represents it + // as Uint8Array[], where each Uint8Array is a character. Given that the + // computation is only on the outer array, uploading the whole data onto + // gpu is wasteful. Also, currently webgl doesn't have a design to + // upload and retrieve Uint8Array[] between cpu and gpu. Therefore, we + // just run the kernel on cpu if dtype is string. + if (dtype === 'string') { + runOnCpu = true; + } + if (runOnCpu) { + // Any concat of n-dimensional tensors across any axis can be reduced to + // a concatenation of two-dimensional tensors across the axis 1 by first + // partitioning the axes of the original tensors into those less than the + // axis to be concatenated and the rest. Then reshape the tensors + // into a two-dimensional tensor by collapsing these two sets of axes and + // concatenate the resulting matrices across the axis 1, finally reshaping + // the result to have the proper shape. 
+ const tensors2D = inputs.map(t => { + const innerSize = sizeFromShape(t.shape.slice(axis)); + const shape = [-1, innerSize]; + return reshape({ inputs: { x: t }, backend, attrs: { shape } }); + }); + const inputsValShapes = tensors2D.map(t => { + return { vals: backend.readSync(t.dataId), shape: t.shape }; + }); + // Concats 2d tensors along axis=1. + const outShape = computeOutShape$1(tensors2D.map(t => t.shape), 1 /* axis */); + const simplyConcat = tensors2D[0].shape[0] === 1; + const outVals = concatImplCPU(inputsValShapes, outShape, dtype, simplyConcat); + const finalOutShape = computeOutShape$1(inputs.map(t => t.shape), axis); + const outInfo = backend.makeTensorInfo(finalOutShape, dtype, outVals); + tensors2D.forEach(t => backend.disposeIntermediateTensorInfo(t)); + return outInfo; + } + // Keep only non-empty tensors (ignore tensors with 0 in their shape). + const $inputs = inputs.filter(t => sizeFromShape(t.shape) > 0); + const shouldPack = env().getBool('WEBGL_PACK_ARRAY_OPERATIONS') && + $inputs[0].shape.length > 1; + if ($inputs.length === 1) { + // Clone tensor. + const program = shouldPack ? 
+ new UnaryOpProgram(inputs[0].shape, CLONE) : + new UnaryOpPackedProgram(inputs[0].shape, CLONE); + return backend.runWebGLProgram(program, inputs, dtype); + } + const maxTexturesInShader = env().getNumber('WEBGL_MAX_TEXTURES_IN_SHADER'); + if ($inputs.length > maxTexturesInShader) { + const reducedInputs = []; + for (let i = 0; i < $inputs.length; i += maxTexturesInShader) { + const subArray = $inputs.slice(i, i + maxTexturesInShader); + reducedInputs.push(concatImpl(subArray, axis, backend)); + } + const result = concatImpl(reducedInputs, axis, backend); + for (const i of reducedInputs) { + backend.disposeIntermediateTensorInfo(i); + } + return result; + } + if (shouldPack) { + const program = new ConcatPackedProgram($inputs.map(t => t.shape), axis); + return backend.runWebGLProgram(program, $inputs, dtype); + } + const { tensors2D, outShape } = computeTensors2D($inputs, axis, backend); + const program = new ConcatProgram(tensors2D.map(t => t.shape)); + const result = backend.runWebGLProgram(program, tensors2D, dtype); + tensors2D.forEach(r => backend.disposeIntermediateTensorInfo(r)); + const reshapedResult = reshape({ inputs: { x: result }, attrs: { shape: outShape }, backend }); + backend.disposeIntermediateTensorInfo(result); + return reshapedResult; + } + function computeTensors2D(inputs, axis, backend) { + // Any concat of n-dimensional tensors across any axis can be reduced to + // a concatenation of two-dimensional tensors across the axis 1 by first + // partitioning the axes of the original tensors into those less than the + // axis to be concatenated and the rest. Then reshape the tensors + // into a two-dimensional tensor by collapsing these two sets of axes and + // concatenate the resulting matrices across the axis 1, finally reshaping + // the result to have the proper shape. 
+ const outShape = computeOutShape$1(inputs.map(t => t.shape), axis); + const tensors2D = inputs.map(x => reshape({ + inputs: { x }, + attrs: { shape: [-1, sizeFromShape(x.shape.slice(axis))] }, + backend + })); + return { tensors2D, outShape }; + } + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + function concat(args) { + const { inputs, backend, attrs } = args; + const { axis } = attrs; + const $axis = parseAxisParam(axis, inputs[0].shape)[0]; + const shapes = inputs.map(t => t.shape); + assertParamsConsistent(shapes, $axis); + const outShape = computeOutShape$1(inputs.map(t => t.shape), $axis); + if (sizeFromShape(outShape) === 0) { + return backend.makeTensorInfo(outShape, inputs[0].dtype, []); + } + // Keep only non-empty tensors (ignore tensors with 0 in their shape). + const $inputs = inputs.filter(t => sizeFromShape(t.shape) > 0); + if ($inputs.length === 1) { + return identity({ inputs: { x: $inputs[0] }, backend }); + } + return concatImpl($inputs, $axis, backend); + } + const concatConfig = { + kernelName: Concat, + backendName: 'webgl', + kernelFunc: concat + }; + + /** + * @license + * Copyright 2017 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + class Conv2DProgram { + constructor(convInfo, addBias = false, activation = null, hasPreluActivationWeights = false, hasLeakyreluAlpha = false) { + this.variableNames = ['x', 'W']; + this.outputShape = convInfo.outShape; + const padTop = convInfo.padInfo.top; + const padLeft = convInfo.padInfo.left; + const strideHeight = convInfo.strideHeight; + const strideWidth = convInfo.strideWidth; + const dilationHeight = convInfo.dilationHeight; + const dilationWidth = convInfo.dilationWidth; + const filterHeight = convInfo.filterHeight; + const filterWidth = convInfo.filterWidth; + const inputDepthNearestVec4 = Math.floor(convInfo.inChannels / 4) * 4; + const inputDepthVec4Remainder = convInfo.inChannels % 4; + const isChannelsLast = convInfo.dataFormat === 'channelsLast'; + const rowDim = isChannelsLast ? 1 : 2; + const colDim = isChannelsLast ? 2 : 3; + const channelDim = isChannelsLast ? 
3 : 1; + let activationSnippet = '', applyActivationSnippet = ''; + if (activation) { + if (hasPreluActivationWeights) { + activationSnippet = `float activation(float a) { + float b = getPreluActivationWeightsAtOutCoords(); + ${activation} + }`; + } + else if (hasLeakyreluAlpha) { + activationSnippet = `float activation(float a) { + float b = getLeakyreluAlphaAtOutCoords(); + ${activation} + }`; + } + else { + activationSnippet = ` + float activation(float x) { + ${activation} + } + `; + } + applyActivationSnippet = `result = activation(result);`; + } + const addBiasSnippet = addBias ? 'result += getBiasAtOutCoords();' : ''; + if (addBias) { + this.variableNames.push('bias'); + } + if (hasPreluActivationWeights) { + this.variableNames.push('preluActivationWeights'); + } + if (hasLeakyreluAlpha) { + this.variableNames.push('leakyreluAlpha'); + } + this.userCode = ` + ${activationSnippet} + + const ivec2 strides = ivec2(${strideHeight}, ${strideWidth}); + const ivec2 pads = ivec2(${padTop}, ${padLeft}); + + void main() { + ivec4 coords = getOutputCoords(); + int batch = coords[0]; + int d2 = coords[${channelDim}]; + + ivec2 xRCCorner = + ivec2(coords[${rowDim}], coords[${colDim}]) * strides - pads; + int xRCorner = xRCCorner.x; + int xCCorner = xRCCorner.y; + + // Convolve x(?, ?, d1) with w(:, :, d1, d2) to get y(yR, yC, d2). + // ? = to be determined. : = across all values in that axis. 
+ float dotProd = 0.0; + for (int wR = 0; wR < ${filterHeight}; wR++) { + int xR = xRCorner + wR * ${dilationHeight}; + + if (xR < 0 || xR >= ${convInfo.inHeight}) { + continue; + } + + for (int wC = 0; wC < ${filterWidth}; wC++) { + int xC = xCCorner + wC * ${dilationWidth}; + + if (xC < 0 || xC >= ${convInfo.inWidth}) { + continue; + } + + for (int d1 = 0; d1 < ${inputDepthNearestVec4}; d1 += 4) { + vec4 wValues = vec4( + getW(wR, wC, d1, d2), + getW(wR, wC, d1 + 1, d2), + getW(wR, wC, d1 + 2, d2), + getW(wR, wC, d1 + 3, d2) + ); + + if (${isChannelsLast}) { + vec4 xValues = vec4( + getX(batch, xR, xC, d1), + getX(batch, xR, xC, d1 + 1), + getX(batch, xR, xC, d1 + 2), + getX(batch, xR, xC, d1 + 3) + ); + dotProd += dot(xValues, wValues); + } else { + vec4 xValues = vec4( + getX(batch, d1, xR, xC), + getX(batch, d1 + 1, xR, xC), + getX(batch, d1 + 2, xR, xC), + getX(batch, d1 + 3, xR, xC) + ); + dotProd += dot(xValues, wValues); + } + } + + if (${inputDepthVec4Remainder === 1}) { + + if (${isChannelsLast}) { + dotProd += + getX(batch, xR, xC, ${inputDepthNearestVec4}) * + getW(wR, wC, ${inputDepthNearestVec4}, d2); + } else { + dotProd += + getX(batch, ${inputDepthNearestVec4}, xR, xC) * + getW(wR, wC, ${inputDepthNearestVec4}, d2); + } + + } else if (${inputDepthVec4Remainder === 2}) { + vec2 wValues = vec2( + getW(wR, wC, ${inputDepthNearestVec4}, d2), + getW(wR, wC, ${inputDepthNearestVec4} + 1, d2) + ); + + if (${isChannelsLast}) { + vec2 xValues = vec2( + getX(batch, xR, xC, ${inputDepthNearestVec4}), + getX(batch, xR, xC, ${inputDepthNearestVec4} + 1) + ); + dotProd += dot(xValues, wValues); + } else { + vec2 xValues = vec2( + getX(batch, ${inputDepthNearestVec4}, xR, xC), + getX(batch, ${inputDepthNearestVec4} + 1, xR, xC) + ); + dotProd += dot(xValues, wValues); + } + + } else if (${inputDepthVec4Remainder === 3}) { + vec3 wValues = vec3( + getW(wR, wC, ${inputDepthNearestVec4}, d2), + getW(wR, wC, ${inputDepthNearestVec4} + 1, d2), + getW(wR, wC, 
${inputDepthNearestVec4} + 2, d2) + ); + + if (${isChannelsLast}) { + vec3 xValues = vec3( + getX(batch, xR, xC, ${inputDepthNearestVec4}), + getX(batch, xR, xC, ${inputDepthNearestVec4} + 1), + getX(batch, xR, xC, ${inputDepthNearestVec4} + 2) + ); + dotProd += dot(xValues, wValues); + } else { + vec3 xValues = vec3( + getX(batch, ${inputDepthNearestVec4}, xR, xC), + getX(batch, ${inputDepthNearestVec4} + 1, xR, xC), + getX(batch, ${inputDepthNearestVec4} + 2, xR, xC) + ); + dotProd += dot(xValues, wValues); + } + + } + } + } + + float result = dotProd; + ${addBiasSnippet} + ${applyActivationSnippet} + setOutput(result); + } + `; + } + } + class Conv3DProgram { + constructor(convInfo) { + this.variableNames = ['x', 'W']; + this.outputShape = convInfo.outShape; + const padFront = convInfo.padInfo.front; + const padTop = convInfo.padInfo.top; + const padLeft = convInfo.padInfo.left; + const strideDepth = convInfo.strideDepth; + const strideHeight = convInfo.strideHeight; + const strideWidth = convInfo.strideWidth; + const dilationDepth = convInfo.dilationDepth; + const dilationHeight = convInfo.dilationHeight; + const dilationWidth = convInfo.dilationWidth; + const filterDepth = convInfo.filterDepth; + const filterHeight = convInfo.filterHeight; + const filterWidth = convInfo.filterWidth; + const inputDepthNearestVec4 = Math.floor(convInfo.inChannels / 4) * 4; + const inputDepthVec4Remainder = convInfo.inChannels % 4; + this.userCode = ` + const ivec3 strides = ivec3(${strideDepth}, ${strideHeight}, ${strideWidth}); + const ivec3 pads = ivec3(${padFront}, ${padTop}, ${padLeft}); + + void main() { + ivec5 coords = getOutputCoords(); + int batch = coords.x; + int d2 = coords.u; + + ivec3 xFRCCorner = ivec3(coords.y, coords.z, coords.w) * strides - pads; + int xFCorner = xFRCCorner.x; + int xRCorner = xFRCCorner.y; + int xCCorner = xFRCCorner.z; + + // Convolve x(?, ?, ?, d1) with w(:, :, :, d1, d2) to get + // y(yF, yR, yC, d2). ? = to be determined. 
: = across all + // values in that axis. + float dotProd = 0.0; + for (int wF = 0; wF < ${filterDepth}; wF++) { + int xF = xFCorner + wF * ${dilationDepth}; + + if (xF < 0 || xF >= ${convInfo.inDepth}) { + continue; + } + + for (int wR = 0; wR < ${filterHeight}; wR++) { + int xR = xRCorner + wR * ${dilationHeight}; + + if (xR < 0 || xR >= ${convInfo.inHeight}) { + continue; + } + + for (int wC = 0; wC < ${filterWidth}; wC++) { + int xC = xCCorner + wC * ${dilationWidth}; + + if (xC < 0 || xC >= ${convInfo.inWidth}) { + continue; + } + + for (int d1 = 0; d1 < ${inputDepthNearestVec4}; d1 += 4) { + vec4 xValues = vec4( + getX(batch, xF, xR, xC, d1), + getX(batch, xF, xR, xC, d1 + 1), + getX(batch, xF, xR, xC, d1 + 2), + getX(batch, xF, xR, xC, d1 + 3) + ); + vec4 wValues = vec4( + getW(wF, wR, wC, d1, d2), + getW(wF, wR, wC, d1 + 1, d2), + getW(wF, wR, wC, d1 + 2, d2), + getW(wF, wR, wC, d1 + 3, d2) + ); + + dotProd += dot(xValues, wValues); + } + + if (${inputDepthVec4Remainder === 1}) { + dotProd += + getX(batch, xF, xR, xC, ${inputDepthNearestVec4}) * + getW(wF, wR, wC, ${inputDepthNearestVec4}, d2); + } else if (${inputDepthVec4Remainder === 2}) { + vec2 xValues = vec2( + getX(batch, xF, xR, xC, ${inputDepthNearestVec4}), + getX(batch, xF, xR, xC, ${inputDepthNearestVec4} + 1) + ); + vec2 wValues = vec2( + getW(wF, wR, wC, ${inputDepthNearestVec4}, d2), + getW(wF, wR, wC, ${inputDepthNearestVec4} + 1, d2) + ); + dotProd += dot(xValues, wValues); + } else if (${inputDepthVec4Remainder === 3}) { + vec3 xValues = vec3( + getX(batch, xF, xR, xC, ${inputDepthNearestVec4}), + getX(batch, xF, xR, xC, ${inputDepthNearestVec4} + 1), + getX(batch, xF, xR, xC, ${inputDepthNearestVec4} + 2) + ); + vec3 wValues = vec3( + getW(wF, wR, wC, ${inputDepthNearestVec4}, d2), + getW(wF, wR, wC, ${inputDepthNearestVec4} + 1, d2), + getW(wF, wR, wC, ${inputDepthNearestVec4} + 2, d2) + ); + dotProd += dot(xValues, wValues); + } + } + } + } + setOutput(dotProd); + } + `; + } + } + + /** 
+ * @license + * Copyright 2022 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + class Conv2DPackedProgram { + constructor(convInfo, addBias = false, activation = null, hasPreluActivation = false, hasLeakyReluAlpha = false) { + this.variableNames = ['x', 'W']; + this.packedInputs = true; + this.packedOutput = true; + this.customUniforms = [ + { name: 'pads', type: 'ivec2' }, + { name: 'strides', type: 'ivec2' }, + { name: 'dilations', type: 'ivec2' }, + { name: 'inDims', type: 'ivec2' }, + ]; + this.outputShape = convInfo.outShape; + this.enableShapeUniforms = useShapeUniforms(this.outputShape.length); + const padLeft = convInfo.padInfo.left; + const strideWidth = convInfo.strideWidth; + const dilationWidth = convInfo.dilationWidth; + const filterHeight = convInfo.filterHeight; + const filterWidth = convInfo.filterWidth; + const texelsAcross = filterWidth; + let mainLoop = ` + int xR; int xC; int xCOffset; + vec4 wTexel; vec4 previous; vec4 final;`; + for (let c = 0; c < filterWidth; c++) { + mainLoop += ` + vec4 xTexelC${c * 2}; + int xTexelC${c * 2}Ready; + vec4 xTexelC${c * 2 + 1}; + int xTexelC${c * 2 + 1}Ready; + vec4 xC${c};`; + } + /** + * This vectorized implementation works by gathering the values needed for + * each output channel's dot product into vec4's and then multiplying them + * all together (this happens in the final double 
for-loop below). Most of + * the main loop consists of constructing these vec4's with the minimum + * number of texture2D calls, which means making use of all four returned + * values from a texture2D call at once. + */ + mainLoop += ` + for (int r = 0; r < ${filterHeight}; r++) { + for (int d1 = 0; d1 < ${convInfo.inChannels}; d1 += 2) { + `; + for (let c = 0; c < filterWidth; c++) { + mainLoop += ` + xTexelC${c * 2} = vec4(0.0); + xTexelC${c * 2}Ready = 0; + xTexelC${c * 2 + 1} = vec4(0.0); + xTexelC${c * 2 + 1}Ready = 0; + xC${c} = vec4(0.0);`; + } + mainLoop += ` + xR = xRCorner + r * dilations[0]; + if (xR >=0 && xR < inDims[0]) { + `; + for (let texelC = 0; texelC < (texelsAcross + 1) / 2; texelC++) { + const colIndex = texelC * 2; + mainLoop += ` + xC = xCCorner + ${colIndex * dilationWidth}; + `; + if (strideWidth === 1) { + if (colIndex < filterWidth) { + // If padding is odd, the outer texels have to be composed. + if (padLeft % 2 === 1) { + // TODO: Ensure vec4 previous does not result in redundant sample, + // and avoid setting xTexelRC's that exceed the boundary in the + // first place rather than resetting them to vec4(0)). + // To compute xCOffset: + // - If padding is odd, we must add 1 to ensure we ask for an + // even-numbered row. + // - We subtract 2 to access the previous texel. + mainLoop += ` + xCOffset = xC + 1; + if (xCOffset >= 0 && xCOffset < inDims[1] && xTexelC${colIndex}Ready == 0) { + xTexelC${colIndex} = getX(batch, xR, xCOffset, d1); + + // Need to manually clear unused channels in case + // we're reading from recycled texture. + if (xCOffset + 1 >= inDims[1]) { + xTexelC${colIndex}.zw = vec2(0.0); + } + xTexelC${colIndex}Ready = 1; + } + `; + // This texel has been read in previous iteration if the dilation + // is 1. 
+ if (dilationWidth === 1 && colIndex > 0) { + mainLoop += ` + xC${colIndex} = vec4(xTexelC${colIndex - 2}.zw, xTexelC${colIndex}.xy); + `; + } + else { + mainLoop += ` + xCOffset = xC + 1 - 2; + + if (xCOffset >= 0 && xCOffset < inDims[1]) { + previous = getX(batch, xR, xCOffset, d1); + + // Need to manually clear unused channels in case + // we're reading from recycled texture. + if (xCOffset + 1 >= inDims[1]) { + previous.zw = vec2(0.0); + } + + xC${colIndex} = vec4(previous.zw, xTexelC${colIndex}.xy); + } else { + xC${colIndex} = vec4(0.0, 0.0, xTexelC${colIndex}.xy); + } + `; + } + } + else { + // Padding is even, so xRC corresponds to a single texel. + mainLoop += ` + if (xC >= 0 && xC < inDims[1] && xTexelC${colIndex}Ready == 0) { + xTexelC${colIndex} = getX(batch, xR, xC, d1); + if (xC + 1 >= inDims[1]) { + xTexelC${colIndex}.zw = vec2(0.0); + } + xTexelC${colIndex}Ready = 1; + } + + xC${colIndex} = xTexelC${colIndex}; + `; + } + if (colIndex + 1 < filterWidth) { + // If dilation is even, the second entry should match the first + // (either both are composed or both are single samples). But if + // dilation is odd, then the second entry should be the opposite + // of the first (if the first is composed, the second is a single + // sample, and vice versa.) + const nextTexelOffset = padLeft % 2 === 0 ? + nearestLargerEven(dilationWidth) : + dilationWidth; + if ((dilationWidth % 2 === 0 && padLeft % 2 === 1) || + (dilationWidth % 2 !== 0 && padLeft % 2 !== 1)) { + mainLoop += ` + xCOffset = xC + imod(pads[1], 2) + ${nextTexelOffset}; + + if (xCOffset >= 0 && xCOffset < inDims[1] && xTexelC${colIndex + 1}Ready == 0) { + xTexelC${colIndex + 1} = getX(batch, xR, xCOffset, d1); + + // Need to manually clear unused channels in case + // we're reading from recycled texture. 
+ if (xCOffset + 1 >= inDims[1]) { + xTexelC${colIndex + 1}.zw = vec2(0.0); + } + xTexelC${colIndex + 1}Ready = 1; + } + `; + // If dilation > 1 then the xRC's will not be able to share any + // values, so each xRC will require two unique calls to getX. + if (dilationWidth > 1) { + mainLoop += ` + xCOffset -= 2; + if (xCOffset >= 0 && xCOffset < inDims[1]) { + previous = getX(batch, xR, xCOffset, d1); + xC${colIndex + 1} = vec4(previous.zw, xTexelC${colIndex + 1}.xy); + } else { + xC${colIndex + 1} = vec4(0.0, 0.0, xTexelC${colIndex + 1}.xy); + } + `; + } + else { + mainLoop += ` + xC${colIndex + 1} = vec4(xTexelC${colIndex}.zw, xTexelC${colIndex + 1}.xy); + `; + } + } + else { + // If dilation is 1 and padding is odd, we have already read the + // texel when constructing the previous x value. Here we can + // simply skip the texture read. + if (nextTexelOffset === 1) { + mainLoop += ` + xC${colIndex + 1} = xTexelC${colIndex}; + `; + } + else { + mainLoop += ` + xCOffset = xC + ${nextTexelOffset}; + + if (xCOffset >= 0 && xCOffset < inDims[1] && xTexelC${colIndex + 1}Ready == 0) { + xTexelC${colIndex + 1} = getX(batch, xR, xCOffset, d1); + if (xCOffset + 1 >= inDims[1]) { + xTexelC${colIndex + 1}.zw = vec2(0.0); + } + xTexelC${colIndex + 1}Ready = 1; + } + + xC${colIndex + 1} = xTexelC${colIndex + 1}; + `; + } + } + } + } + } + else { // stride === 2 + if (colIndex < filterWidth) { + // Depending on whether padLeft is even or odd, we want either the + // xy or zw channels from X texels for xC${colIndex}. If padLeft is + // even, xC${colIndex +1} is simply the zw channels of texels we've + // already sampled. But if padLeft is odd, xC{$c + 1}.zw will + // need to come from the xy channels of a new texel, hence the ` + // vec4 + // final` initialized below. 
+ if (padLeft % 2 === 1) { + mainLoop += ` + xCOffset = xC + 1 - strides[1]; + if(xCOffset >= 0 && xCOffset < inDims[1] && xTexelC${colIndex}Ready == 0) { + xTexelC${colIndex} = getX(batch, xR, xCOffset, d1); + // Need to manually clear unused channels in case + // we're reading from recycled texture. + if (xCOffset + 1 >= inDims[1]) { + xTexelC${colIndex}.zw = vec2(0.0); + } + xTexelC${colIndex}Ready = 1; + } + + if(xC + 1 >= 0 && xC + 1 < inDims[1] && xTexelC${colIndex + 1}Ready == 0) { + xTexelC${colIndex + 1} = getX(batch, xR, xC + 1, d1); + // Need to manually clear unused channels in case + // we're reading from recycled texture. + if (xC + 2 >= inDims[1]) { + xTexelC${colIndex + 1}.zw = vec2(0.0); + } + xTexelC${colIndex + 1}Ready = 1; + } + + xC${colIndex} = vec4(xTexelC${colIndex}.zw, xTexelC${colIndex + 1}.zw); + `; + if (colIndex + 1 < filterWidth) { + mainLoop += ` + final = vec4(0.0); + xCOffset = xC + 1 + strides[1]; + if(xCOffset >= 0 && xCOffset < inDims[1]) { + final = getX(batch, xR, xCOffset, d1); + } + xC${colIndex + 1} = vec4(xTexelC${colIndex + 1}.xy, final.xy); + `; + } + } + else { + mainLoop += ` + if(xC >= 0 && xC < inDims[1] && xTexelC${colIndex}Ready == 0) { + xTexelC${colIndex} = getX(batch, xR, xC, d1); + if (xC + 1 >= inDims[1]) { + xTexelC${colIndex}.zw = vec2(0.0); + } + xTexelC${colIndex}Ready = 1; + } + + xCOffset = xC + strides[1]; + if(xCOffset >= 0 && xCOffset < inDims[1] && xTexelC${colIndex + 1}Ready == 0) { + xTexelC${colIndex + 1} = getX(batch, xR, xCOffset, d1); + if (xCOffset + 1 >= inDims[1]) { + xTexelC${colIndex + 1}.zw = vec2(0.); + } + xTexelC${colIndex + 1}Ready = 1; + } + + xC${colIndex} = vec4( + xTexelC${colIndex}.xy, xTexelC${colIndex + 1}.xy); + `; + if (colIndex + 1 < filterWidth) { + mainLoop += ` + xC${colIndex + 1} = vec4(xTexelC${colIndex}.zw, xTexelC${colIndex + 1}.zw); + `; + } + } + } + } + // localize the dotProd accumulation within the loop, the theory is for + // GPU with limited cache, accumulate 
sum across large amount of + // veriables will cause lots of cache misses. (i.e. 5x5 filter will have + // 50 variables) + if (colIndex < filterWidth) { + mainLoop += ` + wTexel = getW(r, ${colIndex}, d1, d2); + dotProd += xC${colIndex}.xxzz * vec4(wTexel.xy, wTexel.xy); + if(d1 + 1 < ${convInfo.inChannels}) { + dotProd += xC${colIndex}.yyww * vec4(wTexel.zw, wTexel.zw); + } + `; + if (colIndex + 1 < filterWidth) { + mainLoop += ` + wTexel = getW(r, ${colIndex + 1}, d1, d2); + dotProd += xC${colIndex + 1}.xxzz * vec4(wTexel.xy, wTexel.xy); + if(d1 + 1 < ${convInfo.inChannels}) { + dotProd += xC${colIndex + 1}.yyww * vec4(wTexel.zw, wTexel.zw); + } + `; + } + } + } + mainLoop += ` + } + `; + mainLoop += ` + } + `; + mainLoop += ` + } + `; + let activationSnippet = '', applyActivationSnippet = ''; + if (activation) { + if (hasPreluActivation) { + activationSnippet = `vec4 activation(vec4 a) { + vec4 b = getPreluActivationWeightsAtOutCoords(); + ${activation} + }`; + } + else if (hasLeakyReluAlpha) { + activationSnippet = `vec4 activation(vec4 a) { + vec4 b = getLeakyreluAlphaAtOutCoords(); + ${activation} + }`; + } + else { + activationSnippet = `vec4 activation(vec4 x) { + ${activation} + }`; + } + applyActivationSnippet = `result = activation(result);`; + } + const addBiasSnippet = addBias ? 'result += getBiasAtOutCoords();' : ''; + if (addBias) { + this.variableNames.push('bias'); + } + if (hasPreluActivation) { + this.variableNames.push('preluActivationWeights'); + } + if (hasLeakyReluAlpha) { + this.variableNames.push('leakyreluAlpha'); + } + this.userCode = ` + ${activationSnippet} + + void main() { + ivec4 coords = getOutputCoords(); + int batch = coords.x; + ivec2 xRCCorner = coords.yz * strides - pads; + int d2 = coords.w; + int xRCorner = xRCCorner.x; + int xCCorner = xRCCorner.y; + + //intialize dotProd with a small epsilon seems to reduce GPU accuracy loss. 
+ vec4 dotProd = vec4(0.000000000000001); + + ${mainLoop} + + vec4 result = dotProd - vec4(0.000000000000001); + ${addBiasSnippet} + ${applyActivationSnippet} + setOutput(result); + } + `; + } + } + + /** + * @license + * Copyright 2019 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + class Im2ColPackedProgram { + constructor(outputShape, convInfo) { + this.variableNames = ['A']; + this.packedInputs = true; + this.packedOutput = true; + this.customUniforms = [ + { name: 'inputShape', type: 'ivec4' }, + { name: 'pad', type: 'ivec2' }, + { name: 'stride', type: 'ivec2' }, + { name: 'dilation', type: 'ivec2' }, + { name: 'inChannels', type: 'int' }, + { name: 'itemsPerBlockRow', type: 'int' }, + { name: 'outWidth', type: 'int' }, + ]; + this.outputShape = outputShape; + this.enableShapeUniforms = useShapeUniforms(this.outputShape.length); + const { dataFormat } = convInfo; + const glsl = getGlslDifferences(); + const isChannelsLast = dataFormat === 'channelsLast'; + const rowDim = isChannelsLast ? 1 : 2; + const colDim = isChannelsLast ? 2 : 3; + const boundsCheckingSnippet = this.enableShapeUniforms ? 
+ 'if(blockIndex < outShape[2] && pos < outShape[1]) {' : + `if(blockIndex < ${outputShape[2]} && pos < ${outputShape[1]}) {`; + let unrolled = ``; + for (let row = 0; row <= 1; row++) { + for (let col = 0; col <= 1; col++) { + unrolled += ` + blockIndex = rc.z + ${col}; + pos = rc.y + ${row}; + + ${boundsCheckingSnippet} + offsetY = int(blockIndex / outWidth) * stride[0] - pad[0]; + d0 = offsetY + dilation[0] * (pos / itemsPerBlockRow); + + if(d0 < inputShape[${rowDim}] && d0 >= 0) { + // Use custom imod instead mod. On Intel GPU, mod may generate + // unexpected value. + // https://github.com/tensorflow/tfjs/issues/5447 + offsetX = imod(blockIndex, outWidth) * stride[1] - pad[1]; + d1 = offsetX + dilation[1] * (imod(pos, itemsPerBlockRow) / + inChannels); + + if(d1 < inputShape[${colDim}] && d1 >= 0) { + + ch = imod(pos, inChannels); + + if (${isChannelsLast}) { + innerDims = vec2(d1, ch); + result[${row * 2 + col}] = getChannel( + getA(rc.x, d0, int(innerDims.x), + int(innerDims.y)), innerDims); + } else { + innerDims = vec2(d0, d1); + result[${row * 2 + col}] = getChannel( + getA(rc.x, ch, int(innerDims.x), + int(innerDims.y)), innerDims); + } + } + } + } + `; + } + } + this.userCode = ` + void main() { + ivec3 rc = getOutputCoords(); + + vec4 result = vec4(0); + + int blockIndex, pos, offsetY, d0, offsetX, d1, ch; + vec2 innerDims; + + ${unrolled} + + ${glsl.output} = result; + } + `; + } + } + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + // Both conv2dByMatMul and conv2dWithIm2Row fuse height and width into one + // dimension to compute batchMatMul, so bias and activation weights are also + // supposed to fuse the two dimensions into one. + // + // This function computes the target shape for fusing height and width + // dimensions. Returning null means the shape is already compatible. + // + // Even though the bias is not supposed to be a 3-D or a 4-D (including + // batch) tensor and PReLU activiation weights is not supposed to be a 4-D + // tensor, we still need to support them, because we haven't disabled + // them for NHWC format. + // https://github.com/tensorflow/tfjs/blob/b53bd47e880367ae57493f0ea628abaf08db2d5d/tfjs-core/src/ops/fused/conv2d.ts#L181-L196 + function getShapeForBatchMatMul(shape, isChannelsLast) { + const length = shape.length; + if (length >= 3) { + return isChannelsLast ? + [ + ...shape.slice(0, -3) /* batch */, + shape[length - 3] * shape[length - 2] /* height * width */, + shape[length - 1] /* channel */ + ] : + [ + ...shape.slice(0, -3) /* batch */, shape[length - 3] /* channel */, + shape[length - 2] * shape[length - 1] /* height * width */ + ]; + } + else if (!isChannelsLast && length === 1 && shape[0] > 1) { + return [shape[0], 1]; + } + else { + return null; + } + } + // For 1x1 kernels that iterate through every point in the input, convolution + // can be expressed as matrix multiplication (without need for memory + // remapping). + function conv2dByMatMul({ x, filter, convInfo, backend, bias = null, preluActivationWeights = null, leakyreluAlpha = 0, activation = null }) { + // Reshapes conv2D input to 2D tensors, uses matMul and then reshape the + // result from 2D to 4D. 
+ const xShape = x.shape; + const xTexData = backend.texData.get(x.dataId); + const sharedMatMulDim = convInfo.inChannels; + const outerShapeX = xShape[0] * xShape[1] * xShape[2]; + const outerShapeFilter = convInfo.outChannels; + const isChannelsLast = convInfo.dataFormat === 'channelsLast'; + const transposeA = false; + const transposeB = false; + let out; + const intermediates = []; + if (preluActivationWeights != null) { + const targetShape = getShapeForBatchMatMul(preluActivationWeights.shape, isChannelsLast); + if (targetShape != null) { + preluActivationWeights = reshape({ + inputs: { x: preluActivationWeights }, + backend, + attrs: { shape: targetShape } + }); + intermediates.push(preluActivationWeights); + } + } + if (bias != null) { + const targetShape = getShapeForBatchMatMul(bias.shape, isChannelsLast); + if (targetShape != null) { + bias = reshape({ inputs: { x: bias }, backend, attrs: { shape: targetShape } }); + intermediates.push(bias); + } + } + // TODO: Once reduction ops are packed, batchMatMul will always be packed + // and we can remove this condition. + const batchMatMulWillBeUnpacked = (outerShapeX === 1 || outerShapeFilter === 1) && + sharedMatMulDim > MATMUL_SHARED_DIM_THRESHOLD; + // The algorithm in the if condition assumes (1) the output will be packed, + // (2) x is packed, (3) x isChannelsLast, (4) x's packed texture is already + // on GPU, (5) col is odd, (6) the width, height and inChannels are the same + // for xTexData.shape and xShape. + const canOptimize = !batchMatMulWillBeUnpacked && xTexData.isPacked && + isChannelsLast && xTexData.texture != null && xShape[2] % 2 !== 0 && + arraysEqual(xTexData.shape.slice(-3), xShape.slice(-3)); + if (canOptimize) { + // We avoid expensive packed 2x2 reshape by padding col count to next, + // even number. When col is odd, the result of packed batchMatMul is + // the same (has the same texture layout and and values in the texture) as + // it is for next even col. 
We make the odd-cols tensor to look like + // even-cols tensor before the operation and, after the batchMatMul, + // fix the even-cols result to have odd number of cols. + const targetShape = xShape[0] * xShape[1] * (xShape[2] + 1); + const xReshaped = { + dataId: x.dataId, + shape: [1, targetShape, convInfo.inChannels], + dtype: x.dtype + }; + // xTexData.shape gets referenced from GPGPUBinary.inShapeInfos. + // Decrementing col count, after batchMatMul->...->compileProgram leads to + // invalid col count within the reference in GPGPUBinary.inShapeInfos. + // Alternative fix would be to provide a copy to GPGPUBinary.inShapeInfos + // in compileProgram method, but that would affect compilation of all + // programs - instead, provide a copy here, with even col count, before + // calling batchMatMul->...->compileProgram and after that, the original + // xTexData.shape is restored. + const originalXTexDataShape = xTexData.shape; + xTexData.shape = xTexData.shape.slice(); + xTexData.shape[xTexData.shape.length - 2]++; + assert$1(isReshapeFree(xTexData.shape, xReshaped.shape), () => `packed reshape ${xTexData.shape} to ${xReshaped.shape} isn't free`); + const filterReshaped = reshape({ + inputs: { x: filter }, + backend, + attrs: { shape: [1, convInfo.inChannels, convInfo.outChannels] } + }); + intermediates.push(filterReshaped); + const pointwiseConv = batchMatMulImpl({ + a: xReshaped, + b: filterReshaped, + backend, + transposeA, + transposeB, + bias, + activation, + preluActivationWeights, + leakyreluAlpha + }); + const pointwiseConvTexData = backend.texData.get(pointwiseConv.dataId); + assert$1(pointwiseConvTexData.isPacked, () => 'batchMatMul result is expected to be packed'); + // Restore the input shape to original. + xTexData.shape = originalXTexDataShape; + // Set the output shape - there is no need for expensive reshape as data + // layout is already correct. 
+ pointwiseConvTexData.shape = convInfo.outShape; + out = identity({ inputs: { x: pointwiseConv }, backend }); + out.shape = convInfo.outShape; + intermediates.push(pointwiseConv); + } + else { + const numCols = convInfo.outHeight * convInfo.outWidth; + const xReshaped = reshape({ + inputs: { x }, + backend, + attrs: { + shape: isChannelsLast ? + [convInfo.batchSize, numCols, convInfo.inChannels] : + [convInfo.batchSize, convInfo.inChannels, numCols] + } + }); + const filterReshaped = reshape({ + inputs: { x: filter }, + backend, + attrs: { shape: [1, convInfo.inChannels, convInfo.outChannels] } + }); + const result = batchMatMulImpl({ + a: isChannelsLast ? xReshaped : filterReshaped, + b: isChannelsLast ? filterReshaped : xReshaped, + transposeA: !isChannelsLast, + transposeB, + backend, + bias, + activation, + preluActivationWeights, + leakyreluAlpha + }); + out = reshape({ inputs: { x: result }, backend, attrs: { shape: convInfo.outShape } }); + intermediates.push(xReshaped); + intermediates.push(filterReshaped); + intermediates.push(result); + } + for (const i of intermediates) { + backend.disposeIntermediateTensorInfo(i); + } + return out; + } + // Implements the im2row algorithm as outlined in "High Performance + // Convolutional Neural Networks for Document Processing" (Suvisoft, 2006) + function conv2dWithIm2Row({ x, filter, convInfo, backend, bias = null, preluActivationWeights = null, leakyreluAlpha = 0, activation = null }) { + // Rearranges conv2d input so each block to be convolved over forms the + // column of a new matrix with shape [filterWidth * filterHeight * + // inChannels, outHeight * outWidth]. The filter is also rearranged so each + // output channel forms a row of a new matrix with shape [outChannels, + // filterWidth * filterHeight * inChannels]. The convolution is then + // computed by multiplying these matrices and reshaping the result. 
+ const { filterWidth, filterHeight, inChannels, outWidth, outHeight, dataFormat } = convInfo; + const isChannelsLast = dataFormat === 'channelsLast'; + const sharedDim = filterWidth * filterHeight * inChannels; + const numCols = outHeight * outWidth; + const x2ColShape = [convInfo.batchSize, sharedDim, numCols]; + const transposeA = true; + const transposeB = false; + const intermediates = []; + if (preluActivationWeights != null) { + const targetShape = getShapeForBatchMatMul(preluActivationWeights.shape, isChannelsLast); + if (targetShape != null) { + preluActivationWeights = reshape({ + inputs: { x: preluActivationWeights }, + backend, + attrs: { shape: targetShape } + }); + intermediates.push(preluActivationWeights); + } + } + if (bias != null) { + const targetShape = getShapeForBatchMatMul(bias.shape, isChannelsLast); + if (targetShape != null) { + bias = reshape({ inputs: { x: bias }, backend, attrs: { shape: targetShape } }); + intermediates.push(bias); + } + } + const w2Row = reshape({ + inputs: { x: filter }, + backend, + attrs: { shape: [1, sharedDim, sizeFromShape(filter.shape) / sharedDim] } + }); + intermediates.push(w2Row); + const im2ColProgram = new Im2ColPackedProgram(x2ColShape, convInfo); + const customValues = [ + x.shape, [convInfo.padInfo.top, convInfo.padInfo.left], + [convInfo.strideHeight, convInfo.strideWidth], + [convInfo.dilationHeight, convInfo.dilationWidth], [convInfo.inChannels], + [convInfo.filterWidth * convInfo.inChannels], [convInfo.outWidth] + ]; + const im2Col = backend.runWebGLProgram(im2ColProgram, [x], 'float32', customValues); + const im2ColReshaped = reshape({ inputs: { x: im2Col }, backend, attrs: { shape: x2ColShape } }); + intermediates.push(im2Col); + intermediates.push(im2ColReshaped); + const hasBias = bias != null; + const hasPreluActivationWeights = preluActivationWeights != null; + const hasLeakyreluAlpha = activation === 'leakyrelu'; + const fusedActivation = activation ? 
mapActivationToShaderProgram(activation, true) : null; + const matmulProgram = new MatMulPackedProgram(isChannelsLast ? im2ColReshaped.shape : + w2Row.shape, isChannelsLast ? w2Row.shape : + im2ColReshaped.shape, isChannelsLast ? [convInfo.batchSize, numCols, convInfo.outChannels] : + [convInfo.batchSize, convInfo.outChannels, numCols], transposeA, transposeB, hasBias, fusedActivation, hasPreluActivationWeights, hasLeakyreluAlpha); + const inputs = isChannelsLast ? [im2ColReshaped, w2Row] : [w2Row, im2ColReshaped]; + if (bias) { + inputs.push(bias); + } + if (hasPreluActivationWeights) { + inputs.push(preluActivationWeights); + } + if (hasLeakyreluAlpha) { + const $leakyreluAlpha = backend.makeTensorInfo([], 'float32', createScalarValue(leakyreluAlpha, 'float32')); + inputs.push($leakyreluAlpha); + intermediates.push($leakyreluAlpha); + } + const product = backend.runWebGLProgram(matmulProgram, inputs, 'float32'); + const out = reshape({ inputs: { x: product }, backend, attrs: { shape: convInfo.outShape } }); + intermediates.push(product); + for (const i of intermediates) { + backend.disposeIntermediateTensorInfo(i); + } + return out; + } + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ + function conv2d(args) { + const { inputs, backend, attrs } = args; + const { x, filter } = inputs; + const { strides, pad, dataFormat, dilations, dimRoundingMode } = attrs; + const $dataFormat = convertConv2DDataFormat(dataFormat); + const convInfo = computeConv2DInfo(x.shape, filter.shape, strides, dilations, pad, dimRoundingMode, false /* depthwise */, $dataFormat); + let out; + if (convInfo.filterHeight === 1 && convInfo.filterWidth === 1 && + convInfo.dilationHeight === 1 && convInfo.dilationWidth === 1 && + convInfo.strideHeight === 1 && convInfo.strideWidth === 1 && + (convInfo.padInfo.type === 'SAME' || convInfo.padInfo.type === 'VALID')) { + out = conv2dByMatMul({ x, filter, convInfo, backend }); + } + else if (convInfo.strideWidth <= 2 && $dataFormat === 'channelsLast' + && env().getBool('WEBGL_EXP_CONV')) { + const program = new Conv2DPackedProgram(convInfo); + const customValues = [ + [convInfo.padInfo.top, convInfo.padInfo.left], + [convInfo.strideHeight, convInfo.strideWidth], + [convInfo.dilationHeight, convInfo.dilationWidth], + [convInfo.inHeight, convInfo.inWidth] + ]; + out = + backend.runWebGLProgram(program, [x, filter], 'float32', customValues); + } + else if (env().getBool('WEBGL_CONV_IM2COL')) { + out = conv2dWithIm2Row({ x, filter, convInfo, backend }); + } + else { + const program = new Conv2DProgram(convInfo); + out = backend.runWebGLProgram(program, [x, filter], 'float32'); + } + const outReshaped = reshape({ inputs: { x: out }, backend, attrs: { shape: convInfo.outShape } }); + backend.disposeIntermediateTensorInfo(out); + return outReshaped; + } + const conv2DConfig = { + kernelName: Conv2D$1, + backendName: 'webgl', + kernelFunc: conv2d, + }; + + /** + * @license + * Copyright 2017 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + class Conv2DDerFilterProgram { + constructor(convInfo) { + this.variableNames = ['x', 'dy']; + this.outputShape = convInfo.filterShape; + const strideHeight = convInfo.strideHeight; + const strideWidth = convInfo.strideWidth; + const padTop = convInfo.padInfo.top; + const padLeft = convInfo.padInfo.left; + const isChannelsLast = convInfo.dataFormat === 'channelsLast'; + this.userCode = ` + void main() { + ivec4 coords = getOutputCoords(); + int wR = coords.x; + int wC = coords.y; + int d1 = coords.z; + int d2 = coords.w; + + // Convolve x(?, ?, d1) with dy(:, :, d2) to get dw(wR, wC, d1, d2). + // ? = to be determined. : = across all values in that axis. + float dotProd = 0.0; + + for (int b = 0; b < ${convInfo.batchSize}; b++) { + for (int yR = 0; yR < ${convInfo.outHeight}; yR++) { + int xR = wR + yR * ${strideHeight} - ${padTop}; + + if (xR < 0 || xR >= ${convInfo.inHeight}) { + continue; + } + + for (int yC = 0; yC < ${convInfo.outWidth}; yC++) { + int xC = wC + yC * ${strideWidth} - ${padLeft}; + + if (xC < 0 || xC >= ${convInfo.inWidth}) { + continue; + } + + ${isChannelsLast ? 
+ `float dyValue = getDy(b, yR, yC, d2); + float xValue = getX(b, xR, xC, d1); + dotProd += (xValue * dyValue);` : + `float dyValue = getDy(b, d2, yR, yC); + float xValue = getX(b, d1, xR, xC); + dotProd += (xValue * dyValue);`} + } + } + } + setOutput(dotProd); + } + `; + } + } + class Conv2DDerInputProgram { + constructor(convInfo) { + this.variableNames = ['dy', 'W']; + this.outputShape = convInfo.inShape; + const filterHeight = convInfo.filterHeight; + const filterWidth = convInfo.filterWidth; + const strideHeight = convInfo.strideHeight; + const strideWidth = convInfo.strideWidth; + const isChannelsLast = convInfo.dataFormat === 'channelsLast'; + const padTop = filterHeight - 1 - convInfo.padInfo.top; + const padLeft = filterWidth - 1 - convInfo.padInfo.left; + const rowDim = isChannelsLast ? 1 : 2; + const colDim = isChannelsLast ? 2 : 3; + const channelDim = isChannelsLast ? 3 : 1; + this.userCode = ` + const ivec2 pads = ivec2(${padTop}, ${padLeft}); + + void main() { + ivec4 coords = getOutputCoords(); + int batch = coords[0]; + int d1 = coords[${channelDim}]; + + ivec2 dyCorner = ivec2(coords[${rowDim}], coords[${colDim}]) - pads; + int dyRCorner = dyCorner.x; + int dyCCorner = dyCorner.y; + + // Convolve dy(?, ?, d2) with w(:, :, d1, d2) to compute dx(xR, xC, d1). + // ? = to be determined. : = across all values in that axis. 
+ float dotProd = 0.0; + for (int wR = 0; wR < ${filterHeight}; wR++) { + float dyR = float(dyRCorner + wR) / ${strideHeight}.0; + + if (dyR < 0.0 || dyR >= ${convInfo.outHeight}.0 || fract(dyR) > 0.0) { + continue; + } + int idyR = int(dyR); + + int wRPerm = ${filterHeight} - 1 - wR; + + for (int wC = 0; wC < ${filterWidth}; wC++) { + float dyC = float(dyCCorner + wC) / ${strideWidth}.0; + + if (dyC < 0.0 || dyC >= ${convInfo.outWidth}.0 || + fract(dyC) > 0.0) { + continue; + } + int idyC = int(dyC); + + int wCPerm = ${filterWidth} - 1 - wC; + + for (int d2 = 0; d2 < ${convInfo.outChannels}; d2++) { + + if (${isChannelsLast}) { + float xValue = getDy(batch, idyR, idyC, d2); + float wValue = getW(wRPerm, wCPerm, d1, d2); + dotProd += xValue * wValue; + } else { + float xValue = getDy(batch, d2, idyR, idyC); + float wValue = getW(wRPerm, wCPerm, d1, d2); + dotProd += xValue * wValue; + } + + } + } + } + setOutput(dotProd); + } + `; + } + } + class Conv3DDerFilterProgram { + constructor(convInfo) { + this.variableNames = ['x', 'dy']; + this.outputShape = convInfo.filterShape; + const strideDepth = convInfo.strideDepth; + const strideHeight = convInfo.strideHeight; + const strideWidth = convInfo.strideWidth; + const padFront = convInfo.padInfo.front; + const padTop = convInfo.padInfo.top; + const padLeft = convInfo.padInfo.left; + this.userCode = ` + void main() { + ivec5 coords = getOutputCoords(); + int wF = coords.x; + int wR = coords.y; + int wC = coords.z; + int d1 = coords.w; + int d2 = coords.u; + + float dotProd = 0.0; + + for (int b = 0; b < ${convInfo.batchSize}; b++) { + for (int yF = 0; yF < ${convInfo.outDepth}; yF++) { + int xF = wF + yF * ${strideDepth} - ${padFront}; + + if (xF < 0 || xF >= ${convInfo.inDepth}) { + continue; + } + + for (int yR = 0; yR < ${convInfo.outHeight}; yR++) { + int xR = wR + yR * ${strideHeight} - ${padTop}; + + if (xR < 0 || xR >= ${convInfo.inHeight}) { + continue; + } + + for (int yC = 0; yC < ${convInfo.outWidth}; yC++) { 
+ int xC = wC + yC * ${strideWidth} - ${padLeft}; + + if (xC < 0 || xC >= ${convInfo.inWidth}) { + continue; + } + + float dyValue = getDy(b, yF, yR, yC, d2); + float xValue = getX(b, xF, xR, xC, d1); + dotProd += (xValue * dyValue); + } + } + } + } + setOutput(dotProd); + } + `; + } + } + class Conv3DDerInputProgram { + constructor(convInfo) { + this.variableNames = ['dy', 'W']; + this.outputShape = convInfo.inShape; + const filterDepth = convInfo.filterDepth; + const filterHeight = convInfo.filterHeight; + const filterWidth = convInfo.filterWidth; + const strideDepth = convInfo.strideDepth; + const strideHeight = convInfo.strideHeight; + const strideWidth = convInfo.strideWidth; + const padFront = filterDepth - 1 - convInfo.padInfo.front; + const padTop = filterHeight - 1 - convInfo.padInfo.top; + const padLeft = filterWidth - 1 - convInfo.padInfo.left; + this.userCode = ` + const ivec3 pads = ivec3(${padFront}, ${padTop}, ${padLeft}); + + void main() { + ivec5 coords = getOutputCoords(); + int batch = coords.x; + int d1 = coords.u; + + + ivec3 dyCorner = ivec3(coords.y, coords.z, coords.w) - pads; + int dyFCorner = dyCorner.x; + int dyRCorner = dyCorner.y; + int dyCCorner = dyCorner.z; + + float dotProd = 0.0; + for (int wF = 0; wF < ${filterDepth}; wF++) { + float dyF = float(dyFCorner + wF) / ${strideDepth}.0; + + if (dyF < 0.0 || dyF >= ${convInfo.outDepth}.0 || fract(dyF) > 0.0) { + continue; + } + int idyF = int(dyF); + + int wFPerm = ${filterDepth} - 1 - wF; + + for (int wR = 0; wR < ${filterHeight}; wR++) { + float dyR = float(dyRCorner + wR) / ${strideHeight}.0; + + if (dyR < 0.0 || dyR >= ${convInfo.outHeight}.0 || + fract(dyR) > 0.0) { + continue; + } + int idyR = int(dyR); + + int wRPerm = ${filterHeight} - 1 - wR; + + for (int wC = 0; wC < ${filterWidth}; wC++) { + float dyC = float(dyCCorner + wC) / ${strideWidth}.0; + + if (dyC < 0.0 || dyC >= ${convInfo.outWidth}.0 || + fract(dyC) > 0.0) { + continue; + } + int idyC = int(dyC); + + int wCPerm = 
${filterWidth} - 1 - wC; + + for (int d2 = 0; d2 < ${convInfo.outChannels}; d2++) { + float xValue = getDy(batch, idyF, idyR, idyC, d2); + float wValue = getW(wFPerm, wRPerm, wCPerm, d1, d2); + dotProd += xValue * wValue; + } + } + } + } + setOutput(dotProd); + } + `; + } + } + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + function conv2DBackpropFilter(args) { + const { inputs, backend, attrs } = args; + const { x, dy } = inputs; + const { strides, pad, dataFormat, dimRoundingMode, filterShape } = attrs; + const $dataFormat = convertConv2DDataFormat(dataFormat); + const convInfo = computeConv2DInfo(x.shape, filterShape, strides, 1 /* dilations */, pad, dimRoundingMode, false /* depthwise */, $dataFormat); + const program = new Conv2DDerFilterProgram(convInfo); + return backend.runWebGLProgram(program, [x, dy], 'float32'); + } + const conv2DBackpropFilterConfig = { + kernelName: Conv2DBackpropFilter, + backendName: 'webgl', + kernelFunc: conv2DBackpropFilter, + }; + + /** + * @license + * Copyright 2023 Google LLC. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + class Conv2DDerInputPackedProgram { + constructor(convInfo) { + this.variableNames = ['dy', 'W']; + this.packedInputs = true; + this.packedOutput = true; + this.customUniforms = [ + { name: 'strides', type: 'vec2' }, + ]; + this.outputShape = convInfo.inShape; + this.enableShapeUniforms = useShapeUniforms(this.outputShape.length); + const filterHeight = convInfo.filterHeight; + const filterWidth = convInfo.filterWidth; + const padTop = filterHeight - 1 - convInfo.padInfo.top; + const padLeft = filterWidth - 1 - convInfo.padInfo.left; + this.userCode = ` + const ivec2 pads = ivec2(${padTop}, ${padLeft}); + + void main() { + ivec4 coords = getOutputCoords(); + int batch = coords[0]; + int d1 = coords[3]; + + ivec2 dyCorner = ivec2(coords[1], coords[2]) - pads; + int dyRCorner = dyCorner.x; + int dyCCorner = dyCorner.y; + + vec4 result = vec4(0.); + for (int wR = 0; wR < ${filterHeight}; wR++) { + float dyR = float(dyRCorner + wR) / strides[0]; + if (dyR < 0.0 || dyR >= ${convInfo.outHeight}.0 || fract(dyR) > 0.0) { + continue; + } + int idyR = int(dyR); + int wRPerm = ${filterHeight} - 1 - wR; + + for (int wC = 0; wC < ${filterWidth}; wC++) { + int wCPerm = ${filterWidth} - 1 - wC; + + float dyC = float(dyCCorner + wC) / strides[1]; + bool idyCVal = (dyC >= 0.0) && (dyC < ${convInfo.outWidth}.0) + && (fract(dyC) == 0.0); + int idyC = int(dyC); + + float dyC2 = float(dyCCorner + wC + 1) / strides[1]; + bool idyCVal2 = (dyC2 >= 0.0) && (dyC2 < 
${convInfo.outWidth}.0) + && (fract(dyC2) == 0.0); + int idyC2 = int(dyC2); + + if (idyCVal && idyCVal2) { + for (int d2 = 0; d2 < ${convInfo.outChannels}; d2 += 2) { + vec4 wValue = getW(wRPerm, wCPerm, d1, d2); + vec4 dySample = getDy(batch, idyR, idyC, d2); + vec4 dySample2 = (idyC / 2 == idyC2 / 2) ? + dySample : getDy(batch, idyR, idyC2, d2); + + vec2 dyValue = mod(float(idyC), 2.) == 0. ? + dySample.xy : dySample.zw; + result.xy += vec2(dot(dyValue, wValue.xy), + dot(dyValue, wValue.zw)); + + dyValue = mod(float(idyC2), 2.) == 0. ? + dySample2.xy : dySample2.zw; + result.zw += vec2(dot(dyValue, wValue.xy), + dot(dyValue, wValue.zw)); + } + } else if (idyCVal) { + for (int d2 = 0; d2 < ${convInfo.outChannels}; d2 += 2) { + vec4 wValue = getW(wRPerm, wCPerm, d1, d2); + vec4 dySample = getDy(batch, idyR, idyC, d2); + vec2 dyValue = mod(float(idyC), 2.) == 0. ? + dySample.xy : dySample.zw; + result.xy += vec2(dot(dyValue, wValue.xy), + dot(dyValue, wValue.zw)); + } + } else if (idyCVal2) { + for (int d2 = 0; d2 < ${convInfo.outChannels}; d2 += 2) { + vec4 wValue = getW(wRPerm, wCPerm, d1, d2); + vec4 dySample = getDy(batch, idyR, idyC2, d2); + vec2 dyValue = mod(float(idyC2), 2.) == 0. ? + dySample.xy : dySample.zw; + result.zw += vec2(dot(dyValue, wValue.xy), + dot(dyValue, wValue.zw)); + } + } + } + } + setOutput(result); + } + `; + } + } + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ + function conv2DBackpropInput(args) { + const { inputs, backend, attrs } = args; + const { dy, filter } = inputs; + const { inputShape, strides, pad, dataFormat, dimRoundingMode } = attrs; + const $dataFormat = convertConv2DDataFormat(dataFormat); + const convInfo = computeConv2DInfo(inputShape, filter.shape, strides, 1 /* dilations */, pad, dimRoundingMode, false, $dataFormat); + if (env().getBool('WEBGL_PACK_CONV2DTRANSPOSE') && + $dataFormat === 'channelsLast') { + const customValues = [ + [convInfo.strideHeight, convInfo.strideWidth], + ]; + const program = new Conv2DDerInputPackedProgram(convInfo); + return backend.runWebGLProgram(program, [dy, filter], 'float32', customValues); + } + else { + const program = new Conv2DDerInputProgram(convInfo); + return backend.runWebGLProgram(program, [dy, filter], 'float32'); + } + } + const conv2DBackpropInputConfig = { + kernelName: Conv2DBackpropInput, + backendName: 'webgl', + kernelFunc: conv2DBackpropInput, + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ + function conv3D(args) { + const { inputs, backend, attrs } = args; + const { x, filter } = inputs; + const { strides, pad, dilations } = attrs; + const convInfo = computeConv3DInfo(x.shape, filter.shape, strides, dilations, pad); + const program = new Conv3DProgram(convInfo); + return backend.runWebGLProgram(program, [x, filter], 'float32'); + } + const conv3DConfig = { + kernelName: Conv3D$1, + backendName: 'webgl', + kernelFunc: conv3D, + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + function conv3DBackpropFilterV2(args) { + const { inputs, backend, attrs } = args; + const { x, dy } = inputs; + const { strides, pad, filterShape } = attrs; + const convInfo = computeConv3DInfo(x.shape, filterShape, strides, 1 /* dilations */, pad); + const program = new Conv3DDerFilterProgram(convInfo); + return backend.runWebGLProgram(program, [x, dy], 'float32'); + } + const conv3DBackpropFilterV2Config = { + kernelName: Conv3DBackpropFilterV2, + backendName: 'webgl', + kernelFunc: conv3DBackpropFilterV2 + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + function conv3DBackpropInput(args) { + const { inputs, backend, attrs } = args; + const { dy, filter } = inputs; + const { pad, strides, inputShape } = attrs; + const convInfo = computeConv3DInfo(inputShape, filter.shape, strides, 1 /* dilations */, pad); + const program = new Conv3DDerInputProgram(convInfo); + return backend.runWebGLProgram(program, [dy, filter], 'float32'); + } + const conv3DBackpropInputConfig = { + kernelName: Conv3DBackpropInputV2, + backendName: 'webgl', + kernelFunc: conv3DBackpropInput, + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ + const COS = CHECK_NAN_SNIPPET_UNARY + ` + return cos(x); +`; + const COS_PACKED = ` + vec4 result = cos(x); + bvec4 isNaN = isnan(x); + ${CHECK_NAN_SNIPPET_PACKED} + return result; +`; + const cos = unaryKernelFunc({ opSnippet: COS, packedOpSnippet: COS_PACKED }); + const cosConfig = { + kernelName: Cos, + backendName: 'webgl', + kernelFunc: cos, + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + const COSH = ` + float e2x = exp(-x); + return (e2x + 1.0 / e2x) / 2.0; +`; + const cosh = unaryKernelFunc({ opSnippet: COSH }); + const coshConfig = { + kernelName: Cosh, + backendName: 'webgl', + kernelFunc: cosh, + }; + + /** + * @license + * Copyright 2017 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + class CropAndResizeProgram { + constructor(imageShape, boxShape, cropSize, method, extrapolationValue) { + this.variableNames = ['Image', 'Boxes', 'BoxInd']; + this.outputShape = []; + const [batch, imageHeight, imageWidth, depth] = imageShape; + const [numBoxes,] = boxShape; + const [cropHeight, cropWidth] = cropSize; + this.outputShape = [numBoxes, cropHeight, cropWidth, depth]; + const methodId = method === 'bilinear' ? 1 : 0; + const [inputHeightFloat, inputWidthFloat] = [`${imageHeight - 1}.0`, `${imageWidth - 1}.0`]; + const [heightRatio, heightScale, inY] = cropHeight > 1 ? + [ + `${(imageHeight - 1) / (cropHeight - 1)}`, + '(y2-y1) * height_ratio', + `y1*${inputHeightFloat} + float(y)*(height_scale)`, + ] : + [ + '0.0', + '0.0', + `0.5 * (y1+y2) * ${inputHeightFloat}`, + ]; + const [widthRatio, widthScale, inX] = cropWidth > 1 ? 
+ [ + `${(imageWidth - 1) / (cropWidth - 1)}`, + '(x2-x1) * width_ratio', + `x1*${inputWidthFloat} + float(x)*(width_scale)`, + ] : + [ + '0.0', + '0.0', + `0.5 * (x1+x2) * ${inputWidthFloat}`, + ]; + // Reference implementation + // tslint:disable-next-line:max-line-length + // https://github.com/tensorflow/tensorflow/blob/master/tensorflow/core/kernels/crop_and_resize_op_gpu.cu.cc + this.userCode = ` + const float height_ratio = float(${heightRatio}); + const float width_ratio = float(${widthRatio}); + void main() { + ivec4 coords = getOutputCoords(); + int b = coords[0]; + int y = coords[1]; + int x = coords[2]; + int d = coords[3]; + + // get box vals + float y1 = getBoxes(b,0); + float x1 = getBoxes(b,1); + float y2 = getBoxes(b,2); + float x2 = getBoxes(b,3); + + // get image in batch index + int bInd = round(getBoxInd(b)); + if(bInd < 0 || bInd >= ${batch}) { + return; + } + + float height_scale = ${heightScale}; + float width_scale = ${widthScale}; + + float in_y = ${inY}; + if( in_y < 0.0 || in_y > ${inputHeightFloat} ) { + setOutput(float(${extrapolationValue})); + return; + } + float in_x = ${inX}; + if( in_x < 0.0 || in_x > ${inputWidthFloat} ) { + setOutput(float(${extrapolationValue})); + return; + } + + vec2 sourceFracIndexCR = vec2(in_x,in_y); + if(${methodId} == 1) { + // Compute the four integer indices. 
+ ivec2 sourceFloorCR = ivec2(sourceFracIndexCR); + ivec2 sourceCeilCR = ivec2(ceil(sourceFracIndexCR)); + + float topLeft = getImage(b, sourceFloorCR.y, sourceFloorCR.x, d); + float bottomLeft = getImage(b, sourceCeilCR.y, sourceFloorCR.x, d); + float topRight = getImage(b, sourceFloorCR.y, sourceCeilCR.x, d); + float bottomRight = getImage(b, sourceCeilCR.y, sourceCeilCR.x, d); + + vec2 fracCR = sourceFracIndexCR - vec2(sourceFloorCR); + + float top = topLeft + (topRight - topLeft) * fracCR.x; + float bottom = bottomLeft + (bottomRight - bottomLeft) * fracCR.x; + float newValue = top + (bottom - top) * fracCR.y; + setOutput(newValue); + } else { + // Compute the coordinators of nearest neighbor point. + ivec2 sourceNearestCR = ivec2(floor( + sourceFracIndexCR + vec2(0.5,0.5))); + float newValue = getImage(b, sourceNearestCR.y, sourceNearestCR.x, d); + setOutput(newValue); + } + } + `; + } + } + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ + const cropAndResize = (args) => { + const { inputs, backend, attrs } = args; + const { image, boxes, boxInd } = inputs; + const { cropSize, method, extrapolationValue } = attrs; + const program = new CropAndResizeProgram(image.shape, boxes.shape, cropSize, method, extrapolationValue); + return backend.runWebGLProgram(program, [image, boxes, boxInd], 'float32'); + }; + const cropAndResizeConfig = { + kernelName: CropAndResize, + backendName: 'webgl', + kernelFunc: cropAndResize + }; + + var CumOpType; + (function (CumOpType) { + CumOpType["Prod"] = "*"; + CumOpType["Sum"] = "+"; + })(CumOpType || (CumOpType = {})); + class CumProgram { + constructor(op, outputShape, exclusive, reverse) { + this.op = op; + this.outputShape = outputShape; + this.variableNames = ['x']; + this.customUniforms = [{ name: 'index', type: 'float' }]; + const rank = this.outputShape.length; + const initVal = this.op === CumOpType.Prod ? '1.0' : '0.0'; + const val = exclusive ? initVal : `getX(${getCoords(rank, 'coords', this.op)})`; + const length = this.outputShape[this.outputShape.length - 1]; + let condition = ''; + let idxString = ''; + // When exclusive is set, the cum op becomes roll op that copies the + // value from the previous index based on the direction specified by the + // reverse flag. + if (exclusive) { + condition = reverse ? `end != ${length - 1}` : 'end != 0'; + idxString = reverse ? 'end + 1' : 'end - 1'; + } + else { + condition = reverse ? `end + pow2 < ${length}` : 'end >= pow2'; + idxString = (reverse ? 
'end + pow2' : 'end - pow2'); + } + this.userCode = ` + void main() { + ${getCoordsDataType(rank)} coords = getOutputCoords(); + int end = ${getFinalCoord(rank, 'coords', this.op)}; + float val = ${val}; + int pow2 = int(pow(2.0, index)); + if (${condition}) { + int idx = ${idxString}; + ${getFinalCoord(rank, 'coords', this.op)} = idx; + val ${this.op}= getX(${getCoords(rank, 'coords', this.op)}); + } + setOutput(val); + } + `; + } + } + function getCoords(rank, name, op) { + if (rank === 1) { + return `${name}`; + } + else if (rank === 2) { + return `${name}.x, ${name}.y`; + } + else if (rank === 3) { + return `${name}.x, ${name}.y, ${name}.z`; + } + else if (rank === 4) { + return `${name}.x, ${name}.y, ${name}.z, ${name}.w`; + } + else { + throw new Error(`Cumulative ${op} for rank ${rank} is not yet supported`); + } + } + function getFinalCoord(rank, name, op) { + if (rank === 1) { + return `${name}`; + } + else if (rank === 2) { + return `${name}.y`; + } + else if (rank === 3) { + return `${name}.z`; + } + else if (rank === 4) { + return `${name}.w`; + } + else { + throw new Error(`Cumulative ${op} for rank ${rank} is not yet supported`); + } + } + + /** + * @license + * Copyright 2022 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ + function cumImpl(op, x, backend, axis, exclusive, reverse) { + const xRank = x.shape.length; + const permutation = getAxesPermutation([axis], xRank); + let permutedX = x; + if (permutation != null) { + permutedX = transpose({ inputs: { x }, backend, attrs: { perm: permutation } }); + } + const permutedAxis = getInnerMostAxes(1, xRank)[0]; + if (permutedAxis !== xRank - 1) { + throw new Error(`WebGL cumprod shader expects an inner-most axis=${x.shape.length - 1} ` + + `but got axis=${axis}`); + } + const size = permutedX.shape[permutedAxis]; + let result = identity({ inputs: { x: permutedX }, backend }); + // Use cum parallel algorithm, inspired by: + // https://developer.nvidia.com/gpugems/gpugems3/part-vi-gpu-computing/chapter-39-parallel-prefix-sum-scan-cuda + // Note: although the algorithm is called sum, it works for any associtative + // operator with an identity. + for (let i = 0; i <= Math.ceil(Math.log2(size)) - 1; i++) { + const program = new CumProgram(op, permutedX.shape, false, reverse); + const customValues = [[i]]; + const prevResult = result; + result = + backend.runWebGLProgram(program, [result], result.dtype, customValues); + backend.disposeIntermediateTensorInfo(prevResult); + } + // For exclusive cum, shift the end result in the direction of product or sum + // and add 1 for product or 0 for sum to the front index. 
+ if (exclusive) { + const program = new CumProgram(op, permutedX.shape, exclusive, reverse); + const prevResult = result; + result = backend.runWebGLProgram(program, [result], result.dtype); + backend.disposeIntermediateTensorInfo(prevResult); + } + if (permutation != null) { + const reversePermutation = getUndoAxesPermutation(permutation); + const reverseTransposedResult = transpose({ inputs: { x: result }, backend, attrs: { perm: reversePermutation } }); + backend.disposeIntermediateTensorInfo(result); + backend.disposeIntermediateTensorInfo(permutedX); + return reverseTransposedResult; + } + return result; + } + + /** + * @license + * Copyright 2022 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + function cumprod(args) { + const { inputs, backend, attrs } = args; + const { x } = inputs; + const { axis, exclusive, reverse } = attrs; + return cumImpl(CumOpType.Prod, x, backend, axis, exclusive, reverse); + } + const cumprodConfig = { + kernelName: Cumprod, + backendName: 'webgl', + kernelFunc: cumprod + }; + + /** + * @license + * Copyright 2022 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + function cumsum(args) { + const { inputs, backend, attrs } = args; + const { x } = inputs; + const { axis, exclusive, reverse } = attrs; + return cumImpl(CumOpType.Sum, x, backend, axis, exclusive, reverse); + } + const cumsumConfig = { + kernelName: Cumsum, + backendName: 'webgl', + kernelFunc: cumsum + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ + function denseBincount(args) { + const { inputs, backend, attrs } = args; + const { x, weights } = inputs; + const { size, binaryOutput } = attrs; + if (x.shape.length === 1) { + const xVals = backend.readSync(x.dataId); + const weightsVals = backend.readSync(weights.dataId); + const outVals = bincountImplCPU(xVals, weightsVals, weights.dtype, weights.shape, size); + return backend.makeTensorInfo([size], weights.dtype, outVals); + } + else if (x.shape.length === 2) { + const xBuf = backend.bufferSync(x); + const weightsBuf = backend.bufferSync(weights); + const outBuf = bincountReduceImplCPU(xBuf, weightsBuf, size, binaryOutput); + return backend.makeTensorInfo(outBuf.shape, weights.dtype, outBuf.values); + } + throw new Error(`Error in denseBincount: input must be at most rank 2, but got rank` + + `${x.shape.length}.`); + } + const denseBincountConfig = { + kernelName: DenseBincount, + backendName: 'webgl', + kernelFunc: denseBincount + }; + + /** + * @license + * Copyright 2018 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ + class DepthToSpaceProgram { + constructor(outputShape, blockSize, dataFormat) { + this.variableNames = ['x']; + this.outputShape = []; + this.outputShape = outputShape; + this.blockSize = blockSize; + this.dataFormat = dataFormat; + this.userCode = ` + void main() { + ivec4 coords = getOutputCoords(); + int b = coords[0]; + int h = ${this.getHeightCoordString()}; + int w = ${this.getWidthCoordString()}; + int d = ${this.getDepthCoordString()}; + + int in_h = h / ${blockSize}; + int offset_h = imod(h, ${blockSize}); + int in_w = w / ${blockSize}; + int offset_w = imod(w, ${blockSize}); + int offset_d = (offset_h * ${blockSize} + offset_w) * + ${this.getOutputDepthSize()}; + int in_d = d + offset_d; + + float result = ${this.getInputSamplingString()}; + setOutput(result); + } + `; + } + getHeightCoordString() { + if (this.dataFormat === 'NHWC') { + return `coords[1]`; + } + else { + return `coords[2]`; + } + } + getWidthCoordString() { + if (this.dataFormat === 'NHWC') { + return `coords[2]`; + } + else { + return `coords[3]`; + } + } + getDepthCoordString() { + if (this.dataFormat === 'NHWC') { + return `coords[3]`; + } + else { + return `coords[1]`; + } + } + getOutputDepthSize() { + if (this.dataFormat === 'NHWC') { + return this.outputShape[3]; + } + else { + return this.outputShape[1]; + } + } + getInputSamplingString() { + if (this.dataFormat === 'NHWC') { + return `getX(b, in_h, in_w, in_d)`; + } + else { + return `getX(b, in_d, in_h, in_w)`; + } + } + } + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + function depthToSpace(args) { + const { inputs, backend, attrs } = args; + const { x } = inputs; + const { blockSize, dataFormat } = attrs; + const batchSize = x.shape[0]; + const inputHeight = (dataFormat === 'NHWC') ? x.shape[1] : x.shape[2]; + const inputWidth = (dataFormat === 'NHWC') ? x.shape[2] : x.shape[3]; + const inputDepth = (dataFormat === 'NHWC') ? x.shape[3] : x.shape[1]; + const outputHeight = inputHeight * blockSize; + const outputWidth = inputWidth * blockSize; + const outputDepth = inputDepth / (blockSize * blockSize); + const outputShape = (dataFormat === 'NHWC') ? + [batchSize, outputHeight, outputWidth, outputDepth] : + [batchSize, outputDepth, outputHeight, outputWidth]; + const program = new DepthToSpaceProgram(outputShape, blockSize, dataFormat); + return backend.runWebGLProgram(program, [x], x.dtype); + } + const depthToSpaceConfig = { + kernelName: DepthToSpace, + backendName: 'webgl', + kernelFunc: depthToSpace + }; + + /** + * @license + * Copyright 2017 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + class DepthwiseConv2DProgram { + constructor(convInfo, addBias = false, activation = null, hasPreluActivation = false, hasLeakyReluAlpha = false) { + this.variableNames = ['x', 'W']; + this.customUniforms = [ + { name: 'pads', type: 'ivec2' }, + { name: 'strides', type: 'ivec2' }, + { name: 'dilations', type: 'ivec2' }, + { name: 'inDims', type: 'ivec2' }, + ]; + this.outputShape = convInfo.outShape; + this.enableShapeUniforms = useShapeUniforms(this.outputShape.length); + const filterHeight = convInfo.filterHeight; + const filterWidth = convInfo.filterWidth; + const channelMul = convInfo.outChannels / convInfo.inChannels; + let activationSnippet = '', applyActivationSnippet = ''; + if (activation) { + if (hasPreluActivation) { + activationSnippet = `float activation(float a) { + float b = getPreluActivationWeightsAtOutCoords(); + ${activation} + }`; + } + else if (hasLeakyReluAlpha) { + activationSnippet = `float activation(float a) { + float b = getLeakyreluAlphaAtOutCoords(); + ${activation} + }`; + } + else { + activationSnippet = ` + float activation(float x) { + ${activation} + } + `; + } + applyActivationSnippet = `result = activation(result);`; + } + const addBiasSnippet = addBias ? 
'result += getBiasAtOutCoords();' : ''; + if (addBias) { + this.variableNames.push('bias'); + } + if (hasPreluActivation) { + this.variableNames.push('preluActivationWeights'); + } + if (hasLeakyReluAlpha) { + this.variableNames.push('leakyreluAlpha'); + } + this.userCode = ` + ${activationSnippet} + + void main() { + ivec4 coords = getOutputCoords(); + int batch = coords.x; + ivec2 xRCCorner = coords.yz * strides - pads; + int d2 = coords.w; + int d1 = d2 / ${channelMul}; + int q = d2 - d1 * ${channelMul}; + + int xRCorner = xRCCorner.x; + int xCCorner = xRCCorner.y; + + // Convolve x(?, ?, d1) with w(:, :, d1, q) to get y(yR, yC, d2). + // ? = to be determined. : = across all values in that axis. + float dotProd = 0.0; + // TO DO(dsmilkov): Flatten the two for loops and vec4 the operations. + for (int wR = 0; wR < ${filterHeight}; wR++) { + int xR = xRCorner + wR * dilations[0]; + + if (xR < 0 || xR >= inDims[0]) { + continue; + } + + for (int wC = 0; wC < ${filterWidth}; wC++) { + int xC = xCCorner + wC * dilations[1]; + + if (xC < 0 || xC >= inDims[1]) { + continue; + } + + float xVal = getX(batch, xR, xC, d1); + float wVal = getW(wR, wC, d1, q); + dotProd += xVal * wVal; + } + } + + float result = dotProd; + ${addBiasSnippet} + ${applyActivationSnippet} + setOutput(result); + } + `; + } + } + + /** + * @license + * Copyright 2018 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ + class DepthwiseConvPacked2DProgram { + constructor(convInfo, addBias = false, activation = null, hasPreluActivation = false, hasLeakyReluAlpha = false) { + this.variableNames = ['x', 'W']; + this.packedInputs = true; + this.packedOutput = true; + this.customUniforms = [ + { name: 'pads', type: 'ivec2' }, + { name: 'strides', type: 'ivec2' }, + { name: 'dilations', type: 'ivec2' }, + { name: 'inDims', type: 'ivec2' }, + ]; + this.outputShape = convInfo.outShape; + this.enableShapeUniforms = useShapeUniforms(this.outputShape.length); + const channelMul = convInfo.outChannels / convInfo.inChannels; + const padLeft = convInfo.padInfo.left; + const strideWidth = convInfo.strideWidth; + const dilationWidth = convInfo.dilationWidth; + const filterHeight = convInfo.filterHeight; + const filterWidth = convInfo.filterWidth; + const texelsAcross = filterWidth; + let mainLoop = ` + int xR; int xC; int xCOffset; + vec4 wTexel; vec4 previous; vec4 final;`; + for (let c = 0; c < filterWidth; c++) { + mainLoop += ` + vec4 xTexelC${c * 2}; + int xTexelC${c * 2}Ready; + vec4 xTexelC${c * 2 + 1}; + int xTexelC${c * 2 + 1}Ready; + vec4 xC${c};`; + } + /** + * This vectorized implementation works by gathering the values needed for + * each output channel's dot product into vec4's and then multiplying them + * all together (this happens in the final double for-loop below). Most of + * the main loop consists of constructing these vec4's with the minimum + * number of texture2D calls, which means making use of all four returned + * values from a texture2D call at once. 
+ */ + mainLoop += ` + for (int r = 0; r < ${filterHeight}; r++) { + `; + for (let c = 0; c < filterWidth; c++) { + mainLoop += ` + xTexelC${c * 2} = vec4(0.0); + xTexelC${c * 2}Ready = 0; + xTexelC${c * 2 + 1} = vec4(0.0); + xTexelC${c * 2 + 1}Ready = 0; + xC${c} = vec4(0.0);`; + } + mainLoop += ` + xR = xRCorner + r * dilations[0]; + if (xR >=0 && xR < inDims[0]) { + `; + for (let texelC = 0; texelC < (texelsAcross + 1) / 2; texelC++) { + const colIndex = texelC * 2; + mainLoop += ` + xC = xCCorner + ${colIndex * dilationWidth}; + `; + if (strideWidth === 1) { + if (colIndex < filterWidth) { + // If padding is odd, the outer texels have to be composed. + if (padLeft % 2 === 1) { + // TODO: Ensure vec4 previous does not result in redundant sample, + // and avoid setting xTexelRC's that exceed the boundary in the + // first place rather than resetting them to vec4(0)). + // To compute xCOffset: + // - If padding is odd, we must add 1 to ensure we ask for an + // even-numbered row. + // - We subtract 2 to access the previous texel. + mainLoop += ` + xCOffset = xC + 1; + if (xCOffset >= 0 && xCOffset < inDims[1] && xTexelC${colIndex}Ready == 0) { + xTexelC${colIndex} = getX(batch, xR, xCOffset, d1); + + // Need to manually clear unused channels in case + // we're reading from recycled texture. + if (xCOffset + 1 >= inDims[1]) { + xTexelC${colIndex}.zw = vec2(0.0); + } + xTexelC${colIndex}Ready = 1; + } + `; + // This texel has been read in previous iteration if the dilation + // is 1. + if (dilationWidth === 1 && colIndex > 0) { + mainLoop += ` + xC${colIndex} = vec4(xTexelC${colIndex - 2}.zw, xTexelC${colIndex}.xy); + `; + } + else { + mainLoop += ` + xCOffset = xC + 1 - 2; + + if (xCOffset >= 0 && xCOffset < inDims[1]) { + previous = getX(batch, xR, xCOffset, d1); + + // Need to manually clear unused channels in case + // we're reading from recycled texture. 
+ if (xCOffset + 1 >= inDims[1]) { + previous.zw = vec2(0.0); + } + + xC${colIndex} = vec4(previous.zw, xTexelC${colIndex}.xy); + } else { + xC${colIndex} = vec4(0.0, 0.0, xTexelC${colIndex}.xy); + } + `; + } + } + else { + // Padding is even, so xRC corresponds to a single texel. + mainLoop += ` + if (xC >= 0 && xC < inDims[1] && xTexelC${colIndex}Ready == 0) { + xTexelC${colIndex} = getX(batch, xR, xC, d1); + if (xC + 1 >= inDims[1]) { + xTexelC${colIndex}.zw = vec2(0.0); + } + xTexelC${colIndex}Ready = 1; + } + + xC${colIndex} = xTexelC${colIndex}; + `; + } + if (colIndex + 1 < filterWidth) { + // If dilation is even, the second entry should match the first + // (either both are composed or both are single samples). But if + // dilation is odd, then the second entry should be the opposite + // of the first (if the first is composed, the second is a single + // sample, and vice versa.) + const nextTexelOffset = padLeft % 2 === 0 ? + nearestLargerEven(dilationWidth) : + dilationWidth; + if ((dilationWidth % 2 === 0 && padLeft % 2 === 1) || + (dilationWidth % 2 !== 0 && padLeft % 2 !== 1)) { + mainLoop += ` + xCOffset = xC + imod(pads[1], 2) + ${nextTexelOffset}; + + if (xCOffset >= 0 && xCOffset < inDims[1] && xTexelC${colIndex + 1}Ready == 0) { + xTexelC${colIndex + 1} = getX(batch, xR, xCOffset, d1); + + // Need to manually clear unused channels in case + // we're reading from recycled texture. + if (xCOffset + 1 >= inDims[1]) { + xTexelC${colIndex + 1}.zw = vec2(0.0); + } + xTexelC${colIndex + 1}Ready = 1; + } + `; + // If dilation > 1 then the xRC's will not be able to share any + // values, so each xRC will require two unique calls to getX. 
+ if (dilationWidth > 1) { + mainLoop += ` + xCOffset -= 2; + if (xCOffset >= 0 && xCOffset < inDims[1]) { + previous = getX(batch, xR, xCOffset, d1); + xC${colIndex + 1} = vec4(previous.zw, xTexelC${colIndex + 1}.xy); + } else { + xC${colIndex + 1} = vec4(0.0, 0.0, xTexelC${colIndex + 1}.xy); + } + `; + } + else { + mainLoop += ` + xC${colIndex + 1} = vec4(xTexelC${colIndex}.zw, xTexelC${colIndex + 1}.xy); + `; + } + } + else { + // If dilation is 1 and padding is odd, we have already read the + // texel when constructing the previous x value. Here we can + // simply skip the texture read. + if (nextTexelOffset === 1) { + mainLoop += ` + xC${colIndex + 1} = xTexelC${colIndex}; + `; + } + else { + mainLoop += ` + xCOffset = xC + ${nextTexelOffset}; + + if (xCOffset >= 0 && xCOffset < inDims[1] && xTexelC${colIndex + 1}Ready == 0) { + xTexelC${colIndex + 1} = getX(batch, xR, xCOffset, d1); + if (xCOffset + 1 >= inDims[1]) { + xTexelC${colIndex + 1}.zw = vec2(0.0); + } + xTexelC${colIndex + 1}Ready = 1; + } + + xC${colIndex + 1} = xTexelC${colIndex + 1}; + `; + } + } + } + } + } + else { // stride === 2 + if (colIndex < filterWidth) { + // Depending on whether padLeft is even or odd, we want either the + // xy or zw channels from X texels for xC${colIndex}. If padLeft is + // even, xC${colIndex +1} is simply the zw channels of texels we've + // already sampled. But if padLeft is odd, xC{$c + 1}.zw will + // need to come from the xy channels of a new texel, hence the ` + // vec4 + // final` initialized below. + if (padLeft % 2 === 1) { + mainLoop += ` + xCOffset = xC + 1 - strides[1]; + if(xCOffset >= 0 && xCOffset < inDims[1] && xTexelC${colIndex}Ready == 0) { + xTexelC${colIndex} = getX(batch, xR, xCOffset, d1); + // Need to manually clear unused channels in case + // we're reading from recycled texture. 
+ if (xCOffset + 1 >= inDims[1]) { + xTexelC${colIndex}.zw = vec2(0.0); + } + xTexelC${colIndex}Ready = 1; + } + + if(xC + 1 >= 0 && xC + 1 < inDims[1] && xTexelC${colIndex + 1}Ready == 0) { + xTexelC${colIndex + 1} = getX(batch, xR, xC + 1, d1); + // Need to manually clear unused channels in case + // we're reading from recycled texture. + if (xC + 2 >= inDims[1]) { + xTexelC${colIndex + 1}.zw = vec2(0.0); + } + xTexelC${colIndex + 1}Ready = 1; + } + + xC${colIndex} = vec4(xTexelC${colIndex}.zw, xTexelC${colIndex + 1}.zw); + `; + if (colIndex + 1 < filterWidth) { + mainLoop += ` + final = vec4(0.0); + xCOffset = xC + 1 + strides[1]; + if(xCOffset >= 0 && xCOffset < inDims[1]) { + final = getX(batch, xR, xCOffset, d1); + } + xC${colIndex + 1} = vec4(xTexelC${colIndex + 1}.xy, final.xy); + `; + } + } + else { + mainLoop += ` + if(xC >= 0 && xC < inDims[1] && xTexelC${colIndex}Ready == 0) { + xTexelC${colIndex} = getX(batch, xR, xC, d1); + if (xC + 1 >= inDims[1]) { + xTexelC${colIndex}.zw = vec2(0.0); + } + xTexelC${colIndex}Ready = 1; + } + + xCOffset = xC + strides[1]; + if(xCOffset >= 0 && xCOffset < inDims[1] && xTexelC${colIndex + 1}Ready == 0) { + xTexelC${colIndex + 1} = getX(batch, xR, xCOffset, d1); + if (xCOffset + 1 >= inDims[1]) { + xTexelC${colIndex + 1}.zw = vec2(0.); + } + xTexelC${colIndex + 1}Ready = 1; + } + + xC${colIndex} = vec4( + xTexelC${colIndex}.xy, xTexelC${colIndex + 1}.xy); + `; + if (colIndex + 1 < filterWidth) { + mainLoop += ` + xC${colIndex + 1} = vec4(xTexelC${colIndex}.zw, xTexelC${colIndex + 1}.zw); + `; + } + } + } + } + // localize the dotProd accumulation within the loop, the theory is for + // GPU with limited cache, accumulate sum across large amount of + // veriables will cause lots of cache misses. (i.e. 
5x5 filter will have + // 50 variables) + if (colIndex < filterWidth) { + mainLoop += ` + wTexel = getW(r, ${colIndex}, d1, q); + dotProd += xC${colIndex} * vec4(wTexel.xz, wTexel.xz); + `; + if (colIndex + 1 < filterWidth) { + mainLoop += ` + wTexel = getW(r, ${colIndex + 1}, d1, q); + dotProd += xC${colIndex + 1} * vec4(wTexel.xz, wTexel.xz); + `; + } + } + } + mainLoop += ` + } + `; + mainLoop += ` + } + `; + let activationSnippet = '', applyActivationSnippet = ''; + if (activation) { + if (hasPreluActivation) { + activationSnippet = `vec4 activation(vec4 a) { + vec4 b = getPreluActivationWeightsAtOutCoords(); + ${activation} + }`; + } + else if (hasLeakyReluAlpha) { + activationSnippet = `vec4 activation(vec4 a) { + vec4 b = getLeakyreluAlphaAtOutCoords(); + ${activation} + }`; + } + else { + activationSnippet = `vec4 activation(vec4 x) { + ${activation} + }`; + } + applyActivationSnippet = `result = activation(result);`; + } + const addBiasSnippet = addBias ? 'result += getBiasAtOutCoords();' : ''; + if (addBias) { + this.variableNames.push('bias'); + } + if (hasPreluActivation) { + this.variableNames.push('preluActivationWeights'); + } + if (hasLeakyReluAlpha) { + this.variableNames.push('leakyreluAlpha'); + } + this.userCode = ` + ${activationSnippet} + + void main() { + ivec4 coords = getOutputCoords(); + int batch = coords.x; + ivec2 xRCCorner = coords.yz * strides - pads; + int d2 = coords.w; + int d1 = d2 / ${channelMul}; + int q = d2 - d1 * ${channelMul}; + int xRCorner = xRCCorner.x; + int xCCorner = xRCCorner.y; + + //intialize dotProd with a small epsilon seems to reduce GPU accuracy loss. + vec4 dotProd = vec4(0.000000000000001); + + ${mainLoop} + + vec4 result = dotProd - vec4(0.000000000000001); + ${addBiasSnippet} + ${applyActivationSnippet} + setOutput(result); + } + `; + } + } + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. 
+ * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + function depthwiseConv2dNative(args) { + const { inputs, backend, attrs } = args; + const { x, filter } = inputs; + const { strides, pad, dilations, dimRoundingMode } = attrs; + let $dilations = dilations; + if ($dilations == null) { + $dilations = [1, 1]; + } + assert$1(eitherStridesOrDilationsAreOne(strides, $dilations), () => 'Error in depthwiseConv2d: Either strides or dilations must be ' + + `1. 
Got strides ${strides} and dilations '${$dilations}'`); + const convInfo = computeConv2DInfo(x.shape, filter.shape, strides, $dilations, pad, dimRoundingMode, true /* depthwise */); + let program; + if (env().getBool('WEBGL_PACK_DEPTHWISECONV') && convInfo.strideWidth <= 2 && + convInfo.outChannels / convInfo.inChannels === 1) { + program = new DepthwiseConvPacked2DProgram(convInfo); + } + else { + program = new DepthwiseConv2DProgram(convInfo); + } + const customValues = [ + [convInfo.padInfo.top, convInfo.padInfo.left], + [convInfo.strideHeight, convInfo.strideWidth], + [convInfo.dilationHeight, convInfo.dilationWidth], + [convInfo.inHeight, convInfo.inWidth] + ]; + return backend.runWebGLProgram(program, [x, filter], 'float32', customValues); + } + const depthwiseConv2dNativeConfig = { + kernelName: DepthwiseConv2dNative, + backendName: 'webgl', + kernelFunc: depthwiseConv2dNative, + }; + + /** + * @license + * Copyright 2018 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ + class DepthwiseConv2DDerFilterProgram { + constructor(convInfo) { + this.variableNames = ['x', 'dy']; + this.outputShape = convInfo.filterShape; + const strideHeight = convInfo.strideHeight; + const strideWidth = convInfo.strideWidth; + const padTop = convInfo.padInfo.top; + const padLeft = convInfo.padInfo.left; + const channelMul = convInfo.outChannels / convInfo.inChannels; + this.userCode = ` + void main() { + ivec4 coords = getOutputCoords(); + int wR = coords.x; + int wC = coords.y; + int d1 = coords.z; + int dm = coords.w; + int d2 = d1 * ${channelMul} + dm; + + float dotProd = 0.0; + + // TO DO: Vec4 over the batch size + for (int b = 0; b < ${convInfo.batchSize}; b++) { + for (int yR = 0; yR < ${convInfo.outHeight}; yR++) { + int xR = wR + yR * ${strideHeight} - ${padTop}; + + if (xR < 0 || xR >= ${convInfo.inHeight}) { + continue; + } + + for (int yC = 0; yC < ${convInfo.outWidth}; yC++) { + int xC = wC + yC * ${strideWidth} - ${padLeft}; + + if (xC < 0 || xC >= ${convInfo.inWidth}) { + continue; + } + + float dyValue = getDy(b, yR, yC, d2); + float xValue = getX(b, xR, xC, d1); + dotProd += (xValue * dyValue); + } + } + } + setOutput(dotProd); + } + `; + } + } + class DepthwiseConv2DDerInputProgram { + constructor(convInfo) { + this.variableNames = ['dy', 'W']; + this.outputShape = convInfo.inShape; + const filterHeight = convInfo.filterHeight; + const filterWidth = convInfo.filterWidth; + const strideHeight = convInfo.strideHeight; + const strideWidth = convInfo.strideWidth; + const padTop = filterHeight - 1 - convInfo.padInfo.top; + const padLeft = filterWidth - 1 - convInfo.padInfo.left; + const channelMul = convInfo.outChannels / convInfo.inChannels; + this.userCode = ` + const ivec2 pads = ivec2(${padTop}, ${padLeft}); + + void main() { + ivec4 coords = getOutputCoords(); + int batch = coords[0]; + int d1 = coords[3]; + ivec2 dyCorner = coords.yz - pads; + int 
dyRCorner = dyCorner.x; + int dyCCorner = dyCorner.y; + + float dotProd = 0.0; + + for (int wR = 0; wR < ${filterHeight}; wR++) { + float dyR = float(dyRCorner + wR) / ${strideHeight}.0; + + if (dyR < 0.0 || dyR >= ${convInfo.outHeight}.0 || fract(dyR) > 0.0) { + continue; + } + int idyR = int(dyR); + + int wRPerm = ${filterHeight} - 1 - wR; + + for (int wC = 0; wC < ${filterWidth}; wC++) { + float dyC = float(dyCCorner + wC) / ${strideWidth}.0; + + if (dyC < 0.0 || dyC >= ${convInfo.outWidth}.0 || + fract(dyC) > 0.0) { + continue; + } + int idyC = int(dyC); + + int wCPerm = ${filterWidth} - 1 - wC; + + // TO DO: Vec4 over the channelMul + for (int dm = 0; dm < ${channelMul}; dm++) { + int d2 = d1 * ${channelMul} + dm; + float xValue = getDy(batch, idyR, idyC, d2); + float wValue = getW(wRPerm, wCPerm, d1, dm); + dotProd += xValue * wValue; + } + } + } + setOutput(dotProd); + } + `; + } + } + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ + function depthwiseConv2dNativeBackpropFilter(args) { + const { inputs, backend, attrs } = args; + const { x, dy } = inputs; + const { strides, dilations, pad, dimRoundingMode, filterShape } = attrs; + const convInfo = computeConv2DInfo(x.shape, filterShape, strides, dilations, pad, dimRoundingMode, true /* depthwise */); + const program = new DepthwiseConv2DDerFilterProgram(convInfo); + return backend.runWebGLProgram(program, [x, dy], 'float32'); + } + const depthwiseConv2dNativeBackpropFilterConfig = { + kernelName: DepthwiseConv2dNativeBackpropFilter, + backendName: 'webgl', + kernelFunc: depthwiseConv2dNativeBackpropFilter + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ + function depthwiseConv2dNativeBackpropInput(args) { + const { inputs, backend, attrs } = args; + const { dy, filter } = inputs; + const { strides, dilations, pad, dimRoundingMode, inputShape } = attrs; + const convInfo = computeConv2DInfo(inputShape, filter.shape, strides, dilations, pad, dimRoundingMode, true /* depthwise */); + const program = new DepthwiseConv2DDerInputProgram(convInfo); + return backend.runWebGLProgram(program, [dy, filter], 'float32'); + } + const depthwiseConv2dNativeBackpropInputConfig = { + kernelName: DepthwiseConv2dNativeBackpropInput, + backendName: 'webgl', + kernelFunc: depthwiseConv2dNativeBackpropInput + }; + + /** + * @license + * Copyright 2019 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + class DiagProgram { + constructor(size) { + this.variableNames = ['X']; + this.outputShape = [size, size]; + this.userCode = ` + void main() { + ivec2 coords = getOutputCoords(); + float val = coords[0] == coords[1] ? getX(coords[0]) : 0.0; + setOutput(val); + } + `; + } + } + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + function diag(args) { + const { inputs, backend } = args; + const { x } = inputs; + const outShape = [...x.shape, ...x.shape]; + const xSize = sizeFromShape(x.shape); + const flat = reshape({ inputs: { x }, backend, attrs: { shape: [xSize] } }); + const program = new DiagProgram(xSize); + const res = backend.runWebGLProgram(program, [flat], flat.dtype); + const out = reshape({ inputs: { x: res }, backend, attrs: { shape: outShape } }); + backend.disposeIntermediateTensorInfo(flat); + backend.disposeIntermediateTensorInfo(res); + return out; + } + const diagConfig = { + kernelName: Diag, + backendName: 'webgl', + kernelFunc: diag + }; + + /** + * @license + * Copyright 2017 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ + class Dilation2DProgram { + constructor(convInfo) { + this.variableNames = ['x', 'W']; + this.outputShape = convInfo.outShape; + const { inHeight, inWidth, padInfo, strideHeight, strideWidth, filterHeight, filterWidth, dilationHeight, dilationWidth } = convInfo; + const { top: padTop, left: padLeft } = padInfo; + this.userCode = ` + const ivec2 strides = ivec2(${strideHeight}, ${strideWidth}); + const ivec2 pads = ivec2(${padTop}, ${padLeft}); + const float neg_infinity = -3.4e38; + + void main() { + ivec4 coords = getOutputCoords(); + int batch = coords.x; + int d1 = coords.w; + ivec2 outTopLeftCorner = + coords.yz * strides - pads; + int hBeg = outTopLeftCorner.x; + int wBeg = outTopLeftCorner.y; + + float curVal = neg_infinity; + for (int h = 0; h < ${filterHeight}; h++) { + int hIn = hBeg + h * ${dilationHeight}; + + if (hIn >= 0 && hIn < ${inHeight}) { + for (int w = 0; w < ${filterWidth}; w++) { + int wIn = wBeg + w * ${dilationWidth}; + + if (wIn >= 0 && wIn < ${inWidth}) { + float xVal = getX(batch, hIn, wIn, d1); + float wVal = getW(h, w, d1); + + float val = xVal + wVal; + if (val > curVal) { + curVal = val; + } + } + } + } + } + + float result = curVal; + setOutput(result); + } + `; + } + } + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ + function dilation2D(args) { + const { inputs, backend, attrs } = args; + const { x, filter } = inputs; + const { strides, pad, dilations } = attrs; + const convInfo = computeDilation2DInfo(x.shape, filter.shape, strides, pad, 'NHWC' /* dataFormat */, dilations); + let out; + const program = new Dilation2DProgram(convInfo); + out = backend.runWebGLProgram(program, [x, filter], 'float32'); + const outReshaped = reshape({ inputs: { x: out }, backend, attrs: { shape: convInfo.outShape } }); + backend.disposeIntermediateTensorInfo(out); + return outReshaped; + } + const dilation2DConfig = { + kernelName: Dilation2D, + backendName: 'webgl', + kernelFunc: dilation2D, + }; + + /** + * @license + * Copyright 2021 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ + function einsum(args) { + const { inputs, backend, attrs } = args; + const { equation } = attrs; + const tensors = inputs; + const { allDims, summedDims, idDims } = decodeEinsumEquation(equation, tensors.length); + checkEinsumDimSizes(allDims.length, idDims, tensors); + const { path, steps } = getEinsumComputePath(summedDims, idDims); + const nSteps = steps.length; + let out = null; + let numDimsRemaining = allDims.length; + const tensorsToDispose = []; + for (let i = 0; i < nSteps; ++i) { + for (const idTerm of steps[i]) { + const { permutationIndices: perm, expandDims: dimsToExpand } = getEinsumPermutation(numDimsRemaining, idDims[idTerm]); + let x; + if (isIdentityPermutation(perm)) { + x = tensors[idTerm]; + } + else { + x = transpose({ inputs: { x: tensors[idTerm] }, backend, attrs: { perm } }); + tensorsToDispose.push(x); + } + const targetShape = x.shape.slice(); + for (let k = 0; k < dimsToExpand.length; ++k) { + targetShape.splice(dimsToExpand[k], 0, 1); + } + if (!arraysEqual(x.shape, targetShape)) { + x = reshape({ inputs: { x }, backend, attrs: { shape: targetShape } }); + tensorsToDispose.push(x); + } + if (out === null) { + out = x; + } + else { + // tslint:disable-next-line: no-unnecessary-type-assertion + out = multiply({ inputs: { a: x, b: out }, backend }); + tensorsToDispose.push(out); + } + } + if (i < nSteps - 1) { + if (path[i] >= 0) { + out = sum({ + inputs: { x: out }, + backend, + attrs: { + axis: path[i] - (allDims.length - numDimsRemaining), + keepDims: false + } + }); + tensorsToDispose.push(out); + } + numDimsRemaining--; + } + } + // Clean up intermediate tensors. 
+ for (const tensorInfo of tensorsToDispose) { + if (tensorInfo === out) { + continue; + } + backend.disposeIntermediateTensorInfo(tensorInfo); + } + return out; + } + const einsumConfig = { + kernelName: Einsum, + backendName: 'webgl', + kernelFunc: einsum + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + const ELU = `return (x >= 0.0) ? x : (exp(x) - 1.0);`; + const ELU_PACKED = ` + vec4 result; + + result.r = (x.r >= 0.0) ? x.r : (exp(x.r) - 1.0); + result.g = (x.g >= 0.0) ? x.g : (exp(x.g) - 1.0); + result.b = (x.b >= 0.0) ? x.b : (exp(x.b) - 1.0); + result.a = (x.a >= 0.0) ? x.a : (exp(x.a) - 1.0); + + return result; +`; + const elu = unaryKernelFunc({ opSnippet: ELU, packedOpSnippet: ELU_PACKED }); + const eluConfig = { + kernelName: Elu$1, + backendName: 'webgl', + kernelFunc: elu + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + const ELU_DER = `return (b >= 0.0) ? a : a * (b + 1.0);`; + const ELU_DER_PACKED = ` + vec4 bGTEZero = vec4(greaterThanEqual(b, vec4(0.))); + return (bGTEZero * a) + ((vec4(1.0) - bGTEZero) * (a * (b + vec4(1.0)))); +`; + const eluGrad = (args) => { + const { inputs, backend } = args; + const { dy, y } = inputs; + const program = env().getBool('WEBGL_PACK_BINARY_OPERATIONS') ? + new BinaryOpPackedProgram(ELU_DER_PACKED, dy.shape, y.shape) : + new BinaryOpProgram(ELU_DER, dy.shape, y.shape); + return backend.runWebGLProgram(program, [dy, y], dy.dtype); + }; + const eluGradConfig = { + kernelName: EluGrad, + backendName: 'webgl', + kernelFunc: eluGrad + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ + const PACKED_EQUAL = ` + return vec4(equal(a, b)); +`; + const EQUAL = `return float(a == b);`; + const equal = binaryKernelFunc({ + opSnippet: EQUAL, + packedOpSnippet: PACKED_EQUAL, + dtype: 'bool', + cpuKernelImpl: equalImplCPU, + }); + const equalConfig = { + kernelName: Equal, + backendName: 'webgl', + kernelFunc: equal + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + const ERF = ` + // Error function is calculated approximately with elementary function. + // See "Handbook of Mathematical Functions with Formulas, + // Graphs, and Mathematical Tables", Abramowitz and Stegun. + float p = ${ERF_P}; + float a1 = ${ERF_A1}; + float a2 = ${ERF_A2}; + float a3 = ${ERF_A3}; + float a4 = ${ERF_A4}; + float a5 = ${ERF_A5}; + + float sign = sign(x); + x = abs(x); + float t = 1.0 / (1.0 + p * x); + return sign * (1.0 - (((((a5*t + a4)*t) + a3)*t + a2)*t + a1)*t*exp(-x*x)); +`; + const erf = unaryKernelFunc({ opSnippet: ERF }); + const erfConfig = { + kernelName: Erf, + backendName: 'webgl', + kernelFunc: erf, + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. 
+ * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + const EXP = CHECK_NAN_SNIPPET_UNARY + ` + return exp(x); +`; + const EXP_PACKED = ` + vec4 result = exp(x); + bvec4 isNaN = isnan(x); + result.r = isNaN.r ? x.r : result.r; + result.g = isNaN.g ? x.g : result.g; + result.b = isNaN.b ? x.b : result.b; + result.a = isNaN.a ? x.a : result.a; + + return result; +`; + const exp = unaryKernelFunc({ + opSnippet: EXP, + packedOpSnippet: EXP_PACKED, + cpuKernelImpl: expImplCPU, + dtype: 'float32', + }); + const expConfig = { + kernelName: Exp, + backendName: 'webgl', + kernelFunc: exp + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the License); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ + function expandDims(args) { + const { inputs, attrs, backend } = args; + const { dim } = attrs; + const { input } = inputs; + const inputRank = input.shape.length; + const newShape = input.shape.slice(); + let $dim = dim; + if (dim < 0) { + // Negative value is counted from the tail of rank. + assert$1(-(inputRank + 1) <= dim, () => `Axis must be in the interval [${-(inputRank + 1)}, ${inputRank}]`); + $dim = inputRank + dim + 1; + } + newShape.splice($dim, 0, 1); + return reshape({ inputs: { x: input }, backend, attrs: { shape: newShape } }); + } + const expandDimsConfig = { + kernelName: ExpandDims, + backendName: 'webgl', + kernelFunc: expandDims, + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + const EXPM1 = `return exp(x) - 1.0;`; + const expm1 = unaryKernelFunc({ opSnippet: EXPM1, packedOpSnippet: EXPM1, cpuKernelImpl: expm1ImplCPU }); + const expm1Config = { + kernelName: Expm1, + backendName: 'webgl', + kernelFunc: expm1 + }; + + /** + * @license + * Copyright 2018 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + class FFTProgram { + constructor(component, inputShape, inverse) { + this.variableNames = ['real', 'imag']; + const innerDim = inputShape[1]; + this.outputShape = inputShape; + const exponentMultiplierSnippet = inverse ? `2.0 * ${Math.PI}` : `-2.0 * ${Math.PI}`; + const resultDenominator = inverse ? `${innerDim}.0` : '1.0'; + let opString; + if (component === 'real') { + opString = 'return real * expR - imag * expI;'; + } + else if (component === 'imag') { + opString = 'return real * expI + imag * expR;'; + } + else { + throw new Error(`FFT component must be either "real" or "imag", got ${component}.`); + } + this.userCode = ` + const float exponentMultiplier = ${exponentMultiplierSnippet}; + + float unaryOpComplex(float real, float expR, float imag, float expI) { + ${opString} + } + + float mulMatDFT(int batch, int index) { + float indexRatio = float(index) / float(${innerDim}); + float exponentMultiplierTimesIndexRatio = + exponentMultiplier * indexRatio; + + float result = 0.0; + + for (int i = 0; i < ${innerDim}; i++) { + // x = (-2|2 * PI / N) * index * i; + float x = exponentMultiplierTimesIndexRatio * float(i); + float expR = cos(x); + float expI = sin(x); + float real = getReal(batch, i); + float imag = getImag(batch, i); + + result += + unaryOpComplex(real, expR, imag, expI) / ${resultDenominator}; + } + + return result; + } + + void main() { + ivec2 coords = getOutputCoords(); + setOutput(mulMatDFT(coords[0], coords[1])); + } + `; + } + } + + /** + * 
@license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + function fftImpl(x, inverse, backend) { + const xData = backend.texData.get(x.dataId); + const inputSize = sizeFromShape(x.shape); + // Collapse all outer dimensions to a single batch dimension. + const innerDimensionSize = x.shape[x.shape.length - 1]; + const batch = inputSize / innerDimensionSize; + const input2D = reshape({ inputs: { x }, backend, attrs: { shape: [batch, innerDimensionSize] } }); + const xShape = input2D.shape; + const realProgram = new FFTProgram('real', xShape, inverse); + const imagProgram = new FFTProgram('imag', xShape, inverse); + const inputs = [ + { + dataId: xData.complexTensorInfos.real.dataId, + dtype: xData.complexTensorInfos.real.dtype, + shape: xShape + }, + { + dataId: xData.complexTensorInfos.imag.dataId, + dtype: xData.complexTensorInfos.imag.dtype, + shape: xShape + } + ]; + const realPart = backend.runWebGLProgram(realProgram, inputs, 'float32'); + const imagPart = backend.runWebGLProgram(imagProgram, inputs, 'float32'); + const complexOutput = complex({ inputs: { real: realPart, imag: imagPart }, backend }); + backend.disposeIntermediateTensorInfo(realPart); + backend.disposeIntermediateTensorInfo(imagPart); + const complexOutputReshaped = reshape({ inputs: { x: complexOutput }, backend, attrs: { shape: x.shape } }); + 
backend.disposeIntermediateTensorInfo(input2D); + backend.disposeIntermediateTensorInfo(complexOutput); + return complexOutputReshaped; + } + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + function fft(args) { + const { inputs, backend } = args; + const { input } = inputs; + return fftImpl(input, false /* inverse */, backend); + } + const fftConfig = { + kernelName: FFT, + backendName: 'webgl', + kernelFunc: fft + }; + + /** + * @license + * Copyright 2019 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ + class FillProgram { + constructor(shape, value) { + this.outputShape = []; + this.customUniforms = [{ name: 'value', type: 'float' }]; + this.variableNames = ['x']; + this.outputShape = shape; + this.userCode = ` + void main() { + // Input can be obtained from uniform value. + setOutput(value); + } + `; + } + } + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + function fill(args) { + const { backend, attrs } = args; + const { shape, value } = attrs; + let { dtype } = attrs; + dtype = dtype || inferDtype(value); + if (dtype === 'string') { + // String type should be handled in CPU memory. + const values = getArrayFromDType(dtype, sizeFromShape(shape)); + values.fill(value); + return backend.makeTensorInfo(shape, dtype, values); + } + else { + const program = new FillProgram(shape, value); + const customValues = [[value]]; + return backend.runWebGLProgram(program, [], dtype, customValues); + } + } + const fillConfig = { + kernelName: Fill, + backendName: 'webgl', + kernelFunc: fill + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + class FlipLeftRightProgram { + constructor(imageShape) { + this.variableNames = ['Image']; + this.outputShape = []; + const imageWidth = imageShape[2]; + this.outputShape = imageShape; + this.userCode = ` + void main() { + ivec4 coords = getOutputCoords(); + int x = coords[2]; + + int coordX = ${imageWidth} - x - 1; + float outputValue; + if(coordX >= 0 && coordX < ${imageWidth}) { + outputValue = getImage(coords[0], coords[1], coordX, coords[3]); + } else { + outputValue = getImage(coords[0], coords[1], coords[2], coords[3]); + } + setOutput(outputValue); + } + `; + } + } + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ + const flipLeftRightConfig = { + kernelName: FlipLeftRight, + backendName: 'webgl', + kernelFunc: ({ inputs, backend }) => { + const { image } = inputs; + const webglBackend = backend; + const program = new FlipLeftRightProgram(image.shape); + const output = webglBackend.runWebGLProgram(program, [image], image.dtype); + return output; + } + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + const FLOOR = `return floor(x);`; + const floor = unaryKernelFunc({ opSnippet: FLOOR, packedOpSnippet: FLOOR, cpuKernelImpl: floorImplCPU }); + const floorConfig = { + kernelName: Floor, + backendName: 'webgl', + kernelFunc: floor, + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + // We use native integer division to deal with floating point imprecision. Since + // we implement floor division and glsl implements truncated division, we + // correct for this by subtracting 1 from result when the result is negative and + // there is a remainder. + const INT_DIV = ` + float s = sign(a) * sign(b); + int ia = round(a); + int ib = round(b); + if (ib != 0) { + // Windows (D3D) wants guaranteed non-zero int division at compile-time. + return float(idiv(ia, ib, s)); + } else { + return NAN; + } +`; + const INT_DIV_PACKED = ` + ivec4 ia = round(a); + ivec4 ib = round(b); + bvec4 cond = notEqual(ib, ivec4(0)); + ivec4 result = ivec4(0); + vec4 s = sign(a) * sign(b); + + // Windows (D3D) wants guaranteed non-zero int division at compile-time. + if (cond[0]) { + result[0] = idiv(ia[0], ib[0], s[0]); + } + if (cond[1]) { + result[1] = idiv(ia[1], ib[1], s[1]); + } + if (cond[2]) { + result[2] = idiv(ia[2], ib[2], s[2]); + } + if (cond[3]) { + result[3] = idiv(ia[3], ib[3], s[3]); + } + return vec4(result); +`; + const floorDiv = binaryKernelFunc({ opSnippet: INT_DIV, packedOpSnippet: INT_DIV_PACKED, dtype: 'int32' }); + const floorDivConfig = { + kernelName: FloorDiv, + backendName: 'webgl', + kernelFunc: floorDiv + }; + + /** + * @license + * Copyright 2018 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + class FromPixelsProgram { + constructor(outputShape) { + this.variableNames = ['A']; + const glsl = getGlslDifferences(); + const [height, width,] = outputShape; + this.outputShape = outputShape; + this.userCode = ` + void main() { + ivec3 coords = getOutputCoords(); + int texR = coords[0]; + int texC = coords[1]; + int depth = coords[2]; + vec2 uv = (vec2(texC, texR) + halfCR) / vec2(${width}.0, ${height}.0); + + vec4 values = ${glsl.texture2D}(A, uv); + float value; + if (depth == 0) { + value = values.r; + } else if (depth == 1) { + value = values.g; + } else if (depth == 2) { + value = values.b; + } else if (depth == 3) { + value = values.a; + } + + setOutput(floor(value * 255.0 + 0.5)); + } + `; + } + } + + /** + * @license + * Copyright 2018 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ + class FromPixelsPackedProgram { + constructor(outputShape) { + this.variableNames = ['A']; + this.packedInputs = false; + this.packedOutput = true; + const glsl = getGlslDifferences(); + const [height, width,] = outputShape; + this.outputShape = outputShape; + this.userCode = ` + void main() { + ivec3 coords = getOutputCoords(); + int texR = coords[0]; + int texC = coords[1]; + int depth = coords[2]; + + vec4 result = vec4(0.); + + for(int row=0; row<=1; row++) { + for(int col=0; col<=1; col++) { + texC = coords[1] + row; + depth = coords[2] + col; + + vec2 uv = (vec2(texC, texR) + halfCR) / + vec2(${width}.0, ${height}.0); + vec4 values = ${glsl.texture2D}(A, uv); + float value; + if (depth == 0) { + value = values.r; + } else if (depth == 1) { + value = values.g; + } else if (depth == 2) { + value = values.b; + } else if (depth == 3) { + value = values.a; + } + + result[row * 2 + col] = floor(value * 255.0 + 0.5); + } + } + + ${glsl.output} = result; + } + `; + } + } + + /** + * @license + * Copyright 2019 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ + const fromPixelsConfig = { + kernelName: FromPixels, + backendName: 'webgl', + kernelFunc: fromPixels, + }; + let fromPixels2DContext; + let willReadFrequently = env().getBool('CANVAS2D_WILL_READ_FREQUENTLY_FOR_GPU'); + function fromPixels(args) { + const { inputs, backend, attrs } = args; + let { pixels } = inputs; + const { numChannels } = attrs; + const isVideo = typeof (HTMLVideoElement) !== 'undefined' && + pixels instanceof HTMLVideoElement; + const isImage = typeof (HTMLImageElement) !== 'undefined' && + pixels instanceof HTMLImageElement; + const [width, height] = isVideo ? + [ + pixels.videoWidth, + pixels.videoHeight + ] : + [pixels.width, pixels.height]; + const texShape = [height, width]; + const outShape = [height, width, numChannels]; + if (isImage || isVideo) { + const newWillReadFrequently = env().getBool('CANVAS2D_WILL_READ_FREQUENTLY_FOR_GPU'); + if (fromPixels2DContext == null || + newWillReadFrequently !== willReadFrequently) { + willReadFrequently = newWillReadFrequently; + fromPixels2DContext = + document.createElement('canvas').getContext('2d', { willReadFrequently }); + } + fromPixels2DContext.canvas.width = width; + fromPixels2DContext.canvas.height = height; + fromPixels2DContext.drawImage(pixels, 0, 0, width, height); + pixels = fromPixels2DContext.canvas; + } + const tempPixelHandle = backend.makeTensorInfo(texShape, 'int32'); + // This is a byte texture with pixels. + backend.texData.get(tempPixelHandle.dataId).usage = TextureUsage.PIXELS; + backend.gpgpu.uploadPixelDataToTexture(backend.getTexture(tempPixelHandle.dataId), pixels); + const program = env().getBool('WEBGL_PACK') ? + new FromPixelsPackedProgram(outShape) : + new FromPixelsProgram(outShape); + const res = backend.runWebGLProgram(program, [tempPixelHandle], 'int32'); + backend.disposeData(tempPixelHandle.dataId); + return res; + } + + /** + * @license + * Copyright 2020 Google LLC. 
All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + function fusedConv2d(args) { + const { inputs, backend, attrs } = args; + const { x, filter, bias, preluActivationWeights } = inputs; + const { strides, pad, dataFormat, dilations, dimRoundingMode, activation, leakyreluAlpha } = attrs; + const $dataFormat = convertConv2DDataFormat(dataFormat); + const convInfo = computeConv2DInfo(x.shape, filter.shape, strides, dilations, pad, dimRoundingMode, false /* depthwise */, $dataFormat); + let out; + const intermediates = []; + const hasBias = bias != null; + const hasPreluActivationWeights = preluActivationWeights != null; + const hasLeakyreluAlpha = activation === 'leakyrelu'; + const prepareInputs = () => { + const inputs = [x, filter]; + // If the input is a 1-D tensor, align it with the channels. + // + // For fusedConv2d, the inputs (x, W, bias, preluActivationWeights) are + // supposed to be aligned with the dataFormat. The 4-D tensor inputs or + // scalar inputs are originally aligned, but the 1-D tensor inputs are + // supposed to be aligned with the channels (only bias and PReLU activation + // weights could be a 1-D tensor). 
+ const alignInputWithDataFormat = (input, dataFormat) => { + if (dataFormat === 'NCHW' && input.shape.length === 1 && + input.shape[0] !== 1) { + const alignedInput = reshape({ + inputs: { x: input }, + backend, + attrs: { shape: [input.shape[0], 1, 1] } + }); + intermediates.push(alignedInput); + return alignedInput; + } + return input; + }; + if (hasBias) { + inputs.push(alignInputWithDataFormat(bias, dataFormat)); + } + if (hasPreluActivationWeights) { + inputs.push(alignInputWithDataFormat(preluActivationWeights, dataFormat)); + } + if (hasLeakyreluAlpha) { + const $leakyreluAlpha = backend.makeTensorInfo([], 'float32', createScalarValue(leakyreluAlpha, 'float32')); + inputs.push($leakyreluAlpha); + intermediates.push($leakyreluAlpha); + } + return inputs; + }; + if (convInfo.filterHeight === 1 && convInfo.filterWidth === 1 && + convInfo.dilationHeight === 1 && convInfo.dilationWidth === 1 && + convInfo.strideHeight === 1 && convInfo.strideWidth === 1 && + (convInfo.padInfo.type === 'SAME' || convInfo.padInfo.type === 'VALID')) { + out = conv2dByMatMul({ + x, + filter, + convInfo, + backend, + bias, + activation, + preluActivationWeights, + leakyreluAlpha + }); + } + else if (convInfo.strideWidth <= 2 && $dataFormat === 'channelsLast' + && env().getBool('WEBGL_EXP_CONV')) { + const fusedActivation = activation ? 
mapActivationToShaderProgram(activation, true) : null; + const program = new Conv2DPackedProgram(convInfo, hasBias, fusedActivation, hasPreluActivationWeights, hasLeakyreluAlpha); + const customValues = [ + [convInfo.padInfo.top, convInfo.padInfo.left], + [convInfo.strideHeight, convInfo.strideWidth], + [convInfo.dilationHeight, convInfo.dilationWidth], + [convInfo.inHeight, convInfo.inWidth] + ]; + const inputs = prepareInputs(); + out = backend.runWebGLProgram(program, inputs, 'float32', customValues); + } + else if (env().getBool('WEBGL_CONV_IM2COL')) { + out = conv2dWithIm2Row({ + x, + filter, + convInfo, + backend, + bias, + activation, + preluActivationWeights, + leakyreluAlpha + }); + } + else { + const fusedActivation = activation ? mapActivationToShaderProgram(activation, false) : null; + const program = new Conv2DProgram(convInfo, hasBias, fusedActivation, hasPreluActivationWeights, hasLeakyreluAlpha); + const inputs = prepareInputs(); + out = backend.runWebGLProgram(program, inputs, 'float32'); + } + const outReshaped = reshape({ inputs: { x: out }, backend, attrs: { shape: convInfo.outShape } }); + intermediates.push(out); + intermediates.forEach(t => backend.disposeIntermediateTensorInfo(t)); + return outReshaped; + } + const fusedConv2DConfig = { + kernelName: FusedConv2D, + backendName: 'webgl', + kernelFunc: fusedConv2d, + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ + function fusedDepthwiseConv2D(args) { + const { inputs, backend, attrs } = args; + const { x, filter, bias, preluActivationWeights } = inputs; + const { strides, pad, dilations, dimRoundingMode, activation, leakyreluAlpha } = attrs; + const intermediates = []; + let $dilations = dilations; + if ($dilations == null) { + $dilations = [1, 1]; + } + assert$1(eitherStridesOrDilationsAreOne(strides, $dilations), () => 'Error in depthwiseConv2d: Either strides or dilations must be ' + + `1. Got strides ${strides} and dilations '${$dilations}'`); + const convInfo = computeConv2DInfo(x.shape, filter.shape, strides, $dilations, pad, dimRoundingMode, true /* depthwise */); + const shouldPackDepthwiseConv = env().getBool('WEBGL_PACK_DEPTHWISECONV') && + convInfo.strideWidth <= 2 && + convInfo.outChannels / convInfo.inChannels === 1; + const fusedActivation = activation ? + mapActivationToShaderProgram(activation, shouldPackDepthwiseConv) : + null; + const programInputs = [x, filter]; + const hasBias = bias != null; + const hasPreluActivationWeights = preluActivationWeights != null; + const hasLeakyreluAlpha = activation === 'leakyrelu'; + if (hasBias) { + programInputs.push(bias); + } + if (hasPreluActivationWeights) { + programInputs.push(preluActivationWeights); + } + if (hasLeakyreluAlpha) { + const $leakyreluAlpha = backend.makeTensorInfo([], 'float32', createScalarValue(leakyreluAlpha, 'float32')); + programInputs.push($leakyreluAlpha); + intermediates.push($leakyreluAlpha); + } + let program; + if (shouldPackDepthwiseConv) { + program = new DepthwiseConvPacked2DProgram(convInfo, hasBias, fusedActivation, hasPreluActivationWeights, hasLeakyreluAlpha); + } + else { + program = new DepthwiseConv2DProgram(convInfo, hasBias, fusedActivation, hasPreluActivationWeights, hasLeakyreluAlpha); + } + const customValues = [ + [convInfo.padInfo.top, convInfo.padInfo.left], + 
[convInfo.strideHeight, convInfo.strideWidth], + [convInfo.dilationHeight, convInfo.dilationWidth], + [convInfo.inHeight, convInfo.inWidth] + ]; + const result = backend.runWebGLProgram(program, programInputs, 'float32', customValues); + intermediates.forEach(t => backend.disposeIntermediateTensorInfo(t)); + return result; + } + const fusedDepthwiseConv2DConfig = { + kernelName: FusedDepthwiseConv2D, + backendName: 'webgl', + kernelFunc: fusedDepthwiseConv2D, + }; + + class GatherNDProgram { + constructor(sliceDim, strides, shape, paramsShape) { + this.sliceDim = sliceDim; + this.strides = strides; + this.paramsShape = paramsShape; + this.variableNames = ['x', 'indices']; + this.outputShape = shape; + const dtype = getCoordsDataType(shape.length); + let mainLoop = ` + int index;`; + for (let j = 0; j < this.sliceDim; j++) { + mainLoop += ` + index = round(getIndices(coords[0], ${j})); + out_of_bounds = out_of_bounds || index < 0; + out_of_bounds = out_of_bounds || index >= ${this.paramsShape[j]}; + flattenIndex += index * ${this.strides[j]};`; + } + this.userCode = ` + void main() { + ${dtype} coords = getOutputCoords(); + int flattenIndex = 0; + bool out_of_bounds = false; + + ${mainLoop} + + setOutput(out_of_bounds ? 0.0 : getX(flattenIndex, coords[1])); + } + `; + } + } + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ + function gatherNd(args) { + const { inputs, backend } = args; + const { params, indices } = inputs; + const indicesShape = indices.shape; + const sliceRank = indicesShape[indicesShape.length - 1]; + const paramsSize = sizeFromShape(params.shape); + const [resultShape, numSlices, sliceSize, strides] = prepareAndValidate(params, indices); + const flattenIndices = reshape({ inputs: { x: indices }, backend, attrs: { shape: [numSlices, sliceRank] } }); + const flattenX = reshape({ + inputs: { x: params }, + backend, + attrs: { shape: [(sizeFromShape(params.shape) / sliceSize), sliceSize] } + }); + if (backend.shouldExecuteOnCPU([params, indices]) || + params.dtype === 'string') { + const indicesData = backend.readSync(indices.dataId); + const paramsBuf = backend.bufferSync(params); + const outValue = gatherNdImplCPU(indicesData, paramsBuf, params.dtype, numSlices, sliceRank, sliceSize, strides, params.shape, paramsSize); + return backend.makeTensorInfo(resultShape, params.dtype, outValue.values); + } + const program = new GatherNDProgram(sliceRank, strides, [numSlices, sliceSize], params.shape); + const res = backend.runWebGLProgram(program, [flattenX, flattenIndices], flattenX.dtype); + const reshaped = reshape({ inputs: { x: res }, backend, attrs: { shape: resultShape } }); + backend.disposeIntermediateTensorInfo(flattenIndices); + backend.disposeIntermediateTensorInfo(flattenX); + backend.disposeIntermediateTensorInfo(res); + return reshaped; + } + const gatherNdConfig = { + kernelName: GatherNd, + backendName: 'webgl', + kernelFunc: gatherNd + }; + + /** + * @license + * Copyright 2017 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + class GatherProgram { + constructor(aShape, outputShape) { + this.variableNames = ['A', 'indices']; + this.outputShape = outputShape; + this.rank = outputShape.length; + const dtype = getCoordsDataType(this.rank); + const sourceCoords = getSourceCoords$1(aShape, 2); + this.userCode = ` + void main() { + ${dtype} resRC = getOutputCoords(); + int index = int(getIndices(resRC.x, resRC.z)); + float inBounds = (index >= 0) && (index < ${aShape[2]}) ? 1.0 : 0.0; + setOutput(inBounds * getA(${sourceCoords})); + } + `; + } + } + // The input and output are always flattened into rank 4 tensors. + function getSourceCoords$1(aShape, axis) { + const currentCoords = ['resRC.x', 'resRC.y', 'resRC.z', 'resRC.w']; + const sourceCoords = []; + for (let i = 0; i < aShape.length; i++) { + if (i === 2) { + sourceCoords.push('index'); + } + else { + sourceCoords.push(`${currentCoords[i]}`); + } + } + return sourceCoords.join(); + } + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + function gatherV2(args) { + const { inputs, backend, attrs } = args; + const { x, indices } = inputs; + const { axis, batchDims } = attrs; + const parsedAxis = parseAxisParam(axis, x.shape)[0]; + if (env().get('DEBUG')) { + // In debug mode, throw error when any index is out of bound. + // Otherwise, just fill out of bounds with zeroes. + const indicesVals = backend.readSync(indices.dataId); + const axisDim = x.shape[parsedAxis]; + for (let i = 0; i < indicesVals.length; ++i) { + const index = indicesVals[i]; + assert$1(index <= axisDim - 1 && index >= 0, () => `GatherV2: the index value ${index} is not in [0, ${axisDim - 1}]`); + } + } + const shapeInfo = collectGatherOpShapeInfo(x, indices, parsedAxis, batchDims); + const indicesSize = sizeFromShape(indices.shape); + const toDispose = []; + const flattenX = reshape({ + inputs: { x }, + backend, + attrs: { + shape: [ + shapeInfo.batchSize, shapeInfo.outerSize, shapeInfo.dimSize, + shapeInfo.sliceSize + ] + } + }); + const flattenIndex = reshape({ + inputs: { x: indices }, + backend, + attrs: { shape: [shapeInfo.batchSize, indicesSize / shapeInfo.batchSize] } + }); + toDispose.push(flattenX); + toDispose.push(flattenIndex); + const flattenOutputShape = [ + shapeInfo.batchSize, shapeInfo.outerSize, indicesSize / shapeInfo.batchSize, + shapeInfo.sliceSize + ]; + if (backend.shouldExecuteOnCPU([x, indices]) || x.dtype === 'string') { + const indicesBuf = backend.bufferSync(flattenIndex); + const xBuf = backend.bufferSync(flattenX); + const outBuf = gatherV2ImplCPU(xBuf, indicesBuf, flattenOutputShape); + toDispose.forEach(t => backend.disposeIntermediateTensorInfo(t)); + return backend.makeTensorInfo(shapeInfo.outputShape, outBuf.dtype, outBuf.values); + } + const program = new GatherProgram(flattenX.shape, 
flattenOutputShape); + const res = backend.runWebGLProgram(program, [flattenX, flattenIndex], flattenX.dtype); + toDispose.push(res); + const reshaped = reshape({ inputs: { x: res }, backend, attrs: { shape: shapeInfo.outputShape } }); + toDispose.forEach(t => backend.disposeIntermediateTensorInfo(t)); + return reshaped; + } + const gatherV2Config = { + kernelName: GatherV2, + backendName: 'webgl', + kernelFunc: gatherV2 + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + const GREATER = `return float(a > b);`; + const GREATER_PACKED = ` + return vec4(greaterThan(a, b)); +`; + const greater = binaryKernelFunc({ + opSnippet: GREATER, + packedOpSnippet: GREATER_PACKED, + cpuKernelImpl: greaterImplCPU, + dtype: 'bool' + }); + const greaterConfig = { + kernelName: Greater, + backendName: 'webgl', + kernelFunc: greater + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + const GREATER_EQUAL = `return float(a >= b);`; + const GREATER_EQUAL_PACKED = ` + return vec4(greaterThanEqual(a, b)); +`; + const greaterEqual = binaryKernelFunc({ + opSnippet: GREATER_EQUAL, + packedOpSnippet: GREATER_EQUAL_PACKED, + dtype: 'bool', + cpuKernelImpl: greaterEqualImplCPU + }); + const greaterEqualConfig = { + kernelName: GreaterEqual, + backendName: 'webgl', + kernelFunc: greaterEqual + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + function ifft(args) { + const { inputs, backend } = args; + const { input } = inputs; + return fftImpl(input, true /* inverse */, backend); + } + const ifftConfig = { + kernelName: IFFT, + backendName: 'webgl', + kernelFunc: ifft + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. 
+ * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + const IS_FINITE = `return float(!isnan(x) && !isinf(x));`; + const isFinite$1 = unaryKernelFunc({ opSnippet: IS_FINITE, dtype: 'bool' }); + const isFiniteConfig = { + kernelName: IsFinite, + backendName: 'webgl', + kernelFunc: isFinite$1, + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + const IS_INF = `return float(isinf(x));`; + const isInf = unaryKernelFunc({ opSnippet: IS_INF, dtype: 'bool' }); + const isInfConfig = { + kernelName: IsInf, + backendName: 'webgl', + kernelFunc: isInf, + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. 
+ * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + const IS_NAN = `return float(isnan(x));`; + const isNaN$1 = unaryKernelFunc({ opSnippet: IS_NAN, dtype: 'bool' }); + const isNaNConfig = { + kernelName: IsNan, + backendName: 'webgl', + kernelFunc: isNaN$1, + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + const LESS = `return float(a < b);`; + const LESS_PACKED = ` + return vec4(lessThan(a, b)); +`; + const less = binaryKernelFunc({ + opSnippet: LESS, + packedOpSnippet: LESS_PACKED, + cpuKernelImpl: lessImplCPU, + dtype: 'bool' + }); + const lessConfig = { + kernelName: Less, + backendName: 'webgl', + kernelFunc: less + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. 
+ * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + const LESS_EQUAL = `return float(a <= b);`; + const LESS_EQUAL_PACKED = ` + return vec4(lessThanEqual(a, b)); +`; + const lessEqual = binaryKernelFunc({ + opSnippet: LESS_EQUAL, + packedOpSnippet: LESS_EQUAL_PACKED, + cpuKernelImpl: lessEqualImplCPU, + dtype: 'bool' + }); + const lessEqualConfig = { + kernelName: LessEqual, + backendName: 'webgl', + kernelFunc: lessEqual + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + function linSpace(args) { + const { backend, attrs } = args; + const { start, stop, num } = attrs; + // TODO: Use CPU implementation due to the precision problem in Safari. 
+ const outVals = linSpaceImplCPU(start, stop, num); + return backend.makeTensorInfo([outVals.length], 'float32', outVals); + } + const linSpaceConfig = { + kernelName: LinSpace, + backendName: 'webgl', + kernelFunc: linSpace + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + // Windows chrome return 0 if the input is negative value. We will specifically + // return NaN if the input is 0 to solve compatiblity issue. + const LOG = CHECK_NAN_SNIPPET_UNARY + ` + return x < 0.0 ? 0./0. : log(x); +`; + const LOG_PACKED = ` + vec4 result = log(x); + bvec4 isNaN = isnan(x); + result.r = isNaN.r ? x.r : (x.r < 0.0 ? 0./0. : result.r); + result.g = isNaN.g ? x.g : (x.g < 0.0 ? 0./0. : result.g); + result.b = isNaN.b ? x.b : (x.b < 0.0 ? 0./0. : result.b); + result.a = isNaN.a ? x.a : (x.a < 0.0 ? 0./0. : result.a); + return result; +`; + const log = unaryKernelFunc({ opSnippet: LOG, packedOpSnippet: LOG_PACKED, cpuKernelImpl: logImplCPU }); + const logConfig = { + kernelName: Log, + backendName: 'webgl', + kernelFunc: log + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + const LOG1P = CHECK_NAN_SNIPPET_UNARY + ` + return log(1.0 + x); +`; + const log1p = unaryKernelFunc({ opSnippet: LOG1P }); + const log1pConfig = { + kernelName: Log1p, + backendName: 'webgl', + kernelFunc: log1p, + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + const LOGICAL_AND = `return float(a >= 1.0 && b >= 1.0);`; + const LOGICAL_AND_PACKED = ` + return vec4( + vec4(greaterThanEqual(a, vec4(1.0))) * + vec4(greaterThanEqual(b, vec4(1.0)))); +`; + const logicalAnd = binaryKernelFunc({ + opSnippet: LOGICAL_AND, + packedOpSnippet: LOGICAL_AND_PACKED, + dtype: 'bool' + }); + const logicalAndConfig = { + kernelName: LogicalAnd, + backendName: 'webgl', + kernelFunc: logicalAnd + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. 
+ * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + const LOGICAL_NOT = `return float(!(x >= 1.0));`; + const logicalNot = unaryKernelFunc({ opSnippet: LOGICAL_NOT }); + const logicalNotConfig = { + kernelName: LogicalNot, + backendName: 'webgl', + kernelFunc: logicalNot, + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ + const LOGICAL_OR = `return float(a >= 1.0 || b >= 1.0);`; + const LOGICAL_OR_PACKED = ` + return min( + vec4(greaterThanEqual(a, vec4(1.0))) + + vec4(greaterThanEqual(b, vec4(1.0))), + vec4(1.0)); +`; + const logicalOr = binaryKernelFunc({ opSnippet: LOGICAL_OR, packedOpSnippet: LOGICAL_OR_PACKED, dtype: 'bool' }); + const logicalOrConfig = { + kernelName: LogicalOr, + backendName: 'webgl', + kernelFunc: logicalOr + }; + + /** + * @license + * Copyright 2017 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + class LRNProgram { + constructor(xShape, radius, bias, alpha, beta) { + this.variableNames = ['x']; + this.outputShape = []; + const rad = radius; + const maxD = xShape[3] - 1; + this.outputShape = xShape; + // optimize pow(bias + alpha * sum, -beta) + // src: https://github.com/tensorflow/tensorflow/.. + // blob/26033a1644a9c4a5fbe3170ab2e864b6a4ccd4ca/.. 
+ // tensorflow/core/kernels/mkl_lrn_op.cc#L320 + let powOperator; + const basis = `float(${bias}) + float(${alpha}) * sum`; + if (beta === 0.5) { + powOperator = `inversesqrt(${basis})`; + } + else if (beta === 1.0) { + powOperator = `1.0/(${basis})`; + } + else { + powOperator = `exp(log(${basis}) * float(-${beta}));`; + } + this.userCode = ` + void main() { + ivec4 coords = getOutputCoords(); + int b = coords[0]; + int r = coords[1]; + int c = coords[2]; + int d = coords[3]; + float x = getX(b, r, c, d); + float sum = 0.0; + for (int j = -${rad}; j <= ${rad}; j++) { + int idx = d + j; + if (idx >= 0 && idx <= ${maxD}) { + float z = getX(b, r, c, idx); + sum += z * z; + } + } + float val = x * ${powOperator}; + setOutput(val); + } + `; + } + } + + /** + * @license + * Copyright 2019 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + class LRNPackedProgram { + constructor(xShape, radius, bias, alpha, beta) { + this.variableNames = ['x']; + this.outputShape = []; + this.packedInputs = true; + this.packedOutput = true; + const rad = radius; + const maxD = xShape[3] - 1; + this.outputShape = xShape; + // optimize pow(bias + alpha * sum, -beta) + // src: https://github.com/tensorflow/tensorflow/.. + // blob/26033a1644a9c4a5fbe3170ab2e864b6a4ccd4ca/.. 
+ // tensorflow/core/kernels/mkl_lrn_op.cc#L320 + let powOperator; + const basis = `float(${bias}) + float(${alpha}) * sum`; + if (beta === 0.5) { + powOperator = `inversesqrt(${basis})`; + } + else if (beta === 1.0) { + powOperator = `1.0/(${basis})`; + } + else { + powOperator = `exp(log(${basis}) * float(-${beta}));`; + } + this.userCode = ` + void main() { + ivec4 coords = getOutputCoords(); + int b = coords.x; + int r = coords.y; + int c = coords.z; + int d = coords.w; + + bool hasNextCol = d < ${this.outputShape[3]}; + bool hasNextRow = c < ${this.outputShape[2]}; + + vec4 sum = vec4(0.); + vec4 xFragAtOutputCoords = getX(b, r, c, d); + + vec4 xAtOutputCoords = vec4( + getChannel(xFragAtOutputCoords, vec2(c, d)), + hasNextCol ? + getChannel(xFragAtOutputCoords, vec2(c, d + 1)) : 0.0, + hasNextRow ? + getChannel(xFragAtOutputCoords , vec2(c + 1, d)) : 0.0, + (hasNextRow && hasNextCol) ? + getChannel(xFragAtOutputCoords, vec2(c + 1, d + 1)) : 0.0 + ); + + int firstChannel = d - ${rad}; + vec2 cache = vec2(0.); + if(firstChannel >= 0){ + vec4 firstChannelFrag = getX(b, r, c, firstChannel); + cache.x = getChannel(firstChannelFrag, vec2(c, firstChannel)); + if(hasNextRow){ + cache.y = getChannel(firstChannelFrag, vec2(c + 1, firstChannel)); + } + } + + ivec2 depth = ivec2(d, d + 1); + for (int j = - ${rad}; j <= ${rad}; j++) { + ivec2 idx = depth + j; + bvec2 aboveLowerBound = greaterThanEqual(idx, ivec2(0)); + bvec2 belowUpperBound = lessThanEqual(idx, ivec2(${maxD})); + + bool depthInRange = aboveLowerBound.x && belowUpperBound.x; + bool depthPlusOneInRange = aboveLowerBound.y && belowUpperBound.y; + + if(depthInRange || depthPlusOneInRange){ + vec4 z = vec4(0.); + vec4 xFragAtCurrentDepth; + z.xz = cache.xy; + if(depthPlusOneInRange && hasNextCol){ + xFragAtCurrentDepth = idx.y != d ? 
+ getX(b, r, c, idx.y) : xFragAtOutputCoords; + z.y = getChannel(xFragAtCurrentDepth, vec2(c, idx.y)); + if(hasNextRow){ + z.w = getChannel(xFragAtCurrentDepth, vec2(c + 1, idx.y)); + } + } + cache.xy = z.yw; + sum += z * z; + } + } + vec4 result = xAtOutputCoords * ${powOperator}; + setOutput(result); + } + `; + } + } + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + const lrn = (args) => { + const { inputs, backend, attrs } = args; + const { x } = inputs; + const { depthRadius, bias, alpha, beta } = attrs; + const program = env().getBool('WEBGL_PACK_NORMALIZATION') ? + new LRNPackedProgram(x.shape, depthRadius, bias, alpha, beta) : + new LRNProgram(x.shape, depthRadius, bias, alpha, beta); + return backend.runWebGLProgram(program, [x], x.dtype); + }; + // tslint:disable-next-line: variable-name + const LRNConfig = { + kernelName: LRN, + backendName: 'webgl', + kernelFunc: lrn + }; + + /** + * @license + * Copyright 2018 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + class LRNGradProgram { + constructor(inputShape, depthRadius, bias, alpha, beta) { + this.variableNames = ['inputImage', 'outputImage', 'dy']; + this.outputShape = []; + this.outputShape = inputShape; + this.depth = inputShape[3]; + this.depthRadius = depthRadius; + this.bias = bias; + this.alpha = alpha; + this.beta = beta; + this.userCode = ` + void main() { + ivec4 coords = getOutputCoords(); + int b = coords[0]; + int r = coords[1]; + int c = coords[2]; + + float result = 0.0; + for (int d = 0; d < ${this.depth}; ++d) { + int depthBegin = int(max(0.0, float(d - ${depthRadius}))); + int depthEnd = int(min(float(${this.depth}), + float(d + ${depthRadius} + 1))); + + const int MIN_DEPTH_BEGIN = 0; + const int MAX_DEPTH_END = ${this.depth}; + + float norm = 0.0; + for (int k = MIN_DEPTH_BEGIN; k < MAX_DEPTH_END; ++k) { + if (k < depthBegin){ + continue; + } + else if (k >= depthBegin && k < depthEnd) { + norm += getInputImage(b, r, c, k) * getInputImage(b, r, c, k); + } + else { + break; + } + } + + norm = float(${alpha}) * norm + float(${bias}); + + for(int k = MIN_DEPTH_BEGIN; k < MAX_DEPTH_END; ++k){ + if (k < depthBegin){ + continue; + } + else if (k >= depthBegin && k < depthEnd){ + float dyi = -2.0 * float(${alpha}) + * float(${beta}) + * getInputImage(b, r, c, k) * getOutputImage(b, r, c, d) + / norm; + if (k == d) { + dyi += pow(norm, -1.0 * ${beta}); + } + if (k == coords[3]) { + dyi *= getDy(b, r, c, d); + result += dyi; + } + } + else { + break; + } 
+ } + } + setOutput(result); + } + `; + } + } + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + const lrnGrad = (args) => { + const { inputs, backend, attrs } = args; + const { x, y, dy } = inputs; + const { depthRadius, bias, alpha, beta } = attrs; + const program = new LRNGradProgram(x.shape, depthRadius, bias, alpha, beta); + return backend.runWebGLProgram(program, [x, y, dy], x.dtype); + }; + // tslint:disable-next-line: variable-name + const LRNGradConfig = { + kernelName: LRNGrad, + backendName: 'webgl', + kernelFunc: lrnGrad + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ + function maxImpl(x, reduceShape, outShape, backend) { + const inSize = sizeFromShape(reduceShape); + const xSize = sizeFromShape(x.shape); + const batchSize = xSize / inSize; + const reshapedInput = reshape({ inputs: { x }, attrs: { shape: [batchSize, inSize] }, backend }); + const reduced = reduce(reshapedInput, x.dtype, 'max', backend); + const reshapedOutput = reshape({ inputs: { x: reduced }, attrs: { shape: outShape }, backend }); + backend.disposeIntermediateTensorInfo(reshapedInput); + backend.disposeIntermediateTensorInfo(reduced); + return reshapedOutput; + } + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ + function max(args) { + const { inputs, backend, attrs } = args; + const { x } = inputs; + const { reductionIndices, keepDims } = attrs; + const xRank = x.shape.length; + const origAxes = parseAxisParam(reductionIndices, x.shape); + let axes = origAxes; + const permutedAxes = getAxesPermutation(axes, xRank); + const maxInputIsTransposed = permutedAxes != null; + const shouldExecuteOnCPU = backend.shouldExecuteOnCPU([x]); + let maxInput = x; + if (maxInputIsTransposed) { + if (shouldExecuteOnCPU) { + const xTexData = backend.texData.get(maxInput.dataId); + const values = xTexData.values; + const newShape = new Array(xRank); + for (let i = 0; i < newShape.length; i++) { + newShape[i] = x.shape[permutedAxes[i]]; + } + const maxInputValues = transposeImplCPU(values, x.shape, x.dtype, permutedAxes, newShape); + maxInput = backend.makeTensorInfo(newShape, x.dtype); + const maxInputData = backend.texData.get(maxInput.dataId); + maxInputData.values = maxInputValues; + } + else { + maxInput = transposeImpl(x, permutedAxes, backend); + } + axes = getInnerMostAxes(axes.length, xRank); + } + assertAxesAreInnerMostDims('max', axes, xRank); + const [maxOutShape, reduceShape] = computeOutAndReduceShapes(maxInput.shape, axes); + let outShape = maxOutShape; + if (keepDims) { + // rather than reshape at the end, set the target shape here. 
+ outShape = expandShapeToKeepDim(maxOutShape, origAxes); + } + let out; + if (shouldExecuteOnCPU) { + const xTexData = backend.texData.get(maxInput.dataId); + const values = xTexData.values; + const outValues = maxImplCPU(values, sizeFromShape(reduceShape), outShape, x.dtype); + out = backend.makeTensorInfo(outShape, x.dtype); + const outData = backend.texData.get(out.dataId); + outData.values = outValues; + } + else { + out = maxImpl(maxInput, reduceShape, outShape, backend); + } + if (maxInputIsTransposed) { + backend.disposeIntermediateTensorInfo(maxInput); + } + return out; + } + const maxConfig = { + kernelName: Max, + backendName: 'webgl', + kernelFunc: max + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ + const MAXIMUM = CHECK_NAN_SNIPPET + ` + return max(a, b); +`; + const MAXIMUM_PACKED = ` + vec4 result = vec4(max(a, b)); + bvec4 isNaNA = isnan(a); + bvec4 isNaNB = isnan(b); + bvec4 isNaN = bvec4(isNaNA.x || isNaNB.x, isNaNA.y || isNaNB.y, isNaNA.z || isNaNB.z, isNaNA.w || isNaNB.w); + ` + + CHECK_NAN_SNIPPET_PACKED + ` + return result; +`; + const maximum = binaryKernelFunc({ + opSnippet: MAXIMUM, + packedOpSnippet: MAXIMUM_PACKED, + cpuKernelImpl: maximumImplCPU + }); + const maximumConfig = { + kernelName: Maximum$1, + backendName: 'webgl', + kernelFunc: maximum + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + function maxPool(args) { + const { inputs, backend, attrs } = args; + const { x } = inputs; + assertNotComplex(x, 'maxPool'); + const { filterSize, strides, pad, dimRoundingMode } = attrs; + const dilations = 1; + assert$1(eitherStridesOrDilationsAreOne(strides, dilations), () => 'Error in maxPool: Either strides or dilations must be 1. 
' + + `Got strides ${strides} and dilations '${dilations}'`); + const convInfo = computePool2DInfo(x.shape, filterSize, strides, dilations, pad, dimRoundingMode); + if (convInfo.filterWidth === 1 && convInfo.filterHeight === 1 && + arraysEqual(convInfo.inShape, convInfo.outShape)) { + return identity({ inputs: { x }, backend }); + } + const maxPoolProgram = new Pool2DProgram(convInfo, 'max', false); + return backend.runWebGLProgram(maxPoolProgram, [x], x.dtype); + } + const maxPoolConfig = { + kernelName: MaxPool, + backendName: 'webgl', + kernelFunc: maxPool + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + function maxPool3d(args) { + const { inputs, backend, attrs } = args; + const { x } = inputs; + const { filterSize, strides, pad, dataFormat, dimRoundingMode } = attrs; + const dilations = [1, 1, 1]; + const convInfo = computePool3DInfo(x.shape, filterSize, strides, dilations, pad, dimRoundingMode, dataFormat); + const maxPoolProgram = new Pool3DProgram(convInfo, 'max', false); + return backend.runWebGLProgram(maxPoolProgram, [x], x.dtype); + } + const maxPool3DConfig = { + kernelName: MaxPool3D, + backendName: 'webgl', + kernelFunc: maxPool3d + }; + + /** + * @license + * Copyright 2017 Google LLC. All Rights Reserved. 
+ * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + class MaxPool2DBackpropProgram { + constructor(convInfo) { + this.variableNames = ['dy', 'maxPos']; + this.outputShape = convInfo.inShape; + const strideHeight = convInfo.strideHeight; + const strideWidth = convInfo.strideWidth; + const dilationHeight = convInfo.dilationHeight; + const effectiveFilterHeight = convInfo.effectiveFilterHeight; + const effectiveFilterWidth = convInfo.effectiveFilterWidth; + const padTop = effectiveFilterHeight - 1 - convInfo.padInfo.top; + const padLeft = effectiveFilterWidth - 1 - convInfo.padInfo.left; + const lastIndex = effectiveFilterHeight * effectiveFilterWidth - 1; + this.userCode = ` + const ivec2 pads = ivec2(${padTop}, ${padLeft}); + + void main() { + ivec4 coords = getOutputCoords(); + int b = coords[0]; + int d = coords[3]; + + ivec2 dyRCCorner = coords.yz - pads; + int dyRCorner = dyRCCorner.x; + int dyCCorner = dyRCCorner.y; + + // Convolve dy(?, ?, d) with pos mask(:, :, d) to get dx(xR, xC, d). + // ? = to be determined. : = across all values in that axis. 
+ float dotProd = 0.0; + for (int wR = 0; wR < ${effectiveFilterHeight}; + wR += ${dilationHeight}) { + float dyR = float(dyRCorner + wR) / ${strideHeight}.0; + + if (dyR < 0.0 || dyR >= ${convInfo.outHeight}.0 || fract(dyR) > 0.0) { + continue; + } + int idyR = int(dyR); + + for (int wC = 0; wC < ${effectiveFilterWidth}; wC++) { + float dyC = float(dyCCorner + wC) / ${strideWidth}.0; + + if (dyC < 0.0 || dyC >= ${convInfo.outWidth}.0 || + fract(dyC) > 0.0) { + continue; + } + int idyC = int(dyC); + + float dyValue = getDy(b, idyR, idyC, d); + int maxPosValue = ${lastIndex} - int(getMaxPos(b, idyR, idyC, d)); + + // Get the current value, check it against the value from the + // position matrix. + int curPosValue = wR * ${effectiveFilterWidth} + wC; + float mask = float(maxPosValue == curPosValue ? 1.0 : 0.0); + + dotProd += dyValue * mask; + } + } + setOutput(dotProd); + } + `; + } + } + class MaxPool3DBackpropProgram { + constructor(convInfo) { + this.variableNames = ['dy', 'maxPos']; + this.outputShape = convInfo.inShape; + const strideDepth = convInfo.strideDepth; + const strideHeight = convInfo.strideHeight; + const strideWidth = convInfo.strideWidth; + const dilationDepth = convInfo.dilationDepth; + const dilationHeight = convInfo.dilationHeight; + const dilationWidth = convInfo.dilationWidth; + const effectiveFilterDepth = convInfo.effectiveFilterDepth; + const effectiveFilterHeight = convInfo.effectiveFilterHeight; + const effectiveFilterWidth = convInfo.effectiveFilterWidth; + const padFront = effectiveFilterDepth - 1 - convInfo.padInfo.front; + const padTop = effectiveFilterHeight - 1 - convInfo.padInfo.top; + const padLeft = effectiveFilterWidth - 1 - convInfo.padInfo.left; + const lastIndex = effectiveFilterDepth * effectiveFilterHeight * effectiveFilterWidth - 1; + this.userCode = ` + const ivec3 pads = ivec3(${padFront}, ${padTop}, ${padLeft}); + + void main() { + ivec5 coords = getOutputCoords(); + int batch = coords.x; + int ch = coords.u; + + ivec3 
dyCorner = ivec3(coords.y, coords.z, coords.w) - pads; + int dyDCorner = dyCorner.x; + int dyRCorner = dyCorner.y; + int dyCCorner = dyCorner.z; + + // Convolve dy(?, ?, ?, ch) with pos mask(:, :, :, d) to get + // dx(xD, xR, xC, ch). + // ? = to be determined. : = across all values in that axis. + float dotProd = 0.0; + + for (int wD = 0; wD < ${effectiveFilterDepth}; + wD += ${dilationDepth}) { + float dyD = float(dyDCorner + wD) / ${strideDepth}.0; + + if (dyD < 0.0 || dyD >= ${convInfo.outDepth}.0 || fract(dyD) > 0.0) { + continue; + } + int idyD = int(dyD); + + for (int wR = 0; wR < ${effectiveFilterHeight}; + wR += ${dilationHeight}) { + float dyR = float(dyRCorner + wR) / ${strideHeight}.0; + + if (dyR < 0.0 || dyR >= ${convInfo.outHeight}.0 || + fract(dyR) > 0.0) { + continue; + } + int idyR = int(dyR); + + for (int wC = 0; wC < ${effectiveFilterWidth}; + wC += ${dilationWidth}) { + float dyC = float(dyCCorner + wC) / ${strideWidth}.0; + + if (dyC < 0.0 || dyC >= ${convInfo.outWidth}.0 || + fract(dyC) > 0.0) { + continue; + } + int idyC = int(dyC); + + float dyValue = getDy(batch, idyD, idyR, idyC, ch); + int maxPosValue = ${lastIndex} - + int(getMaxPos(batch, idyD, idyR, idyC, ch)); + + // Get the current value, check it against the value from the + // position matrix. + int curPosValue = + wD * ${effectiveFilterHeight} * ${effectiveFilterWidth} + + wR * ${effectiveFilterWidth} + wC; + float mask = float(maxPosValue == curPosValue ? 1.0 : 0.0); + + dotProd += dyValue * mask; + } + } + } + setOutput(dotProd); + } + `; + } + } + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + function maxPool3DGrad(args) { + const { inputs, backend, attrs } = args; + const { dy, input } = inputs; + const x = input; + const { filterSize, strides, pad, dimRoundingMode } = attrs; + const dilations = [1, 1, 1]; + const convInfo = computePool3DInfo(x.shape, filterSize, strides, dilations, pad, dimRoundingMode); + const maxPool3dPositionsProgram = new Pool3DProgram(convInfo, 'max', true /* get positions */); + const maxPool3dPositions = backend.runWebGLProgram(maxPool3dPositionsProgram, [x], x.dtype); + const maxPoolBackpropProgram = new MaxPool3DBackpropProgram(convInfo); + const result = backend.runWebGLProgram(maxPoolBackpropProgram, [dy, maxPool3dPositions], x.dtype); + backend.disposeIntermediateTensorInfo(maxPool3dPositions); + return result; + } + const maxPool3DGradConfig = { + kernelName: MaxPool3DGrad, + backendName: 'webgl', + kernelFunc: maxPool3DGrad + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + function maxPoolGrad(args) { + const { inputs, backend, attrs } = args; + const { dy, input, output } = inputs; + const x = input; + assertNotComplex([input, output], 'maxPoolGrad'); + const { filterSize, strides, pad, dimRoundingMode } = attrs; + const convInfo = computePool2DInfo(x.shape, filterSize, strides, 1 /* dilations */, pad, dimRoundingMode); + const getPositions = true; + const maxPoolPositionsProgram = new Pool2DProgram(convInfo, 'max', getPositions); + const maxPoolPositions = backend.runWebGLProgram(maxPoolPositionsProgram, [x], x.dtype); + const maxPoolBackPropProgram = new MaxPool2DBackpropProgram(convInfo); + const result = backend.runWebGLProgram(maxPoolBackPropProgram, [dy, maxPoolPositions], x.dtype); + backend.disposeIntermediateTensorInfo(maxPoolPositions); + return result; + } + const maxPoolGradConfig = { + kernelName: MaxPoolGrad, + backendName: 'webgl', + kernelFunc: maxPoolGrad + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ + function maxPoolWithArgmaxImpl(x, includeBatchInIndex, convInfo, backend) { + let program = new Pool2DProgram(convInfo, 'max', false); + const poolOutput = backend.runWebGLProgram(program, [x], 'float32'); + program = new Pool2DProgram(convInfo, 'max', true, true, includeBatchInIndex); + const indexOutput = backend.runWebGLProgram(program, [x], 'float32'); + return [poolOutput, indexOutput]; + } + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + const maxPoolWithArgmaxConfig = { + kernelName: MaxPoolWithArgmax, + backendName: 'webgl', + kernelFunc: ({ inputs, attrs, backend }) => { + const { x } = inputs; + const { filterSize, strides, pad, includeBatchInIndex } = attrs; + const webglBackend = backend; + assert$1(x.shape.length === 4, () => `Error in maxPool: input must be rank 4 but got rank ${x.shape.length}.`); + const dilations = [1, 1]; + assert$1(eitherStridesOrDilationsAreOne(strides, dilations), () => 'Error in maxPool: Either strides or dilations must be 1. 
' + + `Got strides ${strides} and dilations '${dilations}'`); + const convInfo = computePool2DInfo(x.shape, filterSize, strides, dilations, pad); + const [result, indexes] = maxPoolWithArgmaxImpl(x, includeBatchInIndex, convInfo, webglBackend); + return [result, indexes]; + } + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + function meanImpl(x, reduceShape, outShape, backend) { + const inSize = sizeFromShape(reduceShape); + const xSize = sizeFromShape(x.shape); + const batchSize = xSize / inSize; + const reshapedInput = reshape({ inputs: { x }, attrs: { shape: [batchSize, inSize] }, backend }); + const reduced = reduce(reshapedInput, 'float32', 'mean', backend); + const reshapedOutput = reshape({ inputs: { x: reduced }, attrs: { shape: outShape }, backend }); + backend.disposeIntermediateTensorInfo(reshapedInput); + backend.disposeIntermediateTensorInfo(reduced); + return reshapedOutput; + } + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + const meanConfig = { + kernelName: Mean, + backendName: 'webgl', + kernelFunc: ({ inputs, attrs, backend }) => { + const { x } = inputs; + const { keepDims, axis } = attrs; + const webglBackend = backend; + const xRank = x.shape.length; + const origAxes = parseAxisParam(axis, x.shape); + let axes = origAxes; + const permutedAxes = getAxesPermutation(axes, xRank); + const meanInputIsTransposed = permutedAxes != null; + const shouldExecuteOnCPU = webglBackend.shouldExecuteOnCPU([x]); + const intermediates = []; + let meanInput = x; + if (meanInputIsTransposed) { + if (shouldExecuteOnCPU) { + const xTexData = webglBackend.texData.get(meanInput.dataId); + const values = xTexData.values; + const newShape = new Array(xRank); + for (let i = 0; i < newShape.length; i++) { + newShape[i] = x.shape[permutedAxes[i]]; + } + const meanInputValues = transposeImplCPU(values, x.shape, x.dtype, permutedAxes, newShape); + meanInput = webglBackend.makeTensorInfo(newShape, x.dtype); + const meanInputData = webglBackend.texData.get(meanInput.dataId); + meanInputData.values = meanInputValues; + } + else { + meanInput = transposeImpl(x, permutedAxes, webglBackend); + } + intermediates.push(meanInput); + axes = getInnerMostAxes(axes.length, xRank); + } + assertAxesAreInnerMostDims('sum', axes, xRank); + const [meanOutShape, reduceShape] = computeOutAndReduceShapes(meanInput.shape, axes); + let outShape = meanOutShape; + if (keepDims) { + // rather than reshape at the end, set the 
target shape here. + outShape = expandShapeToKeepDim(meanOutShape, origAxes); + } + const out = meanImpl(meanInput, reduceShape, outShape, webglBackend); + for (const i of intermediates) { + webglBackend.disposeIntermediateTensorInfo(i); + } + return out; + } + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + function min(args) { + const { inputs, backend, attrs } = args; + const { x } = inputs; + const { axis, keepDims } = attrs; + const xRank = x.shape.length; + const origAxes = parseAxisParam(axis, x.shape); + let axes = origAxes; + const permutedAxes = getAxesPermutation(axes, xRank); + let permutedX = x; + if (permutedAxes != null) { + permutedX = transpose({ inputs: { x }, backend, attrs: { perm: permutedAxes } }); + axes = getInnerMostAxes(axes.length, x.shape.length); + } + assertAxesAreInnerMostDims('min', axes, xRank); + const [outShape, reduceShape] = computeOutAndReduceShapes(permutedX.shape, axes); + const inSize = sizeFromShape(reduceShape); + const a2D = reshape({ inputs: { x: permutedX }, backend, attrs: { shape: [-1, inSize] } }); + const reduced = reduce(a2D, a2D.dtype, 'min', backend); + let res; + if (keepDims) { + const newShape = expandShapeToKeepDim(outShape, origAxes); + res = reshape({ inputs: { x: reduced }, backend, attrs: { shape: newShape } }); + } + else { + res = 
reshape({ inputs: { x: reduced }, backend, attrs: { shape: outShape } }); + } + backend.disposeIntermediateTensorInfo(a2D); + backend.disposeIntermediateTensorInfo(reduced); + if (permutedAxes != null) { + backend.disposeIntermediateTensorInfo(permutedX); + } + return res; + } + const minConfig = { + kernelName: Min, + backendName: 'webgl', + kernelFunc: min + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + const MINIMUM = CHECK_NAN_SNIPPET + ` + return min(a, b); +`; + const MINIMUM_PACKED = ` + vec4 result = vec4(min(a, b)); + bvec4 isNaNA = isnan(a); + bvec4 isNaNB = isnan(b); + bvec4 isNaN = bvec4(isNaNA.x || isNaNB.x, isNaNA.y || isNaNB.y, isNaNA.z || isNaNB.z, isNaNA.w || isNaNB.w); + ` + + CHECK_NAN_SNIPPET_PACKED + ` + return result; +`; + const minimum = binaryKernelFunc({ + opSnippet: MINIMUM, + packedOpSnippet: MINIMUM_PACKED, + cpuKernelImpl: minimumImplCPU + }); + const minimumConfig = { + kernelName: Minimum$1, + backendName: 'webgl', + kernelFunc: minimum + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + class MirrorPadProgram { + constructor(xShape, paddings, mode) { + this.variableNames = ['x']; + this.outputShape = paddings.map((p, i) => p[0] /* beforePad */ + xShape[i] + p[1] /* afterPad */); + const rank = xShape.length; + const dtype = getCoordsDataType(rank); + const start = paddings.map(p => p[0]).join(','); + const end = paddings.map((p, i) => p[0] + xShape[i]).join(','); + const unpackedCoords = ['coords[0]', 'coords[1]', 'coords[2]', 'coords[3]'].slice(0, rank); + const offset = mode === 'reflect' ? 0 : 1; + if (rank === 1) { + this.userCode = ` + int start = ${start}; + int end = ${end}; + + void main() { + int outC = getOutputCoords(); + if (outC < start) { + outC = start * 2 - outC - ${offset}; + } else if(outC >= end) { + outC = (end - 1) * 2 - outC + ${offset}; + } + setOutput(getX(outC - start)); + } + `; + return; + } + this.userCode = ` + ${dtype} start = ${dtype}(${start}); + ${dtype} end = ${dtype}(${end}); + + void main() { + ${dtype} outC = getOutputCoords(); + for (int i = 0; i < ${rank}; i++) { + if (outC[i] < start[i]) { + outC[i] = start[i] * 2 - outC[i] - ${offset}; + } else if(outC[i] >= end[i]) { + outC[i] = (end[i] - 1) * 2 - outC[i] + ${offset}; + } + } + ${dtype} coords = outC - start; + setOutput(getX(${unpackedCoords})); + } + `; + } + } + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. 
+ * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + /** + * Example shader code for + * `mirrorPad(tf.tensor1d([1, 2, 3], 'int32'), [[2, 2]], 'reflect')` + * ``` + * const int start = int(2); + * const int end = int(5); + * + * void main() { + * int outputLoc = getOutputCoords(); + * vec4 result = vec4(0.); + * + * int rc = outputLoc; + * + * int source = rc; + * if (source < start) { + * source = start * 2 - source - 0; + * } else if (source >= end) { + * source = (end - 1) * 2 - source + 0; + * } + * source -= start; + * + * result[0] = getChannel(getX(source), source); + * rc += 1; + * if(rc < 6) { + * int source = rc; + * if (source < start) { + * source = start * 2 - source - 0; + * } else if (source >= end) { + * source = (end - 1) * 2 - source + 0; + * } + * source -= start; + * + * result[1] = getChannel(getX(source), source); + * } + * + * setOutput(result); + * } + * ``` + */ + class MirrorPadPackedProgram { + constructor(xShape, paddings, mode) { + this.variableNames = ['x']; + this.packedInputs = true; + this.packedOutput = true; + this.outputShape = paddings.map((p, i) => p[0] /* beforePad */ + xShape[i] + p[1] /* afterPad */); + const rank = xShape.length; + const dtype = getCoordsDataType(rank); + const start = paddings.map(p => p[0]).join(','); + const end = paddings.map((p, i) => p[0] + xShape[i]).join(','); + const coords = getChannels('rc', rank); + const 
source = getChannels('source', rank); + const cLimit = `${coords[rank - 1]} < ${this.outputShape[rank - 1]}`; + const innerDims = rank === 1 ? 'source' : `vec2(${source.slice(-2).join()})`; + const offset = mode === 'reflect' ? 0 : 1; + let mainLoop = ''; + if (rank === 1) { + const padSetup = ` + ${dtype} source = rc; + if (source < start) { + source = start * 2 - source - ${offset}; + } else if (source >= end) { + source = (end - 1) * 2 - source + ${offset}; + } + source -= start; + `; + mainLoop = ` + ${dtype} rc = outputLoc; + ${padSetup} + result[0] = getChannel(getX(${source.join()}), ${innerDims}); + ${coords[rank - 1]} += 1; + if(${cLimit}) { + ${padSetup} + result[1] = getChannel(getX(${source.join()}), ${innerDims}); + } + `; + } + else { + const padSetup = ` + ${dtype} source = rc; + ${dtype} lt = ${dtype}(lessThan(source, start)); + ${dtype} gte = ${dtype}(greaterThanEqual(source, end)); + ${dtype} orig = 1 - (lt + gte); + source = orig * source + + lt * (start * 2 - source - ${offset}) + + gte * ((end - 1) * 2 - source + ${offset}); + source -= start; + `; + mainLoop = ` + ${dtype} rc = outputLoc; + ${padSetup} + result[0] = getChannel(getX(${source.join()}), ${innerDims}); + ${coords[rank - 1]} += 1; + if(${cLimit}) { + ${padSetup} + result[1] = getChannel(getX(${source.join()}), ${innerDims}); + } + rc = outputLoc; + ${coords[rank - 2]} += 1; + if(${coords[rank - 2]} < ${this.outputShape[rank - 2]}) { + ${padSetup} + result[2] = getChannel(getX(${source.join()}), ${innerDims}); + ${coords[rank - 1]} += 1; + if(${cLimit}) { + ${padSetup} + result[3] = getChannel(getX(${source.join()}), ${innerDims}); + } + } + `; + } + this.userCode = ` + const ${dtype} start = ${dtype}(${start}); + const ${dtype} end = ${dtype}(${end}); + + void main() { + ${dtype} outputLoc = getOutputCoords(); + vec4 result = vec4(0.); + ${mainLoop} + setOutput(result); + } + `; + } + } + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. 
+ * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + const mirrorPadKernelFunc = ({ inputs, backend, attrs }) => { + const { x } = inputs; + const { paddings, mode } = attrs; + const program = env().getBool('WEBGL_PACK_ARRAY_OPERATIONS') ? + new MirrorPadPackedProgram(x.shape, paddings, mode) : + new MirrorPadProgram(x.shape, paddings, mode); + const output = backend.runWebGLProgram(program, [x], x.dtype); + return output; + }; + const mirrorPadConfig = { + kernelName: MirrorPad, + backendName: 'webgl', + kernelFunc: mirrorPadKernelFunc, + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ + const MOD = `if (b == 0.0) return NAN; + return mod(a, b);`; + const MOD_PACKED = ` + vec4 result = mod(a, b); + bvec4 isNaN = equal(b, vec4(0.0)); + ` + + CHECK_NAN_SNIPPET_PACKED + ` + return result; +`; + const mod = binaryKernelFunc({ + opSnippet: MOD, + packedOpSnippet: MOD_PACKED, + }); + const modConfig = { + kernelName: Mod, + backendName: 'webgl', + kernelFunc: mod + }; + + /** + * @license + * Copyright 2017 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + class MultinomialProgram { + constructor(batchSize, numOutcomes, numSamples) { + this.variableNames = ['probs']; + this.customUniforms = [{ name: 'seed', type: 'float' }]; + this.outputShape = [batchSize, numSamples]; + this.userCode = ` + void main() { + ivec2 coords = getOutputCoords(); + int batch = coords[0]; + + float r = random(seed); + float cdf = 0.0; + + for (int i = 0; i < ${numOutcomes - 1}; i++) { + cdf += getProbs(batch, i); + + if (r < cdf) { + setOutput(float(i)); + return; + } + } + + // If no other event happened, last event happened. + setOutput(float(${numOutcomes - 1})); + } + `; + } + } + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. 
+ * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + // Without the equality check div produces 0.9999 for a = b, which when + // floored can cause errors. + const DIV = ` +if (a == b) { + return 1.0; +}; +return a / b;`; + // We do the same as in ./binaryop_gpu, with vec4 and ivec4. + // On Linux, the vectorized implementation produces NaNs when a and b are 0. + const DIV_PACKED = ` + // vec4 one = vec4(equal(a, b)); + // return one + (vec4(1.0) - one) * a / b; + vec4 result = a / b; + if(a.x == b.x) { + result.x = 1.; + } + if(a.y == b.y) { + result.y = 1.; + } + if(a.z == b.z) { + result.z = 1.; + } + if(a.w == b.w) { + result.w = 1.; + } + + return result; +`; + const realDiv = binaryKernelFunc({ opSnippet: DIV, packedOpSnippet: DIV_PACKED, checkOutOfBounds: true }); + const realDivConfig = { + kernelName: RealDiv, + backendName: 'webgl', + kernelFunc: realDiv, + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + const SUB = 'return a - b;'; + const sub = binaryKernelFunc({ + opSnippet: SUB, + packedOpSnippet: SUB, + supportsComplex: true, + cpuKernelImpl: subImplCPU + }); + const subConfig = { + kernelName: Sub, + backendName: 'webgl', + kernelFunc: sub + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ + function softmax(args) { + const { inputs, backend, attrs } = args; + const { logits } = inputs; + const { dim } = attrs; + const axes = parseAxisParam([dim], logits.shape); + const maxLogit = max({ + inputs: { x: logits }, + backend, + attrs: { reductionIndices: axes, keepDims: false } + }); + const expandedShape = expandShapeToKeepDim(maxLogit.shape, axes); + const maxLogitsReshaped = reshape({ inputs: { x: maxLogit }, backend, attrs: { shape: expandedShape } }); + const a = sub({ inputs: { a: logits, b: maxLogitsReshaped }, backend }); + const b = exp({ inputs: { x: a }, backend }); + const sumExp = sum({ inputs: { x: b }, backend, attrs: { axis: axes, keepDims: false } }); + const sumExpReshaped = reshape({ inputs: { x: sumExp }, backend, attrs: { shape: expandedShape } }); + const res = realDiv({ inputs: { a: b, b: sumExpReshaped }, backend }); + backend.disposeIntermediateTensorInfo(maxLogit); + backend.disposeIntermediateTensorInfo(maxLogitsReshaped); + backend.disposeIntermediateTensorInfo(a); + backend.disposeIntermediateTensorInfo(b); + backend.disposeIntermediateTensorInfo(sumExp); + backend.disposeIntermediateTensorInfo(sumExpReshaped); + return res; + } + const softmaxConfig = { + kernelName: Softmax$2, + backendName: 'webgl', + kernelFunc: softmax + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + function multinomial(args) { + const { inputs, backend, attrs } = args; + const { logits } = inputs; + const { numSamples, seed, normalized } = attrs; + const probs = normalized ? + logits : + softmax({ inputs: { logits }, backend, attrs: { dim: logits.shape.length - 1 } }); + const batchSize = probs.shape[0]; + const numOutcomes = probs.shape[1]; + const program = new MultinomialProgram(batchSize, numOutcomes, numSamples); + const customValues = [[seed]]; + const res = backend.runWebGLProgram(program, [probs], 'int32', customValues); + if (!normalized) { + backend.disposeIntermediateTensorInfo(probs); + } + return res; + } + const multinomialConfig = { + kernelName: Multinomial, + backendName: 'webgl', + kernelFunc: multinomial + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + const NEG = CHECK_NAN_SNIPPET$1 + ` + return -x; +`; + const NEG_PACKED = ` + vec4 result = -x; + bvec4 isNaN = isnan(x); + + result.r = isNaN.r ? x.r : result.r; + result.g = isNaN.g ? x.g : result.g; + result.b = isNaN.b ? x.b : result.b; + result.a = isNaN.a ? 
x.a : result.a; + + return result; +`; + // This doesn't use unaryKernelFunc because negImplCPU is not of type + // SimpleUnaryKernelImplCPU. + function neg(args) { + const { inputs, backend } = args; + const { x } = inputs; + if (backend.shouldExecuteOnCPU([x])) { + const xData = backend.texData.get(x.dataId); + const [outValues, newShape] = negImplCPU(xData.values, x.shape, x.dtype); + return backend.makeTensorInfo(newShape, x.dtype, outValues); + } + let program; + if (env().getBool('WEBGL_PACK_UNARY_OPERATIONS')) { + program = new UnaryOpPackedProgram(x.shape, NEG_PACKED); + } + else { + program = new UnaryOpProgram(x.shape, NEG); + } + return backend.runWebGLProgram(program, [x], x.dtype); + } + const negConfig = { + kernelName: Neg, + backendName: 'webgl', + kernelFunc: neg + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + const nonMaxSuppressionV3Impl = nonMaxSuppressionV3Impl$2; + function nonMaxSuppressionV3(args) { + warn('tf.nonMaxSuppression() in webgl locks the UI thread. 
' + + 'Call tf.nonMaxSuppressionAsync() instead'); + const { inputs, backend, attrs } = args; + const { boxes, scores } = inputs; + const { maxOutputSize, iouThreshold, scoreThreshold } = attrs; + const boxesVals = backend.readSync(boxes.dataId); + const scoresVals = backend.readSync(scores.dataId); + const { selectedIndices } = nonMaxSuppressionV3Impl(boxesVals, scoresVals, maxOutputSize, iouThreshold, scoreThreshold); + return backend.makeTensorInfo([selectedIndices.length], 'int32', new Int32Array(selectedIndices)); + } + const nonMaxSuppressionV3Config = { + kernelName: NonMaxSuppressionV3, + backendName: 'webgl', + kernelFunc: nonMaxSuppressionV3 + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + const nonMaxSuppressionV4Impl = nonMaxSuppressionV4Impl$2; + function nonMaxSuppressionV4(args) { + warn('tf.nonMaxSuppression() in webgl locks the UI thread. 
' + + 'Call tf.nonMaxSuppressionAsync() instead'); + const { inputs, backend, attrs } = args; + const { boxes, scores } = inputs; + const { maxOutputSize, iouThreshold, scoreThreshold, padToMaxOutputSize } = attrs; + const boxesVals = backend.readSync(boxes.dataId); + const scoresVals = backend.readSync(scores.dataId); + const { selectedIndices, validOutputs } = nonMaxSuppressionV4Impl(boxesVals, scoresVals, maxOutputSize, iouThreshold, scoreThreshold, padToMaxOutputSize); + return [ + backend.makeTensorInfo([selectedIndices.length], 'int32', new Int32Array(selectedIndices)), + backend.makeTensorInfo([], 'int32', new Int32Array([validOutputs])) + ]; + } + const nonMaxSuppressionV4Config = { + kernelName: NonMaxSuppressionV4, + backendName: 'webgl', + kernelFunc: nonMaxSuppressionV4 + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + const nonMaxSuppressionV5Impl = nonMaxSuppressionV5Impl$2; + function nonMaxSuppressionV5(args) { + warn('tf.nonMaxSuppression() in webgl locks the UI thread. 
' + + 'Call tf.nonMaxSuppressionAsync() instead'); + const { inputs, backend, attrs } = args; + const { boxes, scores } = inputs; + const { maxOutputSize, iouThreshold, scoreThreshold, softNmsSigma } = attrs; + const boxesVals = backend.readSync(boxes.dataId); + const scoresVals = backend.readSync(scores.dataId); + const maxOutputSizeVal = maxOutputSize; + const iouThresholdVal = iouThreshold; + const scoreThresholdVal = scoreThreshold; + const softNmsSigmaVal = softNmsSigma; + const { selectedIndices, selectedScores } = nonMaxSuppressionV5Impl(boxesVals, scoresVals, maxOutputSizeVal, iouThresholdVal, scoreThresholdVal, softNmsSigmaVal); + return [ + backend.makeTensorInfo([selectedIndices.length], 'int32', new Int32Array(selectedIndices)), + backend.makeTensorInfo([selectedScores.length], 'float32', new Float32Array(selectedScores)) + ]; + } + const nonMaxSuppressionV5Config = { + kernelName: NonMaxSuppressionV5, + backendName: 'webgl', + kernelFunc: nonMaxSuppressionV5 + }; + + /** + * @license + * Copyright 2017 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ + class OneHotProgram { + constructor(numIndices, depth, onValue, offValue) { + this.variableNames = ['indices']; + this.outputShape = [numIndices, depth]; + this.userCode = ` + void main() { + ivec2 coords = getOutputCoords(); + int index = round(getIndices(coords.x)); + setOutput(mix(float(${offValue}), float(${onValue}), + float(index == coords.y))); + } + `; + } + } + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ + const oneHot = (args) => { + const { inputs, backend, attrs } = args; + const { indices } = inputs; + const { dtype, depth, onValue, offValue } = attrs; + const indicesSize = sizeFromShape(indices.shape); + const program = new OneHotProgram(indicesSize, depth, onValue, offValue); + const reshaped = reshape({ inputs: { x: indices }, backend, attrs: { shape: [indicesSize] } }); + const result = backend.runWebGLProgram(program, [reshaped], dtype); + backend.disposeIntermediateTensorInfo(reshaped); + const outShape = [...indices.shape, depth]; + const out = reshape({ inputs: { x: result }, backend, attrs: { shape: outShape } }); + backend.disposeIntermediateTensorInfo(result); + return out; + }; + const oneHotConfig = { + kernelName: OneHot, + backendName: 'webgl', + kernelFunc: oneHot + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ + function zerosLike(args) { + const { inputs, backend } = args; + const { x } = inputs; + if (x.dtype === 'complex64') { + const realPart = real({ inputs: { input: x }, backend }); + const r = zerosLike({ inputs: { x: realPart }, backend }); + const imagPart = imag({ inputs: { input: x }, backend }); + const i = zerosLike({ inputs: { x: imagPart }, backend }); + const result = complex({ inputs: { real: r, imag: i }, backend }); + backend.disposeIntermediateTensorInfo(realPart); + backend.disposeIntermediateTensorInfo(r); + backend.disposeIntermediateTensorInfo(imagPart); + backend.disposeIntermediateTensorInfo(i); + return result; + } + else { + return fill({ + attrs: { + shape: x.shape, + dtype: x.dtype, + value: x.dtype === 'string' ? '' : 0 + }, + backend + }); + } + } + const zerosLikeConfig = { + kernelName: ZerosLike, + backendName: 'webgl', + kernelFunc: zerosLike + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ + function onesLike(args) { + const { inputs, backend } = args; + const { x } = inputs; + if (x.dtype === 'string') { + throw new Error('onesLike is not supported under string dtype'); + } + else if (x.dtype === 'complex64') { + const realPart = real({ inputs: { input: x }, backend }); + const r = onesLike({ inputs: { x: realPart }, backend }); + const imagPart = imag({ inputs: { input: x }, backend }); + const i = zerosLike({ inputs: { x: imagPart }, backend }); + const result = complex({ inputs: { real: r, imag: i }, backend }); + backend.disposeIntermediateTensorInfo(realPart); + backend.disposeIntermediateTensorInfo(r); + backend.disposeIntermediateTensorInfo(imagPart); + backend.disposeIntermediateTensorInfo(i); + return result; + } + else { + // TODO(cais, smilkov): Add WebGL shader for onesLike: + // https://github.com/tensorflow/tfjs/issues/1293 + return fill({ attrs: { shape: x.shape, dtype: x.dtype, value: 1 }, backend }); + } + } + const onesLikeConfig = { + kernelName: OnesLike, + backendName: 'webgl', + kernelFunc: onesLike + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ + function pack(args) { + const { inputs, backend, attrs } = args; + const { axis } = attrs; + if (inputs.length === 1) { + return expandDims({ inputs: { input: inputs[0] }, backend, attrs: { dim: axis } }); + } + const shape = inputs[0].shape; + const dtype = inputs[0].dtype; + inputs.forEach(t => { + assertShapesMatch(shape, t.shape, 'All tensors passed to stack must have matching shapes'); + assert$1(dtype === t.dtype, () => 'All tensors passed to stack must have matching dtypes'); + }); + const intermediateTensorInfos = []; + const expandedTensors = inputs.map(t => { + const expandedT = expandDims({ inputs: { input: t }, backend, attrs: { dim: axis } }); + intermediateTensorInfos.push(expandedT); + return expandedT; + }); + const result = concat({ inputs: expandedTensors, backend, attrs: { axis } }); + intermediateTensorInfos.forEach(t => backend.disposeIntermediateTensorInfo(t)); + return result; + } + const packConfig = { + kernelName: Pack, + backendName: 'webgl', + kernelFunc: pack + }; + + /** + * @license + * Copyright 2017 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ + class PadProgram { + constructor(xShape, paddings, constantValue) { + this.variableNames = ['x']; + this.customUniforms = [{ name: 'value', type: 'float' }]; + this.outputShape = paddings.map((p, i) => p[0] /* beforePad */ + xShape[i] + p[1] /* afterPad */); + const rank = xShape.length; + const type = getCoordsDataType(rank); + const start = paddings.map(p => p[0]).join(','); + const end = paddings.map((p, i) => p[0] + xShape[i]).join(','); + const unpackedCoords = ['coords[0]', 'coords[1]', 'coords[2]', 'coords[3]'].slice(0, rank); + if (rank === 1) { + this.userCode = ` + int start = ${start}; + int end = ${end}; + + void main() { + int outC = getOutputCoords(); + if (outC < start || outC >= end) { + setOutput(value); + } else { + setOutput(getX(outC - start)); + } + } + `; + return; + } + this.userCode = ` + ${type} start = ${type}(${start}); + ${type} end = ${type}(${end}); + + void main() { + ${type} outC = getOutputCoords(); + if (any(lessThan(outC, start)) || any(greaterThanEqual(outC, end))) { + setOutput(value); + } else { + ${type} coords = outC - start; + setOutput(getX(${unpackedCoords})); + } + } + `; + } + } + + /** + * @license + * Copyright 2019 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ + class PadPackedProgram { + constructor(xShape, paddings, constantValue) { + this.variableNames = ['x']; + this.packedInputs = true; + this.packedOutput = true; + this.customUniforms = [{ name: 'value', type: 'float' }]; + this.outputShape = paddings.map((p, i) => p[0] /* beforePad */ + xShape[i] + p[1] /* afterPad */); + const rank = xShape.length; + const dtype = getCoordsDataType(rank); + const start = paddings.map(p => p[0]).join(','); + const end = paddings.map((p, i) => p[0] + xShape[i]).join(','); + const coords = getChannels('rc', rank); + const source = getChannels('source', rank); + const cLimit = `${coords[rank - 1]} < ${this.outputShape[rank - 1]}`; + const innerDims = rank === 1 ? 'source' : `vec2(${source.slice(-2).join()})`; + const componentSetup = [ + `${dtype} rc = outputLoc;`, `${coords[rank - 1]} += 1; + if(${cLimit}) { + `, + rank === 1 ? '' : `} + rc = outputLoc; + ${coords[rank - 2]} += 1; + if(${coords[rank - 2]} < ${this.outputShape[rank - 2]}) {`, + rank === 1 ? '' : ` ${coords[rank - 1]} += 1; + if(${cLimit}) {` + ]; + const paddingArea = rank === 1 ? + 'rc < start || rc >= end' : + 'any(lessThan(rc, start)) || any(greaterThanEqual(rc, end))'; + let mainLoop = ''; + for (let i = 0, j = rank === 1 ? 2 : 4; i < j; i++) { + mainLoop += ` + ${componentSetup[i]} + if (${paddingArea}) { + result[${i}] = float(value); + } else { + ${dtype} source = rc - start; + result[${i}] = getChannel(getX(${source.join()}), ${innerDims}); + } + `; + } + mainLoop += (rank === 1 ? `} ` : `}}`); + this.userCode = ` + const ${dtype} start = ${dtype}(${start}); + const ${dtype} end = ${dtype}(${end}); + + void main() { + ${dtype} outputLoc = getOutputCoords(); + vec4 result = vec4(0.); + ${mainLoop} + setOutput(result); + } + `; + } + } + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. 
+ * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + const padV2 = (args) => { + const { inputs, backend, attrs } = args; + const { x } = inputs; + const { paddings, constantValue } = attrs; + if (sizeFromShape(x.shape) === 0) { + // Short-circuit the computation, since x doesn't have value, only + // the shape is used to compute output shape to pad. + const outputShape = paddings.map((p, i) => p[0] /* beforePad */ + x.shape[i] + p[1] /* afterPad */); + return fill({ + backend, + attrs: { shape: outputShape, value: constantValue, dtype: x.dtype } + }); + } + const program = env().getBool('WEBGL_PACK_ARRAY_OPERATIONS') ? + new PadPackedProgram(x.shape, paddings, constantValue) : + new PadProgram(x.shape, paddings, constantValue); + const customValues = [[constantValue]]; + return backend.runWebGLProgram(program, [x], x.dtype, customValues); + }; + const padV2Config = { + kernelName: PadV2, + backendName: 'webgl', + kernelFunc: padV2 + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + const POW = ` + if(a < 0.0 && floor(b) < b){ + return NAN; + } + if (b == 0.0) { + return 1.0; + } + return (round(mod(b, 2.0)) != 1) ? + pow(abs(a), b) : sign(a) * pow(abs(a), b); +`; + const POW_PACKED = ` + // isModRound1 has 1 for components with round(mod(b, 2.0)) == 1, 0 otherwise. + vec4 isModRound1 = vec4(equal(round(mod(b, 2.0)), ivec4(1))); + vec4 multiplier = sign(a) * isModRound1 + (vec4(1.0) - isModRound1); + vec4 result = multiplier * pow(abs(a), b); + + // Ensure that a^0 = 1, including 0^0 = 1 as this correspond to TF and JS + bvec4 isExpZero = equal(b, vec4(0.0)); + result.r = isExpZero.r ? 1.0 : result.r; + result.g = isExpZero.g ? 1.0 : result.g; + result.b = isExpZero.b ? 1.0 : result.b; + result.a = isExpZero.a ? 1.0 : result.a; + + bvec4 isNaN1 = lessThan(a, vec4(0.0)); + bvec4 isNaN2 = lessThan(floor(b), b); + bvec4 isNaN = bvec4(isNaN1.x && isNaN2.x, isNaN1.y && isNaN2.y, isNaN1.z && isNaN2.z, isNaN1.w && isNaN2.w); + ` + + CHECK_NAN_SNIPPET_PACKED + ` + return result; +`; + const pow = binaryKernelFunc({ opSnippet: POW, packedOpSnippet: POW_PACKED }); + const powConfig = { + kernelName: Pow, + backendName: 'webgl', + kernelFunc: pow + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + function prod(args) { + const { inputs, backend, attrs } = args; + const { x } = inputs; + const { axis, keepDims } = attrs; + const xRank = x.shape.length; + const toDispose = []; + const origAxes = parseAxisParam(axis, x.shape); + let axes = origAxes; + const permutedAxes = getAxesPermutation(axes, xRank); + let permutedX = x; + if (permutedAxes != null) { + permutedX = transpose({ inputs: { x }, backend, attrs: { perm: permutedAxes } }); + axes = getInnerMostAxes(axes.length, xRank); + toDispose.push(permutedX); + } + assertAxesAreInnerMostDims('prod', axes, xRank); + let res; + if (backend.shouldExecuteOnCPU([permutedX])) { + const xVals = backend.texData.get(permutedX.dataId).values; + const { outVals, outShape, outDtype } = prodImplCPU(permutedX.shape, permutedX.dtype, xVals, axes); + res = backend.makeTensorInfo(outShape, outDtype, outVals); + } + else { + const [outShape, reduceShape] = computeOutAndReduceShapes(permutedX.shape, axes); + const inSize = sizeFromShape(reduceShape); + const a2D = reshape({ inputs: { x: permutedX }, backend, attrs: { shape: [-1, inSize] } }); + const outputDType = sumOutType(x.dtype); + const reduced = reduce(a2D, outputDType, 'prod', backend); + res = reshape({ inputs: { x: reduced }, backend, attrs: { shape: outShape } }); + toDispose.push(a2D); + toDispose.push(reduced); + } + if (keepDims) { + toDispose.push(res); + const newShape = expandShapeToKeepDim(res.shape, origAxes); + res = reshape({ inputs: { x: res }, 
backend, attrs: { shape: newShape } }); + } + toDispose.forEach(t => backend.disposeIntermediateTensorInfo(t)); + return res; + } + const prodConfig = { + kernelName: Prod, + backendName: 'webgl', + kernelFunc: prod + }; + + /** + * @license + * Copyright 2022 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + function raggedGather(args) { + const { inputs, backend, attrs } = args; + const { paramsNestedSplits, paramsDenseValues, indices } = inputs; + const { outputRaggedRank } = attrs; + const $paramsNestedSplits = paramsNestedSplits.map(t => backend.readSync(t.dataId)); + const $paramsNestedSplitsShapes = paramsNestedSplits.map(t => t.shape); + const $paramsDenseValues = backend.readSync(paramsDenseValues.dataId); + const $indices = backend.readSync(indices.dataId); + const [outputNestedSplits, outputDenseValues, outputDenseValuesShape] = raggedGatherImplCPU($paramsNestedSplits, $paramsNestedSplitsShapes, $paramsDenseValues, paramsDenseValues.shape, paramsDenseValues.dtype, $indices, indices.shape, outputRaggedRank); + const outputNestedSplitsTensors = outputNestedSplits.map((splits) => backend.makeTensorInfo([splits.length], 'int32', splits)); + const outputDenseValuesTensor = backend.makeTensorInfo(outputDenseValuesShape, paramsDenseValues.dtype, outputDenseValues); + return outputNestedSplitsTensors.concat([outputDenseValuesTensor]); + } + 
const raggedGatherConfig = { + kernelName: RaggedGather, + backendName: 'webgl', + kernelFunc: raggedGather, + }; + + /** + * @license + * Copyright 2022 Google LLC. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + function raggedRange(args) { + const { inputs, backend } = args; + const { starts, limits, deltas } = inputs; + const $starts = backend.readSync(starts.dataId); + const $limits = backend.readSync(limits.dataId); + const $deltas = backend.readSync(deltas.dataId); + const [rtNestedSplitsData, rtDenseValuesData] = raggedRangeImplCPU($starts, starts.shape, starts.dtype, $limits, limits.shape, $deltas, deltas.shape); + const rtNestedSplits = backend.makeTensorInfo([rtNestedSplitsData.length], 'int32', rtNestedSplitsData); + const rtDenseValues = backend.makeTensorInfo([rtDenseValuesData.length], starts.dtype, rtDenseValuesData); + return [rtNestedSplits, rtDenseValues]; + } + const raggedRangeConfig = { + kernelName: RaggedRange, + backendName: 'webgl', + kernelFunc: raggedRange, + }; + + /** + * @license + * Copyright 2022 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + function raggedTensorToTensor(args) { + const { inputs, backend, attrs } = args; + const { shape, values, defaultValue, rowPartitionTensors } = inputs; + const { rowPartitionTypes } = attrs; + const $shape = backend.readSync(shape.dataId); + const $values = backend.readSync(values.dataId); + const $defaultValue = backend.readSync(defaultValue.dataId); + const $rowPartitionValues = rowPartitionTensors.map(t => backend.readSync(t.dataId)); + const rowPartitionValuesShapes = rowPartitionTensors.map(t => t.shape); + const [outputShape, output] = raggedTensorToTensorImplCPU($shape, shape.shape, $values, values.shape, values.dtype, $defaultValue, defaultValue.shape, $rowPartitionValues, rowPartitionValuesShapes, rowPartitionTypes); + return backend.makeTensorInfo(outputShape, values.dtype, output); + } + const raggedTensorToTensorConfig = { + kernelName: RaggedTensorToTensor, + backendName: 'webgl', + kernelFunc: raggedTensorToTensor, + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + const range = (args) => { + const { backend, attrs } = args; + const { start, stop, step, dtype } = attrs; + const values = rangeImplCPU(start, stop, step, dtype); + return backend.makeTensorInfo([values.length], dtype, values); + }; + const rangeConfig = { + kernelName: Range, + backendName: 'webgl', + kernelFunc: range + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + const RECIPROCAL = `return 1.0 / x;`; + const reciprocal = unaryKernelFunc({ opSnippet: RECIPROCAL }); + const reciprocalConfig = { + kernelName: Reciprocal, + backendName: 'webgl', + kernelFunc: reciprocal, + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + const RELU = CHECK_NAN_SNIPPET$1 + ` + return (x < 0.0) ? 0.0 : x; +`; + const RELU_PACKED = ` + vec4 result = x * vec4(greaterThanEqual(x, vec4(0.0))); + bvec4 isNaN = isnan(x); + + result.r = isNaN.r ? x.r : result.r; + result.g = isNaN.g ? x.g : result.g; + result.b = isNaN.b ? x.b : result.b; + result.a = isNaN.a ? x.a : result.a; + + return result; +`; + const relu = unaryKernelFunc({ opSnippet: RELU, packedOpSnippet: RELU_PACKED }); + const reluConfig = { + kernelName: Relu$1, + backendName: 'webgl', + kernelFunc: relu + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + const RELU6 = CHECK_NAN_SNIPPET$1 + ` + return (x < 0.0) ? 0.0 : min(6.0, x); +`; + const RELU6_PACKED = ` + vec4 result = min(x, vec4(6.)) * vec4(greaterThanEqual(x, vec4(0.0))); + bvec4 isNaN = isnan(x); + + result.r = isNaN.r ? x.r : result.r; + result.g = isNaN.g ? x.g : result.g; + result.b = isNaN.b ? x.b : result.b; + result.a = isNaN.a ? 
x.a : result.a; + + return result; +`; + const relu6 = unaryKernelFunc({ opSnippet: RELU6, packedOpSnippet: RELU6_PACKED }); + const relu6Config = { + kernelName: Relu6$1, + backendName: 'webgl', + kernelFunc: relu6 + }; + + /** + * @license + * Copyright 2017 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + class ResizeBilinearProgram { + constructor(inputShape, newHeight, newWidth, alignCorners, halfPixelCenters) { + this.variableNames = ['A']; + this.outputShape = []; + const [batch, oldHeight, oldWidth, depth] = inputShape; + this.outputShape = [batch, newHeight, newWidth, depth]; + const effectiveInSize = [ + (alignCorners && newHeight > 1) ? oldHeight - 1 : oldHeight, + (alignCorners && newWidth > 1) ? oldWidth - 1 : oldWidth + ]; + const effectiveOutSize = [ + (alignCorners && newHeight > 1) ? newHeight - 1 : newHeight, + (alignCorners && newWidth > 1) ? 
newWidth - 1 : newWidth + ]; + let sourceFracIndexRC; + if (halfPixelCenters) { + sourceFracIndexRC = + `(vec2(yRC) + vec2(0.5)) * effectiveInputOverOutputRatioRC` + + ` - vec2(0.5)`; + } + else { + sourceFracIndexRC = `vec2(yRC) * effectiveInputOverOutputRatioRC`; + } + this.userCode = ` + const vec2 effectiveInputOverOutputRatioRC = vec2( + ${effectiveInSize[0] / effectiveOutSize[0]}, + ${effectiveInSize[1] / effectiveOutSize[1]}); + const vec2 inputShapeRC = vec2(${oldHeight}.0, ${oldWidth}.0); + + void main() { + ivec4 coords = getOutputCoords(); + int b = coords[0]; + int d = coords[3]; + ivec2 yRC = coords.yz; + + // Fractional source index. + vec2 sourceFracIndexRC = ${sourceFracIndexRC}; + + // Compute the four integer indices. + ivec2 sourceFloorRC = ivec2(max(sourceFracIndexRC, vec2(0.0))); + ivec2 sourceCeilRC = ivec2( + min(inputShapeRC - 1.0, ceil(sourceFracIndexRC))); + + float topLeft = getA(b, sourceFloorRC.x, sourceFloorRC.y, d); + float bottomLeft = getA(b, sourceCeilRC.x, sourceFloorRC.y, d); + float topRight = getA(b, sourceFloorRC.x, sourceCeilRC.y, d); + float bottomRight = getA(b, sourceCeilRC.x, sourceCeilRC.y, d); + + vec2 fracRC = sourceFracIndexRC - vec2(sourceFloorRC); + + float top = topLeft + (topRight - topLeft) * fracRC.y; + float bottom = bottomLeft + (bottomRight - bottomLeft) * fracRC.y; + float newValue = top + (bottom - top) * fracRC.x; + + setOutput(newValue); + } + `; + } + } + + /** + * @license + * Copyright 2019 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + class ResizeBilinearPackedProgram { + constructor(inputShape, newHeight, newWidth, alignCorners, halfPixelCenters) { + this.variableNames = ['A']; + this.packedInputs = true; + this.packedOutput = true; + this.outputShape = []; + const [batch, oldHeight, oldWidth, depth] = inputShape; + this.outputShape = [batch, newHeight, newWidth, depth]; + const effectiveInSize = [ + (alignCorners && newHeight > 1) ? oldHeight - 1 : oldHeight, + (alignCorners && newWidth > 1) ? oldWidth - 1 : oldWidth + ]; + const effectiveOutSize = [ + (alignCorners && newHeight > 1) ? newHeight - 1 : newHeight, + (alignCorners && newWidth > 1) ? newWidth - 1 : newWidth + ]; + let sourceFracIndexRC; + if (halfPixelCenters) { + sourceFracIndexRC = `(vec3(yRC) + vec3(0.5)) * ` + + `effectiveInputOverOutputRatioRC - vec3(0.5)`; + } + else { + sourceFracIndexRC = `vec3(yRC) * effectiveInputOverOutputRatioRC`; + } + this.userCode = ` + const vec3 effectiveInputOverOutputRatioRC = vec3( + ${effectiveInSize[0] / effectiveOutSize[0]}, + ${effectiveInSize[1] / effectiveOutSize[1]}, + ${effectiveInSize[1] / effectiveOutSize[1]}); + const vec3 inputShapeRC = vec3(${oldHeight}.0, ${oldWidth}.0, + ${oldWidth}.0); + + float getAValue(int b, int r, int c, int d) { + return getChannel(getA(b, r, c, d), vec2(c, d)); + } + + void main() { + ivec4 coords = getOutputCoords(); + int b = coords[0]; + int d = coords[3]; + // Calculate values for next column in yRC.z. + ivec3 yRC = coords.yzz + ivec3(0, 0, 1); + + // Fractional source index. + vec3 sourceFracIndexRC = ${sourceFracIndexRC}; + + // Compute the four integer indices. 
+ ivec3 sourceFloorRC = ivec3(max(sourceFracIndexRC, vec3(0.0))); + ivec3 sourceCeilRC = ivec3( + min(inputShapeRC - 1.0, ceil(sourceFracIndexRC))); + + // Should we calculate next column and row elements in 2x2 packed cell. + bool hasNextCol = d < ${depth - 1}; + bool hasNextRow = coords.z < ${newWidth - 1}; + + // In parallel, construct four corners for all four components in + // packed 2x2 cell. + vec4 topLeft = vec4( + getAValue(b, sourceFloorRC.x, sourceFloorRC.y, d), + hasNextCol ? getAValue(b, sourceFloorRC.x, sourceFloorRC.y, d + 1) + : 0.0, + hasNextRow ? getAValue(b, sourceFloorRC.x, sourceFloorRC.z, d) + : 0.0, + (hasNextRow && hasNextCol) ? + getAValue(b, sourceFloorRC.x, sourceFloorRC.z, d + 1) : 0.0); + + vec4 bottomLeft = vec4( + getAValue(b, sourceCeilRC.x, sourceFloorRC.y, d), + hasNextCol ? getAValue(b, sourceCeilRC.x, sourceFloorRC.y, d + 1) + : 0.0, + hasNextRow ? getAValue(b, sourceCeilRC.x, sourceFloorRC.z, d) + : 0.0, + (hasNextRow && hasNextCol) ? + getAValue(b, sourceCeilRC.x, sourceFloorRC.z, d + 1) : 0.0); + + vec4 topRight = vec4( + getAValue(b, sourceFloorRC.x, sourceCeilRC.y, d), + hasNextCol ? getAValue(b, sourceFloorRC.x, sourceCeilRC.y, d + 1) + : 0.0, + hasNextRow ? getAValue(b, sourceFloorRC.x, sourceCeilRC.z, d) + : 0.0, + (hasNextRow && hasNextCol) ? + getAValue(b, sourceFloorRC.x, sourceCeilRC.z, d + 1) : 0.0); + + vec4 bottomRight = vec4( + getAValue(b, sourceCeilRC.x, sourceCeilRC.y, d), + hasNextCol ? getAValue(b, sourceCeilRC.x, sourceCeilRC.y, d + 1) + : 0.0, + hasNextRow ? getAValue(b, sourceCeilRC.x, sourceCeilRC.z, d) + : 0.0, + (hasNextRow && hasNextCol) ? 
+ getAValue(b, sourceCeilRC.x, sourceCeilRC.z, d + 1) : 0.0); + + vec3 fracRC = sourceFracIndexRC - vec3(sourceFloorRC); + + vec4 top = mix(topLeft, topRight, fracRC.yyzz); + vec4 bottom = mix(bottomLeft, bottomRight, fracRC.yyzz); + vec4 newValue = mix(top, bottom, fracRC.x); + + setOutput(newValue); + } + `; + } + } + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + function resizeBilinear(args) { + const { inputs, backend, attrs } = args; + const { images } = inputs; + const { alignCorners, halfPixelCenters, size } = attrs; + const [newHeight, newWidth] = size; + const program = env().getBool('WEBGL_PACK_IMAGE_OPERATIONS') ? + new ResizeBilinearPackedProgram(images.shape, newHeight, newWidth, alignCorners, halfPixelCenters) : + new ResizeBilinearProgram(images.shape, newHeight, newWidth, alignCorners, halfPixelCenters); + return backend.runWebGLProgram(program, [images], 'float32'); + } + const resizeBilinearConfig = { + kernelName: ResizeBilinear, + backendName: 'webgl', + kernelFunc: resizeBilinear + }; + + /** + * @license + * Copyright 2018 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + class ResizeBilinearBackpropProgram { + constructor(dyShape, inputShape, alignCorners) { + this.variableNames = ['dy']; + this.outputShape = []; + this.outputShape = inputShape; + const [, xHeight, xWidth,] = inputShape; + const [, yHeight, yWidth] = dyShape; + // In the backwards pass, we want to find the pixels that were generated for + // each pixel in the input image the forward pass and add the corresponding + // coefficient from dy to the gradient (with some interpolation). + const effectiveXSize = [ + (alignCorners && yHeight > 1) ? xHeight - 1 : xHeight, + (alignCorners && yWidth > 1) ? xWidth - 1 : xWidth + ]; + const effectiveYSize = [ + (alignCorners && yHeight > 1) ? yHeight - 1 : yHeight, + (alignCorners && yWidth > 1) ? yWidth - 1 : yWidth + ]; + const heightScale = effectiveXSize[0] / effectiveYSize[0]; + const widthScale = effectiveXSize[1] / effectiveYSize[1]; + const invHeightScale = 1 / heightScale; + const invWidthScale = 1 / widthScale; + // This defines the size of the window of values around a particular + // index in dy that we want to search for contributions to dx. 
+ const winHeight = (Math.ceil(invHeightScale) * 2) + 2; + const winWidth = (Math.ceil(invWidthScale) * 2) + 2; + this.userCode = ` + void main() { + ivec4 coords = getOutputCoords(); + int b = coords[0]; + int d = coords[3]; + int r = coords[1]; + int c = coords[2]; + + float accumulator = 0.0; + + const float heightScale = float(${heightScale}); + const float widthScale = float(${widthScale}); + + const float invHeightScale = float(${invHeightScale}); + const float invWidthScale = float(${invWidthScale}); + + const int winHeight = int(${winHeight}); + const int winWidth = int(${winWidth}); + + // Compute bounds for where in dy we will look + float startRLerp = floor(float(r) * invHeightScale); + int startDyR = int(startRLerp - float(winHeight / 2)); + + float startCLerp = floor(float(c) * invWidthScale); + int startDyC = int(startCLerp - float(winWidth / 2)); + + // Loop over dy + for (int dyROffset = 0; dyROffset < winHeight; dyROffset++) { + int dyR = dyROffset + startDyR; + + // Guard against the window exceeding the bounds of dy + if (dyR < 0 || dyR >= ${yHeight}) { + continue; + } + + for (int dyCOffset = 0; dyCOffset < winWidth; dyCOffset++) { + int dyC = dyCOffset + startDyC; + + // Guard against the window exceeding the bounds of dy + if (dyC < 0 || dyC >= ${yWidth}) { + continue; + } + + float dxR = float(dyR) * heightScale; + int topDxRIndex = int(floor(dxR)); + int bottomDxRIndex = int(min(ceil(dxR), ${xHeight - 1}.0)); + float dxRLerp = dxR - float(topDxRIndex); + float inverseDxRLerp = 1.0 - dxRLerp; + + float dxC = float(dyC) * widthScale; + int leftDxCIndex = int(floor(dxC)); + int rightDxCIndex = int(min(ceil(dxC), ${xWidth - 1}.0)); + float dxCLerp = dxC - float(leftDxCIndex); + float inverseDxCLerp = 1.0 - dxCLerp; + + if (r == topDxRIndex && c == leftDxCIndex) { + // topLeft + accumulator += + getDy(b, dyR, dyC, d) * inverseDxRLerp * inverseDxCLerp; + } + + if (r == topDxRIndex && c == rightDxCIndex) { + // topRight + accumulator += getDy(b, 
dyR, dyC, d) * inverseDxRLerp * dxCLerp; + } + + if (r == bottomDxRIndex && c == leftDxCIndex) { + // bottomLeft + accumulator += getDy(b, dyR, dyC, d) * dxRLerp * inverseDxCLerp; + } + + if (r == bottomDxRIndex && c == rightDxCIndex) { + // bottomRight + accumulator += getDy(b, dyR, dyC, d) * dxRLerp * dxCLerp; + } + } + } + // End loop over dy + + setOutput(accumulator); + } + `; + } + } + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + function resizeBilinearGrad(args) { + const { inputs, backend, attrs } = args; + const { images, dy } = inputs; + const { alignCorners } = attrs; + const program = new ResizeBilinearBackpropProgram(dy.shape, images.shape, alignCorners); + return backend.runWebGLProgram(program, [dy], dy.dtype); + } + const resizeBilinearGradConfig = { + kernelName: ResizeBilinearGrad, + backendName: 'webgl', + kernelFunc: resizeBilinearGrad + }; + + /** + * @license + * Copyright 2018 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + class ResizeNearestNeighborProgram { + constructor(inputShape, newHeight, newWidth, alignCorners, halfPixelCenters) { + this.variableNames = ['A']; + this.outputShape = []; + const [batch, oldHeight, oldWidth, depth] = inputShape; + this.outputShape = [batch, newHeight, newWidth, depth]; + const effectiveInSize = [ + (alignCorners && newHeight > 1) ? oldHeight - 1 : oldHeight, + (alignCorners && newWidth > 1) ? oldWidth - 1 : oldWidth + ]; + const effectiveOutSize = [ + (alignCorners && newHeight > 1) ? newHeight - 1 : newHeight, + (alignCorners && newWidth > 1) ? newWidth - 1 : newWidth + ]; + // When align corners is false, we rounds the value with floor. + const roundBase = alignCorners ? '0.5' : '0.0'; + let sourceFracIndexRC; + if (halfPixelCenters) { + sourceFracIndexRC = + `max((vec2(yRC) + vec2(0.5)) * effectiveInputOverOutputRatioRC` + + `, vec2(0.0))`; + } + else { + sourceFracIndexRC = `vec2(yRC) * effectiveInputOverOutputRatioRC`; + } + this.userCode = ` + const vec2 effectiveInputOverOutputRatioRC = vec2( + ${effectiveInSize[0] / effectiveOutSize[0]}, + ${effectiveInSize[1] / effectiveOutSize[1]}); + const vec2 inputShapeRC = vec2(${oldHeight}.0, ${oldWidth}.0); + + void main() { + ivec4 coords = getOutputCoords(); + int b = coords[0]; + int d = coords[3]; + ivec2 yRC = coords.yz; + + // Fractional source index. + vec2 sourceFracIndexRC = ${sourceFracIndexRC}; + + // Compute the coordinators of nearest neighbor point. 
+ ivec2 sourceNearestRC = ivec2( + min(inputShapeRC - 1.0, floor(sourceFracIndexRC + ${roundBase}))); + float newValue = getA(b, sourceNearestRC.x, sourceNearestRC.y, d); + + setOutput(newValue); + } + `; + } + } + + /** + * @license + * Copyright 2019 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + class ResizeNearestNeighborPackedProgram { + constructor(inputShape, newHeight, newWidth, alignCorners, halfPixelCenters) { + this.variableNames = ['A']; + this.packedInputs = true; + this.packedOutput = true; + this.outputShape = []; + const [batch, oldHeight, oldWidth, depth] = inputShape; + this.outputShape = [batch, newHeight, newWidth, depth]; + const effectiveInSize = [ + (alignCorners && newHeight > 1) ? oldHeight - 1 : oldHeight, + (alignCorners && newWidth > 1) ? oldWidth - 1 : oldWidth + ]; + const effectiveOutSize = [ + (alignCorners && newHeight > 1) ? newHeight - 1 : newHeight, + (alignCorners && newWidth > 1) ? newWidth - 1 : newWidth + ]; + // When align corners is false, we rounds the value with floor. + const roundBase = alignCorners ? 
'0.5' : '0.0'; + let sourceFracIndexRC; + if (halfPixelCenters) { + sourceFracIndexRC = `max((vec3(yRC) + vec3(0.5)) * ` + + `effectiveInputOverOutputRatioRC, vec3(0.0))`; + } + else { + sourceFracIndexRC = `vec3(yRC) * effectiveInputOverOutputRatioRC`; + } + this.userCode = ` + const vec3 effectiveInputOverOutputRatioRC = vec3( + ${effectiveInSize[0] / effectiveOutSize[0]}, + ${effectiveInSize[1] / effectiveOutSize[1]}, + ${effectiveInSize[1] / effectiveOutSize[1]}); + const vec3 inputShapeRC = vec3(${oldHeight}.0, ${oldWidth}.0, + ${oldWidth}.0); + + float getAValue(int b, int r, int c, int d) { + return getChannel(getA(b, r, c, d), vec2(c, d)); + } + + void main() { + ivec4 coords = getOutputCoords(); + int b = coords[0]; + int d = coords[3]; + // Calculate values for next column in yRC.z. + ivec3 yRC = coords.yzz + ivec3(0, 0, 1); + + // Fractional source index. + vec3 sourceFracIndexRC = ${sourceFracIndexRC}; + + // Compute the coordinators of nearest neighbor point. + ivec3 sourceNearestRC = ivec3( + min(inputShapeRC - 1.0, floor(sourceFracIndexRC + ${roundBase}))); + + // Should we calculate next column and row elements in 2x2 packed cell. + bool hasNextCol = d < ${depth - 1}; + bool hasNextRow = coords.z < ${newWidth - 1}; + + vec4 newValue = vec4( + getAValue(b, sourceNearestRC.x, sourceNearestRC.y, d), + hasNextCol ? getAValue(b, sourceNearestRC.x, sourceNearestRC.y, d + 1) + : 0.0, + hasNextRow ? getAValue(b, sourceNearestRC.x, sourceNearestRC.z, d) + : 0.0, + (hasNextRow && hasNextCol) ? + getAValue(b, sourceNearestRC.x, sourceNearestRC.z, d + 1) : 0.0); + + setOutput(newValue); + } + `; + } + } + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + function resizeNearestNeighbor(args) { + const { inputs, backend, attrs } = args; + const { images } = inputs; + const { alignCorners, halfPixelCenters, size } = attrs; + const [newHeight, newWidth] = size; + const program = env().getBool('WEBGL_PACK_IMAGE_OPERATIONS') ? + new ResizeNearestNeighborPackedProgram(images.shape, newHeight, newWidth, alignCorners, halfPixelCenters) : + new ResizeNearestNeighborProgram(images.shape, newHeight, newWidth, alignCorners, halfPixelCenters); + return backend.runWebGLProgram(program, [images], images.dtype); + } + const resizeNearestNeighborConfig = { + kernelName: ResizeNearestNeighbor, + backendName: 'webgl', + kernelFunc: resizeNearestNeighbor + }; + + /** + * @license + * Copyright 2018 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ + class ResizeNearestNeigborBackpropProgram { + constructor(dyShape, inputShape, alignCorners) { + this.variableNames = ['dy']; + this.outputShape = []; + this.outputShape = inputShape; + const [, xHeight, xWidth,] = inputShape; + const [, yHeight, yWidth] = dyShape; + // In the backwards pass, we want to find the pixels that were generated for + // each pixel in the input image the forward pass and add the corresponding + // coefficient from dy to the gradient (with some interpolation). + const effectiveXSize = [ + (alignCorners && yHeight > 1) ? xHeight - 1 : xHeight, + (alignCorners && yWidth > 1) ? xWidth - 1 : xWidth + ]; + const effectiveYSize = [ + (alignCorners && yHeight > 1) ? yHeight - 1 : yHeight, + (alignCorners && yWidth > 1) ? yWidth - 1 : yWidth + ]; + const heightScale = effectiveXSize[0] / effectiveYSize[0]; + const widthScale = effectiveXSize[1] / effectiveYSize[1]; + const invHeightScale = 1 / heightScale; + const invWidthScale = 1 / widthScale; + // This defines the size of the window of values around a particular + // index in dy that we want to search for contributions to dx. 
+ const winHeight = (Math.ceil(invHeightScale) * 2) + 2; + const winWidth = (Math.ceil(invWidthScale) * 2) + 2; + this.userCode = ` + void main() { + ivec4 coords = getOutputCoords(); + int b = coords[0]; + int d = coords[3]; + int r = coords[1]; + int c = coords[2]; + + float accumulator = 0.0; + + const float heightScale = float(${heightScale}); + const float widthScale = float(${widthScale}); + + const float invHeightScale = float(${invHeightScale}); + const float invWidthScale = float(${invWidthScale}); + + const int winHeight = int(${winHeight}); + const int winWidth = int(${winWidth}); + + // Compute bounds for where in dy we will look + float startRLerp = floor(float(r) * invHeightScale); + int startDyR = int(floor(startRLerp - float(winHeight / 2))); + + float startCLerp = floor(float(c) * invWidthScale); + int startDyC = int(floor(startCLerp - float(winWidth / 2))); + + // Loop over dy + for (int dyROffset = 0; dyROffset < winHeight; dyROffset++) { + int dyR = dyROffset + startDyR; + + // Guard against the window exceeding the bounds of dy + if (dyR < 0 || dyR >= ${yHeight}) { + continue; + } + + for (int dyCOffset = 0; dyCOffset < winWidth; dyCOffset++) { + int dyC = dyCOffset + startDyC; + + // Guard against the window exceeding the bounds of dy + if (dyC < 0 || dyC >= ${yWidth}) { + continue; + } + + float sourceFracRow = + float(${effectiveXSize[0]}) * + (float(dyR) / float(${effectiveYSize[0]})); + + float sourceFracCol = + float(${effectiveXSize[1]}) * + (float(dyC) / float(${effectiveYSize[1]})); + + int sourceNearestRow = int(min( + float(int(${xHeight}) - 1), + ${alignCorners} ? float(round(sourceFracRow)) : + float(floor(sourceFracRow)))); + + int sourceNearestCol = int(min( + float(int(${xWidth}) - 1), + ${alignCorners} ? 
float(round(sourceFracCol)) : + float(floor(sourceFracCol)))); + + if (r == sourceNearestRow && c == sourceNearestCol) { + accumulator += getDy(b, dyR, dyC, d); + } + } + } + // End loop over dy + + setOutput(accumulator); + } + `; + } + } + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + function resizeNearestNeighborGrad(args) { + const { inputs, backend, attrs } = args; + const { images, dy } = inputs; + const { alignCorners } = attrs; + const program = new ResizeNearestNeigborBackpropProgram(dy.shape, images.shape, alignCorners); + return backend.runWebGLProgram(program, [dy], dy.dtype); + } + const resizeNearestNeighborGradConfig = { + kernelName: ResizeNearestNeighborGrad, + backendName: 'webgl', + kernelFunc: resizeNearestNeighborGrad + }; + + /** + * @license + * Copyright 2017 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + class ReverseProgram { + constructor(xShape, axis) { + this.variableNames = ['x']; + const rank = xShape.length; + if (rank > 4) { + throw new Error(`WebGL backend: Reverse of rank-${rank} tensor is not yet supported`); + } + this.outputShape = xShape; + if (rank === 1) { + this.userCode = ` + void main() { + int coord = getOutputCoords(); + setOutput(getX(${xShape[0]} - coord - 1)); + } + `; + return; + } + const getInCoord = (i) => { + if (axis.indexOf(i) !== -1 && xShape[i] !== 1) { + return `${xShape[i]} - coords[${i}] - 1`; + } + return `coords[${i}]`; + }; + const inCoords = xShape.map((_, i) => getInCoord(i)).join(','); + const type = getCoordsDataType(rank); + this.userCode = ` + void main() { + ${type} coords = getOutputCoords(); + setOutput(getX(${inCoords})); + } + `; + } + } + + /** + * @license + * Copyright 2019 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ + class ReversePackedProgram { + constructor(xShape, axis) { + this.variableNames = ['x']; + this.packedInputs = true; + this.packedOutput = true; + const rank = xShape.length; + if (rank > 4) { + throw new Error(`WebGL backend: Reverse of rank-${rank} tensor is not yet supported`); + } + this.outputShape = xShape; + const channels = getChannels('rc', rank); + const nextColumn = `${channels[rank - 1]} + 1 < ${this.outputShape[rank - 1]}`; + const nextRow = `${channels[rank - 2]} + 1 < ${this.outputShape[rank - 2]}`; + const type = getCoordsDataType(rank); + if (rank === 1) { + this.userCode = ` + void main(){ + int rc = getOutputCoords(); + vec4 result = vec4(0.); + result.r = getChannel(getX(${xShape[0]} - rc - 1), + ${xShape[0]} - rc - 1); + if(${nextColumn}){ + result.g = getChannel(getX(${xShape[0]} - (rc + 1) - 1), + ${xShape[0]} - (rc + 1) - 1); + } + setOutput(result); + } + `; + } + else { + this.userCode = ` + void main() { + ${type} rc = getOutputCoords(); + vec4 result = vec4(0.); + result.r = ${getR(channels.slice())}; + if(${nextColumn}){ + result.g = ${getG(channels.slice())}; + } + if(${nextRow}) { + result.b = ${getB(channels.slice())}; + if(${nextColumn}) { + result.a = ${getA(channels.slice())}; + } + } + setOutput(result); + } + `; + } + function getR(channels) { + return getChannel(channels); + } + function getG(channels) { + channels[rank - 1] = '(' + channels[rank - 1] + ` + 1)`; + return getChannel(channels); + } + function getB(channels) { + channels[rank - 2] = '(' + channels[rank - 2] + ` + 1)`; + return getChannel(channels); + } + function getA(channels) { + channels[rank - 1] = '(' + channels[rank - 1] + ` + 1)`; + channels[rank - 2] = '(' + channels[rank - 2] + ` + 1)`; + return getChannel(channels); + } + function getChannel(channels) { + const inCoordsArray = xShape.map((_, i) => getInCoord(i, channels)); + const inCoords = inCoordsArray.join(','); 
+ const innerDims = inCoordsArray.slice(-2).join(','); + return `getChannel(getX(${inCoords}), vec2(${innerDims}))`; + } + function getInCoord(i, channels1) { + if (axis.indexOf(i) !== -1 && xShape[i] !== 1) { + return `${xShape[i]} - ${channels1[i]} - 1`; + } + else { + return `${channels1[i]}`; + } + } + } + } + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + function reverse(args) { + const { inputs, backend, attrs } = args; + const { x } = inputs; + const { dims } = attrs; + const xRank = x.shape.length; + const $dims = parseAxisParam(dims, x.shape); + if (xRank === 0) { + return identity({ inputs: { x }, backend }); + } + const program = env().getBool('WEBGL_PACK_ARRAY_OPERATIONS') ? + new ReversePackedProgram(x.shape, $dims) : + new ReverseProgram(x.shape, $dims); + return backend.runWebGLProgram(program, [x], x.dtype); + } + const reverseConfig = { + kernelName: Reverse, + backendName: 'webgl', + kernelFunc: reverse + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + class RotateProgram { + constructor(imageShape, fillValue) { + this.variableNames = ['Image']; + this.outputShape = []; + this.customUniforms = [{ name: 'params', type: 'vec4' }]; + const imageHeight = imageShape[1]; + const imageWidth = imageShape[2]; + this.outputShape = imageShape; + let fillSnippet = ''; + if (typeof fillValue === 'number') { + fillSnippet = `float outputValue = ${fillValue.toFixed(2)};`; + } + else { + fillSnippet = ` + vec3 fill = vec3(${fillValue.join(',')}); + float outputValue = fill[coords[3]];`; + } + this.userCode = ` + void main() { + ivec4 coords = getOutputCoords(); + int x = coords[2]; + int y = coords[1]; + float coordXFloat = (float(x) - params[0]) * params[3] - + (float(y) - params[1]) * params[2]; + float coordYFloat = (float(x) - params[0]) * params[2] + + (float(y) - params[1]) * params[3]; + int coordX = int(round(coordXFloat + params[0])); + int coordY = int(round(coordYFloat + params[1])); + ${fillSnippet} + if(coordX >= 0 && coordX < ${imageWidth} && coordY >= 0 && coordY < ${imageHeight}) { + outputValue = getImage(coords[0], coordY, coordX, coords[3]); + } + setOutput(outputValue); + } + `; + } + } + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + const rotateWithOffsetConfig = { + kernelName: RotateWithOffset, + backendName: 'webgl', + kernelFunc: ({ inputs, attrs, backend }) => { + const { image } = inputs; + const { radians, fillValue, center } = attrs; + const webglBackend = backend; + const program = new RotateProgram(image.shape, fillValue); + const [centerX, centerY] = getImageCenter(center, image.shape[1], image.shape[2]); + const customValues = [[centerX, centerY, Math.sin(radians), Math.cos(radians)]]; + const output = webglBackend.runWebGLProgram(program, [image], image.dtype, customValues); + return output; + } + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + const ROUND = ` + // OpenGL ES does not support round function. + // The algorithm is based on banker's rounding. 
+ float base = floor(x); + if ((x - base) < 0.5) { + return floor(x); + } else if ((x - base) > 0.5) { + return ceil(x); + } else { + if (mod(base, 2.0) == 0.0) { + return base; + } else { + return base + 1.0; + } + } +`; + const round = unaryKernelFunc({ opSnippet: ROUND }); + const roundConfig = { + kernelName: Round, + backendName: 'webgl', + kernelFunc: round, + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + const RSQRT = `return inversesqrt(x);`; + const rsqrt = unaryKernelFunc({ opSnippet: RSQRT, cpuKernelImpl: rsqrtImplCPU }); + const rsqrtConfig = { + kernelName: Rsqrt, + backendName: 'webgl', + kernelFunc: rsqrt + }; + + /** + * @license + * Copyright 2018 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ + class ScatterProgram { + constructor(updateSize, sliceDim, indicesRank, updatesRank, strides, shape, summingDupeIndex = true, defaultIsTensor = false) { + this.variableNames = ['updates', 'indices', 'defaultValue']; + this.outputShape = shape; + const stridesType = getCoordsDataType(strides.length); + const dtype = getCoordsDataType(shape.length); + let indicesString = ''; + if (indicesRank === 1) { + indicesString = 'i'; + } + else if (indicesRank === 2) { + indicesString = 'i, j'; + } + const indicesSnippet = `getIndices(${indicesString})`; + let updatesString = ''; + if (updatesRank === 1) { + updatesString = 'i'; + } + else if (updatesRank === 2) { + updatesString = 'i, coords[1]'; + } + const updatesSnippet = `getUpdates(${updatesString})`; + let defaultValuesString = ''; + if (defaultIsTensor) { + defaultValuesString = 'coords[0], coords[1]'; + } + const defaultValueSnippet = `getDefaultValue(${defaultValuesString})`; + const strideString = sliceDim > 1 ? 'strides[j]' : 'strides'; + this.userCode = ` + ${stridesType} strides = ${stridesType}(${strides}); + + void main() { + ${dtype} coords = getOutputCoords(); + float sum = 0.0; + bool found = false; + for (int i = 0; i < ${updateSize}; i++) { + int flattenedIndex = 0; + for (int j = 0; j < ${sliceDim}; j++) { + int index = round(${indicesSnippet}); + flattenedIndex += index * ${strideString}; + } + if (flattenedIndex == coords[0]) { + sum += ${updatesSnippet}; + found = true; + } + } + setOutput(mix(${defaultValueSnippet}, sum, float(found))); + } + `; + } + } + + /** + * @license + * Copyright 2023 Google LLC. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + class ScatterPackedProgram { + constructor(updateSize, sliceDim, indicesRank, updatesRank, strides, shape, summingDupeIndex = true, defaultIsTensor = false) { + this.variableNames = ['updates', 'indices', 'defaultValue']; + this.packedInputs = true; + this.packedOutput = true; + this.outputShape = shape; + const stridesType = getCoordsDataType(strides.length); + const dtype = getCoordsDataType(shape.length); + let indicesString = ''; + if (indicesRank === 1) { + indicesString = 'i'; + } + else if (indicesRank === 2) { + indicesString = 'i, j'; + } + const indicesSnippet = `getIndices(${indicesString})`; + let updatesString = ''; + if (updatesRank === 1) { + updatesString = 'i'; + } + else if (updatesRank === 2) { + updatesString = 'i, coords[1]'; + } + const updatesSnippet = `getUpdates(${updatesString})`; + let defaultValuesString = ''; + if (defaultIsTensor) { + defaultValuesString = 'coords[0], coords[1]'; + } + const defaultValueSnippet = `getDefaultValue(${defaultValuesString})`; + const strideString = sliceDim > 1 ? 'strides[j]' : 'strides'; + const strideString2 = sliceDim > 1 ? 
'strides[j + 1]' : 'strides'; + this.userCode = ` + ${stridesType} strides = ${stridesType}(${strides}); + + void main() { + ${dtype} coords = getOutputCoords(); + vec4 sum = vec4(0.); + vec4 found = vec4(0.); + for (int i = 0; i < ${updateSize}; i+=2) { + ivec2 flattenedIndex = ivec2(0); + for (int j = 0; j < ${sliceDim}; j+=2) { + ivec4 index = round(${indicesSnippet}); + flattenedIndex += index.xz * ${strideString}; + if (j + 1 < ${sliceDim}) { + flattenedIndex += index.yw * ${strideString2}; + } + } + if (flattenedIndex[0] == coords[0] || flattenedIndex[1] == coords[0] || + flattenedIndex[0] == coords[0] + 1 || flattenedIndex[1] == coords[0] + 1) { + vec4 updVals = ${updatesSnippet}; + if (flattenedIndex[0] == coords[0]) { + sum.xy += updVals.xy; + found.xy = vec2(1.); + } else if (flattenedIndex[0] == coords[0] + 1) { + sum.zw += updVals.xy; + found.zw = vec2(1.); + } + if (flattenedIndex[1] == coords[0]) { + sum.xy += updVals.zw; + found.xy = vec2(1.); + } else if (flattenedIndex[1] == coords[0] + 1) { + sum.zw += updVals.zw; + found.zw = vec2(1.); + } + } + } + setOutput(mix(${defaultValueSnippet}, sum, found)); + } + `; + } + } + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ + function scatterNd(args) { + const { inputs, backend, attrs } = args; + const { indices, updates } = inputs; + const { shape } = attrs; + const { sliceRank, numUpdates, sliceSize, strides, outputSize } = calculateShapes(updates, indices, shape); + const flattenShape = [outputSize / sliceSize, sliceSize]; + if (outputSize === 0) { + return backend.makeTensorInfo(shape, indices.dtype); + } + const flattenIndices = reshape({ inputs: { x: indices }, backend, attrs: { shape: [numUpdates, sliceRank] } }); + const flattenX = reshape({ inputs: { x: updates }, backend, attrs: { shape: [numUpdates, sliceSize] } }); + const defaultValue = backend.makeTensorInfo([], 'float32', new Float32Array([0])); // scalar(0) + let program; + if (env().getBool('WEBGL_PACK')) { + program = new ScatterPackedProgram(numUpdates, sliceRank, flattenIndices.shape.length, flattenX.shape.length, strides, flattenShape); + } + else { + program = new ScatterProgram(numUpdates, sliceRank, flattenIndices.shape.length, flattenX.shape.length, strides, flattenShape); + } + const res = backend.runWebGLProgram(program, [flattenX, flattenIndices, defaultValue], flattenX.dtype); + const reshaped = reshape({ inputs: { x: res }, backend, attrs: { shape } }); + backend.disposeIntermediateTensorInfo(flattenIndices); + backend.disposeIntermediateTensorInfo(flattenX); + backend.disposeIntermediateTensorInfo(res); + backend.disposeIntermediateTensorInfo(defaultValue); + return reshaped; + } + const scatterNdConfig = { + kernelName: ScatterNd, + backendName: 'webgl', + kernelFunc: scatterNd + }; + + /** + * @license + * Copyright 2022 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + class SearchSortedProgram { + constructor(batchSize, numInputs, numValues, side) { + this.variableNames = ['sortedSequence', 'values']; + this.customUniforms = [{ name: 'numInputs', type: 'int' }]; + this.outputShape = [batchSize, numValues]; + const webGL2LoopHead = 'while (left < right) {'; + // WebGL1 doesn't accept non constant loop conditions, so upper bound loop + // iterations. + const webGL1LoopHead = `for (int i = 0; i < ${Math.ceil(Math.log2(numInputs + 1))}; ++i) { if (left >= right) break;`; + const loopHead = env().getNumber('WEBGL_VERSION') === 2 ? webGL2LoopHead : + webGL1LoopHead; + // left corresponds to lower bound and right to upper bound. + const boundComparator = side === 'left' ? '<' : '<='; + this.userCode = ` + int findBound(int batch, float value) { + int left = 0; + int right = numInputs; + int mid; + ${loopHead} + mid = (left + right) / 2; + if (getSortedSequence(batch, mid) ${boundComparator} value) { + left = mid + 1; + } else { + right = mid; + } + } + return right; + } + + void main() { + ivec2 coords = getOutputCoords(); + int batch = coords[0]; + int valueIndex = coords[1]; + + float value = getValues(batch, valueIndex); + + setOutput(float(findBound(batch, value))); + } + `; + } + } + + /** + * @license + * Copyright 2022 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + function searchSorted(args) { + const { inputs, backend, attrs } = args; + const { sortedSequence, values } = inputs; + const { side } = attrs; + const program = new SearchSortedProgram(sortedSequence.shape[0], sortedSequence.shape[1], values.shape[1], side); + const customValues = [[sortedSequence.shape[1]]]; + return backend.runWebGLProgram(program, [sortedSequence, values], 'int32', customValues); + } + const searchSortedConfig = { + kernelName: SearchSorted, + backendName: 'webgl', + kernelFunc: searchSorted, + }; + + /** + * @license + * Copyright 2017 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ + class SelectProgram { + constructor(cRank, shape, rank) { + this.variableNames = ['c', 'a', 'b']; + this.outputShape = shape; + let cCoords; + let abCoords; + if (rank > 4) { + throw Error(`Where for rank ${rank} is not yet supported`); + } + if (rank === 1) { + abCoords = `resRC`; + cCoords = `resRC`; + } + else { + const currentCoords = ['resRC.x', 'resRC.y', 'resRC.z', 'resRC.w']; + const cCoordVars = []; + const abCoordVars = []; + for (let i = 0; i < shape.length; i++) { + abCoordVars.push(`${currentCoords[i]}`); + if (i < cRank) { + cCoordVars.push(`${currentCoords[i]}`); + } + } + cCoords = cCoordVars.join(); + abCoords = abCoordVars.join(); + } + const dtype = getCoordsDataType(rank); + this.userCode = ` + void main() { + ${dtype} resRC = getOutputCoords(); + float cVal = getC(${cCoords}); + if (cVal >= 1.0) { + setOutput(getA(${abCoords})); + } else { + setOutput(getB(${abCoords})); + } + } + `; + } + } + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ + function select(args) { + const { inputs, backend } = args; + const { condition, t, e } = inputs; + const program = new SelectProgram(condition.shape.length, t.shape, t.shape.length); + return backend.runWebGLProgram(program, [condition, t, e], upcastType(t.dtype, e.dtype)); + } + const selectConfig = { + kernelName: Select, + backendName: 'webgl', + kernelFunc: select + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + const SELU = ` + // Stable and Attracting Fixed Point (0, 1) for Normalized Weights. + // see: https://arxiv.org/abs/1706.02515 + float scaleAlpha = ${SELU_SCALEALPHA}; + float scale = ${SELU_SCALE}; + return (x >= 0.0) ? scale * x : scaleAlpha * (exp(x) - 1.0); +`; + const selu = unaryKernelFunc({ opSnippet: SELU }); + const seluConfig = { + kernelName: Selu$1, + backendName: 'webgl', + kernelFunc: selu, + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + const SIGMOID = CHECK_NAN_SNIPPET_UNARY + ` + return 1.0 / (1.0 + exp(-1.0 * x)); +`; + const SIGMOID_PACKED = ` + vec4 result = 1.0 / (1.0 + exp(-1.0 * x)); + bvec4 isNaN = isnan(x); + + result.r = isNaN.r ? x.r : result.r; + result.g = isNaN.g ? x.g : result.g; + result.b = isNaN.b ? x.b : result.b; + result.a = isNaN.a ? x.a : result.a; + + return result; +`; + const sigmoid = unaryKernelFunc({ + opSnippet: SIGMOID, + packedOpSnippet: SIGMOID_PACKED, + cpuKernelImpl: sigmoidImplCPU + }); + const sigmoidConfig = { + kernelName: Sigmoid$1, + backendName: 'webgl', + kernelFunc: sigmoid, + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + // Sign does not propagate NANs. 
+ const SIGN = ` + if (isnan(x)) { return 0.0; } + return sign(x); +`; + const sign = unaryKernelFunc({ opSnippet: SIGN }); + const signConfig = { + kernelName: Sign, + backendName: 'webgl', + kernelFunc: sign, + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + const SIN = CHECK_NAN_SNIPPET_UNARY + ` + return sin(x); +`; + const SIN_PACKED = ` + vec4 result = sin(x); + bvec4 isNaN = isnan(x); + ${CHECK_NAN_SNIPPET_PACKED} + return result; +`; + const sin = unaryKernelFunc({ opSnippet: SIN, packedOpSnippet: SIN_PACKED }); + const sinConfig = { + kernelName: Sin, + backendName: 'webgl', + kernelFunc: sin, + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ + const SINH = ` + float e2x = exp(x); + return (e2x - 1.0 / e2x) / 2.0; +`; + const sinh = unaryKernelFunc({ opSnippet: SINH }); + const sinhConfig = { + kernelName: Sinh, + backendName: 'webgl', + kernelFunc: sinh, + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + const SOFTPLUS = ` + float epsilon = 1.1920928955078125e-7; + float threshold = log(epsilon) + 2.0; + + bool too_large = x > -threshold; + bool too_small = x < threshold; + + float result; + float exp_x = exp(x); + + if (too_large){ + result = x; + } + else if (too_small){ + result = exp_x; + } + else{ + result = log(exp_x + 1.0); + } + return result; +`; + const softplus = unaryKernelFunc({ opSnippet: SOFTPLUS }); + const softplusConfig = { + kernelName: Softplus$1, + backendName: 'webgl', + kernelFunc: softplus, + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + const spaceToBatchND = (args) => { + const { inputs, backend, attrs } = args; + const { x } = inputs; + const { blockShape, paddings } = attrs; + assert$1(x.shape.length <= 4, () => 'spaceToBatchND for rank > 4 with a WebGL backend not ' + + 'implemented yet'); + const prod = blockShape.reduce((a, b) => a * b); + const completePaddings = [[0, 0]]; + completePaddings.push(...paddings); + for (let i = 1 + blockShape.length; i < x.shape.length; ++i) { + completePaddings.push([0, 0]); + } + const toDispose = []; + const paddedX = padV2({ + inputs: { x }, + backend, + attrs: { paddings: completePaddings, constantValue: 0 } + }); + const reshapedPaddedShape = getReshaped(paddedX.shape, blockShape, prod, false); + const permutedReshapedPaddedPermutation = getPermuted(reshapedPaddedShape.length, blockShape.length, false); + const flattenShape = getReshapedPermuted(paddedX.shape, blockShape, prod, false); + const reshapedPaddedX = reshape({ inputs: { x: paddedX }, backend, attrs: { shape: reshapedPaddedShape } }); + const paddedXT = transpose({ + inputs: { x: reshapedPaddedX }, + backend, + attrs: { perm: permutedReshapedPaddedPermutation } + }); + const result = reshape({ inputs: { x: paddedXT }, backend, attrs: { shape: flattenShape } }); + toDispose.push(paddedX); + toDispose.push(reshapedPaddedX); + toDispose.push(paddedXT); + toDispose.forEach(t => backend.disposeIntermediateTensorInfo(t)); + return result; + }; + const spaceToBatchNDConfig = { + kernelName: 
SpaceToBatchND, + backendName: 'webgl', + kernelFunc: spaceToBatchND + }; + + /** + * @license + * Copyright 2021 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + function sparseFillEmptyRows(args) { + const { inputs, backend } = args; + const { indices, values, denseShape, defaultValue } = inputs; + if (denseShape.shape.length !== 1) { + throw new Error(`Dense shape must be a vector, saw: + ${denseShape.shape}`); + } + if (indices.shape.length !== 2) { + throw new Error(`Indices must be a matrix, saw: + ${indices.shape}`); + } + if (values.shape.length !== 1) { + throw new Error(`Values must be a vector, saw: + ${values.shape}`); + } + if (defaultValue.shape.length !== 0) { + throw new Error(`Default value must be a scalar, saw: + ${defaultValue.shape}`); + } + const $indices = backend.readSync(indices.dataId); + const $values = backend.readSync(values.dataId); + const $denseShape = backend.readSync(denseShape.dataId); + const $defaultValue = backend.readSync(defaultValue.dataId)[0]; + const [outputIndices, outputIndicesShape, outputValues, emptyRowIndicator, reverseIndexMap] = sparseFillEmptyRowsImplCPU($indices, indices.shape, indices.dtype, $values, values.dtype, $denseShape, $defaultValue); + return [ + backend.makeTensorInfo(outputIndicesShape, indices.dtype, outputIndices), + backend.makeTensorInfo([outputIndicesShape[0]], 
values.dtype, outputValues), + backend.makeTensorInfo([emptyRowIndicator.length], 'bool', new Uint8Array(emptyRowIndicator.map((value) => Number(value)))), + backend.makeTensorInfo([reverseIndexMap.length], indices.dtype, new Int32Array(reverseIndexMap)), + ]; + } + const sparseFillEmptyRowsConfig = { + kernelName: SparseFillEmptyRows, + backendName: 'webgl', + kernelFunc: sparseFillEmptyRows, + }; + + /** + * @license + * Copyright 2021 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ + function sparseReshape(args) { + const { inputs, backend } = args; + const { inputIndices, inputShape, newShape } = inputs; + if (inputIndices.shape.length !== 2) { + throw new Error(`Input indices should be a matrix but received shape ${inputIndices.shape}`); + } + if (inputShape.shape.length !== 1) { + throw new Error(`Input shape should be a vector but received shape ${inputShape.shape}`); + } + if (newShape.shape.length !== 1) { + throw new Error(`Target shape should be a vector but received shape ${newShape.shape}`); + } + const $inputShape = Array.from(backend.readSync(inputShape.dataId)); + const $inputIndices = backend.readSync(inputIndices.dataId); + const targetShape = Array.from(backend.readSync(newShape.dataId)); + const [newIndices, indicesShape, outputShape] = sparseReshapeImplCPU($inputIndices, inputIndices.shape, inputIndices.dtype, $inputShape, targetShape); + return [ + backend.makeTensorInfo(indicesShape, inputIndices.dtype, newIndices), + backend.makeTensorInfo([outputShape.length], newShape.dtype, new Int32Array(outputShape)), + ]; + } + const sparseReshapeConfig = { + kernelName: SparseReshape, + backendName: 'webgl', + kernelFunc: sparseReshape, + }; + + /** + * @license + * Copyright 2021 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ + function sparseSegmentMean(args) { + const { inputs, backend } = args; + const { data, indices, segmentIds } = inputs; + if (data.shape.length < 1) { + throw new Error(`Data should be at least 1 dimensional but received scalar`); + } + if (indices.shape.length !== 1) { + throw new Error(`Indices should be a vector but received shape + ${indices.shape}`); + } + if (segmentIds.shape.length !== 1) { + throw new Error(`Segment ids should be a vector but received shape + ${segmentIds.shape}`); + } + const $data = backend.readSync(data.dataId); + const $indices = backend.readSync(indices.dataId); + const $segmentIds = backend.readSync(segmentIds.dataId); + const [outputData, outputDataShape] = sparseSegmentReductionImplCPU($data, data.shape, data.dtype, $indices, $segmentIds, true); + return backend.makeTensorInfo(outputDataShape, data.dtype, outputData); + } + const sparseSegmentMeanConfig = { + kernelName: SparseSegmentMean, + backendName: 'webgl', + kernelFunc: sparseSegmentMean, + }; + + /** + * @license + * Copyright 2021 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ + function sparseSegmentSum(args) { + const { inputs, backend } = args; + const { data, indices, segmentIds } = inputs; + if (data.shape.length < 1) { + throw new Error(`Data should be at least 1 dimensional but received scalar`); + } + if (indices.shape.length !== 1) { + throw new Error(`Indices should be a vector but received shape + ${indices.shape}`); + } + if (segmentIds.shape.length !== 1) { + throw new Error(`Segment ids should be a vector but received shape + ${segmentIds.shape}`); + } + const $data = backend.readSync(data.dataId); + const $indices = backend.readSync(indices.dataId); + const $segmentIds = backend.readSync(segmentIds.dataId); + const [outputData, outputDataShape] = sparseSegmentReductionImplCPU($data, data.shape, data.dtype, $indices, $segmentIds); + return backend.makeTensorInfo(outputDataShape, data.dtype, outputData); + } + const sparseSegmentSumConfig = { + kernelName: SparseSegmentSum, + backendName: 'webgl', + kernelFunc: sparseSegmentSum, + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ + function sparseToDense(args) { + const { inputs, backend, attrs } = args; + const { sparseIndices, sparseValues, defaultValue } = inputs; + const { outputShape } = attrs; + const { sliceRank, numUpdates, sliceSize, strides, outputSize } = calculateShapes(sparseValues, sparseIndices, outputShape); + const sumDupeIndices = false; + if (sparseValues.dtype === 'string') { + const indicesBuf = backend.bufferSync(sparseIndices); + const updatesBuf = backend.bufferSync(sparseValues); + const $defaultValue = decodeString(backend.readSync(defaultValue.dataId)[0]); + const outBuf = scatterImplCPU(indicesBuf, updatesBuf, outputShape, outputSize, sliceSize, numUpdates, sliceRank, strides, $defaultValue, sumDupeIndices); + return backend.makeTensorInfo(outputShape, outBuf.dtype, outBuf.values); + } + const program = new ScatterProgram(numUpdates, sliceRank, sparseIndices.shape.length, sparseValues.shape.length, strides, [outputSize, 1], sumDupeIndices); + const res = backend.runWebGLProgram(program, [sparseValues, sparseIndices, defaultValue], sparseValues.dtype); + const reshaped = reshape({ inputs: { x: res }, backend, attrs: { shape: outputShape } }); + backend.disposeIntermediateTensorInfo(res); + return reshaped; + } + const sparseToDenseConfig = { + kernelName: SparseToDense, + backendName: 'webgl', + kernelFunc: sparseToDense + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + function splitV(args) { + const { inputs, backend, attrs } = args; + const { x } = inputs; + const { numOrSizeSplits, axis } = attrs; + const $axis = parseAxisParam(axis, x.shape)[0]; + const splitSizes = prepareSplitSize(x, numOrSizeSplits, $axis); + const xRank = x.shape.length; + const begin = new Array(xRank).fill(0); + const size = x.shape.slice(); + return splitSizes.map(s => { + const sliceSize = [...size]; + sliceSize[$axis] = s; + const sliceT = slice({ inputs: { x }, backend, attrs: { begin, size: sliceSize } }); + begin[$axis] += s; + return sliceT; + }); + } + const splitVConfig = { + kernelName: SplitV, + backendName: 'webgl', + kernelFunc: splitV + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + const SQRT = `return sqrt(x);`; + const sqrt = unaryKernelFunc({ opSnippet: SQRT, packedOpSnippet: SQRT, cpuKernelImpl: sqrtImplCPU }); + const sqrtConfig = { + kernelName: Sqrt, + backendName: 'webgl', + kernelFunc: sqrt + }; + + /** + * @license + * Copyright 2019 Google LLC. All Rights Reserved. 
+ * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + const SQUARE = `return x * x;`; + const square = unaryKernelFunc({ opSnippet: SQUARE }); + const squareConfig = { + kernelName: Square, + backendName: 'webgl', + kernelFunc: square, + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + const SQUARED_DIFFERENCE = 'return (a - b) * (a - b);'; + const squaredDifference = binaryKernelFunc({ opSnippet: SQUARED_DIFFERENCE, packedOpSnippet: SQUARED_DIFFERENCE }); + const squaredDifferenceConfig = { + kernelName: SquaredDifference, + backendName: 'webgl', + kernelFunc: squaredDifference, + }; + + /** + * @license + * Copyright 2023 Google LLC. 
+ * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + function staticRegexReplace(args) { + const { inputs, backend, attrs } = args; + const { x } = inputs; + if (x.dtype !== 'string') { + throw new Error('Input must be of datatype string'); + } + const $x = backend.readSync(x.dataId); + const stringInput = fromUint8ToStringArray($x); + const output = staticRegexReplaceImplCPU(stringInput, 'string', attrs); + return backend.makeTensorInfo(x.shape, 'string', output); + } + const staticRegexReplaceConfig = { + kernelName: StaticRegexReplace, + backendName: 'webgl', + kernelFunc: staticRegexReplace, + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ + function step({ inputs, attrs, backend }) { + const { x } = inputs; + const opSnippet = CHECK_NAN_SNIPPET$1 + ` + return x > 0.0 ? 1.0 : float(${attrs.alpha}); + `; + const program = new UnaryOpProgram(x.shape, opSnippet); + return backend.runWebGLProgram(program, [x], x.dtype); + } + const stepConfig = { + kernelName: Step, + backendName: 'webgl', + kernelFunc: step, + }; + + /** + * @license + * Copyright 2017 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + class StridedSliceProgram { + constructor(begin, strides, size) { + this.variableNames = ['x']; + this.outputShape = size; + const rank = size.length; + const inputDtype = getCoordsDataType(size.length); + const dtype = getCoordsDataType(size.length); + let newCoords = ''; + if (rank === 1) { + newCoords = 'coords * strides + begin'; + } + else { + let outputAxis = 0; + newCoords = + size.map((_, i) => { + outputAxis++; + return size.length === 1 ? 
+ `coords * strides[${i}] + begin[${i}]` : + `coords[${outputAxis - 1}] * strides[${i}] + begin[${i}]`; + }) + .join(','); + } + this.userCode = ` + ${inputDtype} begin = ${inputDtype}(${begin}); + ${inputDtype} strides = ${inputDtype}(${strides}); + + void main() { + ${dtype} coords = getOutputCoords(); + setOutput(getX(${newCoords})); + } + `; + } + } + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + function stridedSlice(args) { + const { inputs, backend, attrs } = args; + const { x } = inputs; + const { begin, end, strides, beginMask, endMask, ellipsisMask, newAxisMask, shrinkAxisMask } = attrs; + const { finalShapeSparse, finalShape, isIdentity, sliceDim0, isSimpleSlice, begin: $begin, end: $end, strides: $strides } = sliceInfo(x.shape, begin, end, strides, beginMask, endMask, ellipsisMask, newAxisMask, shrinkAxisMask); + let result; + if (isIdentity) { + // Optimization #1, slice is a no-op plus reshape + result = reshape({ inputs: { x }, backend, attrs: { shape: finalShape } }); + } + else if (sliceDim0 || isSimpleSlice) { + // Optimization #2, slice is memory contiguous (only occurs in dim 0) + assert$1(x.shape.length >= 1, () => `Input must have rank at least 1, got: ${x.shape.length}`); + const size = computeOutShape$2($begin, $end, $strides); + // To tolerate begin[0] > end[0] (a 0-output 
slice), we min(begin, end). + const sliced = slice({ inputs: { x }, backend, attrs: { begin: $begin, size } }); + result = + reshape({ inputs: { x: sliced }, backend, attrs: { shape: finalShape } }); + backend.disposeIntermediateTensorInfo(sliced); + } + else { + const shouldExecuteOnCPU = backend.shouldExecuteOnCPU([x]); + if (shouldExecuteOnCPU) { + // tslint:disable-next-line: no-unnecessary-type-assertion + const values = backend.readSync(x.dataId); + // tslint:disable-next-line: no-unnecessary-type-assertion + const xBuf = buffer(x.shape, x.dtype, values); + const resultValues = stridedSliceImplCPU(finalShapeSparse, xBuf, $strides, $begin); + result = backend.makeTensorInfo(finalShape, x.dtype, resultValues.values); + } + else { + const program = new StridedSliceProgram($begin, $strides, finalShapeSparse); + result = backend.runWebGLProgram(program, [x], x.dtype); + } + } + const resultReshaped = reshape({ inputs: { x: result }, backend, attrs: { shape: finalShape } }); + backend.disposeIntermediateTensorInfo(result); + return resultReshaped; + } + const stridedSliceConfig = { + kernelName: StridedSlice, + backendName: 'webgl', + kernelFunc: stridedSlice + }; + + /** + * @license + * Copyright 2021 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ + function stringNGrams(args) { + const { inputs, backend, attrs } = args; + const { separator, nGramWidths, leftPad, rightPad, padWidth, preserveShortSequences } = attrs; + const { data, dataSplits } = inputs; + const $data = backend.readSync(data.dataId); + const $dataSplits = backend.readSync(dataSplits.dataId); + const [nGrams, nGramsSplits] = stringNGramsImplCPU($data, $dataSplits, separator, nGramWidths, leftPad, rightPad, padWidth, preserveShortSequences); + return [ + backend.makeTensorInfo([nGrams.length], 'string', nGrams), + backend.makeTensorInfo(dataSplits.shape, 'int32', nGramsSplits), + ]; + } + const stringNGramsConfig = { + kernelName: StringNGrams, + backendName: 'webgl', + kernelFunc: stringNGrams, + }; + + /** + * @license + * Copyright 2021 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ + function stringSplit(args) { + const { inputs, backend, attrs } = args; + const { skipEmpty } = attrs; + const { input, delimiter } = inputs; + if (input.dtype !== 'string') { + throw new Error('Input must be of datatype string'); + } + if (input.shape.length !== 1) { + throw new Error(`Input must be a vector, got shape: ${input.shape}`); + } + if (delimiter.shape.length !== 0) { + throw new Error(`Delimiter must be a scalar, got shape: ${delimiter.shape}`); + } + const $input = backend.readSync(input.dataId); + const $delimiter = backend.readSync(delimiter.dataId)[0]; + const [indices, values, shape] = stringSplitImplCPU($input, $delimiter, skipEmpty); + const outputSize = values.length; + return [ + backend.makeTensorInfo([outputSize, 2], 'int32', indices), + backend.makeTensorInfo([outputSize], 'string', values), + backend.makeTensorInfo([2], 'int32', new Int32Array(shape)) + ]; + } + const stringSplitConfig = { + kernelName: StringSplit, + backendName: 'webgl', + kernelFunc: stringSplit, + }; + + /** + * @license + * Copyright 2021 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ + function stringToHashBucketFast(args) { + const { inputs, backend, attrs } = args; + const { numBuckets } = attrs; + const { input } = inputs; + if (input.dtype !== 'string') { + throw new Error('Input must be of datatype string'); + } + if (numBuckets <= 0) { + throw new Error(`Number of buckets must be at least 1`); + } + const $input = backend.readSync(input.dataId); + const output = stringToHashBucketFastImplCPU($input, numBuckets); + return backend.makeTensorInfo(input.shape, 'int32', output); + } + const stringToHashBucketFastConfig = { + kernelName: StringToHashBucketFast, + backendName: 'webgl', + kernelFunc: stringToHashBucketFast, + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + const TAN = `return tan(x);`; + const tan = unaryKernelFunc({ opSnippet: TAN }); + const tanConfig = { + kernelName: Tan, + backendName: 'webgl', + kernelFunc: tan, + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + const TANH = ` + float e2x = exp(-2.0 * abs(x)); + return sign(x) * (1.0 - e2x) / (1.0 + e2x); +`; + const tanh = unaryKernelFunc({ opSnippet: TANH }); + const tanhConfig = { + kernelName: Tanh$1, + backendName: 'webgl', + kernelFunc: tanh, + }; + + /** + * @license + * Copyright 2022 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ + function tensorScatterUpdate(args) { + const { inputs, backend, attrs } = args; + const { tensor, indices, updates } = inputs; + const {} = attrs; + const { sliceRank, numUpdates, sliceSize, strides, outputSize } = calculateShapes(updates, indices, tensor.shape); + const flattenShape = [outputSize / sliceSize, sliceSize]; + if (outputSize === 0) { + return backend.makeTensorInfo(tensor.shape, indices.dtype); + } + const flattenIndices = reshape({ inputs: { x: indices }, backend, attrs: { shape: [numUpdates, sliceRank] } }); + const flattenX = reshape({ inputs: { x: updates }, backend, attrs: { shape: [numUpdates, sliceSize] } }); + const flattenTensor = reshape({ inputs: { x: tensor }, backend, attrs: { shape: flattenShape } }); + const program = new ScatterProgram(numUpdates, sliceRank, flattenIndices.shape.length, flattenX.shape.length, strides, flattenShape, false, true); + const res = backend.runWebGLProgram(program, [flattenX, flattenIndices, flattenTensor], flattenTensor.dtype); + const reshaped = reshape({ inputs: { x: res }, backend, attrs: { shape: tensor.shape } }); + backend.disposeIntermediateTensorInfo(flattenIndices); + backend.disposeIntermediateTensorInfo(flattenX); + backend.disposeIntermediateTensorInfo(flattenTensor); + backend.disposeIntermediateTensorInfo(res); + return reshaped; + } + const tensorScatterUpdateConfig = { + kernelName: TensorScatterUpdate, + backendName: 'webgl', + kernelFunc: tensorScatterUpdate + }; + + /** + * @license + * Copyright 2017 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + class TileProgram { + constructor(aShape, reps) { + this.variableNames = ['A']; + const outputShape = new Array(aShape.length); + for (let i = 0; i < outputShape.length; i++) { + outputShape[i] = aShape[i] * reps[i]; + } + this.outputShape = outputShape; + this.rank = outputShape.length; + const dtype = getCoordsDataType(this.rank); + const sourceCoords = getSourceCoords(aShape); + this.userCode = ` + void main() { + ${dtype} resRC = getOutputCoords(); + setOutput(getA(${sourceCoords})); + } + `; + } + } + function getSourceCoords(aShape) { + const rank = aShape.length; + if (rank > 5) { + throw Error(`Tile for rank ${rank} is not yet supported`); + } + if (rank === 1) { + return `imod(resRC, ${aShape[0]})`; + } + const currentCoords = ['resRC.x', 'resRC.y', 'resRC.z', 'resRC.w', 'resRC.u']; + const sourceCoords = []; + for (let i = 0; i < aShape.length; i++) { + sourceCoords.push(`imod(${currentCoords[i]}, ${aShape[i]})`); + } + return sourceCoords.join(); + } + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + function tile(params) { + const { inputs, backend, attrs } = params; + const { x } = inputs; + const { reps } = attrs; + // tile gpu program cannot handle rank > 5 case. + if (x.dtype === 'string' || x.shape.length > 5) { + // Even thought string tensor is always on CPU, just to be consistent on how + // to access tensor data. + const data = backend.readSync(x.dataId); + const value = x.dtype === 'string' ? + data.map(d => decodeString(d)) : + data; + const buf = buffer(x.shape, x.dtype, value); + const outBuf = tileImplCPU(buf, reps); + return backend.makeTensorInfo(outBuf.shape, outBuf.dtype, outBuf.values); + } + const program = new TileProgram(x.shape, reps); + const output = backend.runWebGLProgram(program, [x], x.dtype); + return output; + } + const tileConfig = { + kernelName: Tile, + backendName: 'webgl', + kernelFunc: tile, + }; + + // Based on Algorithm 2 of Bitonic Top K, ref: + // https://anilshanbhag.in/static/papers/gputopk_sigmod18.pdf + // The original algorithm is based on computing the top K only, however + // since for TFJS we require the indices of the top K values as well then the + // algorithm found here is a bit modified. Rather than producing the values + // at each step, the indices containing the top K are generated instead. + // The output values are not generated to reduce the number of outputs in the + // GPU, the values can easily be retrieved from the indices using a gather + // op. 
+ class SwapProgram { + /** + * @param shape desired output shape (can be larger than input shape, output + * will be padded with -Infinity) + */ + constructor(shape) { + this.variableNames = ['x', 'indices']; + // |n| Size of the original input of TopK. + // |firstPass|indicates if this is the first time swap is being used which + // means no indices input containing the top K is present yet. + // |inc| Swaps pairs of indices (0, inc), (1, inc + 1), (2, inc + 2) ... + this.customUniforms = [ + { name: 'n', type: 'int' }, + { name: 'firstPass', type: 'int' }, + { name: 'negativeInf', type: 'float' }, + { name: 'dir', type: 'int' }, + { name: 'inc', type: 'int' } + ]; + this.outputShape = shape; + this.userCode = ` + void main() { + ivec2 coords = getOutputCoords(); + int batch = coords[0]; + int elemIdx = coords[1]; + + // We compare elements pair-wise within a group of size 2 * inc. + // The comparing rule for each group alternates between ascending + // and descending. Within each group, we compare each pair at + // positions i and i+inc. To decide whether an element at position i + // is x0 or x1, we mod it by 2 * inc, if the result is smaller than + // inc, it is in the first half of the group, we denote it as x0, + // otherwise we denote it as x1. + // For example, as shown in the Bitonic top K paper referenced above, + // Figure5(a) shows that element[1] is in the + // second half of the group when group size is 2, but it is in the + // first half of the group when group size is 4. + + bool isFirstInPair = imod(elemIdx, 2 * inc) < inc; + int i = isFirstInPair ? elemIdx : elemIdx - inc; + + int i0 = firstPass == 1 ? i : int(getIndices(batch, i)); + int i1 = firstPass == 1 ? i + inc : int(getIndices(batch, i + inc)); + float x0 = i0 < n ? getX(batch, i0) : negativeInf; + float x1 = i1 < n ? getX(batch, i1) : negativeInf; + + // Denotes which direction indices are in (ascending or descending). 
+ bool reverse = imod(elemIdx, 2 * dir) >= dir; + bool isGreater = x0 > x1 || (x0 == x1 && i1 > i0); + if (reverse == isGreater) { // Elements in opposite order of direction + int iTemp = i0; + i0 = i1; + i1 = iTemp; + } + if (isFirstInPair) { + setOutput(float(i0)); + } else { + setOutput(float(i1)); + } + } + `; + } + } + class MergeProgram { + /** + * @param shape desired output shape (must be half of the input size) + */ + constructor(shape) { + this.variableNames = ['x', 'indices']; + // |n| Size of the original input of TopK + // |firstPass| indicates if this is the first time swap is being used which + // means no indices input containing the top K is present yet. + // |k| Top k elements desired + this.customUniforms = [ + { name: 'n', type: 'int' }, + { name: 'firstPass', type: 'int' }, + { name: 'k', type: 'int' } + ]; + this.outputShape = shape; + this.userCode = ` + void main() { + // Takes max of indices (0, k), (1, k + 1), (2, k + 2) ... + ivec2 coords = getOutputCoords(); + int batch = coords[0]; + int elemIdx = coords[1]; + + // The output size is half of the previous size. + // If the previous sequence is | | | | _ _ _ _ | | | | _ _ _ _ (k=4), + // we only need to output the indices at positions |, the indices at + // positions _ can be thrown away, see Figure5(b) After Phase 2 + // (Merge phase) in the Bitonic Top K paper referenced above. + // For example, the paper shows we only need to output the orange bars. + // The output sequence should look like this | | | | | | | |. + // Because the sequence is halved, to map the output index back + // to the previous sequence to find the corresponding value, + // we need to double the index. When we double the index, + // we basically interpolate a position, so 2i looks like + // | _ | _ | _ | _ | _ | _ | _. We move the | to the first k position + // of each 2k positions by - elemIdx % k. E.g. 
for output at + // index 4,5,6,7, we want to get the corresponding element at + // original index 8,9,10,11, for output at index 8,9,10,11, + // we want to get the corresponding element at original index + // 16,17,18,19, so on and so forth. + + int i = elemIdx < k ? elemIdx : (elemIdx * 2 - imod(elemIdx, k)); + int i0 = firstPass == 1 ? i : int(getIndices(batch, i)); + int i1 = firstPass == 1 ? i + k : int(getIndices(batch, i + k)); + + float x0 = getX(batch, i0); + float x1 = i1 < n ? getX(batch, i1) : x0; + + setOutput(x0 >= x1 ? float(i0) : float(i1)); + } + `; + } + } + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + function disposeIntermediateTensorInfoOrNull(backend, tensorInfo) { + if (tensorInfo !== null) { + backend.disposeIntermediateTensorInfo(tensorInfo); + } + } + function roundUpToPow2(num) { + let pow2 = 1; + while (pow2 < num) { + pow2 *= 2; + } + return pow2; + } + // Based on Algorithm 2 of Bitonic Top K, ref: + // https://anilshanbhag.in/static/papers/gputopk_sigmod18.pdf + function topK(args) { + const { inputs, backend, attrs } = args; + const { x } = inputs; + const { k, sorted } = attrs; + // Empirically determined constant used to determine last dim threshold for + // handing off execution to the CPU. 
+ const TOPK_LAST_DIM_CPU_HANDOFF_SIZE_THRESHOLD = env().getNumber('TOPK_LAST_DIM_CPU_HANDOFF_SIZE_THRESHOLD'); + // Empirically determined constant used to determine k threshold for handing + // off execution to the CPU. + const TOPK_K_CPU_HANDOFF_THRESHOLD = env().getNumber('TOPK_K_CPU_HANDOFF_THRESHOLD'); + const xShape = x.shape; + const lastDim = xShape[xShape.length - 1]; + if (backend.shouldExecuteOnCPU([x]) || + lastDim < TOPK_LAST_DIM_CPU_HANDOFF_SIZE_THRESHOLD || + k > TOPK_K_CPU_HANDOFF_THRESHOLD) { + const xVals = backend.readSync(x.dataId); + const [allTopKVals, allTopKIndices] = topKImplCPU(xVals, xShape, x.dtype, k, sorted); + return [ + backend.makeTensorInfo(allTopKVals.shape, allTopKVals.dtype, allTopKVals.values), + backend.makeTensorInfo(allTopKIndices.shape, allTopKIndices.dtype, allTopKIndices.values) + ]; + } + if (k === 0) { + xShape[xShape.length - 1] = 0; + return [ + backend.makeTensorInfo(xShape, x.dtype, []), + backend.makeTensorInfo(xShape, 'int32', []) + ]; + } + if (lastDim === 1 /* firstPass */) { + return [ + x, fill({ attrs: { shape: xShape, dtype: 'int32', value: 0 }, backend }) + ]; + } + // Eagerly unpack x input since it is passed in to all the shaders which + // require unpacked inputs. + const xtexData = backend.texData.get(x.dataId); + const xIsPacked = xtexData !== null && xtexData.isPacked; + const xUnPacked = xIsPacked ? backend.unpackTensor(x) : x; + // Reshape into a 2d tensor [batch, lastDim] and compute topk along lastDim. 
+ const xSize = sizeFromShape(xShape); + const batch = xSize / lastDim; + const x2D = reshape({ inputs: { x: xUnPacked }, attrs: { shape: [batch, lastDim] }, backend }); + if (xIsPacked) { + disposeIntermediateTensorInfoOrNull(backend, xUnPacked); + } + const kPow2 = roundUpToPow2(k); + const lastDimPow2 = roundUpToPow2(lastDim); + // Only the indices containing the top K are kept at every step to reduce + // number of outputs in the GPU algorithms, so once the final set of indices + // is computed then gather is used to grab the corresponding values + // from the original input. + let indices = null; + // GPU algorithm always takes in an indices input but this input is not used + // on the first run of a GPU algorithm, therefore if indices is null we simply + // pass in x2D instead of it but the value will not actually be used + const getInputs = () => indices === null ? [x2D, x2D] : [x2D, indices]; + const runSwap = (dir, inc, shape) => { + const inputs = getInputs(); + const program = new SwapProgram(shape); + const fistPass = indices === null ? 1 : 0; + const customValues = [[lastDim], [fistPass], [Number.NEGATIVE_INFINITY], [dir], [inc]]; + const prevIndices = indices; + indices = backend.runWebGLProgram(program, inputs, 'int32', customValues); + disposeIntermediateTensorInfoOrNull(backend, prevIndices); + }; + // Step 1: local sort + for (let len = 1; len < kPow2; len *= 2) { + const dir = len * 2; + for (let inc = len; inc >= 1; inc /= 2) { + runSwap(dir, inc, [batch, lastDimPow2]); + } + } + // Step 2: merge + for (let indicesSize = lastDimPow2; indicesSize > kPow2; indicesSize /= 2) { + const inputs = getInputs(); + const mergeProgram = new MergeProgram([batch, indicesSize / 2]); + const firstPass = indices === null ? 
1 : 0; + const customValues = [[lastDim], [firstPass], [kPow2]]; + const prevIndices = indices; + indices = + backend.runWebGLProgram(mergeProgram, inputs, 'int32', customValues); + disposeIntermediateTensorInfoOrNull(backend, prevIndices); + // Step 3: rebuild + const len = kPow2 / 2; + const dir = len * 2; + for (let inc = len; inc >= 1; inc /= 2) { + runSwap(dir, inc, indices.shape); + } + } + // Keep only the requested top K results instead of kPow2 + let prevIndices = indices; + indices = slice({ inputs: { x: indices }, backend, attrs: { begin: 0, size: [batch, k] } }); + disposeIntermediateTensorInfoOrNull(backend, prevIndices); + // Gather values on last dimension + let values = gatherV2({ inputs: { x: x2D, indices }, backend, attrs: { axis: 1, batchDims: 1 } }); + disposeIntermediateTensorInfoOrNull(backend, x2D); + // Reshape back to the original input shape, except that the last + // dimension is k. + const newShape = xShape.slice(0, -1); + newShape.push(k); + prevIndices = indices; + indices = reshape({ inputs: { x: indices }, attrs: { shape: newShape }, backend }); + disposeIntermediateTensorInfoOrNull(backend, prevIndices); + const prevValues = values; + values = reshape({ inputs: { x: values }, attrs: { shape: newShape }, backend }); + disposeIntermediateTensorInfoOrNull(backend, prevValues); + return [values, indices]; + } + const topKConfig = { + kernelName: TopK, + backendName: 'webgl', + kernelFunc: topK + }; + + /** + * @license + * Copyright 2021 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + class TransformProgram { + constructor(imageHeight, imageWidth, interpolation, fillMode, fillValue, outShape) { + this.variableNames = ['Image', 'Transforms']; + this.outputShape = outShape; + const interpolationModeId = interpolation === 'nearest' ? 1 : 2; + let fillModeId; + switch (fillMode) { + case 'constant': + fillModeId = 1; + break; + case 'reflect': + fillModeId = 2; + break; + case 'wrap': + fillModeId = 3; + break; + case 'nearest': + fillModeId = 4; + break; + default: + fillModeId = 1; + break; + } + this.userCode = ` + float mapCoord(float outCoord, float len) { + float inCoord = outCoord; + if(${fillModeId} == 2) { + if (inCoord < 0.0) { + if (len <= 1.0) { + inCoord = 0.0; + } else { + float sz2 = 2.0 * len; + if (inCoord < sz2) { + inCoord = sz2 * float(int(float(-inCoord / sz2))) + + inCoord; + } + inCoord = inCoord < -len ? 
inCoord + sz2 : -inCoord - 1.0; + } + } else if (inCoord > len - 1.0) { + if (len <= 1.0) { + inCoord = 0.0; + } else { + float sz2 = 2.0 * len; + inCoord -= sz2 * float(int(float(inCoord / sz2))); + if (inCoord >= len) { + inCoord = sz2 - inCoord - 1.0; + } + } + } + return clamp(inCoord, 0.0, len - 1.0); + } else if (${fillModeId} == 3) { + if (inCoord < 0.0) { + if (len <= 1.0) { + inCoord = 0.0; + } else { + float sz = len - 1.0; + inCoord += len * (float(int(float(-inCoord / sz))) + 1.0); + } + } else if (inCoord > len - 1.0) { + if (len <= 1.0) { + inCoord = 0.0; + } else { + float sz = len - 1.0; + inCoord -= len * float(int(float(inCoord / sz))); + } + } + return clamp(inCoord, 0.0, len - 1.0); + } else if (${fillModeId} == 4) { + return clamp(outCoord, 0.0, len - 1.0); + } else { + return outCoord; + } + } + + float readWithFillValue(int batch, int coordY, int coordX, + int channel) { + float outputValue; + if (0 <= coordY && coordY < ${imageHeight} && 0 <= coordX && coordX < ${imageWidth}) { + outputValue = getImage(batch, coordY, coordX, channel); + } else { + outputValue = float(${fillValue}); + } + return outputValue; + } + + void main() { + ivec4 coords = getOutputCoords(); + float outputValue; + int batch = coords[0]; + int x = coords[2]; + int y = coords[1]; + int channel = coords[3]; + float xf = float(x); + float yf = float(y); + float a1 = getTransforms(batch, 0); + float a2 = getTransforms(batch, 1); + float a3 = getTransforms(batch, 2); + float b1 = getTransforms(batch, 3); + float b2 = getTransforms(batch, 4); + float b3 = getTransforms(batch, 5); + float c1 = getTransforms(batch, 6); + float c2 = getTransforms(batch, 7); + float projection = c1 * xf + c2 * yf + 1.0; + if (projection == 0.0) { + outputValue = float(${fillValue}); + } else { + float inX = (a1 * xf + a2 * yf + a3) / projection; + float inY = (b1 * xf + b2 * yf + b3) / projection; + float mapX = mapCoord(inX, float(${imageWidth})); + float mapY = mapCoord(inY, 
float(${imageHeight})); + + if (${interpolationModeId} == 1) { + int coordY = int(round(mapY)); + int coordX = int(round(mapX)); + outputValue = readWithFillValue(batch, coordY, coordX, + channel); + } else { + float yFloor = floor(mapY); + float xFloor = floor(mapX); + float yCeil = yFloor + 1.0; + float xCeil = xFloor + 1.0; + float valueYFloor = (xCeil - mapX) * + readWithFillValue(batch, int(yFloor), int(xFloor), channel) + + (mapX - xFloor) * + readWithFillValue(batch, int(yFloor), int(xCeil), channel); + float valueYCeil = (xCeil - mapX) * + readWithFillValue(batch, int(yCeil), int(xFloor), channel) + + (mapX - xFloor) * + readWithFillValue(batch, int(yCeil), int(xCeil), channel); + outputValue = (yCeil - mapY) * valueYFloor + + (mapY - yFloor) * valueYCeil; + } + } + setOutput(outputValue); + } + `; + } + } + + /** + * @license + * Copyright 2021 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + function transform(args) { + const { inputs, backend, attrs } = args; + const { image, transforms } = inputs; + const { interpolation, fillMode, fillValue, outputShape } = attrs; + const [batch, imageHeight, imageWidth, numChannels] = image.shape; + const [outHeight, outWidth] = outputShape != null ? 
outputShape : [imageHeight, imageWidth]; + const outShape = [batch, outHeight, outWidth, + numChannels]; + const program = new TransformProgram(imageHeight, imageWidth, interpolation, fillMode, fillValue, outShape); + return backend.runWebGLProgram(program, [image, transforms], 'float32'); + } + const transformConfig = { + kernelName: Transform, + backendName: 'webgl', + kernelFunc: transform + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the License); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + function unique(args) { + const { inputs, attrs, backend } = args; + const { axis } = attrs; + const { x } = inputs; + assertNotComplex(x, 'unique'); + // For now, always forward calculation to the CPU backend. + console.warn('WARNING: ', 'UI might be locked temporarily as data is being downloaded'); + const values = backend.readSync(x.dataId); + const { outputValues, outputShape, indices } = uniqueImplCPU(values, axis, x.shape, x.dtype); + return [ + backend.makeTensorInfo(outputShape, x.dtype, outputValues), + backend.makeTensorInfo([indices.length], 'int32', indices), + ]; + } + const uniqueConfig = { + kernelName: Unique, + backendName: 'webgl', + kernelFunc: unique, + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. 
+ * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + function unpack(args) { + const { inputs, backend, attrs } = args; + const { value } = inputs; + let { axis } = attrs; + if (axis < 0) { + axis += value.shape.length; + } + const x = value; + const xRank = x.shape.length; + const num = value.shape[axis]; + const outShape = new Array(xRank - 1); + let outIndex = 0; + for (let i = 0; i < xRank; i++) { + if (i !== axis) { + outShape[outIndex++] = x.shape[i]; + } + } + const toDispose = []; + const begin = new Array(xRank).fill(0); + const size = x.shape.slice(); + size[axis] = 1; + const res = new Array(num); + for (let i = 0; i < res.length; i++) { + begin[axis] = i; + const sliced = slice({ inputs: { x }, backend, attrs: { begin, size } }); + const reshaped = reshape({ inputs: { x: sliced }, backend, attrs: { shape: outShape } }); + res[i] = reshaped; + toDispose.push(sliced); + } + toDispose.forEach(t => backend.disposeIntermediateTensorInfo(t)); + return res; + } + const unpackConfig = { + kernelName: Unpack, + backendName: 'webgl', + kernelFunc: unpack + }; + + /** + * @license + * Copyright 2018 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + class SegmentOpProgram { + constructor(segOpInfo, segOpType) { + this.variableNames = ['x', 'segmentIds']; + const windowSize = segOpInfo.windowSize; + const batchSize = segOpInfo.batchSize; + const inSize = segOpInfo.inSize; + const numSegments = segOpInfo.numSegments; + const outSize = numSegments * Math.ceil(inSize / windowSize); + this.outputShape = [batchSize, outSize]; + const initializationValue = '0.0'; + const returnValue = `sumValue`; + const windowSizeNearestVec4 = Math.floor(windowSize / 4) * 4; + const windowSizeVec4Remainder = windowSize % 4; + const updateSnippet = ` + sumValue += dot(values, segFilter); + `; + let checkValueOutOfBounds = ''; + if (inSize % windowSize > 0) { + checkValueOutOfBounds = ` + if (inIdx < 0 || inIdx >= ${inSize}) { + return initializationValue; + } + `; + } + let checkSegmentIdOutOfBounds = ''; + if (inSize % windowSize > 0) { + checkSegmentIdOutOfBounds = ` + if (inIdx < 0 || inIdx >= ${inSize}) { + return -1.0; + } + `; + } + this.userCode = ` + const float initializationValue = ${initializationValue}; + + float getValue(int batch, int inIdx) { + ${checkValueOutOfBounds} + return getX(batch, inIdx); + } + + float getSegmentIdAtIndex(int inIdx) { + ${checkSegmentIdOutOfBounds} + return getSegmentIds(inIdx); + } + + void main() { + ivec2 coords = getOutputCoords(); + int batch = coords[0]; + int outIdx = coords[1]; + int inOffset = int(floor(float(outIdx) / float( + ${numSegments})) * float(${windowSize})); + int 
currentSeg = int(mod(float(outIdx), float(${numSegments}))); + + float sumValue = 0.0; + + for (int i = 0; i < ${windowSizeNearestVec4}; i += 4) { + int inIdx = inOffset + i; + vec4 values = vec4( + getValue(batch, inIdx), + getValue(batch, inIdx + 1), + getValue(batch, inIdx + 2), + getValue(batch, inIdx + 3) + ); + + vec4 segFilter = vec4( + int(getSegmentIdAtIndex(inIdx)) == currentSeg ? 1 : 0, + int(getSegmentIdAtIndex(inIdx + 1)) == currentSeg ? 1 : 0, + int(getSegmentIdAtIndex(inIdx + 2)) == currentSeg ? 1 : 0, + int(getSegmentIdAtIndex(inIdx + 3)) == currentSeg ? 1 : 0 + ); + + ${updateSnippet} + } + + int inIdx = inOffset + ${windowSizeNearestVec4}; + if (${windowSizeVec4Remainder === 1}) { + vec4 values = vec4( + getValue(batch, inIdx), + initializationValue, + initializationValue, + initializationValue + ); + + int inIdxSeg = int(getSegmentIdAtIndex(inIdx)); + + vec4 segFilter = vec4( + int(getSegmentIdAtIndex(inIdx)) == currentSeg ? 1 : 0, + 0, + 0, + 0 + ); + + ${updateSnippet} + } else if (${windowSizeVec4Remainder === 2}) { + vec4 values = vec4( + getValue(batch, inIdx), + getValue(batch, inIdx + 1), + initializationValue, + initializationValue + ); + + vec4 segFilter = vec4( + int(getSegmentIdAtIndex(inIdx)) == currentSeg ? 1 : 0, + int(getSegmentIdAtIndex(inIdx + 1)) == currentSeg ? 1 : 0, + 0, + 0 + ); + + ${updateSnippet} + } else if (${windowSizeVec4Remainder === 3}) { + vec4 values = vec4( + getValue(batch, inIdx), + getValue(batch, inIdx + 1), + getValue(batch, inIdx + 2), + initializationValue + ); + + vec4 segFilter = vec4( + int(getSegmentIdAtIndex(inIdx)) == currentSeg ? 1 : 0, + int(getSegmentIdAtIndex(inIdx + 1)) == currentSeg ? 1 : 0, + int(getSegmentIdAtIndex(inIdx + 2)) == currentSeg ? 1 : 0, + 0 + ); + + ${updateSnippet} + } + setOutput(${returnValue}); + } + `; + } + } + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. 
+ * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + function unsortedSegmentSum(args) { + const { inputs, backend, attrs } = args; + const { x, segmentIds } = inputs; + const { numSegments } = attrs; + const xRank = x.shape.length; + const toDispose = []; + let axis = 0; + const permutation = getAxesPermutation([axis], xRank); + let permutedX = x; + if (permutation != null) { + permutedX = transpose({ inputs: { x }, backend, attrs: { perm: permutation } }); + toDispose.push(permutedX); + axis = getInnerMostAxes(1, xRank)[0]; + } + const outShape = computeOutShape(permutedX.shape, axis, numSegments); + const inSize = sizeFromShape([permutedX.shape[axis]]); + const a2D = reshape({ inputs: { x: permutedX }, backend, attrs: { shape: [-1, inSize] } }); + toDispose.push(a2D); + const outputDType = sumOutType(x.dtype); + const segOpCompute = (x, segOpType, segmentIds, dtype, numSegments) => { + const batchSize = x.shape[0]; + const inSize = x.shape[1]; + const windowSize = segOpComputeOptimalWindowSize(inSize, numSegments); + const segOpInfo = { windowSize, inSize, batchSize, numSegments }; + const program = new SegmentOpProgram(segOpInfo, segOpType); + const output = backend.compileAndRun(program, [x, segmentIds], dtype); + toDispose.push(output); + // No need to run another GPGPU program. 
+ if (output.shape[1] === numSegments) { + return output; + } + const rangeInfo = range({ + backend, + attrs: { start: 0, stop: numSegments, step: 1, dtype: 'float32' } + }); + const tileInfo = tile({ + inputs: { x: rangeInfo }, + backend, + attrs: { reps: [inSize / windowSize] } + }); + toDispose.push(rangeInfo); + toDispose.push(tileInfo); + const result = segOpCompute(output, segOpType, tileInfo, dtype, numSegments); + return result; + }; + const segOpResult = segOpCompute(a2D, 'unsortedSegmentSum', segmentIds, outputDType, numSegments); + const reshaped = reshape({ inputs: { x: segOpResult }, backend, attrs: { shape: outShape } }); + let result = reshaped; + if (permutation != null) { + toDispose.push(reshaped); + const perm = getUndoAxesPermutation(permutation); + result = transpose({ inputs: { x: result }, backend, attrs: { perm } }); + } + toDispose.forEach(t => backend.disposeIntermediateTensorInfo(t)); + return result; + } + const unsortedSegmentSumConfig = { + kernelName: UnsortedSegmentSum, + backendName: 'webgl', + kernelFunc: unsortedSegmentSum + }; + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * ============================================================================= + */ + // List all kernel configs here + const kernelConfigs = [ + _fusedMatMulConfig, + absConfig, + acosConfig, + acoshConfig, + addConfig, + addNConfig, + allConfig, + anyConfig, + argMaxConfig, + argMinConfig, + asinConfig, + asinhConfig, + atanConfig, + atan2Config, + atanhConfig, + avgPoolConfig, + avgPool3DConfig, + avgPool3DGradConfig, + avgPoolGradConfig, + batchMatMulConfig, + batchNormConfig, + batchToSpaceNDConfig, + bincountConfig, + bitwiseAndConfig, + broadcastArgsConfig, + castConfig, + ceilConfig, + clipByValueConfig, + complexConfig, + complexAbsConfig, + concatConfig, + conv2DConfig, + conv2DBackpropFilterConfig, + conv2DBackpropInputConfig, + conv3DConfig, + conv3DBackpropFilterV2Config, + conv3DBackpropInputConfig, + cosConfig, + coshConfig, + cropAndResizeConfig, + cumprodConfig, + cumsumConfig, + denseBincountConfig, + depthToSpaceConfig, + depthwiseConv2dNativeConfig, + depthwiseConv2dNativeBackpropFilterConfig, + depthwiseConv2dNativeBackpropInputConfig, + diagConfig, + dilation2DConfig, + einsumConfig, + eluConfig, + eluGradConfig, + equalConfig, + erfConfig, + expConfig, + expandDimsConfig, + expm1Config, + fftConfig, + fillConfig, + flipLeftRightConfig, + floorConfig, + floorDivConfig, + fromPixelsConfig, + fusedConv2DConfig, + fusedDepthwiseConv2DConfig, + gatherNdConfig, + gatherV2Config, + greaterConfig, + greaterEqualConfig, + identityConfig, + ifftConfig, + imagConfig, + isFiniteConfig, + isInfConfig, + isNaNConfig, + leakyReluConfig, + lessConfig, + lessEqualConfig, + linSpaceConfig, + logConfig, + log1pConfig, + logicalAndConfig, + logicalNotConfig, + logicalOrConfig, + LRNConfig, + LRNGradConfig, + maxConfig, + maximumConfig, + maxPoolConfig, + maxPool3DConfig, + maxPool3DGradConfig, + maxPoolGradConfig, + maxPoolWithArgmaxConfig, + meanConfig, + minConfig, + minimumConfig, + mirrorPadConfig, + modConfig, + multinomialConfig, + multiplyConfig, + 
negConfig, + nonMaxSuppressionV3Config, + nonMaxSuppressionV4Config, + nonMaxSuppressionV5Config, + notEqualConfig, + oneHotConfig, + onesLikeConfig, + packConfig, + padV2Config, + powConfig, + preluConfig, + prodConfig, + raggedGatherConfig, + raggedRangeConfig, + raggedTensorToTensorConfig, + rangeConfig, + realConfig, + realDivConfig, + reciprocalConfig, + reluConfig, + relu6Config, + reshapeConfig, + resizeBilinearConfig, + resizeBilinearGradConfig, + resizeNearestNeighborConfig, + resizeNearestNeighborGradConfig, + reverseConfig, + rotateWithOffsetConfig, + roundConfig, + rsqrtConfig, + scatterNdConfig, + searchSortedConfig, + selectConfig, + seluConfig, + sigmoidConfig, + signConfig, + sinConfig, + sinhConfig, + sliceConfig, + softmaxConfig, + softplusConfig, + spaceToBatchNDConfig, + sparseFillEmptyRowsConfig, + sparseReshapeConfig, + sparseSegmentMeanConfig, + sparseSegmentSumConfig, + sparseToDenseConfig, + splitVConfig, + sqrtConfig, + squareConfig, + squaredDifferenceConfig, + staticRegexReplaceConfig, + stepConfig, + stridedSliceConfig, + stringNGramsConfig, + stringSplitConfig, + stringToHashBucketFastConfig, + subConfig, + sumConfig, + tanConfig, + tanhConfig, + tensorScatterUpdateConfig, + tileConfig, + topKConfig, + transformConfig, + transposeConfig, + uniqueConfig, + unpackConfig, + unsortedSegmentSumConfig, + zerosLikeConfig + ]; + for (const kernelConfig of kernelConfigs) { + registerKernel(kernelConfig); + } + + /** + * @license + * Copyright 2020 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + + /** @license See the LICENSE file. */ + // This code is auto-generated, do not modify this file! + const version$1 = '4.22.0'; + + /** + * @license + * Copyright 2018 Google LLC. All Rights Reserved. + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * ============================================================================= + */ + const version = { + 'tfjs-core': version$7, + 'tfjs-backend-cpu': version$3, + 'tfjs-backend-webgl': version$2, + 'tfjs-data': version$4, + 'tfjs-layers': version$6, + 'tfjs-converter': version$5, + 'tfjs': version$1 + }; + + exports.Abs = Abs; + exports.Acos = Acos; + exports.Acosh = Acosh; + exports.AdadeltaOptimizer = AdadeltaOptimizer; + exports.AdagradOptimizer = AdagradOptimizer; + exports.AdamOptimizer = AdamOptimizer; + exports.AdamaxOptimizer = AdamaxOptimizer; + exports.Add = Add$1; + exports.AddN = AddN; + exports.All = All; + exports.Any = Any; + exports.ArgMax = ArgMax; + exports.ArgMin = ArgMin; + exports.Asin = Asin; + exports.Asinh = Asinh; + exports.Atan = Atan; + exports.Atan2 = Atan2; + exports.Atanh = Atanh; + exports.AvgPool = AvgPool; + exports.AvgPool3D = AvgPool3D; + exports.AvgPool3DGrad = AvgPool3DGrad; + exports.AvgPoolGrad = AvgPoolGrad; + exports.BatchMatMul = BatchMatMul; + exports.BatchToSpaceND = 
BatchToSpaceND; + exports.Bincount = Bincount; + exports.BitwiseAnd = BitwiseAnd; + exports.BroadcastArgs = BroadcastArgs; + exports.BroadcastTo = BroadcastTo; + exports.Callback = Callback; + exports.CallbackList = CallbackList; + exports.Cast = Cast; + exports.Ceil = Ceil; + exports.ClipByValue = ClipByValue; + exports.Complex = Complex; + exports.ComplexAbs = ComplexAbs; + exports.Concat = Concat; + exports.Conv2D = Conv2D$1; + exports.Conv2DBackpropFilter = Conv2DBackpropFilter; + exports.Conv2DBackpropInput = Conv2DBackpropInput; + exports.Conv3D = Conv3D$1; + exports.Conv3DBackpropFilterV2 = Conv3DBackpropFilterV2; + exports.Conv3DBackpropInputV2 = Conv3DBackpropInputV2; + exports.Cos = Cos; + exports.Cosh = Cosh; + exports.CropAndResize = CropAndResize; + exports.Cumprod = Cumprod; + exports.Cumsum = Cumsum; + exports.CustomCallback = CustomCallback; + exports.DataStorage = DataStorage; + exports.DenseBincount = DenseBincount; + exports.DepthToSpace = DepthToSpace; + exports.DepthwiseConv2dNative = DepthwiseConv2dNative; + exports.DepthwiseConv2dNativeBackpropFilter = DepthwiseConv2dNativeBackpropFilter; + exports.DepthwiseConv2dNativeBackpropInput = DepthwiseConv2dNativeBackpropInput; + exports.Diag = Diag; + exports.Dilation2D = Dilation2D; + exports.Dilation2DBackpropFilter = Dilation2DBackpropFilter; + exports.Dilation2DBackpropInput = Dilation2DBackpropInput; + exports.Draw = Draw; + exports.EarlyStopping = EarlyStopping; + exports.Einsum = Einsum; + exports.Elu = Elu$1; + exports.EluGrad = EluGrad; + exports.Environment = Environment; + exports.Equal = Equal; + exports.Erf = Erf; + exports.Exp = Exp; + exports.ExpandDims = ExpandDims; + exports.Expm1 = Expm1; + exports.FFT = FFT; + exports.Fill = Fill; + exports.FlipLeftRight = FlipLeftRight; + exports.Floor = Floor; + exports.FloorDiv = FloorDiv; + exports.FromPixels = FromPixels; + exports.FusedBatchNorm = FusedBatchNorm; + exports.FusedConv2D = FusedConv2D; + exports.FusedDepthwiseConv2D = 
FusedDepthwiseConv2D; + exports.GPGPUContext = GPGPUContext; + exports.GatherNd = GatherNd; + exports.GatherV2 = GatherV2; + exports.GraphModel = GraphModel; + exports.Greater = Greater; + exports.GreaterEqual = GreaterEqual; + exports.History = History; + exports.IFFT = IFFT; + exports.Identity = Identity$1; + exports.Imag = Imag; + exports.InputSpec = InputSpec; + exports.IsFinite = IsFinite; + exports.IsInf = IsInf; + exports.IsNan = IsNan; + exports.KernelBackend = KernelBackend; + exports.LRN = LRN; + exports.LRNGrad = LRNGrad; + exports.LayerVariable = LayerVariable; + exports.LayersModel = LayersModel; + exports.LeakyRelu = LeakyRelu; + exports.Less = Less; + exports.LessEqual = LessEqual; + exports.LinSpace = LinSpace; + exports.Log = Log; + exports.Log1p = Log1p; + exports.LogSoftmax = LogSoftmax$1; + exports.LogicalAnd = LogicalAnd; + exports.LogicalNot = LogicalNot; + exports.LogicalOr = LogicalOr; + exports.LogicalXor = LogicalXor; + exports.LowerBound = LowerBound; + exports.MathBackendCPU = MathBackendCPU; + exports.MathBackendWebGL = MathBackendWebGL; + exports.MatrixBandPart = MatrixBandPart; + exports.Max = Max; + exports.MaxPool = MaxPool; + exports.MaxPool3D = MaxPool3D; + exports.MaxPool3DGrad = MaxPool3DGrad; + exports.MaxPoolGrad = MaxPoolGrad; + exports.MaxPoolWithArgmax = MaxPoolWithArgmax; + exports.Maximum = Maximum$1; + exports.Mean = Mean; + exports.Min = Min; + exports.Minimum = Minimum$1; + exports.MirrorPad = MirrorPad; + exports.Mod = Mod; + exports.MomentumOptimizer = MomentumOptimizer; + exports.Multinomial = Multinomial; + exports.Multiply = Multiply$1; + exports.Neg = Neg; + exports.NonMaxSuppressionV3 = NonMaxSuppressionV3; + exports.NonMaxSuppressionV4 = NonMaxSuppressionV4; + exports.NonMaxSuppressionV5 = NonMaxSuppressionV5; + exports.NotEqual = NotEqual; + exports.OP_SCOPE_SUFFIX = OP_SCOPE_SUFFIX; + exports.OneHot = OneHot; + exports.OnesLike = OnesLike; + exports.Optimizer = Optimizer; + exports.OptimizerConstructors = 
OptimizerConstructors; + exports.Pack = Pack; + exports.PadV2 = PadV2; + exports.Pool = Pool; + exports.Pow = Pow; + exports.Prelu = Prelu; + exports.Prod = Prod; + exports.RMSPropOptimizer = RMSPropOptimizer; + exports.RNN = RNN; + exports.RaggedGather = RaggedGather; + exports.RaggedRange = RaggedRange; + exports.RaggedTensorToTensor = RaggedTensorToTensor; + exports.Range = Range; + exports.Real = Real; + exports.RealDiv = RealDiv; + exports.Reciprocal = Reciprocal; + exports.Relu = Relu$1; + exports.Relu6 = Relu6$1; + exports.Reshape = Reshape$1; + exports.ResizeBilinear = ResizeBilinear; + exports.ResizeBilinearGrad = ResizeBilinearGrad; + exports.ResizeNearestNeighbor = ResizeNearestNeighbor; + exports.ResizeNearestNeighborGrad = ResizeNearestNeighborGrad; + exports.Reverse = Reverse; + exports.RotateWithOffset = RotateWithOffset; + exports.Round = Round; + exports.Rsqrt = Rsqrt; + exports.SGDOptimizer = SGDOptimizer; + exports.ScatterNd = ScatterNd; + exports.SearchSorted = SearchSorted; + exports.Select = Select; + exports.Selu = Selu$1; + exports.Sequential = Sequential; + exports.Sigmoid = Sigmoid$1; + exports.Sign = Sign; + exports.Sin = Sin; + exports.Sinh = Sinh; + exports.Slice = Slice; + exports.Softmax = Softmax$2; + exports.Softplus = Softplus$1; + exports.SpaceToBatchND = SpaceToBatchND; + exports.SparseFillEmptyRows = SparseFillEmptyRows; + exports.SparseReshape = SparseReshape; + exports.SparseSegmentMean = SparseSegmentMean; + exports.SparseSegmentSum = SparseSegmentSum; + exports.SparseToDense = SparseToDense; + exports.SplitV = SplitV; + exports.Sqrt = Sqrt; + exports.Square = Square; + exports.SquaredDifference = SquaredDifference; + exports.StaticRegexReplace = StaticRegexReplace; + exports.Step = Step; + exports.StridedSlice = StridedSlice; + exports.StringNGrams = StringNGrams; + exports.StringSplit = StringSplit; + exports.StringToHashBucketFast = StringToHashBucketFast; + exports.Sub = Sub; + exports.Sum = Sum; + exports.SymbolicTensor 
= SymbolicTensor; + exports.Tan = Tan; + exports.Tanh = Tanh$1; + exports.Tensor = Tensor; + exports.TensorBuffer = TensorBuffer; + exports.TensorScatterUpdate = TensorScatterUpdate; + exports.Tile = Tile; + exports.TopK = TopK; + exports.Transform = Transform; + exports.Transpose = Transpose; + exports.Unique = Unique; + exports.Unpack = Unpack; + exports.UnsortedSegmentSum = UnsortedSegmentSum; + exports.UpperBound = UpperBound; + exports.Variable = Variable; + exports.ZerosLike = ZerosLike; + exports._FusedMatMul = _FusedMatMul; + exports.abs = abs$2; + exports.acos = acos$2; + exports.acosh = acosh$2; + exports.add = add$3; + exports.addN = addN$2; + exports.all = all$2; + exports.any = any$2; + exports.argMax = argMax$2; + exports.argMin = argMin$2; + exports.asin = asin$2; + exports.asinh = asinh$2; + exports.atan = atan$2; + exports.atan2 = atan2$2; + exports.atanh = atanh$2; + exports.avgPool = avgPool$2; + exports.avgPool3d = avgPool3d$1; + exports.backend = backend$1; + exports.backend_util = backend_util; + exports.basicLSTMCell = basicLSTMCell; + exports.batchNorm = batchNorm$2; + exports.batchNorm2d = batchNorm2d; + exports.batchNorm3d = batchNorm3d; + exports.batchNorm4d = batchNorm4d; + exports.batchToSpaceND = batchToSpaceND$2; + exports.bincount = bincount$2; + exports.bitwiseAnd = bitwiseAnd$2; + exports.booleanMaskAsync = booleanMaskAsync; + exports.broadcastArgs = broadcastArgs$2; + exports.broadcastTo = broadcastTo; + exports.broadcast_util = broadcast_util; + exports.browser = browser; + exports.buffer = buffer; + exports.callbacks = callbacks; + exports.cast = cast$3; + exports.ceil = ceil$2; + exports.clipByValue = clipByValue$2; + exports.clone = clone; + exports.complex = complex$2; + exports.concat = concat$2; + exports.concat1d = concat1d; + exports.concat2d = concat2d; + exports.concat3d = concat3d; + exports.concat4d = concat4d; + exports.constraints = exports_constraints; + exports.conv1d = conv1d$2; + exports.conv2d = conv2d$4; + 
exports.conv2dTranspose = conv2dTranspose$1; + exports.conv3d = conv3d$2; + exports.conv3dTranspose = conv3dTranspose$1; + exports.copyRegisteredKernels = copyRegisteredKernels; + exports.cos = cos$2; + exports.cosh = cosh$2; + exports.cosineWindow = cosineWindow; + exports.cumprod = cumprod$2; + exports.cumsum = cumsum$2; + exports.customGrad = customGrad; + exports.data = index; + exports.denseBincount = denseBincount$2; + exports.deprecationWarn = deprecationWarn; + exports.depthToSpace = depthToSpace$2; + exports.depthwiseConv2d = depthwiseConv2d$3; + exports.deregisterOp = deregisterOp; + exports.device_util = device_util; + exports.diag = diag$2; + exports.dilation2d = dilation2d; + exports.disableDeprecationWarnings = disableDeprecationWarnings; + exports.dispose = dispose; + exports.disposeVariables = disposeVariables; + exports.div = div$1; + exports.divNoNan = divNoNan; + exports.dot = dot$2; + exports.dropout = dropout$2; + exports.einsum = einsum$2; + exports.elu = elu$4; + exports.enableDebugMode = enableDebugMode; + exports.enableProdMode = enableProdMode; + exports.enclosingPowerOfTwo = enclosingPowerOfTwo; + exports.engine = engine; + exports.ensureShape = ensureShape; + exports.env = env; + exports.equal = equal$2; + exports.erf = erf$2; + exports.euclideanNorm = euclideanNorm; + exports.exp = exp$2; + exports.expandDims = expandDims$3; + exports.expm1 = expm1$2; + exports.eye = eye; + exports.fft = fft$2; + exports.fill = fill$2; + exports.findBackend = findBackend; + exports.findBackendFactory = findBackendFactory; + exports.floor = floor$2; + exports.floorDiv = floorDiv$2; + exports.forceHalfFloat = forceHalfFloat; + exports.fused = fused_ops; + exports.gather = gather$1; + exports.gatherND = gatherND; + exports.gather_util = gather_nd_util; + exports.getBackend = getBackend$1; + exports.getGradient = getGradient; + exports.getKernel = getKernel; + exports.getKernelsForBackend = getKernelsForBackend; + exports.gpgpu_util = gpgpu_util; + 
exports.grad = grad; + exports.grads = grads; + exports.greater = greater$3; + exports.greaterEqual = greaterEqual$2; + exports.ifft = ifft$2; + exports.imag = imag$2; + exports.image = image$1; + exports.inTopKAsync = inTopKAsync; + exports.initializers = exports_initializers; + exports.input = input; + exports.io = io; + exports.irfft = irfft; + exports.isFinite = isFinite$3; + exports.isInf = isInf$2; + exports.isNaN = isNaN$3; + exports.keep = keep; + exports.kernel_impls = kernel_impls; + exports.layers = exports_layers; + exports.leakyRelu = leakyRelu$2; + exports.less = less$3; + exports.lessEqual = lessEqual$2; + exports.linalg = linalg; + exports.linspace = linspace; + exports.loadGraphModel = loadGraphModel; + exports.loadGraphModelSync = loadGraphModelSync; + exports.loadLayersModel = loadLayersModel; + exports.localResponseNormalization = localResponseNormalization; + exports.log = log$2; + exports.log1p = log1p$2; + exports.logSigmoid = logSigmoid; + exports.logSoftmax = logSoftmax; + exports.logSumExp = logSumExp; + exports.logicalAnd = logicalAnd$2; + exports.logicalNot = logicalNot$2; + exports.logicalOr = logicalOr$2; + exports.logicalXor = logicalXor; + exports.losses = losses; + exports.lowerBound = lowerBound$1; + exports.matMul = matMul$1; + exports.math = math; + exports.max = max$3; + exports.maxPool = maxPool$2; + exports.maxPool3d = maxPool3d$1; + exports.maxPoolWithArgmax = maxPoolWithArgmax; + exports.maximum = maximum$4; + exports.mean = mean$3; + exports.memory = memory; + exports.meshgrid = meshgrid; + exports.metrics = exports_metrics; + exports.min = min$3; + exports.minimum = minimum$4; + exports.mirrorPad = mirrorPad$1; + exports.mod = mod$2; + exports.model = model; + exports.models = exports_models; + exports.moments = moments; + exports.movingAverage = movingAverage; + exports.mul = mul; + exports.multiRNNCell = multiRNNCell; + exports.multinomial = multinomial$2; + exports.neg = neg$2; + exports.nextFrame = nextFrame; + 
exports.norm = norm; + exports.notEqual = notEqual$2; + exports.oneHot = oneHot$3; + exports.ones = ones$1; + exports.onesLike = onesLike$3; + exports.op = op; + exports.outerProduct = outerProduct; + exports.pad = pad; + exports.pad1d = pad1d; + exports.pad2d = pad2d; + exports.pad3d = pad3d; + exports.pad4d = pad4d; + exports.pool = pool$1; + exports.pow = pow$3; + exports.prelu = prelu$3; + exports.print = print; + exports.prod = prod$2; + exports.profile = profile; + exports.raggedGather = raggedGather$2; + exports.raggedRange = raggedRange$2; + exports.raggedTensorToTensor = raggedTensorToTensor$2; + exports.rand = rand; + exports.randomGamma = randomGamma; + exports.randomNormal = randomNormal$2; + exports.randomStandardNormal = randomStandardNormal; + exports.randomUniform = randomUniform$1; + exports.randomUniformInt = randomUniformInt; + exports.range = range$3; + exports.ready = ready; + exports.real = real$2; + exports.reciprocal = reciprocal$2; + exports.registerBackend = registerBackend; + exports.registerCallbackConstructor = registerCallbackConstructor; + exports.registerGradient = registerGradient; + exports.registerKernel = registerKernel; + exports.registerOp = registerOp; + exports.regularizers = exports_regularizers; + exports.relu = relu$2; + exports.relu6 = relu6$2; + exports.removeBackend = removeBackend; + exports.reshape = reshape$3; + exports.reverse = reverse$2; + exports.reverse1d = reverse1d; + exports.reverse2d = reverse2d; + exports.reverse3d = reverse3d; + exports.reverse4d = reverse4d; + exports.rfft = rfft; + exports.round = round$2; + exports.rsqrt = rsqrt$2; + exports.scalar = scalar; + exports.scatterND = scatterND; + exports.scatter_util = scatter_nd_util; + exports.searchSorted = searchSorted$2; + exports.selu = selu$2; + exports.separableConv2d = separableConv2d$1; + exports.sequential = sequential; + exports.serialization = serialization; + exports.setBackend = setBackend$1; + exports.setPlatform = setPlatform; + 
exports.setWebGLContext = setWebGLContext; + exports.setdiff1dAsync = setdiff1dAsync; + exports.shared = shared; + exports.sigmoid = sigmoid$2; + exports.sign = sign$3; + exports.signal = signal; + exports.sin = sin$2; + exports.sinh = sinh$2; + exports.slice = slice$2; + exports.slice1d = slice1d; + exports.slice2d = slice2d; + exports.slice3d = slice3d; + exports.slice4d = slice4d; + exports.slice_util = slice_util; + exports.softmax = softmax$3; + exports.softplus = softplus$2; + exports.spaceToBatchND = spaceToBatchND$2; + exports.sparse = sparse$1; + exports.sparseToDense = sparseToDense$2; + exports.spectral = spectral$1; + exports.split = split$3; + exports.sqrt = sqrt$2; + exports.square = square$2; + exports.squaredDifference = squaredDifference$2; + exports.squeeze = squeeze; + exports.stack = stack; + exports.step = step$2; + exports.stridedSlice = stridedSlice$2; + exports.string = string$1; + exports.sub = sub$2; + exports.sum = sum$3; + exports.sumOutType = sumOutType; + exports.tan = tan$2; + exports.tanh = tanh$2; + exports.tensor = tensor; + exports.tensor1d = tensor1d; + exports.tensor2d = tensor2d; + exports.tensor3d = tensor3d; + exports.tensor4d = tensor4d; + exports.tensor5d = tensor5d; + exports.tensor6d = tensor6d; + exports.tensorScatterUpdate = tensorScatterUpdate$2; + exports.tensor_util = tensor_util; + exports.test_util = test_util; + exports.tidy = tidy; + exports.tile = tile$3; + exports.time = time; + exports.topk = topk; + exports.train = train; + exports.transpose = transpose$2; + exports.truncatedNormal = truncatedNormal$1; + exports.unique = unique$3; + exports.unregisterGradient = unregisterGradient; + exports.unregisterKernel = unregisterKernel; + exports.unsortedSegmentSum = unsortedSegmentSum$2; + exports.unstack = unstack; + exports.upcastType = upcastType; + exports.upperBound = upperBound$1; + exports.util = util; + exports.valueAndGrad = valueAndGrad; + exports.valueAndGrads = valueAndGrads; + exports.variable = 
variable$1; + exports.variableGrads = variableGrads; + exports.version = version; + exports.version_converter = version$5; + exports.version_core = version$7; + exports.version_cpu = version$3; + exports.version_layers = version$6; + exports.version_webgl = version$2; + exports.webgl = webgl; + exports.webgl_util = webgl_util; + exports.where = where; + exports.whereAsync = whereAsync; + exports.zeros = zeros$2; + exports.zerosLike = zerosLike$3; + +})); +//# sourceMappingURL=tf.es2017.js.map diff --git a/graalwasm/graalwasm-tensorflow/src/main/resources/tfjs-backend-wasm-simd.wasm b/graalwasm/graalwasm-tensorflow/src/main/resources/tfjs-backend-wasm-simd.wasm new file mode 100755 index 00000000..b515dee9 Binary files /dev/null and b/graalwasm/graalwasm-tensorflow/src/main/resources/tfjs-backend-wasm-simd.wasm differ diff --git a/graalwasm/graalwasm-tensorflow/src/test/java/com/example/Tensorflow/TensorflowApplicationTests.java b/graalwasm/graalwasm-tensorflow/src/test/java/com/example/Tensorflow/TensorflowApplicationTests.java new file mode 100644 index 00000000..2724ac4f --- /dev/null +++ b/graalwasm/graalwasm-tensorflow/src/test/java/com/example/Tensorflow/TensorflowApplicationTests.java @@ -0,0 +1,13 @@ +package com.example.Tensorflow; + +import org.junit.jupiter.api.Test; +import org.springframework.boot.test.context.SpringBootTest; + +@SpringBootTest +class TensorflowApplicationTests { + + @Test + void contextLoads() { + } + +}