Skip to content

Instantly share code, notes, and snippets.

@kevmal
Created August 23, 2019 23:01
Show Gist options
  • Star 0 You must be signed in to star a gist
  • Fork 0 You must be signed in to fork a gist
  • Save kevmal/ad379243b50fd7ec13de9bf191453b89 to your computer and use it in GitHub Desktop.
quick TensorFlowSharp wrapper for F#
This file has been truncated, but you can view the full file.
// Load the Paket-generated include script that references TensorFlowSharp
// and its dependencies into the F# Interactive session.
#load @"E:\profile\fsi\.\packagegrp\CE8A46E8AAAB2DAD2A75F9585A0E8F0563C98AEA859A445DC4BF25F4B0C33943\.paket\load\main.group.fsx"
open System
open System.IO
open TensorFlow
// Append TensorFlowSharp's native win7-x64 runtime folder to PATH so the
// managed wrapper can find the native TensorFlow library when it loads.
// NOTE(review): machine-specific absolute path — adjust per environment.
Environment.SetEnvironmentVariable("Path",
Environment.GetEnvironmentVariable("Path") + ";" + @"E:\profile\fsi\packagegrp\CE8A46E8AAAB2DAD2A75F9585A0E8F0563C98AEA859A445DC4BF25F4B0C33943\packages\TensorFlowSharp\runtimes\win7-x64\native")
/// Pairs a TensorFlowSharp graph with one of its outputs so that ordinary
/// F# operators and Math-style static members build new graph nodes.
/// Every op takes the graph from its (left) operand and wraps the result.
type TF(g : TFGraph, y : TFOutput) =
    /// Graph that owns this node.
    member x.TFGraph = g
    /// Underlying TensorFlowSharp output handle.
    member x.TFOutput = y
    // --- unary / binary math members (Math-class style names) ---
    static member Abs(x : TF) = let gr = x.TFGraph in TF(gr, gr.Abs(x.TFOutput))
    static member Acos(x : TF) = let gr = x.TFGraph in TF(gr, gr.Acos(x.TFOutput))
    static member Asin(x : TF) = let gr = x.TFGraph in TF(gr, gr.Asin(x.TFOutput))
    static member Atan(x : TF) = let gr = x.TFGraph in TF(gr, gr.Atan(x.TFOutput))
    static member Atan2(x : TF, y : TF) = let gr = x.TFGraph in TF(gr, gr.Atan2(x.TFOutput, y.TFOutput))
    // Ceiling is spelled to match System.Math; it maps onto the graph's Ceil op.
    static member Ceiling(x : TF) = let gr = x.TFGraph in TF(gr, gr.Ceil(x.TFOutput))
    static member Exp(x : TF) = let gr = x.TFGraph in TF(gr, gr.Exp(x.TFOutput))
    static member Floor(x : TF) = let gr = x.TFGraph in TF(gr, gr.Floor(x.TFOutput))
    // Truncate and Log10 were left commented out upstream (no wrapper provided).
    static member Round(x : TF) = let gr = x.TFGraph in TF(gr, gr.Round(x.TFOutput))
    static member Log(x : TF) = let gr = x.TFGraph in TF(gr, gr.Log(x.TFOutput))
    static member Sqrt(x : TF) = let gr = x.TFGraph in TF(gr, gr.Sqrt(x.TFOutput))
    static member Cos(x : TF) = let gr = x.TFGraph in TF(gr, gr.Cos(x.TFOutput))
    static member Cosh(x : TF) = let gr = x.TFGraph in TF(gr, gr.Cosh(x.TFOutput))
    static member Sin(x : TF) = let gr = x.TFGraph in TF(gr, gr.Sin(x.TFOutput))
    static member Sinh(x : TF) = let gr = x.TFGraph in TF(gr, gr.Sinh(x.TFOutput))
    static member Tan(x : TF) = let gr = x.TFGraph in TF(gr, gr.Tan(x.TFOutput))
    static member Tanh(x : TF) = let gr = x.TFGraph in TF(gr, gr.Tanh(x.TFOutput))
    static member Pow(x : TF, y : TF) = let gr = x.TFGraph in TF(gr, gr.Pow(x.TFOutput, y.TFOutput))
    // --- operators (graph comes from the left operand) ---
    static member (~-) (a : TF) = let gr = a.TFGraph in TF(gr, gr.Neg(a.TFOutput))
    static member (&&&) (a : TF, b : TF) = let gr = a.TFGraph in TF(gr, gr.BitwiseAnd(a.TFOutput, b.TFOutput))
    static member (|||) (a : TF, b : TF) = let gr = a.TFGraph in TF(gr, gr.BitwiseOr(a.TFOutput, b.TFOutput))
    static member (^^^) (a : TF, b : TF) = let gr = a.TFGraph in TF(gr, gr.BitwiseXor(a.TFOutput, b.TFOutput))
    static member (+) (a : TF, b : TF) = let gr = a.TFGraph in TF(gr, gr.Add(a.TFOutput, b.TFOutput))
    // NOTE: '*' is MatMul (matrix product); use '.*' for element-wise Mul.
    static member ( * ) (a : TF, b : TF) = let gr = a.TFGraph in TF(gr, gr.MatMul(a.TFOutput, b.TFOutput))
    static member (.*) (a : TF, b : TF) = let gr = a.TFGraph in TF(gr, gr.Mul(a.TFOutput, b.TFOutput))
    static member (/) (a : TF, b : TF) = let gr = a.TFGraph in TF(gr, gr.Div(a.TFOutput, b.TFOutput))
    static member (-) (a : TF, b : TF) = let gr = a.TFGraph in TF(gr, gr.Sub(a.TFOutput, b.TFOutput))
    static member (%) (a : TF, b : TF) = let gr = a.TFGraph in TF(gr, gr.Mod(a.TFOutput, b.TFOutput))
    // --- element-wise comparison operators ---
    static member (.=) (a : TF, b : TF) = let gr = a.TFGraph in TF(gr, gr.Equal(a.TFOutput, b.TFOutput))
    static member (.>=) (a : TF, b : TF) = let gr = a.TFGraph in TF(gr, gr.GreaterEqual(a.TFOutput, b.TFOutput))
    static member (.<=) (a : TF, b : TF) = let gr = a.TFGraph in TF(gr, gr.LessEqual(a.TFOutput, b.TFOutput))
    static member (.>) (a : TF, b : TF) = let gr = a.TFGraph in TF(gr, gr.Greater(a.TFOutput, b.TFOutput))
    static member (.<) (a : TF, b : TF) = let gr = a.TFGraph in TF(gr, gr.Less(a.TFOutput, b.TFOutput))
[<AutoOpen>]
module Ops =
    /// Adds a Log1p node to x's graph and wraps the result.
    let log1p (x : TF) =
        let g = x.TFGraph
        TF(g, g.Log1p(x.TFOutput))
type TFG(graph : TFGraph) =
member x.TFGraph = graph
/// Builds a Where node; x and y are optional operands passed through as
/// nullable TFOutputs to the raw TensorFlowSharp API.
member this.Where(condition : TF, x : TF option, y : TF option, ?name : String) =
    let g = this.TFGraph
    let toNullable (t : TF option) =
        t |> Option.map (fun v -> v.TFOutput) |> Option.toNullable
    TF(g, g.Where(condition.TFOutput, toNullable x, toNullable y, defaultArg name null))
/// <summary>
/// Adds new dependencies for new tensors and operations created while the context is active.
/// </summary>
// Thin delegate: registers the given operations as control dependencies for
// nodes created while the returned context is active (see XML doc above).
member this.WithDependencies(dependencies : TFOperation[]) =
this.TFGraph.WithDependencies(dependencies)
/// <summary>
/// Imports a graph serialized into the graph
/// </summary><param name="graphDef">Serialized graph definition (in protocol buffer format).</param><param name="options">Import options.</param><param name="returnOutputs">Array large enough to contain all the return options.</param><param name="status">Status buffer, if specified a status code will be left here, if not specified, a <see cref="T:TensorFlow.TFException" /> exception is raised if there is an error.</param><remarks>
/// If you are tryig to load a file stored using the SavedModel file format, you should use the <see cref="T:TensorFlow.TFSession.FromSavedModel" /> API instead.
/// </remarks>
member this.ImportGraphDef(graphDef : TFBuffer, options : TFImportGraphDefOptions, returnOutputs : TF[], ?status : TFStatus) =
    // Unwrap the TF wrappers into raw outputs; the array is filled in place
    // by the underlying import call.
    let rawOutputs = returnOutputs |> Array.map (fun t -> t.TFOutput)
    this.TFGraph.ImportGraphDef(graphDef, options, rawOutputs, defaultArg status null)
/// <summary>
/// Constructs a while loop with the specified inputs and a callback that composes the while loop
/// </summary><param name="inputs">Inputs.</param><param name="constructor">Callback method that fills out the various while loop parameters.</param><param name="status">Status buffer, if specified a status code will be left here, if not specified, a <see cref="T:TensorFlow.TFException" /> exception is raised if there is an error.</param><returns>
/// An array of TFOutputs from creating the While loop, or null if there is an error creating the
/// while loop, or if the constructor raised an exception when it was invoked.
/// </returns>
member this.While(inputs : TF[], constructor : TFGraph.WhileConstructor, ?status : TFStatus) =
    let g = this.TFGraph
    let rawInputs = inputs |> Array.map (fun t -> t.TFOutput)
    // Re-wrap each resulting output so callers stay in the TF world.
    g.While(rawInputs, constructor, defaultArg status null)
    |> Array.map (fun o -> TF(g, o))
/// <summary>
/// Adds a gradient: the operations needed to compute the partial derivatives of sum of <paramref name="y" />` wrt to <paramref name="x" />.
/// </summary><returns>The partial derivatives, the size of the array is the same as the length of the <paramref name="y" /> array.</returns><param name="y">The y elements.</param><param name="x">The x elements.</param><param name="dx">Initial gradients, which represent the symbolic partial derivatives of some loss function `L` w.r.t. <paramref name="y" /> ).
/// If the parameter is null, the implementation will use dx for 'OnesLike' for all shapes in <paramref name="y" /></param><param name="status">Status buffer, if specified a status code will be left here, if not specified, a <see cref="T:TensorFlow.TFException" /> exception is raised if there is an error.</param><remarks>
/// d(y[0] + y[1]+ ...)/dx[0], d(y[0] + y[1] + ...)/dx[1]z...
/// </remarks>
/// Adds the operations computing partial derivatives of sum of [y] w.r.t. [x].
/// [dx], when given, seeds the gradient; when omitted, null is passed so the
/// implementation uses OnesLike seeds (see XML doc above).
/// Fix: the original did `defaultArg dx null` and then Array.map'ed it, which
/// throws on a null array whenever dx was omitted; the option is now
/// translated to null directly.
member this.AddGradients(y : TF[], x : TF[], ?dx : TF[], ?status : TFStatus) =
    let g = this.TFGraph
    let outputs (ts : TF[]) = ts |> Array.map (fun t -> t.TFOutput)
    let dxOutputs =
        match dx with
        | Some arr -> outputs arr
        | None -> null
    g.AddGradients(outputs y, outputs x, dxOutputs, defaultArg status null)
    |> Array.map (fun o -> TF(g, o))
/// <summary>
/// Adds a gradient: the operations needed to compute the partial derivatives of sum of <paramref name="y" />` wrt to <paramref name="x" />.
/// </summary><returns>The partial derivatives, the size of the array is the same as the length of the <paramref name="y" /> array.</returns><param name="y">The y elements.</param><param name="x">The x elements.</param><param name="dx">Initial gradients, which represent the symbolic partial derivatives of some loss function `L` w.r.t. <paramref name="y" /> ).
/// If the parameter is null, the implementation will use dx for 'OnesLike' for all shapes in <paramref name="y" /></param><param name="status">Status buffer, if specified a status code will be left here, if not specified, a <see cref="T:TensorFlow.TFException" /> exception is raised if there is an error.</param><remarks>
/// d(y[0] + y[1]+ ...)/dx[0], d(y[0] + y[1] + ...)/dx[1]z...
/// </remarks>
/// Adds gradient operations under the given name [prefix]; see the overload
/// without a prefix for semantics.
/// Fix: same null-dx bug as the other overload — Array.map over the null
/// produced by `defaultArg dx null` throws when dx is omitted, so the option
/// is translated to null directly.
member this.AddGradients(prefix : String, y : TF[], x : TF[], ?dx : TF[], ?status : TFStatus) =
    let g = this.TFGraph
    let outputs (ts : TF[]) = ts |> Array.map (fun t -> t.TFOutput)
    let dxOutputs =
        match dx with
        | Some arr -> outputs arr
        | None -> null
    g.AddGradients(prefix, outputs y, outputs x, dxOutputs, defaultArg status null)
    |> Array.map (fun o -> TF(g, o))
/// <summary>
/// Creates a TFFunction from a TFGraph
/// </summary><returns>The function.</returns><param name="functionName">Name of the new function. Should match the operation name (OpDef.name) regexp [A-Z][A-Za-z0-9_.\\-/]*. If appendHashToFunctioName is false, the name must be unique (at least those registered in graphs where this function will be used).</param><param name="description">Optional, human readable description of this function.</param><param name="operations">Array of operations to become the body of the function or null.
/// If no array is given , all the
/// operations in function body will become part of the function
/// except operations referenced in inputs. These operations
/// must have a single output (these operations are typically
/// placeholders created for the sole purpose of representing
/// an input).
///
/// If an array is given, all operations
/// in it will become part of the function. In particular, no
/// automatic skipping of dummy input operations is performed.
/// </param><param name="inputs">Array that specify the inputs to the function, or null. The names used for function inputs are normalized
/// names of the operations (usually placeholders) pointed to by
/// inputs. These operation names should start with a letter.
/// Normalization will convert all letters to lowercase and
/// non-alphanumeric characters to '_' to make resulting names match
/// the "[a-z][a-z0-9_]*" pattern for operation argument names.
/// `inputs` cannot contain the same tensor twice.</param><param name="outputs">rray that specify the inputs to the function, or null. This can contain the same tensor twice.</param><param name="outputNames">The names of the function's outputs. The array either has the same elements of outputs, or be null. Names must match "[a-z][a-z0-9_]*" regexp, if null is passed, the names are generated automatically.</param><param name="appendHashToFunctionName">If set to <c>true</c> appends hash to functionName, otherwise it will use the specified name in functionName.</param><param name="status">Status buffer, if specified a status code will be left here, if not specified, a <see cref="T:TensorFlow.TFException" /> exception is raised if there is an error.</param><remarks><para>
/// This method converts the graph whose operations (or a subset of its operations) will be converted
/// into a TFFunction.
/// </para><para>
/// Note that when the same TF_Output is listed as both an input and an output,
/// the corresponding function's output will equal to this input,
/// instead of the original node's output.
/// </para><para>
/// Callers must also satisfy the following constraints:
/// </para><para><paramref name="inputs" /> cannot refer to TFOutputs within a control flow context. For
/// example, one cannot use the output of "switch" node as input.
/// </para><para><paramref name="inputs" /> and <paramref name="outputs" /> cannot have reference types. Reference types are
/// not exposed through C API and are being replaced with Resources. We support
/// reference types inside function's body to support legacy code. Do not
/// use them in new code.
/// </para><para>
/// Every node in the function's body must have all of its inputs (including
/// control inputs). In other words, for every node in the body, each input
/// must be either listed in <paramref name="inputs" /> or must come from another node in
/// the body. In particular, it is an error to have a control edge going from
/// a node outside of the body into a node in the body. This applies to control
/// edges going from nodes referenced in <paramref name="inputs" /> to nodes in the body when
/// the former nodes are not in the body (automatically skipped or not
/// included in explicitly specified body).
/// </para></remarks>
member this.ToFunction(functionName : String, description : String, operations : TFOperation[], inputs : TF[], outputs : TF[], outputNames : String[], ?appendHashToFunctionName : Boolean, ?status : TFStatus) =
    // Unwrap both endpoint arrays, then hand everything to the raw API
    // (full contract described in the XML doc above).
    let raw (ts : TF[]) = ts |> Array.map (fun t -> t.TFOutput)
    this.TFGraph.ToFunction(
        functionName, description, operations,
        raw inputs, raw outputs, outputNames,
        defaultArg appendHashToFunctionName false,
        defaultArg status null)
/// <summary>
/// Returns the serialized VersionDef proto for this graph.
/// </summary><returns>The versions.</returns><param name="outputVersionDef">The buffer where the serialized protocol buffer will be stored.</param><param name="status">Status buffer, if specified a status code will be left here, if not specified, a <see cref="T:TensorFlow.TFException" /> exception is raised if there is an error.</param>
member this.Versions(outputVersionDef : TFBuffer, ?status : TFStatus) =
    // Serializes the graph's VersionDef proto into the supplied buffer.
    this.TFGraph.Versions(outputVersionDef, defaultArg status null)
// Number of functions registered in the underlying graph.
// NOTE(review): these call the property getters explicitly (get_*) instead of
// exposing F# properties — kept as-is to avoid changing the public surface.
member this.get_NumFunctions() =
this.TFGraph.get_NumFunctions()
// The functions defined in the underlying graph.
member this.get_Functions() =
this.TFGraph.get_Functions()
// The device name currently set on the underlying graph.
member this.get_DeviceName() =
this.TFGraph.get_DeviceName()
/// <summary>
/// Attempts to evaluate the <paramref name="output" />. This is only possible if <paramref name="output" /> does not
/// depend on any graph inputs - the function is safe to call if this is not the case though.
/// </summary><returns><c>true</c>, if the evaluation is successful, in which case the result is returned in <paramref name="tensor" />, <c>false</c> otherwise.</returns><param name="output">Output.</param><param name="tensor">Tensor.</param>
member this.TryEvaluateConstant(output : TF, tensor : TFTensor byref) =
    // On success the folded constant is written into [tensor] (byref) and
    // true is returned; see the XML doc above.
    let g = this.TFGraph
    g.TryEvaluateConstant(output.TFOutput, &tensor)
/// Delegates to the underlying TFGraph's ToString.
/// Fix: declared with 'override' so it actually replaces Object.ToString —
/// the original 'member' merely hid the base method (warning FS0864), so
/// "%O"/string conversions would not have picked it up.
override this.ToString() =
    this.TFGraph.ToString()
/// <summary>
/// Gradient of Unbatch.
/// </summary><param name="original_input"></param><param name="batch_index"></param><param name="grad"></param><param name="id"></param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'UnbatchGrad'.
/// </param><param name="container">
/// Optional argument
/// </param><param name="shared_name">
/// Optional argument
/// </param><returns>
/// The TFOperation can be fetched from the resulting TFOutput, by fethching the Operation property from the result.
/// </returns><remarks>
/// Acts like Batch but using the given batch_index index of batching things as they
/// become available. This ensures that the gradients are propagated back in the
/// same session which did the forward pass.
///
/// original_input: The input to the Unbatch operation this is the gradient of.
/// batch_index: The batch_index given to the Unbatch operation this is the gradient
/// of.
/// grad: The downstream gradient.
/// id: The id scalar emitted by Batch.
/// batched_grad: The return value, either an empty tensor or the batched gradient.
/// container: Container to control resource sharing.
/// shared_name: Instances of UnbatchGrad with the same container and shared_name
/// are assumed to possibly belong to the same batch. If left empty, the op name
/// will be used as the shared name.
/// </remarks>
member this.UnbatchGrad(original_input : TF, batch_index : TF, grad : TF, id : TF, ?container : String, ?shared_name : String, ?operName : String) =
    let g = this.TFGraph
    // All three string options default to null for the raw API.
    let result =
        g.UnbatchGrad(
            original_input.TFOutput, batch_index.TFOutput, grad.TFOutput, id.TFOutput,
            defaultArg container null, defaultArg shared_name null, defaultArg operName null)
    TF(g, result)
/// <summary>
/// Determine the script codes of a given tensor of Unicode integer code points.
/// </summary><param name="input">
/// A Tensor of int32 Unicode code points.
/// </param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'UnicodeScript'.
/// </param><returns>
/// A Tensor of int32 script codes corresponding to each input code point.
/// The TFOperation can be fetched from the resulting TFOutput, by fethching the Operation property from the result.
/// </returns><remarks>
/// This operation converts Unicode code points to script codes corresponding to
/// each code point. Script codes correspond to International Components for
/// Unicode (ICU) UScriptCode values. See http://icu-project.org/apiref/icu4c/uscript_8h.html.
/// Returns -1 (USCRIPT_INVALID_CODE) for invalid codepoints. Output shape will
/// match input shape.
/// </remarks>
member this.UnicodeScript(input : TF, ?operName : String) =
    // Wraps the graph's UnicodeScript op (code points -> script codes).
    let g = this.TFGraph
    TF(g, g.UnicodeScript(input.TFOutput, defaultArg operName null))
/// <summary>
/// Generates labels for candidate sampling with a uniform distribution.
/// </summary><param name="true_classes">
/// A batch_size * num_true matrix, in which each row contains the
/// IDs of the num_true target_classes in the corresponding original label.
/// </param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'UniformCandidateSampler'.
/// </param><param name="seed">
/// Optional argument
/// If either seed or seed2 are set to be non-zero, the random number
/// generator is seeded by the given seed. Otherwise, it is seeded by a
/// random seed.
/// </param><param name="seed2">
/// Optional argument
/// An second seed to avoid seed collision.
/// </param><param name="num_true">
/// Number of true labels per context.
/// </param><param name="num_sampled">
/// Number of candidates to randomly sample.
/// </param><param name="unique">
/// If unique is true, we sample with rejection, so that all sampled
/// candidates in a batch are unique. This requires some approximation to
/// estimate the post-rejection sampling probabilities.
/// </param><param name="range_max">
/// The sampler will sample integers from the interval [0, range_max).
/// </param><returns>
/// Returns a tuple with multiple values, as follows:
/// sampled_candidates: A vector of length num_sampled, in which each element is
/// the ID of a sampled candidate.
/// true_expected_count: A batch_size * num_true matrix, representing
/// the number of times each candidate is expected to occur in a batch
/// of sampled candidates. If unique=true, then this is a probability.
/// sampled_expected_count: A vector of length num_sampled, for each sampled
/// candidate representing the number of times the candidate is expected
/// to occur in a batch of sampled candidates. If unique=true, then this is a
/// probability.
/// The TFOperation can be fetched from any of the TFOutputs returned in the tuple values, by fethching the Operation property.
/// </returns><remarks>
/// See explanations of candidate sampling and the data formats at
/// go/candidate-sampling.
///
/// For each batch, this op picks a single set of sampled candidate labels.
///
/// The advantages of sampling candidates per-batch are simplicity and the
/// possibility of efficient dense matrix multiplication. The disadvantage is that
/// the sampled candidates must be chosen independently of the context and of the
/// true labels.
/// </remarks>
member this.UniformCandidateSampler(true_classes : TF, num_true : Int64, num_sampled : Int64, unique : Boolean, range_max : Int64, ?seed : Int64, ?seed2 : Int64, ?operName : String) =
    // Returns the raw multi-output tuple from the underlying op (see XML doc).
    let toNullable (v : Int64 option) =
        match v with
        | Some s -> Nullable s
        | None -> Nullable()
    this.TFGraph.UniformCandidateSampler(
        true_classes.TFOutput, num_true, num_sampled, unique, range_max,
        toNullable seed, toNullable seed2, defaultArg operName null)
/// <summary>
/// Finds unique elements in a 1-D tensor.
/// </summary><param name="x">
/// 1-D.
/// </param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'Unique'.
/// </param><param name="out_idx">
/// Optional argument
/// </param><returns>
/// Returns a tuple with multiple values, as follows:
/// y: 1-D.
/// idx: 1-D.
/// The TFOperation can be fetched from any of the TFOutputs returned in the tuple values, by fethching the Operation property.
/// </returns><remarks>
/// This operation returns a tensor <c>y</c> containing all of the unique elements of <c>x</c>
/// sorted in the same order that they occur in <c>x</c>. This operation also returns a
/// tensor <c>idx</c> the same size as <c>x</c> that contains the index of each value of <c>x</c>
/// in the unique output <c>y</c>. In other words:
///
/// <c>y[idx[i]] = x[i] for i in [0, 1,...,rank(x) - 1]</c>
///
/// For example:
///
/// <code>
/// # tensor 'x' is [1, 1, 2, 4, 4, 4, 7, 8, 8]
/// y, idx = unique(x)
/// y ==&amp;gt; [1, 2, 4, 7, 8]
/// idx ==&amp;gt; [0, 0, 1, 2, 2, 2, 3, 4, 4]
/// </code></remarks>
member this.Unique(x : TF, ?out_idx : TFDataType, ?operName : String) =
    // Returns the raw (y, idx) tuple from the underlying op (see XML doc).
    let idxType = match out_idx with Some t -> Nullable t | None -> Nullable()
    this.TFGraph.Unique(x.TFOutput, idxType, defaultArg operName null)
// Axis-aware variant of Unique; returns the raw tuple from the underlying op.
member this.UniqueV2(x : TF, axis : TF, ?out_idx : TFDataType, ?operName : String) =
    let idxType = match out_idx with Some t -> Nullable t | None -> Nullable()
    this.TFGraph.UniqueV2(x.TFOutput, axis.TFOutput, idxType, defaultArg operName null)
/// <summary>
/// Finds unique elements in a 1-D tensor.
/// </summary><param name="x">
/// 1-D.
/// </param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'UniqueWithCounts'.
/// </param><param name="out_idx">
/// Optional argument
/// </param><returns>
/// Returns a tuple with multiple values, as follows:
/// y: 1-D.
/// idx: 1-D.
/// count: 1-D.
/// The TFOperation can be fetched from any of the TFOutputs returned in the tuple values, by fethching the Operation property.
/// </returns><remarks>
/// This operation returns a tensor <c>y</c> containing all of the unique elements of <c>x</c>
/// sorted in the same order that they occur in <c>x</c>. This operation also returns a
/// tensor <c>idx</c> the same size as <c>x</c> that contains the index of each value of <c>x</c>
/// in the unique output <c>y</c>. Finally, it returns a third tensor <c>count</c> that
/// contains the count of each element of <c>y</c> in <c>x</c>. In other words:
///
/// <c>y[idx[i]] = x[i] for i in [0, 1,...,rank(x) - 1]</c>
///
/// For example:
///
/// <code>
/// # tensor 'x' is [1, 1, 2, 4, 4, 4, 7, 8, 8]
/// y, idx, count = unique_with_counts(x)
/// y ==&amp;gt; [1, 2, 4, 7, 8]
/// idx ==&amp;gt; [0, 0, 1, 2, 2, 2, 3, 4, 4]
/// count ==&amp;gt; [2, 1, 3, 1, 2]
/// </code></remarks>
member this.UniqueWithCounts(x : TF, ?out_idx : TFDataType, ?operName : String) =
    // Returns the raw (y, idx, count) tuple from the underlying op (see XML doc).
    let idxType = match out_idx with Some t -> Nullable t | None -> Nullable()
    this.TFGraph.UniqueWithCounts(x.TFOutput, idxType, defaultArg operName null)
// Axis-aware variant of UniqueWithCounts; returns the raw op tuple.
member this.UniqueWithCountsV2(x : TF, axis : TF, ?out_idx : TFDataType, ?operName : String) =
    let idxType = match out_idx with Some t -> Nullable t | None -> Nullable()
    this.TFGraph.UniqueWithCountsV2(x.TFOutput, axis.TFOutput, idxType, defaultArg operName null)
/// <summary>
/// Unpacks a given dimension of a rank-<c>R</c> tensor into <c>num</c> rank-<c>(R-1)</c> tensors.
/// </summary><param name="value">
/// 1-D or higher, with <c>axis</c> dimension size equal to <c>num</c>.
/// </param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'Unpack'.
/// </param><param name="axis">
/// Optional argument
/// Dimension along which to unpack. Negative values wrap around, so the
/// valid range is <c>[-R, R)</c>.
/// </param><param name="num"></param><returns>
/// The list of tensors unpacked from <c>value</c>.
/// The TFOperation can be fetched from the resulting TFOutput, by fethching the Operation property from the result.
/// </returns><remarks>
/// Unpacks <c>num</c> tensors from <c>value</c> by chipping it along the <c>axis</c> dimension.
/// For example, given a tensor of shape <c>(A, B, C, D)</c>;
///
/// If <c>axis == 0</c> then the i'th tensor in <c>output</c> is the slice <c>value[i, :, :, :]</c>
/// and each tensor in <c>output</c> will have shape <c>(B, C, D)</c>. (Note that the
/// dimension unpacked along is gone, unlike <c>split</c>).
///
/// If <c>axis == 1</c> then the i'th tensor in <c>output</c> is the slice <c>value[:, i, :, :]</c>
/// and each tensor in <c>output</c> will have shape <c>(A, C, D)</c>.
/// Etc.
///
/// This is the opposite of <c>pack</c>.
/// </remarks>
member this.Unpack(value : TF, num : Int64, ?axis : Int64, ?operName : String) =
    let g = this.TFGraph
    let axisArg = match axis with Some a -> Nullable a | None -> Nullable()
    // Each unpacked slice is re-wrapped as a TF node.
    g.Unpack(value.TFOutput, num, axisArg, defaultArg operName null)
    |> Array.map (fun o -> TF(g, o))
/// <summary>
/// Converts a flat index or array of flat indices into a tuple of
/// </summary><param name="indices">
/// An 0-D or 1-D <c>int</c> Tensor whose elements are indices into the
/// flattened version of an array of dimensions dims.
/// </param><param name="dims">
/// An 1-D <c>int</c> Tensor. The shape of the array to use for unraveling
/// indices.
/// </param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'UnravelIndex'.
/// </param><returns>
/// An 2-D (or 1-D if indices is 0-D) tensor where each row has the
/// same shape as the indices array.
/// The TFOperation can be fetched from the resulting TFOutput, by fethching the Operation property from the result.
/// </returns><remarks>
/// coordinate arrays.
///
/// @compatibility(numpy)
/// Equivalent to np.unravel_index
/// @end_compatibility
/// </remarks>
member this.UnravelIndex(indices : TF, dims : TF, ?operName : String) =
    // Wraps the graph's UnravelIndex op (flat indices -> coordinate arrays).
    let g = this.TFGraph
    TF(g, g.UnravelIndex(indices.TFOutput, dims.TFOutput, defaultArg operName null))
/// <summary>
/// Computes the maximum along segments of a tensor.
/// </summary><param name="data"></param><param name="segment_ids">
/// A tensor whose shape is a prefix of <c>data.shape</c>.END
/// }
/// out_arg {
/// name: "output"
/// description: &amp;lt;&amp;lt;END
/// Has same shape as data, except for the first <c>segment_ids.rank</c>
/// dimensions, which are replaced with a single dimension which has size
/// <c>num_segments</c>.
/// </param><param name="num_segments"></param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'UnsortedSegmentMax'.
/// </param><returns>
/// The TFOperation can be fetched from the resulting TFOutput, by fethching the Operation property from the result.
/// </returns><remarks>
/// Read
/// [the section on segmentation](https://tensorflow.org/api_guides/python/math_ops#Segmentation)
/// for an explanation of segments.
///
/// This operator is similar to the unsorted segment sum operator found
/// [(here)](../../../api_docs/python/math_ops.md#UnsortedSegmentSum).
/// Instead of computing the sum over segments, it computes the maximum such that:
///
/// \\(output_i = \max_{j...} data[j...]\\) where max is over tuples <c>j...</c> such
/// that <c>segment_ids[j...] == i</c>.
///
/// If the maximum is empty for a given segment ID <c>i</c>, it outputs the smallest
/// possible value for the specific numeric type,
/// <c>output[i] = numeric_limits&amp;lt;T&amp;gt;::lowest()</c>.
///
/// If the given segment ID <c>i</c> is negative, then the corresponding value is
/// dropped, and will not be included in the result.
///
/// &amp;lt;div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;"&amp;gt;
/// &amp;lt;img style="width:100%" src="https://www.tensorflow.org/images/UnsortedSegmentMax.png" alt&amp;gt;
/// &amp;lt;/div&amp;gt;
/// </remarks>
member this.UnsortedSegmentMax(data : TF, segment_ids : TF, num_segments : TF, ?operName : String) =
    // Segment-wise maximum over [data] grouped by [segment_ids] (see XML doc).
    let g = this.TFGraph
    TF(g, g.UnsortedSegmentMax(data.TFOutput, segment_ids.TFOutput, num_segments.TFOutput, defaultArg operName null))
/// <summary>
/// Computes the minimum along segments of a tensor.
/// </summary><param name="data"></param><param name="segment_ids">
/// A tensor whose shape is a prefix of <c>data.shape</c>.
/// </param><param name="num_segments"></param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'UnsortedSegmentMin'.
/// </param><returns>
/// Has same shape as data, except for the first <c>segment_ids.rank</c>
/// dimensions, which are replaced with a single dimension which has size
/// <c>num_segments</c>.
/// The TFOperation can be fetched from the resulting TFOutput, by fethching the Operation property from the result.
/// </returns><remarks>
/// Read
/// [the section on segmentation](https://tensorflow.org/api_guides/python/math_ops#segmentation)
/// for an explanation of segments.
///
/// This operator is similar to the unsorted segment sum operator found
/// [(here)](../../../api_docs/python/math_ops.md#UnsortedSegmentSum).
/// Instead of computing the sum over segments, it computes the minimum such that:
///
/// \\(output_i = \min_{j...} data_[j...]\\) where min is over tuples <c>j...</c> such
/// that <c>segment_ids[j...] == i</c>.
///
/// If the minimum is empty for a given segment ID <c>i</c>, it outputs the largest
/// possible value for the specific numeric type,
/// <c>output[i] = numeric_limits&amp;lt;T&amp;gt;::max()</c>.
///
/// If the given segment ID <c>i</c> is negative, then the corresponding value is
/// dropped, and will not be included in the result.
/// </remarks>
member this.UnsortedSegmentMin(data : TF, segment_ids : TF, num_segments : TF, ?operName : String) =
    // Segment-wise minimum over [data] grouped by [segment_ids] (see XML doc).
    let g = this.TFGraph
    TF(g, g.UnsortedSegmentMin(data.TFOutput, segment_ids.TFOutput, num_segments.TFOutput, defaultArg operName null))
/// <summary>
/// Computes the product along segments of a tensor.
/// </summary><param name="data">The values to be multiplied per segment.</param><param name="segment_ids">
/// A tensor whose shape is a prefix of <c>data.shape</c>; maps each entry of <c>data</c> to a segment.
/// </param><param name="num_segments">The number of distinct segment IDs.</param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'UnsortedSegmentProd'.
/// </param><returns>
/// Has same shape as data, except for the first <c>segment_ids.rank</c>
/// dimensions, which are replaced with a single dimension of size <c>num_segments</c>.
/// The TFOperation can be fetched from the resulting TFOutput, by fetching the Operation property from the result.
/// </returns><remarks>
/// Read
/// [the section on segmentation](https://tensorflow.org/api_guides/python/math_ops#segmentation)
/// for an explanation of segments.
///
/// Similar to the unsorted segment sum operator, but computes
/// \\(output_i = \prod_{j...} data[j...]\\) where the product is over tuples
/// <c>j...</c> such that <c>segment_ids[j...] == i</c>.
///
/// If there is no entry for a given segment ID <c>i</c>, it outputs 1.
/// Negative segment IDs are dropped and not included in the result.
/// </remarks>
member this.UnsortedSegmentProd(data : TF, segment_ids : TF, num_segments : TF, ?operName : String) =
    let name = match operName with Some n -> n | None -> null
    TF(this.TFGraph, this.TFGraph.UnsortedSegmentProd(data.TFOutput, segment_ids.TFOutput, num_segments.TFOutput, name))
/// <summary>
/// Computes the sum along segments of a tensor.
/// </summary><param name="data">The values to be summed per segment.</param><param name="segment_ids">
/// A tensor whose shape is a prefix of <c>data.shape</c>; maps each entry of <c>data</c> to a segment.
/// </param><param name="num_segments">The number of distinct segment IDs.</param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'UnsortedSegmentSum'.
/// </param><returns>
/// Has same shape as data, except for the first <c>segment_ids.rank</c>
/// dimensions, which are replaced with a single dimension of size <c>num_segments</c>.
/// The TFOperation can be fetched from the resulting TFOutput, by fetching the Operation property from the result.
/// </returns><remarks>
/// Read
/// [the section on segmentation](https://tensorflow.org/api_guides/python/math_ops#Segmentation)
/// for an explanation of segments.
///
/// Computes a tensor such that
/// \\(output[i] = \sum_{j...} data[j...]\\) where the sum is over tuples <c>j...</c> such
/// that <c>segment_ids[j...] == i</c>. Unlike <c>SegmentSum</c>, <c>segment_ids</c>
/// need not be sorted and need not cover all values in the full range of valid values.
///
/// If the sum is empty for a given segment ID <c>i</c>, <c>output[i] = 0</c>.
/// Negative segment IDs are dropped and not added to the sum of the segment.
///
/// <c>num_segments</c> should equal the number of distinct segment IDs.
/// </remarks>
member this.UnsortedSegmentSum(data : TF, segment_ids : TF, num_segments : TF, ?operName : String) =
    let name = match operName with Some n -> n | None -> null
    TF(this.TFGraph, this.TFGraph.UnsortedSegmentSum(data.TFOutput, segment_ids.TFOutput, num_segments.TFOutput, name))
/// <summary>
/// Op is similar to a lightweight Dequeue.
/// </summary><param name="dtypes">The data types of the values produced by the op.</param><param name="capacity">
/// Optional argument
/// </param><param name="memory_limit">
/// Optional argument
/// </param><param name="container">
/// Optional argument
/// </param><param name="shared_name">
/// Optional argument
/// </param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'Unstage'.
/// </param><returns>
/// One output per entry in <paramref name="dtypes" />. The TFOperation can be fetched
/// from each resulting TFOutput, by fetching the Operation property from the result.
/// </returns><remarks>
/// The basic functionality is similar to dequeue with many fewer
/// capabilities and options. This Op is optimized for performance.
/// </remarks>
member this.Unstage(dtypes : TFDataType[], ?capacity : Int64, ?memory_limit : Int64, ?container : String, ?shared_name : String, ?operName : String) =
    let outputs =
        this.TFGraph.Unstage(dtypes,
                             Option.toNullable capacity,
                             Option.toNullable memory_limit,
                             defaultArg container null,
                             defaultArg shared_name null,
                             defaultArg operName null)
    // Wrap each raw TFOutput back into the fluent TF wrapper.
    outputs |> Array.map (fun o -> TF(this.TFGraph, o))
/// <summary>
/// Applies upper_bound(sorted_search_values, values) along each row.
/// </summary><param name="sorted_inputs">
/// 2-D Tensor where each row is ordered.
/// </param><param name="values">
/// 2-D Tensor with the same numbers of rows as <c>sorted_search_values</c>. Contains
/// the values that will be searched for in <c>sorted_search_values</c>.
/// </param><param name="out_type">
/// Optional argument
/// </param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'UpperBound'.
/// </param><returns>
/// A <c>Tensor</c> with the same shape as <c>values</c>. It contains the last scalar index
/// into the last dimension where values can be inserted without changing the
/// ordered property.
/// The TFOperation can be fetched from the resulting TFOutput, by fetching the Operation property from the result.
/// </returns><remarks>
/// Each set of rows with the same index in (sorted_inputs, values) is treated
/// independently; the resulting row is the equivalent of calling
/// <c>np.searchsorted(sorted_inputs, values, side='right')</c>.
/// The result is an index into the last dimension only, not a global index.
///
/// A 2-D example:
///   sorted_sequence = [[0, 3, 9, 9, 10], [1, 2, 3, 4, 5]]
///   values = [[2, 4, 9], [0, 2, 6]]
///   result = UpperBound(sorted_sequence, values)
///   result == [[1, 2, 4], [0, 2, 5]]
/// </remarks>
member this.UpperBound(sorted_inputs : TF, values : TF, ?out_type : TFDataType, ?operName : String) =
    TF(this.TFGraph, this.TFGraph.UpperBound(sorted_inputs.TFOutput, values.TFOutput, Option.toNullable out_type, defaultArg operName null))
/// <summary>
/// Creates a handle to a Variable resource.
/// </summary><param name="dtype">
/// the type of this variable. Must agree with the dtypes
/// of all ops using this variable.
/// </param><param name="shape">
/// The (possibly partially specified) shape of this variable.
/// </param><param name="container">
/// Optional argument
/// the container this variable is placed in.
/// </param><param name="shared_name">
/// Optional argument
/// the name by which this variable is referred to.
/// </param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'VarHandleOp'.
/// </param><returns>
/// The TFOperation can be fetched from the resulting TFOutput, by fetching the Operation property from the result.
/// </returns>
member this.VarHandleOp(dtype : TFDataType, shape : TFShape, ?container : String, ?shared_name : String, ?operName : String) =
    TF(this.TFGraph, this.TFGraph.VarHandleOp(dtype, shape, defaultArg container null, defaultArg shared_name null, defaultArg operName null))
/// <summary>
/// Use VariableV2 instead.
/// </summary><param name="shape">The shape of the variable tensor.</param><param name="dtype">The type of elements in the variable tensor.</param><param name="container">
/// Optional argument
/// </param><param name="shared_name">
/// Optional argument
/// </param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'Variable'.
/// </param><returns>
/// The TFOperation can be fetched from the resulting TFOutput, by fetching the Operation property from the result.
/// </returns>
member this.Variable(shape : TFShape, dtype : TFDataType, ?container : String, ?shared_name : String, ?operName : String) =
    TF(this.TFGraph, this.TFGraph.Variable(shape, dtype, defaultArg container null, defaultArg shared_name null, defaultArg operName null))
/// <summary>
/// Returns the shape of the variable pointed to by <c>resource</c>.
/// </summary><param name="input">The resource handle whose variable's shape is queried.</param><param name="out_type">
/// Optional argument
/// </param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'VariableShape'.
/// </param><returns>
/// The TFOperation can be fetched from the resulting TFOutput, by fetching the Operation property from the result.
/// </returns><remarks>
/// This operation returns a 1-D integer tensor representing the shape of <c>input</c>.
///
/// For example:
///
/// <code>
/// # 't' is [[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]]
/// shape(t) ==&amp;gt; [2, 2, 3]
/// </code></remarks>
member this.VariableShape(input : TF, ?out_type : TFDataType, ?operName : String) =
    TF(this.TFGraph, this.TFGraph.VariableShape(input.TFOutput, Option.toNullable out_type, defaultArg operName null))
/// <summary>
/// Holds state in the form of a tensor that persists across steps.
/// </summary><param name="shape">
/// The shape of the variable tensor.
/// </param><param name="dtype">
/// The type of elements in the variable tensor.
/// </param><param name="container">
/// Optional argument
/// If non-empty, this variable is placed in the given container.
/// Otherwise, a default container is used.
/// </param><param name="shared_name">
/// Optional argument
/// If non-empty, this variable is named in the given bucket
/// with this shared_name. Otherwise, the node name is used instead.
/// </param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'VariableV2'.
/// </param><returns>
/// A reference to the variable tensor.
/// The TFOperation can be fetched from the resulting TFOutput, by fetching the Operation property from the result.
/// </returns><remarks>
/// Outputs a ref to the tensor state so it may be read or modified.
/// </remarks>
member this.VariableV2(shape : TFShape, dtype : TFDataType, ?container : String, ?shared_name : String, ?operName : String) =
    TF(this.TFGraph, this.TFGraph.VariableV2(shape, dtype, defaultArg container null, defaultArg shared_name null, defaultArg operName null))
/// <summary>
/// Checks whether a resource handle-based variable has been initialized.
/// </summary><param name="resource">
/// the input resource handle.
/// </param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'VarIsInitializedOp'.
/// </param><returns>
/// a scalar boolean which is true if the variable has been initialized.
/// The TFOperation can be fetched from the resulting TFOutput, by fetching the Operation property from the result.
/// </returns>
member this.VarIsInitializedOp(resource : TF, ?operName : String) =
    TF(this.TFGraph, this.TFGraph.VarIsInitializedOp(resource.TFOutput, defaultArg operName null))
/// <summary>
/// A Reader that outputs the entire contents of a file as a value.
/// </summary><param name="container">
/// Optional argument
/// If non-empty, this reader is placed in the given container.
/// Otherwise, a default container is used.
/// </param><param name="shared_name">
/// Optional argument
/// If non-empty, this reader is named in the given bucket
/// with this shared_name. Otherwise, the node name is used instead.
/// </param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'WholeFileReader'.
/// </param><returns>
/// The handle to reference the Reader.
/// The TFOperation can be fetched from the resulting TFOutput, by fetching the Operation property from the result.
/// </returns><remarks>
/// To use, enqueue filenames in a Queue. The output of ReaderRead will
/// be a filename (key) and the contents of that file (value).
/// </remarks>
member this.WholeFileReader(?container : String, ?shared_name : String, ?operName : String) =
    TF(this.TFGraph, this.TFGraph.WholeFileReader(defaultArg container null, defaultArg shared_name null, defaultArg operName null))
/// <summary>
/// A Reader that outputs the entire contents of a file as a value.
/// </summary><param name="container">
/// Optional argument
/// If non-empty, this reader is placed in the given container.
/// Otherwise, a default container is used.
/// </param><param name="shared_name">
/// Optional argument
/// If non-empty, this reader is named in the given bucket
/// with this shared_name. Otherwise, the node name is used instead.
/// </param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'WholeFileReaderV2'.
/// </param><returns>
/// The handle to reference the Reader.
/// The TFOperation can be fetched from the resulting TFOutput, by fetching the Operation property from the result.
/// </returns><remarks>
/// To use, enqueue filenames in a Queue. The output of ReaderRead will
/// be a filename (key) and the contents of that file (value).
/// </remarks>
member this.WholeFileReaderV2(?container : String, ?shared_name : String, ?operName : String) =
    TF(this.TFGraph, this.TFGraph.WholeFileReaderV2(defaultArg container null, defaultArg shared_name null, defaultArg operName null))
/// <summary>
/// A dataset that creates window datasets from the input dataset.
/// </summary><param name="input_dataset">The dataset to window.</param><param name="size">
/// A scalar representing the number of elements to accumulate in a window.
/// </param><param name="shift">
/// A scalar representing the steps moving the sliding window forward in one
/// iteration. It must be positive.
/// </param><param name="stride">
/// A scalar representing the stride of the input elements of the sliding window.
/// It must be positive.
/// </param><param name="drop_remainder">
/// A scalar representing whether a window should be dropped in case its size is
/// smaller than desired.
/// </param><param name="output_types">The element types of the resulting dataset.</param><param name="output_shapes">The element shapes of the resulting dataset.</param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'WindowDataset'.
/// </param><returns>
/// The TFOperation can be fetched from the resulting TFOutput, by fetching the Operation property from the result.
/// </returns>
member this.WindowDataset(input_dataset : TF, size : TF, shift : TF, stride : TF, drop_remainder : TF, output_types : TFDataType[], output_shapes : TFShape[], ?operName : String) =
    let name = match operName with Some n -> n | None -> null
    TF(this.TFGraph, this.TFGraph.WindowDataset(input_dataset.TFOutput, size.TFOutput, shift.TFOutput, stride.TFOutput, drop_remainder.TFOutput, output_types, output_shapes, name))
/// <summary>
/// Worker heartbeat op.
/// </summary><param name="request">
/// A string tensor containing a serialized WorkerHeartbeatRequest
/// </param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'WorkerHeartbeat'.
/// </param><returns>
/// A string tensor containing a serialized WorkerHeartbeatResponse
/// The TFOperation can be fetched from the resulting TFOutput, by fetching the Operation property from the result.
/// </returns><remarks>
/// Heartbeats may be sent periodically to indicate the coordinator is still active,
/// to retrieve the current worker status and to expedite shutdown when necessary.
/// </remarks>
member this.WorkerHeartbeat(request : TF, ?operName : String) =
    TF(this.TFGraph, this.TFGraph.WorkerHeartbeat(request.TFOutput, defaultArg operName null))
/// <summary>
/// Writes contents to the file at input filename. Creates file and recursively
/// </summary><param name="filename">
/// scalar. The name of the file to which we write the contents.
/// </param><param name="contents">
/// scalar. The content to be written to the output file.
/// </param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'WriteFile'.
/// </param><returns>
/// Returns the description of the operation
/// </returns><remarks>
/// creates directory if not existing.
/// </remarks>
member this.WriteFile(filename : TF, contents : TF, ?operName : String) =
    // Returns the raw TFOperation (no output value), so no TF wrapper here.
    this.TFGraph.WriteFile(filename.TFOutput, contents.TFOutput, defaultArg operName null)
/// <summary>
/// Returns 0 if x == 0, and x / y otherwise, elementwise.
/// </summary><param name="x">Numerator tensor.</param><param name="y">Denominator tensor.</param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'Xdivy'.
/// </param><returns>
/// The TFOperation can be fetched from the resulting TFOutput, by fetching the Operation property from the result.
/// </returns>
member this.Xdivy(x : TF, y : TF, ?operName : String) =
    TF(this.TFGraph, this.TFGraph.Xdivy(x.TFOutput, y.TFOutput, defaultArg operName null))
/// <summary>
/// Returns 0 if x == 0, and x * log(y) otherwise, elementwise.
/// </summary><param name="x">Multiplier tensor.</param><param name="y">Tensor whose log is taken.</param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'Xlogy'.
/// </param><returns>
/// The TFOperation can be fetched from the resulting TFOutput, by fetching the Operation property from the result.
/// </returns>
member this.Xlogy(x : TF, y : TF, ?operName : String) =
    TF(this.TFGraph, this.TFGraph.Xlogy(x.TFOutput, y.TFOutput, defaultArg operName null))
/// <summary>
/// Returns a tensor of zeros with the same shape and type as x.
/// </summary><param name="x">
/// a tensor of type T.
/// </param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'ZerosLike'.
/// </param><returns>
/// a tensor of the same shape and type as x but filled with zeros.
/// The TFOperation can be fetched from the resulting TFOutput, by fetching the Operation property from the result.
/// </returns>
member this.ZerosLike(x : TF, ?operName : String) =
    TF(this.TFGraph, this.TFGraph.ZerosLike(x.TFOutput, defaultArg operName null))
/// <summary>
/// Compute the Hurwitz zeta function \\(\zeta(x, q)\\).
/// </summary><param name="x">Exponent tensor.</param><param name="q">Offset tensor.</param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'Zeta'.
/// </param><returns>
/// The TFOperation can be fetched from the resulting TFOutput, by fetching the Operation property from the result.
/// </returns><remarks>
/// The Hurwitz zeta function is defined as:
///
/// \\(\zeta(x, q) = \sum_{n=0}^{\infty} (q + n)^{-x}\\)
/// </remarks>
member this.Zeta(x : TF, q : TF, ?operName : String) =
    TF(this.TFGraph, this.TFGraph.Zeta(x.TFOutput, q.TFOutput, defaultArg operName null))
/// <summary>
/// Creates a dataset that zips together <c>input_datasets</c>.
/// </summary><param name="input_datasets">The datasets to zip element-wise.</param><param name="output_types">The element types of the resulting dataset.</param><param name="output_shapes">The element shapes of the resulting dataset.</param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'ZipDataset'.
/// </param><returns>
/// The TFOperation can be fetched from the resulting TFOutput, by fetching the Operation property from the result.
/// </returns>
member this.ZipDataset(input_datasets : TF[], output_types : TFDataType[], output_shapes : TFShape[], ?operName : String) =
    // Unwrap each TF to its underlying TFOutput before delegating.
    let inputs = [| for d in input_datasets -> d.TFOutput |]
    TF(this.TFGraph, this.TFGraph.ZipDataset(inputs, output_types, output_shapes, defaultArg operName null))
/// <summary>
/// Returns a constant tensor.
/// </summary><param name="value">
/// Attr <c>value</c> is the tensor to return.
/// </param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'Const'.
/// </param><returns>
/// The TFOperation can be fetched from the resulting TFOutput, by fetching the Operation property from the result.
/// </returns>
member this.Const(value : TFTensor, ?operName : String) =
    TF(this.TFGraph, this.TFGraph.Const(value, defaultArg operName null))
/// <summary>
/// Computes the sum of elements across dimensions of a tensor.
/// </summary><returns>The reduced tensor.</returns><param name="input">The tensor to reduce. Should have numeric type.</param><param name="axis">The dimensions to reduce. If not set (the default), reduces all dimensions.</param><param name="keep_dims">If set to <c>true</c> retains reduced dimensions with length 1.</param><param name="operName">A name for the operation, optional.</param><remarks>
/// Reduces input_tensor along the dimensions given in axis.
/// Unless keep_dims is true, the rank of the tensor is reduced by 1 for each
/// entry in axis. If keep_dims is true, the reduced dimensions
/// are retained with length 1.
///
/// If axis has no entries, all dimensions are reduced, and a
/// tensor with a single element is returned.
/// </remarks>
member this.ReduceSum(input : TF, ?axis : TF, ?keep_dims : Nullable<Boolean>, ?operName : String) =
    // Unwrap the optional axis wrapper into the Nullable<TFOutput> the API expects.
    let axis = match axis with Some a -> Nullable(a.TFOutput) | None -> Nullable()
    TF(this.TFGraph, this.TFGraph.ReduceSum(input.TFOutput, axis, defaultArg keep_dims (Nullable(false)), defaultArg operName null))
/// <summary>
/// Computes the product of elements across dimensions of a tensor.
/// </summary><returns>The reduced tensor.</returns><param name="input">The tensor to reduce. Should have numeric type.</param><param name="axis">The dimensions to reduce. If not set (the default), reduces all dimensions.</param><param name="keep_dims">If set to <c>true</c> retains reduced dimensions with length 1.</param><param name="operName">A name for the operation, optional.</param><remarks>
/// Reduces input_tensor along the dimensions given in axis.
/// Unless keep_dims is true, the rank of the tensor is reduced by 1 for each
/// entry in axis. If keep_dims is true, the reduced dimensions
/// are retained with length 1.
///
/// If axis has no entries, all dimensions are reduced, and a
/// tensor with a single element is returned.
/// </remarks>
member this.ReduceProd(input : TF, ?axis : TF, ?keep_dims : Nullable<Boolean>, ?operName : String) =
    // Unwrap the optional axis wrapper into the Nullable<TFOutput> the API expects.
    let axis = match axis with Some a -> Nullable(a.TFOutput) | None -> Nullable()
    TF(this.TFGraph, this.TFGraph.ReduceProd(input.TFOutput, axis, defaultArg keep_dims (Nullable(false)), defaultArg operName null))
/// <summary>
/// Computes the mean of elements across dimensions of a tensor.
/// </summary><returns>The reduced tensor.</returns><param name="input">The tensor to reduce. Should have numeric type.</param><param name="axis">The dimensions to reduce. If not set (the default), reduces all dimensions.</param><param name="keep_dims">If set to <c>true</c> retains reduced dimensions with length 1.</param><param name="operName">A name for the operation, optional.</param><remarks><para>
/// Reduces input_tensor along the dimensions given in axis.
/// Unless keep_dims is true, the rank of the tensor is reduced by 1 for each
/// entry in axis. If keep_dims is true, the reduced dimensions
/// are retained with length 1.</para><para>
/// If axis has no entries, all dimensions are reduced, and a
/// tensor with a single element is returned.</para></remarks>
member this.ReduceMean(input : TF, ?axis : TF, ?keep_dims : Nullable<Boolean>, ?operName : String) =
    // Unwrap the optional axis wrapper into the Nullable<TFOutput> the API expects.
    let axis = match axis with Some a -> Nullable(a.TFOutput) | None -> Nullable()
    TF(this.TFGraph, this.TFGraph.ReduceMean(input.TFOutput, axis, defaultArg keep_dims (Nullable(false)), defaultArg operName null))
/// <summary>
/// Creates a variable initialized from <paramref name="initialValue" />, returning
/// the initialization operation and the variable's value through the byref parameters.
/// </summary><param name="initialValue">The tensor whose value initializes the variable.</param><param name="init">Receives the operation that initializes the variable.</param><param name="value">Receives the output that holds the variable's value.</param><param name="trainable">If true (the default), the variable is registered as trainable.</param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'Variable'.
/// </param><returns>
/// The TFOperation can be fetched from the resulting TFOutput, by fetching the Operation property from the result.
/// </returns>
member this.Variable(initialValue : TF, init : TFOperation byref, value : TFOutput byref, ?trainable : Boolean, ?operName : String) =
    this.TFGraph.Variable(initialValue.TFOutput, &init, &value, defaultArg trainable true, defaultArg operName null)
/// <summary>
/// Registers a specified variable as an initialization variable.
/// </summary><param name="variable">Variable to register.</param><remarks><para>
/// This is a convenience method to track the variables that need to be initialized in the graph;
/// retrieve the accumulated list (and clear it) with <see cref="M:TensorFlow.TFGraph.GetGlobalVariablesInitializer" />.
/// </para><para>
/// Typically, helper methods register every variable they want initialized, and a higher
/// level method retrieves and initializes them all at its convenience.
/// </para></remarks>
member this.AddInitVariable(variable : TFOperation) = this.TFGraph.AddInitVariable(variable)
/// <summary>
/// Gets the list of all registered global variables.
/// </summary><returns>The array of variables that should be initialized.</returns><remarks>
/// Invoking this method clears the list of pending initialization variables.
/// </remarks>
member this.GetGlobalVariablesInitializer() = this.TFGraph.GetGlobalVariablesInitializer()
/// <summary>
/// Gets the list of all registered trainable variables.
/// </summary><returns>The array of variables that should be trained.</returns>
member this.GetTrainableVariables() = this.TFGraph.GetTrainableVariables()
/// <summary>
/// Creates a variable initialized from <paramref name="initialValue" />, returning
/// the variable's value through the byref parameter.
/// </summary><param name="initialValue">The tensor whose value initializes the variable.</param><param name="value">Receives the output that holds the variable's value.</param><param name="trainable">If true (the default), the variable is registered as trainable.</param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'Variable'.
/// </param><returns>
/// The TFOperation can be fetched from the resulting TFOutput, by fetching the Operation property from the result.
/// </returns>
member this.Variable(initialValue : TF, value : TFOutput byref, ?trainable : Boolean, ?operName : String) =
    this.TFGraph.Variable(initialValue.TFOutput, &value, defaultArg trainable true, defaultArg operName null)
/// <summary>
/// Creates a variable initialized from <paramref name="initialValue" />.
/// </summary><param name="initialValue">The tensor whose value initializes the variable.</param><param name="trainable">If true (the default), the variable is registered as trainable.</param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'Variable'.
/// </param><returns>
/// The TFOperation can be fetched from the resulting TFOutput, by fetching the Operation property from the result.
/// </returns>
member this.Variable(initialValue : TF, ?trainable : Boolean, ?operName : String) =
    this.TFGraph.Variable(initialValue.TFOutput, defaultArg trainable true, defaultArg operName null)
/// Gets the graph-level random seed from the underlying TFGraph.
member this.get_Seed() = this.TFGraph.get_Seed()
/// Sets the graph-level random seed on the underlying TFGraph.
member this.set_Seed(value : Nullable<Int32>) = this.TFGraph.set_Seed(value)
/// <summary>
/// Returns the graph and local seeds based on an optionally set incoming seed value.
/// </summary><param name="operationSeed">The seed value that might be set.</param><param name="graphSeed">Returned graph seed.</param><param name="localSeed">Returned local seed.</param><remarks>
/// This helper function returns two seeds derived from graph-level and op-level seeds.
/// Many random operations internally use the two seeds to allow user to change
/// the seed globally for a graph, or for only specific operations.
/// </remarks>
member this.GetRandomSeeds(operationSeed : Nullable<Int32>, graphSeed : Int32 byref, localSeed : Int32 byref) =
    this.TFGraph.GetRandomSeeds(operationSeed, &graphSeed, &localSeed)
/// <summary>
/// Computes dropout.
/// </summary><param name="x">A tensor.</param><param name="keep_prob">A scalar Tensor with the same type as x. The probability that each element is kept.</param><param name="noise_shape">A 1-D Tensor of type int32, representing the shape for randomly generated keep/drop flags.</param><param name="seed">Integer seed used for the random distribution, using the TensorFlow SetRandomSeed.</param><param name="operName">Operation name, optional.</param><remarks>
/// With probability keep_prob, outputs the input element scaled up by 1 / keep_prob,
/// otherwise outputs 0. The scaling is so that the expected sum is unchanged.
/// </remarks>
member this.Dropout(x : TF, keep_prob : TF, ?noise_shape : TFShape, ?seed : Int32, ?operName : String) =
    TF(this.TFGraph, this.TFGraph.Dropout(x.TFOutput, keep_prob.TFOutput, defaultArg noise_shape null, Option.toNullable seed, defaultArg operName null))
/// <summary>
/// Computes dropout.
/// </summary><param name="x">A tensor.</param><param name="keep_prob">A scalar with the same type as x. The probability that each element is kept.</param><param name="noise_shape">A 1-D Tensor of type int32, representing the shape for randomly generated keep/drop flags.</param><param name="seed">Integer seed used for the random distribution, using the TensorFlow SetRandomSeed.</param><param name="operName">Operation name, optional.</param><remarks>
/// With probability keep_prob, outputs the input element scaled up by 1 / keep_prob,
/// otherwise outputs 0. The scaling is so that the expected sum is unchanged.
/// </remarks>
member this.Dropout(x : TF, keep_prob : Double, ?noise_shape : TFShape, ?seed : Int32, ?operName : String) =
    TF(this.TFGraph, this.TFGraph.Dropout(x.TFOutput, keep_prob, defaultArg noise_shape null, Option.toNullable seed, defaultArg operName null))
/// <summary>
/// Clips tensor values to a maximum L2-norm.
/// </summary><remarks><para>
/// Given a tensor <paramref name="x" /> and a maximum clip value <paramref name="clip_norm" />, this operation normalizes
/// <paramref name="x" /> so that its L2-norm is less than or equal to <paramref name="clip_norm" />, along the dimensions
/// given in <paramref name="axes" />. If the L2-norm of <paramref name="x" /> is already within the bound, <paramref name="x" />
/// is unchanged; otherwise the result has values set to: <c>t* clip_norm / l2norm(t)</c></para></remarks><param name="x">The tensor.</param><param name="clip_norm">The maximum norm to clip by. A 0-D (scalar) tensor, or a tensor with the same shape as <paramref name="x" />.</param><param name="axes">The dimensions along which to compute the norm; all dimensions when omitted.</param><param name="operName">Operation name, optional.</param><returns>A clipped <see cref="T:TensorFlow.TFOutput">tensor</see>.</returns>
member this.ClipByNorm(x : TF, clip_norm : TF, ?axes : TF, ?operName : String) =
    // Unwrap the optional axes wrapper into the Nullable<TFOutput> the API expects.
    let axes = match axes with Some a -> Nullable(a.TFOutput) | None -> Nullable()
    TF(this.TFGraph, this.TFGraph.ClipByNorm(x.TFOutput, clip_norm.TFOutput, axes, defaultArg operName null))
/// <summary>
/// Computes the global norm of multiple tensors.
/// </summary><remarks><para>
/// Given a tuple or list of tensors <paramref name="tensors" />, this operation returns the global norm of the elements in all tensors
/// in <paramref name="tensors" />. The global norm is computed as: <c>global_norm = sqrt(sum([l2norm(t)**2 for t in t_list]))</c>. Any
/// entries in <paramref name="tensors" /> that are of type None are ignored.</para></remarks><param name="tensors">The input tensors.</param><param name="operName">Operation name, optional.</param><returns>A clipped <see cref="T:TensorFlow.TFOutput">tensor</see>.</returns>
member this.GlobalNorm(tensors : TF[], ?operName : String) =
let operName = defaultArg operName null
TF(this.TFGraph, this.TFGraph.GlobalNorm(tensors |> Array.map (fun x -> x.TFOutput), operName))
/// <summary>
/// Clips tensor values to a maximum average L2-norm.
/// </summary><remarks>
/// Given a tensor <paramref name="x" />, and a maximum clip value <paramref name="clip_norm" />, this operation
/// normalizes <paramref name="x" /> so that its its average L2-norm is less than or equal to <paramref name="clip_norm" />.
/// Specifically, if the average L2-norm is already less than or equal to <paramref name="clip_norm" />, then <paramref name="x" />
/// is not modified. If the average L2-norm is greater than <paramref name="clip_norm" />, then this operation returns a tensor of the same
/// type and shape as <paramref name="x" /> with its values set to: <c>t* clip_norm / l2norm_avg(t)</c>. In this case,
/// the average L2-norm of the output tensor is <paramref name="clip_norm" />.
/// </remarks><param name="x">The input tensor.</param><param name="clip_norm">A maximum clipping value.</param><param name="operName">Name of the oper.</param>
member this.ClipByAverageNorm(x : TF, clip_norm : TF, ?operName : String) =
let operName = defaultArg operName null
TF(this.TFGraph, this.TFGraph.ClipByAverageNorm(x.TFOutput, clip_norm.TFOutput, operName))
/// <summary>
/// Computes sigmoid cross entropy given `logits`.
/// </summary><remarks>
/// Measures the probability error in discrete classification tasks in which each
/// class is independent and not mutually exclusive.For instance, one could
/// perform multilabel classification where a picture can contain both an elephant
/// and a dog at the same time.
/// </remarks>
member this.SigmoidCrossEntropyWithLogits(labels : TF, logits : TF, ?operName : String) =
let operName = defaultArg operName null
TF(this.TFGraph, this.TFGraph.SigmoidCrossEntropyWithLogits(labels.TFOutput, logits.TFOutput, operName))
/// <summary>
/// Shuffle dimensions of x according to a permutation.
/// </summary><param name="x"></param><param name="perm"></param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'Transpose'.
/// </param><returns>
/// The TFOperation can be fetched from the resulting TFOutput, by fethching the Operation property from the result.
/// </returns><remarks>
/// The output <c>y</c> has the same rank as <c>x</c>. The shapes of <c>x</c> and <c>y</c> satisfy:
/// <c>y.shape[i] == x.shape[perm[i]] for i in [0, 1, ..., rank(x) - 1]</c></remarks>
member this.Transpose(x : TF, ?operName : String) =
let operName = defaultArg operName null
TF(this.TFGraph, this.TFGraph.Transpose(x.TFOutput, operName))
/// <summary>
/// A conditional accumulator for aggregating gradients.
/// </summary><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'ConditionalAccumulator'.
/// </param><param name="container">
/// Optional argument
/// If non-empty, this accumulator is placed in the given container.
/// Otherwise, a default container is used.
/// </param><param name="shared_name">
/// Optional argument
/// If non-empty, this accumulator will be shared under the
/// given name across multiple sessions.
/// </param><param name="reduction_type">
/// Optional argument
/// </param><param name="dtype">
/// The type of the value being accumulated.
/// </param><param name="shape">
/// The shape of the values, can be [], in which case shape is unknown.
/// </param><returns>
/// The handle to the accumulator.
/// The TFOperation can be fetched from the resulting TFOutput, by fethching the Operation property from the result.
/// </returns><remarks>
/// The accumulator accepts gradients marked with local_step greater or
/// equal to the most recent global_step known to the accumulator. The
/// average can be extracted from the accumulator, provided sufficient
/// gradients have been accumulated. Extracting the average automatically
/// resets the aggregate to 0, and increments the global_step recorded by
/// the accumulator.
/// </remarks>
member this.Cond(pred : TF, true_fn : unit -> TF, false_fn : unit -> TF, ?operName : String) =
let operName = defaultArg operName null
TF(this.TFGraph, this.TFGraph.Cond(pred.TFOutput, (fun () -> (true_fn()).TFOutput), (fun () -> (false_fn()).TFOutput), operName))
/// <summary>
/// Deprecated, use StackV2.
/// </summary><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'Stack'.
/// </param><param name="stack_name">
/// Optional argument
/// </param><param name="elem_type"></param><returns>
/// The TFOperation can be fetched from the resulting TFOutput, by fethching the Operation property from the result.
/// </returns>
member this.Stack(values : TF[], ?axis : Nullable<Int32>, ?operName : String) =
let axis = defaultArg axis (Nullable(0))
let operName = defaultArg operName null
TF(this.TFGraph, this.TFGraph.Stack(values |> Array.map (fun x -> x.TFOutput), axis, operName))
/// <summary>
/// Creates a sequence of numbers.
/// </summary><param name="start">
/// 0-D (scalar). First entry in the sequence.
/// </param><param name="limit">
/// 0-D (scalar). Upper limit of sequence, exclusive.
/// </param><param name="delta">
/// 0-D (scalar). Optional. Default is 1. Number that increments <c>start</c>.
/// </param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'Range'.
/// </param><returns>
/// 1-D.
/// The TFOperation can be fetched from the resulting TFOutput, by fethching the Operation property from the result.
/// </returns><remarks>
/// This operation creates a sequence of numbers that begins at <c>start</c> and
/// extends by increments of <c>delta</c> up to but not including <c>limit</c>.
///
/// For example:
///
/// <code>
/// # 'start' is 3
/// # 'limit' is 18
/// # 'delta' is 3
/// tf.range(start, limit, delta) ==&amp;gt; [3, 6, 9, 12, 15]
/// </code></remarks>
member this.Range(start : TF, ?limit : TF, ?delta : TF, ?dataType : TFDataType, ?operName : String) =
let dataType = defaultArg (dataType |> Option.map Nullable) (Nullable())
let operName = defaultArg operName "range"
TF(this.TFGraph, this.TFGraph.Range(start.TFOutput, limit |> Option.map (fun x -> x.TFOutput) |> Option.toNullable, delta |> Option.map (fun x -> x.TFOutput) |> Option.toNullable, dataType, operName))
/// <summary>
/// Returns a tensor of zeros with the same shape and type as x.
/// </summary><param name="x">
/// a tensor of type T.
/// </param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'ZerosLike'.
/// </param><returns>
/// a tensor of the same shape and type as x but filled with zeros.
/// The TFOperation can be fetched from the resulting TFOutput, by fethching the Operation property from the result.
/// </returns>
member this.Zeros(shape : TFShape, ?dtype : TFDataType, ?operName : String) =
let dtype = defaultArg dtype TFDataType.Double
let operName = defaultArg operName null
TF(this.TFGraph, this.TFGraph.Zeros(shape, dtype, operName))
/// <summary>
/// Returns a tensor of ones with the same shape and type as x.
/// </summary><param name="x">
/// a tensor of type T.
/// </param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'OnesLike'.
/// </param><returns>
/// a tensor of the same shape and type as x but filled with ones.
/// The TFOperation can be fetched from the resulting TFOutput, by fethching the Operation property from the result.
/// </returns>
member this.Ones(shape : TFShape, ?dtype : TFDataType, ?operName : String) =
let dtype = defaultArg dtype TFDataType.Double
let operName = defaultArg operName null
TF(this.TFGraph, this.TFGraph.Ones(shape, dtype, operName))
/// <summary>
/// Create a constant tensor based on a shape
/// Used by Zeros and Ones
/// </summary><param name="value">Value for tensor</param><param name="tfshape">Shape of the tensor</param><param name="dtype">Optional Type of the Zero value. Default: Double</param><param name="operName">Operation name, optional.</param><returns></returns>
/// see https://github.com/tensorflow/tensorflow/blob/r1.1/tensorflow/python/framework/constant_op.py
///
member this.Constant(value : Object, tfshape : TFShape, ?dtype : TFDataType, ?operName : String) =
let dtype = defaultArg dtype TFDataType.Double
let operName = defaultArg operName null
TF(this.TFGraph, this.TFGraph.Constant(value, tfshape, dtype, operName))
/// <summary>
/// Outputs random values from a normal distribution
/// </summary><returns>A tensor of the specified shape filled with random normal values.</returns><param name="shape">Shape of the output tensor.</param><param name="mean">The mean of the standard distribution.</param><param name="stddev">The standard deviation of the normal distribution.</param><param name="seed">Integer seed used for the random distribution, using the TensorFlow SetRandomSeed .</param><param name="operName">Operation name, optional.</param>
member this.RandomNormal(shape : TFShape, ?mean : Double, ?stddev : Double, ?seed : Int32, ?operName : String) =
let mean = defaultArg mean 0.000000
let stddev = defaultArg stddev 1.000000
let seed = defaultArg (seed |> Option.map Nullable) (Nullable())
let operName = defaultArg operName null
TF(this.TFGraph, this.TFGraph.RandomNormal(shape, mean, stddev, seed, operName))
/// <summary>
/// Outputs random values from a uniform distribution.
/// </summary><param name="shape">
/// The shape of the output tensor.
/// </param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'RandomUniform'.
/// </param><param name="seed">
/// Optional argument
/// If either <c>seed</c> or <c>seed2</c> are set to be non-zero, the random number
/// generator is seeded by the given seed. Otherwise, it is seeded by a
/// random seed.
/// </param><param name="seed2">
/// Optional argument
/// A second seed to avoid seed collision.
/// </param><param name="dtype">
/// The type of the output.
/// </param><returns>
/// A tensor of the specified shape filled with uniform random values.
/// The TFOperation can be fetched from the resulting TFOutput, by fethching the Operation property from the result.
/// </returns><remarks>
/// The generated values follow a uniform distribution in the range <c>[0, 1)</c>. The
/// lower bound 0 is included in the range, while the upper bound 1 is excluded.
/// </remarks>
member this.RandomUniform(shape : TFShape, ?minval : Double, ?maxval : Double, ?seed : Int32, ?operName : String) =
let minval = defaultArg minval 0.000000
let maxval = defaultArg maxval 1.000000
let seed = defaultArg (seed |> Option.map Nullable) (Nullable())
let operName = defaultArg operName null
TF(this.TFGraph, this.TFGraph.RandomUniform(shape, minval, maxval, seed, operName))
/// <summary>
/// Outputs random values from a uniform distribution.
/// </summary><param name="shape">
/// The shape of the output tensor.
/// </param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'RandomUniform'.
/// </param><param name="seed">
/// Optional argument
/// If either <c>seed</c> or <c>seed2</c> are set to be non-zero, the random number
/// generator is seeded by the given seed. Otherwise, it is seeded by a
/// random seed.
/// </param><param name="seed2">
/// Optional argument
/// A second seed to avoid seed collision.
/// </param><param name="dtype">
/// The type of the output.
/// </param><returns>
/// A tensor of the specified shape filled with uniform random values.
/// The TFOperation can be fetched from the resulting TFOutput, by fethching the Operation property from the result.
/// </returns><remarks>
/// The generated values follow a uniform distribution in the range <c>[0, 1)</c>. The
/// lower bound 0 is included in the range, while the upper bound 1 is excluded.
/// </remarks>
member this.RandomUniform(shape : TFShape, ?minval : Single, ?maxval : Single, ?seed : Int32, ?operName : String) =
let minval = defaultArg minval 0.000000f
let maxval = defaultArg maxval 1.000000f
let seed = defaultArg (seed |> Option.map Nullable) (Nullable())
let operName = defaultArg operName null
TF(this.TFGraph, this.TFGraph.RandomUniform(shape, minval, maxval, seed, operName))
/// <summary>
/// Sets the tensor shape of the tensor referenced by <paramref name="output" /> to the shape described by <paramref name="dims" />.
/// </summary><param name="output">The tensor on which this method will operate in the graph.</param><param name="dims">The tensor shape, specified as an array of dimensions.</param><param name="status">Status buffer, if specified a status code will be left here, if not specified, a <see cref="T:TensorFlow.TFException" /> exception is raised if there is an error.</param>
member this.SetTensorShape(output : TF, dims : Int64[], ?status : TFStatus) =
let status = defaultArg status null
this.TFGraph.SetTensorShape(output.TFOutput, dims, status)
/// <summary>
/// Returns the number of dimensions of the Tensor referenced by output
/// </summary><returns>The number of dimensions of the tensor.</returns><param name="output">The tensor to probe.</param><param name="status">Status buffer, if specified a status code will be left here, if not specified, a <see cref="T:TensorFlow.TFException" /> exception is raised if there is an error.</param>
member this.GetTensorNumDims(output : TF, ?status : TFStatus) =
let status = defaultArg status null
this.TFGraph.GetTensorNumDims(output.TFOutput, status)
/// <summary>
/// Returns the shape of a tensor specified in <paramref name="output" />.
/// </summary><returns>The tensor shape. If the number of dimensions in the shape is unknown or the shape is, a scalar, the values in the array will be zero. Otherwise, each element of will be set corresponding to the size of the dimension. An unknown dimension is represented by -1.</returns><param name="output">The tensor that you want to look up. </param><param name="status">Status buffer, if specified a status code will be left here, if not specified, a <see cref="T:TensorFlow.TFException" /> exception is raised if there is an error.</param>
member this.GetTensorShape(output : TF, ?status : TFStatus) =
let status = defaultArg status null
this.TFGraph.GetTensorShape(output.TFOutput, status)
/// <summary>
/// Write out a serialized representation of the graph (as a GraphDef protocol buffer message) into <paramref name="outputGraphDef" />.
/// </summary><param name="outputGraphDef">Target buffer where the graphs is serialized into.</param><param name="status">Status buffer, if specified a status code will be left here, if not specified, a <see cref="T:TensorFlow.TFException" /> exception is raised if there is an error.</param>
member this.ToGraphDef(outputGraphDef : TFBuffer, ?status : TFStatus) =
let status = defaultArg status null
this.TFGraph.ToGraphDef(outputGraphDef, status)
/// <summary>
/// Import a serialized graph into this graph, using the specified prefix.
/// </summary><returns>The import.</returns><param name="graphDef">A buffer containing the serialized graph.</param><param name="prefix">A prefix that will be prepended to names of nodes in the <paramref name="graphDef" /> when they are imported into the graph.</param><param name="status">Status buffer, if specified a status code will be left here, if not specified, a <see cref="T:TensorFlow.TFException" /> exception is raised if there is an error.</param>
member this.Import(graphDef : TFBuffer, ?prefix : String, ?status : TFStatus) =
let prefix = defaultArg prefix ""
let status = defaultArg status null
this.TFGraph.Import(graphDef, prefix, status)
/// <summary>
/// Import a serialized graph into this graph, using the specified prefix.
/// </summary><returns>The import.</returns><param name="graphDef">A buffer containing the serialized graph.</param><param name="prefix">A prefix that will be prepended to names of nodes in the <paramref name="graphDef" /> when they are imported into the graph.</param><param name="status">Status buffer, if specified a status code will be left here, if not specified, a <see cref="T:TensorFlow.TFException" /> exception is raised if there is an error.</param>
member this.Import(graphDef : TFBuffer, options : TFImportGraphDefOptions, ?status : TFStatus) =
let status = defaultArg status null
this.TFGraph.Import(graphDef, options, status)
/// <summary>
/// Import a serialized graph into this graph, using the specified prefix.
/// </summary><returns>The import.</returns><param name="graphDef">A buffer containing the serialized graph.</param><param name="prefix">A prefix that will be prepended to names of nodes in the <paramref name="graphDef" /> when they are imported into the graph.</param><param name="status">Status buffer, if specified a status code will be left here, if not specified, a <see cref="T:TensorFlow.TFException" /> exception is raised if there is an error.</param>
member this.Import(buffer : Byte[], ?prefix : String, ?status : TFStatus) =
let prefix = defaultArg prefix ""
let status = defaultArg status null
this.TFGraph.Import(buffer, prefix, status)
/// <summary>
/// Import a serialized graph into this graph, using the specified prefix.
/// </summary><returns>The import.</returns><param name="graphDef">A buffer containing the serialized graph.</param><param name="prefix">A prefix that will be prepended to names of nodes in the <paramref name="graphDef" /> when they are imported into the graph.</param><param name="status">Status buffer, if specified a status code will be left here, if not specified, a <see cref="T:TensorFlow.TFException" /> exception is raised if there is an error.</param>
member this.Import(buffer : Byte[], options : TFImportGraphDefOptions, ?status : TFStatus) =
let status = defaultArg status null
this.TFGraph.Import(buffer, options, status)
member this.get_Item(name : String) =
this.TFGraph.get_Item(name)
/// <summary>
/// Returns the enumerator that returns all the TFOperations in a graph.
/// </summary><returns>The enumerator.</returns>
member this.GetEnumerator() =
this.TFGraph.GetEnumerator()
/// <summary>
/// Returns the tensor shape for the specific output pparameters as an array of longs.
/// </summary><returns>null for single dimension, .</returns><param name="output">The output operation to probe.</param><param name="status">Status buffer, if specified a status code will be left here, if not specified, a <see cref="T:TensorFlow.TFException" /> exception is raised if there is an error.</param>
member this.GetShape(output : TF, ?status : TFStatus) =
let status = defaultArg status null
this.TFGraph.GetShape(output.TFOutput, status)
member this.get_CurrentNameScope() =
this.TFGraph.get_CurrentNameScope()
/// <summary>
/// Creates a new namescope by setting the scope to the description provided.
/// </summary><returns>A new scope that will remain in use until the return TFScope is disposed.</returns><param name="nameScopeDesc">The namescope description, if the value is null, this
/// will reset the toplevel namescope to be the empty value. </param><remarks><para>
/// To more easily name your operations and group then, you can use the
/// WithScope method to set a current name scope that alter the complete name
/// of an operation added to the graph.
/// </para><para>
/// The graph starts with a scope set to the empty string, you can introduce new
/// scopes by calling WithScope, and can be conveniently used with the C# using
/// statement, like this:
/// </para><code>
/// Assert (graph.CurrentNamescope, "");
/// using (var nested = graph.WithScope ("nested")){
/// Assert (graph.CurrentNameScope, "nested");
/// using (var inner = graph.WithScope ("inner")){
/// Assert (graph.CurrentNameScope, "nested/inner");
/// }
/// }
/// </code></remarks>
member this.WithScope(nameScopeDesc : String) =
this.TFGraph.WithScope(nameScopeDesc)
/// <summary>
/// Set the device to be used for all the operations defined in the using block.
/// Creates <see cref="T:TensorFlow.TFDevice" /> and sets <see cref="P:TensorFlow.TFGraph.DeviceName" /> to the intended device.
/// If device is already set throws exception.
/// </summary><param name="deviceName">Name of the device to be used e.g. "/cpu:0", "/device:GPU:0"</param>
member this.WithDevice(deviceName : String) =
this.TFGraph.WithDevice(deviceName)
member this.get_CurrentDependencies() =
this.TFGraph.get_CurrentDependencies()
/// <summary>
/// Reorders a SparseTensor into the canonical, row-major ordering.
/// </summary><param name="input_indices">
/// 2-D. <c>N x R</c> matrix with the indices of non-empty values in a
/// SparseTensor, possibly not in canonical ordering.
/// </param><param name="input_values">
/// 1-D. <c>N</c> non-empty values corresponding to <c>input_indices</c>.
/// </param><param name="input_shape">
/// 1-D. Shape of the input SparseTensor.
/// </param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'SparseReorder'.
/// </param><returns>
/// Returns a tuple with multiple values, as follows:
/// output_indices: 2-D. <c>N x R</c> matrix with the same indices as input_indices, but
/// in canonical row-major ordering.
/// output_values: 1-D. <c>N</c> non-empty values corresponding to <c>output_indices</c>.
/// The TFOperation can be fetched from any of the TFOutputs returned in the tuple values, by fethching the Operation property.
/// </returns><remarks>
/// Note that by convention, all sparse ops preserve the canonical ordering along
/// increasing dimension number. The only time ordering can be violated is during
/// manual manipulation of the indices and values vectors to add entries.
///
/// Reordering does not affect the shape of the SparseTensor.
///
/// If the tensor has rank <c>R</c> and <c>N</c> non-empty values, <c>input_indices</c> has
/// shape <c>[N, R]</c>, input_values has length <c>N</c>, and input_shape has length <c>R</c>.
/// </remarks>
member this.SparseReorder(input_indices : TF, input_values : TF, input_shape : TF, ?operName : String) =
let operName = defaultArg operName null
this.TFGraph.SparseReorder(input_indices.TFOutput, input_values.TFOutput, input_shape.TFOutput, operName)
/// <summary>
/// Reshapes a SparseTensor to represent values in a new dense shape.
/// </summary><param name="input_indices">
/// 2-D. <c>N x R_in</c> matrix with the indices of non-empty values in a
/// SparseTensor.
/// </param><param name="input_shape">
/// 1-D. <c>R_in</c> vector with the input SparseTensor's dense shape.
/// </param><param name="new_shape">
/// 1-D. <c>R_out</c> vector with the requested new dense shape.
/// </param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'SparseReshape'.
/// </param><returns>
/// Returns a tuple with multiple values, as follows:
/// output_indices: 2-D. <c>N x R_out</c> matrix with the updated indices of non-empty
/// values in the output SparseTensor.
/// output_shape: 1-D. <c>R_out</c> vector with the full dense shape of the output
/// SparseTensor. This is the same as <c>new_shape</c> but with any -1 dimensions
/// filled in.
/// The TFOperation can be fetched from any of the TFOutputs returned in the tuple values, by fethching the Operation property.
/// </returns><remarks>
/// This operation has the same semantics as reshape on the represented dense
/// tensor. The <c>input_indices</c> are recomputed based on the requested <c>new_shape</c>.
///
/// If one component of <c>new_shape</c> is the special value -1, the size of that
/// dimension is computed so that the total dense size remains constant. At
/// most one component of <c>new_shape</c> can be -1. The number of dense elements
/// implied by <c>new_shape</c> must be the same as the number of dense elements
/// originally implied by <c>input_shape</c>.
///
/// Reshaping does not affect the order of values in the SparseTensor.
///
/// If the input tensor has rank <c>R_in</c> and <c>N</c> non-empty values, and <c>new_shape</c>
/// has length <c>R_out</c>, then <c>input_indices</c> has shape <c>[N, R_in]</c>,
/// <c>input_shape</c> has length <c>R_in</c>, <c>output_indices</c> has shape <c>[N, R_out]</c>, and
/// <c>output_shape</c> has length <c>R_out</c>.
/// </remarks>
member this.SparseReshape(input_indices : TF, input_shape : TF, new_shape : TF, ?operName : String) =
let operName = defaultArg operName null
this.TFGraph.SparseReshape(input_indices.TFOutput, input_shape.TFOutput, new_shape.TFOutput, operName)
/// <summary>
/// Computes the mean along sparse segments of a tensor.
/// </summary><param name="data"></param><param name="indices">
/// A 1-D tensor. Has same rank as <c>segment_ids</c>.
/// </param><param name="segment_ids">
/// A 1-D tensor. Values should be sorted and can be repeated.
/// </param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'SparseSegmentMean'.
/// </param><returns>
/// Has same shape as data, except for dimension 0 which
/// has size <c>k</c>, the number of segments.
/// The TFOperation can be fetched from the resulting TFOutput, by fethching the Operation property from the result.
/// </returns><remarks>
/// Read
/// [the section on segmentation](https://tensorflow.org/api_guides/python/math_ops#Segmentation)
/// for an explanation of segments.
///
/// Like <c>SegmentMean</c>, but <c>segment_ids</c> can have rank less than <c>data</c>'s first
/// dimension, selecting a subset of dimension 0, specified by <c>indices</c>.
/// </remarks>
member this.SparseSegmentMean(data : TF, indices : TF, segment_ids : TF, ?operName : String) =
let operName = defaultArg operName null
TF(this.TFGraph, this.TFGraph.SparseSegmentMean(data.TFOutput, indices.TFOutput, segment_ids.TFOutput, operName))
/// <summary>
/// Computes gradients for SparseSegmentMean.
/// </summary><param name="grad">
/// gradient propagated to the SparseSegmentMean op.
/// </param><param name="indices">
/// indices passed to the corresponding SparseSegmentMean op.
/// </param><param name="segment_ids">
/// segment_ids passed to the corresponding SparseSegmentMean op.
/// </param><param name="output_dim0">
/// dimension 0 of "data" passed to SparseSegmentMean op.
/// </param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'SparseSegmentMeanGrad'.
/// </param><returns>
/// The TFOperation can be fetched from the resulting TFOutput, by fethching the Operation property from the result.
/// </returns><remarks>
/// Returns tensor "output" with same shape as grad, except for dimension 0 whose
/// value is output_dim0.
/// </remarks>
member this.SparseSegmentMeanGrad(grad : TF, indices : TF, segment_ids : TF, output_dim0 : TF, ?operName : String) =
let operName = defaultArg operName null
TF(this.TFGraph, this.TFGraph.SparseSegmentMeanGrad(grad.TFOutput, indices.TFOutput, segment_ids.TFOutput, output_dim0.TFOutput, operName))
/// <summary>
/// Computes the mean along sparse segments of a tensor.
/// </summary><param name="data"></param><param name="indices">
/// A 1-D tensor. Has same rank as <c>segment_ids</c>.
/// </param><param name="segment_ids">
/// A 1-D tensor. Values should be sorted and can be repeated.
/// </param><param name="num_segments">
/// Should equal the number of distinct segment IDs.
/// </param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'SparseSegmentMeanWithNumSegments'.
/// </param><returns>
/// Has same shape as data, except for dimension 0 which has size
/// <c>num_segments</c>.
/// The TFOperation can be fetched from the resulting TFOutput, by fethching the Operation property from the result.
/// </returns><remarks>
/// Like <c>SparseSegmentMean</c>, but allows missing ids in <c>segment_ids</c>. If an id is
/// misisng, the <c>output</c> tensor at that position will be zeroed.
///
/// Read
/// [the section on segmentation](https://tensorflow.org/api_guides/python/math_ops#Segmentation)
/// for an explanation of segments.
/// </remarks>
member this.SparseSegmentMeanWithNumSegments(data : TF, indices : TF, segment_ids : TF, num_segments : TF, ?operName : String) =
let operName = defaultArg operName null
TF(this.TFGraph, this.TFGraph.SparseSegmentMeanWithNumSegments(data.TFOutput, indices.TFOutput, segment_ids.TFOutput, num_segments.TFOutput, operName))
/// <summary>
/// Computes the sum along sparse segments of a tensor divided by the sqrt of N.
/// </summary><param name="data"></param><param name="indices">
/// A 1-D tensor. Has same rank as <c>segment_ids</c>.
/// </param><param name="segment_ids">
/// A 1-D tensor. Values should be sorted and can be repeated.
/// </param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'SparseSegmentSqrtN'.
/// </param><returns>
/// Has same shape as data, except for dimension 0 which
/// has size <c>k</c>, the number of segments.
/// The TFOperation can be fetched from the resulting TFOutput, by fethching the Operation property from the result.
/// </returns><remarks>
/// N is the size of the segment being reduced.
///
/// Read
/// [the section on segmentation](https://tensorflow.org/api_guides/python/math_ops#Segmentation)
/// for an explanation of segments.
/// </remarks>
member this.SparseSegmentSqrtN(data : TF, indices : TF, segment_ids : TF, ?operName : String) =
let operName = defaultArg operName null
TF(this.TFGraph, this.TFGraph.SparseSegmentSqrtN(data.TFOutput, indices.TFOutput, segment_ids.TFOutput, operName))
/// <summary>
/// Computes gradients for SparseSegmentSqrtN.
/// </summary><param name="grad">
/// gradient propagated to the SparseSegmentSqrtN op.
/// </param><param name="indices">
/// indices passed to the corresponding SparseSegmentSqrtN op.
/// </param><param name="segment_ids">
/// segment_ids passed to the corresponding SparseSegmentSqrtN op.
/// </param><param name="output_dim0">
/// dimension 0 of "data" passed to SparseSegmentSqrtN op.
/// </param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'SparseSegmentSqrtNGrad'.
/// </param><returns>
/// The TFOperation can be fetched from the resulting TFOutput, by fethching the Operation property from the result.
/// </returns><remarks>
/// Returns tensor "output" with same shape as grad, except for dimension 0 whose
/// value is output_dim0.
/// </remarks>
member this.SparseSegmentSqrtNGrad(grad : TF, indices : TF, segment_ids : TF, output_dim0 : TF, ?operName : String) =
let operName = defaultArg operName null
TF(this.TFGraph, this.TFGraph.SparseSegmentSqrtNGrad(grad.TFOutput, indices.TFOutput, segment_ids.TFOutput, output_dim0.TFOutput, operName))
/// <summary>
/// Computes the sum along sparse segments of a tensor divided by the sqrt of N.
/// </summary><param name="data"></param><param name="indices">
/// A 1-D tensor. Has same rank as <c>segment_ids</c>.
/// </param><param name="segment_ids">
/// A 1-D tensor. Values should be sorted and can be repeated.
/// </param><param name="num_segments">
/// Should equal the number of distinct segment IDs.
/// </param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'SparseSegmentSqrtNWithNumSegments'.
/// </param><returns>
/// Has same shape as data, except for dimension 0 which
/// has size <c>k</c>, the number of segments.
/// The TFOperation can be fetched from the resulting TFOutput, by fethching the Operation property from the result.
/// </returns><remarks>
/// N is the size of the segment being reduced.
///
/// Like <c>SparseSegmentSqrtN</c>, but allows missing ids in <c>segment_ids</c>. If an id is
/// misisng, the <c>output</c> tensor at that position will be zeroed.
///
/// Read
/// [the section on segmentation](https://tensorflow.org/api_guides/python/math_ops#Segmentation)
/// for an explanation of segments.
/// </remarks>
member this.SparseSegmentSqrtNWithNumSegments(data : TF, indices : TF, segment_ids : TF, num_segments : TF, ?operName : String) =
let operName = defaultArg operName null
TF(this.TFGraph, this.TFGraph.SparseSegmentSqrtNWithNumSegments(data.TFOutput, indices.TFOutput, segment_ids.TFOutput, num_segments.TFOutput, operName))
/// <summary>
/// Computes the sum along sparse segments of a tensor.
/// </summary><param name="data"></param><param name="indices">
/// A 1-D tensor. Has same rank as <c>segment_ids</c>.
/// </param><param name="segment_ids">
/// A 1-D tensor. Values should be sorted and can be repeated.
/// </param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'SparseSegmentSum'.
/// </param><returns>
/// Has same shape as data, except for dimension 0 which
/// has size <c>k</c>, the number of segments.
/// The TFOperation can be fetched from the resulting TFOutput, by fethching the Operation property from the result.
/// </returns><remarks>
/// Read
/// [the section on segmentation](https://tensorflow.org/api_guides/python/math_ops#Segmentation)
/// for an explanation of segments.
///
/// Like <c>SegmentSum</c>, but <c>segment_ids</c> can have rank less than <c>data</c>'s first
/// dimension, selecting a subset of dimension 0, specified by <c>indices</c>.
///
/// For example:
///
/// <code>
/// c = tf.constant([[1,2,3,4], [-1,-2,-3,-4], [5,6,7,8]])
///
/// # Select two rows, one segment.
/// tf.sparse_segment_sum(c, tf.constant([0, 1]), tf.constant([0, 0]))
/// # =&amp;gt; [[0 0 0 0]]
///
/// # Select two rows, two segment.
/// tf.sparse_segment_sum(c, tf.constant([0, 1]), tf.constant([0, 1]))
/// # =&amp;gt; [[ 1 2 3 4]
/// # [-1 -2 -3 -4]]
///
/// # Select all rows, two segments.
/// tf.sparse_segment_sum(c, tf.constant([0, 1, 2]), tf.constant([0, 0, 1]))
/// # =&amp;gt; [[0 0 0 0]
/// # [5 6 7 8]]
///
/// # Which is equivalent to:
/// tf.segment_sum(c, tf.constant([0, 0, 1]))
/// </code></remarks>
member this.SparseSegmentSum(data : TF, indices : TF, segment_ids : TF, ?operName : String) =
let operName = defaultArg operName null
TF(this.TFGraph, this.TFGraph.SparseSegmentSum(data.TFOutput, indices.TFOutput, segment_ids.TFOutput, operName))
/// <summary>
/// Computes the sum along sparse segments of a tensor.
/// </summary><param name="data"></param><param name="indices">
/// A 1-D tensor. Has same rank as <c>segment_ids</c>.
/// </param><param name="segment_ids">
/// A 1-D tensor. Values should be sorted and can be repeated.
/// </param><param name="num_segments">
/// Should equal the number of distinct segment IDs.
/// </param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'SparseSegmentSumWithNumSegments'.
/// </param><returns>
/// Has same shape as data, except for dimension 0 which
/// has size <c>num_segments</c>.
/// The TFOperation can be fetched from the resulting TFOutput, by fethching the Operation property from the result.
/// </returns><remarks>
/// Like <c>SparseSegmentSum</c>, but allows missing ids in <c>segment_ids</c>. If an id is
/// misisng, the <c>output</c> tensor at that position will be zeroed.
///
/// Read
/// [the section on segmentation](https://tensorflow.org/api_guides/python/math_ops#Segmentation)
/// for an explanation of segments.
///
/// For example:
///
/// <code>
/// c = tf.constant([[1,2,3,4], [-1,-2,-3,-4], [5,6,7,8]])
///
/// tf.sparse_segment_sum_with_num_segments(
/// c, tf.constant([0, 1]), tf.constant([0, 0]), num_segments=3)
/// # =&amp;gt; [[0 0 0 0]
/// # [0 0 0 0]
/// # [0 0 0 0]]
///
/// tf.sparse_segment_sum_with_num_segments(c,
/// tf.constant([0, 1]),
/// tf.constant([0, 2],
/// num_segments=4))
/// # =&amp;gt; [[ 1 2 3 4]
/// # [ 0 0 0 0]
/// # [-1 -2 -3 -4]
/// # [ 0 0 0 0]]
/// </code></remarks>
member this.SparseSegmentSumWithNumSegments(data : TF, indices : TF, segment_ids : TF, num_segments : TF, ?operName : String) =
let operName = defaultArg operName null
TF(this.TFGraph, this.TFGraph.SparseSegmentSumWithNumSegments(data.TFOutput, indices.TFOutput, segment_ids.TFOutput, num_segments.TFOutput, operName))
/// <summary>
/// Slice a <c>SparseTensor</c> based on the <c>start</c> and <c>size</c>.
/// </summary><param name="indices">
/// 2-D tensor represents the indices of the sparse tensor.
/// </param><param name="values">
/// 1-D tensor represents the values of the sparse tensor.
/// </param><param name="shape">
/// 1-D. tensor represents the shape of the sparse tensor.
/// </param><param name="start">
/// 1-D. tensor represents the start of the slice.
/// </param><param name="size">
/// 1-D. tensor represents the size of the slice.
/// output indices: A list of 1-D tensors represents the indices of the output
/// sparse tensors.
/// </param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'SparseSlice'.
/// </param><returns>
/// Returns a tuple with multiple values, as follows:
/// output_indices:
/// output_values: A list of 1-D tensors represents the values of the output sparse
/// tensors.
/// output_shape: A list of 1-D tensors represents the shape of the output sparse
/// tensors.
/// The TFOperation can be fetched from any of the TFOutputs returned in the tuple values, by fethching the Operation property.
/// </returns><remarks>
/// For example, if the input is
///
/// input_tensor = shape = [2, 7]
/// [ a d e ]
/// [b c ]
///
/// Graphically the output tensors are:
///
/// sparse_slice([0, 0], [2, 4]) = shape = [2, 4]
/// [ a ]
/// [b c ]
///
/// sparse_slice([0, 4], [2, 3]) = shape = [2, 3]
/// [ d e ]
/// [ ]
/// </remarks>
member this.SparseSlice(indices : TF, values : TF, shape : TF, start : TF, size : TF, ?operName : String) =
let operName = defaultArg operName null
this.TFGraph.SparseSlice(indices.TFOutput, values.TFOutput, shape.TFOutput, start.TFOutput, size.TFOutput, operName)
/// <summary>
/// The gradient operator for the SparseSlice op.
/// </summary><param name="backprop_val_grad">
/// 1-D. The gradient with respect to
/// the non-empty values of the sliced <c>SparseTensor</c>.
/// </param><param name="input_indices">
/// 2-D. The <c>indices</c> of the input <c>SparseTensor</c>.
/// </param><param name="input_start">
/// 1-D. tensor represents the start of the slice.
/// </param><param name="output_indices">
/// 2-D. The <c>indices</c> of the sliced <c>SparseTensor</c>.
/// </param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'SparseSliceGrad'.
/// </param><returns>
/// 1-D. The gradient with respect to the non-empty values of input <c>SparseTensor</c>.
/// The TFOperation can be fetched from the resulting TFOutput, by fethching the Operation property from the result.
/// </returns><remarks>
/// This op takes in the upstream gradient w.r.t. non-empty values of
/// the sliced <c>SparseTensor</c>, and outputs the gradients w.r.t.
/// the non-empty values of input <c>SparseTensor</c>.
/// </remarks>
member this.SparseSliceGrad(backprop_val_grad : TF, input_indices : TF, input_start : TF, output_indices : TF, ?operName : String) =
let operName = defaultArg operName null
TF(this.TFGraph, this.TFGraph.SparseSliceGrad(backprop_val_grad.TFOutput, input_indices.TFOutput, input_start.TFOutput, output_indices.TFOutput, operName))
/// <summary>
/// Applies softmax to a batched N-D <c>SparseTensor</c>.
/// </summary><param name="sp_indices">
/// 2-D. <c>NNZ x R</c> matrix with the indices of non-empty values in a
/// SparseTensor, in canonical ordering.
/// </param><param name="sp_values">
/// 1-D. <c>NNZ</c> non-empty values corresponding to <c>sp_indices</c>.
/// </param><param name="sp_shape">
/// 1-D. Shape of the input SparseTensor.
/// </param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'SparseSoftmax'.
/// </param><returns>
/// 1-D. The <c>NNZ</c> values for the result <c>SparseTensor</c>.
/// The TFOperation can be fetched from the resulting TFOutput, by fethching the Operation property from the result.
/// </returns><remarks>
/// The inputs represent an N-D SparseTensor with logical shape <c>[..., B, C]</c>
/// (where <c>N &amp;gt;= 2</c>), and with indices sorted in the canonical lexicographic order.
///
/// This op is equivalent to applying the normal <c>tf.nn.softmax()</c> to each innermost
/// logical submatrix with shape <c>[B, C]</c>, but with the catch that *the implicitly
/// zero elements do not participate*. Specifically, the algorithm is equivalent
/// to the following:
///
/// (1) Applies <c>tf.nn.softmax()</c> to a densified view of each innermost submatrix
/// with shape <c>[B, C]</c>, along the size-C dimension;
/// (2) Masks out the original implicitly-zero locations;
/// (3) Renormalizes the remaining elements.
///
/// Hence, the <c>SparseTensor</c> result has exactly the same non-zero indices and
/// shape.
/// </remarks>
member this.SparseSoftmax(sp_indices : TF, sp_values : TF, sp_shape : TF, ?operName : String) =
let operName = defaultArg operName null
TF(this.TFGraph, this.TFGraph.SparseSoftmax(sp_indices.TFOutput, sp_values.TFOutput, sp_shape.TFOutput, operName))
/// <summary>
/// Computes softmax cross entropy cost and gradients to backpropagate.
/// </summary><param name="features">
/// batch_size x num_classes matrix
/// </param><param name="labels">
/// batch_size vector with values in [0, num_classes).
/// This is the label for the given minibatch entry.
/// </param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'SparseSoftmaxCrossEntropyWithLogits'.
/// </param><returns>
/// Returns a tuple with multiple values, as follows:
/// loss: Per example loss (batch_size vector).
/// backprop: backpropagated gradients (batch_size x num_classes matrix).
/// The TFOperation can be fetched from any of the TFOutputs returned in the tuple values, by fethching the Operation property.
/// </returns><remarks>
/// Unlike <c>SoftmaxCrossEntropyWithLogits</c>, this operation does not accept
/// a matrix of label probabilities, but rather a single label per row
/// of features. This label is considered to have probability 1.0 for the
/// given row.
///
/// Inputs are the logits, not probabilities.
/// </remarks>
member this.SparseSoftmaxCrossEntropyWithLogits(features : TF, labels : TF, ?operName : String) =
let operName = defaultArg operName null
this.TFGraph.SparseSoftmaxCrossEntropyWithLogits(features.TFOutput, labels.TFOutput, operName)
/// <summary>
/// Returns the element-wise max of two SparseTensors.
/// </summary><param name="a_indices">
/// 2-D. <c>N x R</c> matrix with the indices of non-empty values in a
/// SparseTensor, in the canonical lexicographic ordering.
/// </param><param name="a_values">
/// 1-D. <c>N</c> non-empty values corresponding to <c>a_indices</c>.
/// </param><param name="a_shape">
/// 1-D. Shape of the input SparseTensor.
/// </param><param name="b_indices">
/// counterpart to <c>a_indices</c> for the other operand.
/// </param><param name="b_values">
/// counterpart to <c>a_values</c> for the other operand; must be of the same dtype.
/// </param><param name="b_shape">
/// counterpart to <c>a_shape</c> for the other operand; the two shapes must be equal.
/// </param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'SparseSparseMaximum'.
/// </param><returns>
/// Returns a tuple with multiple values, as follows:
/// output_indices: 2-D. The indices of the output SparseTensor.
/// output_values: 1-D. The values of the output SparseTensor.
/// The TFOperation can be fetched from any of the TFOutputs returned in the tuple values, by fethching the Operation property.
/// </returns><remarks>
/// Assumes the two SparseTensors have the same shape, i.e., no broadcasting.
/// </remarks>
member this.SparseSparseMaximum(a_indices : TF, a_values : TF, a_shape : TF, b_indices : TF, b_values : TF, b_shape : TF, ?operName : String) =
let operName = defaultArg operName null
this.TFGraph.SparseSparseMaximum(a_indices.TFOutput, a_values.TFOutput, a_shape.TFOutput, b_indices.TFOutput, b_values.TFOutput, b_shape.TFOutput, operName)
/// <summary>
/// Returns the element-wise min of two SparseTensors.
/// </summary><param name="a_indices">
/// 2-D. <c>N x R</c> matrix with the indices of non-empty values in a
/// SparseTensor, in the canonical lexicographic ordering.
/// </param><param name="a_values">
/// 1-D. <c>N</c> non-empty values corresponding to <c>a_indices</c>.
/// </param><param name="a_shape">
/// 1-D. Shape of the input SparseTensor.
/// </param><param name="b_indices">
/// counterpart to <c>a_indices</c> for the other operand.
/// </param><param name="b_values">
/// counterpart to <c>a_values</c> for the other operand; must be of the same dtype.
/// </param><param name="b_shape">
/// counterpart to <c>a_shape</c> for the other operand; the two shapes must be equal.
/// </param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'SparseSparseMinimum'.
/// </param><returns>
/// Returns a tuple with multiple values, as follows:
/// output_indices: 2-D. The indices of the output SparseTensor.
/// output_values: 1-D. The values of the output SparseTensor.
/// The TFOperation can be fetched from any of the TFOutputs returned in the tuple values, by fethching the Operation property.
/// </returns><remarks>
/// Assumes the two SparseTensors have the same shape, i.e., no broadcasting.
/// </remarks>
member this.SparseSparseMinimum(a_indices : TF, a_values : TF, a_shape : TF, b_indices : TF, b_values : TF, b_shape : TF, ?operName : String) =
let operName = defaultArg operName null
this.TFGraph.SparseSparseMinimum(a_indices.TFOutput, a_values.TFOutput, a_shape.TFOutput, b_indices.TFOutput, b_values.TFOutput, b_shape.TFOutput, operName)
/// <summary>
/// Split a <c>SparseTensor</c> into <c>num_split</c> tensors along one dimension.
/// </summary><param name="split_dim">
/// 0-D. The dimension along which to split. Must be in the range
/// <c>[0, rank(shape))</c>.
/// </param><param name="indices">
/// 2-D tensor represents the indices of the sparse tensor.
/// </param><param name="values">
/// 1-D tensor represents the values of the sparse tensor.
/// </param><param name="shape">
/// 1-D. tensor represents the shape of the sparse tensor.
/// output indices: A list of 1-D tensors represents the indices of the output
/// sparse tensors.
/// </param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'SparseSplit'.
/// </param><param name="num_split">
/// The number of ways to split.
/// </param><returns>
/// Returns a tuple with multiple values, as follows:
/// output_indices:
/// output_values: A list of 1-D tensors represents the values of the output sparse
/// tensors.
/// output_shape: A list of 1-D tensors represents the shape of the output sparse
/// tensors.
/// The TFOperation can be fetched from any of the TFOutputs returned in the tuple values, by fethching the Operation property.
/// </returns><remarks>
/// If the <c>shape[split_dim]</c> is not an integer multiple of <c>num_split</c>. Slices
/// <c>[0 : shape[split_dim] % num_split]</c> gets one extra dimension.
/// For example, if <c>split_dim = 1</c> and <c>num_split = 2</c> and the input is
///
/// input_tensor = shape = [2, 7]
/// [ a d e ]
/// [b c ]
///
/// Graphically the output tensors are:
///
/// output_tensor[0] = shape = [2, 4]
/// [ a ]
/// [b c ]
///
/// output_tensor[1] = shape = [2, 3]
/// [ d e ]
/// [ ]
/// </remarks>
member this.SparseSplit(split_dim : TF, indices : TF, values : TF, shape : TF, num_split : Int64, ?operName : String) =
let operName = defaultArg operName null
this.TFGraph.SparseSplit(split_dim.TFOutput, indices.TFOutput, values.TFOutput, shape.TFOutput, num_split, operName)
/// <summary>
/// Adds up a <c>SparseTensor</c> and a dense <c>Tensor</c>, producing a dense <c>Tensor</c>.
/// </summary><param name="a_indices">
/// 2-D. The <c>indices</c> of the <c>SparseTensor</c>, with shape <c>[nnz, ndims]</c>.
/// </param><param name="a_values">
/// 1-D. The <c>values</c> of the <c>SparseTensor</c>, with shape <c>[nnz]</c>.
/// </param><param name="a_shape">
/// 1-D. The <c>shape</c> of the <c>SparseTensor</c>, with shape <c>[ndims]</c>.
/// </param><param name="b"><c>ndims</c>-D Tensor. With shape <c>a_shape</c>.
/// </param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'SparseTensorDenseAdd'.
/// </param><returns>
/// The TFOperation can be fetched from the resulting TFOutput, by fethching the Operation property from the result.
/// </returns><remarks>
/// This Op does not require <c>a_indices</c> be sorted in standard lexicographic order.
/// </remarks>
member this.SparseTensorDenseAdd(a_indices : TF, a_values : TF, a_shape : TF, b : TF, ?operName : String) =
let operName = defaultArg operName null
TF(this.TFGraph, this.TFGraph.SparseTensorDenseAdd(a_indices.TFOutput, a_values.TFOutput, a_shape.TFOutput, b.TFOutput, operName))
/// <summary>
/// Multiply SparseTensor (of rank 2) "A" by dense matrix "B".
/// </summary><param name="a_indices">
/// 2-D. The <c>indices</c> of the <c>SparseTensor</c>, size <c>[nnz, 2]</c> Matrix.
/// </param><param name="a_values">
/// 1-D. The <c>values</c> of the <c>SparseTensor</c>, size <c>[nnz]</c> Vector.
/// </param><param name="a_shape">
/// 1-D. The <c>shape</c> of the <c>SparseTensor</c>, size <c>[2]</c> Vector.
/// </param><param name="b">
/// 2-D. A dense Matrix.
/// </param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'SparseTensorDenseMatMul'.
/// </param><param name="adjoint_a">
/// Optional argument
/// Use the adjoint of A in the matrix multiply. If A is complex, this
/// is transpose(conj(A)). Otherwise it's transpose(A).
/// </param><param name="adjoint_b">
/// Optional argument
/// Use the adjoint of B in the matrix multiply. If B is complex, this
/// is transpose(conj(B)). Otherwise it's transpose(B).
/// </param><returns>
/// The TFOperation can be fetched from the resulting TFOutput, by fethching the Operation property from the result.
/// </returns><remarks>
/// No validity checking is performed on the indices of A. However, the following
/// input format is recommended for optimal behavior:
///
/// if adjoint_a == false:
/// A should be sorted in lexicographically increasing order. Use SparseReorder
/// if you're not sure.
/// if adjoint_a == true:
/// A should be sorted in order of increasing dimension 1 (i.e., "column major"
/// order instead of "row major" order).
/// </remarks>
member this.SparseTensorDenseMatMul(a_indices : TF, a_values : TF, a_shape : TF, b : TF, ?adjoint_a : Boolean, ?adjoint_b : Boolean, ?operName : String) =
let adjoint_a = defaultArg (adjoint_a |> Option.map Nullable) (Nullable())
let adjoint_b = defaultArg (adjoint_b |> Option.map Nullable) (Nullable())
let operName = defaultArg operName null
TF(this.TFGraph, this.TFGraph.SparseTensorDenseMatMul(a_indices.TFOutput, a_values.TFOutput, a_shape.TFOutput, b.TFOutput, adjoint_a, adjoint_b, operName))
/// <summary>
/// Creates a dataset that splits a SparseTensor into elements row-wise.
/// </summary><param name="indices"></param><param name="values"></param><param name="dense_shape"></param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'SparseTensorSliceDataset'.
/// </param><returns>
/// The TFOperation can be fetched from the resulting TFOutput, by fethching the Operation property from the result.
/// </returns>
member this.SparseTensorSliceDataset(indices : TF, values : TF, dense_shape : TF, ?operName : String) =
let operName = defaultArg operName null
TF(this.TFGraph, this.TFGraph.SparseTensorSliceDataset(indices.TFOutput, values.TFOutput, dense_shape.TFOutput, operName))
/// <summary>
/// Converts a sparse representation into a dense tensor.
/// </summary><param name="sparse_indices">
/// 0-D, 1-D, or 2-D. <c>sparse_indices[i]</c> contains the complete
/// index where <c>sparse_values[i]</c> will be placed.
/// </param><param name="output_shape">
/// 1-D. Shape of the dense output tensor.
/// </param><param name="sparse_values">
/// 1-D. Values corresponding to each row of <c>sparse_indices</c>,
/// or a scalar value to be used for all sparse indices.
/// </param><param name="default_value">
/// Scalar value to set for indices not specified in
/// <c>sparse_indices</c>.
/// </param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'SparseToDense'.
/// </param><param name="validate_indices">
/// Optional argument
/// If true, indices are checked to make sure they are sorted in
/// lexicographic order and that there are no repeats.
/// </param><returns>
/// Dense output tensor of shape <c>output_shape</c>.
/// The TFOperation can be fetched from the resulting TFOutput, by fethching the Operation property from the result.
/// </returns><remarks>
/// Builds an array <c>dense</c> with shape <c>output_shape</c> such that
///
/// <code>
/// # If sparse_indices is scalar
/// dense[i] = (i == sparse_indices ? sparse_values : default_value)
///
/// # If sparse_indices is a vector, then for each i
/// dense[sparse_indices[i]] = sparse_values[i]
///
/// # If sparse_indices is an n by d matrix, then for each i in [0, n)
/// dense[sparse_indices[i][0], ..., sparse_indices[i][d-1]] = sparse_values[i]
/// </code>
///
/// All other values in <c>dense</c> are set to <c>default_value</c>. If <c>sparse_values</c> is a
/// scalar, all sparse indices are set to this single value.
///
/// Indices should be sorted in lexicographic order, and indices must not
/// contain any repeats. If <c>validate_indices</c> is true, these properties
/// are checked during execution.
/// </remarks>
member this.SparseToDense(sparse_indices : TF, output_shape : TF, sparse_values : TF, default_value : TF, ?validate_indices : Boolean, ?operName : String) =
let validate_indices = defaultArg (validate_indices |> Option.map Nullable) (Nullable())
let operName = defaultArg operName null
TF(this.TFGraph, this.TFGraph.SparseToDense(sparse_indices.TFOutput, output_shape.TFOutput, sparse_values.TFOutput, default_value.TFOutput, validate_indices, operName))
/// <summary>
/// Applies set operation along last dimension of 2 <c>SparseTensor</c> inputs.
/// </summary><param name="set1_indices">
/// 2D <c>Tensor</c>, indices of a <c>SparseTensor</c>. Must be in row-major
/// order.
/// </param><param name="set1_values">
/// 1D <c>Tensor</c>, values of a <c>SparseTensor</c>. Must be in row-major
/// order.
/// </param><param name="set1_shape">
/// 1D <c>Tensor</c>, shape of a <c>SparseTensor</c>. <c>set1_shape[0...n-1]</c> must
/// be the same as <c>set2_shape[0...n-1]</c>, <c>set1_shape[n]</c> is the
/// max set size across <c>0...n-1</c> dimensions.
/// </param><param name="set2_indices">
/// 2D <c>Tensor</c>, indices of a <c>SparseTensor</c>. Must be in row-major
/// order.
/// </param><param name="set2_values">
/// 1D <c>Tensor</c>, values of a <c>SparseTensor</c>. Must be in row-major
/// order.
/// </param><param name="set2_shape">
/// 1D <c>Tensor</c>, shape of a <c>SparseTensor</c>. <c>set2_shape[0...n-1]</c> must
/// be the same as <c>set1_shape[0...n-1]</c>, <c>set2_shape[n]</c> is the
/// max set size across <c>0...n-1</c> dimensions.
/// </param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'SparseToSparseSetOperation'.
/// </param><param name="validate_indices">
/// Optional argument
/// </param><param name="set_operation"></param><returns>
/// Returns a tuple with multiple values, as follows:
/// result_indices: 2D indices of a <c>SparseTensor</c>.
/// result_values: 1D values of a <c>SparseTensor</c>.
/// result_shape: 1D <c>Tensor</c> shape of a <c>SparseTensor</c>. <c>result_shape[0...n-1]</c> is
/// the same as the 1st <c>n-1</c> dimensions of <c>set1</c> and <c>set2</c>, <c>result_shape[n]</c>
/// is the max result set size across all <c>0...n-1</c> dimensions.
/// The TFOperation can be fetched from any of the TFOutputs returned in the tuple values, by fethching the Operation property.
/// </returns><remarks>
/// See SetOperationOp::SetOperationFromContext for values of <c>set_operation</c>.
///
/// If <c>validate_indices</c> is <c>True</c>, <c>SparseToSparseSetOperation</c> validates the
/// order and range of <c>set1</c> and <c>set2</c> indices.
///
/// Input <c>set1</c> is a <c>SparseTensor</c> represented by <c>set1_indices</c>, <c>set1_values</c>,
/// and <c>set1_shape</c>. For <c>set1</c> ranked <c>n</c>, 1st <c>n-1</c> dimensions must be the same
/// as <c>set2</c>. Dimension <c>n</c> contains values in a set, duplicates are allowed but
/// ignored.
///
/// Input <c>set2</c> is a <c>SparseTensor</c> represented by <c>set2_indices</c>, <c>set2_values</c>,
/// and <c>set2_shape</c>. For <c>set2</c> ranked <c>n</c>, 1st <c>n-1</c> dimensions must be the same
/// as <c>set1</c>. Dimension <c>n</c> contains values in a set, duplicates are allowed but
/// ignored.
///
/// If <c>validate_indices</c> is <c>True</c>, this op validates the order and range of <c>set1</c>
/// and <c>set2</c> indices.
///
/// Output <c>result</c> is a <c>SparseTensor</c> represented by <c>result_indices</c>,
/// <c>result_values</c>, and <c>result_shape</c>. For <c>set1</c> and <c>set2</c> ranked <c>n</c>, this
/// has rank <c>n</c> and the same 1st <c>n-1</c> dimensions as <c>set1</c> and <c>set2</c>. The <c>nth</c>
/// dimension contains the result of <c>set_operation</c> applied to the corresponding
/// <c>[0...n-1]</c> dimension of <c>set</c>.
/// </remarks>
member this.SparseToSparseSetOperation(set1_indices : TF, set1_values : TF, set1_shape : TF, set2_indices : TF, set2_values : TF, set2_shape : TF, set_operation : String, ?validate_indices : Boolean, ?operName : String) =
let validate_indices = defaultArg (validate_indices |> Option.map Nullable) (Nullable())
let operName = defaultArg operName null
this.TFGraph.SparseToSparseSetOperation(set1_indices.TFOutput, set1_values.TFOutput, set1_shape.TFOutput, set2_indices.TFOutput, set2_values.TFOutput, set2_shape.TFOutput, set_operation, validate_indices, operName)
/// <summary>
/// Splits a tensor into <c>num_split</c> tensors along one dimension.
/// </summary><param name="split_dim">
/// 0-D. The dimension along which to split. Must be in the range
/// <c>[-rank(value), rank(value))</c>.
/// </param><param name="value">
/// The tensor to split.
/// </param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'Split'.
/// </param><param name="num_split">
/// The number of ways to split. Must evenly divide
/// <c>value.shape[split_dim]</c>.
/// </param><returns>
/// They are identically shaped tensors, whose shape matches that of <c>value</c>
/// except along <c>axis</c>, where their sizes are
/// <c>values.shape[split_dim] / num_split</c>.
/// The TFOperation can be fetched from the resulting TFOutput, by fethching the Operation property from the result.
/// </returns>
member this.Split(split_dim : TF, value : TF, num_split : Int64, ?operName : String) =
let operName = defaultArg operName null
this.TFGraph.Split(split_dim.TFOutput, value.TFOutput, num_split, operName) |> Array.map (fun i -> TF(this.TFGraph, i))
/// <summary>
/// Splits a tensor into <c>num_split</c> tensors along one dimension.
/// </summary><param name="value">
/// The tensor to split.
/// </param><param name="size_splits">
/// list containing the sizes of each output tensor along the split
/// dimension. Must sum to the dimension of value along split_dim.
/// Can contain one -1 indicating that dimension is to be inferred.
/// </param><param name="split_dim">
/// 0-D. The dimension along which to split. Must be in the range
/// <c>[-rank(value), rank(value))</c>.
/// </param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'SplitV'.
/// </param><param name="num_split"></param><returns>
/// Tensors whose shape matches that of <c>value</c>
/// except along <c>axis</c>, where their sizes are
/// <c>size_splits[i]</c>.
/// The TFOperation can be fetched from the resulting TFOutput, by fethching the Operation property from the result.
/// </returns>
member this.SplitV(value : TF, size_splits : TF, split_dim : TF, num_split : Int64, ?operName : String) =
let operName = defaultArg operName null
this.TFGraph.SplitV(value.TFOutput, size_splits.TFOutput, split_dim.TFOutput, num_split, operName) |> Array.map (fun i -> TF(this.TFGraph, i))
/// <summary>
/// Computes square root of x element-wise.
/// </summary><param name="x"></param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'Sqrt'.
/// </param><returns>
/// The TFOperation can be fetched from the resulting TFOutput, by fethching the Operation property from the result.
/// </returns><remarks>
/// I.e., \\(y = \sqrt{x} = x^{1/2}\\).
/// </remarks>
member this.Sqrt(x : TF, ?operName : String) =
let operName = defaultArg operName null
TF(this.TFGraph, this.TFGraph.Sqrt(x.TFOutput, operName))
/// <summary>
/// Computes the gradient for the sqrt of <c>x</c> wrt its input.
/// </summary><param name="y"></param><param name="dy"></param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'SqrtGrad'.
/// </param><returns>
/// The TFOperation can be fetched from the resulting TFOutput, by fethching the Operation property from the result.
/// </returns><remarks>
/// Specifically, <c>grad = dy * 0.5 / y</c>, where <c>y = sqrt(x)</c>, and <c>dy</c>
/// is the corresponding input gradient.
/// </remarks>
member this.SqrtGrad(y : TF, dy : TF, ?operName : String) =
let operName = defaultArg operName null
TF(this.TFGraph, this.TFGraph.SqrtGrad(y.TFOutput, dy.TFOutput, operName))
/// <summary>
/// Computes square of x element-wise.
/// </summary><param name="x"></param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'Square'.
/// </param><returns>
/// The TFOperation can be fetched from the resulting TFOutput, by fethching the Operation property from the result.
/// </returns><remarks>
/// I.e., \\(y = x * x = x^2\\).
/// </remarks>
member this.Square(x : TF, ?operName : String) =
let operName = defaultArg operName null
TF(this.TFGraph, this.TFGraph.Square(x.TFOutput, operName))
/// <summary>
/// Returns (x - y)(x - y) element-wise.
/// </summary><param name="x"></param><param name="y"></param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'SquaredDifference'.
/// </param><returns>
/// The TFOperation can be fetched from the resulting TFOutput, by fethching the Operation property from the result.
/// </returns><remarks>
/// *NOTE*: <c>SquaredDifference</c> supports broadcasting. More about broadcasting
/// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
/// </remarks>
member this.SquaredDifference(x : TF, y : TF, ?operName : String) =
let operName = defaultArg operName null
TF(this.TFGraph, this.TFGraph.SquaredDifference(x.TFOutput, y.TFOutput, operName))
/// <summary>
/// Removes dimensions of size 1 from the shape of a tensor.
/// </summary><param name="input">
/// The <c>input</c> to squeeze.
/// </param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'Squeeze'.
/// </param><param name="squeeze_dims">
/// Optional argument
/// If specified, only squeezes the dimensions listed. The dimension
/// index starts at 0. It is an error to squeeze a dimension that is not 1. Must
/// be in the range <c>[-rank(input), rank(input))</c>.
/// </param><returns>
/// Contains the same data as <c>input</c>, but has one or more dimensions of
/// size 1 removed.
/// The TFOperation can be fetched from the resulting TFOutput, by fethching the Operation property from the result.
/// </returns><remarks>
/// Given a tensor <c>input</c>, this operation returns a tensor of the same type with
/// all dimensions of size 1 removed. If you don't want to remove all size 1
/// dimensions, you can remove specific size 1 dimensions by specifying
/// <c>axis</c>.
///
/// For example:
///
/// <code>
/// # 't' is a tensor of shape [1, 2, 1, 3, 1, 1]
/// shape(squeeze(t)) ==&amp;gt; [2, 3]
/// </code>
///
/// Or, to remove specific size 1 dimensions:
///
/// <code>
/// # 't' is a tensor of shape [1, 2, 1, 3, 1, 1]
/// shape(squeeze(t, [2, 4])) ==&amp;gt; [1, 2, 3, 1]
/// </code></remarks>
member this.Squeeze(input : TF, ?squeeze_dims : Int64[], ?operName : String) =
let squeeze_dims = defaultArg squeeze_dims null
let operName = defaultArg operName null
TF(this.TFGraph, this.TFGraph.Squeeze(input.TFOutput, squeeze_dims, operName))
/// <summary>
/// Deprecated, use StackV2.
/// </summary><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'Stack'.
/// </param><param name="stack_name">
/// Optional argument
/// </param><param name="elem_type"></param><returns>
/// The TFOperation can be fetched from the resulting TFOutput, by fethching the Operation property from the result.
/// </returns>
member this.Stack(elem_type : TFDataType, ?stack_name : String, ?operName : String) =
let stack_name = defaultArg stack_name null
let operName = defaultArg operName null
TF(this.TFGraph, this.TFGraph.Stack(elem_type, stack_name, operName))
/// <summary>
/// Deprecated, use StackCloseV2.
/// </summary><param name="handle"></param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'StackClose'.
/// </param><returns>
/// Returns the description of the operation
/// </returns>
member this.StackClose(handle : TF, ?operName : String) =
let operName = defaultArg operName null
this.TFGraph.StackClose(handle.TFOutput, operName)
/// <summary>
/// Delete the stack from its resource container.
/// </summary><param name="handle">
/// The handle to a stack.
/// </param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'StackCloseV2'.
/// </param><returns>
/// Returns the description of the operation
/// </returns>
member this.StackCloseV2(handle : TF, ?operName : String) =
let operName = defaultArg operName null
this.TFGraph.StackCloseV2(handle.TFOutput, operName)
/// <summary>
/// Deprecated, use StackPopV2.
/// </summary><param name="handle"></param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'StackPop'.
/// </param><param name="elem_type"></param><returns>
/// The TFOperation can be fetched from the resulting TFOutput, by fethching the Operation property from the result.
/// </returns>
member this.StackPop(handle : TF, elem_type : TFDataType, ?operName : String) =
let operName = defaultArg operName null
TF(this.TFGraph, this.TFGraph.StackPop(handle.TFOutput, elem_type, operName))
/// <summary>
/// Pop the element at the top of the stack.
/// </summary><param name="handle">
/// The handle to a stack.
/// </param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'StackPopV2'.
/// </param><param name="elem_type">
/// The type of the elem that is popped.
/// </param><returns>
/// The tensor that is popped from the top of the stack.
/// The TFOperation can be fetched from the resulting TFOutput, by fethching the Operation property from the result.
/// </returns>
member this.StackPopV2(handle : TF, elem_type : TFDataType, ?operName : String) =
let operName = defaultArg operName null
TF(this.TFGraph, this.TFGraph.StackPopV2(handle.TFOutput, elem_type, operName))
/// <summary>
/// Deprecated, use StackPushV2.
/// </summary><param name="handle"></param><param name="elem"></param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'StackPush'.
/// </param><param name="swap_memory">
/// Optional argument
/// </param><returns>
/// The TFOperation can be fetched from the resulting TFOutput, by fethching the Operation property from the result.
/// </returns>
member this.StackPush(handle : TF, elem : TF, ?swap_memory : Boolean, ?operName : String) =
let swap_memory = defaultArg (swap_memory |> Option.map Nullable) (Nullable())
let operName = defaultArg operName null
TF(this.TFGraph, this.TFGraph.StackPush(handle.TFOutput, elem.TFOutput, swap_memory, operName))
/// <summary>
/// Push an element onto the stack.
/// </summary><param name="handle">
/// The handle to a stack.
/// </param><param name="elem">
/// The tensor to be pushed onto the stack.
/// </param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'StackPushV2'.
/// </param><param name="swap_memory">
/// Optional argument
/// Swap <c>elem</c> to CPU. Default to false.
/// </param><returns>
/// The same tensor as the input 'elem'.
/// The TFOperation can be fetched from the resulting TFOutput, by fethching the Operation property from the result.
/// </returns>
member this.StackPushV2(handle : TF, elem : TF, ?swap_memory : Boolean, ?operName : String) =
let swap_memory = defaultArg (swap_memory |> Option.map Nullable) (Nullable())
let operName = defaultArg operName null
TF(this.TFGraph, this.TFGraph.StackPushV2(handle.TFOutput, elem.TFOutput, swap_memory, operName))
/// <summary>
/// A stack that produces elements in first-in last-out order.
/// </summary><param name="max_size">
/// The maximum size of the stack if non-negative. If negative, the stack
/// size is unlimited.
/// </param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'StackV2'.
/// </param><param name="stack_name">
/// Optional argument
/// Overrides the name used for the temporary stack resource. Default
/// value is the name of the 'Stack' op (which is guaranteed unique).
/// </param><param name="elem_type">
/// The type of the elements on the stack.
/// </param><returns>
/// The handle to the stack.
/// The TFOperation can be fetched from the resulting TFOutput, by fethching the Operation property from the result.
/// </returns>
member this.StackV2(max_size : TF, elem_type : TFDataType, ?stack_name : String, ?operName : String) =
let stack_name = defaultArg stack_name null
let operName = defaultArg operName null
TF(this.TFGraph, this.TFGraph.StackV2(max_size.TFOutput, elem_type, stack_name, operName))
/// <summary>
/// Stage values similar to a lightweight Enqueue.
/// </summary><param name="values">
/// a list of tensors
/// dtypes A list of data types that inserted values should adhere to.
/// </param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'Stage'.
/// </param><param name="capacity">
/// Optional argument
/// Maximum number of elements in the Staging Area. If &amp;gt; 0, inserts
/// on the container will block when the capacity is reached.
/// </param><param name="memory_limit">
/// Optional argument
/// The maximum number of bytes allowed for Tensors in the Staging Area.
/// If &amp;gt; 0, inserts will block until sufficient space is available.
/// </param><param name="container">
/// Optional argument
/// If non-empty, this queue is placed in the given container. Otherwise,
/// a default container is used.
/// </param><param name="shared_name">
/// Optional argument
/// It is necessary to match this name to the matching Unstage Op.
/// </param><returns>
/// Returns the description of the operation
/// </returns><remarks>
/// The basic functionality of this Op is similar to a queue with many
/// fewer capabilities and options. This Op is optimized for performance.
/// </remarks>
member this.Stage(values : TF[], ?capacity : Int64, ?memory_limit : Int64, ?container : String, ?shared_name : String, ?operName : String) =
let capacity = defaultArg (capacity |> Option.map Nullable) (Nullable())
let memory_limit = defaultArg (memory_limit |> Option.map Nullable) (Nullable())
let container = defaultArg container null
let shared_name = defaultArg shared_name null
let operName = defaultArg operName null
this.TFGraph.Stage(values |> Array.map (fun x -> x.TFOutput), capacity, memory_limit, container, shared_name, operName)
/// <summary>
/// Op removes all elements in the underlying container.
/// </summary><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'StageClear'.
/// </param><param name="capacity">
/// Optional argument
/// </param><param name="memory_limit">
/// Optional argument
/// </param><param name="container">
/// Optional argument
/// </param><param name="shared_name">
/// Optional argument
/// </param><param name="dtypes"></param><returns>
/// Returns the description of the operation
/// </returns>
member this.StageClear(dtypes : TFDataType[], ?capacity : Int64, ?memory_limit : Int64, ?container : String, ?shared_name : String, ?operName : String) =
let capacity = defaultArg (capacity |> Option.map Nullable) (Nullable())
let memory_limit = defaultArg (memory_limit |> Option.map Nullable) (Nullable())
let container = defaultArg container null
let shared_name = defaultArg shared_name null
let operName = defaultArg operName null
this.TFGraph.StageClear(dtypes, capacity, memory_limit, container, shared_name, operName)
/// <summary>
/// Op peeks at the values at the specified index. If the
/// </summary><param name="index"></param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'StagePeek'.
/// </param><param name="capacity">
/// Optional argument
/// </param><param name="memory_limit">
/// Optional argument
/// </param><param name="container">
/// Optional argument
/// </param><param name="shared_name">
/// Optional argument
/// </param><param name="dtypes"></param><returns>
/// The TFOperation can be fetched from the resulting TFOutput, by fethching the Operation property from the result.
/// </returns><remarks>
/// underlying container does not contain sufficient elements
/// this op will block until it does. This Op is optimized for
/// performance.
/// </remarks>
member this.StagePeek(index : TF, dtypes : TFDataType[], ?capacity : Int64, ?memory_limit : Int64, ?container : String, ?shared_name : String, ?operName : String) =
let capacity = defaultArg (capacity |> Option.map Nullable) (Nullable())
let memory_limit = defaultArg (memory_limit |> Option.map Nullable) (Nullable())
let container = defaultArg container null
let shared_name = defaultArg shared_name null
let operName = defaultArg operName null
this.TFGraph.StagePeek(index.TFOutput, dtypes, capacity, memory_limit, container, shared_name, operName) |> Array.map (fun i -> TF(this.TFGraph, i))
/// <summary>
/// Op returns the number of elements in the underlying container.
/// </summary><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'StageSize'.
/// </param><param name="capacity">
/// Optional argument
/// </param><param name="memory_limit">
/// Optional argument
/// </param><param name="container">
/// Optional argument
/// </param><param name="shared_name">
/// Optional argument
/// </param><param name="dtypes"></param><returns>
/// The TFOperation can be fetched from the resulting TFOutput, by fethching the Operation property from the result.
/// </returns>
member this.StageSize(dtypes : TFDataType[], ?capacity : Int64, ?memory_limit : Int64, ?container : String, ?shared_name : String, ?operName : String) =
let capacity = defaultArg (capacity |> Option.map Nullable) (Nullable())
let memory_limit = defaultArg (memory_limit |> Option.map Nullable) (Nullable())
let container = defaultArg container null
let shared_name = defaultArg shared_name null
let operName = defaultArg operName null
TF(this.TFGraph, this.TFGraph.StageSize(dtypes, capacity, memory_limit, container, shared_name, operName))
/// <summary>
/// Draws samples from a multinomial distribution.
/// </summary><param name="logits">
/// 2-D Tensor with shape <c>[batch_size, num_classes]</c>. Each slice <c>[i, :]</c>
/// represents the unnormalized log probabilities for all classes.
/// </param><param name="num_samples">
/// 0-D. Number of independent samples to draw for each row slice.
/// </param><param name="seed">
/// 2 seeds (shape [2]).
/// </param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'StatelessMultinomial'.
/// </param><param name="output_dtype">
/// Optional argument
/// </param><returns>
/// 2-D Tensor with shape <c>[batch_size, num_samples]</c>. Each slice <c>[i, :]</c>
/// contains the drawn class labels with range <c>[0, num_classes)</c>.
/// The TFOperation can be fetched from the resulting TFOutput, by fethching the Operation property from the result.
/// </returns>
member this.StatelessMultinomial(logits : TF, num_samples : TF, seed : TF, ?output_dtype : TFDataType, ?operName : String) =
let output_dtype = defaultArg (output_dtype |> Option.map Nullable) (Nullable())
let operName = defaultArg operName null
TF(this.TFGraph, this.TFGraph.StatelessMultinomial(logits.TFOutput, num_samples.TFOutput, seed.TFOutput, output_dtype, operName))
/// <summary>
/// Outputs deterministic pseudorandom values from a normal distribution.
/// </summary><param name="shape">
/// The shape of the output tensor.
/// </param><param name="seed">
/// 2 seeds (shape [2]).
/// </param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'StatelessRandomNormal'.
/// </param><param name="dtype">
/// Optional argument
/// The type of the output.
/// </param><returns>
/// Random values with specified shape.
/// The TFOperation can be fetched from the resulting TFOutput, by fethching the Operation property from the result.
/// </returns><remarks>
/// The generated values will have mean 0 and standard deviation 1.
///
/// The outputs are a deterministic function of <c>shape</c> and <c>seed</c>.
/// </remarks>
member this.StatelessRandomNormal(shape : TF, seed : TF, ?dtype : TFDataType, ?operName : String) =
let dtype = defaultArg (dtype |> Option.map Nullable) (Nullable())
let operName = defaultArg operName null
TF(this.TFGraph, this.TFGraph.StatelessRandomNormal(shape.TFOutput, seed.TFOutput, dtype, operName))
/// <summary>
/// Outputs deterministic pseudorandom random values from a uniform distribution.
/// </summary><param name="shape">
/// The shape of the output tensor.
/// </param><param name="seed">
/// 2 seeds (shape [2]).
/// </param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'StatelessRandomUniform'.
/// </param><param name="dtype">
/// Optional argument
/// The type of the output.
/// </param><returns>
/// Random values with specified shape.
/// The TFOperation can be fetched from the resulting TFOutput, by fethching the Operation property from the result.
/// </returns><remarks>
/// The generated values follow a uniform distribution in the range <c>[0, 1)</c>. The
/// lower bound 0 is included in the range, while the upper bound 1 is excluded.
///
/// The outputs are a deterministic function of <c>shape</c> and <c>seed</c>.
/// </remarks>
member this.StatelessRandomUniform(shape : TF, seed : TF, ?dtype : TFDataType, ?operName : String) =
let dtype = defaultArg (dtype |> Option.map Nullable) (Nullable())
let operName = defaultArg operName null
TF(this.TFGraph, this.TFGraph.StatelessRandomUniform(shape.TFOutput, seed.TFOutput, dtype, operName))
/// <summary>
/// Outputs deterministic pseudorandom values from a truncated normal distribution.
/// </summary><param name="shape">
/// The shape of the output tensor.
/// </param><param name="seed">
/// 2 seeds (shape [2]).
/// </param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'StatelessTruncatedNormal'.
/// </param><param name="dtype">
/// Optional argument
/// The type of the output.
/// </param><returns>
/// Random values with specified shape.
/// The TFOperation can be fetched from the resulting TFOutput, by fethching the Operation property from the result.
/// </returns><remarks>
/// The generated values follow a normal distribution with mean 0 and standard
/// deviation 1, except that values whose magnitude is more than 2 standard
/// deviations from the mean are dropped and re-picked.
///
/// The outputs are a deterministic function of <c>shape</c> and <c>seed</c>.
/// </remarks>
member this.StatelessTruncatedNormal(shape : TF, seed : TF, ?dtype : TFDataType, ?operName : String) =
let dtype = defaultArg (dtype |> Option.map Nullable) (Nullable())
let operName = defaultArg operName null
TF(this.TFGraph, this.TFGraph.StatelessTruncatedNormal(shape.TFOutput, seed.TFOutput, dtype, operName))
/// <summary>
/// Check if the input matches the regex pattern.
/// </summary><param name="input">
/// A string tensor of the text to be processed.
/// </param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'StaticRegexFullMatch'.
/// </param><param name="pattern">
/// The regular expression to match the input.
/// </param><returns>
/// A bool tensor with the same shape as <c>input</c>.
/// The TFOperation can be fetched from the resulting TFOutput, by fethching the Operation property from the result.
/// </returns><remarks>
/// The input is a string tensor of any shape. The pattern is the
/// regular expression to be matched with every element of the input tensor.
/// The boolean values (True or False) of the output tensor indicate
/// if the input matches the regex pattern provided.
///
/// The pattern follows the re2 syntax (https://github.com/google/re2/wiki/Syntax)
/// </remarks>
member this.StaticRegexFullMatch(input : TF, pattern : String, ?operName : String) =
let operName = defaultArg operName null
TF(this.TFGraph, this.TFGraph.StaticRegexFullMatch(input.TFOutput, pattern, operName))
/// <summary>
/// Replaces the match of pattern in input with rewrite.
/// </summary><param name="input">
/// The text to be processed.
/// </param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'StaticRegexReplace'.
/// </param><param name="replace_global">
/// Optional argument
/// If True, the replacement is global, otherwise the replacement
/// is done only on the first match.
/// </param><param name="pattern">
/// The regular expression to match the input.
/// </param><param name="rewrite">
    /// The rewrite to be applied to the matched expression.
/// </param><returns>
/// The text after applying pattern and rewrite.
/// The TFOperation can be fetched from the resulting TFOutput, by fethching the Operation property from the result.
/// </returns><remarks>
/// It follows the re2 syntax (https://github.com/google/re2/wiki/Syntax)
/// </remarks>
member this.StaticRegexReplace(input : TF, pattern : String, rewrite : String, ?replace_global : Boolean, ?operName : String) =
let replace_global = defaultArg (replace_global |> Option.map Nullable) (Nullable())
let operName = defaultArg operName null
TF(this.TFGraph, this.TFGraph.StaticRegexReplace(input.TFOutput, pattern, rewrite, replace_global, operName))
/// <summary>
/// Stops gradient computation.
/// </summary><param name="input"></param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'StopGradient'.
/// </param><returns>
/// The TFOperation can be fetched from the resulting TFOutput, by fethching the Operation property from the result.
/// </returns><remarks>
/// When executed in a graph, this op outputs its input tensor as-is.
///
/// When building ops to compute gradients, this op prevents the contribution of
/// its inputs to be taken into account. Normally, the gradient generator adds ops
/// to a graph to compute the derivatives of a specified 'loss' by recursively
/// finding out inputs that contributed to its computation. If you insert this op
    /// in the graph, its inputs are masked from the gradient generator. They are not
/// taken into account for computing gradients.
///
/// This is useful any time you want to compute a value with TensorFlow but need
/// to pretend that the value was a constant. Some examples include:
///
/// * The *EM* algorithm where the *M-step* should not involve backpropagation
/// through the output of the *E-step*.
/// * Contrastive divergence training of Boltzmann machines where, when
/// differentiating the energy function, the training must not backpropagate
/// through the graph that generated the samples from the model.
/// * Adversarial training, where no backprop should happen through the adversarial
/// example generation process.
/// </remarks>
member this.StopGradient(input : TF, ?operName : String) =
let operName = defaultArg operName null
TF(this.TFGraph, this.TFGraph.StopGradient(input.TFOutput, operName))
/// <summary>
/// Return a strided slice from <c>input</c>.
/// </summary><param name="input"></param><param name="begin"><c>begin[k]</c> specifies the offset into the <c>k</c>th range specification.
/// The exact dimension this corresponds to will be determined by context.
/// Out-of-bounds values will be silently clamped. If the <c>k</c>th bit of
/// <c>begin_mask</c> then <c>begin[k]</c> is ignored and the full range of the
/// appropriate dimension is used instead. Negative values causes indexing
/// to start from the highest element e.g. If <c>foo==[1,2,3]</c> then <c>foo[-1]==3</c>.
/// </param><param name="end"><c>end[i]</c> is like <c>begin</c> with the exception that <c>end_mask</c> is
/// used to determine full ranges.
/// </param><param name="strides"><c>strides[i]</c> specifies the increment in the <c>i</c>th specification
/// after extracting a given element. Negative indices will reverse
/// the original order. Out or range values are
/// clamped to <c>[0,dim[i]) if slice[i]&amp;gt;0</c> or <c>[-1,dim[i]-1] if slice[i] &amp;lt; 0</c></param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'StridedSlice'.
/// </param><param name="begin_mask">
/// Optional argument
/// a bitmask where a bit i being 1 means to ignore the begin
/// value and instead use the largest interval possible. At runtime
/// begin[i] will be replaced with <c>[0, n-1)</c> if <c>stride[i] &amp;gt; 0</c> or
/// <c>[-1, n-1]</c> if <c>stride[i] &amp;lt; 0</c></param><param name="end_mask">
/// Optional argument
/// analogous to <c>begin_mask</c></param><param name="ellipsis_mask">
/// Optional argument
/// a bitmask where bit <c>i</c> being 1 means the <c>i</c>th
/// position is actually an ellipsis. One bit at most can be 1.
/// If <c>ellipsis_mask == 0</c>, then an implicit ellipsis mask of <c>1 &amp;lt;&amp;lt; (m+1)</c>
/// is provided. This means that <c>foo[3:5] == foo[3:5, ...]</c>. An ellipsis
/// implicitly creates as many range specifications as necessary to fully
/// specify the sliced range for every dimension. For example for a 4-dimensional
/// tensor <c>foo</c> the slice <c>foo[2, ..., 5:8]</c> implies <c>foo[2, :, :, 5:8]</c>.
/// </param><param name="new_axis_mask">
/// Optional argument
/// a bitmask where bit <c>i</c> being 1 means the <c>i</c>th
/// specification creates a new shape 1 dimension. For example
/// <c>foo[:4, tf.newaxis, :2]</c> would produce a shape <c>(4, 1, 2)</c> tensor.
/// </param><param name="shrink_axis_mask">
/// Optional argument
/// a bitmask where bit <c>i</c> implies that the <c>i</c>th
/// specification should shrink the dimensionality. begin and end
/// must imply a slice of size 1 in the dimension. For example in
/// python one might do <c>foo[:, 3, :]</c> which would result in
/// <c>shrink_axis_mask</c> being 2.
/// </param><returns>
/// The TFOperation can be fetched from the resulting TFOutput, by fethching the Operation property from the result.
/// </returns><remarks>
/// Note, most python users will want to use the Python <c>Tensor.__getitem__</c>
/// or <c>Variable.__getitem__</c> rather than this op directly.
///
/// The goal of this op is to produce a new tensor with a subset of
/// the elements from the <c>n</c> dimensional <c>input</c> tensor. The subset is chosen using
/// a sequence of <c>m</c> sparse range specifications encoded into the arguments
/// of this function. Note, in some cases
/// <c>m</c> could be equal to <c>n</c>, but this need not be the case. Each
/// range specification entry can be one of the following:
///
/// - An ellipsis (...). Ellipses are used to imply zero or more
/// dimensions of full-dimension selection and are produced using
/// <c>ellipsis_mask</c>. For example, <c>foo[...]</c> is the identity slice.
///
/// - A new axis. This is used to insert a new shape=1 dimension and is
/// produced using <c>new_axis_mask</c>. For example, <c>foo[:, ...]</c> where
/// <c>foo</c> is shape <c>(3, 4)</c> produces a <c>(1, 3, 4)</c> tensor.
///
///
/// - A range <c>begin:end:stride</c>. This is used to specify how much to choose from
/// a given dimension. <c>stride</c> can be any integer but 0. <c>begin</c> is an integer
/// which represents the index of the first value to select while <c>end</c> represents
/// the index of the last value to select. The number of values selected in each
/// dimension is <c>end - begin</c> if <c>stride &amp;gt; 0</c> and <c>begin - end</c> if <c>stride &amp;lt; 0</c>.
/// <c>begin</c> and <c>end</c> can be negative where <c>-1</c> is the last element, <c>-2</c> is
/// the second to last. <c>begin_mask</c> controls whether to replace the explicitly
/// given <c>begin</c> with an implicit effective value of <c>0</c> if <c>stride &amp;gt; 0</c> and
/// <c>-1</c> if <c>stride &amp;lt; 0</c>. <c>end_mask</c> is analogous but produces the number
/// required to create the largest open interval. For example, given a shape
/// <c>(3,)</c> tensor <c>foo[:]</c>, the effective <c>begin</c> and <c>end</c> are <c>0</c> and <c>3</c>. Do
/// not assume this is equivalent to <c>foo[0:-1]</c> which has an effective <c>begin</c>
/// and <c>end</c> of <c>0</c> and <c>2</c>. Another example is <c>foo[-2::-1]</c> which reverses the
/// first dimension of a tensor while dropping the last two (in the original
/// order elements). For example <c>foo = [1,2,3,4]; foo[-2::-1]</c> is <c>[4,3]</c>.
///
/// - A single index. This is used to keep only elements that have a given
/// index. For example (<c>foo[2, :]</c> on a shape <c>(5,6)</c> tensor produces a
/// shape <c>(6,)</c> tensor. This is encoded in <c>begin</c> and <c>end</c> and
/// <c>shrink_axis_mask</c>.
///
/// Each conceptual range specification is encoded in the op's argument. This
    /// encoding is best understood by considering a non-trivial example. In
/// particular,
/// <c>foo[1, 2:4, None, ..., :-3:-1, :]</c> will be encoded as
///
/// <code>
/// begin = [1, 2, x, x, 0, x] # x denotes don't care (usually 0)
/// end = [2, 4, x, x, -3, x]
/// strides = [1, 1, x, x, -1, 1]
/// begin_mask = 1&amp;lt;&amp;lt;4 | 1 &amp;lt;&amp;lt; 5 = 48
/// end_mask = 1&amp;lt;&amp;lt;5 = 32
/// ellipsis_mask = 1&amp;lt;&amp;lt;3 = 8
/// new_axis_mask = 1&amp;lt;&amp;lt;2 4
/// shrink_axis_mask = 1&amp;lt;&amp;lt;0
/// </code>
///
/// In this case if <c>foo.shape</c> is (5, 5, 5, 5, 5, 5) the final shape of
/// the slice becomes (2, 1, 5, 5, 2, 5).
/// Let us walk step by step through each argument specification.
///
/// 1. The first argument in the example slice is turned into <c>begin = 1</c> and
/// <c>end = begin + 1 = 2</c>. To disambiguate from the original spec <c>2:4</c> we
/// also set the appropriate bit in <c>shrink_axis_mask</c>.
///
    /// 2. <c>2:4</c> contributes 2, 4, 1 to begin, end, and stride. All masks have
/// zero bits contributed.
///
/// 3. None is a synonym for <c>tf.newaxis</c>. This means insert a dimension of size 1
/// dimension in the final shape. Dummy values are contributed to begin,
/// end and stride, while the new_axis_mask bit is set.
///
/// 4. <c>...</c> grab the full ranges from as many dimensions as needed to
/// fully specify a slice for every dimension of the input shape.
///
/// 5. <c>:-3:-1</c> shows the use of negative indices. A negative index <c>i</c> associated
/// with a dimension that has shape <c>s</c> is converted to a positive index
/// <c>s + i</c>. So <c>-1</c> becomes <c>s-1</c> (i.e. the last element). This conversion
/// is done internally so begin, end and strides receive x, -3, and -1.
/// The appropriate begin_mask bit is set to indicate the start range is the
/// full range (ignoring the x).
///
/// 6. <c>:</c> indicates that the entire contents of the corresponding dimension
/// is selected. This is equivalent to <c>::</c> or <c>0::1</c>. begin, end, and strides
/// receive 0, 0, and 1, respectively. The appropriate bits in <c>begin_mask</c> and
/// <c>end_mask</c> are also set.
///
/// *Requirements*:
/// <c>0 != strides[i] for i in [0, m)</c><c>ellipsis_mask must be a power of two (only one ellipsis)</c></remarks>
member this.StridedSlice(input : TF, ``begin`` : TF, ``end`` : TF, strides : TF, ?begin_mask : Int64, ?end_mask : Int64, ?ellipsis_mask : Int64, ?new_axis_mask : Int64, ?shrink_axis_mask : Int64, ?operName : String) =
let begin_mask = defaultArg (begin_mask |> Option.map Nullable) (Nullable())
let end_mask = defaultArg (end_mask |> Option.map Nullable) (Nullable())
let ellipsis_mask = defaultArg (ellipsis_mask |> Option.map Nullable) (Nullable())
let new_axis_mask = defaultArg (new_axis_mask |> Option.map Nullable) (Nullable())
let shrink_axis_mask = defaultArg (shrink_axis_mask |> Option.map Nullable) (Nullable())
let operName = defaultArg operName null
TF(this.TFGraph, this.TFGraph.StridedSlice(input.TFOutput, ``begin``.TFOutput, ``end``.TFOutput, strides.TFOutput, begin_mask, end_mask, ellipsis_mask, new_axis_mask, shrink_axis_mask, operName))
    /// <summary>
    /// Wraps <c>TFGraph.StridedSliceAssign</c>: writes <c>value</c> into the strided slice of
    /// <c>reference</c> described by <c>begin</c>, <c>end</c> and <c>strides</c>.
    /// The mask arguments have the same meaning as in <c>StridedSlice</c>.
    /// </summary><param name="reference">Target of the slice assignment.</param><param name="begin"></param><param name="end"></param><param name="strides"></param><param name="value">Values to write into the selected slice.</param><param name="operName">
    /// If specified, the created operation in the graph will be this one, otherwise it will be named 'StridedSliceAssign'.
    /// </param><param name="begin_mask">
    /// Optional argument
    /// </param><param name="end_mask">
    /// Optional argument
    /// </param><param name="ellipsis_mask">
    /// Optional argument
    /// </param><param name="new_axis_mask">
    /// Optional argument
    /// </param><param name="shrink_axis_mask">
    /// Optional argument
    /// </param><returns>
    /// The TFOperation can be fetched from the resulting TFOutput, by fetching the Operation property from the result.
    /// </returns>
    member this.StridedSliceAssign(reference : TF, ``begin`` : TF, ``end`` : TF, strides : TF, value : TF, ?begin_mask : Int64, ?end_mask : Int64, ?ellipsis_mask : Int64, ?new_axis_mask : Int64, ?shrink_axis_mask : Int64, ?operName : String) =
        // Unset optional masks are forwarded as empty Nullables (attribute left at its default).
        let begin_mask = defaultArg (begin_mask |> Option.map Nullable) (Nullable())
        let end_mask = defaultArg (end_mask |> Option.map Nullable) (Nullable())
        let ellipsis_mask = defaultArg (ellipsis_mask |> Option.map Nullable) (Nullable())
        let new_axis_mask = defaultArg (new_axis_mask |> Option.map Nullable) (Nullable())
        let shrink_axis_mask = defaultArg (shrink_axis_mask |> Option.map Nullable) (Nullable())
        let operName = defaultArg operName null
        TF(this.TFGraph, this.TFGraph.StridedSliceAssign(reference.TFOutput, ``begin``.TFOutput, ``end``.TFOutput, strides.TFOutput, value.TFOutput, begin_mask, end_mask, ellipsis_mask, new_axis_mask, shrink_axis_mask, operName))
/// <summary>
/// Returns the gradient of <c>StridedSlice</c>.
/// </summary><param name="shape"></param><param name="begin"></param><param name="end"></param><param name="strides"></param><param name="dy"></param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'StridedSliceGrad'.
/// </param><param name="begin_mask">
/// Optional argument
/// </param><param name="end_mask">
/// Optional argument
/// </param><param name="ellipsis_mask">
/// Optional argument
/// </param><param name="new_axis_mask">
/// Optional argument
/// </param><param name="shrink_axis_mask">
/// Optional argument
/// </param><returns>
    /// The TFOperation can be fetched from the resulting TFOutput, by fetching the Operation property from the result.
/// </returns><remarks>
/// Since <c>StridedSlice</c> cuts out pieces of its <c>input</c> which is size
/// <c>shape</c>, its gradient will have the same shape (which is passed here
/// as <c>shape</c>). The gradient will be zero in any element that the slice
/// does not select.
///
/// Arguments are the same as StridedSliceGrad with the exception that
/// <c>dy</c> is the input gradient to be propagated and <c>shape</c> is the
/// shape of <c>StridedSlice</c>'s <c>input</c>.
/// </remarks>
member this.StridedSliceGrad(shape : TF, ``begin`` : TF, ``end`` : TF, strides : TF, dy : TF, ?begin_mask : Int64, ?end_mask : Int64, ?ellipsis_mask : Int64, ?new_axis_mask : Int64, ?shrink_axis_mask : Int64, ?operName : String) =
let begin_mask = defaultArg (begin_mask |> Option.map Nullable) (Nullable())
let end_mask = defaultArg (end_mask |> Option.map Nullable) (Nullable())
let ellipsis_mask = defaultArg (ellipsis_mask |> Option.map Nullable) (Nullable())
let new_axis_mask = defaultArg (new_axis_mask |> Option.map Nullable) (Nullable())
let shrink_axis_mask = defaultArg (shrink_axis_mask |> Option.map Nullable) (Nullable())
let operName = defaultArg operName null
TF(this.TFGraph, this.TFGraph.StridedSliceGrad(shape.TFOutput, ``begin``.TFOutput, ``end``.TFOutput, strides.TFOutput, dy.TFOutput, begin_mask, end_mask, ellipsis_mask, new_axis_mask, shrink_axis_mask, operName))
/// <summary>
/// Formats a string template using a list of tensors.
/// </summary><param name="inputs">
/// The list of tensors to format into the placeholder string.
/// </param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'StringFormat'.
/// </param><param name="template">
/// Optional argument
/// A string, the template to format tensor summaries into.
/// </param><param name="placeholder">
/// Optional argument
/// A string, at each placeholder in the template a subsequent tensor summary will be inserted.
/// </param><param name="summarize">
/// Optional argument
/// When formatting the tensor summaries print the first and last summarize entries of each tensor dimension.
/// </param><returns>
    /// The resulting string scalar.
    /// The TFOperation can be fetched from the resulting TFOutput, by fetching the Operation property from the result.
/// </returns><remarks>
/// Formats a string template using a list of tensors, pretty-printing tensor summaries.
/// </remarks>
member this.StringFormat(inputs : TF[], ?template : String, ?placeholder : String, ?summarize : Int64, ?operName : String) =
let template = defaultArg template null
let placeholder = defaultArg placeholder null
let summarize = defaultArg (summarize |> Option.map Nullable) (Nullable())
let operName = defaultArg operName null
TF(this.TFGraph, this.TFGraph.StringFormat(inputs |> Array.map (fun x -> x.TFOutput), template, placeholder, summarize, operName))
/// <summary>
/// Joins the strings in the given list of string tensors into one tensor;
/// </summary><param name="inputs">
/// A list of string tensors. The tensors must all have the same shape,
/// or be scalars. Scalars may be mixed in; these will be broadcast to the shape
/// of non-scalar inputs.
/// </param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'StringJoin'.
/// </param><param name="separator">
/// Optional argument
/// string, an optional join separator.
/// </param><returns>
    /// The TFOperation can be fetched from the resulting TFOutput, by fetching the Operation property from the result.
/// </returns><remarks>
/// with the given separator (default is an empty separator).
/// </remarks>
member this.StringJoin(inputs : TF[], ?separator : String, ?operName : String) =
let separator = defaultArg separator null
let operName = defaultArg operName null
TF(this.TFGraph, this.TFGraph.StringJoin(inputs |> Array.map (fun x -> x.TFOutput), separator, operName))
/// <summary>
/// String lengths of <c>input</c>.
/// </summary><param name="input">
/// The string for which to compute the length.
/// </param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'StringLength'.
/// </param><param name="unit">
/// Optional argument
/// The unit that is counted to compute string length. One of: <c>"BYTE"</c> (for
/// the number of bytes in each string) or <c>"UTF8_CHAR"</c> (for the number of UTF-8
/// encoded Unicode code points in each string). Results are undefined
/// if <c>unit=UTF8_CHAR</c> and the <c>input</c> strings do not contain structurally
/// valid UTF-8.
/// </param><returns>
/// Integer tensor that has the same shape as <c>input</c>. The output contains the
/// element-wise string lengths of <c>input</c>.
    /// The TFOperation can be fetched from the resulting TFOutput, by fetching the Operation property from the result.
/// </returns><remarks>
/// Computes the length of each string given in the input tensor.
/// </remarks>
member this.StringLength(input : TF, ?unit : String, ?operName : String) =
let unit = defaultArg unit null
let operName = defaultArg operName null
TF(this.TFGraph, this.TFGraph.StringLength(input.TFOutput, unit, operName))
/// <summary>
/// Split elements of <c>input</c> based on <c>delimiter</c> into a <c>SparseTensor</c>.
/// </summary><param name="input">
/// 1-D. Strings to split.
/// </param><param name="delimiter">
/// 0-D. Delimiter characters (bytes), or empty string.
/// </param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'StringSplit'.
/// </param><param name="skip_empty">
/// Optional argument
/// A <c>bool</c>. If <c>True</c>, skip the empty strings from the result.
/// </param><returns>
/// Returns a tuple with multiple values, as follows:
/// indices: A dense matrix of int64 representing the indices of the sparse tensor.
    /// values: A vector of strings corresponding to the split values.
/// shape: a length-2 vector of int64 representing the shape of the sparse
/// tensor, where the first value is N and the second value is the maximum number
/// of tokens in a single input entry.
    /// The TFOperation can be fetched from any of the TFOutputs returned in the tuple values, by fetching the Operation property.
/// </returns><remarks>
/// Let N be the size of source (typically N will be the batch size). Split each
/// element of <c>input</c> based on <c>delimiter</c> and return a <c>SparseTensor</c>
    /// containing the split tokens. Empty tokens are ignored.
///
/// <c>delimiter</c> can be empty, or a string of split characters. If <c>delimiter</c> is an
/// empty string, each element of <c>input</c> is split into individual single-byte
/// character strings, including splitting of UTF-8 multibyte sequences. Otherwise
/// every character of <c>delimiter</c> is a potential split point.
///
/// For example:
/// N = 2, input[0] is 'hello world' and input[1] is 'a b c', then the output
/// will be
///
/// indices = [0, 0;
/// 0, 1;
/// 1, 0;
/// 1, 1;
/// 1, 2]
/// shape = [2, 3]
/// values = ['hello', 'world', 'a', 'b', 'c']
/// </remarks>
member this.StringSplit(input : TF, delimiter : TF, ?skip_empty : Boolean, ?operName : String) =
let skip_empty = defaultArg (skip_empty |> Option.map Nullable) (Nullable())
let operName = defaultArg operName null
this.TFGraph.StringSplit(input.TFOutput, delimiter.TFOutput, skip_empty, operName)
/// <summary>
/// Split elements of <c>source</c> based on <c>sep</c> into a <c>SparseTensor</c>.
/// </summary><param name="input"><c>1-D</c> string <c>Tensor</c>, the strings to split.
/// </param><param name="sep"><c>0-D</c> string <c>Tensor</c>, the delimiter character.
/// </param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'StringSplitV2'.
/// </param><param name="maxsplit">
/// Optional argument
/// An <c>int</c>. If <c>maxsplit &amp;gt; 0</c>, limit of the split of the result.
/// </param><returns>
/// Returns a tuple with multiple values, as follows:
/// indices:
/// values:
/// shape:
    /// The TFOperation can be fetched from any of the TFOutputs returned in the tuple values, by fetching the Operation property.
/// </returns><remarks>
/// Let N be the size of source (typically N will be the batch size). Split each
/// element of <c>source</c> based on <c>sep</c> and return a <c>SparseTensor</c>
/// containing the split tokens. Empty tokens are ignored.
///
/// For example, N = 2, source[0] is 'hello world' and source[1] is 'a b c',
/// then the output will be
/// <code>
/// st.indices = [0, 0;
/// 0, 1;
/// 1, 0;
/// 1, 1;
/// 1, 2]
/// st.shape = [2, 3]
/// st.values = ['hello', 'world', 'a', 'b', 'c']
/// </code>
///
/// If <c>sep</c> is given, consecutive delimiters are not grouped together and are
/// deemed to delimit empty strings. For example, source of <c>"1&amp;lt;&amp;gt;2&amp;lt;&amp;gt;&amp;lt;&amp;gt;3"</c> and
/// sep of <c>"&amp;lt;&amp;gt;"</c> returns <c>["1", "2", "", "3"]</c>. If <c>sep</c> is None or an empty
/// string, consecutive whitespace are regarded as a single separator, and the
    /// result will contain no empty strings at the start or end if the string has
/// leading or trailing whitespace.
///
/// Note that the above mentioned behavior matches python's str.split.
/// </remarks>
member this.StringSplitV2(input : TF, sep : TF, ?maxsplit : Int64, ?operName : String) =
let maxsplit = defaultArg (maxsplit |> Option.map Nullable) (Nullable())
let operName = defaultArg operName null
this.TFGraph.StringSplitV2(input.TFOutput, sep.TFOutput, maxsplit, operName)
/// <summary>
/// Strip leading and trailing whitespaces from the Tensor.
/// </summary><param name="input">
/// A string <c>Tensor</c> of any shape.
/// </param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'StringStrip'.
/// </param><returns>
/// A string <c>Tensor</c> of the same shape as the input.
    /// The TFOperation can be fetched from the resulting TFOutput, by fetching the Operation property from the result.
/// </returns>
member this.StringStrip(input : TF, ?operName : String) =
let operName = defaultArg operName null
TF(this.TFGraph, this.TFGraph.StringStrip(input.TFOutput, operName))
/// <summary>
/// Converts each string in the input Tensor to its hash mod by a number of buckets.
/// </summary><param name="string_tensor"></param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'StringToHashBucket'.
/// </param><param name="num_buckets">
/// The number of buckets.
/// </param><returns>
/// A Tensor of the same shape as the input <c>string_tensor</c>.
    /// The TFOperation can be fetched from the resulting TFOutput, by fetching the Operation property from the result.
/// </returns><remarks>
/// The hash function is deterministic on the content of the string within the
/// process.
///
/// Note that the hash function may change from time to time.
/// This functionality will be deprecated and it's recommended to use
/// <c>tf.string_to_hash_bucket_fast()</c> or <c>tf.string_to_hash_bucket_strong()</c>.
/// </remarks>
member this.StringToHashBucket(string_tensor : TF, num_buckets : Int64, ?operName : String) =
let operName = defaultArg operName null
TF(this.TFGraph, this.TFGraph.StringToHashBucket(string_tensor.TFOutput, num_buckets, operName))
/// <summary>
/// Converts each string in the input Tensor to its hash mod by a number of buckets.
/// </summary><param name="input">
/// The strings to assign a hash bucket.
/// </param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'StringToHashBucketFast'.
/// </param><param name="num_buckets">
/// The number of buckets.
/// </param><returns>
/// A Tensor of the same shape as the input <c>string_tensor</c>.
    /// The TFOperation can be fetched from the resulting TFOutput, by fetching the Operation property from the result.
/// </returns><remarks>
/// The hash function is deterministic on the content of the string within the
/// process and will never change. However, it is not suitable for cryptography.
/// This function may be used when CPU time is scarce and inputs are trusted or
/// unimportant. There is a risk of adversaries constructing inputs that all hash
/// to the same bucket. To prevent this problem, use a strong hash function with
/// <c>tf.string_to_hash_bucket_strong</c>.
/// </remarks>
member this.StringToHashBucketFast(input : TF, num_buckets : Int64, ?operName : String) =
let operName = defaultArg operName null
TF(this.TFGraph, this.TFGraph.StringToHashBucketFast(input.TFOutput, num_buckets, operName))
/// <summary>
/// Converts each string in the input Tensor to its hash mod by a number of buckets.
/// </summary><param name="input">
/// The strings to assign a hash bucket.
/// </param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'StringToHashBucketStrong'.
/// </param><param name="num_buckets">
/// The number of buckets.
/// </param><param name="key">
/// The key for the keyed hash function passed as a list of two uint64
/// elements.
/// </param><returns>
/// A Tensor of the same shape as the input <c>string_tensor</c>.
    /// The TFOperation can be fetched from the resulting TFOutput, by fetching the Operation property from the result.
/// </returns><remarks>
/// The hash function is deterministic on the content of the string within the
/// process. The hash function is a keyed hash function, where attribute <c>key</c>
/// defines the key of the hash function. <c>key</c> is an array of 2 elements.
///
/// A strong hash is important when inputs may be malicious, e.g. URLs with
/// additional components. Adversaries could try to make their inputs hash to the
/// same bucket for a denial-of-service attack or to skew the results. A strong
/// hash prevents this by making it difficult, if not infeasible, to compute inputs
/// that hash to the same bucket. This comes at a cost of roughly 4x higher compute
/// time than <c>tf.string_to_hash_bucket_fast</c>.
/// </remarks>
member this.StringToHashBucketStrong(input : TF, num_buckets : Int64, key : Int64[], ?operName : String) =
let operName = defaultArg operName null
TF(this.TFGraph, this.TFGraph.StringToHashBucketStrong(input.TFOutput, num_buckets, key, operName))
/// <summary>
/// Converts each string in the input Tensor to the specified numeric type.
/// </summary><param name="string_tensor"></param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'StringToNumber'.
/// </param><param name="out_type">
/// Optional argument
/// The numeric type to interpret each string in <c>string_tensor</c> as.
/// </param><returns>
/// A Tensor of the same shape as the input <c>string_tensor</c>.
    /// The TFOperation can be fetched from the resulting TFOutput, by fetching the Operation property from the result.
/// </returns><remarks>
/// (Note that int32 overflow results in an error while float overflow
/// results in a rounded value.)
/// </remarks>
member this.StringToNumber(string_tensor : TF, ?out_type : TFDataType, ?operName : String) =
let out_type = defaultArg (out_type |> Option.map Nullable) (Nullable())
let operName = defaultArg operName null
TF(this.TFGraph, this.TFGraph.StringToNumber(string_tensor.TFOutput, out_type, operName))
/// <summary>
/// Returns x - y element-wise.
/// </summary><param name="x"></param><param name="y"></param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'Sub'.
/// </param><returns>
    /// The TFOperation can be fetched from the resulting TFOutput, by fetching the Operation property from the result.
/// </returns><remarks>
/// *NOTE*: <c>Subtract</c> supports broadcasting. More about broadcasting
/// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
/// </remarks>
member this.Sub(x : TF, y : TF, ?operName : String) =
let operName = defaultArg operName null
TF(this.TFGraph, this.TFGraph.Sub(x.TFOutput, y.TFOutput, operName))
/// <summary>
/// Computes the sum of elements across dimensions of a tensor.
/// </summary><param name="input">
/// The tensor to reduce.
/// </param><param name="reduction_indices">
/// The dimensions to reduce. Must be in the range
/// <c>[-rank(input), rank(input))</c>.
/// </param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'Sum'.
/// </param><param name="keep_dims">
/// Optional argument
/// If true, retain reduced dimensions with length 1.
/// </param><returns>
/// The reduced tensor.
    /// The TFOperation can be fetched from the resulting TFOutput, by fetching the Operation property from the result.
/// </returns><remarks>
/// Reduces <c>input</c> along the dimensions given in <c>axis</c>. Unless
/// <c>keep_dims</c> is true, the rank of the tensor is reduced by 1 for each entry in
/// <c>axis</c>. If <c>keep_dims</c> is true, the reduced dimensions are
/// retained with length 1.
/// </remarks>
member this.Sum(input : TF, reduction_indices : TF, ?keep_dims : Boolean, ?operName : String) =
let keep_dims = defaultArg (keep_dims |> Option.map Nullable) (Nullable())
let operName = defaultArg operName null
TF(this.TFGraph, this.TFGraph.Sum(input.TFOutput, reduction_indices.TFOutput, keep_dims, operName))
/// <summary>
/// Computes the singular value decompositions of one or more matrices.
/// </summary><param name="input">
/// A tensor of shape <c>[..., M, N]</c> whose inner-most 2 dimensions
/// form matrices of size <c>[M, N]</c>. Let <c>P</c> be the minimum of <c>M</c> and <c>N</c>.
/// </param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'Svd'.
/// </param><param name="compute_uv">
/// Optional argument
/// If true, left and right singular vectors will be
/// computed and returned in <c>u</c> and <c>v</c>, respectively.
/// If false, <c>u</c> and <c>v</c> are not set and should never referenced.
/// </param><param name="full_matrices">
/// Optional argument
/// If true, compute full-sized <c>u</c> and <c>v</c>. If false
/// (the default), compute only the leading <c>P</c> singular vectors.
/// Ignored if <c>compute_uv</c> is <c>False</c>.
/// </param><returns>
/// Returns a tuple with multiple values, as follows:
/// s: Singular values. Shape is <c>[..., P]</c>.
/// u: Left singular vectors. If <c>full_matrices</c> is <c>False</c> then shape is
/// <c>[..., M, P]</c>; if <c>full_matrices</c> is <c>True</c> then shape is
/// <c>[..., M, M]</c>. Undefined if <c>compute_uv</c> is <c>False</c>.
    /// v: Right singular vectors. If <c>full_matrices</c> is <c>False</c> then shape is
/// <c>[..., N, P]</c>. If <c>full_matrices</c> is <c>True</c> then shape is <c>[..., N, N]</c>.
/// Undefined if <c>compute_uv</c> is false.
    /// The TFOperation can be fetched from any of the TFOutputs returned in the tuple values, by fetching the Operation property.
/// </returns><remarks>
/// Computes the SVD of each inner matrix in <c>input</c> such that
/// <c>input[..., :, :] = u[..., :, :] * diag(s[..., :, :]) * transpose(v[..., :, :])</c><code>
/// # a is a tensor containing a batch of matrices.
/// # s is a tensor of singular values for each matrix.
/// # u is the tensor containing of left singular vectors for each matrix.
/// # v is the tensor containing of right singular vectors for each matrix.
/// s, u, v = svd(a)
/// s, _, _ = svd(a, compute_uv=False)
/// </code></remarks>
member this.Svd(input : TF, ?compute_uv : Boolean, ?full_matrices : Boolean, ?operName : String) =
let compute_uv = defaultArg (compute_uv |> Option.map Nullable) (Nullable())
let full_matrices = defaultArg (full_matrices |> Option.map Nullable) (Nullable())
let operName = defaultArg operName null
this.TFGraph.Svd(input.TFOutput, compute_uv, full_matrices, operName)
/// <summary>
/// Forwards <c>data</c> to the output port determined by <c>pred</c>.
/// </summary><param name="data">
/// The tensor to be forwarded to the appropriate output.
/// </param><param name="pred">
/// A scalar that specifies which output port will receive data.
/// </param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'Switch'.
/// </param><returns>
/// Returns a tuple with multiple values, as follows:
/// output_false: If <c>pred</c> is false, data will be forwarded to this output.
/// output_true: If <c>pred</c> is true, data will be forwarded to this output.
    /// The TFOperation can be fetched from any of the TFOutputs returned in the tuple values, by fetching the Operation property.
/// </returns><remarks>
/// If <c>pred</c> is true, the <c>data</c> input is forwarded to <c>output_true</c>. Otherwise,
/// the data goes to <c>output_false</c>.
///
/// See also <c>RefSwitch</c> and <c>Merge</c>.
/// </remarks>
member this.Switch(data : TF, pred : TF, ?operName : String) =
let operName = defaultArg operName null
this.TFGraph.Switch(data.TFOutput, pred.TFOutput, operName)
/// <summary>
/// Creates a dataset that contains <c>count</c> elements from the <c>input_dataset</c>.
/// </summary><param name="input_dataset"></param><param name="count">
/// A scalar representing the number of elements from the <c>input_dataset</c>
/// that should be taken. A value of <c>-1</c> indicates that all of <c>input_dataset</c>
/// is taken.
/// </param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'TakeDataset'.
/// </param><param name="output_types"></param><param name="output_shapes"></param><returns>
    /// The TFOperation can be fetched from the resulting TFOutput, by fetching the Operation property from the result.
/// </returns>
member this.TakeDataset(input_dataset : TF, count : TF, output_types : TFDataType[], output_shapes : TFShape[], ?operName : String) =
let operName = defaultArg operName null
TF(this.TFGraph, this.TFGraph.TakeDataset(input_dataset.TFOutput, count.TFOutput, output_types, output_shapes, operName))
/// <summary>
/// Read <c>SparseTensors</c> from a <c>SparseTensorsMap</c> and concatenate them.
/// </summary><param name="sparse_handles">
/// 1-D, The <c>N</c> serialized <c>SparseTensor</c> objects.
/// Shape: <c>[N]</c>.
/// </param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'TakeManySparseFromTensorsMap'.
/// </param><param name="container">
/// Optional argument
/// The container name for the <c>SparseTensorsMap</c> read by this op.
/// </param><param name="shared_name">
/// Optional argument
/// The shared name for the <c>SparseTensorsMap</c> read by this op.
/// It should not be blank; rather the <c>shared_name</c> or unique Operation name
/// of the Op that created the original <c>SparseTensorsMap</c> should be used.
/// </param><param name="dtype">
/// The <c>dtype</c> of the <c>SparseTensor</c> objects stored in the
/// <c>SparseTensorsMap</c>.
/// </param><returns>
/// Returns a tuple with multiple values, as follows:
/// sparse_indices: 2-D. The <c>indices</c> of the minibatch <c>SparseTensor</c>.
/// sparse_values: 1-D. The <c>values</c> of the minibatch <c>SparseTensor</c>.
/// sparse_shape: 1-D. The <c>shape</c> of the minibatch <c>SparseTensor</c>.
    /// The TFOperation can be fetched from any of the TFOutputs returned in the tuple values, by fetching the Operation property.
/// </returns><remarks>
/// The input <c>sparse_handles</c> must be an <c>int64</c> matrix of shape <c>[N, 1]</c> where
/// <c>N</c> is the minibatch size and the rows correspond to the output handles of
/// <c>AddSparseToTensorsMap</c> or <c>AddManySparseToTensorsMap</c>. The ranks of the
/// original <c>SparseTensor</c> objects that went into the given input ops must all
/// match. When the final <c>SparseTensor</c> is created, it has rank one
/// higher than the ranks of the incoming <c>SparseTensor</c> objects
/// (they have been concatenated along a new row dimension on the left).
///
/// The output <c>SparseTensor</c> object's shape values for all dimensions but the
/// first are the max across the input <c>SparseTensor</c> objects' shape values
/// for the corresponding dimensions. Its first shape value is <c>N</c>, the minibatch
/// size.
///
/// The input <c>SparseTensor</c> objects' indices are assumed ordered in
/// standard lexicographic order. If this is not the case, after this
/// step run <c>SparseReorder</c> to restore index ordering.
///
/// For example, if the handles represent an input, which is a <c>[2, 3]</c> matrix
/// representing two original <c>SparseTensor</c> objects:
///
/// <code>
/// index = [ 0]
/// [10]
/// [20]
/// values = [1, 2, 3]
/// shape = [50]
/// </code>
///
/// and
///
/// <code>
/// index = [ 2]
/// [10]
/// values = [4, 5]
/// shape = [30]
/// </code>
///
/// then the final <c>SparseTensor</c> will be:
///
/// <code>
/// index = [0 0]
/// [0 10]
/// [0 20]
/// [1 2]
/// [1 10]
/// values = [1, 2, 3, 4, 5]
/// shape = [2 50]
/// </code></remarks>
member this.TakeManySparseFromTensorsMap(sparse_handles : TF, dtype : TFDataType, ?container : String, ?shared_name : String, ?operName : String) =
let container = defaultArg container null
let shared_name = defaultArg shared_name null
let operName = defaultArg operName null
this.TFGraph.TakeManySparseFromTensorsMap(sparse_handles.TFOutput, dtype, container, shared_name, operName)
/// <summary>
/// Computes tan of x element-wise.
/// </summary><param name="x"></param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'Tan'.
/// </param><returns>
    /// The TFOperation can be fetched from the resulting TFOutput, by fetching the Operation property from the result.
/// </returns>
member this.Tan(x : TF, ?operName : String) =
let operName = defaultArg operName null
TF(this.TFGraph, this.TFGraph.Tan(x.TFOutput, operName))
/// <summary>
/// Computes hyperbolic tangent of <c>x</c> element-wise.
/// </summary><param name="x"></param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'Tanh'.
/// </param><returns>
    /// The TFOperation can be fetched from the resulting TFOutput, by fetching the Operation property from the result.
/// </returns>
member this.Tanh(x : TF, ?operName : String) =
let operName = defaultArg operName null
TF(this.TFGraph, this.TFGraph.Tanh(x.TFOutput, operName))
/// <summary>
/// Computes the gradient for the tanh of <c>x</c> wrt its input.
/// </summary><param name="y"></param><param name="dy"></param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'TanhGrad'.
/// </param><returns>
    /// The TFOperation can be fetched from the resulting TFOutput, by fetching the Operation property from the result.
/// </returns><remarks>
/// Specifically, <c>grad = dy * (1 - y*y)</c>, where <c>y = tanh(x)</c>, and <c>dy</c>
/// is the corresponding input gradient.
/// </remarks>
member this.TanhGrad(y : TF, dy : TF, ?operName : String) =
let operName = defaultArg operName null
TF(this.TFGraph, this.TFGraph.TanhGrad(y.TFOutput, dy.TFOutput, operName))
/// <summary>
/// Returns a tensor that may be mutated, but only persists within a single step.
/// </summary><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'TemporaryVariable'.
/// </param><param name="var_name">
/// Optional argument
/// Overrides the name used for the temporary variable resource. Default
/// value is the name of the 'TemporaryVariable' op (which is guaranteed unique).
/// </param><param name="shape">
/// The shape of the variable tensor.
/// </param><param name="dtype">
/// The type of elements in the variable tensor.
/// </param><returns>
/// A reference to the variable tensor.
    /// The TFOperation can be fetched from the resulting TFOutput, by fetching the Operation property from the result.
/// </returns><remarks>
/// This is an experimental op for internal use only and it is possible to use this
/// op in unsafe ways. DO NOT USE unless you fully understand the risks.
///
/// It is the caller's responsibility to ensure that 'ref' is eventually passed to a
/// matching 'DestroyTemporaryVariable' op after all other uses have completed.
///
/// Outputs a ref to the tensor state so it may be read or modified.
///
/// E.g.
/// var = state_ops._temporary_variable([1, 2], types.float_)
/// var_name = var.op.name
/// var = state_ops.assign(var, [[4.0, 5.0]])
/// var = state_ops.assign_add(var, [[6.0, 7.0]])
/// final = state_ops._destroy_temporary_variable(var, var_name=var_name)
/// </remarks>
member this.TemporaryVariable(shape : TFShape, dtype : TFDataType, ?var_name : String, ?operName : String) =
let var_name = defaultArg var_name null
let operName = defaultArg operName null
TF(this.TFGraph, this.TFGraph.TemporaryVariable(shape, dtype, var_name, operName))
/// <summary>
/// Deprecated. Use TensorArrayCloseV3
/// </summary><param name="handle"></param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'TensorArrayCloseV2'.
/// </param><returns>
/// Returns the description of the operation
/// </returns>
member this.TensorArrayCloseV2(handle : TF, ?operName : String) =
let operName = defaultArg operName null
this.TFGraph.TensorArrayCloseV2(handle.TFOutput, operName)
/// <summary>
/// Delete the TensorArray from its resource container.
/// </summary><param name="handle">
/// The handle to a TensorArray (output of TensorArray or TensorArrayGrad).
/// </param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'TensorArrayCloseV3'.
/// </param><returns>
/// Returns the description of the operation
/// </returns><remarks>
/// This enables the user to close and release the resource in the middle
/// of a step/run.
/// </remarks>
member this.TensorArrayCloseV3(handle : TF, ?operName : String) =
let operName = defaultArg operName null
this.TFGraph.TensorArrayCloseV3(handle.TFOutput, operName)
/// <summary>
/// Deprecated. Use TensorArrayConcatV3
/// </summary><param name="handle"></param><param name="flow_in"></param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'TensorArrayConcatV2'.
/// </param><param name="element_shape_except0">
/// Optional argument
/// </param><param name="dtype"></param><returns>
/// Returns a tuple with multiple values, as follows:
/// value:
/// lengths:
/// The TFOperation can be fetched from any of the TFOutputs returned in the tuple values, by fethching the Operation property.
/// </returns>
member this.TensorArrayConcatV2(handle : TF, flow_in : TF, dtype : TFDataType, ?element_shape_except0 : TFShape, ?operName : String) =
let element_shape_except0 = defaultArg element_shape_except0 null
let operName = defaultArg operName null
this.TFGraph.TensorArrayConcatV2(handle.TFOutput, flow_in.TFOutput, dtype, element_shape_except0, operName)
/// <summary>
/// Concat the elements from the TensorArray into value <c>value</c>.
/// </summary><param name="handle">
/// The handle to a TensorArray.
/// </param><param name="flow_in">
/// A float scalar that enforces proper chaining of operations.
/// </param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'TensorArrayConcatV3'.
/// </param><param name="element_shape_except0">
/// Optional argument
/// The expected shape of an element, if known,
/// excluding the first dimension. Used to validate the shapes of
/// TensorArray elements. If this shape is not fully specified, concatenating
/// zero-size TensorArrays is an error.
/// </param><param name="dtype">
/// The type of the elem that is returned.
/// </param><returns>
/// Returns a tuple with multiple values, as follows:
/// value: All of the elements in the TensorArray, concatenated along the first
/// axis.
/// lengths: A vector of the row sizes of the original T elements in the
/// value output. In the example above, this would be the values:
/// <c>(n1, n2, ..., n(T-1))</c>.
/// The TFOperation can be fetched from any of the TFOutputs returned in the tuple values, by fethching the Operation property.
/// </returns><remarks>
/// Takes <c>T</c> elements of shapes
///
/// <code>
/// (n0 x d0 x d1 x ...), (n1 x d0 x d1 x ...), ..., (n(T-1) x d0 x d1 x ...)
/// </code>
///
/// and concatenates them into a Tensor of shape:
///
/// <code>
/// (n0 + n1 + ... + n(T-1) x d0 x d1 x ...)
/// </code>
///
/// All elements must have the same shape (excepting the first dimension).
/// </remarks>
member this.TensorArrayConcatV3(handle : TF, flow_in : TF, dtype : TFDataType, ?element_shape_except0 : TFShape, ?operName : String) =
let element_shape_except0 = defaultArg element_shape_except0 null
let operName = defaultArg operName null
this.TFGraph.TensorArrayConcatV3(handle.TFOutput, flow_in.TFOutput, dtype, element_shape_except0, operName)
/// <summary>
/// Deprecated. Use TensorArrayGatherV3
/// </summary><param name="handle"></param><param name="indices"></param><param name="flow_in"></param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'TensorArrayGatherV2'.
/// </param><param name="element_shape">
/// Optional argument
/// </param><param name="dtype"></param><returns>
/// The TFOperation can be fetched from the resulting TFOutput, by fethching the Operation property from the result.
/// </returns>
member this.TensorArrayGatherV2(handle : TF, indices : TF, flow_in : TF, dtype : TFDataType, ?element_shape : TFShape, ?operName : String) =
let element_shape = defaultArg element_shape null
let operName = defaultArg operName null
TF(this.TFGraph, this.TFGraph.TensorArrayGatherV2(handle.TFOutput, indices.TFOutput, flow_in.TFOutput, dtype, element_shape, operName))
/// <summary>
/// Gather specific elements from the TensorArray into output <c>value</c>.
/// </summary><param name="handle">
/// The handle to a TensorArray.
/// </param><param name="indices">
/// The locations in the TensorArray from which to read tensor elements.
/// </param><param name="flow_in">
/// A float scalar that enforces proper chaining of operations.
/// </param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'TensorArrayGatherV3'.
/// </param><param name="element_shape">
/// Optional argument
/// The expected shape of an element, if known. Used to
/// validate the shapes of TensorArray elements. If this shape is not
/// fully specified, gathering zero-size TensorArrays is an error.
/// </param><param name="dtype">
/// The type of the elem that is returned.
/// </param><returns>
/// All of the elements in the TensorArray, concatenated along a new
/// axis (the new dimension 0).
/// The TFOperation can be fetched from the resulting TFOutput, by fethching the Operation property from the result.
/// </returns><remarks>
/// All elements selected by <c>indices</c> must have the same shape.
/// </remarks>
member this.TensorArrayGatherV3(handle : TF, indices : TF, flow_in : TF, dtype : TFDataType, ?element_shape : TFShape, ?operName : String) =
let element_shape = defaultArg element_shape null
let operName = defaultArg operName null
TF(this.TFGraph, this.TFGraph.TensorArrayGatherV3(handle.TFOutput, indices.TFOutput, flow_in.TFOutput, dtype, element_shape, operName))
/// <summary>
/// Deprecated. Use TensorArrayGradV3
/// </summary><param name="handle"></param><param name="flow_in"></param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'TensorArrayGradV2'.
/// </param><param name="source"></param><returns>
/// The TFOperation can be fetched from the resulting TFOutput, by fethching the Operation property from the result.
/// </returns>
member this.TensorArrayGradV2(handle : TF, flow_in : TF, source : String, ?operName : String) =
let operName = defaultArg operName null
TF(this.TFGraph, this.TFGraph.TensorArrayGradV2(handle.TFOutput, flow_in.TFOutput, source, operName))
/// <summary>
/// Creates a TensorArray for storing the gradients of values in the given handle.
/// </summary><param name="handle">
/// The handle to the forward TensorArray.
/// </param><param name="flow_in">
/// A float scalar that enforces proper chaining of operations.
/// </param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'TensorArrayGradV3'.
/// </param><param name="source">
/// The gradient source string, used to decide which gradient TensorArray
/// to return.
/// </param><returns>
/// Returns a tuple with multiple values, as follows:
/// grad_handle:
/// flow_out:
/// The TFOperation can be fetched from any of the TFOutputs returned in the tuple values, by fethching the Operation property.
/// </returns><remarks>
/// If the given TensorArray gradient already exists, returns a reference to it.
///
/// Locks the size of the original TensorArray by disabling its dynamic size flag.
///
/// **A note about the input flow_in:**
///
/// The handle flow_in forces the execution of the gradient lookup to occur
/// only after certain other operations have occurred. For example, when
/// the forward TensorArray is dynamically sized, writes to this TensorArray
/// may resize the object. The gradient TensorArray is statically sized based
/// on the size of the forward TensorArray when this operation executes.
/// Furthermore, the size of the forward TensorArray is frozen by this call.
/// As a result, the flow is used to ensure that the call to generate the gradient
/// TensorArray only happens after all writes are executed.
///
/// In the case of dynamically sized TensorArrays, gradient computation should
/// only be performed on read operations that have themselves been chained via
/// flow to occur only after all writes have executed. That way the final size
/// of the forward TensorArray is known when this operation is called.
///
/// **A note about the source attribute:**
///
/// TensorArray gradient calls use an accumulator TensorArray object. If
/// multiple gradients are calculated and run in the same session, the multiple
/// gradient nodes may accidentally flow through the same accumulator TensorArray.
/// This double counts and generally breaks the TensorArray gradient flow.
///
/// The solution is to identify which gradient call this particular
/// TensorArray gradient is being called in. This is performed by identifying
/// a unique string (e.g. "gradients", "gradients_1", ...) from the input
/// gradient Tensor's name. This string is used as a suffix when creating
/// the TensorArray gradient object here (the attribute <c>source</c>).
///
/// The attribute <c>source</c> is added as a suffix to the forward TensorArray's
/// name when performing the creation / lookup, so that each separate gradient
/// calculation gets its own TensorArray accumulator.
/// </remarks>
member this.TensorArrayGradV3(handle : TF, flow_in : TF, source : String, ?operName : String) =
let operName = defaultArg operName null
this.TFGraph.TensorArrayGradV3(handle.TFOutput, flow_in.TFOutput, source, operName)
/// <summary>
/// Creates a TensorArray for storing multiple gradients of values in the given handle.
/// </summary><param name="handle">
/// The handle to the forward TensorArray.
/// </param><param name="flow_in">
/// A float scalar that enforces proper chaining of operations.
/// </param><param name="shape_to_prepend">
/// An int32 vector representing a shape. Elements in the gradient accumulator will
/// have shape which is this shape_to_prepend value concatenated with shape of the
/// elements in the TensorArray corresponding to the input handle.
/// </param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'TensorArrayGradWithShape'.
/// </param><param name="source">
/// The gradient source string, used to decide which gradient TensorArray
/// to return.
/// </param><returns>
/// Returns a tuple with multiple values, as follows:
/// grad_handle:
/// flow_out:
/// The TFOperation can be fetched from any of the TFOutputs returned in the tuple values, by fethching the Operation property.
/// </returns><remarks>
/// Similar to TensorArrayGradV3. However it creates an accumulator with an
/// expanded shape compared to the input TensorArray whose gradient is being
/// computed. This enables multiple gradients for the same TensorArray to be
/// calculated using the same accumulator.
/// </remarks>
member this.TensorArrayGradWithShape(handle : TF, flow_in : TF, shape_to_prepend : TF, source : String, ?operName : String) =
let operName = defaultArg operName null
this.TFGraph.TensorArrayGradWithShape(handle.TFOutput, flow_in.TFOutput, shape_to_prepend.TFOutput, source, operName)
/// <summary>
/// Deprecated. Use TensorArrayReadV3
/// </summary><param name="handle"></param><param name="index"></param><param name="flow_in"></param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'TensorArrayReadV2'.
/// </param><param name="dtype"></param><returns>
/// The TFOperation can be fetched from the resulting TFOutput, by fethching the Operation property from the result.
/// </returns>
member this.TensorArrayReadV2(handle : TF, index : TF, flow_in : TF, dtype : TFDataType, ?operName : String) =
let operName = defaultArg operName null
TF(this.TFGraph, this.TFGraph.TensorArrayReadV2(handle.TFOutput, index.TFOutput, flow_in.TFOutput, dtype, operName))
/// <summary>
/// Read an element from the TensorArray into output <c>value</c>.
/// </summary><param name="handle">
/// The handle to a TensorArray.
/// </param><param name="index"></param><param name="flow_in">
/// A float scalar that enforces proper chaining of operations.
/// </param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'TensorArrayReadV3'.
/// </param><param name="dtype">
/// The type of the elem that is returned.
/// </param><returns>
/// The tensor that is read from the TensorArray.
/// The TFOperation can be fetched from the resulting TFOutput, by fethching the Operation property from the result.
/// </returns>
member this.TensorArrayReadV3(handle : TF, index : TF, flow_in : TF, dtype : TFDataType, ?operName : String) =
let operName = defaultArg operName null
TF(this.TFGraph, this.TFGraph.TensorArrayReadV3(handle.TFOutput, index.TFOutput, flow_in.TFOutput, dtype, operName))
/// <summary>
/// Deprecated. Use TensorArrayScatterV3
/// </summary><param name="handle"></param><param name="indices"></param><param name="value"></param><param name="flow_in"></param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'TensorArrayScatterV2'.
/// </param><returns>
/// The TFOperation can be fetched from the resulting TFOutput, by fethching the Operation property from the result.
/// </returns>
member this.TensorArrayScatterV2(handle : TF, indices : TF, value : TF, flow_in : TF, ?operName : String) =
let operName = defaultArg operName null
TF(this.TFGraph, this.TFGraph.TensorArrayScatterV2(handle.TFOutput, indices.TFOutput, value.TFOutput, flow_in.TFOutput, operName))
/// <summary>
/// Scatter the data from the input value into specific TensorArray elements.
/// </summary><param name="handle">
/// The handle to a TensorArray.
/// </param><param name="indices">
/// The locations at which to write the tensor elements.
/// </param><param name="value">
/// The concatenated tensor to write to the TensorArray.
/// </param><param name="flow_in">
/// A float scalar that enforces proper chaining of operations.
/// </param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'TensorArrayScatterV3'.
/// </param><returns>
/// A float scalar that enforces proper chaining of operations.
/// The TFOperation can be fetched from the resulting TFOutput, by fethching the Operation property from the result.
/// </returns><remarks><c>indices</c> must be a vector, its length must match the first dim of <c>value</c>.
/// </remarks>
member this.TensorArrayScatterV3(handle : TF, indices : TF, value : TF, flow_in : TF, ?operName : String) =
let operName = defaultArg operName null
TF(this.TFGraph, this.TFGraph.TensorArrayScatterV3(handle.TFOutput, indices.TFOutput, value.TFOutput, flow_in.TFOutput, operName))
/// <summary>
/// Deprecated. Use TensorArraySizeV3
/// </summary><param name="handle"></param><param name="flow_in"></param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'TensorArraySizeV2'.
/// </param><returns>
/// The TFOperation can be fetched from the resulting TFOutput, by fethching the Operation property from the result.
/// </returns>
member this.TensorArraySizeV2(handle : TF, flow_in : TF, ?operName : String) =
let operName = defaultArg operName null
TF(this.TFGraph, this.TFGraph.TensorArraySizeV2(handle.TFOutput, flow_in.TFOutput, operName))
/// <summary>
/// Get the current size of the TensorArray.
/// </summary><param name="handle">
/// The handle to a TensorArray (output of TensorArray or TensorArrayGrad).
/// </param><param name="flow_in">
/// A float scalar that enforces proper chaining of operations.
/// </param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'TensorArraySizeV3'.
/// </param><returns>
/// The current size of the TensorArray.
/// The TFOperation can be fetched from the resulting TFOutput, by fethching the Operation property from the result.
/// </returns>
member this.TensorArraySizeV3(handle : TF, flow_in : TF, ?operName : String) =
let operName = defaultArg operName null
TF(this.TFGraph, this.TFGraph.TensorArraySizeV3(handle.TFOutput, flow_in.TFOutput, operName))
/// <summary>
/// Deprecated. Use TensorArraySplitV3
/// </summary><param name="handle"></param><param name="value"></param><param name="lengths"></param><param name="flow_in"></param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'TensorArraySplitV2'.
/// </param><returns>
/// The TFOperation can be fetched from the resulting TFOutput, by fethching the Operation property from the result.
/// </returns>
member this.TensorArraySplitV2(handle : TF, value : TF, lengths : TF, flow_in : TF, ?operName : String) =
let operName = defaultArg operName null
TF(this.TFGraph, this.TFGraph.TensorArraySplitV2(handle.TFOutput, value.TFOutput, lengths.TFOutput, flow_in.TFOutput, operName))
member this.TensorArraySplitV3(handle : TF, value : TF, lengths : TF, flow_in : TF, ?operName : String) =
let operName = defaultArg operName null
TF(this.TFGraph, this.TFGraph.TensorArraySplitV3(handle.TFOutput, value.TFOutput, lengths.TFOutput, flow_in.TFOutput, operName))
/// <summary>
/// Deprecated. Use TensorArrayV3
/// </summary><param name="size"></param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'TensorArrayV2'.
/// </param><param name="element_shape">
/// Optional argument
/// </param><param name="dynamic_size">
/// Optional argument
/// </param><param name="clear_after_read">
/// Optional argument
/// </param><param name="tensor_array_name">
/// Optional argument
/// </param><param name="dtype"></param><returns>
/// The TFOperation can be fetched from the resulting TFOutput, by fethching the Operation property from the result.
/// </returns>
member this.TensorArrayV2(size : TF, dtype : TFDataType, ?element_shape : TFShape, ?dynamic_size : Boolean, ?clear_after_read : Boolean, ?tensor_array_name : String, ?operName : String) =
let element_shape = defaultArg element_shape null
let dynamic_size = defaultArg (dynamic_size |> Option.map Nullable) (Nullable())
let clear_after_read = defaultArg (clear_after_read |> Option.map Nullable) (Nullable())
let tensor_array_name = defaultArg tensor_array_name null
let operName = defaultArg operName null
TF(this.TFGraph, this.TFGraph.TensorArrayV2(size.TFOutput, dtype, element_shape, dynamic_size, clear_after_read, tensor_array_name, operName))
/// <summary>
/// An array of Tensors of given size.
/// </summary><param name="size">
/// The size of the array.
/// </param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'TensorArrayV3'.
/// </param><param name="element_shape">
/// Optional argument
/// The expected shape of an element, if known. Used to
/// validate the shapes of TensorArray elements. If this shape is not
/// fully specified, gathering zero-size TensorArrays is an error.
/// </param><param name="dynamic_size">
/// Optional argument
/// A boolean that determines whether writes to the TensorArray
/// are allowed to grow the size. By default, this is not allowed.
/// </param><param name="clear_after_read">
/// Optional argument
/// If true (default), Tensors in the TensorArray are cleared
/// after being read. This disables multiple read semantics but allows early
/// release of memory.
/// </param><param name="identical_element_shapes">
/// Optional argument
/// If true (default is false), then all
/// elements in the TensorArray will be expected to have have identical shapes.
/// This allows certain behaviors, like dynamically checking for
/// consistent shapes on write, and being able to fill in properly
/// shaped zero tensors on stack -- even if the element_shape attribute
/// is not fully defined.
/// </param><param name="tensor_array_name">
/// Optional argument
/// Overrides the name used for the temporary tensor_array
/// resource. Default value is the name of the 'TensorArray' op (which
/// is guaranteed unique).
/// </param><param name="dtype">
/// The type of the elements on the tensor_array.
/// </param><returns>
/// Returns a tuple with multiple values, as follows:
/// handle: The handle to the TensorArray.
/// flow: A scalar used to control gradient flow.
/// The TFOperation can be fetched from any of the TFOutputs returned in the tuple values, by fethching the Operation property.
/// </returns><remarks>
/// Write data via Write and read via Read or Pack.
/// </remarks>
member this.TensorArrayV3(size : TF, dtype : TFDataType, ?element_shape : TFShape, ?dynamic_size : Boolean, ?clear_after_read : Boolean, ?identical_element_shapes : Boolean, ?tensor_array_name : String, ?operName : String) =
let element_shape = defaultArg element_shape null
let dynamic_size = defaultArg (dynamic_size |> Option.map Nullable) (Nullable())
let clear_after_read = defaultArg (clear_after_read |> Option.map Nullable) (Nullable())
let identical_element_shapes = defaultArg (identical_element_shapes |> Option.map Nullable) (Nullable())
let tensor_array_name = defaultArg tensor_array_name null
let operName = defaultArg operName null
this.TFGraph.TensorArrayV3(size.TFOutput, dtype, element_shape, dynamic_size, clear_after_read, identical_element_shapes, tensor_array_name, operName)
/// <summary>
/// Deprecated. Use TensorArrayGradV3
/// </summary><param name="handle"></param><param name="index"></param><param name="value"></param><param name="flow_in"></param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'TensorArrayWriteV2'.
/// </param><returns>
/// The TFOperation can be fetched from the resulting TFOutput, by fethching the Operation property from the result.
/// </returns>
member this.TensorArrayWriteV2(handle : TF, index : TF, value : TF, flow_in : TF, ?operName : String) =
let operName = defaultArg operName null
TF(this.TFGraph, this.TFGraph.TensorArrayWriteV2(handle.TFOutput, index.TFOutput, value.TFOutput, flow_in.TFOutput, operName))
/// <summary>
/// Push an element onto the tensor_array.
/// </summary><param name="handle">
/// The handle to a TensorArray.
/// </param><param name="index">
/// The position to write to inside the TensorArray.
/// </param><param name="value">
/// The tensor to write to the TensorArray.
/// </param><param name="flow_in">
/// A float scalar that enforces proper chaining of operations.
/// </param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'TensorArrayWriteV3'.
/// </param><returns>
/// A float scalar that enforces proper chaining of operations.
/// The TFOperation can be fetched from the resulting TFOutput, by fethching the Operation property from the result.
/// </returns>
member this.TensorArrayWriteV3(handle : TF, index : TF, value : TF, flow_in : TF, ?operName : String) =
let operName = defaultArg operName null
TF(this.TFGraph, this.TFGraph.TensorArrayWriteV3(handle.TFOutput, index.TFOutput, value.TFOutput, flow_in.TFOutput, operName))
/// <summary>
/// Creates a dataset that emits <c>components</c> as a tuple of tensors once.
/// </summary><param name="components"></param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'TensorDataset'.
/// </param><param name="output_shapes"></param><returns>
/// The TFOperation can be fetched from the resulting TFOutput, by fethching the Operation property from the result.
/// </returns>
member this.TensorDataset(components : TF[], output_shapes : TFShape[], ?operName : String) =
let operName = defaultArg operName null
TF(this.TFGraph, this.TFGraph.TensorDataset(components |> Array.map (fun x -> x.TFOutput), output_shapes, operName))
/// <summary>
/// The shape of the elements of the given list, as a tensor.
/// </summary><param name="input_handle"></param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'TensorListElementShape'.
/// </param><param name="shape_type"></param><returns>
/// The TFOperation can be fetched from the resulting TFOutput, by fethching the Operation property from the result.
/// </returns><remarks>
/// input_handle: the list
/// element_shape: the shape of elements of the list
/// </remarks>
member this.TensorListElementShape(input_handle : TF, shape_type : TFDataType, ?operName : String) =
let operName = defaultArg operName null
TF(this.TFGraph, this.TFGraph.TensorListElementShape(input_handle.TFOutput, shape_type, operName))
/// <summary>
/// Creates a TensorList which, when stacked, has the value of <c>tensor</c>.
/// </summary><param name="tensor"></param><param name="element_shape"></param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'TensorListFromTensor'.
/// </param><returns>
/// The TFOperation can be fetched from the resulting TFOutput, by fethching the Operation property from the result.
/// </returns><remarks>
/// Each tensor in the result list corresponds to one row of the input tensor.
///
/// tensor: The input tensor.
/// output_handle: The list.
/// </remarks>
member this.TensorListFromTensor(tensor : TF, element_shape : TF, ?operName : String) =
let operName = defaultArg operName null
TF(this.TFGraph, this.TFGraph.TensorListFromTensor(tensor.TFOutput, element_shape.TFOutput, operName))
/// <summary>
/// Creates a Tensor by indexing into the TensorList.
/// </summary><param name="input_handle"></param><param name="indices"></param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'TensorListGather'.
/// </param><param name="element_dtype"></param><returns>
/// The TFOperation can be fetched from the resulting TFOutput, by fethching the Operation property from the result.
/// </returns><remarks>
/// Each row in the produced Tensor corresponds to the element in the TensorList
/// specified by the given index (see <c>tf.gather</c>).
///
/// input_handle: The input tensor list.
/// indices: The indices used to index into the list.
/// values: The tensor.
/// </remarks>
member this.TensorListGather(input_handle : TF, indices : TF, element_dtype : TFDataType, ?operName : String) =
let operName = defaultArg operName null
TF(this.TFGraph, this.TFGraph.TensorListGather(input_handle.TFOutput, indices.TFOutput, element_dtype, operName))
/// <summary>
/// Returns the item in the list with the given index.
/// </summary><param name="input_handle"></param><param name="index"></param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'TensorListGetItem'.
/// </param><param name="element_dtype"></param><returns>
/// The TFOperation can be fetched from the resulting TFOutput, by fethching the Operation property from the result.
/// </returns><remarks>
/// input_handle: the list
/// index: the position in the list from which an element will be retrieved
/// item: the element at that position
///
///
/// </remarks>
member this.TensorListGetItem(input_handle : TF, index : TF, element_dtype : TFDataType, ?operName : String) =
let operName = defaultArg operName null
TF(this.TFGraph, this.TFGraph.TensorListGetItem(input_handle.TFOutput, index.TFOutput, element_dtype, operName))
/// <summary>
/// Returns the number of tensors in the input tensor list.
/// </summary><param name="input_handle"></param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'TensorListLength'.
/// </param><returns>
/// The TFOperation can be fetched from the resulting TFOutput, by fethching the Operation property from the result.
/// </returns><remarks>
/// input_handle: the input list
/// length: the number of tensors in the list
/// </remarks>
member this.TensorListLength(input_handle : TF, ?operName : String) =
let operName = defaultArg operName null
TF(this.TFGraph, this.TFGraph.TensorListLength(input_handle.TFOutput, operName))
/// <summary>
/// Returns the last element of the input list as well as a list with all but that element.
/// </summary><param name="input_handle"></param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'TensorListPopBack'.
/// </param><param name="element_dtype"></param><returns>
/// Returns a tuple with multiple values, as follows:
/// output_handle:
/// tensor:
/// The TFOperation can be fetched from any of the TFOutputs returned in the tuple values, by fethching the Operation property.
/// </returns><remarks>
/// Fails if the list is empty.
///
/// input_handle: the input list
/// tensor: the withdrawn last element of the list
/// element_dtype: the type of elements in the list
/// element_shape: the shape of the output tensor
/// </remarks>
member this.TensorListPopBack(input_handle : TF, element_dtype : TFDataType, ?operName : String) =
let operName = defaultArg operName null
this.TFGraph.TensorListPopBack(input_handle.TFOutput, element_dtype, operName)
/// <summary>
/// Returns a list list which has the passed-in <c>Tensor</c> as last element and the other elements of the given list in <c>input_handle</c>.
/// </summary><param name="input_handle"></param><param name="tensor"></param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'TensorListPushBack'.
/// </param><returns>
/// The TFOperation can be fetched from the resulting TFOutput, by fethching the Operation property from the result.
/// </returns><remarks>
/// tensor: The tensor to put on the list.
/// input_handle: The old list.
/// output_handle: A list with the elements of the old list followed by tensor.
/// element_dtype: the type of elements in the list.
/// element_shape: a shape compatible with that of elements in the list.
/// </remarks>
member this.TensorListPushBack(input_handle : TF, tensor : TF, ?operName : String) =
let operName = defaultArg operName null
TF(this.TFGraph, this.TFGraph.TensorListPushBack(input_handle.TFOutput, tensor.TFOutput, operName))
/// <summary>
/// List of the given size with empty elements.
/// </summary><param name="element_shape"></param><param name="num_elements"></param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'TensorListReserve'.
/// </param><param name="element_dtype"></param><returns>
/// The TFOperation can be fetched from the resulting TFOutput, by fethching the Operation property from the result.
/// </returns><remarks>
/// element_shape: the shape of the future elements of the list
/// num_elements: the number of elements to reserve
/// handle: the output list
/// element_dtype: the desired type of elements in the list.
/// </remarks>
member this.TensorListReserve(element_shape : TF, num_elements : TF, element_dtype : TFDataType, ?operName : String) =
let operName = defaultArg operName null
TF(this.TFGraph, this.TFGraph.TensorListReserve(element_shape.TFOutput, num_elements.TFOutput, element_dtype, operName))
/// <summary>
/// Creates a TensorList by indexing into a Tensor.
/// </summary><param name="tensor"></param><param name="indices"></param><param name="element_shape"></param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'TensorListScatter'.
/// </param><returns>
/// The TFOperation can be fetched from the resulting TFOutput, by fethching the Operation property from the result.
/// </returns><remarks>
/// Each member of the TensorList corresponds to one row of the input tensor,
/// specified by the given index (see <c>tf.gather</c>).
///
/// tensor: The input tensor.
/// indices: The indices used to index into the list.
/// element_shape: The shape of the elements in the list (can be less specified than
/// the shape of the tensor).
/// output_handle: The TensorList.
/// </remarks>
member this.TensorListScatter(tensor : TF, indices : TF, element_shape : TF, ?operName : String) =
let operName = defaultArg operName null
TF(this.TFGraph, this.TFGraph.TensorListScatter(tensor.TFOutput, indices.TFOutput, element_shape.TFOutput, operName))
/// <summary>
/// Sets the index-th position of the list to contain the given tensor.
/// </summary><param name="input_handle"></param><param name="index"></param><param name="item"></param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'TensorListSetItem'.
/// </param><returns>
/// The TFOperation can be fetched from the resulting TFOutput, by fethching the Operation property from the result.
/// </returns><remarks>
/// input_handle: the list
/// index: the position in the list to which the tensor will be assigned
/// item: the element to be assigned to that position
/// output_handle: the new list, with the element in the proper position
///
/// </remarks>
member this.TensorListSetItem(input_handle : TF, index : TF, item : TF, ?operName : String) =
let operName = defaultArg operName null
TF(this.TFGraph, this.TFGraph.TensorListSetItem(input_handle.TFOutput, index.TFOutput, item.TFOutput, operName))
/// <summary>
/// Stacks all tensors in the list.
/// </summary><param name="input_handle"></param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'TensorListStack'.
/// </param><param name="num_elements">
/// Optional argument
/// </param><param name="element_dtype"></param><returns>
/// The TFOperation can be fetched from the resulting TFOutput, by fethching the Operation property from the result.
/// </returns><remarks>
/// Requires that all tensors have the same shape.
///
/// input_handle: the input list
/// tensor: the gathered result
/// num_elements: optional. If not -1, the number of elements in the list.
///
/// </remarks>
member this.TensorListStack(input_handle : TF, element_dtype : TFDataType, ?num_elements : Int64, ?operName : String) =
let num_elements = defaultArg (num_elements |> Option.map Nullable) (Nullable())
let operName = defaultArg operName null
TF(this.TFGraph, this.TFGraph.TensorListStack(input_handle.TFOutput, element_dtype, num_elements, operName))
/// <summary>
/// Creates a dataset that emits each dim-0 slice of <c>components</c> once.
/// </summary><param name="components"></param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'TensorSliceDataset'.
/// </param><param name="output_shapes"></param><returns>
/// The TFOperation can be fetched from the resulting TFOutput, by fethching the Operation property from the result.
/// </returns>
member this.TensorSliceDataset(components : TF[], output_shapes : TFShape[], ?operName : String) =
let operName = defaultArg operName null
TF(this.TFGraph, this.TFGraph.TensorSliceDataset(components |> Array.map (fun x -> x.TFOutput), output_shapes, operName))
/// <summary>
/// Outputs a <c>Summary</c> protocol buffer with a tensor.
/// </summary><param name="tensor">
/// A tensor to serialize.
/// </param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'TensorSummary'.
/// </param><param name="description">
/// Optional argument
/// A json-encoded SummaryDescription proto.
/// </param><param name="labels">
/// Optional argument
/// An unused list of strings.
/// </param><param name="display_name">
/// Optional argument
/// An unused string.
/// </param><returns>
/// The TFOperation can be fetched from the resulting TFOutput, by fethching the Operation property from the result.
/// </returns><remarks>
/// This op is being phased out in favor of TensorSummaryV2, which lets callers pass
/// a tag as well as a serialized SummaryMetadata proto string that contains
/// plugin-specific data. We will keep this op to maintain backwards compatibility.
/// </remarks>
member this.TensorSummary(tensor : TF, ?description : String, ?labels : String[], ?display_name : String, ?operName : String) =
let description = defaultArg description null
let labels = defaultArg labels null
let display_name = defaultArg display_name null
let operName = defaultArg operName null
TF(this.TFGraph, this.TFGraph.TensorSummary(tensor.TFOutput, description, labels, display_name, operName))
/// <summary>
/// Outputs a <c>Summary</c> protocol buffer with a tensor and per-plugin data.
/// </summary><param name="tag">
/// A string attached to this summary. Used for organization in TensorBoard.
/// </param><param name="tensor">
/// A tensor to serialize.
/// </param><param name="serialized_summary_metadata">
/// A serialized SummaryMetadata proto. Contains plugin
/// data.
/// </param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'TensorSummaryV2'.
/// </param><returns>
/// The TFOperation can be fetched from the resulting TFOutput, by fethching the Operation property from the result.
/// </returns>
member this.TensorSummaryV2(tag : TF, tensor : TF, serialized_summary_metadata : TF, ?operName : String) =
let operName = defaultArg operName null
TF(this.TFGraph, this.TFGraph.TensorSummaryV2(tag.TFOutput, tensor.TFOutput, serialized_summary_metadata.TFOutput, operName))
/// <summary>
/// Creates a dataset that emits the lines of one or more text files.
/// </summary><param name="filenames">
/// A scalar or a vector containing the name(s) of the file(s) to be
/// read.
/// </param><param name="compression_type">
/// A scalar containing either (i) the empty string (no
/// compression), (ii) "ZLIB", or (iii) "GZIP".
/// </param><param name="buffer_size">
/// A scalar containing the number of bytes to buffer.
/// </param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'TextLineDataset'.
/// </param><returns>
/// The TFOperation can be fetched from the resulting TFOutput, by fethching the Operation property from the result.
/// </returns>
member this.TextLineDataset(filenames : TF, compression_type : TF, buffer_size : TF, ?operName : String) =
let operName = defaultArg operName null
TF(this.TFGraph, this.TFGraph.TextLineDataset(filenames.TFOutput, compression_type.TFOutput, buffer_size.TFOutput, operName))
/// <summary>
/// A Reader that outputs the lines of a file delimited by '\n'.
/// </summary><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'TextLineReader'.
/// </param><param name="skip_header_lines">
/// Optional argument
/// Number of lines to skip from the beginning of every file.
/// </param><param name="container">
/// Optional argument
/// If non-empty, this reader is placed in the given container.
/// Otherwise, a default container is used.
/// </param><param name="shared_name">
/// Optional argument
/// If non-empty, this reader is named in the given bucket
/// with this shared_name. Otherwise, the node name is used instead.
/// </param><returns>
/// The handle to reference the Reader.
/// The TFOperation can be fetched from the resulting TFOutput, by fethching the Operation property from the result.
/// </returns>
member this.TextLineReader(?skip_header_lines : Int64, ?container : String, ?shared_name : String, ?operName : String) =
let skip_header_lines = defaultArg (skip_header_lines |> Option.map Nullable) (Nullable())
let container = defaultArg container null
let shared_name = defaultArg shared_name null
let operName = defaultArg operName null
TF(this.TFGraph, this.TFGraph.TextLineReader(skip_header_lines, container, shared_name, operName))
/// <summary>
/// A Reader that outputs the lines of a file delimited by '\n'.
/// </summary><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'TextLineReaderV2'.
/// </param><param name="skip_header_lines">
/// Optional argument
/// Number of lines to skip from the beginning of every file.
/// </param><param name="container">
/// Optional argument
/// If non-empty, this reader is placed in the given container.
/// Otherwise, a default container is used.
/// </param><param name="shared_name">
/// Optional argument
/// If non-empty, this reader is named in the given bucket
/// with this shared_name. Otherwise, the node name is used instead.
/// </param><returns>
/// The handle to reference the Reader.
/// The TFOperation can be fetched from the resulting TFOutput, by fethching the Operation property from the result.
/// </returns>
member this.TextLineReaderV2(?skip_header_lines : Int64, ?container : String, ?shared_name : String, ?operName : String) =
let skip_header_lines = defaultArg (skip_header_lines |> Option.map Nullable) (Nullable())
let container = defaultArg container null
let shared_name = defaultArg shared_name null
let operName = defaultArg operName null
TF(this.TFGraph, this.TFGraph.TextLineReaderV2(skip_header_lines, container, shared_name, operName))
/// <summary>
/// Creates a dataset that emits the records from one or more TFRecord files.
/// </summary><param name="filenames">
/// A scalar or vector containing the name(s) of the file(s) to be
/// read.
/// </param><param name="compression_type">
/// A scalar containing either (i) the empty string (no
/// compression), (ii) "ZLIB", or (iii) "GZIP".
/// </param><param name="buffer_size">
/// A scalar representing the number of bytes to buffer. A value of
/// 0 means no buffering will be performed.
/// </param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'TFRecordDataset'.
/// </param><returns>
/// The TFOperation can be fetched from the resulting TFOutput, by fethching the Operation property from the result.
/// </returns>
member this.TFRecordDataset(filenames : TF, compression_type : TF, buffer_size : TF, ?operName : String) =
let operName = defaultArg operName null
TF(this.TFGraph, this.TFGraph.TFRecordDataset(filenames.TFOutput, compression_type.TFOutput, buffer_size.TFOutput, operName))
/// <summary>
/// A Reader that outputs the records from a TensorFlow Records file.
/// </summary><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'TFRecordReader'.
/// </param><param name="container">
/// Optional argument
/// If non-empty, this reader is placed in the given container.
/// Otherwise, a default container is used.
/// </param><param name="shared_name">
/// Optional argument
/// If non-empty, this reader is named in the given bucket
/// with this shared_name. Otherwise, the node name is used instead.
/// </param><param name="compression_type">
/// Optional argument
/// </param><returns>
/// The handle to reference the Reader.
/// The TFOperation can be fetched from the resulting TFOutput, by fethching the Operation property from the result.
/// </returns>
member this.TFRecordReader(?container : String, ?shared_name : String, ?compression_type : String, ?operName : String) =
let container = defaultArg container null
let shared_name = defaultArg shared_name null
let compression_type = defaultArg compression_type null
let operName = defaultArg operName null
TF(this.TFGraph, this.TFGraph.TFRecordReader(container, shared_name, compression_type, operName))
/// <summary>
/// A Reader that outputs the records from a TensorFlow Records file.
/// </summary><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'TFRecordReaderV2'.
/// </param><param name="container">
/// Optional argument
/// If non-empty, this reader is placed in the given container.
/// Otherwise, a default container is used.
/// </param><param name="shared_name">
/// Optional argument
/// If non-empty, this reader is named in the given bucket
/// with this shared_name. Otherwise, the node name is used instead.
/// </param><param name="compression_type">
/// Optional argument
/// </param><returns>
/// The handle to reference the Reader.
/// The TFOperation can be fetched from the resulting TFOutput, by fethching the Operation property from the result.
/// </returns>
member this.TFRecordReaderV2(?container : String, ?shared_name : String, ?compression_type : String, ?operName : String) =
let container = defaultArg container null
let shared_name = defaultArg shared_name null
let compression_type = defaultArg compression_type null
let operName = defaultArg operName null
TF(this.TFGraph, this.TFGraph.TFRecordReaderV2(container, shared_name, compression_type, operName))
/// <summary>
/// Generates labels for candidate sampling with a learned unigram distribution.
/// </summary><param name="true_classes">
/// A batch_size * num_true matrix, in which each row contains the
/// IDs of the num_true target_classes in the corresponding original label.
/// </param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'ThreadUnsafeUnigramCandidateSampler'.
/// </param><param name="seed">
/// Optional argument
/// If either seed or seed2 are set to be non-zero, the random number
/// generator is seeded by the given seed. Otherwise, it is seeded by a
/// random seed.
/// </param><param name="seed2">
/// Optional argument
/// An second seed to avoid seed collision.
/// </param><param name="num_true">
/// Number of true labels per context.
/// </param><param name="num_sampled">
/// Number of candidates to randomly sample.
/// </param><param name="unique">
/// If unique is true, we sample with rejection, so that all sampled
/// candidates in a batch are unique. This requires some approximation to
/// estimate the post-rejection sampling probabilities.
/// </param><param name="range_max">
/// The sampler will sample integers from the interval [0, range_max).
/// </param><returns>
/// Returns a tuple with multiple values, as follows:
/// sampled_candidates: A vector of length num_sampled, in which each element is
/// the ID of a sampled candidate.
/// true_expected_count: A batch_size * num_true matrix, representing
/// the number of times each candidate is expected to occur in a batch
/// of sampled candidates. If unique=true, then this is a probability.
/// sampled_expected_count: A vector of length num_sampled, for each sampled
/// candidate representing the number of times the candidate is expected
/// to occur in a batch of sampled candidates. If unique=true, then this is a
/// probability.
/// The TFOperation can be fetched from any of the TFOutputs returned in the tuple values, by fethching the Operation property.
/// </returns><remarks>
/// See explanations of candidate sampling and the data formats at
/// go/candidate-sampling.
///
/// For each batch, this op picks a single set of sampled candidate labels.
///
/// The advantages of sampling candidates per-batch are simplicity and the
/// possibility of efficient dense matrix multiplication. The disadvantage is that
/// the sampled candidates must be chosen independently of the context and of the
/// true labels.
/// </remarks>
member this.ThreadUnsafeUnigramCandidateSampler(true_classes : TF, num_true : Int64, num_sampled : Int64, unique : Boolean, range_max : Int64, ?seed : Int64, ?seed2 : Int64, ?operName : String) =
let seed = defaultArg (seed |> Option.map Nullable) (Nullable())
let seed2 = defaultArg (seed2 |> Option.map Nullable) (Nullable())
let operName = defaultArg operName null
this.TFGraph.ThreadUnsafeUnigramCandidateSampler(true_classes.TFOutput, num_true, num_sampled, unique, range_max, seed, seed2, operName)
/// <summary>
/// Constructs a tensor by tiling a given tensor.
/// </summary><param name="input">
/// 1-D or higher.
/// </param><param name="multiples">
/// 1-D. Length must be the same as the number of dimensions in <c>input</c></param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'Tile'.
/// </param><returns>
/// The TFOperation can be fetched from the resulting TFOutput, by fethching the Operation property from the result.
/// </returns><remarks>
/// This operation creates a new tensor by replicating <c>input</c><c>multiples</c> times.
/// The output tensor's i'th dimension has <c>input.dims(i) * multiples[i]</c> elements,
/// and the values of <c>input</c> are replicated <c>multiples[i]</c> times along the 'i'th
/// dimension. For example, tiling <c>[a b c d]</c> by <c>[2]</c> produces
/// <c>[a b c d a b c d]</c>.
/// </remarks>
member this.Tile(input : TF, multiples : TF, ?operName : String) =
let operName = defaultArg operName null
TF(this.TFGraph, this.TFGraph.Tile(input.TFOutput, multiples.TFOutput, operName))
/// <summary>
/// Returns the gradient of <c>Tile</c>.
/// </summary><param name="input"></param><param name="multiples"></param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'TileGrad'.
/// </param><returns>
/// The TFOperation can be fetched from the resulting TFOutput, by fethching the Operation property from the result.
/// </returns><remarks>
/// Since <c>Tile</c> takes an input and repeats the input <c>multiples</c> times
/// along each dimension, <c>TileGrad</c> takes in <c>multiples</c> and aggregates
/// each repeated tile of <c>input</c> into <c>output</c>.
/// </remarks>
member this.TileGrad(input : TF, multiples : TF, ?operName : String) =
let operName = defaultArg operName null
TF(this.TFGraph, this.TFGraph.TileGrad(input.TFOutput, multiples.TFOutput, operName))
/// <summary>
/// Provides the time since epoch in seconds.
/// </summary><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'Timestamp'.
/// </param><returns>
/// The TFOperation can be fetched from the resulting TFOutput, by fethching the Operation property from the result.
/// </returns><remarks>
/// Returns the timestamp as a <c>float64</c> for seconds since the Unix epoch.
///
/// Note: the timestamp is computed when the op is executed, not when it is added
/// to the graph.
/// </remarks>
member this.Timestamp(?operName : String) =
let operName = defaultArg operName null
TF(this.TFGraph, this.TFGraph.Timestamp(operName))
/// <summary>
/// Finds values and indices of the <c>k</c> largest elements for the last dimension.
/// </summary><param name="input">
/// 1-D or higher with last dimension at least <c>k</c>.
/// </param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'TopK'.
/// </param><param name="sorted">
/// Optional argument
/// If true the resulting <c>k</c> elements will be sorted by the values in
/// descending order.
/// </param><param name="k">
/// Number of top elements to look for along the last dimension (along each
/// row for matrices).
/// </param><returns>
/// Returns a tuple with multiple values, as follows:
/// values: The <c>k</c> largest elements along each last dimensional slice.
/// indices: The indices of <c>values</c> within the last dimension of <c>input</c>.
/// The TFOperation can be fetched from any of the TFOutputs returned in the tuple values, by fethching the Operation property.
/// </returns><remarks>
/// If the input is a vector (rank-1), finds the <c>k</c> largest entries in the vector
/// and outputs their values and indices as vectors. Thus <c>values[j]</c> is the
/// <c>j</c>-th largest entry in <c>input</c>, and its index is <c>indices[j]</c>.
///
/// For matrices (resp. higher rank input), computes the top <c>k</c> entries in each
/// row (resp. vector along the last dimension). Thus,
///
/// values.shape = indices.shape = input.shape[:-1] + [k]
///
/// If two elements are equal, the lower-index element appears first.
///
/// If <c>k</c> varies dynamically, use <c>TopKV2</c> below.
/// </remarks>
member this.TopK(input : TF, k : Int64, ?sorted : Boolean, ?operName : String) =
let sorted = defaultArg (sorted |> Option.map Nullable) (Nullable())
let operName = defaultArg operName null
this.TFGraph.TopK(input.TFOutput, k, sorted, operName)
/// <summary>
/// Finds values and indices of the <c>k</c> largest elements for the last dimension.
/// </summary><param name="input">
/// 1-D or higher with last dimension at least <c>k</c>.
/// </param><param name="k">
/// 0-D. Number of top elements to look for along the last dimension (along each
/// row for matrices).
/// </param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'TopKV2'.
/// </param><param name="sorted">
/// Optional argument
/// If true the resulting <c>k</c> elements will be sorted by the values in
/// descending order.
/// </param><returns>
/// Returns a tuple with multiple values, as follows:
/// values: The <c>k</c> largest elements along each last dimensional slice.
/// indices: The indices of <c>values</c> within the last dimension of <c>input</c>.
/// The TFOperation can be fetched from any of the TFOutputs returned in the tuple values, by fethching the Operation property.
/// </returns><remarks>
/// If the input is a vector (rank-1), finds the <c>k</c> largest entries in the vector
/// and outputs their values and indices as vectors. Thus <c>values[j]</c> is the
/// <c>j</c>-th largest entry in <c>input</c>, and its index is <c>indices[j]</c>.
///
/// For matrices (resp. higher rank input), computes the top <c>k</c> entries in each
/// row (resp. vector along the last dimension). Thus,
///
/// values.shape = indices.shape = input.shape[:-1] + [k]
///
/// If two elements are equal, the lower-index element appears first.
/// </remarks>
member this.TopKV2(input : TF, k : TF, ?sorted : Boolean, ?operName : String) =
let sorted = defaultArg (sorted |> Option.map Nullable) (Nullable())
let operName = defaultArg operName null
this.TFGraph.TopKV2(input.TFOutput, k.TFOutput, sorted, operName)
/// <summary>
/// An op enabling differentiation of TPU Embeddings.
/// </summary><param name="embedding_variable">
/// A trainable variable, enabling optimizers to find this op.
/// </param><param name="sliced_activations">
/// The embedding activations Tensor to return.
/// </param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'TPUEmbeddingActivations'.
/// </param><param name="table_id">
/// The id of the table in the embedding layer configuration from which
/// these activations were computed.
/// </param><param name="lookup_id">
/// Identifier of the set of embedding indices which produced these
/// activations.
/// </param><returns>
/// The TFOperation can be fetched from the resulting TFOutput, by fethching the Operation property from the result.
/// </returns><remarks>
/// This op simply returns its first input, which is assumed to have been sliced
/// from the Tensors returned by TPUEmbeddingDequeueActivations. The presence of this
/// op, and its first argument being a trainable Variable, enables automatic
/// differentiation of graphs containing embeddings via the TPU Embedding Python
/// libraries.
/// </remarks>
member this.TPUEmbeddingActivations(embedding_variable : TF, sliced_activations : TF, table_id : Int64, lookup_id : Int64, ?operName : String) =
let operName = defaultArg operName null
TF(this.TFGraph, this.TFGraph.TPUEmbeddingActivations(embedding_variable.TFOutput, sliced_activations.TFOutput, table_id, lookup_id, operName))
/// <summary>
/// Operator that connects N unreplicated inputs to an N-way replicated TPU computation.
/// </summary><param name="inputs"></param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'TPUReplicatedInput'.
/// </param><returns>
/// The TFOperation can be fetched from the resulting TFOutput, by fethching the Operation property from the result.
/// </returns>
member this.TPUReplicatedInput(inputs : TF[], ?operName : String) =
let operName = defaultArg operName null
TF(this.TFGraph, this.TFGraph.TPUReplicatedInput(inputs |> Array.map (fun x -> x.TFOutput), operName))
/// <summary>
/// Operator that connects the output of an N-way replicated TPU computation to N separate outputs.
/// </summary><param name="input"></param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'TPUReplicatedOutput'.
/// </param><param name="num_replicas"></param><returns>
/// The TFOperation can be fetched from the resulting TFOutput, by fethching the Operation property from the result.
/// </returns>
member this.TPUReplicatedOutput(input : TF, num_replicas : Int64, ?operName : String) =
let operName = defaultArg operName null
this.TFGraph.TPUReplicatedOutput(input.TFOutput, num_replicas, operName) |> Array.map (fun i -> TF(this.TFGraph, i))
/// <summary>
/// Shuffle dimensions of x according to a permutation.
/// </summary><param name="x"></param><param name="perm"></param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'Transpose'.
/// </param><returns>
/// The TFOperation can be fetched from the resulting TFOutput, by fethching the Operation property from the result.
/// </returns><remarks>
/// The output <c>y</c> has the same rank as <c>x</c>. The shapes of <c>x</c> and <c>y</c> satisfy:
/// <c>y.shape[i] == x.shape[perm[i]] for i in [0, 1, ..., rank(x) - 1]</c></remarks>
member this.Transpose(x : TF, perm : TF, ?operName : String) =
let operName = defaultArg operName null
TF(this.TFGraph, this.TFGraph.Transpose(x.TFOutput, perm.TFOutput, operName))
/// <summary>
/// Returns x / y element-wise for integer types.
/// </summary><param name="x"></param><param name="y"></param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'TruncateDiv'.
/// </param><returns>
/// The TFOperation can be fetched from the resulting TFOutput, by fethching the Operation property from the result.
/// </returns><remarks>
/// Truncation designates that negative numbers will round fractional quantities
/// toward zero. I.e. -7 / 5 = -1. This matches C semantics but it is different
/// than Python semantics. See <c>FloorDiv</c> for a division function that matches
/// Python Semantics.
///
/// *NOTE*: <c>TruncateDiv</c> supports broadcasting. More about broadcasting
/// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
/// </remarks>
member this.TruncateDiv(x : TF, y : TF, ?operName : String) =
let operName = defaultArg operName null
TF(this.TFGraph, this.TFGraph.TruncateDiv(x.TFOutput, y.TFOutput, operName))
/// <summary>
/// Outputs random values from a truncated normal distribution.
/// </summary><param name="shape">
/// The shape of the output tensor.
/// </param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'TruncatedNormal'.
/// </param><param name="seed">
/// Optional argument
/// If either <c>seed</c> or <c>seed2</c> are set to be non-zero, the random number
/// generator is seeded by the given seed. Otherwise, it is seeded by a
/// random seed.
/// </param><param name="seed2">
/// Optional argument
/// A second seed to avoid seed collision.
/// </param><param name="dtype">
/// The type of the output.
/// </param><returns>
/// A tensor of the specified shape filled with random truncated normal
/// values.
/// The TFOperation can be fetched from the resulting TFOutput, by fethching the Operation property from the result.
/// </returns><remarks>
/// The generated values follow a normal distribution with mean 0 and standard
/// deviation 1, except that values whose magnitude is more than 2 standard
/// deviations from the mean are dropped and re-picked.
/// </remarks>
member this.TruncatedNormal(shape : TF, dtype : TFDataType, ?seed : Int64, ?seed2 : Int64, ?operName : String) =
let seed = defaultArg (seed |> Option.map Nullable) (Nullable())
let seed2 = defaultArg (seed2 |> Option.map Nullable) (Nullable())
let operName = defaultArg operName null
TF(this.TFGraph, this.TFGraph.TruncatedNormal(shape.TFOutput, dtype, seed, seed2, operName))
/// <summary>
/// Returns element-wise remainder of division. This emulates C semantics in that
/// </summary><param name="x"></param><param name="y"></param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'TruncateMod'.
/// </param><returns>
/// The TFOperation can be fetched from the resulting TFOutput, by fethching the Operation property from the result.
/// </returns><remarks>
/// the result here is consistent with a truncating divide. E.g. <c>truncate(x / y) *
/// y + truncate_mod(x, y) = x</c>.
///
/// *NOTE*: <c>TruncateMod</c> supports broadcasting. More about broadcasting
/// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
/// </remarks>
member this.TruncateMod(x : TF, y : TF, ?operName : String) =
let operName = defaultArg operName null
TF(this.TFGraph, this.TFGraph.TruncateMod(x.TFOutput, y.TFOutput, operName))
/// <summary>
/// Perform batches of RPC requests.
/// </summary><param name="address"><c>0-D</c> or <c>1-D</c>. The address (i.e. host_name:port) of the RPC server.
/// If this tensor has more than 1 element, then multiple parallel rpc requests
/// are sent. This argument broadcasts with <c>method</c> and <c>request</c>.
/// </param><param name="method"><c>0-D</c> or <c>1-D</c>. The method address on the RPC server.
/// If this tensor has more than 1 element, then multiple parallel rpc requests
/// are sent. This argument broadcasts with <c>address</c> and <c>request</c>.
/// </param><param name="request"><c>0-D</c> or <c>1-D</c>. Serialized proto strings: the rpc request argument.
/// If this tensor has more than 1 element, then multiple parallel rpc requests
/// are sent. This argument broadcasts with <c>address</c> and <c>method</c>.
/// </param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'TryRpc'.
/// </param><param name="protocol">
/// Optional argument
/// RPC protocol to use. Empty string means use the default protocol.
/// Options include 'grpc'.
/// </param><param name="fail_fast">
/// Optional argument
/// <c>boolean</c>. If <c>true</c> (default), then failures to connect
/// (i.e., the server does not immediately respond) cause an RPC failure.
/// </param><param name="timeout_in_ms">
/// Optional argument
/// <c>int</c>. If <c>0</c> (default), then the kernel will run the RPC
/// request and only time out if the RPC deadline passes or the session times out.
/// If this value is greater than <c>0</c>, then the op will raise an exception if
/// the RPC takes longer than <c>timeout_in_ms</c>.
/// </param><returns>
/// Returns a tuple with multiple values, as follows:
/// response: Same shape as <c>request</c>. Serialized proto strings: the rpc responses.
/// status_code: Same shape as <c>request</c>. Values correspond to tensorflow Status enum codes.
/// status_message: Same shape as <c>request</c>. Values correspond to Status messages
/// returned from the RPC calls.
/// The TFOperation can be fetched from any of the TFOutputs returned in the tuple values, by fetching the Operation property.
/// </returns><remarks>
/// This op asynchronously performs either a single RPC request, or a batch
/// of requests. RPC requests are defined by three main parameters:
///
/// - <c>address</c> (the host+port or BNS address of the request)
/// - <c>method</c> (the method name for the request)
/// - <c>request</c> (the serialized proto string, or vector of strings,
/// of the RPC request argument).
///
/// For example, if you have an RPC service running on port localhost:2345,
/// and its interface is configured with the following proto declaration:
///
/// <code>
/// service MyService {
/// rpc MyMethod(MyRequestProto) returns (MyResponseProto) {
/// }
/// };
/// </code>
///
/// then call this op with arguments:
///
/// <code>
/// address = "localhost:2345"
/// method = "MyService/MyMethod"
/// </code>
///
/// The <c>request</c> tensor is a string tensor representing serialized <c>MyRequestProto</c>
/// strings; and the output string tensor <c>response</c> will have the same shape
/// and contain (upon successful completion) corresponding serialized
/// <c>MyResponseProto</c> strings.
///
/// For example, to send a single, empty, <c>MyRequestProto</c>, call
/// this op with <c>request = ""</c>. To send 5 **parallel** empty requests,
/// call this op with <c>request = ["", "", "", "", ""]</c>.
///
/// More generally, one can create a batch of <c>MyRequestProto</c> serialized protos
/// from regular batched tensors using the <c>encode_proto</c> op, and convert
/// the response <c>MyResponseProto</c> serialized protos to batched tensors
/// using the <c>decode_proto</c> op.
///
/// **NOTE** Working with serialized proto strings is faster than instantiating
/// actual proto objects in memory, so no performance degradation is expected
/// compared to writing custom kernels for this workflow.
///
/// Unlike the standard <c>Rpc</c> op, if the connection fails or the remote worker
/// returns an error status, this op does **not** reraise the exception.
/// Instead, the <c>status_code</c> and <c>status_message</c> entry for the corresponding RPC
/// call is set with the error returned from the RPC call. The <c>response</c> tensor
/// will contain valid response values for those minibatch entries whose RPCs did
/// not fail; the rest of the entries will have empty strings.
/// </remarks>
member this.TryRpc(address : TF, ``method`` : TF, request : TF, ?protocol : String, ?fail_fast : Boolean, ?timeout_in_ms : Int64, ?operName : String) =
    // NOTE: `method` is a reserved keyword in F# (error FS0046), so the parameter
    // must be written with double-backtick quoting. Callers using named arguments
    // still see the same parameter name "method".
    let protocol = defaultArg protocol null
    // Optional value types become Nullable<_> for the underlying C# API.
    let fail_fast = defaultArg (fail_fast |> Option.map Nullable) (Nullable())
    let timeout_in_ms = defaultArg (timeout_in_ms |> Option.map Nullable) (Nullable())
    let operName = defaultArg operName null
    // Returns the raw multi-output tuple from TFGraph.TryRpc
    // (response, status_code, status_message) unwrapped.
    this.TFGraph.TryRpc(address.TFOutput, ``method``.TFOutput, request.TFOutput, protocol, fail_fast, timeout_in_ms, operName)
/// <summary>
/// Reverses the operation of Batch for a single output Tensor.
/// </summary><param name="batched_tensor"></param><param name="batch_index"></param><param name="id"></param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'Unbatch'.
/// </param><param name="container">
/// Optional argument
/// </param><param name="shared_name">
/// Optional argument
/// </param><param name="timeout_micros"></param><returns>
/// The TFOperation can be fetched from the resulting TFOutput, by fetching the Operation property from the result.
/// </returns><remarks>
/// An instance of Unbatch either receives an empty batched_tensor, in which case it
/// asynchronously waits until the values become available from a concurrently
/// running instance of Unbatch with the same container and shared_name, or receives
/// a non-empty batched_tensor in which case it finalizes all other concurrently
/// running instances and outputs its own element from the batch.
///
/// batched_tensor: The possibly transformed output of Batch. The size of the first
/// dimension should remain unchanged by the transformations for the operation to
/// work.
/// batch_index: The matching batch_index obtained from Batch.
/// id: The id scalar emitted by Batch.
/// unbatched_tensor: The Tensor corresponding to this execution.
/// timeout_micros: Maximum amount of time (in microseconds) to wait to receive the
/// batched input tensor associated with a given invocation of the op.
/// container: Container to control resource sharing.
/// shared_name: Instances of Unbatch with the same container and shared_name are
/// assumed to possibly belong to the same batch. If left empty, the op name will
/// be used as the shared name.
/// </remarks>
member this.Unbatch(batched_tensor : TF, batch_index : TF, id : TF, timeout_micros : Int64, ?container : String, ?shared_name : String, ?operName : String) =
    // Forward to the TFGraph op; missing optional strings become null defaults.
    let g = this.TFGraph
    TF(g, g.Unbatch(batched_tensor.TFOutput, batch_index.TFOutput, id.TFOutput, timeout_micros, Option.toObj container, Option.toObj shared_name, Option.toObj operName))
/// <summary>
/// Sparse update entries in '*var' and '*accum' according to FOBOS algorithm.
/// </summary><param name="var">
/// Should be from a Variable().
/// </param><param name="accum">
/// Should be from a Variable().
/// </param><param name="lr">
/// Learning rate. Must be a scalar.
/// </param><param name="l1">
/// L1 regularization. Must be a scalar.
/// </param><param name="l2">
/// L2 regularization. Must be a scalar.
/// </param><param name="grad">
/// The gradient.
/// </param><param name="indices">
/// A vector of indices into the first dimension of var and accum.
/// </param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'ResourceSparseApplyProximalAdagrad'.
/// </param><param name="use_locking">
/// Optional argument
/// If True, updating of the var and accum tensors will be protected by
/// a lock; otherwise the behavior is undefined, but may exhibit less contention.
/// </param><returns>
/// Returns the description of the operation
/// </returns><remarks>
/// That is for rows we have grad for, we update var and accum as follows:
/// accum += grad * grad
/// prox_v = var
/// prox_v -= lr * grad * (1 / sqrt(accum))
/// var = sign(prox_v)/(1+lr*l2) * max{|prox_v|-lr*l1,0}
/// </remarks>
member this.ResourceSparseApplyProximalAdagrad(var : TF, accum : TF, lr : TF, l1 : TF, l2 : TF, grad : TF, indices : TF, ?use_locking : Boolean, ?operName : String) =
    // Delegates to the graph op; returns its operation description unwrapped.
    // use_locking maps to Nullable<bool> (Nullable() when omitted).
    this.TFGraph.ResourceSparseApplyProximalAdagrad(var.TFOutput, accum.TFOutput, lr.TFOutput, l1.TFOutput, l2.TFOutput, grad.TFOutput, indices.TFOutput, Option.toNullable use_locking, Option.toObj operName)
/// <summary>
/// Sparse update '*var' as FOBOS algorithm with fixed learning rate.
/// </summary><param name="var">
/// Should be from a Variable().
/// </param><param name="alpha">
/// Scaling factor. Must be a scalar.
/// </param><param name="l1">
/// L1 regularization. Must be a scalar.
/// </param><param name="l2">
/// L2 regularization. Must be a scalar.
/// </param><param name="grad">
/// The gradient.
/// </param><param name="indices">
/// A vector of indices into the first dimension of var and accum.
/// </param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'ResourceSparseApplyProximalGradientDescent'.
/// </param><param name="use_locking">
/// Optional argument
/// If True, the subtraction will be protected by a lock;
/// otherwise the behavior is undefined, but may exhibit less contention.
/// </param><returns>
/// Returns the description of the operation
/// </returns><remarks>
/// That is for rows we have grad for, we update var as follows:
/// prox_v = var - alpha * grad
/// var = sign(prox_v)/(1+alpha*l2) * max{|prox_v|-alpha*l1,0}
/// </remarks>
member this.ResourceSparseApplyProximalGradientDescent(var : TF, alpha : TF, l1 : TF, l2 : TF, grad : TF, indices : TF, ?use_locking : Boolean, ?operName : String) =
    // Thin delegation wrapper: unwrap each TF to its TFOutput and convert
    // the F# optionals to the Nullable/null defaults the C# API expects.
    this.TFGraph.ResourceSparseApplyProximalGradientDescent(var.TFOutput, alpha.TFOutput, l1.TFOutput, l2.TFOutput, grad.TFOutput, indices.TFOutput, Option.toNullable use_locking, Option.toObj operName)
/// <summary>
/// Update '*var' according to the RMSProp algorithm.
/// </summary><param name="var">
/// Should be from a Variable().
/// </param><param name="ms">
/// Should be from a Variable().
/// </param><param name="mom">
/// Should be from a Variable().
/// </param><param name="lr">
/// Scaling factor. Must be a scalar.
/// </param><param name="rho">
/// Decay rate. Must be a scalar.
/// </param><param name="momentum"></param><param name="epsilon">
/// Ridge term. Must be a scalar.
/// </param><param name="grad">
/// The gradient.
/// </param><param name="indices">
/// A vector of indices into the first dimension of var, ms and mom.
/// </param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'ResourceSparseApplyRMSProp'.
/// </param><param name="use_locking">
/// Optional argument
/// If <c>True</c>, updating of the var, ms, and mom tensors is protected
/// by a lock; otherwise the behavior is undefined, but may exhibit less
/// contention.
/// </param><returns>
/// Returns the description of the operation
/// </returns><remarks>
/// Note that in dense implementation of this algorithm, ms and mom will
/// update even if the grad is zero, but in this sparse implementation, ms
/// and mom will not update in iterations during which the grad is zero.
///
/// mean_square = decay * mean_square + (1-decay) * gradient ** 2
/// Delta = learning_rate * gradient / sqrt(mean_square + epsilon)
///
/// ms &amp;lt;- rho * ms_{t-1} + (1-rho) * grad * grad
/// mom &amp;lt;- momentum * mom_{t-1} + lr * grad / sqrt(ms + epsilon)
/// var &amp;lt;- var - mom
/// </remarks>
member this.ResourceSparseApplyRMSProp(var : TF, ms : TF, mom : TF, lr : TF, rho : TF, momentum : TF, epsilon : TF, grad : TF, indices : TF, ?use_locking : Boolean, ?operName : String) =
    // Unwrap every TF argument and forward to the underlying graph op.
    let g = this.TFGraph
    g.ResourceSparseApplyRMSProp(var.TFOutput, ms.TFOutput, mom.TFOutput, lr.TFOutput, rho.TFOutput, momentum.TFOutput, epsilon.TFOutput, grad.TFOutput, indices.TFOutput, Option.toNullable use_locking, Option.toObj operName)
/// <summary>
/// Wrapper for the TFGraph ResourceStridedSliceAssign operation. The mask
/// arguments and operName are optional and default to the underlying API's
/// Nullable/null defaults when omitted.
/// </summary>
member this.ResourceStridedSliceAssign(reference : TF, ``begin`` : TF, ``end`` : TF, strides : TF, value : TF, ?begin_mask : Int64, ?end_mask : Int64, ?ellipsis_mask : Int64, ?new_axis_mask : Int64, ?shrink_axis_mask : Int64, ?operName : String) =
    // ``begin``/``end`` are F# keywords and therefore backtick-escaped.
    this.TFGraph.ResourceStridedSliceAssign(reference.TFOutput, ``begin``.TFOutput, ``end``.TFOutput, strides.TFOutput, value.TFOutput, Option.toNullable begin_mask, Option.toNullable end_mask, Option.toNullable ellipsis_mask, Option.toNullable new_axis_mask, Option.toNullable shrink_axis_mask, Option.toObj operName)
/// <summary>
/// Restores a tensor from checkpoint files.
/// </summary><param name="file_pattern">
/// Must have a single element. The pattern of the files from
/// which we read the tensor.
/// </param><param name="tensor_name">
/// Must have a single element. The name of the tensor to be
/// restored.
/// </param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'Restore'.
/// </param><param name="preferred_shard">
/// Optional argument
/// Index of file to open first if multiple files match
/// <c>file_pattern</c>.
/// </param><param name="dt">
/// The type of the tensor to be restored.
/// </param><returns>
/// The restored tensor.
/// The TFOperation can be fetched from the resulting TFOutput, by fetching the Operation property from the result.
/// </returns><remarks>
/// Reads a tensor stored in one or several files. If there are several files (for
/// instance because a tensor was saved as slices), <c>file_pattern</c> may contain
/// wildcard symbols (<c>*</c> and <c>?</c>) in the filename portion only, not in the
/// directory portion.
///
/// If a <c>file_pattern</c> matches several files, <c>preferred_shard</c> can be used to hint
/// in which file the requested tensor is likely to be found. This op will first
/// open the file at index <c>preferred_shard</c> in the list of matching files and try
/// to restore tensors from that file. Only if some tensors or tensor slices are
/// not found in that first file, then the Op opens all the files. Setting
/// <c>preferred_shard</c> to match the value passed as the <c>shard</c> input
/// of a matching <c>Save</c> Op may speed up Restore. This attribute only affects
/// performance, not correctness. The default value -1 means files are processed in
/// order.
///
/// See also <c>RestoreSlice</c>.
/// </remarks>
member this.Restore(file_pattern : TF, tensor_name : TF, dt : TFDataType, ?preferred_shard : Int64, ?operName : String) =
    // Build the Restore op on this graph and wrap the restored tensor output.
    let g = this.TFGraph
    TF(g, g.Restore(file_pattern.TFOutput, tensor_name.TFOutput, dt, Option.toNullable preferred_shard, Option.toObj operName))
/// <summary>
/// Restores a tensor from checkpoint files.
/// </summary><param name="file_pattern">
/// Must have a single element. The pattern of the files from
/// which we read the tensor.
/// </param><param name="tensor_name">
/// Must have a single element. The name of the tensor to be
/// restored.
/// </param><param name="shape_and_slice">
/// Scalar. The shapes and slice specifications to use when
/// restoring a tensor.
/// </param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'RestoreSlice'.
/// </param><param name="preferred_shard">
/// Optional argument
/// Index of file to open first if multiple files match
/// <c>file_pattern</c>. See the documentation for <c>Restore</c>.
/// </param><param name="dt">
/// The type of the tensor to be restored.
/// </param><returns>
/// The restored tensor.
/// The TFOperation can be fetched from the resulting TFOutput, by fetching the Operation property from the result.
/// </returns><remarks>
/// This is like <c>Restore</c> except that restored tensor can be listed as filling
/// only a slice of a larger tensor. <c>shape_and_slice</c> specifies the shape of the
/// larger tensor and the slice that the restored tensor covers.
///
/// The <c>shape_and_slice</c> input has the same format as the
/// elements of the <c>shapes_and_slices</c> input of the <c>SaveSlices</c> op.
/// </remarks>
member this.RestoreSlice(file_pattern : TF, tensor_name : TF, shape_and_slice : TF, dt : TFDataType, ?preferred_shard : Int64, ?operName : String) =
    // Like Restore, but with an explicit shape-and-slice spec input.
    let g = this.TFGraph
    TF(g, g.RestoreSlice(file_pattern.TFOutput, tensor_name.TFOutput, shape_and_slice.TFOutput, dt, Option.toNullable preferred_shard, Option.toObj operName))
/// <summary>
/// Restores tensors from a V2 checkpoint.
/// </summary><param name="prefix">
/// Must have a single element. The prefix of a V2 checkpoint.
/// </param><param name="tensor_names">
/// shape {N}. The names of the tensors to be restored.
/// </param><param name="shape_and_slices">
/// shape {N}. The slice specs of the tensors to be restored.
/// Empty strings indicate that they are non-partitioned tensors.
/// </param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'RestoreV2'.
/// </param><param name="dtypes">
/// shape {N}. The list of expected dtype for the tensors. Must match
/// those stored in the checkpoint.
/// </param><returns>
/// shape {N}. The restored tensors, whose shapes are read from the
/// checkpoint directly.
/// The TFOperation can be fetched from the resulting TFOutput, by fetching the Operation property from the result.
/// </returns><remarks>
/// For backward compatibility with the V1 format, this Op currently allows
/// restoring from a V1 checkpoint as well:
/// - This Op first attempts to find the V2 index file pointed to by "prefix", and
/// if found proceed to read it as a V2 checkpoint;
/// - Otherwise the V1 read path is invoked.
/// Relying on this behavior is not recommended, as the ability to fall back to read
/// V1 might be deprecated and eventually removed.
///
/// By default, restores the named tensors in full. If the caller wishes to restore
/// specific slices of stored tensors, "shape_and_slices" should be non-empty
/// strings and correspondingly well-formed.
///
/// Callers must ensure all the named tensors are indeed stored in the checkpoint.
/// </remarks>
member this.RestoreV2(prefix : TF, tensor_names : TF, shape_and_slices : TF, dtypes : TFDataType[], ?operName : String) =
    // The graph op yields one TFOutput per requested tensor; rewrap each in TF.
    let g = this.TFGraph
    g.RestoreV2(prefix.TFOutput, tensor_names.TFOutput, shape_and_slices.TFOutput, dtypes, Option.toObj operName)
    |> Array.map (fun output -> TF(g, output))
/// <summary>
/// Retrieve embedding parameters for a single table.
/// </summary><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'RetrieveTPUEmbeddingAdadeltaParameters'.
/// </param><param name="table_id">
/// Optional argument
/// </param><param name="table_name">
/// Optional argument
/// </param><param name="num_shards"></param><param name="shard_id"></param><returns>
/// Returns a tuple with multiple values, as follows:
/// parameters: Parameter parameters updated by the Adadelta optimization algorithm.
/// accumulators: Parameter accumulators updated by the Adadelta optimization algorithm.
/// updates: Parameter updates updated by the Adadelta optimization algorithm.
/// The TFOperation can be fetched from any of the TFOutputs returned in the tuple values, by fetching the Operation property.
/// </returns><remarks>
///
/// An op that retrieves optimization parameters from embedding to host
/// memory. Must be preceded by a ConfigureTPUEmbeddingHost op that sets up
/// the correct embedding table configuration. For example, this op is
/// used to retrieve updated parameters before saving a checkpoint.
///
/// parameters: A tensor containing the embedding table parameters to store with the
/// parameters from embedding updates using the Adadelta optimization algorithm.
/// accumulators: A tensor containing the embedding table accumulators to store with the
/// parameters from embedding updates using the Adadelta optimization algorithm.
/// updates: A tensor containing the embedding table updates to store with the
/// parameters from embedding updates using the Adadelta optimization algorithm.
/// table_name: Name of this table; must match a name in the
/// TPUEmbeddingConfiguration proto (overrides table_id).
/// num_shards: Number of shards into which the embedding tables are divided.
/// shard_id: Identifier of shard for this operation.
/// table_id: Index of this table in the EmbeddingLayerConfiguration proto
/// (deprecated).
///
/// </remarks>
member this.RetrieveTPUEmbeddingAdadeltaParameters(num_shards : Int64, shard_id : Int64, ?table_id : Int64, ?table_name : String, ?operName : String) =
    // Delegation wrapper: returns the raw multi-output tuple from the graph op.
    this.TFGraph.RetrieveTPUEmbeddingAdadeltaParameters(num_shards, shard_id, Option.toNullable table_id, Option.toObj table_name, Option.toObj operName)
/// <summary>
/// Retrieve embedding parameters for a single table.
/// </summary><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'RetrieveTPUEmbeddingAdadeltaParametersGradAccumDebug'.
/// </param><param name="table_id">
/// Optional argument
/// </param><param name="table_name">
/// Optional argument
/// </param><param name="num_shards"></param><param name="shard_id"></param><returns>
/// Returns a tuple with multiple values, as follows:
/// parameters: Parameter parameters updated by the Adadelta optimization algorithm.
/// accumulators: Parameter accumulators updated by the Adadelta optimization algorithm.
/// updates: Parameter updates updated by the Adadelta optimization algorithm.
/// gradient_accumulators: Parameter gradient_accumulators updated by the Adadelta optimization algorithm.
/// The TFOperation can be fetched from any of the TFOutputs returned in the tuple values, by fetching the Operation property.
/// </returns><remarks>
///
/// An op that retrieves optimization parameters from embedding to host
/// memory. Must be preceded by a ConfigureTPUEmbeddingHost op that sets up
/// the correct embedding table configuration. For example, this op is
/// used to retrieve updated parameters before saving a checkpoint.
///
/// parameters: A tensor containing the embedding table parameters to store with the
/// parameters from embedding updates using the Adadelta optimization algorithm.
/// accumulators: A tensor containing the embedding table accumulators to store with the
/// parameters from embedding updates using the Adadelta optimization algorithm.
/// updates: A tensor containing the embedding table updates to store with the
/// parameters from embedding updates using the Adadelta optimization algorithm.
/// gradient_accumulators: A tensor containing the embedding table gradient_accumulators to store with the
/// parameters from embedding updates using the Adadelta optimization algorithm.
/// table_name: Name of this table; must match a name in the
/// TPUEmbeddingConfiguration proto (overrides table_id).
/// num_shards: Number of shards into which the embedding tables are divided.
/// shard_id: Identifier of shard for this operation.
/// table_id: Index of this table in the EmbeddingLayerConfiguration proto
/// (deprecated).
///
/// </remarks>
member this.RetrieveTPUEmbeddingAdadeltaParametersGradAccumDebug(num_shards : Int64, shard_id : Int64, ?table_id : Int64, ?table_name : String, ?operName : String) =
    // Delegation wrapper: optional args map to Nullable/null before the call.
    this.TFGraph.RetrieveTPUEmbeddingAdadeltaParametersGradAccumDebug(num_shards, shard_id, Option.toNullable table_id, Option.toObj table_name, Option.toObj operName)
/// <summary>
/// Retrieve embedding parameters for a single table.
/// </summary><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'RetrieveTPUEmbeddingAdagradParameters'.
/// </param><param name="table_id">
/// Optional argument
/// </param><param name="table_name">
/// Optional argument
/// </param><param name="num_shards"></param><param name="shard_id"></param><returns>
/// Returns a tuple with multiple values, as follows:
/// parameters: Parameter parameters updated by the Adagrad optimization algorithm.
/// accumulators: Parameter accumulators updated by the Adagrad optimization algorithm.
/// The TFOperation can be fetched from any of the TFOutputs returned in the tuple values, by fetching the Operation property.
/// </returns><remarks>
///
/// An op that retrieves optimization parameters from embedding to host
/// memory. Must be preceded by a ConfigureTPUEmbeddingHost op that sets up
/// the correct embedding table configuration. For example, this op is
/// used to retrieve updated parameters before saving a checkpoint.
///
/// parameters: A tensor containing the embedding table parameters to store with the
/// parameters from embedding updates using the Adagrad optimization algorithm.
/// accumulators: A tensor containing the embedding table accumulators to store with the
/// parameters from embedding updates using the Adagrad optimization algorithm.
/// table_name: Name of this table; must match a name in the
/// TPUEmbeddingConfiguration proto (overrides table_id).
/// num_shards: Number of shards into which the embedding tables are divided.
/// shard_id: Identifier of shard for this operation.
/// table_id: Index of this table in the EmbeddingLayerConfiguration proto
/// (deprecated).
///
/// </remarks>
member this.RetrieveTPUEmbeddingAdagradParameters(num_shards : Int64, shard_id : Int64, ?table_id : Int64, ?table_name : String, ?operName : String) =
    // Delegation wrapper: returns the raw multi-output tuple from the graph op.
    this.TFGraph.RetrieveTPUEmbeddingAdagradParameters(num_shards, shard_id, Option.toNullable table_id, Option.toObj table_name, Option.toObj operName)
/// <summary>
/// Retrieve embedding parameters for a single table.
/// </summary><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'RetrieveTPUEmbeddingAdagradParametersGradAccumDebug'.
/// </param><param name="table_id">
/// Optional argument
/// </param><param name="table_name">
/// Optional argument
/// </param><param name="num_shards"></param><param name="shard_id"></param><returns>
/// Returns a tuple with multiple values, as follows:
/// parameters: Parameter parameters updated by the Adagrad optimization algorithm.
/// accumulators: Parameter accumulators updated by the Adagrad optimization algorithm.
/// gradient_accumulators: Parameter gradient_accumulators updated by the Adagrad optimization algorithm.
/// The TFOperation can be fetched from any of the TFOutputs returned in the tuple values, by fetching the Operation property.
/// </returns><remarks>
///
/// An op that retrieves optimization parameters from embedding to host
/// memory. Must be preceded by a ConfigureTPUEmbeddingHost op that sets up
/// the correct embedding table configuration. For example, this op is
/// used to retrieve updated parameters before saving a checkpoint.
///
/// parameters: A tensor containing the embedding table parameters to store with the
/// parameters from embedding updates using the Adagrad optimization algorithm.
/// accumulators: A tensor containing the embedding table accumulators to store with the
/// parameters from embedding updates using the Adagrad optimization algorithm.
/// gradient_accumulators: A tensor containing the embedding table gradient_accumulators to store with the
/// parameters from embedding updates using the Adagrad optimization algorithm.
/// table_name: Name of this table; must match a name in the
/// TPUEmbeddingConfiguration proto (overrides table_id).
/// num_shards: Number of shards into which the embedding tables are divided.
/// shard_id: Identifier of shard for this operation.
/// table_id: Index of this table in the EmbeddingLayerConfiguration proto
/// (deprecated).
///
/// </remarks>
member this.RetrieveTPUEmbeddingAdagradParametersGradAccumDebug(num_shards : Int64, shard_id : Int64, ?table_id : Int64, ?table_name : String, ?operName : String) =
    // Delegation wrapper: optional args map to Nullable/null before the call.
    this.TFGraph.RetrieveTPUEmbeddingAdagradParametersGradAccumDebug(num_shards, shard_id, Option.toNullable table_id, Option.toObj table_name, Option.toObj operName)
/// <summary>
/// Retrieve embedding parameters for a single table.
/// </summary><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'RetrieveTPUEmbeddingADAMParameters'.
/// </param><param name="table_id">
/// Optional argument
/// </param><param name="table_name">
/// Optional argument
/// </param><param name="num_shards"></param><param name="shard_id"></param><returns>
/// Returns a tuple with multiple values, as follows:
/// parameters: Parameter parameters updated by the ADAM optimization algorithm.
/// momenta: Parameter momenta updated by the ADAM optimization algorithm.
/// velocities: Parameter velocities updated by the ADAM optimization algorithm.
/// The TFOperation can be fetched from any of the TFOutputs returned in the tuple values, by fetching the Operation property.
/// </returns><remarks>
///
/// An op that retrieves optimization parameters from embedding to host
/// memory. Must be preceded by a ConfigureTPUEmbeddingHost op that sets up
/// the correct embedding table configuration. For example, this op is
/// used to retrieve updated parameters before saving a checkpoint.
///
/// parameters: A tensor containing the embedding table parameters to store with the
/// parameters from embedding updates using the ADAM optimization algorithm.
/// momenta: A tensor containing the embedding table momenta to store with the
/// parameters from embedding updates using the ADAM optimization algorithm.
/// velocities: A tensor containing the embedding table velocities to store with the
/// parameters from embedding updates using the ADAM optimization algorithm.
/// table_name: Name of this table; must match a name in the
/// TPUEmbeddingConfiguration proto (overrides table_id).
/// num_shards: Number of shards into which the embedding tables are divided.
/// shard_id: Identifier of shard for this operation.
/// table_id: Index of this table in the EmbeddingLayerConfiguration proto
/// (deprecated).
///
/// </remarks>
member this.RetrieveTPUEmbeddingADAMParameters(num_shards : Int64, shard_id : Int64, ?table_id : Int64, ?table_name : String, ?operName : String) =
    // Delegation wrapper: returns the raw multi-output tuple from the graph op.
    this.TFGraph.RetrieveTPUEmbeddingADAMParameters(num_shards, shard_id, Option.toNullable table_id, Option.toObj table_name, Option.toObj operName)
/// <summary>
/// Retrieve embedding parameters for a single table.
/// </summary><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'RetrieveTPUEmbeddingADAMParametersGradAccumDebug'.
/// </param><param name="table_id">
/// Optional argument
/// </param><param name="table_name">
/// Optional argument
/// </param><param name="num_shards"></param><param name="shard_id"></param><returns>
/// Returns a tuple with multiple values, as follows:
/// parameters: Parameter parameters updated by the ADAM optimization algorithm.
/// momenta: Parameter momenta updated by the ADAM optimization algorithm.
/// velocities: Parameter velocities updated by the ADAM optimization algorithm.
/// gradient_accumulators: Parameter gradient_accumulators updated by the ADAM optimization algorithm.
/// The TFOperation can be fetched from any of the TFOutputs returned in the tuple values, by fetching the Operation property.
/// </returns><remarks>
///
/// An op that retrieves optimization parameters from embedding to host
/// memory. Must be preceded by a ConfigureTPUEmbeddingHost op that sets up
/// the correct embedding table configuration. For example, this op is
/// used to retrieve updated parameters before saving a checkpoint.
///
/// parameters: A tensor containing the embedding table parameters to store with the
/// parameters from embedding updates using the ADAM optimization algorithm.
/// momenta: A tensor containing the embedding table momenta to store with the
/// parameters from embedding updates using the ADAM optimization algorithm.
/// velocities: A tensor containing the embedding table velocities to store with the
/// parameters from embedding updates using the ADAM optimization algorithm.
/// gradient_accumulators: A tensor containing the embedding table gradient_accumulators to store with the
/// parameters from embedding updates using the ADAM optimization algorithm.
/// table_name: Name of this table; must match a name in the
/// TPUEmbeddingConfiguration proto (overrides table_id).
/// num_shards: Number of shards into which the embedding tables are divided.
/// shard_id: Identifier of shard for this operation.
/// table_id: Index of this table in the EmbeddingLayerConfiguration proto
/// (deprecated).
///
/// </remarks>
member this.RetrieveTPUEmbeddingADAMParametersGradAccumDebug(num_shards : Int64, shard_id : Int64, ?table_id : Int64, ?table_name : String, ?operName : String) =
let table_id = defaultArg (table_id |> Option.map Nullable) (Nullable())
let table_name = defaultArg table_name null
let operName = defaultArg operName null
this.TFGraph.RetrieveTPUEmbeddingADAMParametersGradAccumDebug(num_shards, shard_id, table_id, table_name, operName)
/// <summary>
/// Retrieve embedding parameters for a single table.
/// </summary><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'RetrieveTPUEmbeddingCenteredRMSPropParameters'.
/// </param><param name="table_id">
/// Optional argument
/// </param><param name="table_name">
/// Optional argument
/// </param><param name="num_shards"></param><param name="shard_id"></param><returns>
/// Returns a tuple with multiple values, as follows:
/// parameters: Parameter parameters updated by the centered RMSProp optimization algorithm.
/// ms: Parameter ms updated by the centered RMSProp optimization algorithm.
/// mom: Parameter mom updated by the centered RMSProp optimization algorithm.
/// mg: Parameter mg updated by the centered RMSProp optimization algorithm.
/// The TFOperation can be fetched from any of the TFOutputs returned in the tuple values, by fethching the Operation property.
/// </returns><remarks>
///
/// An op that retrieves optimization parameters from embedding to host
/// memory. Must be preceded by a ConfigureTPUEmbeddingHost op that sets up
/// the correct embedding table configuration. For example, this op is
/// used to retrieve updated parameters before saving a checkpoint.
///
/// parameters: A tensor containing the embedding table parameters to store with the
/// parameters from embedding updates using the centered RMSProp optimization algorithm.
/// ms: A tensor containing the embedding table ms to store with the
/// parameters from embedding updates using the centered RMSProp optimization algorithm.
/// mom: A tensor containing the embedding table mom to store with the
/// parameters from embedding updates using the centered RMSProp optimization algorithm.
/// mg: A tensor containing the embedding table mg to store with the
/// parameters from embedding updates using the centered RMSProp optimization algorithm.
/// table_name: Name of this table; must match a name in the
/// TPUEmbeddingConfiguration proto (overrides table_id).
/// num_shards: Number of shards into which the embedding tables are divided.
/// shard_id: Identifier of shard for this operation.
/// table_id: Index of this table in the EmbeddingLayerConfiguration proto
/// (deprecated).
///
/// </remarks>
member this.RetrieveTPUEmbeddingCenteredRMSPropParameters(num_shards : Int64, shard_id : Int64, ?table_id : Int64, ?table_name : String, ?operName : String) =
let table_id = defaultArg (table_id |> Option.map Nullable) (Nullable())
let table_name = defaultArg table_name null
let operName = defaultArg operName null
this.TFGraph.RetrieveTPUEmbeddingCenteredRMSPropParameters(num_shards, shard_id, table_id, table_name, operName)
/// <summary>
/// Retrieve embedding parameters for a single table.
/// </summary><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'RetrieveTPUEmbeddingFTRLParameters'.
/// </param><param name="table_id">
/// Optional argument
/// </param><param name="table_name">
/// Optional argument
/// </param><param name="num_shards"></param><param name="shard_id"></param><returns>
/// Returns a tuple with multiple values, as follows:
/// parameters: Parameter parameters updated by the FTRL optimization algorithm.
/// accumulators: Parameter accumulators updated by the FTRL optimization algorithm.
/// linears: Parameter linears updated by the FTRL optimization algorithm.
/// The TFOperation can be fetched from any of the TFOutputs returned in the tuple values, by fethching the Operation property.
/// </returns><remarks>
///
/// An op that retrieves optimization parameters from embedding to host
/// memory. Must be preceded by a ConfigureTPUEmbeddingHost op that sets up
/// the correct embedding table configuration. For example, this op is
/// used to retrieve updated parameters before saving a checkpoint.
///
/// parameters: A tensor containing the embedding table parameters to store with the
/// parameters from embedding updates using the FTRL optimization algorithm.
/// accumulators: A tensor containing the embedding table accumulators to store with the
/// parameters from embedding updates using the FTRL optimization algorithm.
/// linears: A tensor containing the embedding table linears to store with the
/// parameters from embedding updates using the FTRL optimization algorithm.
/// table_name: Name of this table; must match a name in the
/// TPUEmbeddingConfiguration proto (overrides table_id).
/// num_shards: Number of shards into which the embedding tables are divided.
/// shard_id: Identifier of shard for this operation.
/// table_id: Index of this table in the EmbeddingLayerConfiguration proto
/// (deprecated).
///
/// </remarks>
member this.RetrieveTPUEmbeddingFTRLParameters(num_shards : Int64, shard_id : Int64, ?table_id : Int64, ?table_name : String, ?operName : String) =
let table_id = defaultArg (table_id |> Option.map Nullable) (Nullable())
let table_name = defaultArg table_name null
let operName = defaultArg operName null
this.TFGraph.RetrieveTPUEmbeddingFTRLParameters(num_shards, shard_id, table_id, table_name, operName)
/// <summary>
/// Retrieve embedding parameters for a single table.
/// </summary><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'RetrieveTPUEmbeddingFTRLParametersGradAccumDebug'.
/// </param><param name="table_id">
/// Optional argument
/// </param><param name="table_name">
/// Optional argument
/// </param><param name="num_shards"></param><param name="shard_id"></param><returns>
/// Returns a tuple with multiple values, as follows:
/// parameters: Parameter parameters updated by the FTRL optimization algorithm.
/// accumulators: Parameter accumulators updated by the FTRL optimization algorithm.
/// linears: Parameter linears updated by the FTRL optimization algorithm.
/// gradient_accumulators: Parameter gradient_accumulators updated by the FTRL optimization algorithm.
/// The TFOperation can be fetched from any of the TFOutputs returned in the tuple values, by fethching the Operation property.
/// </returns><remarks>
///
/// An op that retrieves optimization parameters from embedding to host
/// memory. Must be preceded by a ConfigureTPUEmbeddingHost op that sets up
/// the correct embedding table configuration. For example, this op is
/// used to retrieve updated parameters before saving a checkpoint.
///
/// parameters: A tensor containing the embedding table parameters to store with the
/// parameters from embedding updates using the FTRL optimization algorithm.
/// accumulators: A tensor containing the embedding table accumulators to store with the
/// parameters from embedding updates using the FTRL optimization algorithm.
/// linears: A tensor containing the embedding table linears to store with the
/// parameters from embedding updates using the FTRL optimization algorithm.
/// gradient_accumulators: A tensor containing the embedding table gradient_accumulators to store with the
/// parameters from embedding updates using the FTRL optimization algorithm.
/// table_name: Name of this table; must match a name in the
/// TPUEmbeddingConfiguration proto (overrides table_id).
/// num_shards: Number of shards into which the embedding tables are divided.
/// shard_id: Identifier of shard for this operation.
/// table_id: Index of this table in the EmbeddingLayerConfiguration proto
/// (deprecated).
///
/// </remarks>
member this.RetrieveTPUEmbeddingFTRLParametersGradAccumDebug(num_shards : Int64, shard_id : Int64, ?table_id : Int64, ?table_name : String, ?operName : String) =
let table_id = defaultArg (table_id |> Option.map Nullable) (Nullable())
let table_name = defaultArg table_name null
let operName = defaultArg operName null
this.TFGraph.RetrieveTPUEmbeddingFTRLParametersGradAccumDebug(num_shards, shard_id, table_id, table_name, operName)
/// <summary>
/// Retrieve embedding parameters for a single table.
/// </summary><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'RetrieveTPUEmbeddingMDLAdagradLightParameters'.
/// </param><param name="table_id">
/// Optional argument
/// </param><param name="table_name">
/// Optional argument
/// </param><param name="num_shards"></param><param name="shard_id"></param><returns>
/// Returns a tuple with multiple values, as follows:
/// parameters: Parameter parameters updated by the MDL Adagrad Light optimization algorithm.
/// accumulators: Parameter accumulators updated by the MDL Adagrad Light optimization algorithm.
/// weights: Parameter weights updated by the MDL Adagrad Light optimization algorithm.
/// benefits: Parameter benefits updated by the MDL Adagrad Light optimization algorithm.
/// The TFOperation can be fetched from any of the TFOutputs returned in the tuple values, by fethching the Operation property.
/// </returns><remarks>
///
/// An op that retrieves optimization parameters from embedding to host
/// memory. Must be preceded by a ConfigureTPUEmbeddingHost op that sets up
/// the correct embedding table configuration. For example, this op is
/// used to retrieve updated parameters before saving a checkpoint.
///
/// parameters: A tensor containing the embedding table parameters to store with the
/// parameters from embedding updates using the MDL Adagrad Light optimization algorithm.
/// accumulators: A tensor containing the embedding table accumulators to store with the
/// parameters from embedding updates using the MDL Adagrad Light optimization algorithm.
/// weights: A tensor containing the embedding table weights to store with the
/// parameters from embedding updates using the MDL Adagrad Light optimization algorithm.
/// benefits: A tensor containing the embedding table benefits to store with the
/// parameters from embedding updates using the MDL Adagrad Light optimization algorithm.
/// table_name: Name of this table; must match a name in the
/// TPUEmbeddingConfiguration proto (overrides table_id).
/// num_shards: Number of shards into which the embedding tables are divided.
/// shard_id: Identifier of shard for this operation.
/// table_id: Index of this table in the EmbeddingLayerConfiguration proto
/// (deprecated).
///
/// </remarks>
member this.RetrieveTPUEmbeddingMDLAdagradLightParameters(num_shards : Int64, shard_id : Int64, ?table_id : Int64, ?table_name : String, ?operName : String) =
let table_id = defaultArg (table_id |> Option.map Nullable) (Nullable())
let table_name = defaultArg table_name null
let operName = defaultArg operName null
this.TFGraph.RetrieveTPUEmbeddingMDLAdagradLightParameters(num_shards, shard_id, table_id, table_name, operName)
/// <summary>
/// Retrieve embedding parameters for a single table.
/// </summary><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'RetrieveTPUEmbeddingMomentumParameters'.
/// </param><param name="table_id">
/// Optional argument
/// </param><param name="table_name">
/// Optional argument
/// </param><param name="num_shards"></param><param name="shard_id"></param><returns>
/// Returns a tuple with multiple values, as follows:
/// parameters: Parameter parameters updated by the Momentum optimization algorithm.
/// momenta: Parameter momenta updated by the Momentum optimization algorithm.
/// The TFOperation can be fetched from any of the TFOutputs returned in the tuple values, by fethching the Operation property.
/// </returns><remarks>
///
/// An op that retrieves optimization parameters from embedding to host
/// memory. Must be preceded by a ConfigureTPUEmbeddingHost op that sets up
/// the correct embedding table configuration. For example, this op is
/// used to retrieve updated parameters before saving a checkpoint.
///
/// parameters: A tensor containing the embedding table parameters to store with the
/// parameters from embedding updates using the Momentum optimization algorithm.
/// momenta: A tensor containing the embedding table momenta to store with the
/// parameters from embedding updates using the Momentum optimization algorithm.
/// table_name: Name of this table; must match a name in the
/// TPUEmbeddingConfiguration proto (overrides table_id).
/// num_shards: Number of shards into which the embedding tables are divided.
/// shard_id: Identifier of shard for this operation.
/// table_id: Index of this table in the EmbeddingLayerConfiguration proto
/// (deprecated).
///
/// </remarks>
member this.RetrieveTPUEmbeddingMomentumParameters(num_shards : Int64, shard_id : Int64, ?table_id : Int64, ?table_name : String, ?operName : String) =
let table_id = defaultArg (table_id |> Option.map Nullable) (Nullable())
let table_name = defaultArg table_name null
let operName = defaultArg operName null
this.TFGraph.RetrieveTPUEmbeddingMomentumParameters(num_shards, shard_id, table_id, table_name, operName)
/// <summary>
/// Retrieve embedding parameters for a single table.
/// </summary><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'RetrieveTPUEmbeddingMomentumParametersGradAccumDebug'.
/// </param><param name="table_id">
/// Optional argument
/// </param><param name="table_name">
/// Optional argument
/// </param><param name="num_shards"></param><param name="shard_id"></param><returns>
/// Returns a tuple with multiple values, as follows:
/// parameters: Parameter parameters updated by the Momentum optimization algorithm.
/// momenta: Parameter momenta updated by the Momentum optimization algorithm.
/// gradient_accumulators: Parameter gradient_accumulators updated by the Momentum optimization algorithm.
/// The TFOperation can be fetched from any of the TFOutputs returned in the tuple values, by fethching the Operation property.
/// </returns><remarks>
///
/// An op that retrieves optimization parameters from embedding to host
/// memory. Must be preceded by a ConfigureTPUEmbeddingHost op that sets up
/// the correct embedding table configuration. For example, this op is
/// used to retrieve updated parameters before saving a checkpoint.
///
/// parameters: A tensor containing the embedding table parameters to store with the
/// parameters from embedding updates using the Momentum optimization algorithm.
/// momenta: A tensor containing the embedding table momenta to store with the
/// parameters from embedding updates using the Momentum optimization algorithm.
/// gradient_accumulators: A tensor containing the embedding table gradient_accumulators to store with the
/// parameters from embedding updates using the Momentum optimization algorithm.
/// table_name: Name of this table; must match a name in the
/// TPUEmbeddingConfiguration proto (overrides table_id).
/// num_shards: Number of shards into which the embedding tables are divided.
/// shard_id: Identifier of shard for this operation.
/// table_id: Index of this table in the EmbeddingLayerConfiguration proto
/// (deprecated).
///
/// </remarks>
member this.RetrieveTPUEmbeddingMomentumParametersGradAccumDebug(num_shards : Int64, shard_id : Int64, ?table_id : Int64, ?table_name : String, ?operName : String) =
let table_id = defaultArg (table_id |> Option.map Nullable) (Nullable())
let table_name = defaultArg table_name null
let operName = defaultArg operName null
this.TFGraph.RetrieveTPUEmbeddingMomentumParametersGradAccumDebug(num_shards, shard_id, table_id, table_name, operName)
/// <summary>
/// Retrieve embedding parameters for a single table.
/// </summary><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'RetrieveTPUEmbeddingProximalAdagradParameters'.
/// </param><param name="table_id">
/// Optional argument
/// </param><param name="table_name">
/// Optional argument
/// </param><param name="num_shards"></param><param name="shard_id"></param><returns>
/// Returns a tuple with multiple values, as follows:
/// parameters: Parameter parameters updated by the proximal Adagrad optimization algorithm.
/// accumulators: Parameter accumulators updated by the proximal Adagrad optimization algorithm.
/// The TFOperation can be fetched from any of the TFOutputs returned in the tuple values, by fethching the Operation property.
/// </returns><remarks>
///
/// An op that retrieves optimization parameters from embedding to host
/// memory. Must be preceded by a ConfigureTPUEmbeddingHost op that sets up
/// the correct embedding table configuration. For example, this op is
/// used to retrieve updated parameters before saving a checkpoint.
///
/// parameters: A tensor containing the embedding table parameters to store with the
/// parameters from embedding updates using the proximal Adagrad optimization algorithm.
/// accumulators: A tensor containing the embedding table accumulators to store with the
/// parameters from embedding updates using the proximal Adagrad optimization algorithm.
/// table_name: Name of this table; must match a name in the
/// TPUEmbeddingConfiguration proto (overrides table_id).
/// num_shards: Number of shards into which the embedding tables are divided.
/// shard_id: Identifier of shard for this operation.
/// table_id: Index of this table in the EmbeddingLayerConfiguration proto
/// (deprecated).
///
/// </remarks>
member this.RetrieveTPUEmbeddingProximalAdagradParameters(num_shards : Int64, shard_id : Int64, ?table_id : Int64, ?table_name : String, ?operName : String) =
let table_id = defaultArg (table_id |> Option.map Nullable) (Nullable())
let table_name = defaultArg table_name null
let operName = defaultArg operName null
this.TFGraph.RetrieveTPUEmbeddingProximalAdagradParameters(num_shards, shard_id, table_id, table_name, operName)
/// <summary>
/// Retrieve embedding parameters for a single table.
/// </summary><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'RetrieveTPUEmbeddingProximalAdagradParametersGradAccumDebug'.
/// </param><param name="table_id">
/// Optional argument
/// </param><param name="table_name">
/// Optional argument
/// </param><param name="num_shards"></param><param name="shard_id"></param><returns>
/// Returns a tuple with multiple values, as follows:
/// parameters: Parameter parameters updated by the proximal Adagrad optimization algorithm.
/// accumulators: Parameter accumulators updated by the proximal Adagrad optimization algorithm.
/// gradient_accumulators: Parameter gradient_accumulators updated by the proximal Adagrad optimization algorithm.
/// The TFOperation can be fetched from any of the TFOutputs returned in the tuple values, by fethching the Operation property.
/// </returns><remarks>
///
/// An op that retrieves optimization parameters from embedding to host
/// memory. Must be preceded by a ConfigureTPUEmbeddingHost op that sets up
/// the correct embedding table configuration. For example, this op is
/// used to retrieve updated parameters before saving a checkpoint.
///
/// parameters: A tensor containing the embedding table parameters to store with the
/// parameters from embedding updates using the proximal Adagrad optimization algorithm.
/// accumulators: A tensor containing the embedding table accumulators to store with the
/// parameters from embedding updates using the proximal Adagrad optimization algorithm.
/// gradient_accumulators: A tensor containing the embedding table gradient_accumulators to store with the
/// parameters from embedding updates using the proximal Adagrad optimization algorithm.
/// table_name: Name of this table; must match a name in the
/// TPUEmbeddingConfiguration proto (overrides table_id).
/// num_shards: Number of shards into which the embedding tables are divided.
/// shard_id: Identifier of shard for this operation.
/// table_id: Index of this table in the EmbeddingLayerConfiguration proto
/// (deprecated).
///
/// </remarks>
member this.RetrieveTPUEmbeddingProximalAdagradParametersGradAccumDebug(num_shards : Int64, shard_id : Int64, ?table_id : Int64, ?table_name : String, ?operName : String) =
let table_id = defaultArg (table_id |> Option.map Nullable) (Nullable())
let table_name = defaultArg table_name null
let operName = defaultArg operName null
this.TFGraph.RetrieveTPUEmbeddingProximalAdagradParametersGradAccumDebug(num_shards, shard_id, table_id, table_name, operName)
/// <summary>
/// Retrieve embedding parameters for a single table.
/// </summary><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'RetrieveTPUEmbeddingRMSPropParameters'.
/// </param><param name="table_id">
/// Optional argument
/// </param><param name="table_name">
/// Optional argument
/// </param><param name="num_shards"></param><param name="shard_id"></param><returns>
/// Returns a tuple with multiple values, as follows:
/// parameters: Parameter parameters updated by the RMSProp optimization algorithm.
/// ms: Parameter ms updated by the RMSProp optimization algorithm.
/// mom: Parameter mom updated by the RMSProp optimization algorithm.
/// The TFOperation can be fetched from any of the TFOutputs returned in the tuple values, by fethching the Operation property.
/// </returns><remarks>
///
/// An op that retrieves optimization parameters from embedding to host
/// memory. Must be preceded by a ConfigureTPUEmbeddingHost op that sets up
/// the correct embedding table configuration. For example, this op is
/// used to retrieve updated parameters before saving a checkpoint.
///
/// parameters: A tensor containing the embedding table parameters to store with the
/// parameters from embedding updates using the RMSProp optimization algorithm.
/// ms: A tensor containing the embedding table ms to store with the
/// parameters from embedding updates using the RMSProp optimization algorithm.
/// mom: A tensor containing the embedding table mom to store with the
/// parameters from embedding updates using the RMSProp optimization algorithm.
/// table_name: Name of this table; must match a name in the
/// TPUEmbeddingConfiguration proto (overrides table_id).
/// num_shards: Number of shards into which the embedding tables are divided.
/// shard_id: Identifier of shard for this operation.
/// table_id: Index of this table in the EmbeddingLayerConfiguration proto
/// (deprecated).
///
/// </remarks>
member this.RetrieveTPUEmbeddingRMSPropParameters(num_shards : Int64, shard_id : Int64, ?table_id : Int64, ?table_name : String, ?operName : String) =
let table_id = defaultArg (table_id |> Option.map Nullable) (Nullable())
let table_name = defaultArg table_name null
let operName = defaultArg operName null
this.TFGraph.RetrieveTPUEmbeddingRMSPropParameters(num_shards, shard_id, table_id, table_name, operName)
/// <summary>
/// Retrieve embedding parameters for a single table.
/// </summary><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'RetrieveTPUEmbeddingRMSPropParametersGradAccumDebug'.
/// </param><param name="table_id">
/// Optional argument
/// </param><param name="table_name">
/// Optional argument
/// </param><param name="num_shards"></param><param name="shard_id"></param><returns>
/// Returns a tuple with multiple values, as follows:
/// parameters: Parameter parameters updated by the RMSProp optimization algorithm.
/// ms: Parameter ms updated by the RMSProp optimization algorithm.
/// mom: Parameter mom updated by the RMSProp optimization algorithm.
/// gradient_accumulators: Parameter gradient_accumulators updated by the RMSProp optimization algorithm.
/// The TFOperation can be fetched from any of the TFOutputs returned in the tuple values, by fethching the Operation property.
/// </returns><remarks>
///
/// An op that retrieves optimization parameters from embedding to host
/// memory. Must be preceded by a ConfigureTPUEmbeddingHost op that sets up
/// the correct embedding table configuration. For example, this op is
/// used to retrieve updated parameters before saving a checkpoint.
///
/// parameters: A tensor containing the embedding table parameters to store with the
/// parameters from embedding updates using the RMSProp optimization algorithm.
/// ms: A tensor containing the embedding table ms to store with the
/// parameters from embedding updates using the RMSProp optimization algorithm.
/// mom: A tensor containing the embedding table mom to store with the
/// parameters from embedding updates using the RMSProp optimization algorithm.
/// gradient_accumulators: A tensor containing the embedding table gradient_accumulators to store with the
/// parameters from embedding updates using the RMSProp optimization algorithm.
/// table_name: Name of this table; must match a name in the
/// TPUEmbeddingConfiguration proto (overrides table_id).
/// num_shards: Number of shards into which the embedding tables are divided.
/// shard_id: Identifier of shard for this operation.
/// table_id: Index of this table in the EmbeddingLayerConfiguration proto
/// (deprecated).
///
/// </remarks>
member this.RetrieveTPUEmbeddingRMSPropParametersGradAccumDebug(num_shards : Int64, shard_id : Int64, ?table_id : Int64, ?table_name : String, ?operName : String) =
let table_id = defaultArg (table_id |> Option.map Nullable) (Nullable())
let table_name = defaultArg table_name null
let operName = defaultArg operName null
this.TFGraph.RetrieveTPUEmbeddingRMSPropParametersGradAccumDebug(num_shards, shard_id, table_id, table_name, operName)
/// <summary>
/// Retrieve embedding parameters for a single table.
/// </summary><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'RetrieveTPUEmbeddingStochasticGradientDescentParameters'.
/// </param><param name="table_id">
/// Optional argument
/// </param><param name="table_name">
/// Optional argument
/// </param><param name="num_shards"></param><param name="shard_id"></param><returns>
/// Parameter parameters updated by the stochastic gradient descent optimization algorithm.
/// The TFOperation can be fetched from the resulting TFOutput, by fethching the Operation property from the result.
/// </returns><remarks>
///
/// An op that retrieves optimization parameters from embedding to host
/// memory. Must be preceded by a ConfigureTPUEmbeddingHost op that sets up
/// the correct embedding table configuration. For example, this op is
/// used to retrieve updated parameters before saving a checkpoint.
///
/// parameters: A tensor containing the embedding table parameters to store with the
/// parameters from embedding updates using the stochastic gradient descent optimization algorithm.
/// table_name: Name of this table; must match a name in the
/// TPUEmbeddingConfiguration proto (overrides table_id).
/// num_shards: Number of shards into which the embedding tables are divided.
/// shard_id: Identifier of shard for this operation.
/// table_id: Index of this table in the EmbeddingLayerConfiguration proto
/// (deprecated).
///
/// </remarks>
member this.RetrieveTPUEmbeddingStochasticGradientDescentParameters(num_shards : Int64, shard_id : Int64, ?table_id : Int64, ?table_name : String, ?operName : String) =
let table_id = defaultArg (table_id |> Option.map Nullable) (Nullable())
let table_name = defaultArg table_name null
let operName = defaultArg operName null
TF(this.TFGraph, this.TFGraph.RetrieveTPUEmbeddingStochasticGradientDescentParameters(num_shards, shard_id, table_id, table_name, operName))
/// <summary>
/// Reverses specific dimensions of a tensor.
/// </summary><param name="tensor">
/// Up to 8-D.
/// </param><param name="dims">
/// 1-D. The dimensions to reverse.
/// </param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'Reverse'.
/// </param><returns>
/// The same shape as <c>tensor</c>.
/// The TFOperation can be fetched from the resulting TFOutput, by fethching the Operation property from the result.
/// </returns><remarks>
/// Given a <c>tensor</c>, and a <c>bool</c> tensor <c>dims</c> representing the dimensions
/// of <c>tensor</c>, this operation reverses each dimension i of <c>tensor</c> where
/// <c>dims[i]</c> is <c>True</c>.
///
/// <c>tensor</c> can have up to 8 dimensions. The number of dimensions
/// of <c>tensor</c> must equal the number of elements in <c>dims</c>. In other words:
///
/// <c>rank(tensor) = size(dims)</c>
///
/// For example:
///
/// <code>
/// # tensor 't' is [[[[ 0, 1, 2, 3],
/// # [ 4, 5, 6, 7],
/// # [ 8, 9, 10, 11]],
/// # [[12, 13, 14, 15],
/// # [16, 17, 18, 19],
/// # [20, 21, 22, 23]]]]
/// # tensor 't' shape is [1, 2, 3, 4]
///
/// # 'dims' is [False, False, False, True]
/// reverse(t, dims) ==&amp;gt; [[[[ 3, 2, 1, 0],
/// [ 7, 6, 5, 4],
/// [ 11, 10, 9, 8]],
/// [[15, 14, 13, 12],
/// [19, 18, 17, 16],
/// [23, 22, 21, 20]]]]
///
/// # 'dims' is [False, True, False, False]
/// reverse(t, dims) ==&amp;gt; [[[[12, 13, 14, 15],
/// [16, 17, 18, 19],
/// [20, 21, 22, 23]
/// [[ 0, 1, 2, 3],
/// [ 4, 5, 6, 7],
/// [ 8, 9, 10, 11]]]]
///
/// # 'dims' is [False, False, True, False]
/// reverse(t, dims) ==&amp;gt; [[[[8, 9, 10, 11],
/// [4, 5, 6, 7],
/// [0, 1, 2, 3]]
/// [[20, 21, 22, 23],
/// [16, 17, 18, 19],
/// [12, 13, 14, 15]]]]
/// </code></remarks>
member this.Reverse(tensor : TF, dims : TF, ?operName : String) =
let operName = defaultArg operName null
TF(this.TFGraph, this.TFGraph.Reverse(tensor.TFOutput, dims.TFOutput, operName))
/// <summary>
/// Reverses variable length slices.
/// </summary><param name="input">
/// The input to reverse.
/// </param><param name="seq_lengths">
/// 1-D with length <c>input.dims(batch_dim)</c> and
/// <c>max(seq_lengths) &amp;lt;= input.dims(seq_dim)</c></param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'ReverseSequence'.
/// </param><param name="batch_dim">
/// Optional argument
/// The dimension along which reversal is performed.
/// </param><param name="seq_dim">
/// The dimension which is partially reversed.
/// </param><returns>
/// The partially reversed input. It has the same shape as <c>input</c>.
    /// The TFOperation can be fetched from the resulting TFOutput, by fetching the Operation property from the result.
/// </returns><remarks>
/// This op first slices <c>input</c> along the dimension <c>batch_dim</c>, and for each
/// slice <c>i</c>, reverses the first <c>seq_lengths[i]</c> elements along
/// the dimension <c>seq_dim</c>.
///
/// The elements of <c>seq_lengths</c> must obey <c>seq_lengths[i] &amp;lt;= input.dims[seq_dim]</c>,
/// and <c>seq_lengths</c> must be a vector of length <c>input.dims[batch_dim]</c>.
///
/// The output slice <c>i</c> along dimension <c>batch_dim</c> is then given by input
/// slice <c>i</c>, with the first <c>seq_lengths[i]</c> slices along dimension
/// <c>seq_dim</c> reversed.
///
/// For example:
///
/// <code>
/// # Given this:
/// batch_dim = 0
/// seq_dim = 1
/// input.dims = (4, 8, ...)
/// seq_lengths = [7, 2, 3, 5]
///
/// # then slices of input are reversed on seq_dim, but only up to seq_lengths:
/// output[0, 0:7, :, ...] = input[0, 7:0:-1, :, ...]
/// output[1, 0:2, :, ...] = input[1, 2:0:-1, :, ...]
/// output[2, 0:3, :, ...] = input[2, 3:0:-1, :, ...]
/// output[3, 0:5, :, ...] = input[3, 5:0:-1, :, ...]
///
/// # while entries past seq_lens are copied through:
/// output[0, 7:, :, ...] = input[0, 7:, :, ...]
/// output[1, 2:, :, ...] = input[1, 2:, :, ...]
/// output[2, 3:, :, ...] = input[2, 3:, :, ...]
/// output[3, 2:, :, ...] = input[3, 2:, :, ...]
/// </code>
///
/// In contrast, if:
///
/// <code>
/// # Given this:
/// batch_dim = 2
/// seq_dim = 0
/// input.dims = (8, ?, 4, ...)
/// seq_lengths = [7, 2, 3, 5]
///
/// # then slices of input are reversed on seq_dim, but only up to seq_lengths:
/// output[0:7, :, 0, :, ...] = input[7:0:-1, :, 0, :, ...]
/// output[0:2, :, 1, :, ...] = input[2:0:-1, :, 1, :, ...]
/// output[0:3, :, 2, :, ...] = input[3:0:-1, :, 2, :, ...]
/// output[0:5, :, 3, :, ...] = input[5:0:-1, :, 3, :, ...]
///
/// # while entries past seq_lens are copied through:
/// output[7:, :, 0, :, ...] = input[7:, :, 0, :, ...]
/// output[2:, :, 1, :, ...] = input[2:, :, 1, :, ...]
/// output[3:, :, 2, :, ...] = input[3:, :, 2, :, ...]
/// output[2:, :, 3, :, ...] = input[2:, :, 3, :, ...]
/// </code></remarks>
member this.ReverseSequence(input : TF, seq_lengths : TF, seq_dim : Int64, ?batch_dim : Int64, ?operName : String) =
    // Convert the F# optionals to the CLR shapes TensorFlowSharp expects.
    let batchDim = Option.toNullable batch_dim
    let name = Option.defaultValue null operName
    TF(this.TFGraph, this.TFGraph.ReverseSequence(input.TFOutput, seq_lengths.TFOutput, seq_dim, batchDim, name))
/// <summary>
/// Reverses specific dimensions of a tensor.
/// </summary><param name="tensor">
/// Up to 8-D.
/// </param><param name="axis">
/// 1-D. The indices of the dimensions to reverse. Must be in the range
/// <c>[-rank(tensor), rank(tensor))</c>.
/// </param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'ReverseV2'.
/// </param><returns>
/// The same shape as <c>tensor</c>.
/// The TFOperation can be fetched from the resulting TFOutput, by fetching the Operation property from the result.
/// </returns><remarks>
/// NOTE <c>tf.reverse</c> has now changed behavior in preparation for 1.0.
/// <c>tf.reverse_v2</c> is currently an alias that will be deprecated before TF 1.0.
///
/// Given a <c>tensor</c>, and a <c>int32</c> tensor <c>axis</c> representing the set of
/// dimensions of <c>tensor</c> to reverse. This operation reverses each dimension
/// <c>i</c> for which there exists <c>j</c> s.t. <c>axis[j] == i</c>.
///
/// <c>tensor</c> can have up to 8 dimensions. The number of dimensions specified
/// in <c>axis</c> may be 0 or more entries. If an index is specified more than
/// once, a InvalidArgument error is raised.
///
/// For example:
///
/// <code>
/// # tensor 't' is [[[[ 0, 1, 2, 3],
/// # [ 4, 5, 6, 7],
/// # [ 8, 9, 10, 11]],
/// # [[12, 13, 14, 15],
/// # [16, 17, 18, 19],
/// # [20, 21, 22, 23]]]]
/// # tensor 't' shape is [1, 2, 3, 4]
///
/// # 'dims' is [3] or 'dims' is [-1]
/// reverse(t, dims) ==&amp;gt; [[[[ 3, 2, 1, 0],
/// [ 7, 6, 5, 4],
/// [ 11, 10, 9, 8]],
/// [[15, 14, 13, 12],
/// [19, 18, 17, 16],
/// [23, 22, 21, 20]]]]
///
/// # 'dims' is '[1]' (or 'dims' is '[-3]')
/// reverse(t, dims) ==&amp;gt; [[[[12, 13, 14, 15],
/// [16, 17, 18, 19],
/// [20, 21, 22, 23]
/// [[ 0, 1, 2, 3],
/// [ 4, 5, 6, 7],
/// [ 8, 9, 10, 11]]]]
///
/// # 'dims' is '[2]' (or 'dims' is '[-2]')
/// reverse(t, dims) ==&amp;gt; [[[[8, 9, 10, 11],
/// [4, 5, 6, 7],
/// [0, 1, 2, 3]]
/// [[20, 21, 22, 23],
/// [16, 17, 18, 19],
/// [12, 13, 14, 15]]]]
/// </code></remarks>
member this.ReverseV2(tensor : TF, axis : TF, ?operName : String) =
    // Forward to the graph-level op, wrapping the returned output.
    let g = this.TFGraph
    TF(g, g.ReverseV2(tensor.TFOutput, axis.TFOutput, defaultArg operName null))
/// <summary>
/// Real-valued fast Fourier transform.
/// </summary><param name="input">
/// A float32 tensor.
/// </param><param name="fft_length">
/// An int32 tensor of shape [1]. The FFT length.
/// </param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'RFFT'.
/// </param><returns>
/// A complex64 tensor of the same rank as <c>input</c>. The inner-most
/// dimension of <c>input</c> is replaced with the <c>fft_length / 2 + 1</c> unique
/// frequency components of its 1D Fourier transform.
///
/// @compatibility(numpy)
/// Equivalent to np.fft.rfft
/// @end_compatibility
/// The TFOperation can be fetched from the resulting TFOutput, by fetching the Operation property from the result.
/// </returns><remarks>
/// Computes the 1-dimensional discrete Fourier transform of a real-valued signal
/// over the inner-most dimension of <c>input</c>.
///
/// Since the DFT of a real signal is Hermitian-symmetric, <c>RFFT</c> only returns the
/// <c>fft_length / 2 + 1</c> unique components of the FFT: the zero-frequency term,
/// followed by the <c>fft_length / 2</c> positive-frequency terms.
///
/// Along the axis <c>RFFT</c> is computed on, if <c>fft_length</c> is smaller than the
/// corresponding dimension of <c>input</c>, the dimension is cropped. If it is larger,
/// the dimension is padded with zeros.
/// </remarks>
member this.RFFT(input : TF, fft_length : TF, ?operName : String) =
    // Optional op name defaults to null so TensorFlowSharp auto-names the node.
    let name = operName |> Option.defaultValue null
    TF(this.TFGraph, this.TFGraph.RFFT(input.TFOutput, fft_length.TFOutput, name))
/// <summary>
/// 2D real-valued fast Fourier transform.
/// </summary><param name="input">
/// A float32 tensor.
/// </param><param name="fft_length">
/// An int32 tensor of shape [2]. The FFT length for each dimension.
/// </param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'RFFT2D'.
/// </param><returns>
/// A complex64 tensor of the same rank as <c>input</c>. The inner-most 2
/// dimensions of <c>input</c> are replaced with their 2D Fourier transform. The
/// inner-most dimension contains <c>fft_length / 2 + 1</c> unique frequency
/// components.
///
/// @compatibility(numpy)
/// Equivalent to np.fft.rfft2
/// @end_compatibility
/// The TFOperation can be fetched from the resulting TFOutput, by fetching the Operation property from the result.
/// </returns><remarks>
/// Computes the 2-dimensional discrete Fourier transform of a real-valued signal
/// over the inner-most 2 dimensions of <c>input</c>.
///
/// Since the DFT of a real signal is Hermitian-symmetric, <c>RFFT2D</c> only returns the
/// <c>fft_length / 2 + 1</c> unique components of the FFT for the inner-most dimension
/// of <c>output</c>: the zero-frequency term, followed by the <c>fft_length / 2</c>
/// positive-frequency terms.
///
/// Along each axis <c>RFFT2D</c> is computed on, if <c>fft_length</c> is smaller than the
/// corresponding dimension of <c>input</c>, the dimension is cropped. If it is larger,
/// the dimension is padded with zeros.
/// </remarks>
member this.RFFT2D(input : TF, fft_length : TF, ?operName : String) =
    // Delegate to TFGraph.RFFT2D and wrap the resulting output.
    let name = match operName with Some n -> n | None -> null
    TF(this.TFGraph, this.TFGraph.RFFT2D(input.TFOutput, fft_length.TFOutput, name))
/// <summary>
/// 3D real-valued fast Fourier transform.
/// </summary><param name="input">
/// A float32 tensor.
/// </param><param name="fft_length">
/// An int32 tensor of shape [3]. The FFT length for each dimension.
/// </param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'RFFT3D'.
/// </param><returns>
/// A complex64 tensor of the same rank as <c>input</c>. The inner-most 3
/// dimensions of <c>input</c> are replaced with their 3D Fourier transform. The
/// inner-most dimension contains <c>fft_length / 2 + 1</c> unique frequency
/// components.
///
/// @compatibility(numpy)
/// Equivalent to np.fft.rfftn with 3 dimensions.
/// @end_compatibility
/// The TFOperation can be fetched from the resulting TFOutput, by fetching the Operation property from the result.
/// </returns><remarks>
/// Computes the 3-dimensional discrete Fourier transform of a real-valued signal
/// over the inner-most 3 dimensions of <c>input</c>.
///
/// Since the DFT of a real signal is Hermitian-symmetric, <c>RFFT3D</c> only returns the
/// <c>fft_length / 2 + 1</c> unique components of the FFT for the inner-most dimension
/// of <c>output</c>: the zero-frequency term, followed by the <c>fft_length / 2</c>
/// positive-frequency terms.
///
/// Along each axis <c>RFFT3D</c> is computed on, if <c>fft_length</c> is smaller than the
/// corresponding dimension of <c>input</c>, the dimension is cropped. If it is larger,
/// the dimension is padded with zeros.
/// </remarks>
member this.RFFT3D(input : TF, fft_length : TF, ?operName : String) =
    // Delegate to TFGraph.RFFT3D and wrap the resulting output.
    let g = this.TFGraph
    TF(g, g.RFFT3D(input.TFOutput, fft_length.TFOutput, defaultArg operName null))
/// <summary>
/// Converts one or more images from RGB to HSV.
/// </summary><param name="images">
/// 1-D or higher rank. RGB data to convert. Last dimension must be size 3.
/// </param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'RGBToHSV'.
/// </param><returns><c>images</c> converted to HSV.
/// The TFOperation can be fetched from the resulting TFOutput, by fetching the Operation property from the result.
/// </returns><remarks>
/// Outputs a tensor of the same shape as the <c>images</c> tensor, containing the HSV
/// value of the pixels. The output is only well defined if the value in <c>images</c>
/// are in <c>[0,1]</c>.
///
/// <c>output[..., 0]</c> contains hue, <c>output[..., 1]</c> contains saturation, and
/// <c>output[..., 2]</c> contains value. All HSV values are in <c>[0,1]</c>. A hue of 0
/// corresponds to pure red, hue 1/3 is pure green, and 2/3 is pure blue.
/// </remarks>
member this.RGBToHSV(images : TF, ?operName : String) =
    // Single-input op: unwrap, delegate, re-wrap.
    let name = Option.defaultValue null operName
    TF(this.TFGraph, this.TFGraph.RGBToHSV(images.TFOutput, name))
/// <summary>
/// Elementwise computes the bitwise right-shift of <c>x</c> and <c>y</c>.
/// </summary><param name="x"></param><param name="y"></param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'RightShift'.
/// </param><returns>
/// The TFOperation can be fetched from the resulting TFOutput, by fetching the Operation property from the result.
/// </returns><remarks>
/// Performs a logical shift for unsigned integer types, and an arithmetic shift
/// for signed integer types.
///
/// If <c>y</c> is negative, or greater than or equal to the width of <c>x</c> in bits
/// the result is implementation defined.
/// </remarks>
member this.RightShift(x : TF, y : TF, ?operName : String) =
    // Binary bitwise op: forward both operands to the graph.
    let g = this.TFGraph
    TF(g, g.RightShift(x.TFOutput, y.TFOutput, defaultArg operName null))
/// <summary>
/// Returns element-wise integer closest to x.
/// </summary><param name="x"></param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'Rint'.
/// </param><returns>
/// The TFOperation can be fetched from the resulting TFOutput, by fetching the Operation property from the result.
/// </returns><remarks>
/// If the result is midway between two representable values,
/// the even representable is chosen.
/// For example:
///
/// <code>
/// rint(-1.5) ==&amp;gt; -2.0
/// rint(0.5000001) ==&amp;gt; 1.0
/// rint([-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0]) ==&amp;gt; [-2., -2., -0., 0., 2., 2., 2.]
/// </code></remarks>
member this.Rint(x : TF, ?operName : String) =
    // Element-wise op: delegate straight to the underlying graph.
    let name = match operName with Some n -> n | None -> null
    TF(this.TFGraph, this.TFGraph.Rint(x.TFOutput, name))
/// <summary>
/// Rolls the elements of a tensor along an axis.
/// </summary><param name="input"></param><param name="shift">
/// Dimension must be 0-D or 1-D. <c>shift[i]</c> specifies the number of places by which
/// elements are shifted positively (towards larger indices) along the dimension
/// specified by <c>axis[i]</c>. Negative shifts will roll the elements in the opposite
/// direction.
/// </param><param name="axis">
/// Dimension must be 0-D or 1-D. <c>axis[i]</c> specifies the dimension that the shift
/// <c>shift[i]</c> should occur. If the same axis is referenced more than once, the
/// total shift for that axis will be the sum of all the shifts that belong to that
/// axis.
/// </param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'Roll'.
/// </param><returns>
/// Has the same shape and size as the input. The elements are shifted
/// positively (towards larger indices) by the offsets of <c>shift</c> along the
/// dimensions of <c>axis</c>.
/// The TFOperation can be fetched from the resulting TFOutput, by fetching the Operation property from the result.
/// </returns><remarks>
/// The elements are shifted positively (towards larger indices) by the offset of
/// <c>shift</c> along the dimension of <c>axis</c>. Negative <c>shift</c> values will shift
/// elements in the opposite direction. Elements that roll passed the last position
/// will wrap around to the first and vice versa. Multiple shifts along multiple
/// axes may be specified.
///
/// For example:
///
/// <code>
/// # 't' is [0, 1, 2, 3, 4]
/// roll(t, shift=2, axis=0) ==&amp;gt; [3, 4, 0, 1, 2]
///
/// # shifting along multiple dimensions
/// # 't' is [[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]]
/// roll(t, shift=[1, -2], axis=[0, 1]) ==&amp;gt; [[7, 8, 9, 5, 6], [2, 3, 4, 0, 1]]
///
/// # shifting along the same axis multiple times
/// # 't' is [[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]]
/// roll(t, shift=[2, -3], axis=[1, 1]) ==&amp;gt; [[1, 2, 3, 4, 0], [6, 7, 8, 9, 5]]
/// </code></remarks>
member this.Roll(input : TF, shift : TF, axis : TF, ?operName : String) =
    // Three tensor inputs; unwrap each before delegating.
    let name = operName |> Option.defaultValue null
    TF(this.TFGraph, this.TFGraph.Roll(input.TFOutput, shift.TFOutput, axis.TFOutput, name))
/// <summary>
/// Rounds the values of a tensor to the nearest integer, element-wise.
/// </summary><param name="x"></param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'Round'.
/// </param><returns>
/// The TFOperation can be fetched from the resulting TFOutput, by fetching the Operation property from the result.
/// </returns><remarks>
/// Rounds half to even. Also known as banker's rounding. If you want to round
/// according to the current system rounding mode use std::rint.
/// </remarks>
member this.Round(x : TF, ?operName : String) =
    // Element-wise rounding; forwarded unchanged to TFGraph.Round.
    let g = this.TFGraph
    TF(g, g.Round(x.TFOutput, defaultArg operName null))
/// <summary>
/// Perform batches of RPC requests.
/// </summary><param name="address"><c>0-D</c> or <c>1-D</c>. The address (i.e. host_name:port) of the RPC server.
/// If this tensor has more than 1 element, then multiple parallel rpc requests
/// are sent. This argument broadcasts with <c>method</c> and <c>request</c>.
/// </param><param name="method"><c>0-D</c> or <c>1-D</c>. The method address on the RPC server.
/// If this tensor has more than 1 element, then multiple parallel rpc requests
/// are sent. This argument broadcasts with <c>address</c> and <c>request</c>.
/// </param><param name="request"><c>0-D</c> or <c>1-D</c>. Serialized proto strings: the rpc request argument.
/// If this tensor has more than 1 element, then multiple parallel rpc requests
/// are sent. This argument broadcasts with <c>address</c> and <c>method</c>.
/// </param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'Rpc'.
/// </param><param name="protocol">
/// Optional argument
/// RPC protocol to use. Empty string means use the default protocol.
/// Options include 'grpc'.
/// </param><param name="fail_fast">
/// Optional argument
/// <c>boolean</c>. If <c>true</c> (default), then failures to connect
/// (i.e., the server does not immediately respond) cause an RPC failure.
/// </param><param name="timeout_in_ms">
/// Optional argument
/// <c>int</c>. If <c>0</c> (default), then the kernel will run the RPC
/// request and only time out if the RPC deadline passes or the session times out.
/// If this value is greater than <c>0</c>, then the op will raise an exception if
/// the RPC takes longer than <c>timeout_in_ms</c>.
/// </param><returns>
/// Same shape as <c>request</c>. Serialized proto strings: the rpc responses.
/// The TFOperation can be fetched from the resulting TFOutput, by fetching the Operation property from the result.
/// </returns><remarks>
/// This op asynchronously performs either a single RPC request, or a batch
/// of requests. RPC requests are defined by three main parameters:
///
/// - <c>address</c> (the host+port or BNS address of the request)
/// - <c>method</c> (the RPC method name for the request)
/// - <c>request</c> (the serialized proto string, or vector of strings,
/// of the RPC request argument).
///
/// For example, if you have an RPC service running on port localhost:2345,
/// and its interface is configured with the following proto declaration:
///
/// <code>
/// service MyService {
/// rpc MyMethod(MyRequestProto) returns (MyResponseProto) {
/// }
/// };
/// </code>
///
/// then call this op with arguments:
///
/// <code>
/// address = "localhost:2345"
/// method = "MyService/MyMethod"
/// </code>
///
/// The <c>request</c> tensor is a string tensor representing serialized <c>MyRequestProto</c>
/// strings; and the output string tensor <c>response</c> will have the same shape
/// and contain (upon successful completion) corresponding serialized
/// <c>MyResponseProto</c> strings.
///
/// For example, to send a single, empty, <c>MyRequestProto</c>, call
/// this op with <c>request = ""</c>. To send 5 **parallel** empty requests,
/// call this op with <c>request = ["", "", "", "", ""]</c>.
///
/// More generally, one can create a batch of <c>MyRequestProto</c> serialized protos
/// from regular batched tensors using the <c>encode_proto</c> op, and convert
/// the response <c>MyResponseProto</c> serialized protos to batched tensors
/// using the <c>decode_proto</c> op.
///
/// **NOTE** Working with serialized proto strings is faster than instantiating
/// actual proto objects in memory, so no performance degradation is expected
/// compared to writing custom kernels for this workflow.
///
/// If the connection fails or the remote worker returns an error
/// status, the op reraises this exception locally.
///
/// See the <c>TryRpc</c> op if you prefer to handle RPC failures manually in the graph.
/// </remarks>
member this.Rpc(address : TF, method : TF, request : TF, ?protocol : String, ?fail_fast : Boolean, ?timeout_in_ms : Int64, ?operName : String) =
    // Map each F# optional onto the null/Nullable representation the C# API takes.
    let g = this.TFGraph
    TF(g,
       g.Rpc(address.TFOutput, method.TFOutput, request.TFOutput,
             Option.defaultValue null protocol,
             Option.toNullable fail_fast,
             Option.toNullable timeout_in_ms,
             Option.defaultValue null operName))
/// <summary>
/// Computes reciprocal of square root of x element-wise.
/// </summary><param name="x"></param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'Rsqrt'.
/// </param><returns>
/// The TFOperation can be fetched from the resulting TFOutput, by fetching the Operation property from the result.
/// </returns><remarks>
/// I.e., \\(y = 1 / \sqrt{x}\\).
/// </remarks>
member this.Rsqrt(x : TF, ?operName : String) =
    // Element-wise reciprocal square root; thin delegation.
    let name = Option.defaultValue null operName
    TF(this.TFGraph, this.TFGraph.Rsqrt(x.TFOutput, name))
/// <summary>
/// Computes the gradient for the rsqrt of <c>x</c> wrt its input.
/// </summary><param name="y"></param><param name="dy"></param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'RsqrtGrad'.
/// </param><returns>
/// The TFOperation can be fetched from the resulting TFOutput, by fetching the Operation property from the result.
/// </returns><remarks>
/// Specifically, <c>grad = dy * -0.5 * y^3</c>, where <c>y = rsqrt(x)</c>, and <c>dy</c>
/// is the corresponding input gradient.
/// </remarks>
member this.RsqrtGrad(y : TF, dy : TF, ?operName : String) =
    // Gradient helper for Rsqrt; forwards both inputs to the graph.
    let name = match operName with Some n -> n | None -> null
    TF(this.TFGraph, this.TFGraph.RsqrtGrad(y.TFOutput, dy.TFOutput, name))
/// <summary>
/// Generate a single randomly distorted bounding box for an image.
/// </summary><param name="image_size">
/// 1-D, containing <c>[height, width, channels]</c>.
/// </param><param name="bounding_boxes">
/// 3-D with shape <c>[batch, N, 4]</c> describing the N bounding boxes
/// associated with the image.
/// </param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'SampleDistortedBoundingBox'.
/// </param><param name="seed">
/// Optional argument
/// If either <c>seed</c> or <c>seed2</c> are set to non-zero, the random number
/// generator is seeded by the given <c>seed</c>. Otherwise, it is seeded by a random
/// seed.
/// </param><param name="seed2">
/// Optional argument
/// A second seed to avoid seed collision.
/// </param><param name="min_object_covered">
/// Optional argument
/// The cropped area of the image must contain at least this
/// fraction of any bounding box supplied. The value of this parameter should be
/// non-negative. In the case of 0, the cropped area does not need to overlap
/// any of the bounding boxes supplied.
/// </param><param name="aspect_ratio_range">
/// Optional argument
/// The cropped area of the image must have an aspect ratio =
/// width / height within this range.
/// </param><param name="area_range">
/// Optional argument
/// The cropped area of the image must contain a fraction of the
/// supplied image within this range.
/// </param><param name="max_attempts">
/// Optional argument
/// Number of attempts at generating a cropped region of the image
/// of the specified constraints. After <c>max_attempts</c> failures, return the entire
/// image.
/// </param><param name="use_image_if_no_bounding_boxes">
/// Optional argument
/// Controls behavior if no bounding boxes supplied.
/// If true, assume an implicit bounding box covering the whole input. If false,
/// raise an error.
/// </param><returns>
/// Returns a tuple with multiple values, as follows:
/// begin: 1-D, containing <c>[offset_height, offset_width, 0]</c>. Provide as input to
/// <c>tf.slice</c>.
/// size: 1-D, containing <c>[target_height, target_width, -1]</c>. Provide as input to
/// <c>tf.slice</c>.
/// bboxes: 3-D with shape <c>[1, 1, 4]</c> containing the distorted bounding box.
/// Provide as input to <c>tf.image.draw_bounding_boxes</c>.
/// The TFOperation can be fetched from any of the TFOutputs returned in the tuple values, by fetching the Operation property.
/// </returns><remarks>
/// Bounding box annotations are often supplied in addition to ground-truth labels
/// in image recognition or object localization tasks. A common technique for
/// training such a system is to randomly distort an image while preserving
/// its content, i.e. *data augmentation*. This Op outputs a randomly distorted
/// localization of an object, i.e. bounding box, given an <c>image_size</c>,
/// <c>bounding_boxes</c> and a series of constraints.
///
/// The output of this Op is a single bounding box that may be used to crop the
/// original image. The output is returned as 3 tensors: <c>begin</c>, <c>size</c> and
/// <c>bboxes</c>. The first 2 tensors can be fed directly into <c>tf.slice</c> to crop the
/// image. The latter may be supplied to <c>tf.image.draw_bounding_boxes</c> to visualize
/// what the bounding box looks like.
///
/// Bounding boxes are supplied and returned as <c>[y_min, x_min, y_max, x_max]</c>. The
/// bounding box coordinates are floats in <c>[0.0, 1.0]</c> relative to the width and
/// height of the underlying image.
///
/// For example,
///
/// <code>
/// # Generate a single distorted bounding box.
/// begin, size, bbox_for_draw = tf.image.sample_distorted_bounding_box(
/// tf.shape(image),
/// bounding_boxes=bounding_boxes)
///
/// # Draw the bounding box in an image summary.
/// image_with_box = tf.image.draw_bounding_boxes(tf.expand_dims(image, 0),
/// bbox_for_draw)
/// tf.summary.image('images_with_box', image_with_box)
///
/// # Employ the bounding box to distort the image.
/// distorted_image = tf.slice(image, begin, size)
/// </code>
///
/// Note that if no bounding box information is available, setting
/// <c>use_image_if_no_bounding_boxes = true</c> will assume there is a single implicit
/// bounding box covering the whole image. If <c>use_image_if_no_bounding_boxes</c> is
/// false and no bounding boxes are supplied, an error is raised.
/// </remarks>
member this.SampleDistortedBoundingBox(image_size : TF, bounding_boxes : TF, ?seed : Int64, ?seed2 : Int64, ?min_object_covered : Single, ?aspect_ratio_range : Single[], ?area_range : Single[], ?max_attempts : Int64, ?use_image_if_no_bounding_boxes : Boolean, ?operName : String) =
    // Returns the raw (begin, size, bboxes) tuple from TensorFlowSharp unchanged.
    this.TFGraph.SampleDistortedBoundingBox(
        image_size.TFOutput,
        bounding_boxes.TFOutput,
        Option.toNullable seed,
        Option.toNullable seed2,
        Option.toNullable min_object_covered,
        Option.defaultValue null aspect_ratio_range,
        Option.defaultValue null area_range,
        Option.toNullable max_attempts,
        Option.toNullable use_image_if_no_bounding_boxes,
        Option.defaultValue null operName)
/// <summary>
/// Generate a single randomly distorted bounding box for an image.
/// </summary><param name="image_size">
/// 1-D, containing <c>[height, width, channels]</c>.
/// </param><param name="bounding_boxes">
/// 3-D with shape <c>[batch, N, 4]</c> describing the N bounding boxes
/// associated with the image.
/// </param><param name="min_object_covered">
/// The cropped area of the image must contain at least this
/// fraction of any bounding box supplied. The value of this parameter should be
/// non-negative. In the case of 0, the cropped area does not need to overlap
/// any of the bounding boxes supplied.
/// </param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'SampleDistortedBoundingBoxV2'.
/// </param><param name="seed">
/// Optional argument
/// If either <c>seed</c> or <c>seed2</c> are set to non-zero, the random number
/// generator is seeded by the given <c>seed</c>. Otherwise, it is seeded by a random
/// seed.
/// </param><param name="seed2">
/// Optional argument
/// A second seed to avoid seed collision.
/// </param><param name="aspect_ratio_range">
/// Optional argument
/// The cropped area of the image must have an aspect ratio =
/// width / height within this range.
/// </param><param name="area_range">
/// Optional argument
/// The cropped area of the image must contain a fraction of the
/// supplied image within this range.
/// </param><param name="max_attempts">
/// Optional argument
/// Number of attempts at generating a cropped region of the image
/// of the specified constraints. After <c>max_attempts</c> failures, return the entire
/// image.
/// </param><param name="use_image_if_no_bounding_boxes">
/// Optional argument
/// Controls behavior if no bounding boxes supplied.
/// If true, assume an implicit bounding box covering the whole input. If false,
/// raise an error.
/// </param><returns>
/// Returns a tuple with multiple values, as follows:
/// begin: 1-D, containing <c>[offset_height, offset_width, 0]</c>. Provide as input to
/// <c>tf.slice</c>.
/// size: 1-D, containing <c>[target_height, target_width, -1]</c>. Provide as input to
/// <c>tf.slice</c>.
/// bboxes: 3-D with shape <c>[1, 1, 4]</c> containing the distorted bounding box.
/// Provide as input to <c>tf.image.draw_bounding_boxes</c>.
/// The TFOperation can be fetched from any of the TFOutputs returned in the tuple values, by fetching the Operation property.
/// </returns><remarks>
/// Bounding box annotations are often supplied in addition to ground-truth labels
/// in image recognition or object localization tasks. A common technique for
/// training such a system is to randomly distort an image while preserving
/// its content, i.e. *data augmentation*. This Op outputs a randomly distorted
/// localization of an object, i.e. bounding box, given an <c>image_size</c>,
/// <c>bounding_boxes</c> and a series of constraints.
///
/// The output of this Op is a single bounding box that may be used to crop the
/// original image. The output is returned as 3 tensors: <c>begin</c>, <c>size</c> and
/// <c>bboxes</c>. The first 2 tensors can be fed directly into <c>tf.slice</c> to crop the
/// image. The latter may be supplied to <c>tf.image.draw_bounding_boxes</c> to visualize
/// what the bounding box looks like.
///
/// Bounding boxes are supplied and returned as <c>[y_min, x_min, y_max, x_max]</c>. The
/// bounding box coordinates are floats in <c>[0.0, 1.0]</c> relative to the width and
/// height of the underlying image.
///
/// For example,
///
/// <code>
/// # Generate a single distorted bounding box.
/// begin, size, bbox_for_draw = tf.image.sample_distorted_bounding_box(
/// tf.shape(image),
/// bounding_boxes=bounding_boxes)
///
/// # Draw the bounding box in an image summary.
/// image_with_box = tf.image.draw_bounding_boxes(tf.expand_dims(image, 0),
/// bbox_for_draw)
/// tf.summary.image('images_with_box', image_with_box)
///
/// # Employ the bounding box to distort the image.
/// distorted_image = tf.slice(image, begin, size)
/// </code>
///
/// Note that if no bounding box information is available, setting
/// <c>use_image_if_no_bounding_boxes = true</c> will assume there is a single implicit
/// bounding box covering the whole image. If <c>use_image_if_no_bounding_boxes</c> is
/// false and no bounding boxes are supplied, an error is raised.
/// </remarks>
member this.SampleDistortedBoundingBoxV2(image_size : TF, bounding_boxes : TF, min_object_covered : TF, ?seed : Int64, ?seed2 : Int64, ?aspect_ratio_range : Single[], ?area_range : Single[], ?max_attempts : Int64, ?use_image_if_no_bounding_boxes : Boolean, ?operName : String) =
    // V2 takes min_object_covered as a tensor; result tuple is passed through unchanged.
    this.TFGraph.SampleDistortedBoundingBoxV2(
        image_size.TFOutput,
        bounding_boxes.TFOutput,
        min_object_covered.TFOutput,
        Option.toNullable seed,
        Option.toNullable seed2,
        Option.defaultValue null aspect_ratio_range,
        Option.defaultValue null area_range,
        Option.toNullable max_attempts,
        Option.toNullable use_image_if_no_bounding_boxes,
        Option.defaultValue null operName)
/// <summary>
/// Saves the input tensors to disk.
/// </summary><param name="filename">
/// Must have a single element. The name of the file to which we write
/// the tensor.
/// </param><param name="tensor_names">
/// Shape <c>[N]</c>. The names of the tensors to be saved.
/// </param><param name="data"><c>N</c> tensors to save.
/// </param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'Save'.
/// </param><returns>
/// Returns the description of the operation
/// </returns><remarks>
/// The size of <c>tensor_names</c> must match the number of tensors in <c>data</c>. <c>data[i]</c>
/// is written to <c>filename</c> with name <c>tensor_names[i]</c>.
///
/// See also <c>SaveSlices</c>.
/// </remarks>
member this.Save(filename : TF, tensor_names : TF, data : TF[], ?operName : String) =
let operName = defaultArg operName null
this.TFGraph.Save(filename.TFOutput, tensor_names.TFOutput, data |> Array.map (fun x -> x.TFOutput), operName)
/// <summary>
/// Saves input tensors slices to disk.
/// </summary><param name="filename">
/// Must have a single element. The name of the file to which we write the
/// tensor.
/// </param><param name="tensor_names">
/// Shape <c>[N]</c>. The names of the tensors to be saved.
/// </param><param name="shapes_and_slices">
/// Shape <c>[N]</c>. The shapes and slice specifications to use when
/// saving the tensors.
/// </param><param name="data"><c>N</c> tensors to save.
/// </param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'SaveSlices'.
/// </param><returns>
/// Returns the description of the operation
/// </returns><remarks>
/// This is like <c>Save</c> except that tensors can be listed in the saved file as being
/// a slice of a larger tensor. <c>shapes_and_slices</c> specifies the shape of the
/// larger tensor and the slice that this tensor covers. <c>shapes_and_slices</c> must
/// have as many elements as <c>tensor_names</c>.
///
/// Elements of the <c>shapes_and_slices</c> input must either be:
///
/// * The empty string, in which case the corresponding tensor is
/// saved normally.
/// * A string of the form <c>dim0 dim1 ... dimN-1 slice-spec</c> where the
/// <c>dimI</c> are the dimensions of the larger tensor and <c>slice-spec</c>
/// specifies what part is covered by the tensor to save.
///
/// <c>slice-spec</c> itself is a <c>:</c>-separated list: <c>slice0:slice1:...:sliceN-1</c>
/// where each <c>sliceI</c> is either:
///
/// * The string <c>-</c> meaning that the slice covers all indices of this dimension
/// * <c>start,length</c> where <c>start</c> and <c>length</c> are integers. In that
/// case the slice covers <c>length</c> indices starting at <c>start</c>.
///
/// See also <c>Save</c>.
/// </remarks>
member this.SaveSlices(filename : TF, tensor_names : TF, shapes_and_slices : TF, data : TF[], ?operName : String) =
let operName = defaultArg operName null
this.TFGraph.SaveSlices(filename.TFOutput, tensor_names.TFOutput, shapes_and_slices.TFOutput, data |> Array.map (fun x -> x.TFOutput), operName)
/// <summary>
/// Saves tensors in V2 checkpoint format.
/// </summary><param name="prefix">
/// Must have a single element. The prefix of the V2 checkpoint to which we
/// write the tensors.
/// </param><param name="tensor_names">
/// shape {N}. The names of the tensors to be saved.
/// </param><param name="shape_and_slices">
/// shape {N}. The slice specs of the tensors to be saved.
/// Empty strings indicate that they are non-partitioned tensors.
/// </param><param name="tensors"><c>N</c> tensors to save.
/// </param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'SaveV2'.
/// </param><returns>
/// Returns the description of the operation
/// </returns><remarks>
/// By default, saves the named tensors in full. If the caller wishes to save
/// specific slices of full tensors, "shape_and_slices" should be non-empty strings
/// and correspondingly well-formed.
/// </remarks>
member this.SaveV2(prefix : TF, tensor_names : TF, shape_and_slices : TF, tensors : TF[], ?operName : String) =
let operName = defaultArg operName null
this.TFGraph.SaveV2(prefix.TFOutput, tensor_names.TFOutput, shape_and_slices.TFOutput, tensors |> Array.map (fun x -> x.TFOutput), operName)
member this.ScalarSummary(tags : TF, values : TF, ?operName : String) =
let operName = defaultArg operName null
TF(this.TFGraph, this.TFGraph.ScalarSummary(tags.TFOutput, values.TFOutput, operName))
/// <summary>
/// Adds sparse updates to a variable reference.
/// </summary><param name="reference">
/// Should be from a <c>Variable</c> node.
/// </param><param name="indices">
/// A tensor of indices into the first dimension of <c>ref</c>.
/// </param><param name="updates">
/// A tensor of updated values to add to <c>ref</c>.
/// </param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'ScatterAdd'.
/// </param><param name="use_locking">
/// Optional argument
/// If True, the addition will be protected by a lock;
/// otherwise the behavior is undefined, but may exhibit less contention.
/// </param><returns>
/// = Same as <c>ref</c>. Returned as a convenience for operations that want
/// to use the updated values after the update is done.
    /// The TFOperation can be fetched from the resulting TFOutput, by fetching the Operation property from the result.
/// </returns><remarks>
/// This operation computes
///
/// # Scalar indices
/// ref[indices, ...] += updates[...]
///
/// # Vector indices (for each i)
/// ref[indices[i], ...] += updates[i, ...]
///
/// # High rank indices (for each i, ..., j)
/// ref[indices[i, ..., j], ...] += updates[i, ..., j, ...]
///
/// This operation outputs <c>ref</c> after the update is done.
/// This makes it easier to chain operations that need to use the reset value.
///
/// Duplicate entries are handled correctly: if multiple <c>indices</c> reference
/// the same location, their contributions add.
///
/// Requires <c>updates.shape = indices.shape + ref.shape[1:]</c> or <c>updates.shape = []</c>.
///
/// &amp;lt;div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;"&amp;gt;
/// &amp;lt;img style="width:100%" src="https://www.tensorflow.org/images/ScatterAdd.png" alt&amp;gt;
/// &amp;lt;/div&amp;gt;
/// </remarks>
member this.ScatterAdd(reference : TF, indices : TF, updates : TF, ?use_locking : Boolean, ?operName : String) =
let use_locking = defaultArg (use_locking |> Option.map Nullable) (Nullable())
let operName = defaultArg operName null
TF(this.TFGraph, this.TFGraph.ScatterAdd(reference.TFOutput, indices.TFOutput, updates.TFOutput, use_locking, operName))
/// <summary>
/// Divides a variable reference by sparse updates.
/// </summary><param name="reference">
/// Should be from a <c>Variable</c> node.
/// </param><param name="indices">
/// A tensor of indices into the first dimension of <c>ref</c>.
/// </param><param name="updates">
/// A tensor of values that <c>ref</c> is divided by.
/// </param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'ScatterDiv'.
/// </param><param name="use_locking">
/// Optional argument
/// If True, the operation will be protected by a lock;
/// otherwise the behavior is undefined, but may exhibit less contention.
/// </param><returns>
/// = Same as <c>ref</c>. Returned as a convenience for operations that want
/// to use the updated values after the update is done.
    /// The TFOperation can be fetched from the resulting TFOutput, by fetching the Operation property from the result.
/// </returns><remarks>
/// This operation computes
///
/// <code>
/// # Scalar indices
/// ref[indices, ...] /= updates[...]
///
/// # Vector indices (for each i)
/// ref[indices[i], ...] /= updates[i, ...]
///
/// # High rank indices (for each i, ..., j)
/// ref[indices[i, ..., j], ...] /= updates[i, ..., j, ...]
/// </code>
///
/// This operation outputs <c>ref</c> after the update is done.
/// This makes it easier to chain operations that need to use the reset value.
///
/// Duplicate entries are handled correctly: if multiple <c>indices</c> reference
/// the same location, their contributions divide.
///
/// Requires <c>updates.shape = indices.shape + ref.shape[1:]</c> or <c>updates.shape = []</c>.
/// </remarks>
member this.ScatterDiv(reference : TF, indices : TF, updates : TF, ?use_locking : Boolean, ?operName : String) =
let use_locking = defaultArg (use_locking |> Option.map Nullable) (Nullable())
let operName = defaultArg operName null
TF(this.TFGraph, this.TFGraph.ScatterDiv(reference.TFOutput, indices.TFOutput, updates.TFOutput, use_locking, operName))
/// <summary>
/// Reduces sparse updates into a variable reference using the <c>max</c> operation.
/// </summary><param name="reference">
/// Should be from a <c>Variable</c> node.
/// </param><param name="indices">
/// A tensor of indices into the first dimension of <c>ref</c>.
/// </param><param name="updates">
/// A tensor of updated values to reduce into <c>ref</c>.
/// </param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'ScatterMax'.
/// </param><param name="use_locking">
/// Optional argument
/// If True, the update will be protected by a lock;
/// otherwise the behavior is undefined, but may exhibit less contention.
/// </param><returns>
/// = Same as <c>ref</c>. Returned as a convenience for operations that want
/// to use the updated values after the update is done.
    /// The TFOperation can be fetched from the resulting TFOutput, by fetching the Operation property from the result.
/// </returns><remarks>
/// This operation computes
///
/// # Scalar indices
/// ref[indices, ...] = max(ref[indices, ...], updates[...])
///
/// # Vector indices (for each i)
/// ref[indices[i], ...] = max(ref[indices[i], ...], updates[i, ...])
///
/// # High rank indices (for each i, ..., j)
/// ref[indices[i, ..., j], ...] = max(ref[indices[i, ..., j], ...], updates[i, ..., j, ...])
///
/// This operation outputs <c>ref</c> after the update is done.
/// This makes it easier to chain operations that need to use the reset value.
///
/// Duplicate entries are handled correctly: if multiple <c>indices</c> reference
/// the same location, their contributions combine.
///
/// Requires <c>updates.shape = indices.shape + ref.shape[1:]</c> or <c>updates.shape = []</c>.
///
/// &amp;lt;div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;"&amp;gt;
/// &amp;lt;img style="width:100%" src="https://www.tensorflow.org/images/ScatterAdd.png" alt&amp;gt;
/// &amp;lt;/div&amp;gt;
/// </remarks>
member this.ScatterMax(reference : TF, indices : TF, updates : TF, ?use_locking : Boolean, ?operName : String) =
let use_locking = defaultArg (use_locking |> Option.map Nullable) (Nullable())
let operName = defaultArg operName null
TF(this.TFGraph, this.TFGraph.ScatterMax(reference.TFOutput, indices.TFOutput, updates.TFOutput, use_locking, operName))
/// <summary>
/// Reduces sparse updates into a variable reference using the <c>min</c> operation.
/// </summary><param name="reference">
/// Should be from a <c>Variable</c> node.
/// </param><param name="indices">
/// A tensor of indices into the first dimension of <c>ref</c>.
/// </param><param name="updates">
/// A tensor of updated values to reduce into <c>ref</c>.
/// </param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'ScatterMin'.
/// </param><param name="use_locking">
/// Optional argument
/// If True, the update will be protected by a lock;
/// otherwise the behavior is undefined, but may exhibit less contention.
/// </param><returns>
/// = Same as <c>ref</c>. Returned as a convenience for operations that want
/// to use the updated values after the update is done.
    /// The TFOperation can be fetched from the resulting TFOutput, by fetching the Operation property from the result.
/// </returns><remarks>
/// This operation computes
///
/// # Scalar indices
/// ref[indices, ...] = min(ref[indices, ...], updates[...])
///
/// # Vector indices (for each i)
/// ref[indices[i], ...] = min(ref[indices[i], ...], updates[i, ...])
///
/// # High rank indices (for each i, ..., j)
/// ref[indices[i, ..., j], ...] = min(ref[indices[i, ..., j], ...], updates[i, ..., j, ...])
///
/// This operation outputs <c>ref</c> after the update is done.
/// This makes it easier to chain operations that need to use the reset value.
///
/// Duplicate entries are handled correctly: if multiple <c>indices</c> reference
/// the same location, their contributions combine.
///
/// Requires <c>updates.shape = indices.shape + ref.shape[1:]</c> or <c>updates.shape = []</c>.
///
/// &amp;lt;div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;"&amp;gt;
/// &amp;lt;img style="width:100%" src="https://www.tensorflow.org/images/ScatterAdd.png" alt&amp;gt;
/// &amp;lt;/div&amp;gt;
/// </remarks>
member this.ScatterMin(reference : TF, indices : TF, updates : TF, ?use_locking : Boolean, ?operName : String) =
let use_locking = defaultArg (use_locking |> Option.map Nullable) (Nullable())
let operName = defaultArg operName null
TF(this.TFGraph, this.TFGraph.ScatterMin(reference.TFOutput, indices.TFOutput, updates.TFOutput, use_locking, operName))
/// <summary>
/// Multiplies sparse updates into a variable reference.
/// </summary><param name="reference">
/// Should be from a <c>Variable</c> node.
/// </param><param name="indices">
/// A tensor of indices into the first dimension of <c>ref</c>.
/// </param><param name="updates">
/// A tensor of updated values to multiply to <c>ref</c>.
/// </param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'ScatterMul'.
/// </param><param name="use_locking">
/// Optional argument
/// If True, the operation will be protected by a lock;
/// otherwise the behavior is undefined, but may exhibit less contention.
/// </param><returns>
/// = Same as <c>ref</c>. Returned as a convenience for operations that want
/// to use the updated values after the update is done.
    /// The TFOperation can be fetched from the resulting TFOutput, by fetching the Operation property from the result.
/// </returns><remarks>
/// This operation computes
///
/// <code>
/// # Scalar indices
/// ref[indices, ...] *= updates[...]
///
/// # Vector indices (for each i)
/// ref[indices[i], ...] *= updates[i, ...]
///
/// # High rank indices (for each i, ..., j)
/// ref[indices[i, ..., j], ...] *= updates[i, ..., j, ...]
/// </code>
///
/// This operation outputs <c>ref</c> after the update is done.
/// This makes it easier to chain operations that need to use the reset value.
///
/// Duplicate entries are handled correctly: if multiple <c>indices</c> reference
/// the same location, their contributions multiply.
///
/// Requires <c>updates.shape = indices.shape + ref.shape[1:]</c> or <c>updates.shape = []</c>.
/// </remarks>
member this.ScatterMul(reference : TF, indices : TF, updates : TF, ?use_locking : Boolean, ?operName : String) =
let use_locking = defaultArg (use_locking |> Option.map Nullable) (Nullable())
let operName = defaultArg operName null
TF(this.TFGraph, this.TFGraph.ScatterMul(reference.TFOutput, indices.TFOutput, updates.TFOutput, use_locking, operName))
/// <summary>
/// Scatter <c>updates</c> into a new tensor according to <c>indices</c>.
/// </summary><param name="indices">
/// Index tensor.
/// </param><param name="updates">
/// Updates to scatter into output.
/// </param><param name="shape">
/// 1-D. The shape of the resulting tensor.
/// </param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'ScatterNd'.
/// </param><returns>
/// A new tensor with the given shape and updates applied according
/// to the indices.
    /// The TFOperation can be fetched from the resulting TFOutput, by fetching the Operation property from the result.
/// </returns><remarks>
/// Creates a new tensor by applying sparse <c>updates</c> to individual values or
/// slices within a tensor (initially zero for numeric, empty for string) of
/// the given <c>shape</c> according to indices. This operator is the inverse of the
/// <c>tf.gather_nd</c> operator which extracts values or slices from a given tensor.
///
/// This operation is similar to tensor_scatter_add, except that the tensor is
/// zero-initialized. Calling <c>tf.scatter_nd(indices, values, shape)</c> is identical
/// to <c>tensor_scatter_add(tf.zeros(shape, values.dtype), indices, values)</c>
///
/// If <c>indices</c> contains duplicates, then their updates are accumulated (summed).
///
/// **WARNING**: The order in which updates are applied is nondeterministic, so the
/// output will be nondeterministic if <c>indices</c> contains duplicates -- because
/// of some numerical approximation issues, numbers summed in different order
/// may yield different results.
///
/// <c>indices</c> is an integer tensor containing indices into a new tensor of shape
/// <c>shape</c>. The last dimension of <c>indices</c> can be at most the rank of <c>shape</c>:
///
/// indices.shape[-1] &amp;lt;= shape.rank
///
/// The last dimension of <c>indices</c> corresponds to indices into elements
/// (if <c>indices.shape[-1] = shape.rank</c>) or slices
/// (if <c>indices.shape[-1] &amp;lt; shape.rank</c>) along dimension <c>indices.shape[-1]</c> of
/// <c>shape</c>. <c>updates</c> is a tensor with shape
///
/// indices.shape[:-1] + shape[indices.shape[-1]:]
///
/// The simplest form of scatter is to insert individual elements in a tensor by
/// index. For example, say we want to insert 4 scattered elements in a rank-1
/// tensor with 8 elements.
///
/// &amp;lt;div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;"&amp;gt;
/// &amp;lt;img style="width:100%" src="https://www.tensorflow.org/images/ScatterNd1.png" alt&amp;gt;
/// &amp;lt;/div&amp;gt;
///
/// In Python, this scatter operation would look like this:
///
/// <code>
/// indices = tf.constant([[4], [3], [1], [7]])
/// updates = tf.constant([9, 10, 11, 12])
/// shape = tf.constant([8])
/// scatter = tf.scatter_nd(indices, updates, shape)
/// with tf.Session() as sess:
/// print(sess.run(scatter))
/// </code>
///
/// The resulting tensor would look like this:
///
/// [0, 11, 0, 10, 9, 0, 0, 12]
///
/// We can also, insert entire slices of a higher rank tensor all at once. For
/// example, if we wanted to insert two slices in the first dimension of a
/// rank-3 tensor with two matrices of new values.
///
/// &amp;lt;div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;"&amp;gt;
/// &amp;lt;img style="width:100%" src="https://www.tensorflow.org/images/ScatterNd2.png" alt&amp;gt;
/// &amp;lt;/div&amp;gt;
///
/// In Python, this scatter operation would look like this:
///
/// <code>
/// indices = tf.constant([[0], [2]])
/// updates = tf.constant([[[5, 5, 5, 5], [6, 6, 6, 6],
/// [7, 7, 7, 7], [8, 8, 8, 8]],
/// [[5, 5, 5, 5], [6, 6, 6, 6],
/// [7, 7, 7, 7], [8, 8, 8, 8]]])
/// shape = tf.constant([4, 4, 4])
/// scatter = tf.scatter_nd(indices, updates, shape)
/// with tf.Session() as sess:
/// print(sess.run(scatter))
/// </code>
///
/// The resulting tensor would look like this:
///
/// [[[5, 5, 5, 5], [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]],
/// [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]],
/// [[5, 5, 5, 5], [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]],
/// [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]]]
///
/// Note that on CPU, if an out of bound index is found, an error is returned.
/// On GPU, if an out of bound index is found, the index is ignored.
/// </remarks>
member this.ScatterNd(indices : TF, updates : TF, shape : TF, ?operName : String) =
let operName = defaultArg operName null
TF(this.TFGraph, this.TFGraph.ScatterNd(indices.TFOutput, updates.TFOutput, shape.TFOutput, operName))
/// <summary>
/// Applies sparse addition between <c>updates</c> and individual values or slices
/// </summary><param name="reference">
/// A mutable Tensor. Should be from a Variable node.
/// </param><param name="indices">
/// A Tensor. Must be one of the following types: int32, int64.
/// A tensor of indices into ref.
/// </param><param name="updates">
/// A Tensor. Must have the same type as ref. A tensor of updated values
/// to add to ref.
/// </param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'ScatterNdAdd'.
/// </param><param name="use_locking">
/// Optional argument
/// An optional bool. Defaults to True. If True, the assignment will
/// be protected by a lock; otherwise the behavior is undefined,
/// but may exhibit less contention.
/// </param><returns>
/// Same as ref. Returned as a convenience for operations that want
/// to use the updated values after the update is done.
    /// The TFOperation can be fetched from the resulting TFOutput, by fetching the Operation property from the result.
/// </returns><remarks>
/// within a given variable according to <c>indices</c>.
///
/// <c>ref</c> is a <c>Tensor</c> with rank <c>P</c> and <c>indices</c> is a <c>Tensor</c> of rank <c>Q</c>.
///
/// <c>indices</c> must be integer tensor, containing indices into <c>ref</c>.
/// It must be shape <c>\\([d_0, ..., d_{Q-2}, K]\\)</c> where <c>0 &amp;lt; K &amp;lt;= P</c>.
///
/// The innermost dimension of <c>indices</c> (with length <c>K</c>) corresponds to
/// indices into elements (if <c>K = P</c>) or slices (if <c>K &amp;lt; P</c>) along the <c>K</c>th
/// dimension of <c>ref</c>.
///
/// <c>updates</c> is <c>Tensor</c> of rank <c>Q-1+P-K</c> with shape:
///
/// $$[d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]].$$
///
/// For example, say we want to add 4 scattered elements to a rank-1 tensor to 8
/// elements. In Python, that addition would look like this:
///
/// ref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8])
/// indices = tf.constant([[4], [3], [1], [7]])
/// updates = tf.constant([9, 10, 11, 12])
/// add = tf.scatter_nd_add(ref, indices, updates)
/// with tf.Session() as sess:
/// print sess.run(add)
///
/// The resulting update to ref would look like this:
///
/// [1, 13, 3, 14, 14, 6, 7, 20]
///
/// See <c>tf.scatter_nd</c> for more details about how to make updates to
/// slices.
/// </remarks>
member this.ScatterNdAdd(reference : TF, indices : TF, updates : TF, ?use_locking : Boolean, ?operName : String) =
let use_locking = defaultArg (use_locking |> Option.map Nullable) (Nullable())
let operName = defaultArg operName null
TF(this.TFGraph, this.TFGraph.ScatterNdAdd(reference.TFOutput, indices.TFOutput, updates.TFOutput, use_locking, operName))
/// <summary>
/// Applies sparse addition to <c>input</c> using individual values or slices
/// </summary><param name="input">
/// A Tensor.
/// </param><param name="indices">
/// A Tensor. Must be one of the following types: <c>int32</c>, <c>int64</c>.
/// A tensor of indices into <c>input</c>.
/// </param><param name="updates">
/// A Tensor. Must have the same type as ref. A tensor of updated values
/// to add to <c>input</c>.
/// </param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'ScatterNdNonAliasingAdd'.
/// </param><returns>
/// A <c>Tensor</c> with the same shape as <c>input</c>, containing values of <c>input</c>
/// updated with <c>updates</c>.
    /// The TFOperation can be fetched from the resulting TFOutput, by fetching the Operation property from the result.
/// </returns><remarks>
/// from <c>updates</c> according to indices <c>indices</c>. The updates are non-aliasing:
/// <c>input</c> is only modified in-place if no other operations will use it.
/// Otherwise, a copy of <c>input</c> is made. This operation has a gradient with
/// respect to both <c>input</c> and <c>updates</c>.
///
/// <c>input</c> is a <c>Tensor</c> with rank <c>P</c> and <c>indices</c> is a <c>Tensor</c> of rank <c>Q</c>.
///
/// <c>indices</c> must be integer tensor, containing indices into <c>input</c>.
/// It must be shape \\([d_0, ..., d_{Q-2}, K]\\) where <c>0 &amp;lt; K &amp;lt;= P</c>.
///
/// The innermost dimension of <c>indices</c> (with length <c>K</c>) corresponds to
/// indices into elements (if <c>K = P</c>) or <c>(P-K)</c>-dimensional slices
/// (if <c>K &amp;lt; P</c>) along the <c>K</c>th dimension of <c>input</c>.
///
/// <c>updates</c> is <c>Tensor</c> of rank <c>Q-1+P-K</c> with shape:
///
/// $$[d_0, ..., d_{Q-2}, input.shape[K], ..., input.shape[P-1]].$$
///
/// For example, say we want to add 4 scattered elements to a rank-1 tensor to 8
/// elements. In Python, that addition would look like this:
///
/// input = tf.constant([1, 2, 3, 4, 5, 6, 7, 8])
/// indices = tf.constant([[4], [3], [1], [7]])
/// updates = tf.constant([9, 10, 11, 12])
/// output = tf.scatter_nd_non_aliasing_add(input, indices, updates)
/// with tf.Session() as sess:
/// print(sess.run(output))
///
/// The resulting value <c>output</c> would look like this:
///
/// [1, 13, 3, 14, 14, 6, 7, 20]
///
/// See <c>tf.scatter_nd</c> for more details about how to make updates to slices.
/// </remarks>
member this.ScatterNdNonAliasingAdd(input : TF, indices : TF, updates : TF, ?operName : String) =
let operName = defaultArg operName null
TF(this.TFGraph, this.TFGraph.ScatterNdNonAliasingAdd(input.TFOutput, indices.TFOutput, updates.TFOutput, operName))
/// <summary>
/// Applies sparse subtraction between <c>updates</c> and individual values or slices
/// </summary><param name="reference">
/// A mutable Tensor. Should be from a Variable node.
/// </param><param name="indices">
/// A Tensor. Must be one of the following types: int32, int64.
/// A tensor of indices into ref.
/// </param><param name="updates">
/// A Tensor. Must have the same type as ref. A tensor of updated values
/// to subtract from ref.
/// </param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'ScatterNdSub'.
/// </param><param name="use_locking">
/// Optional argument
/// An optional bool. Defaults to True. If True, the assignment will
/// be protected by a lock; otherwise the behavior is undefined,
/// but may exhibit less contention.
/// </param><returns>
/// Same as ref. Returned as a convenience for operations that want
/// to use the updated values after the update is done.
    /// The TFOperation can be fetched from the resulting TFOutput, by fetching the Operation property from the result.
/// </returns><remarks>
/// within a given variable according to <c>indices</c>.
///
/// <c>ref</c> is a <c>Tensor</c> with rank <c>P</c> and <c>indices</c> is a <c>Tensor</c> of rank <c>Q</c>.
///
/// <c>indices</c> must be integer tensor, containing indices into <c>ref</c>.
/// It must be shape \\([d_0, ..., d_{Q-2}, K]\\) where <c>0 &amp;lt; K &amp;lt;= P</c>.
///
/// The innermost dimension of <c>indices</c> (with length <c>K</c>) corresponds to
/// indices into elements (if <c>K = P</c>) or slices (if <c>K &amp;lt; P</c>) along the <c>K</c>th
/// dimension of <c>ref</c>.
///
/// <c>updates</c> is <c>Tensor</c> of rank <c>Q-1+P-K</c> with shape:
///
/// $$[d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]].$$
///
/// For example, say we want to subtract 4 scattered elements from a rank-1 tensor
/// with 8 elements. In Python, that subtraction would look like this:
///
/// ref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8])
/// indices = tf.constant([[4], [3], [1], [7]])
/// updates = tf.constant([9, 10, 11, 12])
/// sub = tf.scatter_nd_sub(ref, indices, updates)
/// with tf.Session() as sess:
/// print sess.run(sub)
///
/// The resulting update to ref would look like this:
///
/// [1, -9, 3, -6, -4, 6, 7, -4]
///
/// See <c>tf.scatter_nd</c> for more details about how to make updates to
/// slices.
/// </remarks>
member this.ScatterNdSub(reference : TF, indices : TF, updates : TF, ?use_locking : Boolean, ?operName : String) =
let use_locking = defaultArg (use_locking |> Option.map Nullable) (Nullable())
let operName = defaultArg operName null
TF(this.TFGraph, this.TFGraph.ScatterNdSub(reference.TFOutput, indices.TFOutput, updates.TFOutput, use_locking, operName))
/// <summary>
/// Applies sparse <c>updates</c> to individual values or slices within a given
/// </summary><param name="reference">
/// A mutable Tensor. Should be from a Variable node.
/// </param><param name="indices">
/// A Tensor. Must be one of the following types: int32, int64.
/// A tensor of indices into ref.
/// </param><param name="updates">
/// A Tensor. Must have the same type as ref. A tensor of updated
/// values to add to ref.
/// </param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'ScatterNdUpdate'.
/// </param><param name="use_locking">
/// Optional argument
/// An optional bool. Defaults to True. If True, the assignment will
/// be protected by a lock; otherwise the behavior is undefined,
/// but may exhibit less contention.
/// </param><returns>
/// Same as ref. Returned as a convenience for operations that want to
/// use the updated values after the update is done.
    /// The TFOperation can be fetched from the resulting TFOutput, by fetching the Operation property from the result.
/// </returns><remarks>
/// variable according to <c>indices</c>.
///
/// <c>ref</c> is a <c>Tensor</c> with rank <c>P</c> and <c>indices</c> is a <c>Tensor</c> of rank <c>Q</c>.
///
/// <c>indices</c> must be integer tensor, containing indices into <c>ref</c>.
/// It must be shape \\([d_0, ..., d_{Q-2}, K]\\) where <c>0 &amp;lt; K &amp;lt;= P</c>.
///
/// The innermost dimension of <c>indices</c> (with length <c>K</c>) corresponds to
/// indices into elements (if <c>K = P</c>) or slices (if <c>K &amp;lt; P</c>) along the <c>K</c>th
/// dimension of <c>ref</c>.
///
/// <c>updates</c> is <c>Tensor</c> of rank <c>Q-1+P-K</c> with shape:
///
/// $$[d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]].$$
///
/// For example, say we want to update 4 scattered elements to a rank-1 tensor to
/// 8 elements. In Python, that update would look like this:
///
/// <code>
/// ref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8])
/// indices = tf.constant([[4], [3], [1] ,[7]])
/// updates = tf.constant([9, 10, 11, 12])
/// update = tf.scatter_nd_update(ref, indices, updates)
/// with tf.Session() as sess:
/// print sess.run(update)
/// </code>
///
/// The resulting update to ref would look like this:
///
/// [1, 11, 3, 10, 9, 6, 7, 12]
///
/// See <c>tf.scatter_nd</c> for more details about how to make updates to
/// slices.
///
/// See also <c>tf.scatter_update</c> and <c>tf.batch_scatter_update</c>.
/// </remarks>
member this.ScatterNdUpdate(reference : TF, indices : TF, updates : TF, ?use_locking : Boolean, ?operName : String) =
let use_locking = defaultArg (use_locking |> Option.map Nullable) (Nullable())
let operName = defaultArg operName null
TF(this.TFGraph, this.TFGraph.ScatterNdUpdate(reference.TFOutput, indices.TFOutput, updates.TFOutput, use_locking, operName))
/// <summary>
/// Subtracts sparse updates to a variable reference.
/// </summary><param name="reference">
/// Should be from a <c>Variable</c> node.
/// </param><param name="indices">
/// A tensor of indices into the first dimension of <c>ref</c>.
/// </param><param name="updates">
/// A tensor of updated values to subtract from <c>ref</c>.
/// </param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'ScatterSub'.
/// </param><param name="use_locking">
/// Optional argument
/// If True, the subtraction will be protected by a lock;
/// otherwise the behavior is undefined, but may exhibit less contention.
/// </param><returns>
/// = Same as <c>ref</c>. Returned as a convenience for operations that want
/// to use the updated values after the update is done.
/// The TFOperation can be fetched from the resulting TFOutput, by fetching the Operation property from the result.
/// </returns><remarks><code>
/// # Scalar indices
/// ref[indices, ...] -= updates[...]
///
/// # Vector indices (for each i)
/// ref[indices[i], ...] -= updates[i, ...]
///
/// # High rank indices (for each i, ..., j)
/// ref[indices[i, ..., j], ...] -= updates[i, ..., j, ...]
/// </code>
///
/// This operation outputs <c>ref</c> after the update is done.
/// This makes it easier to chain operations that need to use the reset value.
///
/// Duplicate entries are handled correctly: if multiple <c>indices</c> reference
/// the same location, their (negated) contributions add.
///
/// Requires <c>updates.shape = indices.shape + ref.shape[1:]</c> or <c>updates.shape = []</c>.
///
/// &amp;lt;div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;"&amp;gt;
/// &amp;lt;img style="width:100%" src="https://www.tensorflow.org/images/ScatterSub.png" alt&amp;gt;
/// &amp;lt;/div&amp;gt;
/// </remarks>
member this.ScatterSub(reference : TF, indices : TF, updates : TF, ?use_locking : Boolean, ?operName : String) =
    // Option.toNullable / Option.toObj convert the F# optionals in one step,
    // replacing defaultArg (x |> Option.map Nullable) (Nullable()).
    TF(this.TFGraph, this.TFGraph.ScatterSub(reference.TFOutput, indices.TFOutput, updates.TFOutput, Option.toNullable use_locking, Option.toObj operName))
/// <summary>
/// Applies sparse updates to a variable reference.
/// </summary><param name="reference">
/// Should be from a <c>Variable</c> node.
/// </param><param name="indices">
/// A tensor of indices into the first dimension of <c>ref</c>.
/// </param><param name="updates">
/// A tensor of updated values to store in <c>ref</c>.
/// </param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'ScatterUpdate'.
/// </param><param name="use_locking">
/// Optional argument
/// If True, the assignment will be protected by a lock;
/// otherwise the behavior is undefined, but may exhibit less contention.
/// </param><returns>
/// = Same as <c>ref</c>. Returned as a convenience for operations that want
/// to use the updated values after the update is done.
/// The TFOperation can be fetched from the resulting TFOutput, by fetching the Operation property from the result.
/// </returns><remarks>
/// This operation computes
///
/// <code>
/// # Scalar indices
/// ref[indices, ...] = updates[...]
///
/// # Vector indices (for each i)
/// ref[indices[i], ...] = updates[i, ...]
///
/// # High rank indices (for each i, ..., j)
/// ref[indices[i, ..., j], ...] = updates[i, ..., j, ...]
/// </code>
///
/// This operation outputs <c>ref</c> after the update is done.
/// This makes it easier to chain operations that need to use the reset value.
///
/// If values in <c>ref</c> is to be updated more than once, because there are
/// duplicate entries in <c>indices</c>, the order at which the updates happen
/// for each value is undefined.
///
/// Requires <c>updates.shape = indices.shape + ref.shape[1:]</c> or <c>updates.shape = []</c>.
///
/// &amp;lt;div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;"&amp;gt;
/// &amp;lt;img style="width:100%" src="https://www.tensorflow.org/images/ScatterUpdate.png" alt&amp;gt;
/// &amp;lt;/div&amp;gt;
///
/// See also <c>tf.batch_scatter_update</c> and <c>tf.scatter_nd_update</c>.
/// </remarks>
member this.ScatterUpdate(reference : TF, indices : TF, updates : TF, ?use_locking : Boolean, ?operName : String) =
    // Option.toNullable / Option.toObj convert the F# optionals in one step,
    // replacing defaultArg (x |> Option.map Nullable) (Nullable()).
    TF(this.TFGraph, this.TFGraph.ScatterUpdate(reference.TFOutput, indices.TFOutput, updates.TFOutput, Option.toNullable use_locking, Option.toObj operName))
/// <summary>
/// Computes fingerprints of the input strings.
/// </summary><param name="input">
/// vector of strings to compute fingerprints on.
/// </param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'SdcaFprint'.
/// </param><returns>
/// a (N,2) shaped matrix where N is the number of elements in the input
/// vector. Each row contains the low and high parts of the fingerprint.
/// The TFOperation can be fetched from the resulting TFOutput, by fetching the Operation property from the result.
/// </returns>
member this.SdcaFprint(input : TF, ?operName : String) =
    let graph = this.TFGraph
    let name = match operName with Some n -> n | None -> null
    TF(graph, graph.SdcaFprint(input.TFOutput, name))
/// <summary>
/// Distributed version of Stochastic Dual Coordinate Ascent (SDCA) optimizer for
/// </summary><param name="sparse_example_indices">
/// a list of vectors which contain example indices.
/// </param><param name="sparse_feature_indices">
/// a list of vectors which contain feature indices.
/// </param><param name="sparse_feature_values">
/// a list of vectors which contains feature value
/// associated with each feature group.
/// </param><param name="dense_features">
/// a list of matrices which contains the dense feature values.
/// </param><param name="example_weights">
/// a vector which contains the weight associated with each
/// example.
/// </param><param name="example_labels">
/// a vector which contains the label/target associated with each
/// example.
/// </param><param name="sparse_indices">
/// a list of vectors where each value is the indices which has
/// corresponding weights in sparse_weights. This field may be omitted for the
/// dense approach.
/// </param><param name="sparse_weights">
/// a list of vectors where each value is the weight associated with
/// a sparse feature group.
/// </param><param name="dense_weights">
/// a list of vectors where the values are the weights associated
/// with a dense feature group.
/// </param><param name="example_state_data">
/// a list of vectors containing the example state data.
/// </param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'SdcaOptimizer'.
/// </param><param name="adaptative">
/// Optional argument
/// Whether to use Adaptive SDCA for the inner loop.
/// </param><param name="loss_type">
/// Type of the primal loss. Currently SdcaSolver supports logistic,
/// squared and hinge losses.
/// </param><param name="l1">
/// Symmetric l1 regularization strength.
/// </param><param name="l2">
/// Symmetric l2 regularization strength.
/// </param><param name="num_loss_partitions">
/// Number of partitions of the global loss function.
/// </param><param name="num_inner_iterations">
/// Number of iterations per mini-batch.
/// </param><returns>
/// Returns a tuple with multiple values, as follows:
/// out_example_state_data: a list of vectors containing the updated example state
/// data.
/// out_delta_sparse_weights: a list of vectors where each value is the delta
/// weights associated with a sparse feature group.
/// out_delta_dense_weights: a list of vectors where the values are the delta
/// weights associated with a dense feature group.
/// The TFOperation can be fetched from any of the TFOutputs returned in the tuple values, by fetching the Operation property.
/// </returns><remarks>
/// linear models with L1 + L2 regularization. As global optimization objective is
/// strongly-convex, the optimizer optimizes the dual objective at each step. The
/// optimizer applies each update one example at a time. Examples are sampled
/// uniformly, and the optimizer is learning rate free and enjoys linear convergence
/// rate.
///
/// [Proximal Stochastic Dual Coordinate Ascent](http://arxiv.org/pdf/1211.2717v1.pdf).&amp;lt;br&amp;gt;
/// Shai Shalev-Shwartz, Tong Zhang. 2012
///
/// $$Loss Objective = \sum f_{i} (wx_{i}) + (l2 / 2) * |w|^2 + l1 * |w|$$
///
/// [Adding vs. Averaging in Distributed Primal-Dual Optimization](http://arxiv.org/abs/1502.03508).&amp;lt;br&amp;gt;
/// Chenxin Ma, Virginia Smith, Martin Jaggi, Michael I. Jordan,
/// Peter Richtarik, Martin Takac. 2015
///
/// [Stochastic Dual Coordinate Ascent with Adaptive Probabilities](https://arxiv.org/abs/1502.08053).&amp;lt;br&amp;gt;
/// Dominik Csiba, Zheng Qu, Peter Richtarik. 2015
/// </remarks>
member this.SdcaOptimizer(sparse_example_indices : TF[], sparse_feature_indices : TF[], sparse_feature_values : TF[], dense_features : TF[], example_weights : TF, example_labels : TF, sparse_indices : TF[], sparse_weights : TF[], dense_weights : TF[], example_state_data : TF, loss_type : String, l1 : Single, l2 : Single, num_loss_partitions : Int64, num_inner_iterations : Int64, ?adaptative : Boolean, ?operName : String) =
    // Option.toNullable / Option.toObj convert the F# optionals in one step,
    // replacing defaultArg (x |> Option.map Nullable) (Nullable()).
    // Hoist the TF -> TFOutput projection so each array is unwrapped uniformly.
    let outputs (xs : TF[]) = xs |> Array.map (fun x -> x.TFOutput)
    this.TFGraph.SdcaOptimizer(outputs sparse_example_indices, outputs sparse_feature_indices, outputs sparse_feature_values, outputs dense_features, example_weights.TFOutput, example_labels.TFOutput, outputs sparse_indices, outputs sparse_weights, outputs dense_weights, example_state_data.TFOutput, loss_type, l1, l2, num_loss_partitions, num_inner_iterations, Option.toNullable adaptative, Option.toObj operName)
/// <summary>
/// Applies L1 regularization shrink step on the parameters.
/// </summary><param name="weights">
/// a list of vectors where each value is the weight associated with a
/// feature group.
/// </param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'SdcaShrinkL1'.
/// </param><param name="l1">
/// Symmetric l1 regularization strength.
/// </param><param name="l2">
/// Symmetric l2 regularization strength. Should be a positive float.
/// </param><returns>
/// Returns the description of the operation
/// </returns>
member this.SdcaShrinkL1(weights : TF[], l1 : Single, l2 : Single, ?operName : String) =
    let name = match operName with Some n -> n | None -> null
    let rawWeights = weights |> Array.map (fun w -> w.TFOutput)
    this.TFGraph.SdcaShrinkL1(rawWeights, l1, l2, name)
/// <summary>
/// Computes the maximum along segments of a tensor.
/// </summary><param name="data"></param><param name="segment_ids">
/// A 1-D tensor whose size is equal to the size of <c>data</c>'s
/// first dimension. Values should be sorted and can be repeated.
/// </param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'SegmentMax'.
/// </param><returns>
/// Has same shape as data, except for dimension 0 which
/// has size <c>k</c>, the number of segments.
/// The TFOperation can be fetched from the resulting TFOutput, by fetching the Operation property from the result.
/// </returns><remarks>
/// Read
/// [the section on segmentation](https://tensorflow.org/api_guides/python/math_ops#Segmentation)
/// for an explanation of segments.
///
/// Computes a tensor such that
/// \\(output_i = \max_j(data_j)\\) where <c>max</c> is over <c>j</c> such
/// that <c>segment_ids[j] == i</c>.
///
/// If the max is empty for a given segment ID <c>i</c>, <c>output[i] = 0</c>.
///
/// &amp;lt;div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;"&amp;gt;
/// &amp;lt;img style="width:100%" src="https://www.tensorflow.org/images/SegmentMax.png" alt&amp;gt;
/// &amp;lt;/div&amp;gt;
/// </remarks>
member this.SegmentMax(data : TF, segment_ids : TF, ?operName : String) =
    let graph = this.TFGraph
    let name = match operName with Some n -> n | None -> null
    TF(graph, graph.SegmentMax(data.TFOutput, segment_ids.TFOutput, name))
/// <summary>
/// Computes the mean along segments of a tensor.
/// </summary><param name="data"></param><param name="segment_ids">
/// A 1-D tensor whose size is equal to the size of <c>data</c>'s
/// first dimension. Values should be sorted and can be repeated.
/// </param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'SegmentMean'.
/// </param><returns>
/// Has same shape as data, except for dimension 0 which
/// has size <c>k</c>, the number of segments.
/// The TFOperation can be fetched from the resulting TFOutput, by fetching the Operation property from the result.
/// </returns><remarks>
/// Read
/// [the section on segmentation](https://tensorflow.org/api_guides/python/math_ops#Segmentation)
/// for an explanation of segments.
///
/// Computes a tensor such that
/// \\(output_i = \frac{\sum_j data_j}{N}\\) where <c>mean</c> is
/// over <c>j</c> such that <c>segment_ids[j] == i</c> and <c>N</c> is the total number of
/// values summed.
///
/// If the mean is empty for a given segment ID <c>i</c>, <c>output[i] = 0</c>.
///
/// &amp;lt;div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;"&amp;gt;
/// &amp;lt;img style="width:100%" src="https://www.tensorflow.org/images/SegmentMean.png" alt&amp;gt;
/// &amp;lt;/div&amp;gt;
/// </remarks>
member this.SegmentMean(data : TF, segment_ids : TF, ?operName : String) =
    let graph = this.TFGraph
    let name = match operName with Some n -> n | None -> null
    TF(graph, graph.SegmentMean(data.TFOutput, segment_ids.TFOutput, name))
/// <summary>
/// Computes the minimum along segments of a tensor.
/// </summary><param name="data"></param><param name="segment_ids">
/// A 1-D tensor whose size is equal to the size of <c>data</c>'s
/// first dimension. Values should be sorted and can be repeated.
/// </param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'SegmentMin'.
/// </param><returns>
/// Has same shape as data, except for dimension 0 which
/// has size <c>k</c>, the number of segments.
/// The TFOperation can be fetched from the resulting TFOutput, by fetching the Operation property from the result.
/// </returns><remarks>
/// Read
/// [the section on segmentation](https://tensorflow.org/api_guides/python/math_ops#Segmentation)
/// for an explanation of segments.
///
/// Computes a tensor such that
/// \\(output_i = \min_j(data_j)\\) where <c>min</c> is over <c>j</c> such
/// that <c>segment_ids[j] == i</c>.
///
/// If the min is empty for a given segment ID <c>i</c>, <c>output[i] = 0</c>.
///
/// &amp;lt;div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;"&amp;gt;
/// &amp;lt;img style="width:100%" src="https://www.tensorflow.org/images/SegmentMin.png" alt&amp;gt;
/// &amp;lt;/div&amp;gt;
/// </remarks>
member this.SegmentMin(data : TF, segment_ids : TF, ?operName : String) =
    let graph = this.TFGraph
    let name = match operName with Some n -> n | None -> null
    TF(graph, graph.SegmentMin(data.TFOutput, segment_ids.TFOutput, name))
/// <summary>
/// Computes the product along segments of a tensor.
/// </summary><param name="data"></param><param name="segment_ids">
/// A 1-D tensor whose size is equal to the size of <c>data</c>'s
/// first dimension. Values should be sorted and can be repeated.
/// </param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'SegmentProd'.
/// </param><returns>
/// Has same shape as data, except for dimension 0 which
/// has size <c>k</c>, the number of segments.
/// The TFOperation can be fetched from the resulting TFOutput, by fetching the Operation property from the result.
/// </returns><remarks>
/// Read
/// [the section on segmentation](https://tensorflow.org/api_guides/python/math_ops#Segmentation)
/// for an explanation of segments.
///
/// Computes a tensor such that
/// \\(output_i = \prod_j data_j\\) where the product is over <c>j</c> such
/// that <c>segment_ids[j] == i</c>.
///
/// If the product is empty for a given segment ID <c>i</c>, <c>output[i] = 1</c>.
///
/// &amp;lt;div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;"&amp;gt;
/// &amp;lt;img style="width:100%" src="https://www.tensorflow.org/images/SegmentProd.png" alt&amp;gt;
/// &amp;lt;/div&amp;gt;
/// </remarks>
member this.SegmentProd(data : TF, segment_ids : TF, ?operName : String) =
    let graph = this.TFGraph
    let name = match operName with Some n -> n | None -> null
    TF(graph, graph.SegmentProd(data.TFOutput, segment_ids.TFOutput, name))
/// <summary>
/// Computes the sum along segments of a tensor.
/// </summary><param name="data"></param><param name="segment_ids">
/// A 1-D tensor whose size is equal to the size of <c>data</c>'s
/// first dimension. Values should be sorted and can be repeated.
/// </param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'SegmentSum'.
/// </param><returns>
/// Has same shape as data, except for dimension 0 which
/// has size <c>k</c>, the number of segments.
/// The TFOperation can be fetched from the resulting TFOutput, by fetching the Operation property from the result.
/// </returns><remarks>
/// Read
/// [the section on segmentation](https://tensorflow.org/api_guides/python/math_ops#Segmentation)
/// for an explanation of segments.
///
/// Computes a tensor such that
/// \\(output_i = \sum_j data_j\\) where sum is over <c>j</c> such
/// that <c>segment_ids[j] == i</c>.
///
/// If the sum is empty for a given segment ID <c>i</c>, <c>output[i] = 0</c>.
///
/// &amp;lt;div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;"&amp;gt;
/// &amp;lt;img style="width:100%" src="https://www.tensorflow.org/images/SegmentSum.png" alt&amp;gt;
/// &amp;lt;/div&amp;gt;
/// </remarks>
member this.SegmentSum(data : TF, segment_ids : TF, ?operName : String) =
    let graph = this.TFGraph
    let name = match operName with Some n -> n | None -> null
    TF(graph, graph.SegmentSum(data.TFOutput, segment_ids.TFOutput, name))
/// <summary>
/// Selects elements from <c>x</c> or <c>y</c>, depending on <c>condition</c>.
/// </summary><param name="condition"></param><param name="t">
/// = A <c>Tensor</c> which may have the same shape as <c>condition</c>.
/// If <c>condition</c> is rank 1, <c>x</c> may have higher rank,
/// but its first dimension must match the size of <c>condition</c>.
/// </param><param name="e">
/// = A <c>Tensor</c> with the same type and shape as <c>x</c>.
/// </param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'Select'.
/// </param><returns>
/// = A <c>Tensor</c> with the same type and shape as <c>x</c> and <c>y</c>.
/// The TFOperation can be fetched from the resulting TFOutput, by fetching the Operation property from the result.
/// </returns><remarks>
/// The <c>x</c>, and <c>y</c> tensors must all have the same shape, and the
/// output will also have that shape.
///
/// The <c>condition</c> tensor must be a scalar if <c>x</c> and <c>y</c> are scalars.
/// If <c>x</c> and <c>y</c> are vectors or higher rank, then <c>condition</c> must be either a
/// scalar, a vector with size matching the first dimension of <c>x</c>, or must have
/// the same shape as <c>x</c>.
///
/// The <c>condition</c> tensor acts as a mask that chooses, based on the value at each
/// element, whether the corresponding element / row in the output should be
/// taken from <c>x</c> (if true) or <c>y</c> (if false).
///
/// If <c>condition</c> is a vector and <c>x</c> and <c>y</c> are higher rank matrices, then
/// it chooses which row (outer dimension) to copy from <c>x</c> and <c>y</c>.
/// If <c>condition</c> has the same shape as <c>x</c> and <c>y</c>, then it chooses which
/// element to copy from <c>x</c> and <c>y</c>.
///
/// For example:
///
/// <code>
/// # 'condition' tensor is [[True, False]
/// # [False, True]]
/// # 't' is [[1, 2],
/// # [3, 4]]
/// # 'e' is [[5, 6],
/// # [7, 8]]
/// select(condition, t, e) # =&amp;gt; [[1, 6], [7, 4]]
///
///
/// # 'condition' tensor is [True, False]
/// # 't' is [[1, 2],
/// # [3, 4]]
/// # 'e' is [[5, 6],
/// # [7, 8]]
/// select(condition, t, e) ==&amp;gt; [[1, 2],
/// [7, 8]]
///
/// </code></remarks>
member this.Select(condition : TF, t : TF, e : TF, ?operName : String) =
    let graph = this.TFGraph
    let name = match operName with Some n -> n | None -> null
    TF(graph, graph.Select(condition.TFOutput, t.TFOutput, e.TFOutput, name))
/// <summary>
/// Computes the Eigen Decomposition of a batch of square self-adjoint matrices.
/// </summary><param name="input">
/// Shape is <c>[..., M, M]</c>.
/// </param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'SelfAdjointEig'.
/// </param><returns>
/// Shape is <c>[..., M+1, M]</c>.
/// The TFOperation can be fetched from the resulting TFOutput, by fetching the Operation property from the result.
/// </returns><remarks>
/// The input is a tensor of shape <c>[..., M, M]</c> whose inner-most 2 dimensions
/// form square matrices, with the same constraints as the single matrix
/// SelfAdjointEig.
///
/// The result is a [..., M+1, M] matrix with [..., 0,:] containing the
/// eigenvalues, and subsequent [...,1:, :] containing the eigenvectors. The eigenvalues
/// are sorted in non-decreasing order.
/// </remarks>
member this.SelfAdjointEig(input : TF, ?operName : String) =
    let graph = this.TFGraph
    let name = match operName with Some n -> n | None -> null
    TF(graph, graph.SelfAdjointEig(input.TFOutput, name))
/// <summary>
/// Computes the eigen decomposition of one or more square self-adjoint matrices.
/// </summary><param name="input"><c>Tensor</c> input of shape <c>[N, N]</c>.
/// </param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'SelfAdjointEigV2'.
/// </param><param name="compute_v">
/// Optional argument
/// If <c>True</c> then eigenvectors will be computed and returned in <c>v</c>.
/// Otherwise, only the eigenvalues will be computed.
/// </param><returns>
/// Returns a tuple with multiple values, as follows:
/// e: Eigenvalues. Shape is <c>[N]</c>.
/// v: Eigenvectors. Shape is <c>[N, N]</c>.
/// The TFOperation can be fetched from any of the TFOutputs returned in the tuple values, by fetching the Operation property.
/// </returns><remarks>
/// Computes the eigenvalues and (optionally) eigenvectors of each inner matrix in
/// <c>input</c> such that <c>input[..., :, :] = v[..., :, :] * diag(e[..., :])</c>. The eigenvalues
/// are sorted in non-decreasing order.
///
/// <code>
/// # a is a tensor.
/// # e is a tensor of eigenvalues.
/// # v is a tensor of eigenvectors.
/// e, v = self_adjoint_eig(a)
/// e = self_adjoint_eig(a, compute_v=False)
/// </code></remarks>
member this.SelfAdjointEigV2(input : TF, ?compute_v : Boolean, ?operName : String) =
    // Option.toNullable / Option.toObj convert the F# optionals in one step,
    // replacing defaultArg (x |> Option.map Nullable) (Nullable()).
    this.TFGraph.SelfAdjointEigV2(input.TFOutput, Option.toNullable compute_v, Option.toObj operName)
/// <summary>
/// Computes scaled exponential linear: <c>scale * alpha * (exp(features) - 1)</c></summary><param name="features"></param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'Selu'.
/// </param><returns>
/// The TFOperation can be fetched from the resulting TFOutput, by fetching the Operation property from the result.
/// </returns><remarks>
/// if &amp;lt; 0, <c>scale * features</c> otherwise.
///
/// To be used together with
/// <c>initializer = tf.variance_scaling_initializer(factor=1.0, mode='FAN_IN')</c>.
/// For correct dropout, use <c>tf.contrib.nn.alpha_dropout</c>.
///
/// See [Self-Normalizing Neural Networks](https://arxiv.org/abs/1706.02515)
/// </remarks>
member this.Selu(features : TF, ?operName : String) =
    let graph = this.TFGraph
    let name = match operName with Some n -> n | None -> null
    TF(graph, graph.Selu(features.TFOutput, name))
/// <summary>
/// Computes gradients for the scaled exponential linear (Selu) operation.
/// </summary><param name="gradients">
/// The backpropagated gradients to the corresponding Selu operation.
/// </param><param name="outputs">
/// The outputs of the corresponding Selu operation.
/// </param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'SeluGrad'.
/// </param><returns>
/// The gradients: <c>gradients * (outputs + scale * alpha)</c>
/// if outputs &amp;lt; 0, <c>scale * gradients</c> otherwise.
/// The TFOperation can be fetched from the resulting TFOutput, by fetching the Operation property from the result.
/// </returns>
member this.SeluGrad(gradients : TF, outputs : TF, ?operName : String) =
    let graph = this.TFGraph
    let name = match operName with Some n -> n | None -> null
    TF(graph, graph.SeluGrad(gradients.TFOutput, outputs.TFOutput, name))
/// <summary>
/// An op that performs gradient updates of embedding tables.
/// </summary><param name="inputs">
/// A TensorList of gradients with which to update embedding tables.
/// It contains one tensor per embedding table in the model.
/// </param><param name="learning_rates">
/// A list of float32 scalars, one for each embedding table,
/// containing the learning rates for each table when dynamic learning rate is
/// enabled through the OptimizationParameters in TPUEmbeddingConfiguration.
/// When the learning rate is constant, the list should be empty.
/// </param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'SendTPUEmbeddingGradients'.
/// </param><param name="config">
/// Serialized TPUEmbeddingConfiguration proto.
/// </param><returns>
/// Returns the description of the operation
/// </returns><remarks>
/// The TensorList argument has the same length and shapes as the return value of
/// TPUEmbeddingReceiveActivations, but contains gradients of the model's loss
/// with respect to the embedding activations. The embedding tables are updated
/// from these gradients via the optimizer specified in the configuration given
/// to tpu.initialize_system.
/// </remarks>
member this.SendTPUEmbeddingGradients(inputs : TF[], learning_rates : TF[], config : String, ?operName : String) =
    let name = match operName with Some n -> n | None -> null
    let gradientOutputs = inputs |> Array.map (fun x -> x.TFOutput)
    let rateOutputs = learning_rates |> Array.map (fun x -> x.TFOutput)
    this.TFGraph.SendTPUEmbeddingGradients(gradientOutputs, rateOutputs, config, name)
/// <summary>
/// Converts the given <c>resource_handle</c> representing an iterator to a variant tensor.
/// </summary><param name="resource_handle">
/// A handle to an iterator resource.
/// </param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'SerializeIterator'.
/// </param><returns>
/// A variant tensor storing the state of the iterator contained in the
/// resource.
/// The TFOperation can be fetched from the resulting TFOutput, by fetching the Operation property from the result.
/// </returns>
member this.SerializeIterator(resource_handle : TF, ?operName : String) =
    let graph = this.TFGraph
    let name = match operName with Some n -> n | None -> null
    TF(graph, graph.SerializeIterator(resource_handle.TFOutput, name))
/// <summary>
/// Serialize an <c>N</c>-minibatch <c>SparseTensor</c> into an <c>[N, 3]</c><c>Tensor</c> object.
/// </summary><param name="sparse_indices">
/// 2-D. The <c>indices</c> of the minibatch <c>SparseTensor</c>.
/// </param><param name="sparse_values">
/// 1-D. The <c>values</c> of the minibatch <c>SparseTensor</c>.
/// </param><param name="sparse_shape">
/// 1-D. The <c>shape</c> of the minibatch <c>SparseTensor</c>.
/// </param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'SerializeManySparse'.
/// </param><param name="out_type">
/// Optional argument
/// The <c>dtype</c> to use for serialization; the supported types are <c>string</c>
/// (default) and <c>variant</c>.
/// </param><returns>
/// The TFOperation can be fetched from the resulting TFOutput, by fetching the Operation property from the result.
/// </returns><remarks>
/// The <c>SparseTensor</c> must have rank <c>R</c> greater than 1, and the first dimension
/// is treated as the minibatch dimension. Elements of the <c>SparseTensor</c>
/// must be sorted in increasing order of this first dimension. The serialized
/// <c>SparseTensor</c> objects going into each row of <c>serialized_sparse</c> will have
/// rank <c>R-1</c>.
///
/// The minibatch size <c>N</c> is extracted from <c>sparse_shape[0]</c>.
/// </remarks>
member this.SerializeManySparse(sparse_indices : TF, sparse_values : TF, sparse_shape : TF, ?out_type : TFDataType, ?operName : String) =
    // Option.toNullable / Option.toObj convert the F# optionals in one step,
    // replacing defaultArg (x |> Option.map Nullable) (Nullable()).
    TF(this.TFGraph, this.TFGraph.SerializeManySparse(sparse_indices.TFOutput, sparse_values.TFOutput, sparse_shape.TFOutput, Option.toNullable out_type, Option.toObj operName))
/// <summary>
/// Serialize a <c>SparseTensor</c> into a <c>[3]</c><c>Tensor</c> object.
/// </summary><param name="sparse_indices">
/// 2-D. The <c>indices</c> of the <c>SparseTensor</c>.
/// </param><param name="sparse_values">
/// 1-D. The <c>values</c> of the <c>SparseTensor</c>.
/// </param><param name="sparse_shape">
/// 1-D. The <c>shape</c> of the <c>SparseTensor</c>.
/// </param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'SerializeSparse'.
/// </param><param name="out_type">
/// Optional argument
/// The <c>dtype</c> to use for serialization; the supported types are <c>string</c>
/// (default) and <c>variant</c>.
/// </param><returns>
/// The TFOperation can be fetched from the resulting TFOutput, by fetching the Operation property from the result.
/// </returns>
member this.SerializeSparse(sparse_indices : TF, sparse_values : TF, sparse_shape : TF, ?out_type : TFDataType, ?operName : String) =
    // Option.toNullable / Option.toObj convert the F# optionals in one step,
    // replacing defaultArg (x |> Option.map Nullable) (Nullable()).
    TF(this.TFGraph, this.TFGraph.SerializeSparse(sparse_indices.TFOutput, sparse_values.TFOutput, sparse_shape.TFOutput, Option.toNullable out_type, Option.toObj operName))
/// <summary>
/// Transforms a Tensor into a serialized TensorProto proto.
/// </summary><param name="tensor">
/// A Tensor of type <c>T</c>.
/// </param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'SerializeTensor'.
/// </param><returns>
/// A serialized TensorProto proto of the input tensor.
/// The TFOperation can be fetched from the resulting TFOutput, by fetching the Operation property from the result.
/// </returns>
member this.SerializeTensor(tensor : TF, ?operName : String) =
    let graph = this.TFGraph
    let name = match operName with Some n -> n | None -> null
    TF(graph, graph.SerializeTensor(tensor.TFOutput, name))
/// <summary>
/// Number of unique elements along last dimension of input <c>set</c>.
/// </summary><param name="set_indices">
/// 2D <c>Tensor</c>, indices of a <c>SparseTensor</c>.
/// </param><param name="set_values">
/// 1D <c>Tensor</c>, values of a <c>SparseTensor</c>.
/// </param><param name="set_shape">
/// 1D <c>Tensor</c>, shape of a <c>SparseTensor</c>.
/// </param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'SetSize'.
/// </param><param name="validate_indices">
/// Optional argument
/// </param><returns>
/// For <c>set</c> ranked <c>n</c>, this is a <c>Tensor</c> with rank <c>n-1</c>, and the same 1st
/// <c>n-1</c> dimensions as <c>set</c>. Each value is the number of unique elements in
/// the corresponding <c>[0...n-1]</c> dimension of <c>set</c>.
/// The TFOperation can be fetched from the resulting TFOutput, by fethching the Operation property from the result.
/// </returns><remarks>
/// Input <c>set</c> is a <c>SparseTensor</c> represented by <c>set_indices</c>, <c>set_values</c>,
/// and <c>set_shape</c>. The last dimension contains values in a set, duplicates are
/// allowed but ignored.
///
/// If <c>validate_indices</c> is <c>True</c>, this op validates the order and range of <c>set</c>
/// indices.
/// </remarks>
member this.SetSize(set_indices : TF, set_values : TF, set_shape : TF, ?validate_indices : Boolean, ?operName : String) =
    // Forward to TFGraph.SetSize, converting the F# options into the
    // Nullable/null forms TensorFlowSharp expects.
    let validate = match validate_indices with Some b -> Nullable b | None -> Nullable()
    let name = match operName with Some n -> n | None -> null
    let g = this.TFGraph
    TF(g, g.SetSize(set_indices.TFOutput, set_values.TFOutput, set_shape.TFOutput, validate, name))
/// <summary>
/// Returns the shape of a tensor.
/// </summary><param name="input"></param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'Shape'.
/// </param><param name="out_type">
/// Optional argument
/// </param><returns>
/// The TFOperation can be fetched from the resulting TFOutput, by fetching the Operation property from the result.
/// </returns><remarks>
/// This operation returns a 1-D integer tensor representing the shape of <c>input</c>.
///
/// For example:
///
/// <code>
/// # 't' is [[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]]
/// shape(t) ==&amp;gt; [2, 2, 3]
/// </code></remarks>
member this.Shape(input : TF, ?out_type : TFDataType, ?operName : String) =
    // Forward to TFGraph.Shape and rewrap the resulting output.
    let outType = match out_type with Some t -> Nullable t | None -> Nullable()
    let name = match operName with Some n -> n | None -> null
    let g = this.TFGraph
    TF(g, g.Shape(input.TFOutput, outType, name))
/// <summary>
/// Returns shape of tensors.
/// </summary><param name="input"></param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'ShapeN'.
/// </param><param name="out_type">
/// Optional argument
/// </param><returns>
/// The TFOperation can be fetched from the resulting TFOutput, by fetching the Operation property from the result.
/// </returns><remarks>
/// This operation returns N 1-D integer tensors representing shape of <c>input[i]s</c>.
/// </remarks>
member this.ShapeN(input : TF[], ?out_type : TFDataType, ?operName : String) =
    // Forward to TFGraph.ShapeN, unwrapping each input and rewrapping each
    // returned TFOutput into a TF value on the same graph.
    let outType = match out_type with Some t -> Nullable t | None -> Nullable()
    let name = match operName with Some n -> n | None -> null
    let g = this.TFGraph
    let raw = g.ShapeN([| for x in input -> x.TFOutput |], outType, name)
    [| for o in raw -> TF(g, o) |]
/// <summary>
/// Generate a sharded filename. The filename is printf formatted as
/// </summary><param name="basename"></param><param name="shard"></param><param name="num_shards"></param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'ShardedFilename'.
/// </param><returns>
/// The TFOperation can be fetched from the resulting TFOutput, by fetching the Operation property from the result.
/// </returns><remarks>
/// %s-%05d-of-%05d, basename, shard, num_shards.
/// </remarks>
member this.ShardedFilename(basename : TF, shard : TF, num_shards : TF, ?operName : String) =
    // Thin wrapper over TFGraph.ShardedFilename.
    let name = match operName with Some n -> n | None -> null
    let g = this.TFGraph
    TF(g, g.ShardedFilename(basename.TFOutput, shard.TFOutput, num_shards.TFOutput, name))
/// <summary>
/// Generate a glob pattern matching all sharded file names.
/// </summary><param name="basename"></param><param name="num_shards"></param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'ShardedFilespec'.
/// </param><returns>
/// The TFOperation can be fetched from the resulting TFOutput, by fetching the Operation property from the result.
/// </returns>
member this.ShardedFilespec(basename : TF, num_shards : TF, ?operName : String) =
    // Thin wrapper over TFGraph.ShardedFilespec.
    let name = match operName with Some n -> n | None -> null
    let g = this.TFGraph
    TF(g, g.ShardedFilespec(basename.TFOutput, num_shards.TFOutput, name))
/// <summary>
/// Creates a dataset that shuffles and repeats elements from <c>input_dataset</c></summary><param name="input_dataset"></param><param name="buffer_size">
/// The number of output elements to buffer in an iterator over
/// this dataset. Compare with the <c>min_after_dequeue</c> attr when creating a
/// <c>RandomShuffleQueue</c>.
/// </param><param name="seed">
/// A scalar seed for the random number generator. If either <c>seed</c> or
/// <c>seed2</c> is set to be non-zero, the random number generator is seeded
/// by the given seed. Otherwise, a random seed is used.
/// </param><param name="seed2">
/// A second scalar seed to avoid seed collision.
/// </param><param name="count">
/// A scalar representing the number of times the underlying dataset
/// should be repeated. The default is <c>-1</c>, which results in infinite repetition.
/// </param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'ShuffleAndRepeatDataset'.
/// </param><param name="output_types"></param><param name="output_shapes"></param><returns>
/// The TFOperation can be fetched from the resulting TFOutput, by fetching the Operation property from the result.
/// </returns><remarks>
/// pseudorandomly.
/// </remarks>
member this.ShuffleAndRepeatDataset(input_dataset : TF, buffer_size : TF, seed : TF, seed2 : TF, count : TF, output_types : TFDataType[], output_shapes : TFShape[], ?operName : String) =
    // Thin wrapper over TFGraph.ShuffleAndRepeatDataset.
    let name = match operName with Some n -> n | None -> null
    let g = this.TFGraph
    TF(g, g.ShuffleAndRepeatDataset(input_dataset.TFOutput, buffer_size.TFOutput, seed.TFOutput, seed2.TFOutput, count.TFOutput, output_types, output_shapes, name))
/// <summary>
/// Creates a dataset that shuffles elements from <c>input_dataset</c> pseudorandomly.
/// </summary><param name="input_dataset"></param><param name="buffer_size">
/// The number of output elements to buffer in an iterator over
/// this dataset. Compare with the <c>min_after_dequeue</c> attr when creating a
/// <c>RandomShuffleQueue</c>.
/// </param><param name="seed">
/// A scalar seed for the random number generator. If either <c>seed</c> or
/// <c>seed2</c> is set to be non-zero, the random number generator is seeded
/// by the given seed. Otherwise, a random seed is used.
/// </param><param name="seed2">
/// A second scalar seed to avoid seed collision.
/// </param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'ShuffleDataset'.
/// </param><param name="reshuffle_each_iteration">
/// Optional argument
/// If true, each iterator over this dataset will be given
/// a different pseudorandomly generated seed, based on a sequence seeded by the
/// <c>seed</c> and <c>seed2</c> inputs. If false, each iterator will be given the same
/// seed, and repeated iteration over this dataset will yield the exact same
/// sequence of results.
/// </param><param name="output_types"></param><param name="output_shapes"></param><returns>
/// The TFOperation can be fetched from the resulting TFOutput, by fetching the Operation property from the result.
/// </returns>
member this.ShuffleDataset(input_dataset : TF, buffer_size : TF, seed : TF, seed2 : TF, output_types : TFDataType[], output_shapes : TFShape[], ?reshuffle_each_iteration : Boolean, ?operName : String) =
    // Forward to TFGraph.ShuffleDataset, converting the F# options into the
    // Nullable/null forms TensorFlowSharp expects.
    let reshuffle = match reshuffle_each_iteration with Some b -> Nullable b | None -> Nullable()
    let name = match operName with Some n -> n | None -> null
    let g = this.TFGraph
    TF(g, g.ShuffleDataset(input_dataset.TFOutput, buffer_size.TFOutput, seed.TFOutput, seed2.TFOutput, output_types, output_shapes, reshuffle, name))
/// <summary>
/// An op that shuts down a running distributed TPU system. The Op returns
/// </summary><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'ShutdownDistributedTPU'.
/// </param><returns>
/// Returns the description of the operation
/// </returns><remarks>
/// an error if no system is running.
/// </remarks>
member this.ShutdownDistributedTPU(?operName : String) =
    // Forward to TFGraph.ShutdownDistributedTPU; the raw operation value
    // returned by TensorFlowSharp is passed through unchanged.
    let name = match operName with Some n -> n | None -> null
    this.TFGraph.ShutdownDistributedTPU(name)
/// <summary>
/// Computes sigmoid of <c>x</c> element-wise.
/// </summary><param name="x"></param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'Sigmoid'.
/// </param><returns>
/// The TFOperation can be fetched from the resulting TFOutput, by fetching the Operation property from the result.
/// </returns><remarks>
/// Specifically, <c>y = 1 / (1 + exp(-x))</c>.
/// </remarks>
member this.Sigmoid(x : TF, ?operName : String) =
    // Thin wrapper over TFGraph.Sigmoid.
    let name = match operName with Some n -> n | None -> null
    let g = this.TFGraph
    TF(g, g.Sigmoid(x.TFOutput, name))
/// <summary>
/// Computes the gradient of the sigmoid of <c>x</c> wrt its input.
/// </summary><param name="y"></param><param name="dy"></param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'SigmoidGrad'.
/// </param><returns>
/// The TFOperation can be fetched from the resulting TFOutput, by fetching the Operation property from the result.
/// </returns><remarks>
/// Specifically, <c>grad = dy * y * (1 - y)</c>, where <c>y = sigmoid(x)</c>, and
/// <c>dy</c> is the corresponding input gradient.
/// </remarks>
member this.SigmoidGrad(y : TF, dy : TF, ?operName : String) =
    // Thin wrapper over TFGraph.SigmoidGrad.
    let name = match operName with Some n -> n | None -> null
    let g = this.TFGraph
    TF(g, g.SigmoidGrad(y.TFOutput, dy.TFOutput, name))
/// <summary>
/// Returns an element-wise indication of the sign of a number.
/// </summary><param name="x"></param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'Sign'.
/// </param><returns>
/// The TFOperation can be fetched from the resulting TFOutput, by fetching the Operation property from the result.
/// </returns><remarks><c>y = sign(x) = -1</c> if <c>x &amp;lt; 0</c>; 0 if <c>x == 0</c>; 1 if <c>x &amp;gt; 0</c>.
///
/// For complex numbers, <c>y = sign(x) = x / |x|</c> if <c>x != 0</c>, otherwise <c>y = 0</c>.
/// </remarks>
member this.Sign(x : TF, ?operName : String) =
    // Thin wrapper over TFGraph.Sign.
    let name = match operName with Some n -> n | None -> null
    let g = this.TFGraph
    TF(g, g.Sign(x.TFOutput, name))
/// <summary>
/// Computes sin of x element-wise.
/// </summary><param name="x"></param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'Sin'.
/// </param><returns>
/// The TFOperation can be fetched from the resulting TFOutput, by fetching the Operation property from the result.
/// </returns>
member this.Sin(x : TF, ?operName : String) =
    // Thin wrapper over TFGraph.Sin.
    let name = match operName with Some n -> n | None -> null
    let g = this.TFGraph
    TF(g, g.Sin(x.TFOutput, name))
/// <summary>
/// Computes hyperbolic sine of x element-wise.
/// </summary><param name="x"></param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'Sinh'.
/// </param><returns>
/// The TFOperation can be fetched from the resulting TFOutput, by fetching the Operation property from the result.
/// </returns>
member this.Sinh(x : TF, ?operName : String) =
    // Thin wrapper over TFGraph.Sinh.
    let name = match operName with Some n -> n | None -> null
    let g = this.TFGraph
    TF(g, g.Sinh(x.TFOutput, name))
/// <summary>
/// Returns the size of a tensor.
/// </summary><param name="input"></param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'Size'.
/// </param><param name="out_type">
/// Optional argument
/// </param><returns>
/// The TFOperation can be fetched from the resulting TFOutput, by fetching the Operation property from the result.
/// </returns><remarks>
/// This operation returns an integer representing the number of elements in
/// <c>input</c>.
///
/// For example:
///
/// <code>
/// # 't' is [[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]]
/// size(t) ==&amp;gt; 12
/// </code></remarks>
member this.Size(input : TF, ?out_type : TFDataType, ?operName : String) =
    // Forward to TFGraph.Size, converting the optional output type to Nullable.
    let outType = match out_type with Some t -> Nullable t | None -> Nullable()
    let name = match operName with Some n -> n | None -> null
    let g = this.TFGraph
    TF(g, g.Size(input.TFOutput, outType, name))
/// <summary>
/// Creates a dataset that skips <c>count</c> elements from the <c>input_dataset</c>.
/// </summary><param name="input_dataset"></param><param name="count">
/// A scalar representing the number of elements from the <c>input_dataset</c>
/// that should be skipped. If count is -1, skips everything.
/// </param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'SkipDataset'.
/// </param><param name="output_types"></param><param name="output_shapes"></param><returns>
/// The TFOperation can be fetched from the resulting TFOutput, by fetching the Operation property from the result.
/// </returns>
member this.SkipDataset(input_dataset : TF, count : TF, output_types : TFDataType[], output_shapes : TFShape[], ?operName : String) =
    // Thin wrapper over TFGraph.SkipDataset.
    let name = match operName with Some n -> n | None -> null
    let g = this.TFGraph
    TF(g, g.SkipDataset(input_dataset.TFOutput, count.TFOutput, output_types, output_shapes, name))
/// <summary>
/// Parses a text file and creates a batch of examples.
/// </summary><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'Skipgram'.
/// </param><param name="window_size">
/// Optional argument
/// The number of words to predict to the left and right of the target.
/// </param><param name="min_count">
/// Optional argument
/// The minimum number of word occurrences for it to be included in the
/// vocabulary.
/// </param><param name="subsample">
/// Optional argument
/// Threshold for word occurrence. Words that appear with higher
/// frequency will be randomly down-sampled. Set to 0 to disable.
/// </param><param name="filename">
/// The corpus's text file name.
/// </param><param name="batch_size">
/// The size of produced batch.
/// </param><returns>
/// Returns a tuple with multiple values, as follows:
/// vocab_word: A vector of words in the corpus.
/// vocab_freq: Frequencies of words. Sorted in the non-ascending order.
/// words_per_epoch: Number of words per epoch in the data file.
/// current_epoch: The current epoch number.
/// total_words_processed: The total number of words processed so far.
/// examples: A vector of word ids.
/// labels: A vector of word ids.
/// The TFOperation can be fetched from any of the TFOutputs returned in the tuple values, by fetching the Operation property.
/// </returns>
member this.Skipgram(filename : String, batch_size : Int64, ?window_size : Int64, ?min_count : Int64, ?subsample : Single, ?operName : String) =
    // Forward to TFGraph.Skipgram; the multi-output tuple from
    // TensorFlowSharp is returned unchanged (not rewrapped as TF values).
    let windowSize = match window_size with Some v -> Nullable v | None -> Nullable()
    let minCount = match min_count with Some v -> Nullable v | None -> Nullable()
    let subsampleArg = match subsample with Some v -> Nullable v | None -> Nullable()
    let name = match operName with Some n -> n | None -> null
    this.TFGraph.Skipgram(filename, batch_size, windowSize, minCount, subsampleArg, name)
/// <summary>
/// Return a slice from 'input'.
/// </summary><param name="input"></param><param name="begin">
/// begin[i] specifies the offset into the 'i'th dimension of
/// 'input' to slice from.
/// </param><param name="size">
/// size[i] specifies the number of elements of the 'i'th dimension
/// of 'input' to slice. If size[i] is -1, all remaining elements in dimension
/// i are included in the slice (i.e. this is equivalent to setting
/// size[i] = input.dim_size(i) - begin[i]).
/// </param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'Slice'.
/// </param><returns>
/// The TFOperation can be fetched from the resulting TFOutput, by fetching the Operation property from the result.
/// </returns><remarks>
/// The output tensor is a tensor with dimensions described by 'size'
/// whose values are extracted from 'input' starting at the offsets in
/// 'begin'.
///
/// *Requirements*:
/// 0 &amp;lt;= begin[i] &amp;lt;= begin[i] + size[i] &amp;lt;= Di for i in [0, n)
/// </remarks>
member this.Slice(input : TF, ``begin`` : TF, size : TF, ?operName : String) =
    // Thin wrapper over TFGraph.Slice; ``begin`` is escaped because it is an
    // F# keyword but must keep this name for named-argument callers.
    let name = match operName with Some n -> n | None -> null
    let g = this.TFGraph
    TF(g, g.Slice(input.TFOutput, ``begin``.TFOutput, size.TFOutput, name))
/// <summary>
/// Returns a copy of the input tensor.
/// </summary><param name="input"></param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'Snapshot'.
/// </param><returns>
/// The TFOperation can be fetched from the resulting TFOutput, by fetching the Operation property from the result.
/// </returns>
member this.Snapshot(input : TF, ?operName : String) =
    // Thin wrapper over TFGraph.Snapshot.
    let name = match operName with Some n -> n | None -> null
    let g = this.TFGraph
    TF(g, g.Snapshot(input.TFOutput, name))
/// <summary>
/// Computes softmax activations.
/// </summary><param name="logits">
/// 2-D with shape <c>[batch_size, num_classes]</c>.
/// </param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'Softmax'.
/// </param><returns>
/// Same shape as <c>logits</c>.
/// The TFOperation can be fetched from the resulting TFOutput, by fetching the Operation property from the result.
/// </returns><remarks>
/// For each batch <c>i</c> and class <c>j</c> we have
///
/// $$softmax[i, j] = exp(logits[i, j]) / sum_j(exp(logits[i, j]))$$
/// </remarks>
member this.Softmax(logits : TF, ?operName : String) =
    // Thin wrapper over TFGraph.Softmax.
    let name = match operName with Some n -> n | None -> null
    let g = this.TFGraph
    TF(g, g.Softmax(logits.TFOutput, name))
/// <summary>
/// Computes softmax cross entropy cost and gradients to backpropagate.
/// </summary><param name="features">
/// batch_size x num_classes matrix
/// </param><param name="labels">
/// batch_size x num_classes matrix
/// The caller must ensure that each batch of labels represents a valid
/// probability distribution.
/// </param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'SoftmaxCrossEntropyWithLogits'.
/// </param><returns>
/// Returns a tuple with multiple values, as follows:
/// loss: Per example loss (batch_size vector).
/// backprop: backpropagated gradients (batch_size x num_classes matrix).
/// The TFOperation can be fetched from any of the TFOutputs returned in the tuple values, by fetching the Operation property.
/// </returns><remarks>
/// Inputs are the logits, not probabilities.
/// </remarks>
member this.SoftmaxCrossEntropyWithLogits(features : TF, labels : TF, ?operName : String) =
    // Forward to TFGraph.SoftmaxCrossEntropyWithLogits; the (loss, backprop)
    // tuple from TensorFlowSharp is returned unchanged (not rewrapped).
    let name = match operName with Some n -> n | None -> null
    this.TFGraph.SoftmaxCrossEntropyWithLogits(features.TFOutput, labels.TFOutput, name)
/// <summary>
/// Computes softplus: <c>log(exp(features) + 1)</c>.
/// </summary><param name="features"></param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'Softplus'.
/// </param><returns>
/// The TFOperation can be fetched from the resulting TFOutput, by fetching the Operation property from the result.
/// </returns>
member this.Softplus(features : TF, ?operName : String) =
    // Thin wrapper over TFGraph.Softplus.
    let name = match operName with Some n -> n | None -> null
    let g = this.TFGraph
    TF(g, g.Softplus(features.TFOutput, name))
/// <summary>
/// Computes softplus gradients for a softplus operation.
/// </summary><param name="gradients">
/// The backpropagated gradients to the corresponding softplus operation.
/// </param><param name="features">
/// The features passed as input to the corresponding softplus operation.
/// </param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'SoftplusGrad'.
/// </param><returns>
/// The gradients: <c>gradients / (1 + exp(-features))</c>.
/// The TFOperation can be fetched from the resulting TFOutput, by fetching the Operation property from the result.
/// </returns>
member this.SoftplusGrad(gradients : TF, features : TF, ?operName : String) =
    // Thin wrapper over TFGraph.SoftplusGrad.
    let name = match operName with Some n -> n | None -> null
    let g = this.TFGraph
    TF(g, g.SoftplusGrad(gradients.TFOutput, features.TFOutput, name))
/// <summary>
/// Computes softsign: <c>features / (abs(features) + 1)</c>.
/// </summary><param name="features"></param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'Softsign'.
/// </param><returns>
/// The TFOperation can be fetched from the resulting TFOutput, by fetching the Operation property from the result.
/// </returns>
member this.Softsign(features : TF, ?operName : String) =
    // Thin wrapper over TFGraph.Softsign.
    let name = match operName with Some n -> n | None -> null
    let g = this.TFGraph
    TF(g, g.Softsign(features.TFOutput, name))
/// <summary>
/// Computes softsign gradients for a softsign operation.
/// </summary><param name="gradients">
/// The backpropagated gradients to the corresponding softsign operation.
/// </param><param name="features">
/// The features passed as input to the corresponding softsign operation.
/// </param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'SoftsignGrad'.
/// </param><returns>
/// The gradients: <c>gradients / (1 + abs(features)) ** 2</c>.
/// The TFOperation can be fetched from the resulting TFOutput, by fetching the Operation property from the result.
/// </returns>
member this.SoftsignGrad(gradients : TF, features : TF, ?operName : String) =
    // Thin wrapper over TFGraph.SoftsignGrad.
    let name = match operName with Some n -> n | None -> null
    let g = this.TFGraph
    TF(g, g.SoftsignGrad(gradients.TFOutput, features.TFOutput, name))
/// <summary>
/// SpaceToBatch for 4-D tensors of type T.
/// </summary><param name="input">
/// 4-D with shape <c>[batch, height, width, depth]</c>.
/// </param><param name="paddings">
/// 2-D tensor of non-negative integers with shape <c>[2, 2]</c>. It specifies
/// the padding of the input with zeros across the spatial dimensions as follows:
///
/// paddings = [[pad_top, pad_bottom], [pad_left, pad_right]]
///
/// The effective spatial dimensions of the zero-padded input tensor will be:
///
/// height_pad = pad_top + height + pad_bottom
/// width_pad = pad_left + width + pad_right
///
/// The attr <c>block_size</c> must be greater than one. It indicates the block size.
///
/// * Non-overlapping blocks of size <c>block_size x block size</c> in the height and
/// width dimensions are rearranged into the batch dimension at each location.
/// * The batch of the output tensor is <c>batch * block_size * block_size</c>.
/// * Both height_pad and width_pad must be divisible by block_size.
///
/// The shape of the output will be:
///
/// [batch*block_size*block_size, height_pad/block_size, width_pad/block_size,
/// depth]
///
/// Some examples:
///
/// (1) For the following input of shape <c>[1, 2, 2, 1]</c> and block_size of 2:
///
/// <code>
/// x = [[[[1], [2]], [[3], [4]]]]
/// </code>
///
/// The output tensor has shape <c>[4, 1, 1, 1]</c> and value:
///
/// <code>
/// [[[[1]]], [[[2]]], [[[3]]], [[[4]]]]
/// </code>
///
/// (2) For the following input of shape <c>[1, 2, 2, 3]</c> and block_size of 2:
///
/// <code>
/// x = [[[[1, 2, 3], [4, 5, 6]],
/// [[7, 8, 9], [10, 11, 12]]]]
/// </code>
///
/// The output tensor has shape <c>[4, 1, 1, 3]</c> and value:
///
/// <code>
/// [[[1, 2, 3]], [[4, 5, 6]], [[7, 8, 9]], [[10, 11, 12]]]
/// </code>
///
/// (3) For the following input of shape <c>[1, 4, 4, 1]</c> and block_size of 2:
///
/// <code>
/// x = [[[[1], [2], [3], [4]],
/// [[5], [6], [7], [8]],
/// [[9], [10], [11], [12]],
/// [[13], [14], [15], [16]]]]
/// </code>
///
/// The output tensor has shape <c>[4, 2, 2, 1]</c> and value:
///
/// <code>
/// x = [[[[1], [3]], [[9], [11]]],
/// [[[2], [4]], [[10], [12]]],
/// [[[5], [7]], [[13], [15]]],
/// [[[6], [8]], [[14], [16]]]]
/// </code>
///
/// (4) For the following input of shape <c>[2, 2, 4, 1]</c> and block_size of 2:
///
/// <code>
/// x = [[[[1], [2], [3], [4]],
/// [[5], [6], [7], [8]]],
/// [[[9], [10], [11], [12]],
/// [[13], [14], [15], [16]]]]
/// </code>
///
/// The output tensor has shape <c>[8, 1, 2, 1]</c> and value:
///
/// <code>
/// x = [[[[1], [3]]], [[[9], [11]]], [[[2], [4]]], [[[10], [12]]],
/// [[[5], [7]]], [[[13], [15]]], [[[6], [8]]], [[[14], [16]]]]
/// </code>
///
/// Among others, this operation is useful for reducing atrous convolution into
/// regular convolution.
/// </param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'SpaceToBatch'.
/// </param><param name="block_size"></param><returns>
/// The TFOperation can be fetched from the resulting TFOutput, by fetching the Operation property from the result.
/// </returns><remarks>
/// This is a legacy version of the more general SpaceToBatchND.
///
/// Zero-pads and then rearranges (permutes) blocks of spatial data into batch.
/// More specifically, this op outputs a copy of the input tensor where values from
/// the <c>height</c> and <c>width</c> dimensions are moved to the <c>batch</c> dimension. After
/// the zero-padding, both <c>height</c> and <c>width</c> of the input must be divisible by the
/// block size.
/// </remarks>
member this.SpaceToBatch(input : TF, paddings : TF, block_size : Int64, ?operName : String) =
    // Thin wrapper over TFGraph.SpaceToBatch (legacy form of SpaceToBatchND).
    let name = match operName with Some n -> n | None -> null
    let g = this.TFGraph
    TF(g, g.SpaceToBatch(input.TFOutput, paddings.TFOutput, block_size, name))
/// <summary>
/// SpaceToBatch for N-D tensors of type T.
/// </summary><param name="input">
/// N-D with shape <c>input_shape = [batch] + spatial_shape + remaining_shape</c>,
/// where spatial_shape has <c>M</c> dimensions.
/// </param><param name="block_shape">
/// 1-D with shape <c>[M]</c>, all values must be &amp;gt;= 1.
/// </param><param name="paddings">
/// 2-D with shape <c>[M, 2]</c>, all values must be &amp;gt;= 0.
/// <c>paddings[i] = [pad_start, pad_end]</c> specifies the padding for input dimension
/// <c>i + 1</c>, which corresponds to spatial dimension <c>i</c>. It is required that
/// <c>block_shape[i]</c> divides <c>input_shape[i + 1] + pad_start + pad_end</c>.
///
/// This operation is equivalent to the following steps:
///
/// 1. Zero-pad the start and end of dimensions <c>[1, ..., M]</c> of the
/// input according to <c>paddings</c> to produce <c>padded</c> of shape <c>padded_shape</c>.
///
/// 2. Reshape <c>padded</c> to <c>reshaped_padded</c> of shape:
///
/// [batch] +
/// [padded_shape[1] / block_shape[0],
/// block_shape[0],
/// ...,
/// padded_shape[M] / block_shape[M-1],
/// block_shape[M-1]] +
/// remaining_shape
///
/// 3. Permute dimensions of <c>reshaped_padded</c> to produce
/// <c>permuted_reshaped_padded</c> of shape:
///
/// block_shape +
/// [batch] +
/// [padded_shape[1] / block_shape[0],
/// ...,
/// padded_shape[M] / block_shape[M-1]] +
/// remaining_shape
///
/// 4. Reshape <c>permuted_reshaped_padded</c> to flatten <c>block_shape</c> into the batch
/// dimension, producing an output tensor of shape:
///
/// [batch * prod(block_shape)] +
/// [padded_shape[1] / block_shape[0],
/// ...,
/// padded_shape[M] / block_shape[M-1]] +
/// remaining_shape
///
/// Some examples:
///
/// (1) For the following input of shape <c>[1, 2, 2, 1]</c>, <c>block_shape = [2, 2]</c>, and
/// <c>paddings = [[0, 0], [0, 0]]</c>:
///
/// <code>
/// x = [[[[1], [2]], [[3], [4]]]]
/// </code>
///
/// The output tensor has shape <c>[4, 1, 1, 1]</c> and value:
///
/// <code>
/// [[[[1]]], [[[2]]], [[[3]]], [[[4]]]]
/// </code>
///
/// (2) For the following input of shape <c>[1, 2, 2, 3]</c>, <c>block_shape = [2, 2]</c>, and
/// <c>paddings = [[0, 0], [0, 0]]</c>:
///
/// <code>
/// x = [[[[1, 2, 3], [4, 5, 6]],
/// [[7, 8, 9], [10, 11, 12]]]]
/// </code>
///
/// The output tensor has shape <c>[4, 1, 1, 3]</c> and value:
///
/// <code>
/// [[[1, 2, 3]], [[4, 5, 6]], [[7, 8, 9]], [[10, 11, 12]]]
/// </code>
///
/// (3) For the following input of shape <c>[1, 4, 4, 1]</c>, <c>block_shape = [2, 2]</c>, and
/// <c>paddings = [[0, 0], [0, 0]]</c>:
///
/// <code>
/// x = [[[[1], [2], [3], [4]],
/// [[5], [6], [7], [8]],
/// [[9], [10], [11], [12]],
/// [[13], [14], [15], [16]]]]
/// </code>
///
/// The output tensor has shape <c>[4, 2, 2, 1]</c> and value:
///
/// <code>
/// x = [[[[1], [3]], [[9], [11]]],
/// [[[2], [4]], [[10], [12]]],
/// [[[5], [7]], [[13], [15]]],
/// [[[6], [8]], [[14], [16]]]]
/// </code>
///
/// (4) For the following input of shape <c>[2, 2, 4, 1]</c>, block_shape = <c>[2, 2]</c>, and
/// paddings = <c>[[0, 0], [2, 0]]</c>:
///
/// <code>
/// x = [[[[1], [2], [3], [4]],
/// [[5], [6], [7], [8]]],
/// [[[9], [10], [11], [12]],
/// [[13], [14], [15], [16]]]]
/// </code>
///
/// The output tensor has shape <c>[8, 1, 3, 1]</c> and value:
///
/// <code>
/// x = [[[[0], [1], [3]]], [[[0], [9], [11]]],
/// [[[0], [2], [4]]], [[[0], [10], [12]]],
/// [[[0], [5], [7]]], [[[0], [13], [15]]],
/// [[[0], [6], [8]]], [[[0], [14], [16]]]]
/// </code>
///
/// Among others, this operation is useful for reducing atrous convolution into
/// regular convolution.
/// </param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'SpaceToBatchND'.
/// </param><returns>
/// The TFOperation can be fetched from the resulting TFOutput, by fetching the Operation property from the result.
/// </returns><remarks>
/// This operation divides "spatial" dimensions <c>[1, ..., M]</c> of the input into a
/// grid of blocks of shape <c>block_shape</c>, and interleaves these blocks with the
/// "batch" dimension (0) such that in the output, the spatial dimensions
/// <c>[1, ..., M]</c> correspond to the position within the grid, and the batch
/// dimension combines both the position within a spatial block and the original
/// batch position. Prior to division into blocks, the spatial dimensions of the
/// input are optionally zero padded according to <c>paddings</c>. See below for a
/// precise description.
/// </remarks>
member this.SpaceToBatchND(input : TF, block_shape : TF, paddings : TF, ?operName : String) =
    // Thin wrapper over TFGraph.SpaceToBatchND.
    let name = match operName with Some n -> n | None -> null
    let g = this.TFGraph
    TF(g, g.SpaceToBatchND(input.TFOutput, block_shape.TFOutput, paddings.TFOutput, name))
/// <summary>
/// SpaceToDepth for tensors of type T.
/// </summary><param name="input"></param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'SpaceToDepth'.
/// </param><param name="data_format">
/// Optional argument
/// </param><param name="block_size">
/// The size of the spatial block.
/// </param><returns>
/// The TFOperation can be fetched from the resulting TFOutput, by fethching the Operation property from the result.
/// </returns><remarks>
/// Rearranges blocks of spatial data, into depth. More specifically,
/// this op outputs a copy of the input tensor where values from the <c>height</c>
/// and <c>width</c> dimensions are moved to the <c>depth</c> dimension.
/// The attr <c>block_size</c> indicates the input block size.
///
/// * Non-overlapping blocks of size <c>block_size x block size</c> are rearranged
/// into depth at each location.
/// * The depth of the output tensor is <c>block_size * block_size * input_depth</c>.
/// * The Y, X coordinates within each block of the input become the high order
/// component of the output channel index.
/// * The input tensor's height and width must be divisible by block_size.
///
/// The <c>data_format</c> attr specifies the layout of the input and output tensors
/// with the following options:
/// "NHWC": <c>[ batch, height, width, channels ]</c>
/// "NCHW": <c>[ batch, channels, height, width ]</c>
/// "NCHW_VECT_C":
/// <c>qint8 [ batch, channels / 4, height, width, 4 ]</c>
///
/// It is useful to consider the operation as transforming a 6-D Tensor.
/// e.g. for data_format = NHWC,
/// Each element in the input tensor can be specified via 6 coordinates,
/// ordered by decreasing memory layout significance as:
/// n,oY,bY,oX,bX,iC (where n=batch index, oX, oY means X or Y coordinates
/// within the output image, bX, bY means coordinates
/// within the input block, iC means input channels).
/// The output would be a transpose to the following layout:
/// n,oY,oX,bY,bX,iC
///
/// This operation is useful for resizing the activations between convolutions
/// (but keeping all data), e.g. instead of pooling. It is also useful for training
/// purely convolutional models.
///
/// For example, given an input of shape <c>[1, 2, 2, 1]</c>, data_format = "NHWC" and
/// block_size = 2:
///
/// <code>
/// x = [[[[1], [2]],
/// [[3], [4]]]]
/// </code>
///
/// This operation will output a tensor of shape <c>[1, 1, 1, 4]</c>:
///
/// <code>
/// [[[[1, 2, 3, 4]]]]
/// </code>
///
/// Here, the input has a batch of 1 and each batch element has shape <c>[2, 2, 1]</c>,
/// the corresponding output will have a single element (i.e. width and height are
/// both 1) and will have a depth of 4 channels (1 * block_size * block_size).
/// The output element shape is <c>[1, 1, 4]</c>.
///
/// For an input tensor with larger depth, here of shape <c>[1, 2, 2, 3]</c>, e.g.
///
/// <code>
/// x = [[[[1, 2, 3], [4, 5, 6]],
/// [[7, 8, 9], [10, 11, 12]]]]
/// </code>
///
/// This operation, for block_size of 2, will return the following tensor of shape
/// <c>[1, 1, 1, 12]</c><code>
/// [[[[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]]]]
/// </code>
///
/// Similarly, for the following input of shape <c>[1 4 4 1]</c>, and a block size of 2:
///
/// <code>
/// x = [[[[1], [2], [5], [6]],
/// [[3], [4], [7], [8]],
/// [[9], [10], [13], [14]],
/// [[11], [12], [15], [16]]]]
/// </code>
///
/// the operator will return the following tensor of shape <c>[1 2 2 4]</c>:
///
/// <code>
/// x = [[[[1, 2, 3, 4],
/// [5, 6, 7, 8]],
/// [[9, 10, 11, 12],
/// [13, 14, 15, 16]]]]
/// </code></remarks>
member this.SpaceToDepth(input : TF, block_size : Int64, ?data_format : String, ?operName : String) =
    // Forward to the underlying TFGraph op; optional strings are passed through as null when absent.
    let graph = this.TFGraph
    TF(graph, graph.SpaceToDepth(input.TFOutput, block_size, Option.toObj data_format, Option.toObj operName))
/// <summary>
/// Applies a sparse gradient to a given accumulator.
/// </summary><param name="handle">
/// The handle to an accumulator.
/// </param><param name="local_step">
/// The local_step value at which the sparse gradient was computed.
/// </param><param name="gradient_indices">
/// Indices of the sparse gradient to be accumulated. Must be a
/// vector.
/// </param><param name="gradient_values">
/// Values are the non-zero slices of the gradient, and must have
/// the same first dimension as indices, i.e., the nnz represented by indices and
/// values must be consistent.
/// </param><param name="gradient_shape">
/// Shape of the sparse gradient to be accumulated.
/// </param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'SparseAccumulatorApplyGradient'.
/// </param><param name="has_known_shape">
/// Boolean indicating whether gradient_shape is unknown, in which
/// case the input is ignored during validation.
/// </param><returns>
/// Returns the description of the operation
/// </returns><remarks>
/// Does not add if local_step is smaller than the accumulator's
/// global_step.
/// </remarks>
member this.SparseAccumulatorApplyGradient(handle : TF, local_step : TF, gradient_indices : TF, gradient_values : TF, gradient_shape : TF, has_known_shape : Boolean, ?operName : String) =
    // This op has no tensor output, so the raw TFGraph result is returned unwrapped.
    this.TFGraph.SparseAccumulatorApplyGradient(handle.TFOutput, local_step.TFOutput, gradient_indices.TFOutput, gradient_values.TFOutput, gradient_shape.TFOutput, has_known_shape, Option.toObj operName)
/// <summary>
/// Extracts the average sparse gradient in a SparseConditionalAccumulator.
/// </summary><param name="handle">
/// The handle to a SparseConditionalAccumulator.
/// </param><param name="num_required">
/// Number of gradients required before we return an aggregate.
/// </param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'SparseAccumulatorTakeGradient'.
/// </param><param name="dtype">
/// The data type of accumulated gradients. Needs to correspond to the type
/// of the accumulator.
/// </param><returns>
/// Returns a tuple with multiple values, as follows:
/// indices: Indices of the average of the accumulated sparse gradients.
/// values: Values of the average of the accumulated sparse gradients.
/// shape: Shape of the average of the accumulated sparse gradients.
/// The TFOperation can be fetched from any of the TFOutputs returned in the tuple values, by fetching the Operation property.
/// </returns><remarks>
/// The op will block until sufficient (i.e., more than num_required)
/// gradients have been accumulated. If the accumulator has already
/// aggregated more than num_required gradients, it will return its
/// average of the accumulated gradients. Also automatically increments
/// the recorded global_step in the accumulator by 1, and resets the
/// aggregate to 0.
/// </remarks>
member this.SparseAccumulatorTakeGradient(handle : TF, num_required : TF, dtype : TFDataType, ?operName : String) =
    // Returns the (indices, values, shape) tuple produced by the underlying op, unwrapped.
    this.TFGraph.SparseAccumulatorTakeGradient(handle.TFOutput, num_required.TFOutput, dtype, Option.toObj operName)
/// <summary>
/// Adds two <c>SparseTensor</c> objects to produce another <c>SparseTensor</c>.
/// </summary><param name="a_indices">
/// 2-D. The <c>indices</c> of the first <c>SparseTensor</c>, size <c>[nnz, ndims]</c> Matrix.
/// </param><param name="a_values">
/// 1-D. The <c>values</c> of the first <c>SparseTensor</c>, size <c>[nnz]</c> Vector.
/// </param><param name="a_shape">
/// 1-D. The <c>shape</c> of the first <c>SparseTensor</c>, size <c>[ndims]</c> Vector.
/// </param><param name="b_indices">
/// 2-D. The <c>indices</c> of the second <c>SparseTensor</c>, size <c>[nnz, ndims]</c> Matrix.
/// </param><param name="b_values">
/// 1-D. The <c>values</c> of the second <c>SparseTensor</c>, size <c>[nnz]</c> Vector.
/// </param><param name="b_shape">
/// 1-D. The <c>shape</c> of the second <c>SparseTensor</c>, size <c>[ndims]</c> Vector.
/// </param><param name="thresh">
/// 0-D. The magnitude threshold that determines if an output value/index
/// pair takes space.
/// </param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'SparseAdd'.
/// </param><returns>
/// Returns a tuple with multiple values, as follows:
/// sum_indices:
/// sum_values:
/// sum_shape:
/// The TFOperation can be fetched from any of the TFOutputs returned in the tuple values, by fetching the Operation property.
/// </returns><remarks>
/// The input <c>SparseTensor</c> objects' indices are assumed ordered in standard
/// lexicographic order. If this is not the case, before this step run
/// <c>SparseReorder</c> to restore index ordering.
///
/// By default, if two values sum to zero at some index, the output <c>SparseTensor</c>
/// would still include that particular location in its index, storing a zero in the
/// corresponding value slot. To override this, callers can specify <c>thresh</c>,
/// indicating that if the sum has a magnitude strictly smaller than <c>thresh</c>, its
/// corresponding value and index would then not be included. In particular,
/// <c>thresh == 0</c> (default) means everything is kept and actual thresholding happens
/// only for a positive value.
///
/// In the following shapes, <c>nnz</c> is the count after taking <c>thresh</c> into account.
/// </remarks>
member this.SparseAdd(a_indices : TF, a_values : TF, a_shape : TF, b_indices : TF, b_values : TF, b_shape : TF, thresh : TF, ?operName : String) =
    // Multi-output op: return the (sum_indices, sum_values, sum_shape) tuple as-is.
    this.TFGraph.SparseAdd(a_indices.TFOutput, a_values.TFOutput, a_shape.TFOutput, b_indices.TFOutput, b_values.TFOutput, b_shape.TFOutput, thresh.TFOutput, Option.toObj operName)
/// <summary>
/// The gradient operator for the SparseAdd op.
/// </summary><param name="backprop_val_grad">
/// 1-D with shape <c>[nnz(sum)]</c>. The gradient with respect to
/// the non-empty values of the sum.
/// </param><param name="a_indices">
/// 2-D. The <c>indices</c> of the <c>SparseTensor</c> A, size <c>[nnz(A), ndims]</c>.
/// </param><param name="b_indices">
/// 2-D. The <c>indices</c> of the <c>SparseTensor</c> B, size <c>[nnz(B), ndims]</c>.
/// </param><param name="sum_indices">
/// 2-D. The <c>indices</c> of the sum <c>SparseTensor</c>, size
/// <c>[nnz(sum), ndims]</c>.
/// </param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'SparseAddGrad'.
/// </param><returns>
/// Returns a tuple with multiple values, as follows:
/// a_val_grad: 1-D with shape <c>[nnz(A)]</c>. The gradient with respect to the
/// non-empty values of A.
/// b_val_grad: 1-D with shape <c>[nnz(B)]</c>. The gradient with respect to the
/// non-empty values of B.
/// The TFOperation can be fetched from any of the TFOutputs returned in the tuple values, by fetching the Operation property.
/// </returns><remarks>
/// The SparseAdd op calculates A + B, where A, B, and the sum are all represented
/// as <c>SparseTensor</c> objects. This op takes in the upstream gradient w.r.t.
/// non-empty values of the sum, and outputs the gradients w.r.t. the non-empty
/// values of A and B.
/// </remarks>
member this.SparseAddGrad(backprop_val_grad : TF, a_indices : TF, b_indices : TF, sum_indices : TF, ?operName : String) =
    // Multi-output op: return the (a_val_grad, b_val_grad) tuple as-is.
    this.TFGraph.SparseAddGrad(backprop_val_grad.TFOutput, a_indices.TFOutput, b_indices.TFOutput, sum_indices.TFOutput, Option.toObj operName)
/// <summary>
/// var: Should be from a Variable().
/// </summary><param name="var"></param><param name="accum">
/// Should be from a Variable().
/// </param><param name="accum_update">
/// : Should be from a Variable().
/// </param><param name="lr">
/// Learning rate. Must be a scalar.
/// </param><param name="rho">
/// Decay factor. Must be a scalar.
/// </param><param name="epsilon">
/// Constant factor. Must be a scalar.
/// </param><param name="grad">
/// The gradient.
/// </param><param name="indices">
/// A vector of indices into the first dimension of var and accum.
/// </param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'SparseApplyAdadelta'.
/// </param><param name="use_locking">
/// Optional argument
/// If True, updating of the var and accum tensors will be protected by
/// a lock; otherwise the behavior is undefined, but may exhibit less contention.
/// </param><returns>
/// Same as "var".
/// The TFOperation can be fetched from the resulting TFOutput, by fetching the Operation property from the result.
/// </returns>
member this.SparseApplyAdadelta(var : TF, accum : TF, accum_update : TF, lr : TF, rho : TF, epsilon : TF, grad : TF, indices : TF, ?use_locking : Boolean, ?operName : String) =
    // Optional F# flags map directly onto the Nullable/null parameters of the C# API.
    let graph = this.TFGraph
    TF(graph, graph.SparseApplyAdadelta(var.TFOutput, accum.TFOutput, accum_update.TFOutput, lr.TFOutput, rho.TFOutput, epsilon.TFOutput, grad.TFOutput, indices.TFOutput, Option.toNullable use_locking, Option.toObj operName))
/// <summary>
/// Update relevant entries in '*var' and '*accum' according to the adagrad scheme.
/// </summary><param name="var">
/// Should be from a Variable().
/// </param><param name="accum">
/// Should be from a Variable().
/// </param><param name="lr">
/// Learning rate. Must be a scalar.
/// </param><param name="grad">
/// The gradient.
/// </param><param name="indices">
/// A vector of indices into the first dimension of var and accum.
/// </param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'SparseApplyAdagrad'.
/// </param><param name="use_locking">
/// Optional argument
/// If <c>True</c>, updating of the var and accum tensors will be protected
/// by a lock; otherwise the behavior is undefined, but may exhibit less
/// contention.
/// </param><param name="update_slots">
/// Optional argument
/// </param><returns>
/// Same as "var".
/// The TFOperation can be fetched from the resulting TFOutput, by fetching the Operation property from the result.
/// </returns><remarks>
/// That is for rows we have grad for, we update var and accum as follows:
/// $$accum += grad * grad$$
/// $$var -= lr * grad * (1 / sqrt(accum))$$
/// </remarks>
member this.SparseApplyAdagrad(var : TF, accum : TF, lr : TF, grad : TF, indices : TF, ?use_locking : Boolean, ?update_slots : Boolean, ?operName : String) =
    // Optional F# flags map directly onto the Nullable/null parameters of the C# API.
    let graph = this.TFGraph
    TF(graph, graph.SparseApplyAdagrad(var.TFOutput, accum.TFOutput, lr.TFOutput, grad.TFOutput, indices.TFOutput, Option.toNullable use_locking, Option.toNullable update_slots, Option.toObj operName))
/// <summary>
/// Update entries in '*var' and '*accum' according to the proximal adagrad scheme.
/// </summary><param name="var">
/// Should be from a Variable().
/// </param><param name="gradient_accumulator">
/// Should be from a Variable().
/// </param><param name="gradient_squared_accumulator">
/// Should be from a Variable().
/// </param><param name="grad">
/// The gradient.
/// </param><param name="indices">
/// A vector of indices into the first dimension of var and accum.
/// </param><param name="lr">
/// Learning rate. Must be a scalar.
/// </param><param name="l1">
/// L1 regularization. Must be a scalar.
/// </param><param name="l2">
/// L2 regularization. Must be a scalar.
/// </param><param name="global_step">
/// Training step number. Must be a scalar.
/// </param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'SparseApplyAdagradDA'.
/// </param><param name="use_locking">
/// Optional argument
/// If True, updating of the var and accum tensors will be protected by
/// a lock; otherwise the behavior is undefined, but may exhibit less contention.
/// </param><returns>
/// Same as "var".
/// The TFOperation can be fetched from the resulting TFOutput, by fetching the Operation property from the result.
/// </returns>
member this.SparseApplyAdagradDA(var : TF, gradient_accumulator : TF, gradient_squared_accumulator : TF, grad : TF, indices : TF, lr : TF, l1 : TF, l2 : TF, global_step : TF, ?use_locking : Boolean, ?operName : String) =
    // Optional F# flags map directly onto the Nullable/null parameters of the C# API.
    let graph = this.TFGraph
    TF(graph, graph.SparseApplyAdagradDA(var.TFOutput, gradient_accumulator.TFOutput, gradient_squared_accumulator.TFOutput, grad.TFOutput, indices.TFOutput, lr.TFOutput, l1.TFOutput, l2.TFOutput, global_step.TFOutput, Option.toNullable use_locking, Option.toObj operName))
/// <summary>
/// Update '*var' according to the centered RMSProp algorithm.
/// </summary><param name="var">
/// Should be from a Variable().
/// </param><param name="mg">
/// Should be from a Variable().
/// </param><param name="ms">
/// Should be from a Variable().
/// </param><param name="mom">
/// Should be from a Variable().
/// </param><param name="lr">
/// Scaling factor. Must be a scalar.
/// </param><param name="rho">
/// Decay rate. Must be a scalar.
/// </param><param name="momentum"></param><param name="epsilon">
/// Ridge term. Must be a scalar.
/// </param><param name="grad">
/// The gradient.
/// </param><param name="indices">
/// A vector of indices into the first dimension of var, ms and mom.
/// </param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'SparseApplyCenteredRMSProp'.
/// </param><param name="use_locking">
/// Optional argument
/// If <c>True</c>, updating of the var, mg, ms, and mom tensors is
/// protected by a lock; otherwise the behavior is undefined, but may exhibit less
/// contention.
/// </param><returns>
/// Same as "var".
/// The TFOperation can be fetched from the resulting TFOutput, by fetching the Operation property from the result.
/// </returns><remarks>
/// The centered RMSProp algorithm uses an estimate of the centered second moment
/// (i.e., the variance) for normalization, as opposed to regular RMSProp, which
/// uses the (uncentered) second moment. This often helps with training, but is
/// slightly more expensive in terms of computation and memory.
///
/// Note that in dense implementation of this algorithm, mg, ms, and mom will
/// update even if the grad is zero, but in this sparse implementation, mg, ms,
/// and mom will not update in iterations during which the grad is zero.
///
/// mean_square = decay * mean_square + (1-decay) * gradient ** 2
/// mean_grad = decay * mean_grad + (1-decay) * gradient
/// Delta = learning_rate * gradient / sqrt(mean_square + epsilon - mean_grad ** 2)
///
/// $$ms &amp;lt;- rho * ms_{t-1} + (1-rho) * grad * grad$$
/// $$mom &amp;lt;- momentum * mom_{t-1} + lr * grad / sqrt(ms + epsilon)$$
/// $$var &amp;lt;- var - mom$$
/// </remarks>
member this.SparseApplyCenteredRMSProp(var : TF, mg : TF, ms : TF, mom : TF, lr : TF, rho : TF, momentum : TF, epsilon : TF, grad : TF, indices : TF, ?use_locking : Boolean, ?operName : String) =
    // Optional F# flags map directly onto the Nullable/null parameters of the C# API.
    let graph = this.TFGraph
    TF(graph, graph.SparseApplyCenteredRMSProp(var.TFOutput, mg.TFOutput, ms.TFOutput, mom.TFOutput, lr.TFOutput, rho.TFOutput, momentum.TFOutput, epsilon.TFOutput, grad.TFOutput, indices.TFOutput, Option.toNullable use_locking, Option.toObj operName))
/// <summary>
/// Update relevant entries in '*var' according to the Ftrl-proximal scheme.
/// </summary><param name="var">
/// Should be from a Variable().
/// </param><param name="accum">
/// Should be from a Variable().
/// </param><param name="linear">
/// Should be from a Variable().
/// </param><param name="grad">
/// The gradient.
/// </param><param name="indices">
/// A vector of indices into the first dimension of var and accum.
/// </param><param name="lr">
/// Scaling factor. Must be a scalar.
/// </param><param name="l1">
/// L1 regularization. Must be a scalar.
/// </param><param name="l2">
/// L2 regularization. Must be a scalar.
/// </param><param name="lr_power">
/// Scaling factor. Must be a scalar.
/// </param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'SparseApplyFtrl'.
/// </param><param name="use_locking">
/// Optional argument
/// If <c>True</c>, updating of the var and accum tensors will be protected
/// by a lock; otherwise the behavior is undefined, but may exhibit less
/// contention.
/// </param><returns>
/// Same as "var".
/// The TFOperation can be fetched from the resulting TFOutput, by fetching the Operation property from the result.
/// </returns><remarks>
/// That is for rows we have grad for, we update var, accum and linear as follows:
/// $$accum_new = accum + grad * grad$$
/// $$linear += grad + (accum_{new}^{-lr_{power}} - accum^{-lr_{power}}) / lr * var$$
/// $$quadratic = 1.0 / (accum_{new}^{lr_{power}} * lr) + 2 * l2$$
/// $$var = (sign(linear) * l1 - linear) / quadratic\ if\ |linear| &amp;gt; l1\ else\ 0.0$$
/// $$accum = accum_{new}$$
/// </remarks>
member this.SparseApplyFtrl(var : TF, accum : TF, linear : TF, grad : TF, indices : TF, lr : TF, l1 : TF, l2 : TF, lr_power : TF, ?use_locking : Boolean, ?operName : String) =
    // Optional F# flags map directly onto the Nullable/null parameters of the C# API.
    let graph = this.TFGraph
    TF(graph, graph.SparseApplyFtrl(var.TFOutput, accum.TFOutput, linear.TFOutput, grad.TFOutput, indices.TFOutput, lr.TFOutput, l1.TFOutput, l2.TFOutput, lr_power.TFOutput, Option.toNullable use_locking, Option.toObj operName))
/// <summary>
/// Update relevant entries in '*var' according to the Ftrl-proximal scheme.
/// </summary><param name="var">
/// Should be from a Variable().
/// </param><param name="accum">
/// Should be from a Variable().
/// </param><param name="linear">
/// Should be from a Variable().
/// </param><param name="grad">
/// The gradient.
/// </param><param name="indices">
/// A vector of indices into the first dimension of var and accum.
/// </param><param name="lr">
/// Scaling factor. Must be a scalar.
/// </param><param name="l1">
/// L1 regularization. Must be a scalar.
/// </param><param name="l2">
/// L2 shrinkage regularization. Must be a scalar.
/// </param><param name="l2_shrinkage"></param><param name="lr_power">
/// Scaling factor. Must be a scalar.
/// </param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'SparseApplyFtrlV2'.
/// </param><param name="use_locking">
/// Optional argument
/// If <c>True</c>, updating of the var and accum tensors will be protected
/// by a lock; otherwise the behavior is undefined, but may exhibit less
/// contention.
/// </param><returns>
/// Same as "var".
/// The TFOperation can be fetched from the resulting TFOutput, by fetching the Operation property from the result.
/// </returns><remarks>
/// That is for rows we have grad for, we update var, accum and linear as follows:
/// grad_with_shrinkage = grad + 2 * l2_shrinkage * var
/// accum_new = accum + grad_with_shrinkage * grad_with_shrinkage
/// linear += grad_with_shrinkage +
/// (accum_new^(-lr_power) - accum^(-lr_power)) / lr * var
/// quadratic = 1.0 / (accum_new^(lr_power) * lr) + 2 * l2
/// var = (sign(linear) * l1 - linear) / quadratic if |linear| &amp;gt; l1 else 0.0
/// accum = accum_new
/// </remarks>
member this.SparseApplyFtrlV2(var : TF, accum : TF, linear : TF, grad : TF, indices : TF, lr : TF, l1 : TF, l2 : TF, l2_shrinkage : TF, lr_power : TF, ?use_locking : Boolean, ?operName : String) =
    // Optional F# flags map directly onto the Nullable/null parameters of the C# API.
    let graph = this.TFGraph
    TF(graph, graph.SparseApplyFtrlV2(var.TFOutput, accum.TFOutput, linear.TFOutput, grad.TFOutput, indices.TFOutput, lr.TFOutput, l1.TFOutput, l2.TFOutput, l2_shrinkage.TFOutput, lr_power.TFOutput, Option.toNullable use_locking, Option.toObj operName))
/// <summary>
/// Update relevant entries in '*var' and '*accum' according to the momentum scheme.
/// </summary><param name="var">
/// Should be from a Variable().
/// </param><param name="accum">
/// Should be from a Variable().
/// </param><param name="lr">
/// Learning rate. Must be a scalar.
/// </param><param name="grad">
/// The gradient.
/// </param><param name="indices">
/// A vector of indices into the first dimension of var and accum.
/// </param><param name="momentum">
/// Momentum. Must be a scalar.
/// </param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'SparseApplyMomentum'.
/// </param><param name="use_locking">
/// Optional argument
/// If <c>True</c>, updating of the var and accum tensors will be protected
/// by a lock; otherwise the behavior is undefined, but may exhibit less
/// contention.
/// </param><param name="use_nesterov">
/// Optional argument
/// If <c>True</c>, the tensor passed to compute grad will be
/// var - lr * momentum * accum, so in the end, the var you get is actually
/// var - lr * momentum * accum.
/// </param><returns>
/// Same as "var".
/// The TFOperation can be fetched from the resulting TFOutput, by fetching the Operation property from the result.
/// </returns><remarks>
/// Set use_nesterov = True if you want to use Nesterov momentum.
///
/// That is for rows we have grad for, we update var and accum as follows:
///
/// $$accum = accum * momentum + grad$$
/// $$var -= lr * accum$$
/// </remarks>
member this.SparseApplyMomentum(var : TF, accum : TF, lr : TF, grad : TF, indices : TF, momentum : TF, ?use_locking : Boolean, ?use_nesterov : Boolean, ?operName : String) =
    // Optional F# flags map directly onto the Nullable/null parameters of the C# API.
    let graph = this.TFGraph
    TF(graph, graph.SparseApplyMomentum(var.TFOutput, accum.TFOutput, lr.TFOutput, grad.TFOutput, indices.TFOutput, momentum.TFOutput, Option.toNullable use_locking, Option.toNullable use_nesterov, Option.toObj operName))
/// <summary>
/// Sparse update entries in '*var' and '*accum' according to FOBOS algorithm.
/// </summary><param name="var">
/// Should be from a Variable().
/// </param><param name="accum">
/// Should be from a Variable().
/// </param><param name="lr">
/// Learning rate. Must be a scalar.
/// </param><param name="l1">
/// L1 regularization. Must be a scalar.
/// </param><param name="l2">
/// L2 regularization. Must be a scalar.
/// </param><param name="grad">
/// The gradient.
/// </param><param name="indices">
/// A vector of indices into the first dimension of var and accum.
/// </param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'SparseApplyProximalAdagrad'.
/// </param><param name="use_locking">
/// Optional argument
/// If True, updating of the var and accum tensors will be protected by
/// a lock; otherwise the behavior is undefined, but may exhibit less contention.
/// </param><returns>
/// Same as "var".
/// The TFOperation can be fetched from the resulting TFOutput, by fetching the Operation property from the result.
/// </returns><remarks>
/// That is for rows we have grad for, we update var and accum as follows:
/// $$accum += grad * grad$$
/// $$prox_v = var$$
/// $$prox_v -= lr * grad * (1 / sqrt(accum))$$
/// $$var = sign(prox_v)/(1+lr*l2) * max{|prox_v|-lr*l1,0}$$
/// </remarks>
member this.SparseApplyProximalAdagrad(var : TF, accum : TF, lr : TF, l1 : TF, l2 : TF, grad : TF, indices : TF, ?use_locking : Boolean, ?operName : String) =
    // Optional F# flags map directly onto the Nullable/null parameters of the C# API.
    let graph = this.TFGraph
    TF(graph, graph.SparseApplyProximalAdagrad(var.TFOutput, accum.TFOutput, lr.TFOutput, l1.TFOutput, l2.TFOutput, grad.TFOutput, indices.TFOutput, Option.toNullable use_locking, Option.toObj operName))
/// <summary>
/// Sparse update '*var' as FOBOS algorithm with fixed learning rate.
/// </summary><param name="var">
/// Should be from a Variable().
/// </param><param name="alpha">
/// Scaling factor. Must be a scalar.
/// </param><param name="l1">
/// L1 regularization. Must be a scalar.
/// </param><param name="l2">
/// L2 regularization. Must be a scalar.
/// </param><param name="grad">
/// The gradient.
/// </param><param name="indices">
/// A vector of indices into the first dimension of var and accum.
/// </param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'SparseApplyProximalGradientDescent'.
/// </param><param name="use_locking">
/// Optional argument
/// If True, the subtraction will be protected by a lock;
/// otherwise the behavior is undefined, but may exhibit less contention.
/// </param><returns>
/// Same as "var".
/// The TFOperation can be fetched from the resulting TFOutput, by fetching the Operation property from the result.
/// </returns><remarks>
/// That is for rows we have grad for, we update var as follows:
/// $$prox_v = var - alpha * grad$$
/// $$var = sign(prox_v)/(1+alpha*l2) * max{|prox_v|-alpha*l1,0}$$
/// </remarks>
member this.SparseApplyProximalGradientDescent(var : TF, alpha : TF, l1 : TF, l2 : TF, grad : TF, indices : TF, ?use_locking : Boolean, ?operName : String) =
    // Optional F# flags map directly onto the Nullable/null parameters of the C# API.
    let graph = this.TFGraph
    TF(graph, graph.SparseApplyProximalGradientDescent(var.TFOutput, alpha.TFOutput, l1.TFOutput, l2.TFOutput, grad.TFOutput, indices.TFOutput, Option.toNullable use_locking, Option.toObj operName))
/// <summary>
/// Update '*var' according to the RMSProp algorithm.
/// </summary><param name="var">
/// Should be from a Variable().
/// </param><param name="ms">
/// Should be from a Variable().
/// </param><param name="mom">
/// Should be from a Variable().
/// </param><param name="lr">
/// Scaling factor. Must be a scalar.
/// </param><param name="rho">
/// Decay rate. Must be a scalar.
/// </param><param name="momentum"></param><param name="epsilon">
/// Ridge term. Must be a scalar.
/// </param><param name="grad">
/// The gradient.
/// </param><param name="indices">
/// A vector of indices into the first dimension of var, ms and mom.
/// </param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'SparseApplyRMSProp'.
/// </param><param name="use_locking">
/// Optional argument
/// If <c>True</c>, updating of the var, ms, and mom tensors is protected
/// by a lock; otherwise the behavior is undefined, but may exhibit less
/// contention.
/// </param><returns>
/// Same as "var".
/// The TFOperation can be fetched from the resulting TFOutput, by fetching the Operation property from the result.
/// </returns><remarks>
/// Note that in dense implementation of this algorithm, ms and mom will
/// update even if the grad is zero, but in this sparse implementation, ms
/// and mom will not update in iterations during which the grad is zero.
///
/// mean_square = decay * mean_square + (1-decay) * gradient ** 2
/// Delta = learning_rate * gradient / sqrt(mean_square + epsilon)
///
/// $$ms &amp;lt;- rho * ms_{t-1} + (1-rho) * grad * grad$$
/// $$mom &amp;lt;- momentum * mom_{t-1} + lr * grad / sqrt(ms + epsilon)$$
/// $$var &amp;lt;- var - mom$$
/// </remarks>
member this.SparseApplyRMSProp(var : TF, ms : TF, mom : TF, lr : TF, rho : TF, momentum : TF, epsilon : TF, grad : TF, indices : TF, ?use_locking : Boolean, ?operName : String) =
    // Optional F# flags map directly onto the Nullable/null parameters of the C# API.
    let graph = this.TFGraph
    TF(graph, graph.SparseApplyRMSProp(var.TFOutput, ms.TFOutput, mom.TFOutput, lr.TFOutput, rho.TFOutput, momentum.TFOutput, epsilon.TFOutput, grad.TFOutput, indices.TFOutput, Option.toNullable use_locking, Option.toObj operName))
/// <summary>
/// Concatenates a list of <c>SparseTensor</c> along the specified dimension.
/// </summary><param name="indices">
/// 2-D. Indices of each input <c>SparseTensor</c>.
/// </param><param name="values">
/// 1-D. Non-empty values of each <c>SparseTensor</c>.
/// </param><param name="shapes">
/// 1-D. Shapes of each <c>SparseTensor</c>.
/// </param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'SparseConcat'.
/// </param><param name="concat_dim">
/// Dimension to concatenate along. Must be in range [-rank, rank),
/// where rank is the number of dimensions in each input <c>SparseTensor</c>.
/// </param><returns>
/// Returns a tuple with multiple values, as follows:
/// output_indices: 2-D. Indices of the concatenated <c>SparseTensor</c>.
/// output_values: 1-D. Non-empty values of the concatenated <c>SparseTensor</c>.
/// output_shape: 1-D. Shape of the concatenated <c>SparseTensor</c>.
/// The TFOperation can be fetched from any of the TFOutputs returned in the tuple values, by fetching the Operation property.
/// </returns><remarks>
/// Concatenation is with respect to the dense versions of these sparse tensors.
/// It is assumed that each input is a <c>SparseTensor</c> whose elements are ordered
/// along increasing dimension number.
///
/// All inputs' shapes must match, except for the concat dimension. The
/// <c>indices</c>, <c>values</c>, and <c>shapes</c> lists must have the same length.
///
/// The output shape is identical to the inputs', except along the concat
/// dimension, where it is the sum of the inputs' sizes along that dimension.
///
/// The output elements will be resorted to preserve the sort order along
/// increasing dimension number.
///
/// This op runs in <c>O(M log M)</c> time, where <c>M</c> is the total number of non-empty
/// values across all inputs. This is due to the need for an internal sort in
/// order to concatenate efficiently across an arbitrary dimension.
///
/// For example, if <c>concat_dim = 1</c> and the inputs are
///
/// sp_inputs[0]: shape = [2, 3]
/// [0, 2]: "a"
/// [1, 0]: "b"
/// [1, 1]: "c"
///
/// sp_inputs[1]: shape = [2, 4]
/// [0, 1]: "d"
/// [0, 2]: "e"
///
/// then the output will be
///
/// shape = [2, 7]
/// [0, 2]: "a"
/// [0, 4]: "d"
/// [0, 5]: "e"
/// [1, 0]: "b"
/// [1, 1]: "c"
///
/// Graphically this is equivalent to doing
///
/// [ a] concat [ d e ] = [ a d e ]
/// [b c ] [ ] [b c ]
/// </remarks>
member this.SparseConcat(indices : TF[], values : TF[], shapes : TF[], concat_dim : Int64, ?operName : String) =
    // Unwrap each TF handle to its underlying TFOutput before delegating to the graph op.
    let unwrap (ts : TF[]) = ts |> Array.map (fun t -> t.TFOutput)
    this.TFGraph.SparseConcat(unwrap indices, unwrap values, unwrap shapes, concat_dim, Option.toObj operName)
/// <summary>
/// A conditional accumulator for aggregating sparse gradients.
/// </summary><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'SparseConditionalAccumulator'.
/// </param><param name="container">
/// Optional argument
/// If non-empty, this accumulator is placed in the given container.
/// Otherwise, a default container is used.
/// </param><param name="shared_name">
/// Optional argument
/// If non-empty, this accumulator will be shared under the given name
/// across multiple sessions.
/// </param><param name="reduction_type">
/// Optional argument
/// </param><param name="dtype">
/// The type of the value being accumulated.
/// </param><param name="shape">
/// The shape of the values.
/// </param><returns>
/// The handle to the accumulator.
/// The TFOperation can be fetched from the resulting TFOutput, by fetching the Operation property from the result.
/// </returns><remarks>
/// The accumulator accepts gradients marked with local_step greater or
/// equal to the most recent global_step known to the accumulator. The
/// average can be extracted from the accumulator, provided sufficient
/// gradients have been accumulated. Extracting the average automatically
/// resets the aggregate to 0, and increments the global_step recorded by
/// the accumulator.
/// </remarks>
member this.SparseConditionalAccumulator(dtype : TFDataType, shape : TFShape, ?container : String, ?shared_name : String, ?reduction_type : String, ?operName : String) =
    // Absent optional strings become null, which TensorFlowSharp treats as "use the default".
    let g = this.TFGraph
    let handle =
        g.SparseConditionalAccumulator(
            dtype, shape,
            Option.toObj container,
            Option.toObj shared_name,
            Option.toObj reduction_type,
            Option.toObj operName)
    TF(g, handle)
/// <summary>
/// Generates sparse cross from a list of sparse and dense tensors.
/// </summary><param name="indices">
/// 2-D. Indices of each input <c>SparseTensor</c>.
/// </param><param name="values">
/// 1-D. values of each <c>SparseTensor</c>.
/// </param><param name="shapes">
/// 1-D. Shapes of each <c>SparseTensor</c>.
/// </param><param name="dense_inputs">
/// 2-D. Columns represented by dense <c>Tensor</c>.
/// </param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'SparseCross'.
/// </param><param name="hashed_output">
/// If true, returns the hash of the cross instead of the string.
/// This will allow us to avoid string manipulations.
/// </param><param name="num_buckets">
/// It is used if hashed_output is true.
/// output = hashed_value%num_buckets if num_buckets &gt; 0 else hashed_value.
/// </param><param name="hash_key">
/// Specify the hash_key that will be used by the <c>FingerprintCat64</c>
/// function to combine the crosses fingerprints.
/// </param><param name="out_type"></param><param name="internal_type"></param><returns>
/// Returns a tuple with multiple values, as follows:
/// output_indices: 2-D. Indices of the concatenated <c>SparseTensor</c>.
/// output_values: 1-D. Non-empty values of the concatenated or hashed
/// <c>SparseTensor</c>.
/// output_shape: 1-D. Shape of the concatenated <c>SparseTensor</c>.
/// The TFOperation can be fetched from any of the TFOutputs returned in the tuple values, by fetching the Operation property.
/// </returns><remarks>
/// The op takes two lists, one of 2D <c>SparseTensor</c> and one of 2D <c>Tensor</c>, each
/// representing features of one feature column. It outputs a 2D <c>SparseTensor</c> with
/// the batchwise crosses of these features.
///
/// For example, if the inputs are
///
/// inputs[0]: SparseTensor with shape = [2, 2]
/// [0, 0]: "a"
/// [1, 0]: "b"
/// [1, 1]: "c"
///
/// inputs[1]: SparseTensor with shape = [2, 1]
/// [0, 0]: "d"
/// [1, 0]: "e"
///
/// inputs[2]: Tensor [["f"], ["g"]]
///
/// then the output will be
///
/// shape = [2, 2]
/// [0, 0]: "a_X_d_X_f"
/// [1, 0]: "b_X_e_X_g"
/// [1, 1]: "c_X_e_X_g"
///
/// if hashed_output=true then the output will be
///
/// shape = [2, 2]
/// [0, 0]: FingerprintCat64(
/// Fingerprint64("f"), FingerprintCat64(
/// Fingerprint64("d"), Fingerprint64("a")))
/// [1, 0]: FingerprintCat64(
/// Fingerprint64("g"), FingerprintCat64(
/// Fingerprint64("e"), Fingerprint64("b")))
/// [1, 1]: FingerprintCat64(
/// Fingerprint64("g"), FingerprintCat64(
/// Fingerprint64("e"), Fingerprint64("c")))
/// </remarks>
member this.SparseCross(indices : TF[], values : TF[], shapes : TF[], dense_inputs : TF[], hashed_output : Boolean, num_buckets : Int64, hash_key : Int64, out_type : TFDataType, internal_type : TFDataType, ?operName : String) =
    // Unwrap every TF handle array to raw TFOutputs and forward to the graph op,
    // returning the graph's (output_indices, output_values, output_shape) tuple as-is.
    let unwrap (ts : TF[]) = ts |> Array.map (fun t -> t.TFOutput)
    this.TFGraph.SparseCross(unwrap indices, unwrap values, unwrap shapes, unwrap dense_inputs, hashed_output, num_buckets, hash_key, out_type, internal_type, Option.toObj operName)
/// <summary>
/// Adds up a SparseTensor and a dense Tensor, using these special rules:
/// </summary><param name="sp_indices">
/// 2-D. <c>N x R</c> matrix with the indices of non-empty values in a
/// SparseTensor, possibly not in canonical ordering.
/// </param><param name="sp_values">
/// 1-D. <c>N</c> non-empty values corresponding to <c>sp_indices</c>.
/// </param><param name="sp_shape">
/// 1-D. Shape of the input SparseTensor.
/// </param><param name="dense"><c>R</c>-D. The dense Tensor operand.
/// </param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'SparseDenseCwiseAdd'.
/// </param><returns>
/// 1-D. The <c>N</c> values that are operated on.
/// The TFOperation can be fetched from the resulting TFOutput, by fetching the Operation property from the result.
/// </returns><remarks>
/// (1) Broadcasts the dense side to have the same shape as the sparse side, if
/// eligible;
/// (2) Then, only the dense values pointed to by the indices of the SparseTensor
/// participate in the cwise addition.
///
/// By these rules, the result is a logical SparseTensor with exactly the same
/// indices and shape, but possibly with different non-zero values. The output of
/// this Op is the resultant non-zero values.
/// </remarks>
member this.SparseDenseCwiseAdd(sp_indices : TF, sp_values : TF, sp_shape : TF, dense : TF, ?operName : String) =
    // Delegate to the graph op and rewrap the single output in a TF handle.
    let g = this.TFGraph
    TF(g, g.SparseDenseCwiseAdd(sp_indices.TFOutput, sp_values.TFOutput, sp_shape.TFOutput, dense.TFOutput, Option.toObj operName))
/// <summary>
/// Component-wise divides a SparseTensor by a dense Tensor.
/// </summary><param name="sp_indices">
/// 2-D. <c>N x R</c> matrix with the indices of non-empty values in a
/// SparseTensor, possibly not in canonical ordering.
/// </param><param name="sp_values">
/// 1-D. <c>N</c> non-empty values corresponding to <c>sp_indices</c>.
/// </param><param name="sp_shape">
/// 1-D. Shape of the input SparseTensor.
/// </param><param name="dense"><c>R</c>-D. The dense Tensor operand.
/// </param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'SparseDenseCwiseDiv'.
/// </param><returns>
/// 1-D. The <c>N</c> values that are operated on.
/// The TFOperation can be fetched from the resulting TFOutput, by fetching the Operation property from the result.
/// </returns><remarks>
/// *Limitation*: this Op only broadcasts the dense side to the sparse side, but not
/// the other direction.
/// </remarks>
member this.SparseDenseCwiseDiv(sp_indices : TF, sp_values : TF, sp_shape : TF, dense : TF, ?operName : String) =
    // Delegate to the graph op and rewrap the single output in a TF handle.
    let g = this.TFGraph
    TF(g, g.SparseDenseCwiseDiv(sp_indices.TFOutput, sp_values.TFOutput, sp_shape.TFOutput, dense.TFOutput, Option.toObj operName))
/// <summary>
/// Component-wise multiplies a SparseTensor by a dense Tensor.
/// </summary><param name="sp_indices">
/// 2-D. <c>N x R</c> matrix with the indices of non-empty values in a
/// SparseTensor, possibly not in canonical ordering.
/// </param><param name="sp_values">
/// 1-D. <c>N</c> non-empty values corresponding to <c>sp_indices</c>.
/// </param><param name="sp_shape">
/// 1-D. Shape of the input SparseTensor.
/// </param><param name="dense"><c>R</c>-D. The dense Tensor operand.
/// </param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'SparseDenseCwiseMul'.
/// </param><returns>
/// 1-D. The <c>N</c> values that are operated on.
/// The TFOperation can be fetched from the resulting TFOutput, by fetching the Operation property from the result.
/// </returns><remarks>
/// The output locations corresponding to the implicitly zero elements in the sparse
/// tensor will be zero (i.e., will not take up storage space), regardless of the
/// contents of the dense tensor (even if it's +/-INF and that INF*0 == NaN).
///
/// *Limitation*: this Op only broadcasts the dense side to the sparse side, but not
/// the other direction.
/// </remarks>
member this.SparseDenseCwiseMul(sp_indices : TF, sp_values : TF, sp_shape : TF, dense : TF, ?operName : String) =
    // Delegate to the graph op and rewrap the single output in a TF handle.
    let g = this.TFGraph
    TF(g, g.SparseDenseCwiseMul(sp_indices.TFOutput, sp_values.TFOutput, sp_shape.TFOutput, dense.TFOutput, Option.toObj operName))
/// <summary>
/// Fills empty rows in the input 2-D <c>SparseTensor</c> with a default value.
/// </summary><param name="indices">
/// 2-D. the indices of the sparse tensor.
/// </param><param name="values">
/// 1-D. the values of the sparse tensor.
/// </param><param name="dense_shape">
/// 1-D. the shape of the sparse tensor.
/// </param><param name="default_value">
/// 0-D. default value to insert into location <c>[row, 0, ..., 0]</c>
/// for rows missing from the input sparse tensor.
/// output indices: 2-D. the indices of the filled sparse tensor.
/// </param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'SparseFillEmptyRows'.
/// </param><returns>
/// Returns a tuple with multiple values, as follows:
/// output_indices:
/// output_values: 1-D. the values of the filled sparse tensor.
/// empty_row_indicator: 1-D. whether the dense row was missing in the
/// input sparse tensor.
/// reverse_index_map: 1-D. a map from the input indices to the output indices.
/// The TFOperation can be fetched from any of the TFOutputs returned in the tuple values, by fetching the Operation property.
/// </returns><remarks>
/// The input <c>SparseTensor</c> is represented via the tuple of inputs
/// (<c>indices</c>, <c>values</c>, <c>dense_shape</c>). The output <c>SparseTensor</c> has the
/// same <c>dense_shape</c> but with indices <c>output_indices</c> and values
/// <c>output_values</c>.
///
/// This op inserts a single entry for every row that doesn't have any values.
/// The index is created as <c>[row, 0, ..., 0]</c> and the inserted value
/// is <c>default_value</c>.
///
/// For example, suppose <c>sp_input</c> has shape <c>[5, 6]</c> and non-empty values:
///
/// [0, 1]: a
/// [0, 3]: b
/// [2, 0]: c
/// [3, 1]: d
///
/// Rows 1 and 4 are empty, so the output will be of shape <c>[5, 6]</c> with values:
///
/// [0, 1]: a
/// [0, 3]: b
/// [1, 0]: default_value
/// [2, 0]: c
/// [3, 1]: d
/// [4, 0]: default_value
///
/// The output <c>SparseTensor</c> will be in row-major order and will have the
/// same shape as the input.
///
/// This op also returns an indicator vector shaped <c>[dense_shape[0]]</c> such that
///
/// empty_row_indicator[i] = True iff row i was an empty row.
///
/// And a reverse index map vector shaped <c>[indices.shape[0]]</c> that is used during
/// backpropagation,
///
/// reverse_index_map[j] = out_j s.t. indices[j, :] == output_indices[out_j, :]
/// </remarks>
member this.SparseFillEmptyRows(indices : TF, values : TF, dense_shape : TF, default_value : TF, ?operName : String) =
    // Tuple-returning op: forward the unwrapped TFOutputs and hand back the graph's tuple unchanged.
    this.TFGraph.SparseFillEmptyRows(indices.TFOutput, values.TFOutput, dense_shape.TFOutput, default_value.TFOutput, Option.toObj operName)
/// <summary>
/// The gradient of SparseFillEmptyRows.
/// </summary><param name="reverse_index_map">
/// 1-D. The reverse index map from SparseFillEmptyRows.
/// </param><param name="grad_values">
/// 1-D. The gradients from backprop.
/// </param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'SparseFillEmptyRowsGrad'.
/// </param><returns>
/// Returns a tuple with multiple values, as follows:
/// d_values: 1-D. The backprop into values.
/// d_default_value: 0-D. The backprop into default_value.
/// The TFOperation can be fetched from any of the TFOutputs returned in the tuple values, by fetching the Operation property.
/// </returns><remarks>
/// Takes vectors reverse_index_map, shaped <c>[N]</c>, and grad_values,
/// shaped <c>[N_full]</c>, where <c>N_full &gt;= N</c> and copies data into either
/// <c>d_values</c> or <c>d_default_value</c>. Here <c>d_values</c> is shaped <c>[N]</c> and
/// <c>d_default_value</c> is a scalar.
///
/// d_values[j] = grad_values[reverse_index_map[j]]
/// d_default_value = sum_{k : 0 .. N_full - 1} (
/// grad_values[k] * 1{k not in reverse_index_map})
/// </remarks>
member this.SparseFillEmptyRowsGrad(reverse_index_map : TF, grad_values : TF, ?operName : String) =
    // Tuple-returning op: forward the unwrapped TFOutputs directly.
    this.TFGraph.SparseFillEmptyRowsGrad(reverse_index_map.TFOutput, grad_values.TFOutput, Option.toObj operName)
/// <summary>
/// Multiply matrix "a" by matrix "b".
/// </summary><param name="a"></param><param name="b"></param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'SparseMatMul'.
/// </param><param name="transpose_a">
/// Optional argument
/// </param><param name="transpose_b">
/// Optional argument
/// </param><param name="a_is_sparse">
/// Optional argument
/// </param><param name="b_is_sparse">
/// Optional argument
/// </param><returns>
/// The TFOperation can be fetched from the resulting TFOutput, by fetching the Operation property from the result.
/// </returns><remarks>
/// The inputs must be two-dimensional matrices and the inner dimension of "a" must
/// match the outer dimension of "b". Both "a" and "b" must be <c>Tensor</c>s not
/// <c>SparseTensor</c>s. This op is optimized for the case where at least one of "a" or
/// "b" is sparse, in the sense that they have a large proportion of zero values.
/// The breakeven for using this versus a dense matrix multiply on one platform was
/// 30% zero values in the sparse matrix.
///
/// The gradient computation of this operation will only take advantage of sparsity
/// in the input gradient when that gradient comes from a Relu.
/// </remarks>
member this.SparseMatMul(a : TF, b : TF, ?transpose_a : Boolean, ?transpose_b : Boolean, ?a_is_sparse : Boolean, ?b_is_sparse : Boolean, ?operName : String) =
    // Each optional flag maps to a Nullable<bool>; a missing name maps to null.
    let g = this.TFGraph
    TF(g, g.SparseMatMul(a.TFOutput, b.TFOutput,
                         Option.toNullable transpose_a,
                         Option.toNullable transpose_b,
                         Option.toNullable a_is_sparse,
                         Option.toNullable b_is_sparse,
                         Option.toObj operName))
/// <summary>
/// Computes the max of elements across dimensions of a SparseTensor.
/// </summary><param name="input_indices">
/// 2-D. <c>N x R</c> matrix with the indices of non-empty values in a
/// SparseTensor, possibly not in canonical ordering.
/// </param><param name="input_values">
/// 1-D. <c>N</c> non-empty values corresponding to <c>input_indices</c>.
/// </param><param name="input_shape">
/// 1-D. Shape of the input SparseTensor.
/// </param><param name="reduction_axes">
/// 1-D. Length-<c>K</c> vector containing the reduction axes.
/// </param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'SparseReduceMax'.
/// </param><param name="keep_dims">
/// Optional argument
/// If true, retain reduced dimensions with length 1.
/// </param><returns><c>R-K</c>-D. The reduced Tensor.
/// The TFOperation can be fetched from the resulting TFOutput, by fetching the Operation property from the result.
/// </returns><remarks>
/// This Op takes a SparseTensor and is the sparse counterpart to
/// <c>tf.reduce_max()</c>. In particular, this Op also returns a dense <c>Tensor</c>
/// instead of a sparse one.
///
/// Reduces <c>sp_input</c> along the dimensions given in <c>reduction_axes</c>. Unless
/// <c>keep_dims</c> is true, the rank of the tensor is reduced by 1 for each entry in
/// <c>reduction_axes</c>. If <c>keep_dims</c> is true, the reduced dimensions are retained
/// with length 1.
///
/// If <c>reduction_axes</c> has no entries, all dimensions are reduced, and a tensor
/// with a single element is returned. Additionally, the axes can be negative,
/// which are interpreted according to the indexing rules in Python.
/// </remarks>
member this.SparseReduceMax(input_indices : TF, input_values : TF, input_shape : TF, reduction_axes : TF, ?keep_dims : Boolean, ?operName : String) =
    // Dense-output reduction: rewrap the graph's single TFOutput in a TF handle.
    let g = this.TFGraph
    TF(g, g.SparseReduceMax(input_indices.TFOutput, input_values.TFOutput, input_shape.TFOutput, reduction_axes.TFOutput, Option.toNullable keep_dims, Option.toObj operName))
/// <summary>
/// Computes the max of elements across dimensions of a SparseTensor.
/// </summary><param name="input_indices">
/// 2-D. <c>N x R</c> matrix with the indices of non-empty values in a
/// SparseTensor, possibly not in canonical ordering.
/// </param><param name="input_values">
/// 1-D. <c>N</c> non-empty values corresponding to <c>input_indices</c>.
/// </param><param name="input_shape">
/// 1-D. Shape of the input SparseTensor.
/// </param><param name="reduction_axes">
/// 1-D. Length-<c>K</c> vector containing the reduction axes.
/// </param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'SparseReduceMaxSparse'.
/// </param><param name="keep_dims">
/// Optional argument
/// If true, retain reduced dimensions with length 1.
/// </param><returns>
/// Returns a tuple with multiple values, as follows:
/// output_indices:
/// output_values:
/// output_shape:
/// The TFOperation can be fetched from any of the TFOutputs returned in the tuple values, by fetching the Operation property.
/// </returns><remarks>
/// This Op takes a SparseTensor and is the sparse counterpart to
/// <c>tf.reduce_max()</c>. In contrast to SparseReduceMax, this Op returns a
/// SparseTensor.
///
/// Reduces <c>sp_input</c> along the dimensions given in <c>reduction_axes</c>. Unless
/// <c>keep_dims</c> is true, the rank of the tensor is reduced by 1 for each entry in
/// <c>reduction_axes</c>. If <c>keep_dims</c> is true, the reduced dimensions are retained
/// with length 1.
///
/// If <c>reduction_axes</c> has no entries, all dimensions are reduced, and a tensor
/// with a single element is returned. Additionally, the axes can be negative,
/// which are interpreted according to the indexing rules in Python.
/// </remarks>
member this.SparseReduceMaxSparse(input_indices : TF, input_values : TF, input_shape : TF, reduction_axes : TF, ?keep_dims : Boolean, ?operName : String) =
    // Sparse-output variant: return the graph's (indices, values, shape) tuple unwrapped.
    this.TFGraph.SparseReduceMaxSparse(input_indices.TFOutput, input_values.TFOutput, input_shape.TFOutput, reduction_axes.TFOutput, Option.toNullable keep_dims, Option.toObj operName)
/// <summary>
/// Computes the sum of elements across dimensions of a SparseTensor.
/// </summary><param name="input_indices">
/// 2-D. <c>N x R</c> matrix with the indices of non-empty values in a
/// SparseTensor, possibly not in canonical ordering.
/// </param><param name="input_values">
/// 1-D. <c>N</c> non-empty values corresponding to <c>input_indices</c>.
/// </param><param name="input_shape">
/// 1-D. Shape of the input SparseTensor.
/// </param><param name="reduction_axes">
/// 1-D. Length-<c>K</c> vector containing the reduction axes.
/// </param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'SparseReduceSum'.
/// </param><param name="keep_dims">
/// Optional argument
/// If true, retain reduced dimensions with length 1.
/// </param><returns><c>R-K</c>-D. The reduced Tensor.
/// The TFOperation can be fetched from the resulting TFOutput, by fetching the Operation property from the result.
/// </returns><remarks>
/// This Op takes a SparseTensor and is the sparse counterpart to
/// <c>tf.reduce_sum()</c>. In particular, this Op also returns a dense <c>Tensor</c>
/// instead of a sparse one.
///
/// Reduces <c>sp_input</c> along the dimensions given in <c>reduction_axes</c>. Unless
/// <c>keep_dims</c> is true, the rank of the tensor is reduced by 1 for each entry in
/// <c>reduction_axes</c>. If <c>keep_dims</c> is true, the reduced dimensions are retained
/// with length 1.
///
/// If <c>reduction_axes</c> has no entries, all dimensions are reduced, and a tensor
/// with a single element is returned. Additionally, the axes can be negative,
/// which are interpreted according to the indexing rules in Python.
/// </remarks>
member this.SparseReduceSum(input_indices : TF, input_values : TF, input_shape : TF, reduction_axes : TF, ?keep_dims : Boolean, ?operName : String) =
    // Dense-output reduction: rewrap the graph's single TFOutput in a TF handle.
    let g = this.TFGraph
    TF(g, g.SparseReduceSum(input_indices.TFOutput, input_values.TFOutput, input_shape.TFOutput, reduction_axes.TFOutput, Option.toNullable keep_dims, Option.toObj operName))
/// <summary>
/// Computes the sum of elements across dimensions of a SparseTensor.
/// </summary><param name="input_indices">
/// 2-D. <c>N x R</c> matrix with the indices of non-empty values in a
/// SparseTensor, possibly not in canonical ordering.
/// </param><param name="input_values">
/// 1-D. <c>N</c> non-empty values corresponding to <c>input_indices</c>.
/// </param><param name="input_shape">
/// 1-D. Shape of the input SparseTensor.
/// </param><param name="reduction_axes">
/// 1-D. Length-<c>K</c> vector containing the reduction axes.
/// </param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'SparseReduceSumSparse'.
/// </param><param name="keep_dims">
/// Optional argument
/// If true, retain reduced dimensions with length 1.
/// </param><returns>
/// Returns a tuple with multiple values, as follows:
/// output_indices:
/// output_values:
/// output_shape:
/// The TFOperation can be fetched from any of the TFOutputs returned in the tuple values, by fetching the Operation property.
/// </returns><remarks>
/// This Op takes a SparseTensor and is the sparse counterpart to
/// <c>tf.reduce_sum()</c>. In contrast to SparseReduceSum, this Op returns a
/// SparseTensor.
///
/// Reduces <c>sp_input</c> along the dimensions given in <c>reduction_axes</c>. Unless
/// <c>keep_dims</c> is true, the rank of the tensor is reduced by 1 for each entry in
/// <c>reduction_axes</c>. If <c>keep_dims</c> is true, the reduced dimensions are retained
/// with length 1.
///
/// If <c>reduction_axes</c> has no entries, all dimensions are reduced, and a tensor
/// with a single element is returned. Additionally, the axes can be negative,
/// which are interpreted according to the indexing rules in Python.
/// </remarks>
member this.SparseReduceSumSparse(input_indices : TF, input_values : TF, input_shape : TF, reduction_axes : TF, ?keep_dims : Boolean, ?operName : String) =
    // Sparse-output variant: return the graph's (indices, values, shape) tuple unwrapped.
    this.TFGraph.SparseReduceSumSparse(input_indices.TFOutput, input_values.TFOutput, input_shape.TFOutput, reduction_axes.TFOutput, Option.toNullable keep_dims, Option.toObj operName)
/// <summary>
/// Returns x + y element-wise, working on quantized buffers.
/// </summary><param name="x"></param><param name="y"></param><param name="min_x">
/// The float value that the lowest quantized <c>x</c> value represents.
/// </param><param name="max_x">
/// The float value that the highest quantized <c>x</c> value represents.
/// </param><param name="min_y">
/// The float value that the lowest quantized <c>y</c> value represents.
/// </param><param name="max_y">
/// The float value that the highest quantized <c>y</c> value represents.
/// </param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'QuantizedAdd'.
/// </param><param name="Toutput">
/// Optional argument
/// </param><returns>
/// Returns a tuple with multiple values, as follows:
/// z:
/// min_z: The float value that the lowest quantized output value represents.
/// max_z: The float value that the highest quantized output value represents.
///
/// *NOTE*: <c>QuantizedAdd</c> supports limited forms of broadcasting. More about
/// broadcasting [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
/// The TFOperation can be fetched from any of the TFOutputs returned in the tuple values, by fetching the Operation property.
/// </returns>
member this.QuantizedAdd(x : TF, y : TF, min_x : TF, max_x : TF, min_y : TF, max_y : TF, ?Toutput : TFDataType, ?operName : String) =
    // Returns the graph's (z, min_z, max_z) tuple; an omitted Toutput becomes an empty Nullable.
    this.TFGraph.QuantizedAdd(x.TFOutput, y.TFOutput, min_x.TFOutput, max_x.TFOutput, min_y.TFOutput, max_y.TFOutput, Option.toNullable Toutput, Option.toObj operName)
/// <summary>
/// Produces the average pool of the input tensor for quantized types.
/// </summary><param name="input">
/// 4-D with shape <c>[batch, height, width, channels]</c>.
/// </param><param name="min_input">
/// The float value that the lowest quantized input value represents.
/// </param><param name="max_input">
/// The float value that the highest quantized input value represents.
/// </param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'QuantizedAvgPool'.
/// </param><param name="ksize">
/// The size of the window for each dimension of the input tensor.
/// The length must be 4 to match the number of dimensions of the input.
/// </param><param name="strides">
/// The stride of the sliding window for each dimension of the input
/// tensor. The length must be 4 to match the number of dimensions of the input.
/// </param><param name="padding">
/// The type of padding algorithm to use.
/// </param><returns>
/// Returns a tuple with multiple values, as follows:
/// output:
/// min_output: The float value that the lowest quantized output value represents.
/// max_output: The float value that the highest quantized output value represents.
/// The TFOperation can be fetched from any of the TFOutputs returned in the tuple values, by fetching the Operation property.
/// </returns>
member this.QuantizedAvgPool(input : TF, min_input : TF, max_input : TF, ksize : Int64[], strides : Int64[], padding : String, ?operName : String) =
    // Forward unwrapped outputs plus the raw attribute arrays straight to the graph op.
    this.TFGraph.QuantizedAvgPool(input.TFOutput, min_input.TFOutput, max_input.TFOutput, ksize, strides, padding, Option.toObj operName)
/// <summary>
/// Quantized Batch normalization.
/// </summary><param name="t">
/// A 4D input Tensor.
/// </param><param name="t_min">
/// The value represented by the lowest quantized input.
/// </param><param name="t_max">
/// The value represented by the highest quantized input.
/// </param><param name="m">
/// A 1D mean Tensor with size matching the last dimension of t.
/// This is the first output from tf.nn.moments,
/// or a saved moving average thereof.
/// </param><param name="m_min">
/// The value represented by the lowest quantized mean.
/// </param><param name="m_max">
/// The value represented by the highest quantized mean.
/// </param><param name="v">
/// A 1D variance Tensor with size matching the last dimension of t.
/// This is the second output from tf.nn.moments,
/// or a saved moving average thereof.
/// </param><param name="v_min">
/// The value represented by the lowest quantized variance.
/// </param><param name="v_max">
/// The value represented by the highest quantized variance.
/// </param><param name="beta">
/// A 1D beta Tensor with size matching the last dimension of t.
/// An offset to be added to the normalized tensor.
/// </param><param name="beta_min">
/// The value represented by the lowest quantized offset.
/// </param><param name="beta_max">
/// The value represented by the highest quantized offset.
/// </param><param name="gamma">
/// A 1D gamma Tensor with size matching the last dimension of t.
/// If "scale_after_normalization" is true, this tensor will be multiplied
/// with the normalized tensor.
/// </param><param name="gamma_min">
/// The value represented by the lowest quantized gamma.
/// </param><param name="gamma_max">
/// The value represented by the highest quantized gamma.
/// </param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'QuantizedBatchNormWithGlobalNormalization'.
/// </param><param name="out_type"></param><param name="variance_epsilon">
/// A small float number to avoid dividing by 0.
/// </param><param name="scale_after_normalization">
/// A bool indicating whether the resulted tensor
/// needs to be multiplied with gamma.
/// </param><returns>
/// Returns a tuple with multiple values, as follows:
/// result:
/// result_min:
/// result_max:
/// The TFOperation can be fetched from any of the TFOutputs returned in the tuple values, by fetching the Operation property.
/// </returns><remarks>
/// This op is deprecated and will be removed in the future. Prefer
/// <c>tf.nn.batch_normalization</c>.
/// </remarks>
member this.QuantizedBatchNormWithGlobalNormalization(t : TF, t_min : TF, t_max : TF, m : TF, m_min : TF, m_max : TF, v : TF, v_min : TF, v_max : TF, beta : TF, beta_min : TF, beta_max : TF, gamma : TF, gamma_min : TF, gamma_max : TF, out_type : TFDataType, variance_epsilon : Single, scale_after_normalization : Boolean, ?operName : String) =
    // Unwrap every tensor handle and delegate; the graph returns (result, result_min, result_max).
    this.TFGraph.QuantizedBatchNormWithGlobalNormalization(
        t.TFOutput, t_min.TFOutput, t_max.TFOutput,
        m.TFOutput, m_min.TFOutput, m_max.TFOutput,
        v.TFOutput, v_min.TFOutput, v_max.TFOutput,
        beta.TFOutput, beta_min.TFOutput, beta_max.TFOutput,
        gamma.TFOutput, gamma_min.TFOutput, gamma_max.TFOutput,
        out_type, variance_epsilon, scale_after_normalization,
        Option.toObj operName)
/// <summary>
/// Adds Tensor 'bias' to Tensor 'input' for Quantized types.
/// </summary><param name="input"></param><param name="bias">
/// A 1D bias Tensor with size matching the last dimension of 'input'.
/// </param><param name="min_input">
/// The float value that the lowest quantized input value represents.
/// </param><param name="max_input">
/// The float value that the highest quantized input value represents.
/// </param><param name="min_bias">
/// The float value that the lowest quantized bias value represents.
/// </param><param name="max_bias">
/// The float value that the highest quantized bias value represents.
/// </param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'QuantizedBiasAdd'.
/// </param><param name="out_type"></param><returns>
/// Returns a tuple with multiple values, as follows:
/// output:
/// min_out: The float value that the lowest quantized output value represents.
/// max_out: The float value that the highest quantized output value represents.
/// The TFOperation can be fetched from any of the TFOutputs returned in the tuple values, by fethching the Operation property.
/// </returns><remarks>
/// Broadcasts the values of bias on dimensions 0..N-2 of 'input'.
/// </remarks>
member this.QuantizedBiasAdd(input : TF, bias : TF, min_input : TF, max_input : TF, min_bias : TF, max_bias : TF, out_type : TFDataType, ?operName : String) =
let operName = defaultArg operName null
this.TFGraph.QuantizedBiasAdd(input.TFOutput, bias.TFOutput, min_input.TFOutput, max_input.TFOutput, min_bias.TFOutput, max_bias.TFOutput, out_type, operName)
/// <summary>
/// Concatenates quantized tensors along one dimension.
/// </summary><param name="concat_dim">
/// 0-D. The dimension along which to concatenate. Must be in the
/// range [0, rank(values)).
/// </param><param name="values">
/// The <c>N</c> Tensors to concatenate. Their ranks and types must match,
/// and their sizes must match in all dimensions except <c>concat_dim</c>.
/// </param><param name="input_mins">
/// The minimum scalar values for each of the input tensors.
/// </param><param name="input_maxes">
/// The maximum scalar values for each of the input tensors.
/// </param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'QuantizedConcat'.
/// </param><returns>
/// Returns a tuple with multiple values, as follows:
/// output: A <c>Tensor</c> with the concatenation of values stacked along the
/// <c>concat_dim</c> dimension. This tensor's shape matches that of <c>values</c> except
/// in <c>concat_dim</c> where it has the sum of the sizes.
/// output_min: The float value that the minimum quantized output value represents.
/// output_max: The float value that the maximum quantized output value represents.
/// The TFOperation can be fetched from any of the TFOutputs returned in the tuple values, by fethching the Operation property.
/// </returns>
member this.QuantizedConcat(concat_dim : TF, values : TF[], input_mins : TF[], input_maxes : TF[], ?operName : String) =
let operName = defaultArg operName null
this.TFGraph.QuantizedConcat(concat_dim.TFOutput, values |> Array.map (fun x -> x.TFOutput), input_mins |> Array.map (fun x -> x.TFOutput), input_maxes |> Array.map (fun x -> x.TFOutput), operName)
/// <summary>
/// Computes a 2D convolution given quantized 4D input and filter tensors.
/// </summary><param name="input"></param><param name="filter">
/// filter's input_depth dimension must match input's depth dimensions.
/// </param><param name="min_input">
/// The float value that the lowest quantized input value represents.
/// </param><param name="max_input">
/// The float value that the highest quantized input value represents.
/// </param><param name="min_filter">
/// The float value that the lowest quantized filter value represents.
/// </param><param name="max_filter">
/// The float value that the highest quantized filter value represents.
/// </param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'QuantizedConv2D'.
/// </param><param name="out_type">
/// Optional argument
/// </param><param name="dilations">
/// Optional argument
/// 1-D tensor of length 4. The dilation factor for each dimension of
/// <c>input</c>. If set to k &amp;gt; 1, there will be k-1 skipped cells between each
/// filter element on that dimension. The dimension order is determined by the
/// value of <c>data_format</c>, see above for details. Dilations in the batch and
/// depth dimensions must be 1.
/// </param><param name="strides">
/// The stride of the sliding window for each dimension of the input
/// tensor.
/// </param><param name="padding">
/// The type of padding algorithm to use.
/// </param><returns>
/// Returns a tuple with multiple values, as follows:
/// output:
/// min_output: The float value that the lowest quantized output value represents.
/// max_output: The float value that the highest quantized output value represents.
/// The TFOperation can be fetched from any of the TFOutputs returned in the tuple values, by fethching the Operation property.
/// </returns><remarks>
/// The inputs are quantized tensors where the lowest value represents the real
/// number of the associated minimum, and the highest represents the maximum.
/// This means that you can only interpret the quantized output in the same way, by
/// taking the returned minimum and maximum values into account.
/// </remarks>
member this.QuantizedConv2D(input : TF, filter : TF, min_input : TF, max_input : TF, min_filter : TF, max_filter : TF, strides : Int64[], padding : String, ?out_type : TFDataType, ?dilations : Int64[], ?operName : String) =
let out_type = defaultArg (out_type |> Option.map Nullable) (Nullable())
let dilations = defaultArg dilations null
let operName = defaultArg operName null
this.TFGraph.QuantizedConv2D(input.TFOutput, filter.TFOutput, min_input.TFOutput, max_input.TFOutput, min_filter.TFOutput, max_filter.TFOutput, strides, padding, out_type, dilations, operName)
/// <summary>
/// Quantized Instance normalization.
/// </summary><param name="x">
/// A 4D input Tensor.
/// </param><param name="x_min">
/// The value represented by the lowest quantized input.
/// </param><param name="x_max">
/// The value represented by the highest quantized input.
/// </param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'QuantizedInstanceNorm'.
/// </param><param name="output_range_given">
/// Optional argument
/// If True, <c>given_y_min</c> and <c>given_y_min</c>
/// and <c>given_y_max</c> are used as the output range. Otherwise,
/// the implementation computes the output range.
/// </param><param name="given_y_min">
/// Optional argument
/// Output in <c>y_min</c> if <c>output_range_given</c> is True.
/// </param><param name="given_y_max">
/// Optional argument
/// Output in <c>y_max</c> if <c>output_range_given</c> is True.
/// </param><param name="variance_epsilon">
/// Optional argument
/// A small float number to avoid dividing by 0.
/// </param><param name="min_separation">
/// Optional argument
/// Minimum value of <c>y_max - y_min</c></param><returns>
/// Returns a tuple with multiple values, as follows:
/// y: A 4D Tensor.
/// y_min: The value represented by the lowest quantized output.
/// y_max: The value represented by the highest quantized output.
/// The TFOperation can be fetched from any of the TFOutputs returned in the tuple values, by fethching the Operation property.
/// </returns>
member this.QuantizedInstanceNorm(x : TF, x_min : TF, x_max : TF, ?output_range_given : Boolean, ?given_y_min : Single, ?given_y_max : Single, ?variance_epsilon : Single, ?min_separation : Single, ?operName : String) =
let output_range_given = defaultArg (output_range_given |> Option.map Nullable) (Nullable())
let given_y_min = defaultArg (given_y_min |> Option.map Nullable) (Nullable())
let given_y_max = defaultArg (given_y_max |> Option.map Nullable) (Nullable())
let variance_epsilon = defaultArg (variance_epsilon |> Option.map Nullable) (Nullable())
let min_separation = defaultArg (min_separation |> Option.map Nullable) (Nullable())
let operName = defaultArg operName null
this.TFGraph.QuantizedInstanceNorm(x.TFOutput, x_min.TFOutput, x_max.TFOutput, output_range_given, given_y_min, given_y_max, variance_epsilon, min_separation, operName)
/// <summary>
/// Perform a quantized matrix multiplication of <c>a</c> by the matrix <c>b</c>.
/// </summary><param name="a">
/// Must be a two-dimensional tensor.
/// </param><param name="b">
/// Must be a two-dimensional tensor.
/// </param><param name="min_a">
/// The float value that the lowest quantized <c>a</c> value represents.
/// </param><param name="max_a">
/// The float value that the highest quantized <c>a</c> value represents.
/// </param><param name="min_b">
/// The float value that the lowest quantized <c>b</c> value represents.
/// </param><param name="max_b">
/// The float value that the highest quantized <c>b</c> value represents.
/// </param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'QuantizedMatMul'.
/// </param><param name="Toutput">
/// Optional argument
/// </param><param name="transpose_a">
/// Optional argument
/// If true, <c>a</c> is transposed before multiplication.
/// </param><param name="transpose_b">
/// Optional argument
/// If true, <c>b</c> is transposed before multiplication.
/// </param><param name="Tactivation">
/// Optional argument
/// The type of output produced by activation function
/// following this operation.
/// </param><returns>
/// Returns a tuple with multiple values, as follows:
/// output:
/// min_out: The float value that the lowest quantized output value represents.
/// max_out: The float value that the highest quantized output value represents.
/// The TFOperation can be fetched from any of the TFOutputs returned in the tuple values, by fethching the Operation property.
/// </returns><remarks>
/// The inputs must be two-dimensional matrices and the inner dimension of
/// <c>a</c> (after being transposed if <c>transpose_a</c> is non-zero) must match the
/// outer dimension of <c>b</c> (after being transposed if <c>transposed_b</c> is
/// non-zero).
/// </remarks>
member this.QuantizedMatMul(a : TF, b : TF, min_a : TF, max_a : TF, min_b : TF, max_b : TF, ?Toutput : TFDataType, ?transpose_a : Boolean, ?transpose_b : Boolean, ?Tactivation : TFDataType, ?operName : String) =
let Toutput = defaultArg (Toutput |> Option.map Nullable) (Nullable())
let transpose_a = defaultArg (transpose_a |> Option.map Nullable) (Nullable())
let transpose_b = defaultArg (transpose_b |> Option.map Nullable) (Nullable())
let Tactivation = defaultArg (Tactivation |> Option.map Nullable) (Nullable())
let operName = defaultArg operName null
this.TFGraph.QuantizedMatMul(a.TFOutput, b.TFOutput, min_a.TFOutput, max_a.TFOutput, min_b.TFOutput, max_b.TFOutput, Toutput, transpose_a, transpose_b, Tactivation, operName)
/// <summary>
/// Produces the max pool of the input tensor for quantized types.
/// </summary><param name="input">
/// The 4D (batch x rows x cols x depth) Tensor to MaxReduce over.
/// </param><param name="min_input">
/// The float value that the lowest quantized input value represents.
/// </param><param name="max_input">
/// The float value that the highest quantized input value represents.
/// </param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'QuantizedMaxPool'.
/// </param><param name="ksize">
/// The size of the window for each dimension of the input tensor.
/// The length must be 4 to match the number of dimensions of the input.
/// </param><param name="strides">
/// The stride of the sliding window for each dimension of the input
/// tensor. The length must be 4 to match the number of dimensions of the input.
/// </param><param name="padding">
/// The type of padding algorithm to use.
/// </param><returns>
/// Returns a tuple with multiple values, as follows:
/// output:
/// min_output: The float value that the lowest quantized output value represents.
/// max_output: The float value that the highest quantized output value represents.
/// The TFOperation can be fetched from any of the TFOutputs returned in the tuple values, by fethching the Operation property.
/// </returns>
member this.QuantizedMaxPool(input : TF, min_input : TF, max_input : TF, ksize : Int64[], strides : Int64[], padding : String, ?operName : String) =
let operName = defaultArg operName null
this.TFGraph.QuantizedMaxPool(input.TFOutput, min_input.TFOutput, max_input.TFOutput, ksize, strides, padding, operName)
/// <summary>
/// Returns x * y element-wise, working on quantized buffers.
/// </summary><param name="x"></param><param name="y"></param><param name="min_x">
/// The float value that the lowest quantized <c>x</c> value represents.
/// </param><param name="max_x">
/// The float value that the highest quantized <c>x</c> value represents.
/// </param><param name="min_y">
/// The float value that the lowest quantized <c>y</c> value represents.
/// </param><param name="max_y">
/// The float value that the highest quantized <c>y</c> value represents.
/// </param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'QuantizedMul'.
/// </param><param name="Toutput">
/// Optional argument
/// </param><returns>
/// Returns a tuple with multiple values, as follows:
/// z:
/// min_z: The float value that the lowest quantized output value represents.
/// max_z: The float value that the highest quantized output value represents.
///
/// *NOTE*: <c>QuantizedMul</c> supports limited forms of broadcasting. More about
/// broadcasting [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
/// The TFOperation can be fetched from any of the TFOutputs returned in the tuple values, by fethching the Operation property.
/// </returns>
member this.QuantizedMul(x : TF, y : TF, min_x : TF, max_x : TF, min_y : TF, max_y : TF, ?Toutput : TFDataType, ?operName : String) =
let Toutput = defaultArg (Toutput |> Option.map Nullable) (Nullable())
let operName = defaultArg operName null
this.TFGraph.QuantizedMul(x.TFOutput, y.TFOutput, min_x.TFOutput, max_x.TFOutput, min_y.TFOutput, max_y.TFOutput, Toutput, operName)
/// <summary>
/// Convert the quantized 'input' tensor into a lower-precision 'output', using the
/// </summary><param name="input"></param><param name="input_min">
/// The float value that the minimum quantized input value represents.
/// </param><param name="input_max">
/// The float value that the maximum quantized input value represents.
/// </param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'QuantizeDownAndShrinkRange'.
/// </param><param name="out_type">
/// The type of the output. Should be a lower bit depth than Tinput.
/// </param><returns>
/// Returns a tuple with multiple values, as follows:
/// output:
/// output_min: The float value that the minimum quantized output value represents.
/// output_max: The float value that the maximum quantized output value represents.
/// The TFOperation can be fetched from any of the TFOutputs returned in the tuple values, by fethching the Operation property.
/// </returns><remarks>
/// actual distribution of the values to maximize the usage of the lower bit depth
/// and adjusting the output min and max ranges accordingly.
///
/// [input_min, input_max] are scalar floats that specify the range for the float
/// interpretation of the 'input' data. For example, if input_min is -1.0f and
/// input_max is 1.0f, and we are dealing with quint16 quantized data, then a 0
/// value in the 16-bit data should be interpreted as -1.0f, and a 65535 means 1.0f.
///
/// This operator tries to squeeze as much precision as possible into an output with
/// a lower bit depth by calculating the actual min and max values found in the
/// data. For example, maybe that quint16 input has no values lower than 16,384 and
/// none higher than 49,152. That means only half the range is actually needed, all
/// the float interpretations are between -0.5f and 0.5f, so if we want to compress
/// the data into a quint8 output, we can use that range rather than the theoretical
/// -1.0f to 1.0f that is suggested by the input min and max.
///
/// In practice, this is most useful for taking output from operations like
/// QuantizedMatMul that can produce higher bit-depth outputs than their inputs and
/// may have large potential output ranges, but in practice have a distribution of
/// input values that only uses a small fraction of the possible range. By feeding
/// that output into this operator, we can reduce it from 32 bits down to 8 with
/// minimal loss of accuracy.
/// </remarks>
member this.QuantizeDownAndShrinkRange(input : TF, input_min : TF, input_max : TF, out_type : TFDataType, ?operName : String) =
let operName = defaultArg operName null
this.TFGraph.QuantizeDownAndShrinkRange(input.TFOutput, input_min.TFOutput, input_max.TFOutput, out_type, operName)
/// <summary>
/// Computes Quantized Rectified Linear: <c>max(features, 0)</c></summary><param name="features"></param><param name="min_features">
/// The float value that the lowest quantized value represents.
/// </param><param name="max_features">
/// The float value that the highest quantized value represents.
/// </param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'QuantizedRelu'.
/// </param><param name="out_type">
/// Optional argument
/// </param><returns>
/// Returns a tuple with multiple values, as follows:
/// activations: Has the same output shape as "features".
/// min_activations: The float value that the lowest quantized value represents.
/// max_activations: The float value that the highest quantized value represents.
/// The TFOperation can be fetched from any of the TFOutputs returned in the tuple values, by fethching the Operation property.
/// </returns>
member this.QuantizedRelu(features : TF, min_features : TF, max_features : TF, ?out_type : TFDataType, ?operName : String) =
let out_type = defaultArg (out_type |> Option.map Nullable) (Nullable())
let operName = defaultArg operName null
this.TFGraph.QuantizedRelu(features.TFOutput, min_features.TFOutput, max_features.TFOutput, out_type, operName)
/// <summary>
/// Computes Quantized Rectified Linear 6: <c>min(max(features, 0), 6)</c></summary><param name="features"></param><param name="min_features">
/// The float value that the lowest quantized value represents.
/// </param><param name="max_features">
/// The float value that the highest quantized value represents.
/// </param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'QuantizedRelu6'.
/// </param><param name="out_type">
/// Optional argument
/// </param><returns>
/// Returns a tuple with multiple values, as follows:
/// activations: Has the same output shape as "features".
/// min_activations: The float value that the lowest quantized value represents.
/// max_activations: The float value that the highest quantized value represents.
/// The TFOperation can be fetched from any of the TFOutputs returned in the tuple values, by fethching the Operation property.
/// </returns>
member this.QuantizedRelu6(features : TF, min_features : TF, max_features : TF, ?out_type : TFDataType, ?operName : String) =
let out_type = defaultArg (out_type |> Option.map Nullable) (Nullable())
let operName = defaultArg operName null
this.TFGraph.QuantizedRelu6(features.TFOutput, min_features.TFOutput, max_features.TFOutput, out_type, operName)
/// <summary>
/// Computes Quantized Rectified Linear X: <c>min(max(features, 0), max_value)</c></summary><param name="features"></param><param name="max_value"></param><param name="min_features">
/// The float value that the lowest quantized value represents.
/// </param><param name="max_features">
/// The float value that the highest quantized value represents.
/// </param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'QuantizedReluX'.
/// </param><param name="out_type">
/// Optional argument
/// </param><returns>
/// Returns a tuple with multiple values, as follows:
/// activations: Has the same output shape as "features".
/// min_activations: The float value that the lowest quantized value represents.
/// max_activations: The float value that the highest quantized value represents.
/// The TFOperation can be fetched from any of the TFOutputs returned in the tuple values, by fethching the Operation property.
/// </returns>
member this.QuantizedReluX(features : TF, max_value : TF, min_features : TF, max_features : TF, ?out_type : TFDataType, ?operName : String) =
let out_type = defaultArg (out_type |> Option.map Nullable) (Nullable())
let operName = defaultArg operName null
this.TFGraph.QuantizedReluX(features.TFOutput, max_value.TFOutput, min_features.TFOutput, max_features.TFOutput, out_type, operName)
member this.QuantizedReshape(tensor : TF, shape : TF, input_min : TF, input_max : TF, ?operName : String) =
let operName = defaultArg operName null
this.TFGraph.QuantizedReshape(tensor.TFOutput, shape.TFOutput, input_min.TFOutput, input_max.TFOutput, operName)
/// <summary>
/// Resize quantized <c>images</c> to <c>size</c> using quantized bilinear interpolation.
/// </summary><param name="images">
/// 4-D with shape <c>[batch, height, width, channels]</c>.
/// </param><param name="size">
/// = A 1-D int32 Tensor of 2 elements: <c>new_height, new_width</c>. The
/// new size for the images.
/// </param><param name="min"></param><param name="max"></param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'QuantizedResizeBilinear'.
/// </param><param name="align_corners">
/// Optional argument
/// If true, the centers of the 4 corner pixels of the input and output tensors are
/// aligned, preserving the values at the corner pixels. Defaults to false.
/// </param><returns>
/// Returns a tuple with multiple values, as follows:
/// resized_images: 4-D with shape
/// <c>[batch, new_height, new_width, channels]</c>.
/// out_min:
/// out_max:
/// The TFOperation can be fetched from any of the TFOutputs returned in the tuple values, by fethching the Operation property.
/// </returns><remarks>
/// Input images and output images must be quantized types.
/// </remarks>
member this.QuantizedResizeBilinear(images : TF, size : TF, min : TF, max : TF, ?align_corners : Boolean, ?operName : String) =
let align_corners = defaultArg (align_corners |> Option.map Nullable) (Nullable())
let operName = defaultArg operName null
this.TFGraph.QuantizedResizeBilinear(images.TFOutput, size.TFOutput, min.TFOutput, max.TFOutput, align_corners, operName)
/// <summary>
/// Quantize the 'input' tensor of type float to 'output' tensor of type 'T'.
/// </summary><param name="input"></param><param name="min_range">
/// The minimum scalar value possibly produced for the input.
/// </param><param name="max_range">
/// The maximum scalar value possibly produced for the input.
/// </param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'QuantizeV2'.
/// </param><param name="mode">
/// Optional argument
/// </param><param name="round_mode">
/// Optional argument
/// </param><param name="T"></param><returns>
/// Returns a tuple with multiple values, as follows:
/// output: The quantized data produced from the float input.
/// output_min: The actual minimum scalar value used for the output.
/// output_max: The actual maximum scalar value used for the output.
/// The TFOperation can be fetched from any of the TFOutputs returned in the tuple values, by fethching the Operation property.
/// </returns><remarks>
/// [min_range, max_range] are scalar floats that specify the range for
/// the 'input' data. The 'mode' attribute controls exactly which calculations are
/// used to convert the float values to their quantized equivalents. The
/// 'round_mode' attribute controls which rounding tie-breaking algorithm is used
/// when rounding float values to their quantized equivalents.
///
/// In 'MIN_COMBINED' mode, each value of the tensor will undergo the following:
///
/// <code>
/// out[i] = (in[i] - min_range) * range(T) / (max_range - min_range)
/// if T == qint8: out[i] -= (range(T) + 1) / 2.0
/// </code>
///
/// here <c>range(T) = numeric_limits&amp;lt;T&amp;gt;::max() - numeric_limits&amp;lt;T&amp;gt;::min()</c>
///
/// *MIN_COMBINED Mode Example*
///
/// Assume the input is type float and has a possible range of [0.0, 6.0] and the
/// output type is quint8 ([0, 255]). The min_range and max_range values should be
/// specified as 0.0 and 6.0. Quantizing from float to quint8 will multiply each
/// value of the input by 255/6 and cast to quint8.
///
/// If the output type was qint8 ([-128, 127]), the operation will additionally
/// subtract each value by 128 prior to casting, so that the range of values aligns
/// with the range of qint8.
///
/// If the mode is 'MIN_FIRST', then this approach is used:
///
/// <code>
/// num_discrete_values = 1 &amp;lt;&amp;lt; (# of bits in T)
/// range_adjust = num_discrete_values / (num_discrete_values - 1)
/// range = (range_max - range_min) * range_adjust
/// range_scale = num_discrete_values / range
/// quantized = round(input * range_scale) - round(range_min * range_scale) +
/// numeric_limits&amp;lt;T&amp;gt;::min()
/// quantized = max(quantized, numeric_limits&amp;lt;T&amp;gt;::min())
/// quantized = min(quantized, numeric_limits&amp;lt;T&amp;gt;::max())
/// </code>
///
/// The biggest difference between this and MIN_COMBINED is that the minimum range
/// is rounded first, before it's subtracted from the rounded value. With
/// MIN_COMBINED, a small bias is introduced where repeated iterations of quantizing
/// and dequantizing will introduce a larger and larger error.
///
/// *SCALED mode Example*
///
/// <c>SCALED</c> mode matches the quantization approach used in
/// <c>QuantizeAndDequantize{V2|V3}</c>.
///
/// If the mode is <c>SCALED</c>, we do not use the full range of the output type,
/// choosing to elide the lowest possible value for symmetry (e.g., output range is
/// -127 to 127, not -128 to 127 for signed 8 bit quantization), so that 0.0 maps to
/// 0.
///
/// We first find the range of values in our tensor. The
/// range we use is always centered on 0, so we find m such that
///
/// <code>
/// m = max(abs(input_min), abs(input_max))
/// </code>
///
/// Our input tensor range is then <c>[-m, m]</c>.
///
/// Next, we choose our fixed-point quantization buckets, <c>[min_fixed, max_fixed]</c>.
/// If T is signed, this is
///
/// <code>
/// num_bits = sizeof(T) * 8
/// [min_fixed, max_fixed] =
/// [-(1 &amp;lt;&amp;lt; (num_bits - 1) - 1), (1 &amp;lt;&amp;lt; (num_bits - 1)) - 1]
/// </code>
///
/// Otherwise, if T is unsigned, the fixed-point range is
///
/// <code>
/// [min_fixed, max_fixed] = [0, (1 &amp;lt;&amp;lt; num_bits) - 1]
/// </code>
///
/// From this we compute our scaling factor, s:
///
/// <code>
/// s = (max_fixed - min_fixed) / (2 * m)
/// </code>
///
/// Now we can quantize the elements of our tensor:
///
/// <code>
/// result = round(input * s)
/// </code>
///
/// One thing to watch out for is that the operator may choose to adjust the
/// requested minimum and maximum values slightly during the quantization process,
/// so you should always use the output ports as the range for further calculations.
/// For example, if the requested minimum and maximum values are close to equal,
/// they will be separated by a small epsilon value to prevent ill-formed quantized
/// buffers from being created. Otherwise, you can end up with buffers where all the
/// quantized values map to the same float value, which causes problems for
/// operations that have to perform further calculations on them.
/// </remarks>
member this.QuantizeV2(input : TF, min_range : TF, max_range : TF, T : TFDataType, ?mode : String, ?round_mode : String, ?operName : String) =
let mode = defaultArg mode null
let round_mode = defaultArg round_mode null
let operName = defaultArg operName null
this.TFGraph.QuantizeV2(input.TFOutput, min_range.TFOutput, max_range.TFOutput, T, mode, round_mode, operName)
/// <summary>
/// Closes the given queue.
/// </summary><param name="handle">
/// The handle to a queue.
/// </param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'QueueClose'.
/// </param><param name="cancel_pending_enqueues">
/// Optional argument
/// If true, all pending enqueue requests that are
/// blocked on the given queue will be canceled.
/// </param><returns>
/// Returns the description of the operation
/// </returns><remarks>
/// This operation signals that no more elements will be enqueued in the
/// given queue. Subsequent Enqueue(Many) operations will fail.
/// Subsequent Dequeue(Many) operations will continue to succeed if
/// sufficient elements remain in the queue. Subsequent Dequeue(Many)
/// operations that would block will fail immediately.
/// </remarks>
member this.QueueClose(handle : TF, ?cancel_pending_enqueues : Boolean, ?operName : String) =
let cancel_pending_enqueues = defaultArg (cancel_pending_enqueues |> Option.map Nullable) (Nullable())
let operName = defaultArg operName null
this.TFGraph.QueueClose(handle.TFOutput, cancel_pending_enqueues, operName)
/// <summary>
/// Closes the given queue.
/// </summary><param name="handle">
/// The handle to a queue.
/// </param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'QueueCloseV2'.
/// </param><param name="cancel_pending_enqueues">
/// Optional argument
/// If true, all pending enqueue requests that are
/// blocked on the given queue will be canceled.
/// </param><returns>
/// Returns the description of the operation
/// </returns><remarks>
/// This operation signals that no more elements will be enqueued in the
/// given queue. Subsequent Enqueue(Many) operations will fail.
/// Subsequent Dequeue(Many) operations will continue to succeed if
/// sufficient elements remain in the queue. Subsequent Dequeue(Many)
/// operations that would block will fail immediately.
/// </remarks>
member this.QueueCloseV2(handle : TF, ?cancel_pending_enqueues : Boolean, ?operName : String) =
let cancel_pending_enqueues = defaultArg (cancel_pending_enqueues |> Option.map Nullable) (Nullable())
let operName = defaultArg operName null
this.TFGraph.QueueCloseV2(handle.TFOutput, cancel_pending_enqueues, operName)
/// <summary>
/// Dequeues a tuple of one or more tensors from the given queue.
/// </summary>
/// <param name="handle">The handle to a queue.</param>
/// <param name="component_types">The type of each component in a tuple.</param>
/// <param name="timeout_ms">
/// Optional argument. If the queue is empty, this operation will block for
/// up to timeout_ms milliseconds. Note: this option is not supported yet.
/// </param>
/// <param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'QueueDequeue'.
/// </param>
/// <returns>
/// One or more tensors that were dequeued as a tuple. The TFOperation can be
/// fetched from each resulting value via its Operation property.
/// </returns>
/// <remarks>
/// This operation has k outputs, where k is the number of components in the
/// tuples stored in the given queue, and output i is the ith component of
/// the dequeued tuple. N.B. If the queue is empty, this operation blocks
/// until an element has been dequeued (or 'timeout_ms' elapses, if specified).
/// </remarks>
member this.QueueDequeue(handle : TF, component_types : TFDataType[], ?timeout_ms : Int64, ?operName : String) =
    let timeout = match timeout_ms with | Some ms -> Nullable ms | None -> Nullable()
    let name = match operName with | Some n -> n | None -> null
    this.TFGraph.QueueDequeue(handle.TFOutput, component_types, timeout, name)
    |> Array.map (fun output -> TF(this.TFGraph, output))
/// <summary>
/// Dequeues <c>n</c> tuples of one or more tensors from the given queue.
/// </summary>
/// <param name="handle">The handle to a queue.</param>
/// <param name="n">The number of tuples to dequeue.</param>
/// <param name="component_types">The type of each component in a tuple.</param>
/// <param name="timeout_ms">
/// Optional argument. If the queue has fewer than n elements, this operation
/// will block for up to timeout_ms milliseconds. Note: this option is not
/// supported yet.
/// </param>
/// <param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'QueueDequeueMany'.
/// </param>
/// <returns>
/// One or more tensors that were dequeued as a tuple. The TFOperation can be
/// fetched from each resulting value via its Operation property.
/// </returns>
/// <remarks>
/// If the queue is closed and there are fewer than <c>n</c> elements, an
/// OutOfRange error is returned. The op concatenates queue-element component
/// tensors along the 0th dimension to make a single component tensor; all
/// components in the dequeued tuple have size <c>n</c> in the 0th dimension.
/// It has <c>k</c> outputs, where <c>k</c> is the number of components in the
/// stored tuples, and output <c>i</c> is the ith component of the dequeued
/// tuple. N.B. If the queue is empty, this operation blocks until <c>n</c>
/// elements have been dequeued (or 'timeout_ms' elapses, if specified).
/// </remarks>
member this.QueueDequeueMany(handle : TF, n : TF, component_types : TFDataType[], ?timeout_ms : Int64, ?operName : String) =
    let timeout = match timeout_ms with | Some ms -> Nullable ms | None -> Nullable()
    let name = match operName with | Some nm -> nm | None -> null
    this.TFGraph.QueueDequeueMany(handle.TFOutput, n.TFOutput, component_types, timeout, name)
    |> Array.map (fun output -> TF(this.TFGraph, output))
/// <summary>
/// Dequeues <c>n</c> tuples of one or more tensors from the given queue.
/// </summary>
/// <param name="handle">The handle to a queue.</param>
/// <param name="n">The number of tuples to dequeue.</param>
/// <param name="component_types">The type of each component in a tuple.</param>
/// <param name="timeout_ms">
/// Optional argument. If the queue has fewer than n elements, this operation
/// will block for up to timeout_ms milliseconds. Note: this option is not
/// supported yet.
/// </param>
/// <param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'QueueDequeueManyV2'.
/// </param>
/// <returns>
/// One or more tensors that were dequeued as a tuple. The TFOperation can be
/// fetched from each resulting value via its Operation property.
/// </returns>
/// <remarks>
/// If the queue is closed and there are fewer than <c>n</c> elements, an
/// OutOfRange error is returned. The op concatenates queue-element component
/// tensors along the 0th dimension to make a single component tensor; all
/// components in the dequeued tuple have size <c>n</c> in the 0th dimension.
/// It has <c>k</c> outputs, where <c>k</c> is the number of components in the
/// stored tuples, and output <c>i</c> is the ith component of the dequeued
/// tuple. N.B. If the queue is empty, this operation blocks until <c>n</c>
/// elements have been dequeued (or 'timeout_ms' elapses, if specified).
/// </remarks>
member this.QueueDequeueManyV2(handle : TF, n : TF, component_types : TFDataType[], ?timeout_ms : Int64, ?operName : String) =
    let timeout = match timeout_ms with | Some ms -> Nullable ms | None -> Nullable()
    let name = match operName with | Some nm -> nm | None -> null
    this.TFGraph.QueueDequeueManyV2(handle.TFOutput, n.TFOutput, component_types, timeout, name)
    |> Array.map (fun output -> TF(this.TFGraph, output))
/// <summary>
/// Dequeues up to <c>n</c> tuples of one or more tensors from the given queue.
/// </summary>
/// <param name="handle">The handle to a queue.</param>
/// <param name="n">The number of tuples to dequeue.</param>
/// <param name="component_types">The type of each component in a tuple.</param>
/// <param name="timeout_ms">
/// Optional argument. If the queue has fewer than n elements, this operation
/// will block for up to timeout_ms milliseconds. Note: this option is not
/// supported yet.
/// </param>
/// <param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'QueueDequeueUpTo'.
/// </param>
/// <returns>
/// One or more tensors that were dequeued as a tuple. The TFOperation can be
/// fetched from each resulting value via its Operation property.
/// </returns>
/// <remarks>
/// Not supported by all queues: if a queue does not support DequeueUpTo, an
/// Unimplemented error is returned. If the queue is closed with more than 0
/// but fewer than <c>n</c> elements remaining, fewer than <c>n</c> elements
/// are returned immediately instead of the OutOfRange error raised by
/// QueueDequeueMany; with 0 elements left, an OutOfRange error is returned
/// just like QueueDequeueMany. Otherwise the behavior is identical to
/// QueueDequeueMany: component tensors are concatenated along the 0th
/// dimension, each component has size <c>n</c> in the 0th dimension, and
/// output <c>i</c> of the <c>k</c> outputs is the ith tuple component.
/// </remarks>
member this.QueueDequeueUpTo(handle : TF, n : TF, component_types : TFDataType[], ?timeout_ms : Int64, ?operName : String) =
    let timeout = match timeout_ms with | Some ms -> Nullable ms | None -> Nullable()
    let name = match operName with | Some nm -> nm | None -> null
    this.TFGraph.QueueDequeueUpTo(handle.TFOutput, n.TFOutput, component_types, timeout, name)
    |> Array.map (fun output -> TF(this.TFGraph, output))
/// <summary>
/// Dequeues up to <c>n</c> tuples of one or more tensors from the given queue.
/// </summary>
/// <param name="handle">The handle to a queue.</param>
/// <param name="n">The number of tuples to dequeue.</param>
/// <param name="component_types">The type of each component in a tuple.</param>
/// <param name="timeout_ms">
/// Optional argument. If the queue has fewer than n elements, this operation
/// will block for up to timeout_ms milliseconds. Note: this option is not
/// supported yet.
/// </param>
/// <param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'QueueDequeueUpToV2'.
/// </param>
/// <returns>
/// One or more tensors that were dequeued as a tuple. The TFOperation can be
/// fetched from each resulting value via its Operation property.
/// </returns>
/// <remarks>
/// Not supported by all queues: if a queue does not support DequeueUpTo, an
/// Unimplemented error is returned. If the queue is closed with more than 0
/// but fewer than <c>n</c> elements remaining, fewer than <c>n</c> elements
/// are returned immediately instead of the OutOfRange error raised by
/// QueueDequeueMany; with 0 elements left, an OutOfRange error is returned
/// just like QueueDequeueMany. Otherwise the behavior is identical to
/// QueueDequeueMany: component tensors are concatenated along the 0th
/// dimension, each component has size n in the 0th dimension, and output
/// <c>i</c> of the <c>k</c> outputs is the ith tuple component.
/// </remarks>
member this.QueueDequeueUpToV2(handle : TF, n : TF, component_types : TFDataType[], ?timeout_ms : Int64, ?operName : String) =
    let timeout = match timeout_ms with | Some ms -> Nullable ms | None -> Nullable()
    let name = match operName with | Some nm -> nm | None -> null
    this.TFGraph.QueueDequeueUpToV2(handle.TFOutput, n.TFOutput, component_types, timeout, name)
    |> Array.map (fun output -> TF(this.TFGraph, output))
/// <summary>
/// Dequeues a tuple of one or more tensors from the given queue.
/// </summary>
/// <param name="handle">The handle to a queue.</param>
/// <param name="component_types">The type of each component in a tuple.</param>
/// <param name="timeout_ms">
/// Optional argument. If the queue is empty, this operation will block for
/// up to timeout_ms milliseconds. Note: this option is not supported yet.
/// </param>
/// <param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'QueueDequeueV2'.
/// </param>
/// <returns>
/// One or more tensors that were dequeued as a tuple. The TFOperation can be
/// fetched from each resulting value via its Operation property.
/// </returns>
/// <remarks>
/// This operation has k outputs, where k is the number of components in the
/// tuples stored in the given queue, and output i is the ith component of
/// the dequeued tuple. N.B. If the queue is empty, this operation blocks
/// until an element has been dequeued (or 'timeout_ms' elapses, if specified).
/// </remarks>
member this.QueueDequeueV2(handle : TF, component_types : TFDataType[], ?timeout_ms : Int64, ?operName : String) =
    let timeout = match timeout_ms with | Some ms -> Nullable ms | None -> Nullable()
    let name = match operName with | Some n -> n | None -> null
    this.TFGraph.QueueDequeueV2(handle.TFOutput, component_types, timeout, name)
    |> Array.map (fun output -> TF(this.TFGraph, output))
/// <summary>
/// Enqueues a tuple of one or more tensors in the given queue.
/// </summary>
/// <param name="handle">The handle to a queue.</param>
/// <param name="components">
/// One or more tensors from which the enqueued tensors should be taken.
/// </param>
/// <param name="timeout_ms">
/// Optional argument. If the queue is full, this operation will block for up
/// to timeout_ms milliseconds. Note: this option is not supported yet.
/// </param>
/// <param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'QueueEnqueue'.
/// </param>
/// <returns>Returns the description of the operation.</returns>
/// <remarks>
/// The components input has k elements, which correspond to the components of
/// tuples stored in the given queue. N.B. If the queue is full, this
/// operation blocks until the given element has been enqueued (or
/// 'timeout_ms' elapses, if specified).
/// </remarks>
member this.QueueEnqueue(handle : TF, components : TF[], ?timeout_ms : Int64, ?operName : String) =
    let timeout = match timeout_ms with | Some ms -> Nullable ms | None -> Nullable()
    let name = match operName with | Some n -> n | None -> null
    let outputs = components |> Array.map (fun c -> c.TFOutput)
    this.TFGraph.QueueEnqueue(handle.TFOutput, outputs, timeout, name)
/// <summary>
/// Enqueues zero or more tuples of one or more tensors in the given queue.
/// </summary>
/// <param name="handle">The handle to a queue.</param>
/// <param name="components">
/// One or more tensors from which the enqueued tensors should be taken.
/// </param>
/// <param name="timeout_ms">
/// Optional argument. If the queue is too full, this operation will block
/// for up to timeout_ms milliseconds. Note: this option is not supported yet.
/// </param>
/// <param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'QueueEnqueueMany'.
/// </param>
/// <returns>Returns the description of the operation.</returns>
/// <remarks>
/// Slices each component tensor along the 0th dimension to make multiple
/// queue elements; all tuple components must have the same size in the 0th
/// dimension. The components input has k elements, corresponding to the
/// components of tuples stored in the given queue. N.B. If the queue is
/// full, this operation blocks until the given elements have been enqueued
/// (or 'timeout_ms' elapses, if specified).
/// </remarks>
member this.QueueEnqueueMany(handle : TF, components : TF[], ?timeout_ms : Int64, ?operName : String) =
    let timeout = match timeout_ms with | Some ms -> Nullable ms | None -> Nullable()
    let name = match operName with | Some n -> n | None -> null
    let outputs = components |> Array.map (fun c -> c.TFOutput)
    this.TFGraph.QueueEnqueueMany(handle.TFOutput, outputs, timeout, name)
/// <summary>
/// Enqueues zero or more tuples of one or more tensors in the given queue.
/// </summary>
/// <param name="handle">The handle to a queue.</param>
/// <param name="components">
/// One or more tensors from which the enqueued tensors should be taken.
/// </param>
/// <param name="timeout_ms">
/// Optional argument. If the queue is too full, this operation will block
/// for up to timeout_ms milliseconds. Note: this option is not supported yet.
/// </param>
/// <param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'QueueEnqueueManyV2'.
/// </param>
/// <returns>Returns the description of the operation.</returns>
/// <remarks>
/// Slices each component tensor along the 0th dimension to make multiple
/// queue elements; all tuple components must have the same size in the 0th
/// dimension. The components input has k elements, corresponding to the
/// components of tuples stored in the given queue. N.B. If the queue is
/// full, this operation blocks until the given elements have been enqueued
/// (or 'timeout_ms' elapses, if specified).
/// </remarks>
member this.QueueEnqueueManyV2(handle : TF, components : TF[], ?timeout_ms : Int64, ?operName : String) =
    let timeout = match timeout_ms with | Some ms -> Nullable ms | None -> Nullable()
    let name = match operName with | Some n -> n | None -> null
    let outputs = components |> Array.map (fun c -> c.TFOutput)
    this.TFGraph.QueueEnqueueManyV2(handle.TFOutput, outputs, timeout, name)
/// <summary>
/// Enqueues a tuple of one or more tensors in the given queue.
/// </summary>
/// <param name="handle">The handle to a queue.</param>
/// <param name="components">
/// One or more tensors from which the enqueued tensors should be taken.
/// </param>
/// <param name="timeout_ms">
/// Optional argument. If the queue is full, this operation will block for up
/// to timeout_ms milliseconds. Note: this option is not supported yet.
/// </param>
/// <param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'QueueEnqueueV2'.
/// </param>
/// <returns>Returns the description of the operation.</returns>
/// <remarks>
/// The components input has k elements, which correspond to the components of
/// tuples stored in the given queue. N.B. If the queue is full, this
/// operation blocks until the given element has been enqueued (or
/// 'timeout_ms' elapses, if specified).
/// </remarks>
member this.QueueEnqueueV2(handle : TF, components : TF[], ?timeout_ms : Int64, ?operName : String) =
    let timeout = match timeout_ms with | Some ms -> Nullable ms | None -> Nullable()
    let name = match operName with | Some n -> n | None -> null
    let outputs = components |> Array.map (fun c -> c.TFOutput)
    this.TFGraph.QueueEnqueueV2(handle.TFOutput, outputs, timeout, name)
/// <summary>
/// Returns true if queue is closed.
/// </summary>
/// <param name="handle">The handle to a queue.</param>
/// <param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'QueueIsClosed'.
/// </param>
/// <returns>
/// The TFOperation can be fetched from the resulting value via its
/// Operation property.
/// </returns>
/// <remarks>
/// This operation returns true if the queue is closed and false if the
/// queue is open.
/// </remarks>
member this.QueueIsClosed(handle : TF, ?operName : String) =
    let name = match operName with | Some n -> n | None -> null
    let output = this.TFGraph.QueueIsClosed(handle.TFOutput, name)
    TF(this.TFGraph, output)
/// <summary>
/// Returns true if queue is closed.
/// </summary>
/// <param name="handle">The handle to a queue.</param>
/// <param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'QueueIsClosedV2'.
/// </param>
/// <returns>
/// The TFOperation can be fetched from the resulting value via its
/// Operation property.
/// </returns>
/// <remarks>
/// This operation returns true if the queue is closed and false if the
/// queue is open.
/// </remarks>
member this.QueueIsClosedV2(handle : TF, ?operName : String) =
    let name = match operName with | Some n -> n | None -> null
    let output = this.TFGraph.QueueIsClosedV2(handle.TFOutput, name)
    TF(this.TFGraph, output)
/// <summary>
/// Computes the number of elements in the given queue.
/// </summary>
/// <param name="handle">The handle to a queue.</param>
/// <param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'QueueSize'.
/// </param>
/// <returns>
/// The number of elements in the given queue. The TFOperation can be fetched
/// from the resulting value via its Operation property.
/// </returns>
member this.QueueSize(handle : TF, ?operName : String) =
    let name = match operName with | Some n -> n | None -> null
    let output = this.TFGraph.QueueSize(handle.TFOutput, name)
    TF(this.TFGraph, output)
/// <summary>
/// Computes the number of elements in the given queue.
/// </summary>
/// <param name="handle">The handle to a queue.</param>
/// <param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'QueueSizeV2'.
/// </param>
/// <returns>
/// The number of elements in the given queue. The TFOperation can be fetched
/// from the resulting value via its Operation property.
/// </returns>
member this.QueueSizeV2(handle : TF, ?operName : String) =
    let name = match operName with | Some n -> n | None -> null
    let output = this.TFGraph.QueueSizeV2(handle.TFOutput, name)
    TF(this.TFGraph, output)
/// <summary>
/// Randomly crop <c>image</c>.
/// </summary>
/// <param name="image">3-D of shape <c>[height, width, channels]</c>.</param>
/// <param name="size">
/// 1-D of length 2 containing: <c>crop_height</c>, <c>crop_width</c>.
/// </param>
/// <param name="seed">
/// Optional argument. If either seed or seed2 are set to be non-zero, the
/// random number generator is seeded by the given seed. Otherwise, it is
/// seeded by a random seed.
/// </param>
/// <param name="seed2">
/// Optional argument. A second seed to avoid seed collision.
/// </param>
/// <param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'RandomCrop'.
/// </param>
/// <returns>
/// 3-D of shape <c>[crop_height, crop_width, channels]</c>. The TFOperation
/// can be fetched from the resulting value via its Operation property.
/// </returns>
/// <remarks>
/// <c>size</c> is a 1-D int64 tensor with 2 elements representing the crop
/// height and width; the values must be non negative. The op picks a random
/// location in <c>image</c> and crops a <c>height</c> by <c>width</c>
/// rectangle from that location, chosen so the cropped area fits inside the
/// original image.
/// </remarks>
member this.RandomCrop(image : TF, size : TF, ?seed : Int64, ?seed2 : Int64, ?operName : String) =
    let seed = match seed with | Some v -> Nullable v | None -> Nullable()
    let seed2 = match seed2 with | Some v -> Nullable v | None -> Nullable()
    let name = match operName with | Some n -> n | None -> null
    TF(this.TFGraph, this.TFGraph.RandomCrop(image.TFOutput, size.TFOutput, seed, seed2, name))
/// <summary>
/// Outputs random values from the Gamma distribution(s) described by alpha.
/// </summary>
/// <param name="shape">
/// 1-D integer tensor. Shape of independent samples to draw from each
/// distribution described by the shape parameters given in alpha.
/// </param>
/// <param name="alpha">
/// A tensor in which each scalar is a "shape" parameter describing the
/// associated gamma distribution.
/// </param>
/// <param name="seed">
/// Optional argument. If either <c>seed</c> or <c>seed2</c> are set to be
/// non-zero, the random number generator is seeded by the given seed.
/// Otherwise, it is seeded by a random seed.
/// </param>
/// <param name="seed2">
/// Optional argument. A second seed to avoid seed collision.
/// </param>
/// <param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'RandomGamma'.
/// </param>
/// <returns>
/// A tensor with shape <c>shape + shape(alpha)</c>. Each slice
/// <c>[:, ..., :, i0, i1, ...iN]</c> contains the samples drawn for
/// <c>alpha[i0, i1, ...iN]</c>. The dtype of the output matches the dtype of
/// alpha. The TFOperation can be fetched from the resulting value via its
/// Operation property.
/// </returns>
/// <remarks>
/// This op uses the algorithm by Marsaglia et al. to acquire samples via
/// transformation-rejection from pairs of uniform and normal random
/// variables. See http://dl.acm.org/citation.cfm?id=358414
/// </remarks>
member this.RandomGamma(shape : TF, alpha : TF, ?seed : Int64, ?seed2 : Int64, ?operName : String) =
    let seed = match seed with | Some v -> Nullable v | None -> Nullable()
    let seed2 = match seed2 with | Some v -> Nullable v | None -> Nullable()
    let name = match operName with | Some n -> n | None -> null
    TF(this.TFGraph, this.TFGraph.RandomGamma(shape.TFOutput, alpha.TFOutput, seed, seed2, name))
/// <summary>
/// Computes the derivative of a Gamma random sample w.r.t. <c>alpha</c>.
/// </summary>
/// <param name="alpha"></param>
/// <param name="sample"></param>
/// <param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'RandomGammaGrad'.
/// </param>
/// <returns>
/// The TFOperation can be fetched from the resulting value via its
/// Operation property.
/// </returns>
member this.RandomGammaGrad(alpha : TF, sample : TF, ?operName : String) =
    let name = match operName with | Some n -> n | None -> null
    let output = this.TFGraph.RandomGammaGrad(alpha.TFOutput, sample.TFOutput, name)
    TF(this.TFGraph, output)
/// <summary>
/// Use RandomPoissonV2 instead.
/// </summary>
/// <param name="shape"></param>
/// <param name="rate"></param>
/// <param name="seed">Optional argument.</param>
/// <param name="seed2">Optional argument.</param>
/// <param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'RandomPoisson'.
/// </param>
/// <returns>
/// The TFOperation can be fetched from the resulting value via its
/// Operation property.
/// </returns>
member this.RandomPoisson(shape : TF, rate : TF, ?seed : Int64, ?seed2 : Int64, ?operName : String) =
    let seed = match seed with | Some v -> Nullable v | None -> Nullable()
    let seed2 = match seed2 with | Some v -> Nullable v | None -> Nullable()
    let name = match operName with | Some n -> n | None -> null
    TF(this.TFGraph, this.TFGraph.RandomPoisson(shape.TFOutput, rate.TFOutput, seed, seed2, name))
/// <summary>
/// Outputs random values from the Poisson distribution(s) described by rate.
/// </summary>
/// <param name="shape">
/// 1-D integer tensor. Shape of independent samples to draw from each
/// distribution described by the shape parameters given in rate.
/// </param>
/// <param name="rate">
/// A tensor in which each scalar is a "rate" parameter describing the
/// associated poisson distribution.
/// </param>
/// <param name="seed">
/// Optional argument. If either <c>seed</c> or <c>seed2</c> are set to be
/// non-zero, the random number generator is seeded by the given seed.
/// Otherwise, it is seeded by a random seed.
/// </param>
/// <param name="seed2">
/// Optional argument. A second seed to avoid seed collision.
/// </param>
/// <param name="dtype">Optional argument.</param>
/// <param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'RandomPoissonV2'.
/// </param>
/// <returns>
/// A tensor with shape <c>shape + shape(rate)</c>. Each slice
/// <c>[:, ..., :, i0, i1, ...iN]</c> contains the samples drawn for
/// <c>rate[i0, i1, ...iN]</c>. The TFOperation can be fetched from the
/// resulting value via its Operation property.
/// </returns>
/// <remarks>
/// Uses two algorithms, depending on rate. If rate &amp;gt;= 10, the
/// algorithm by Hormann acquires samples via transformation-rejection; see
/// http://www.sciencedirect.com/science/article/pii/0167668793909974.
/// Otherwise, Knuth's algorithm acquires samples by multiplying uniform
/// random variables; see Donald E. Knuth (1969). Seminumerical Algorithms.
/// The Art of Computer Programming, Volume 2. Addison Wesley
/// </remarks>
member this.RandomPoissonV2(shape : TF, rate : TF, ?seed : Int64, ?seed2 : Int64, ?dtype : TFDataType, ?operName : String) =
    let seed = match seed with | Some v -> Nullable v | None -> Nullable()
    let seed2 = match seed2 with | Some v -> Nullable v | None -> Nullable()
    let dtype = match dtype with | Some v -> Nullable v | None -> Nullable()
    let name = match operName with | Some n -> n | None -> null
    TF(this.TFGraph, this.TFGraph.RandomPoissonV2(shape.TFOutput, rate.TFOutput, seed, seed2, dtype, name))
/// <summary>
/// Randomly shuffles a tensor along its first dimension.
/// </summary>
/// <param name="value">The tensor to be shuffled.</param>
/// <param name="seed">
/// Optional argument. If either <c>seed</c> or <c>seed2</c> are set to be
/// non-zero, the random number generator is seeded by the given seed.
/// Otherwise, it is seeded by a random seed.
/// </param>
/// <param name="seed2">
/// Optional argument. A second seed to avoid seed collision.
/// </param>
/// <param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'RandomShuffle'.
/// </param>
/// <returns>
/// A tensor of same shape and type as <c>value</c>, shuffled along its first
/// dimension. The TFOperation can be fetched from the resulting value via
/// its Operation property.
/// </returns>
/// <remarks>
/// The tensor is shuffled along dimension 0, such that each <c>value[j]</c>
/// is mapped to one and only one <c>output[i]</c>. For example, a mapping
/// that might occur for a 3x2 tensor is:
///
/// <code>
/// [[1, 2],       [[5, 6],
///  [3, 4],  ==&amp;gt;  [1, 2],
///  [5, 6]]        [3, 4]]
/// </code>
/// </remarks>
member this.RandomShuffle(value : TF, ?seed : Int64, ?seed2 : Int64, ?operName : String) =
    let seed = match seed with | Some v -> Nullable v | None -> Nullable()
    let seed2 = match seed2 with | Some v -> Nullable v | None -> Nullable()
    let name = match operName with | Some n -> n | None -> null
    TF(this.TFGraph, this.TFGraph.RandomShuffle(value.TFOutput, seed, seed2, name))
/// <summary>
/// A queue that randomizes the order of elements.
/// </summary>
/// <param name="component_types">The type of each component in a value.</param>
/// <param name="shapes">
/// Optional argument. The shape of each component in a value. The length of
/// this attr must be either 0 or the same as the length of component_types.
/// If the length is 0, the shapes of queue elements are not constrained, and
/// only one element may be dequeued at a time.
/// </param>
/// <param name="capacity">
/// Optional argument. The upper bound on the number of elements in this
/// queue. Negative numbers mean no limit.
/// </param>
/// <param name="min_after_dequeue">
/// Optional argument. Dequeue will block unless there would be this many
/// elements after the dequeue or the queue is closed. This ensures a minimum
/// level of mixing of elements.
/// </param>
/// <param name="seed">
/// Optional argument. If either seed or seed2 is set to be non-zero, the
/// random number generator is seeded by the given seed. Otherwise, a random
/// seed is used.
/// </param>
/// <param name="seed2">
/// Optional argument. A second seed to avoid seed collision.
/// </param>
/// <param name="container">
/// Optional argument. If non-empty, this queue is placed in the given
/// container. Otherwise, a default container is used.
/// </param>
/// <param name="shared_name">
/// Optional argument. If non-empty, this queue will be shared under the
/// given name across multiple sessions.
/// </param>
/// <param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'RandomShuffleQueue'.
/// </param>
/// <returns>
/// The handle to the queue. The TFOperation can be fetched from the
/// resulting value via its Operation property.
/// </returns>
member this.RandomShuffleQueue(component_types : TFDataType[], ?shapes : TFShape[], ?capacity : Int64, ?min_after_dequeue : Int64, ?seed : Int64, ?seed2 : Int64, ?container : String, ?shared_name : String, ?operName : String) =
    // Reference-typed options fall back to null; value-typed ones to an empty Nullable.
    let shapes = match shapes with | Some s -> s | None -> null
    let capacity = match capacity with | Some v -> Nullable v | None -> Nullable()
    let min_after_dequeue = match min_after_dequeue with | Some v -> Nullable v | None -> Nullable()
    let seed = match seed with | Some v -> Nullable v | None -> Nullable()
    let seed2 = match seed2 with | Some v -> Nullable v | None -> Nullable()
    let container = match container with | Some c -> c | None -> null
    let shared_name = match shared_name with | Some s -> s | None -> null
    let operName = match operName with | Some n -> n | None -> null
    TF(this.TFGraph, this.TFGraph.RandomShuffleQueue(component_types, shapes, capacity, min_after_dequeue, seed, seed2, container, shared_name, operName))
/// <summary>
/// A queue that randomizes the order of elements.
/// </summary><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'RandomShuffleQueueV2'.
/// </param><param name="shapes">
/// Optional argument
/// The shape of each component in a value. The length of this attr must
/// be either 0 or the same as the length of component_types. If the length of
/// this attr is 0, the shapes of queue elements are not constrained, and
/// only one element may be dequeued at a time.
/// </param><param name="capacity">
/// Optional argument
/// The upper bound on the number of elements in this queue.
/// Negative numbers mean no limit.
/// </param><param name="min_after_dequeue">
/// Optional argument
/// Dequeue will block unless there would be this
/// many elements after the dequeue or the queue is closed. This
/// ensures a minimum level of mixing of elements.
/// </param><param name="seed">
/// Optional argument
/// If either seed or seed2 is set to be non-zero, the random number
/// generator is seeded by the given seed. Otherwise, a random seed is used.
/// </param><param name="seed2">
/// Optional argument
/// A second seed to avoid seed collision.
/// </param><param name="container">
/// Optional argument
/// If non-empty, this queue is placed in the given container.
/// Otherwise, a default container is used.
/// </param><param name="shared_name">
/// Optional argument
/// If non-empty, this queue will be shared under the given name
/// across multiple sessions.
/// </param><param name="component_types">
/// The type of each component in a value.
/// </param><returns>
/// The handle to the queue.
/// The TFOperation can be fetched from the resulting TFOutput, by fetching the Operation property from the result.
/// </returns>
member this.RandomShuffleQueueV2(component_types : TFDataType[], ?shapes : TFShape[], ?capacity : Int64, ?min_after_dequeue : Int64, ?seed : Int64, ?seed2 : Int64, ?container : String, ?shared_name : String, ?operName : String) =
    // Same plumbing as RandomShuffleQueue, targeting the V2 graph op:
    // optionals become Nullable/null and the output is rewrapped in TF.
    let g = this.TFGraph
    let handle =
        g.RandomShuffleQueueV2(
            component_types,
            Option.toObj shapes,
            Option.toNullable capacity,
            Option.toNullable min_after_dequeue,
            Option.toNullable seed,
            Option.toNullable seed2,
            Option.toObj container,
            Option.toObj shared_name,
            Option.toObj operName)
    TF(g, handle)
/// <summary>
/// Outputs random values from a normal distribution.
/// </summary><param name="shape">
/// The shape of the output tensor.
/// </param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'RandomStandardNormal'.
/// </param><param name="seed">
/// Optional argument
/// If either <c>seed</c> or <c>seed2</c> are set to be non-zero, the random number
/// generator is seeded by the given seed. Otherwise, it is seeded by a
/// random seed.
/// </param><param name="seed2">
/// Optional argument
/// A second seed to avoid seed collision.
/// </param><param name="dtype">
/// The type of the output.
/// </param><returns>
/// A tensor of the specified shape filled with random normal values.
/// The TFOperation can be fetched from the resulting TFOutput, by fetching the Operation property from the result.
/// </returns><remarks>
/// The generated values will have mean 0 and standard deviation 1.
/// </remarks>
member this.RandomStandardNormal(shape : TF, dtype : TFDataType, ?seed : Int64, ?seed2 : Int64, ?operName : String) =
    // Emit a RandomStandardNormal op on this graph; seeds map to
    // Nullable<int64>, a missing operName maps to null.
    let g = this.TFGraph
    TF(g, g.RandomStandardNormal(shape.TFOutput, dtype, Option.toNullable seed, Option.toNullable seed2, Option.toObj operName))
/// <summary>
/// Outputs random values from a uniform distribution.
/// </summary><param name="shape">
/// The shape of the output tensor.
/// </param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'RandomUniform'.
/// </param><param name="seed">
/// Optional argument
/// If either <c>seed</c> or <c>seed2</c> are set to be non-zero, the random number
/// generator is seeded by the given seed. Otherwise, it is seeded by a
/// random seed.
/// </param><param name="seed2">
/// Optional argument
/// A second seed to avoid seed collision.
/// </param><param name="dtype">
/// The type of the output.
/// </param><returns>
/// A tensor of the specified shape filled with uniform random values.
/// The TFOperation can be fetched from the resulting TFOutput, by fetching the Operation property from the result.
/// </returns><remarks>
/// The generated values follow a uniform distribution in the range <c>[0, 1)</c>. The
/// lower bound 0 is included in the range, while the upper bound 1 is excluded.
/// </remarks>
member this.RandomUniform(shape : TF, dtype : TFDataType, ?seed : Int64, ?seed2 : Int64, ?operName : String) =
    // Emit a RandomUniform op on this graph; seeds map to
    // Nullable<int64>, a missing operName maps to null.
    let g = this.TFGraph
    TF(g, g.RandomUniform(shape.TFOutput, dtype, Option.toNullable seed, Option.toNullable seed2, Option.toObj operName))
/// <summary>
/// Outputs random integers from a uniform distribution.
/// </summary><param name="shape">
/// The shape of the output tensor.
/// </param><param name="minval">
/// 0-D. Inclusive lower bound on the generated integers.
/// </param><param name="maxval">
/// 0-D. Exclusive upper bound on the generated integers.
/// </param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'RandomUniformInt'.
/// </param><param name="seed">
/// Optional argument
/// If either <c>seed</c> or <c>seed2</c> are set to be non-zero, the random number
/// generator is seeded by the given seed. Otherwise, it is seeded by a
/// random seed.
/// </param><param name="seed2">
/// Optional argument
/// A second seed to avoid seed collision.
/// </param><returns>
/// A tensor of the specified shape filled with uniform random integers.
/// The TFOperation can be fetched from the resulting TFOutput, by fetching the Operation property from the result.
/// </returns><remarks>
/// The generated values are uniform integers in the range <c>[minval, maxval)</c>.
/// The lower bound <c>minval</c> is included in the range, while the upper bound
/// <c>maxval</c> is excluded.
///
/// The random integers are slightly biased unless <c>maxval - minval</c> is an exact
/// power of two. The bias is small for values of <c>maxval - minval</c> significantly
/// smaller than the range of the output (either <c>2^32</c> or <c>2^64</c>).
/// </remarks>
member this.RandomUniformInt(shape : TF, minval : TF, maxval : TF, ?seed : Int64, ?seed2 : Int64, ?operName : String) =
    // Unwrap the three TF operands to TFOutput, convert the optional
    // seeds/name, and rewrap the single result.
    let g = this.TFGraph
    TF(g, g.RandomUniformInt(shape.TFOutput, minval.TFOutput, maxval.TFOutput, Option.toNullable seed, Option.toNullable seed2, Option.toObj operName))
/// <summary>
/// Creates a sequence of numbers.
/// </summary><param name="start">
/// 0-D (scalar). First entry in the sequence.
/// </param><param name="limit">
/// 0-D (scalar). Upper limit of sequence, exclusive.
/// </param><param name="delta">
/// 0-D (scalar). Optional. Default is 1. Number that increments <c>start</c>.
/// </param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'Range'.
/// </param><returns>
/// 1-D.
/// The TFOperation can be fetched from the resulting TFOutput, by fetching the Operation property from the result.
/// </returns><remarks>
/// This operation creates a sequence of numbers that begins at <c>start</c> and
/// extends by increments of <c>delta</c> up to but not including <c>limit</c>.
///
/// For example:
///
/// <code>
/// # 'start' is 3
/// # 'limit' is 18
/// # 'delta' is 3
/// tf.range(start, limit, delta) ==&amp;gt; [3, 6, 9, 12, 15]
/// </code></remarks>
member this.Range(start : TF, limit : TF, delta : TF, ?operName : String) =
    // Delegate to the graph's Range op and wrap its output in TF.
    let g = this.TFGraph
    TF(g, g.Range(start.TFOutput, limit.TFOutput, delta.TFOutput, Option.toObj operName))
/// <summary>
/// Creates a dataset with a range of values. Corresponds to python's xrange.
/// </summary><param name="start">
/// corresponds to start in python's xrange().
/// </param><param name="stop">
/// corresponds to stop in python's xrange().
/// </param><param name="step">
/// corresponds to step in python's xrange().
/// </param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'RangeDataset'.
/// </param><param name="output_types"></param><param name="output_shapes"></param><returns>
/// The TFOperation can be fetched from the resulting TFOutput, by fetching the Operation property from the result.
/// </returns>
member this.RangeDataset(start : TF, stop : TF, step : TF, output_types : TFDataType[], output_shapes : TFShape[], ?operName : String) =
    // Delegate to the graph's RangeDataset op and wrap its output in TF.
    let g = this.TFGraph
    TF(g, g.RangeDataset(start.TFOutput, stop.TFOutput, step.TFOutput, output_types, output_shapes, Option.toObj operName))
/// <summary>
/// Returns the rank of a tensor.
/// </summary><param name="input"></param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'Rank'.
/// </param><returns>
/// The TFOperation can be fetched from the resulting TFOutput, by fetching the Operation property from the result.
/// </returns><remarks>
/// This operation returns an integer representing the rank of <c>input</c>.
///
/// For example:
///
/// <code>
/// # 't' is [[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]]
/// # shape of tensor 't' is [2, 2, 3]
/// rank(t) ==&amp;gt; 3
/// </code>
///
/// **Note**: The rank of a tensor is not the same as the rank of a matrix. The rank
/// of a tensor is the number of indices required to uniquely select each element
/// of the tensor. Rank is also known as "order", "degree", or "ndims."
/// </remarks>
member this.Rank(input : TF, ?operName : String) =
    // Delegate to the graph's Rank op and wrap its output in TF.
    let g = this.TFGraph
    TF(g, g.Rank(input.TFOutput, Option.toObj operName))
/// <summary>
/// Returns the number of records this Reader has produced.
/// </summary><param name="reader_handle">
/// Handle to a Reader.
/// </param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'ReaderNumRecordsProduced'.
/// </param><returns>
/// The TFOperation can be fetched from the resulting TFOutput, by fetching the Operation property from the result.
/// </returns><remarks>
/// This is the same as the number of ReaderRead executions that have
/// succeeded.
/// </remarks>
member this.ReaderNumRecordsProduced(reader_handle : TF, ?operName : String) =
    // Delegate to the graph op and rewrap the scalar output in TF.
    let g = this.TFGraph
    TF(g, g.ReaderNumRecordsProduced(reader_handle.TFOutput, Option.toObj operName))
/// <summary>
/// Returns the number of records this Reader has produced.
/// </summary><param name="reader_handle">
/// Handle to a Reader.
/// </param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'ReaderNumRecordsProducedV2'.
/// </param><returns>
/// The TFOperation can be fetched from the resulting TFOutput, by fetching the Operation property from the result.
/// </returns><remarks>
/// This is the same as the number of ReaderRead executions that have
/// succeeded.
/// </remarks>
member this.ReaderNumRecordsProducedV2(reader_handle : TF, ?operName : String) =
    // V2 variant: delegate to the graph op and rewrap the output in TF.
    let g = this.TFGraph
    TF(g, g.ReaderNumRecordsProducedV2(reader_handle.TFOutput, Option.toObj operName))
/// <summary>
/// Returns the number of work units this Reader has finished processing.
/// </summary><param name="reader_handle">
/// Handle to a Reader.
/// </param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'ReaderNumWorkUnitsCompleted'.
/// </param><returns>
/// The TFOperation can be fetched from the resulting TFOutput, by fetching the Operation property from the result.
/// </returns>
member this.ReaderNumWorkUnitsCompleted(reader_handle : TF, ?operName : String) =
    // Delegate to the graph op and rewrap the output in TF.
    let g = this.TFGraph
    TF(g, g.ReaderNumWorkUnitsCompleted(reader_handle.TFOutput, Option.toObj operName))
/// <summary>
/// Returns the number of work units this Reader has finished processing.
/// </summary><param name="reader_handle">
/// Handle to a Reader.
/// </param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'ReaderNumWorkUnitsCompletedV2'.
/// </param><returns>
/// The TFOperation can be fetched from the resulting TFOutput, by fetching the Operation property from the result.
/// </returns>
member this.ReaderNumWorkUnitsCompletedV2(reader_handle : TF, ?operName : String) =
    // V2 variant: delegate to the graph op and rewrap the output in TF.
    let g = this.TFGraph
    TF(g, g.ReaderNumWorkUnitsCompletedV2(reader_handle.TFOutput, Option.toObj operName))
/// <summary>
/// Returns the next record (key, value pair) produced by a Reader.
/// </summary><param name="reader_handle">
/// Handle to a Reader.
/// </param><param name="queue_handle">
/// Handle to a Queue, with string work items.
/// </param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'ReaderRead'.
/// </param><returns>
/// Returns a tuple with multiple values, as follows:
/// key: A scalar.
/// value: A scalar.
/// The TFOperation can be fetched from any of the TFOutputs returned in the tuple values, by fetching the Operation property.
/// </returns><remarks>
/// Will dequeue from the input queue if necessary (e.g. when the
/// Reader needs to start reading from a new file since it has finished
/// with the previous file).
/// </remarks>
member this.ReaderRead(reader_handle : TF, queue_handle : TF, ?operName : String) =
    // Returns the underlying graph's (key, value) result as-is,
    // without wrapping the outputs in TF.
    this.TFGraph.ReaderRead(reader_handle.TFOutput, queue_handle.TFOutput, Option.toObj operName)
/// <summary>
/// Returns up to <c>num_records</c> (key, value) pairs produced by a Reader.
/// </summary><param name="reader_handle">
/// Handle to a <c>Reader</c>.
/// </param><param name="queue_handle">
/// Handle to a <c>Queue</c>, with string work items.
/// </param><param name="num_records">
/// number of records to read from <c>Reader</c>.
/// </param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'ReaderReadUpTo'.
/// </param><returns>
/// Returns a tuple with multiple values, as follows:
/// keys: A 1-D tensor.
/// values: A 1-D tensor.
/// The TFOperation can be fetched from any of the TFOutputs returned in the tuple values, by fetching the Operation property.
/// </returns><remarks>
/// Will dequeue from the input queue if necessary (e.g. when the
/// Reader needs to start reading from a new file since it has finished
/// with the previous file).
/// It may return less than <c>num_records</c> even before the last batch.
/// </remarks>
member this.ReaderReadUpTo(reader_handle : TF, queue_handle : TF, num_records : TF, ?operName : String) =
    // Returns the underlying graph's (keys, values) result as-is,
    // without wrapping the outputs in TF.
    this.TFGraph.ReaderReadUpTo(reader_handle.TFOutput, queue_handle.TFOutput, num_records.TFOutput, Option.toObj operName)
/// <summary>
/// Returns up to <c>num_records</c> (key, value) pairs produced by a Reader.
/// </summary><param name="reader_handle">
/// Handle to a <c>Reader</c>.
/// </param><param name="queue_handle">
/// Handle to a <c>Queue</c>, with string work items.
/// </param><param name="num_records">
/// number of records to read from <c>Reader</c>.
/// </param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'ReaderReadUpToV2'.
/// </param><returns>
/// Returns a tuple with multiple values, as follows:
/// keys: A 1-D tensor.
/// values: A 1-D tensor.
/// The TFOperation can be fetched from any of the TFOutputs returned in the tuple values, by fetching the Operation property.
/// </returns><remarks>
/// Will dequeue from the input queue if necessary (e.g. when the
/// Reader needs to start reading from a new file since it has finished
/// with the previous file).
/// It may return less than <c>num_records</c> even before the last batch.
/// </remarks>
member this.ReaderReadUpToV2(reader_handle : TF, queue_handle : TF, num_records : TF, ?operName : String) =
    // V2 variant: returns the underlying graph's (keys, values) result
    // as-is, without wrapping the outputs in TF.
    this.TFGraph.ReaderReadUpToV2(reader_handle.TFOutput, queue_handle.TFOutput, num_records.TFOutput, Option.toObj operName)
/// <summary>
/// Returns the next record (key, value pair) produced by a Reader.
/// </summary><param name="reader_handle">
/// Handle to a Reader.
/// </param><param name="queue_handle">
/// Handle to a Queue, with string work items.
/// </param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'ReaderReadV2'.
/// </param><returns>
/// Returns a tuple with multiple values, as follows:
/// key: A scalar.
/// value: A scalar.
/// The TFOperation can be fetched from any of the TFOutputs returned in the tuple values, by fetching the Operation property.
/// </returns><remarks>
/// Will dequeue from the input queue if necessary (e.g. when the
/// Reader needs to start reading from a new file since it has finished
/// with the previous file).
/// </remarks>
member this.ReaderReadV2(reader_handle : TF, queue_handle : TF, ?operName : String) =
    // V2 variant: returns the underlying graph's (key, value) result
    // as-is, without wrapping the outputs in TF.
    this.TFGraph.ReaderReadV2(reader_handle.TFOutput, queue_handle.TFOutput, Option.toObj operName)
/// <summary>
/// Restore a Reader to its initial clean state.
/// </summary><param name="reader_handle">
/// Handle to a Reader.
/// </param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'ReaderReset'.
/// </param><returns>
/// Returns the description of the operation
/// </returns>
member this.ReaderReset(reader_handle : TF, ?operName : String) =
    // Returns the underlying graph's operation description directly.
    this.TFGraph.ReaderReset(reader_handle.TFOutput, Option.toObj operName)
/// <summary>
/// Restore a Reader to its initial clean state.
/// </summary><param name="reader_handle">
/// Handle to a Reader.
/// </param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'ReaderResetV2'.
/// </param><returns>
/// Returns the description of the operation
/// </returns>
member this.ReaderResetV2(reader_handle : TF, ?operName : String) =
    // V2 variant: returns the underlying graph's operation description directly.
    this.TFGraph.ReaderResetV2(reader_handle.TFOutput, Option.toObj operName)
/// <summary>
/// Restore a reader to a previously saved state.
/// </summary><param name="reader_handle">
/// Handle to a Reader.
/// </param><param name="state">
/// Result of a ReaderSerializeState of a Reader with type
/// matching reader_handle.
/// </param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'ReaderRestoreState'.
/// </param><returns>
/// Returns the description of the operation
/// </returns><remarks>
/// Not all Readers support being restored, so this can produce an
/// Unimplemented error.
/// </remarks>
member this.ReaderRestoreState(reader_handle : TF, state : TF, ?operName : String) =
    // Returns the underlying graph's operation description directly.
    this.TFGraph.ReaderRestoreState(reader_handle.TFOutput, state.TFOutput, Option.toObj operName)
/// <summary>
/// Restore a reader to a previously saved state.
/// </summary><param name="reader_handle">
/// Handle to a Reader.
/// </param><param name="state">
/// Result of a ReaderSerializeState of a Reader with type
/// matching reader_handle.
/// </param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'ReaderRestoreStateV2'.
/// </param><returns>
/// Returns the description of the operation
/// </returns><remarks>
/// Not all Readers support being restored, so this can produce an
/// Unimplemented error.
/// </remarks>
member this.ReaderRestoreStateV2(reader_handle : TF, state : TF, ?operName : String) =
    // V2 variant: returns the underlying graph's operation description directly.
    this.TFGraph.ReaderRestoreStateV2(reader_handle.TFOutput, state.TFOutput, Option.toObj operName)
/// <summary>
/// Produce a string tensor that encodes the state of a Reader.
/// </summary><param name="reader_handle">
/// Handle to a Reader.
/// </param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'ReaderSerializeState'.
/// </param><returns>
/// The TFOperation can be fetched from the resulting TFOutput, by fetching the Operation property from the result.
/// </returns><remarks>
/// Not all Readers support being serialized, so this can produce an
/// Unimplemented error.
/// </remarks>
member this.ReaderSerializeState(reader_handle : TF, ?operName : String) =
    // Delegate to the graph op and rewrap the serialized-state output in TF.
    let g = this.TFGraph
    TF(g, g.ReaderSerializeState(reader_handle.TFOutput, Option.toObj operName))
/// <summary>
/// Produce a string tensor that encodes the state of a Reader.
/// </summary><param name="reader_handle">
/// Handle to a Reader.
/// </param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'ReaderSerializeStateV2'.
/// </param><returns>
/// The TFOperation can be fetched from the resulting TFOutput, by fetching the Operation property from the result.
/// </returns><remarks>
/// Not all Readers support being serialized, so this can produce an
/// Unimplemented error.
/// </remarks>
member this.ReaderSerializeStateV2(reader_handle : TF, ?operName : String) =
    // V2 variant: delegate to the graph op and rewrap the output in TF.
    let g = this.TFGraph
    TF(g, g.ReaderSerializeStateV2(reader_handle.TFOutput, Option.toObj operName))
/// <summary>
/// Reads and outputs the entire contents of the input filename.
/// </summary><param name="filename"></param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'ReadFile'.
/// </param><returns>
/// The TFOperation can be fetched from the resulting TFOutput, by fetching the Operation property from the result.
/// </returns>
member this.ReadFile(filename : TF, ?operName : String) =
    // Delegate to the graph's ReadFile op and wrap its output in TF.
    let g = this.TFGraph
    TF(g, g.ReadFile(filename.TFOutput, Option.toObj operName))
/// <summary>
/// Reads the value of a variable.
/// </summary><param name="resource">
/// handle to the resource in which to store the variable.
/// </param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'ReadVariableOp'.
/// </param><param name="dtype">
/// the dtype of the value.
/// </param><returns>
/// The TFOperation can be fetched from the resulting TFOutput, by fetching the Operation property from the result.
/// </returns><remarks>
/// The tensor returned by this operation is immutable.
///
/// The value returned by this operation is guaranteed to be influenced by all the
/// writes on which this operation depends directly or indirectly, and to not be
/// influenced by any of the writes which depend directly or indirectly on this
/// operation.
/// </remarks>
member this.ReadVariableOp(resource : TF, dtype : TFDataType, ?operName : String) =
    // Delegate to the graph's ReadVariableOp and wrap its output in TF.
    let g = this.TFGraph
    TF(g, g.ReadVariableOp(resource.TFOutput, dtype, Option.toObj operName))
/// <summary>
/// Returns the real part of a complex number.
/// </summary><param name="input"></param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'Real'.
/// </param><param name="Tout">
/// Optional argument
/// </param><returns>
/// The TFOperation can be fetched from the resulting TFOutput, by fetching the Operation property from the result.
/// </returns><remarks>
/// Given a tensor <c>input</c> of complex numbers, this operation returns a tensor of
/// type <c>float</c> that is the real part of each element in <c>input</c>. All elements in
/// <c>input</c> must be complex numbers of the form \\(a + bj\\), where *a* is the real
/// part returned by this operation and *b* is the imaginary part.
///
/// For example:
///
/// <code>
/// # tensor 'input' is [-2.25 + 4.75j, 3.25 + 5.75j]
/// tf.real(input) ==&amp;gt; [-2.25, 3.25]
/// </code></remarks>
member this.Real(input : TF, ?Tout : TFDataType, ?operName : String) =
    // Optional output dtype becomes Nullable<TFDataType>; a missing
    // operName becomes null. Result is rewrapped in TF.
    let g = this.TFGraph
    TF(g, g.Real(input.TFOutput, Option.toNullable Tout, Option.toObj operName))
/// <summary>
/// Returns x / y element-wise for real types.
/// </summary><param name="x"></param><param name="y"></param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'RealDiv'.
/// </param><returns>
/// The TFOperation can be fetched from the resulting TFOutput, by fetching the Operation property from the result.
/// </returns><remarks>
/// If <c>x</c> and <c>y</c> are reals, this will return the floating-point division.
///
/// *NOTE*: <c>Div</c> supports broadcasting. More about broadcasting
/// [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
/// </remarks>
member this.RealDiv(x : TF, y : TF, ?operName : String) =
    // Delegate to the graph's RealDiv op and wrap its output in TF.
    let g = this.TFGraph
    TF(g, g.RealDiv(x.TFOutput, y.TFOutput, Option.toObj operName))
/// <summary>
/// Computes the reciprocal of x element-wise.
/// </summary><param name="x"></param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'Reciprocal'.
/// </param><returns>
/// The TFOperation can be fetched from the resulting TFOutput, by fetching the Operation property from the result.
/// </returns><remarks>
/// I.e., \\(y = 1 / x\\).
/// </remarks>
member this.Reciprocal(x : TF, ?operName : String) =
    // Delegate to the graph's Reciprocal op and wrap its output in TF.
    let g = this.TFGraph
    TF(g, g.Reciprocal(x.TFOutput, Option.toObj operName))
/// <summary>
/// Computes the gradient for the inverse of <c>x</c> wrt its input.
/// </summary><param name="y"></param><param name="dy"></param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'ReciprocalGrad'.
/// </param><returns>
/// The TFOperation can be fetched from the resulting TFOutput, by fetching the Operation property from the result.
/// </returns><remarks>
/// Specifically, <c>grad = -dy * y*y</c>, where <c>y = 1/x</c>, and <c>dy</c>
/// is the corresponding input gradient.
/// </remarks>
member this.ReciprocalGrad(y : TF, dy : TF, ?operName : String) =
    // Delegate to the graph's ReciprocalGrad op and wrap its output in TF.
    let g = this.TFGraph
    TF(g, g.ReciprocalGrad(y.TFOutput, dy.TFOutput, Option.toObj operName))
/// <summary>
/// Emits randomized records.
/// </summary><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'RecordInput'.
/// </param><param name="file_random_seed">
/// Optional argument
/// Random seeds used to produce randomized records.
/// </param><param name="file_shuffle_shift_ratio">
/// Optional argument
/// Shifts the list of files after the list is randomly
/// shuffled.
/// </param><param name="file_buffer_size">
/// Optional argument
/// The randomization shuffling buffer.
/// </param><param name="file_parallelism">
/// Optional argument
/// How many sstables are opened and concurrently iterated over.
/// </param><param name="batch_size">
/// Optional argument
/// The batch size.
/// </param><param name="compression_type">
/// Optional argument
/// The type of compression for the file. Currently ZLIB and
/// GZIP are supported. Defaults to none.
/// </param><param name="file_pattern">
/// Glob pattern for the data files.
/// </param><returns>
/// A tensor of shape [batch_size].
/// The TFOperation can be fetched from the resulting TFOutput, by fetching the Operation property from the result.
/// </returns>
member this.RecordInput(file_pattern : String, ?file_random_seed : Int64, ?file_shuffle_shift_ratio : Single, ?file_buffer_size : Int64, ?file_parallelism : Int64, ?batch_size : Int64, ?compression_type : String, ?operName : String) =
    // Convert each F# optional into the Nullable/null representation the
    // TensorFlowSharp API expects, then rewrap the op's output in TF.
    let g = this.TFGraph
    let output =
        g.RecordInput(
            file_pattern,
            Option.toNullable file_random_seed,
            Option.toNullable file_shuffle_shift_ratio,
            Option.toNullable file_buffer_size,
            Option.toNullable file_parallelism,
            Option.toNullable batch_size,
            Option.toObj compression_type,
            Option.toObj operName)
    TF(g, output)
/// <summary>
/// An op that receives embedding activations on the TPU.
/// </summary><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'RecvTPUEmbeddingActivations'.
/// </param><param name="num_outputs">
/// The number of output activation tensors, equal to the number of
/// embedding tables in the model.
/// </param><param name="config">
/// Serialized TPUEmbeddingConfiguration proto.
/// </param><returns>
/// A TensorList of embedding activations containing one Tensor per
/// embedding table in the model.
/// The TFOperation can be fetched from the resulting TFOutput, by fetching the Operation property from the result.
/// </returns><remarks>
/// The TPU system performs the embedding lookups and aggregations specified by
/// the arguments to TPUEmbeddingEnqueue(Integer/Sparse/SparseTensor)Batch. The
/// results of these aggregations are visible to the Tensorflow Graph as the
/// outputs of a RecvTPUEmbeddingActivations op. This op returns a list containing
/// one Tensor of activations per table specified in the model. There can be at
/// most one RecvTPUEmbeddingActivations op in the TPU graph.
/// </remarks>
member this.RecvTPUEmbeddingActivations(num_outputs : Int64, config : String, ?operName : String) =
    // The graph op returns one output per embedding table; wrap each
    // in a TF handle tied to this graph.
    let g = this.TFGraph
    g.RecvTPUEmbeddingActivations(num_outputs, config, Option.toObj operName)
    |> Array.map (fun output -> TF(g, output))
/// <summary>
/// Joins a string Tensor across the given dimensions.
/// </summary><param name="inputs">
/// The input to be joined. All reduced indices must have non-zero size.
/// </param><param name="reduction_indices">
/// The dimensions to reduce over. Dimensions are reduced in the
/// order specified. Omitting <c>reduction_indices</c> is equivalent to passing
/// <c>[n-1, n-2, ..., 0]</c>. Negative indices from <c>-n</c> to <c>-1</c> are supported.
/// </param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'ReduceJoin'.
/// </param><param name="keep_dims">
/// Optional argument
/// If <c>True</c>, retain reduced dimensions with length <c>1</c>.
/// </param><param name="separator">
/// Optional argument
/// The separator to use when joining.
/// </param><returns>
/// Has shape equal to that of the input with reduced dimensions removed or
/// set to <c>1</c> depending on <c>keep_dims</c>.
/// The TFOperation can be fetched from the resulting TFOutput, by fetching the Operation property from the result.
/// </returns><remarks>
/// Computes the string join across dimensions in the given string Tensor of shape
/// <c>[\\(d_0, d_1, ..., d_{n-1}\\)]</c>. Returns a new Tensor created by joining the input
/// strings with the given separator (default: empty string). Negative indices are
/// counted backwards from the end, with <c>-1</c> being equivalent to <c>n - 1</c>. If
/// indices are not specified, joins across all dimensions beginning from <c>n - 1</c>
/// through <c>0</c>.
///
/// For example:
///
/// <code>
/// # tensor <c>a</c> is [["a", "b"], ["c", "d"]]
/// tf.reduce_join(a, 0) ==&amp;gt; ["ac", "bd"]
/// tf.reduce_join(a, 1) ==&amp;gt; ["ab", "cd"]
/// tf.reduce_join(a, -2) = tf.reduce_join(a, 0) ==&amp;gt; ["ac", "bd"]
/// tf.reduce_join(a, -1) = tf.reduce_join(a, 1) ==&amp;gt; ["ab", "cd"]
/// tf.reduce_join(a, 0, keep_dims=True) ==&amp;gt; [["ac", "bd"]]
/// tf.reduce_join(a, 1, keep_dims=True) ==&amp;gt; [["ab"], ["cd"]]
/// tf.reduce_join(a, 0, separator=".") ==&amp;gt; ["a.c", "b.d"]
/// tf.reduce_join(a, [0, 1]) ==&amp;gt; "acbd"
/// tf.reduce_join(a, [1, 0]) ==&amp;gt; "abcd"
/// tf.reduce_join(a, []) ==&amp;gt; [["a", "b"], ["c", "d"]]
/// tf.reduce_join(a) = tf.reduce_join(a, [1, 0]) ==&amp;gt; "abcd"
/// </code></remarks>
member this.ReduceJoin(inputs : TF, reduction_indices : TF, ?keep_dims : Boolean, ?separator : String, ?operName : String) =
let keep_dims = defaultArg (keep_dims |> Option.map Nullable) (Nullable())
let separator = defaultArg separator null
let operName = defaultArg operName null
TF(this.TFGraph, this.TFGraph.ReduceJoin(inputs.TFOutput, reduction_indices.TFOutput, keep_dims, separator, operName))
/// <summary>
/// Creates or finds a child frame, and makes <c>data</c> available to the child frame.
/// </summary><param name="data">
/// The tensor to be made available to the child frame.
/// </param><param name="frame_name">
/// The name of the child frame; used by the <c>Executor</c> to identify frames.
/// </param><param name="is_constant">
/// Optional argument.
/// If true, the output is constant within the child frame.
/// </param><param name="parallel_iterations">
/// Optional argument.
/// The number of iterations allowed to run in parallel in the child frame.
/// </param><param name="operName">
/// If specified, names the created operation; otherwise it is named 'RefEnter'.
/// </param><returns>
/// The same tensor as <c>data</c>.
/// The TFOperation can be fetched from the resulting TFOutput, by fetching the Operation property from the result.
/// </returns>
member this.RefEnter(data : TF, frame_name : String, ?is_constant : Boolean, ?parallel_iterations : Int64, ?operName : String) =
    let entered =
        this.TFGraph.RefEnter(
            data.TFOutput,
            frame_name,
            Option.toNullable is_constant,
            Option.toNullable parallel_iterations,
            Option.toObj operName)
    TF(this.TFGraph, entered)
/// <summary>
/// Exits the current frame to its parent frame, making <c>data</c> available there.
/// </summary><param name="data">
/// The tensor to be made available to the parent frame.
/// </param><param name="operName">
/// If specified, names the created operation; otherwise it is named 'RefExit'.
/// </param><returns>
/// The same tensor as <c>data</c>.
/// The TFOperation can be fetched from the resulting TFOutput, by fetching the Operation property from the result.
/// </returns>
member this.RefExit(data : TF, ?operName : String) =
    TF(this.TFGraph, this.TFGraph.RefExit(data.TFOutput, Option.toObj operName))
/// <summary>
/// Return the same ref tensor as the input ref tensor.
/// </summary><param name="input">The ref tensor to pass through.</param><param name="operName">
/// If specified, names the created operation; otherwise it is named 'RefIdentity'.
/// </param><returns>
/// The TFOperation can be fetched from the resulting TFOutput, by fetching the Operation property from the result.
/// </returns>
member this.RefIdentity(input : TF, ?operName : String) =
    TF(this.TFGraph, this.TFGraph.RefIdentity(input.TFOutput, Option.toObj operName))
/// <summary>
/// Forwards the value of an available tensor from <c>inputs</c> to <c>output</c>.
/// </summary><param name="inputs">
/// The input tensors, exactly one of which will become available.
/// </param><param name="operName">
/// If specified, names the created operation; otherwise it is named 'RefMerge'.
/// </param><returns>
/// Returns a tuple with multiple values, as follows:
/// output: Will be set to the available input tensor.
/// value_index: The index of the chosen input tensor in <c>inputs</c>.
/// The TFOperation can be fetched from any of the TFOutputs returned in the tuple values, by fetching the Operation property.
/// </returns><remarks><c>Merge</c> waits for at least one of the tensors in <c>inputs</c> to become
/// available; it is usually combined with <c>Switch</c> to implement branching.
/// The first tensor to become available is forwarded to <c>output</c>, and
/// <c>value_index</c> is set to its index in <c>inputs</c>.
/// </remarks>
member this.RefMerge(inputs : TF[], ?operName : String) =
    let merged = inputs |> Array.map (fun t -> t.TFOutput)
    this.TFGraph.RefMerge(merged, Option.toObj operName)
/// <summary>
/// Makes its input available to the next iteration.
/// </summary><param name="data">
/// The tensor to be made available to the next iteration.
/// </param><param name="operName">
/// If specified, names the created operation; otherwise it is named 'RefNextIteration'.
/// </param><returns>
/// The same tensor as <c>data</c>.
/// The TFOperation can be fetched from the resulting TFOutput, by fetching the Operation property from the result.
/// </returns>
member this.RefNextIteration(data : TF, ?operName : String) =
    TF(this.TFGraph, this.TFGraph.RefNextIteration(data.TFOutput, Option.toObj operName))
/// <summary>
/// Forwards the <c>index</c>th element of <c>inputs</c> to <c>output</c>.
/// </summary><param name="index">
/// A scalar that determines the input that gets selected.
/// </param><param name="inputs">
/// A list of ref tensors, one of which will be forwarded to <c>output</c>.
/// </param><param name="operName">
/// If specified, names the created operation; otherwise it is named 'RefSelect'.
/// </param><returns>
/// The forwarded tensor.
/// The TFOperation can be fetched from the resulting TFOutput, by fetching the Operation property from the result.
/// </returns>
member this.RefSelect(index : TF, inputs : TF[], ?operName : String) =
    let candidates = inputs |> Array.map (fun t -> t.TFOutput)
    TF(this.TFGraph, this.TFGraph.RefSelect(index.TFOutput, candidates, Option.toObj operName))
/// <summary>
/// Forwards the ref tensor <c>data</c> to the output port determined by <c>pred</c>.
/// </summary><param name="data">
/// The ref tensor to be forwarded to the appropriate output.
/// </param><param name="pred">
/// A scalar that specifies which output port will receive data.
/// </param><param name="operName">
/// If specified, names the created operation; otherwise it is named 'RefSwitch'.
/// </param><returns>
/// Returns a tuple with multiple values, as follows:
/// output_false: If <c>pred</c> is false, data will be forwarded to this output.
/// output_true: If <c>pred</c> is true, data will be forwarded to this output.
/// The TFOperation can be fetched from any of the TFOutputs returned in the tuple values, by fetching the Operation property.
/// </returns><remarks>
/// If <c>pred</c> is true, the <c>data</c> input is forwarded to <c>output_true</c>;
/// otherwise the data goes to <c>output_false</c>.
///
/// See also <c>Switch</c> and <c>Merge</c>.
/// </remarks>
member this.RefSwitch(data : TF, pred : TF, ?operName : String) =
    this.TFGraph.RefSwitch(data.TFOutput, pred.TFOutput, Option.toObj operName)
/// <summary>
/// Check if the input matches the regex pattern.
/// </summary><param name="input">
/// A string tensor of the text to be processed.
/// </param><param name="pattern">
/// A scalar string tensor containing the regular expression to match the input.
/// </param><param name="operName">
/// If specified, names the created operation; otherwise it is named 'RegexFullMatch'.
/// </param><returns>
/// A bool tensor with the same shape as <c>input</c>.
/// The TFOperation can be fetched from the resulting TFOutput, by fetching the Operation property from the result.
/// </returns><remarks>
/// The pattern is applied to every element of the input tensor; each boolean
/// in the output indicates whether the corresponding input element matches.
///
/// The pattern follows the re2 syntax (https://github.com/google/re2/wiki/Syntax)
/// </remarks>
member this.RegexFullMatch(input : TF, pattern : TF, ?operName : String) =
    TF(this.TFGraph, this.TFGraph.RegexFullMatch(input.TFOutput, pattern.TFOutput, Option.toObj operName))
/// <summary>
/// Replaces the match of pattern in input with rewrite.
/// </summary><param name="input">
/// The text to be processed.
/// </param><param name="pattern">
/// The regular expression to match the input.
/// </param><param name="rewrite">
/// The rewrite to be applied to the matched expression.
/// </param><param name="replace_global">
/// Optional argument.
/// If True, the replacement is global; otherwise only the first match is replaced.
/// </param><param name="operName">
/// If specified, names the created operation; otherwise it is named 'RegexReplace'.
/// </param><returns>
/// The text after applying pattern and rewrite.
/// The TFOperation can be fetched from the resulting TFOutput, by fetching the Operation property from the result.
/// </returns><remarks>
/// It follows the re2 syntax (https://github.com/google/re2/wiki/Syntax)
/// </remarks>
member this.RegexReplace(input : TF, pattern : TF, rewrite : TF, ?replace_global : Boolean, ?operName : String) =
    let replaced =
        this.TFGraph.RegexReplace(
            input.TFOutput,
            pattern.TFOutput,
            rewrite.TFOutput,
            Option.toNullable replace_global,
            Option.toObj operName)
    TF(this.TFGraph, replaced)
/// <summary>
/// Computes rectified linear: <c>max(features, 0)</c>.
/// </summary><param name="features">The input tensor.</param><param name="operName">
/// If specified, names the created operation; otherwise it is named 'Relu'.
/// </param><returns>
/// The TFOperation can be fetched from the resulting TFOutput, by fetching the Operation property from the result.
/// </returns>
member this.Relu(features : TF, ?operName : String) =
    TF(this.TFGraph, this.TFGraph.Relu(features.TFOutput, Option.toObj operName))
/// <summary>
/// Computes rectified linear 6: <c>min(max(features, 0), 6)</c>.
/// </summary><param name="features">The input tensor.</param><param name="operName">
/// If specified, names the created operation; otherwise it is named 'Relu6'.
/// </param><returns>
/// The TFOperation can be fetched from the resulting TFOutput, by fetching the Operation property from the result.
/// </returns>
member this.Relu6(features : TF, ?operName : String) =
    TF(this.TFGraph, this.TFGraph.Relu6(features.TFOutput, Option.toObj operName))
/// <summary>
/// Computes rectified linear 6 gradients for a Relu6 operation.
/// </summary><param name="gradients">
/// The backpropagated gradients to the corresponding Relu6 operation.
/// </param><param name="features">
/// The features passed as input to the corresponding Relu6 operation, or
/// its output; using either one produces the same result.
/// </param><param name="operName">
/// If specified, names the created operation; otherwise it is named 'Relu6Grad'.
/// </param><returns>
/// The gradients:
/// <c>gradients * (features &amp;gt; 0) * (features &amp;lt; 6)</c>.
/// The TFOperation can be fetched from the resulting TFOutput, by fetching the Operation property from the result.
/// </returns>
member this.Relu6Grad(gradients : TF, features : TF, ?operName : String) =
    TF(this.TFGraph, this.TFGraph.Relu6Grad(gradients.TFOutput, features.TFOutput, Option.toObj operName))
/// <summary>
/// Computes rectified linear gradients for a Relu operation.
/// </summary><param name="gradients">
/// The backpropagated gradients to the corresponding Relu operation.
/// </param><param name="features">
/// The features passed as input to the corresponding Relu operation, OR
/// the outputs of that operation (both work equivalently).
/// </param><param name="operName">
/// If specified, names the created operation; otherwise it is named 'ReluGrad'.
/// </param><returns><c>gradients * (features &amp;gt; 0)</c>.
/// The TFOperation can be fetched from the resulting TFOutput, by fetching the Operation property from the result.
/// </returns>
member this.ReluGrad(gradients : TF, features : TF, ?operName : String) =
    TF(this.TFGraph, this.TFGraph.ReluGrad(gradients.TFOutput, features.TFOutput, Option.toObj operName))
/// <summary>
/// Execute a sub graph on a remote processor.
/// </summary><param name="inputs">
/// Arbitrary number of tensors with arbitrary data types
/// </param><param name="Toutputs">The data types of the output tensors.</param><param name="serialized_remote_fused_graph_execute_info">
/// Serialized protocol buffer
/// of RemoteFusedGraphExecuteInfo which contains graph specifications.
/// </param><param name="operName">
/// If specified, names the created operation; otherwise it is named 'RemoteFusedGraphExecute'.
/// </param><returns>
/// Arbitrary number of tensors with arbitrary data types
/// The TFOperation can be fetched from the resulting TFOutput, by fetching the Operation property from the result.
/// </returns><remarks>
/// The graph specifications (graph, input tensors, output names) are stored as a
/// serialized RemoteFusedGraphExecuteInfo protocol buffer and handed to a dedicated
/// registered remote fused graph executor, which sends them to a remote processor,
/// executes the graph there, and passes the results to consumer nodes as outputs
/// of this node.
/// </remarks>
member this.RemoteFusedGraphExecute(inputs : TF[], Toutputs : TFDataType[], serialized_remote_fused_graph_execute_info : String, ?operName : String) =
    let inputOutputs = inputs |> Array.map (fun t -> t.TFOutput)
    this.TFGraph.RemoteFusedGraphExecute(inputOutputs, Toutputs, serialized_remote_fused_graph_execute_info, Option.toObj operName)
    |> Array.map (fun o -> TF(this.TFGraph, o))
/// <summary>
/// Creates a dataset that emits the outputs of <c>input_dataset</c><c>count</c> times.
/// </summary><param name="input_dataset">The dataset to repeat.</param><param name="count">
/// A scalar representing the number of times that <c>input_dataset</c> should
/// be repeated. A value of <c>-1</c> indicates that it should be repeated infinitely.
/// </param><param name="output_types">The data types of the dataset elements.</param><param name="output_shapes">The shapes of the dataset elements.</param><param name="operName">
/// If specified, names the created operation; otherwise it is named 'RepeatDataset'.
/// </param><returns>
/// The TFOperation can be fetched from the resulting TFOutput, by fetching the Operation property from the result.
/// </returns>
member this.RepeatDataset(input_dataset : TF, count : TF, output_types : TFDataType[], output_shapes : TFShape[], ?operName : String) =
    let repeated =
        this.TFGraph.RepeatDataset(
            input_dataset.TFOutput,
            count.TFOutput,
            output_types,
            output_shapes,
            Option.toObj operName)
    TF(this.TFGraph, repeated)
/// <summary>
/// Given a quantized tensor described by (input, input_min, input_max), outputs a
/// range that covers the actual values present in that tensor.
/// </summary><param name="input">The quantized tensor.</param><param name="input_min">
/// The float value that the minimum quantized input value represents.
/// </param><param name="input_max">
/// The float value that the maximum quantized input value represents.
/// </param><param name="operName">
/// If specified, names the created operation; otherwise it is named 'RequantizationRange'.
/// </param><returns>
/// Returns a tuple with multiple values, as follows:
/// output_min: The computed min output.
/// output_max: the computed max output.
/// The TFOperation can be fetched from any of the TFOutputs returned in the tuple values, by fetching the Operation property.
/// </returns><remarks>
/// This op is typically used to produce the requested_output_min and
/// requested_output_max for Requantize.
/// </remarks>
member this.RequantizationRange(input : TF, input_min : TF, input_max : TF, ?operName : String) =
    this.TFGraph.RequantizationRange(input.TFOutput, input_min.TFOutput, input_max.TFOutput, Option.toObj operName)
/// <summary>
/// Convert the quantized 'input' tensor into a lower-precision 'output', using the
/// output range specified with 'requested_output_min' and 'requested_output_max'.
/// </summary><param name="input">The quantized tensor to convert.</param><param name="input_min">
/// The float value that the minimum quantized input value represents.
/// </param><param name="input_max">
/// The float value that the maximum quantized input value represents.
/// </param><param name="requested_output_min">
/// The float value that the minimum quantized output value represents.
/// </param><param name="requested_output_max">
/// The float value that the maximum quantized output value represents.
/// </param><param name="out_type">
/// The type of the output. Should be a lower bit depth than Tinput.
/// </param><param name="operName">
/// If specified, names the created operation; otherwise it is named 'Requantize'.
/// </param><returns>
/// Returns a tuple with multiple values, as follows:
/// output:
/// output_min: The requested_output_min value is copied into this output.
/// output_max: The requested_output_max value is copied into this output.
/// The TFOperation can be fetched from any of the TFOutputs returned in the tuple values, by fetching the Operation property.
/// </returns><remarks>
/// [input_min, input_max] are scalar floats that specify the range for the float
/// interpretation of the 'input' data. For example, if input_min is -1.0f and
/// input_max is 1.0f, and we are dealing with quint16 quantized data, then a 0
/// value in the 16-bit data should be interpreted as -1.0f, and a 65535 means 1.0f.
/// </remarks>
member this.Requantize(input : TF, input_min : TF, input_max : TF, requested_output_min : TF, requested_output_max : TF, out_type : TFDataType, ?operName : String) =
    this.TFGraph.Requantize(
        input.TFOutput,
        input_min.TFOutput,
        input_max.TFOutput,
        requested_output_min.TFOutput,
        requested_output_max.TFOutput,
        out_type,
        Option.toObj operName)
/// <summary>
/// Reshapes a tensor.
/// </summary><param name="tensor">The tensor to reshape.</param><param name="shape">
/// Defines the shape of the output tensor.
/// </param><param name="operName">
/// If specified, names the created operation; otherwise it is named 'Reshape'.
/// </param><returns>
/// The TFOperation can be fetched from the resulting TFOutput, by fetching the Operation property from the result.
/// </returns><remarks>
/// Given <c>tensor</c>, this operation returns a tensor that has the same values
/// as <c>tensor</c> with shape <c>shape</c>.
///
/// If one component of <c>shape</c> is the special value -1, the size of that dimension
/// is computed so that the total size remains constant. In particular, a <c>shape</c>
/// of <c>[-1]</c> flattens into 1-D. At most one component of <c>shape</c> can be -1.
///
/// If <c>shape</c> is 1-D or higher, then the operation returns a tensor with shape
/// <c>shape</c> filled with the values of <c>tensor</c>. In this case, the number of elements
/// implied by <c>shape</c> must be the same as the number of elements in <c>tensor</c>.
///
/// For example:
///
/// <code>
/// # tensor 't' is [1, 2, 3, 4, 5, 6, 7, 8, 9] with shape [9]
/// reshape(t, [3, 3]) ==&amp;gt; [[1, 2, 3],
/// [4, 5, 6],
/// [7, 8, 9]]
///
/// # pass '[-1]' to flatten 't'
/// reshape(t, [-1]) ==&amp;gt; [1, 2, 3, 4, 5, 6, 7, 8, 9]
///
/// # -1 can also be used to infer one dimension of the shape, e.g.
/// # -1 is inferred to be 3:
/// reshape(t, [3, -1]) ==&amp;gt; [[1, 2, 3],
/// [4, 5, 6],
/// [7, 8, 9]]
///
/// # tensor 't' is [7]; shape <c>[]</c> reshapes to a scalar
/// reshape(t, []) ==&amp;gt; 7
/// </code></remarks>
member this.Reshape(tensor : TF, shape : TF, ?operName : String) =
    TF(this.TFGraph, this.TFGraph.Reshape(tensor.TFOutput, shape.TFOutput, Option.toObj operName))
/// <summary>
/// Resize <c>images</c> to <c>size</c> using area interpolation.
/// </summary><param name="images">
/// 4-D with shape <c>[batch, height, width, channels]</c>.
/// </param><param name="size">
/// = A 1-D int32 Tensor of 2 elements: <c>new_height, new_width</c>. The
/// new size for the images.
/// </param><param name="align_corners">
/// Optional argument.
/// If true, the centers of the 4 corner pixels of the input and output tensors are
/// aligned, preserving the values at the corner pixels. Defaults to false.
/// </param><param name="operName">
/// If specified, names the created operation; otherwise it is named 'ResizeArea'.
/// </param><returns>
/// 4-D with shape
/// <c>[batch, new_height, new_width, channels]</c>.
/// The TFOperation can be fetched from the resulting TFOutput, by fetching the Operation property from the result.
/// </returns><remarks>
/// Input images can be of different types but output images are always float.
///
/// The range of pixel values for the output image might be slightly different
/// from the range for the input image because of limited numerical precision.
/// To guarantee an output range, for example <c>[0.0, 1.0]</c>, apply
/// <c>tf.clip_by_value</c> to the output.
///
/// Each output pixel is computed by first transforming the pixel's footprint into
/// the input tensor and then averaging the pixels that intersect the footprint. An
/// input pixel's contribution to the average is weighted by the fraction of its
/// area that intersects the footprint. This is the same as OpenCV's INTER_AREA.
/// </remarks>
member this.ResizeArea(images : TF, size : TF, ?align_corners : Boolean, ?operName : String) =
    let resized =
        this.TFGraph.ResizeArea(
            images.TFOutput,
            size.TFOutput,
            Option.toNullable align_corners,
            Option.toObj operName)
    TF(this.TFGraph, resized)
/// <summary>
/// Resize <c>images</c> to <c>size</c> using bicubic interpolation.
/// </summary><param name="images">
/// 4-D with shape <c>[batch, height, width, channels]</c>.
/// </param><param name="size">
/// = A 1-D int32 Tensor of 2 elements: <c>new_height, new_width</c>. The
/// new size for the images.
/// </param><param name="align_corners">
/// Optional argument.
/// If true, the centers of the 4 corner pixels of the input and output tensors are
/// aligned, preserving the values at the corner pixels. Defaults to false.
/// </param><param name="operName">
/// If specified, names the created operation; otherwise it is named 'ResizeBicubic'.
/// </param><returns>
/// 4-D with shape
/// <c>[batch, new_height, new_width, channels]</c>.
/// The TFOperation can be fetched from the resulting TFOutput, by fetching the Operation property from the result.
/// </returns><remarks>
/// Input images can be of different types but output images are always float.
/// </remarks>
member this.ResizeBicubic(images : TF, size : TF, ?align_corners : Boolean, ?operName : String) =
    let resized =
        this.TFGraph.ResizeBicubic(
            images.TFOutput,
            size.TFOutput,
            Option.toNullable align_corners,
            Option.toObj operName)
    TF(this.TFGraph, resized)
/// <summary>
/// Computes the gradient of bicubic interpolation.
/// </summary><param name="grads">
/// 4-D with shape <c>[batch, height, width, channels]</c>.
/// </param><param name="original_image">
/// 4-D with shape <c>[batch, orig_height, orig_width, channels]</c>,
/// The image tensor that was resized.
/// </param><param name="align_corners">
/// Optional argument.
/// If true, the centers of the 4 corner pixels of the input and grad tensors are
/// aligned. Defaults to false.
/// </param><param name="operName">
/// If specified, names the created operation; otherwise it is named 'ResizeBicubicGrad'.
/// </param><returns>
/// 4-D with shape <c>[batch, orig_height, orig_width, channels]</c>.
/// Gradients with respect to the input image. Input image must have been
/// float or double.
/// The TFOperation can be fetched from the resulting TFOutput, by fetching the Operation property from the result.
/// </returns>
member this.ResizeBicubicGrad(grads : TF, original_image : TF, ?align_corners : Boolean, ?operName : String) =
    let gradient =
        this.TFGraph.ResizeBicubicGrad(
            grads.TFOutput,
            original_image.TFOutput,
            Option.toNullable align_corners,
            Option.toObj operName)
    TF(this.TFGraph, gradient)
/// <summary>
/// Resize <c>images</c> to <c>size</c> using bilinear interpolation.
/// </summary><param name="images">
/// 4-D with shape <c>[batch, height, width, channels]</c>.
/// </param><param name="size">
/// = A 1-D int32 Tensor of 2 elements: <c>new_height, new_width</c>. The
/// new size for the images.
/// </param><param name="align_corners">
/// Optional argument.
/// If true, the centers of the 4 corner pixels of the input and output tensors are
/// aligned, preserving the values at the corner pixels. Defaults to false.
/// </param><param name="operName">
/// If specified, names the created operation; otherwise it is named 'ResizeBilinear'.
/// </param><returns>
/// 4-D with shape
/// <c>[batch, new_height, new_width, channels]</c>.
/// The TFOperation can be fetched from the resulting TFOutput, by fetching the Operation property from the result.
/// </returns><remarks>
/// Input images can be of different types but output images are always float.
/// </remarks>
member this.ResizeBilinear(images : TF, size : TF, ?align_corners : Boolean, ?operName : String) =
    let resized =
        this.TFGraph.ResizeBilinear(
            images.TFOutput,
            size.TFOutput,
            Option.toNullable align_corners,
            Option.toObj operName)
    TF(this.TFGraph, resized)
/// <summary>
/// Computes the gradient of bilinear interpolation.
/// </summary><param name="grads">
/// 4-D with shape <c>[batch, height, width, channels]</c>.
/// </param><param name="original_image">
/// 4-D with shape <c>[batch, orig_height, orig_width, channels]</c>,
/// The image tensor that was resized.
/// </param><param name="align_corners">
/// Optional argument.
/// If true, the centers of the 4 corner pixels of the input and grad tensors are
/// aligned. Defaults to false.
/// </param><param name="operName">
/// If specified, names the created operation; otherwise it is named 'ResizeBilinearGrad'.
/// </param><returns>
/// 4-D with shape <c>[batch, orig_height, orig_width, channels]</c>.
/// Gradients with respect to the input image. Input image must have been
/// float or double.
/// The TFOperation can be fetched from the resulting TFOutput, by fetching the Operation property from the result.
/// </returns>
member this.ResizeBilinearGrad(grads : TF, original_image : TF, ?align_corners : Boolean, ?operName : String) =
    let gradient =
        this.TFGraph.ResizeBilinearGrad(
            grads.TFOutput,
            original_image.TFOutput,
            Option.toNullable align_corners,
            Option.toObj operName)
    TF(this.TFGraph, gradient)
/// <summary>
/// Resize <c>images</c> to <c>size</c> using nearest neighbor interpolation.
/// </summary><param name="images">
/// 4-D with shape <c>[batch, height, width, channels]</c>.
/// </param><param name="size">
/// = A 1-D int32 Tensor of 2 elements: <c>new_height, new_width</c>. The
/// new size for the images.
/// </param><param name="align_corners">
/// Optional argument.
/// If true, the centers of the 4 corner pixels of the input and output tensors are
/// aligned, preserving the values at the corner pixels. Defaults to false.
/// </param><param name="operName">
/// If specified, names the created operation; otherwise it is named 'ResizeNearestNeighbor'.
/// </param><returns>
/// 4-D with shape
/// <c>[batch, new_height, new_width, channels]</c>.
/// The TFOperation can be fetched from the resulting TFOutput, by fetching the Operation property from the result.
/// </returns>
member this.ResizeNearestNeighbor(images : TF, size : TF, ?align_corners : Boolean, ?operName : String) =
    let resized =
        this.TFGraph.ResizeNearestNeighbor(
            images.TFOutput,
            size.TFOutput,
            Option.toNullable align_corners,
            Option.toObj operName)
    TF(this.TFGraph, resized)
/// <summary>
/// Computes the gradient of nearest neighbor interpolation.
/// </summary><param name="grads">
/// 4-D with shape <c>[batch, height, width, channels]</c>.
/// </param><param name="size">
/// = A 1-D int32 Tensor of 2 elements: <c>orig_height, orig_width</c>. The
/// original input size.
/// </param><param name="align_corners">
/// Optional argument.
/// If true, the centers of the 4 corner pixels of the input and grad tensors are
/// aligned. Defaults to false.
/// </param><param name="operName">
/// If specified, names the created operation; otherwise it is named 'ResizeNearestNeighborGrad'.
/// </param><returns>
/// 4-D with shape <c>[batch, orig_height, orig_width, channels]</c>. Gradients
/// with respect to the input image.
/// The TFOperation can be fetched from the resulting TFOutput, by fetching the Operation property from the result.
/// </returns>
member this.ResizeNearestNeighborGrad(grads : TF, size : TF, ?align_corners : Boolean, ?operName : String) =
    let gradient =
        this.TFGraph.ResizeNearestNeighborGrad(
            grads.TFOutput,
            size.TFOutput,
            Option.toNullable align_corners,
            Option.toObj operName)
    TF(this.TFGraph, gradient)
/// <summary>
/// Update '*var' according to the adadelta scheme.
/// </summary><param name="var">
/// Should be from a Variable().
/// </param><param name="accum">
/// Should be from a Variable().
/// </param><param name="accum_update">
/// Should be from a Variable().
/// </param><param name="lr">
/// Scaling factor. Must be a scalar.
/// </param><param name="rho">
/// Decay factor. Must be a scalar.
/// </param><param name="epsilon">
/// Constant factor. Must be a scalar.
/// </param><param name="grad">
/// The gradient.
/// </param><param name="use_locking">
/// Optional argument.
/// If True, updating of the var, accum and update_accum tensors will be protected by
/// a lock; otherwise the behavior is undefined, but may exhibit less contention.
/// </param><param name="operName">
/// If specified, names the created operation; otherwise it is named 'ResourceApplyAdadelta'.
/// </param><returns>
/// Returns the description of the operation
/// </returns><remarks>
/// accum = rho() * accum + (1 - rho()) * grad.square();
/// update = (update_accum + epsilon).sqrt() * (accum + epsilon()).rsqrt() * grad;
/// update_accum = rho() * update_accum + (1 - rho()) * update.square();
/// var -= update;
/// </remarks>
member this.ResourceApplyAdadelta(var : TF, accum : TF, accum_update : TF, lr : TF, rho : TF, epsilon : TF, grad : TF, ?use_locking : Boolean, ?operName : String) =
    this.TFGraph.ResourceApplyAdadelta(
        var.TFOutput,
        accum.TFOutput,
        accum_update.TFOutput,
        lr.TFOutput,
        rho.TFOutput,
        epsilon.TFOutput,
        grad.TFOutput,
        Option.toNullable use_locking,
        Option.toObj operName)
/// <summary>
/// Update '*var' according to the adagrad scheme:
/// accum += grad * grad; var -= lr * grad * (1 / sqrt(accum)).
/// </summary>
/// <param name="var">Should be from a Variable().</param>
/// <param name="accum">Should be from a Variable().</param>
/// <param name="lr">Scaling factor. Must be a scalar.</param>
/// <param name="grad">The gradient.</param>
/// <param name="use_locking">Optional. If true, updates to var and accum are lock-protected; otherwise behavior is undefined but may show less contention.</param>
/// <param name="update_slots">Optional argument.</param>
/// <param name="operName">Optional name for the created operation; defaults to 'ResourceApplyAdagrad'.</param>
/// <returns>Returns the description of the operation.</returns>
member this.ResourceApplyAdagrad(var : TF, accum : TF, lr : TF, grad : TF, ?use_locking : Boolean, ?update_slots : Boolean, ?operName : String) =
    // Lift the F# optionals into the Nullable/null shapes the underlying API expects.
    let locking = Option.toNullable use_locking
    let slots = Option.toNullable update_slots
    let name = Option.toObj operName
    this.TFGraph.ResourceApplyAdagrad(var.TFOutput, accum.TFOutput, lr.TFOutput, grad.TFOutput, locking, slots, name)
/// <summary>
/// Update '*var' according to the proximal adagrad scheme.
/// </summary>
/// <param name="var">Should be from a Variable().</param>
/// <param name="gradient_accumulator">Should be from a Variable().</param>
/// <param name="gradient_squared_accumulator">Should be from a Variable().</param>
/// <param name="grad">The gradient.</param>
/// <param name="lr">Scaling factor. Must be a scalar.</param>
/// <param name="l1">L1 regularization. Must be a scalar.</param>
/// <param name="l2">L2 regularization. Must be a scalar.</param>
/// <param name="global_step">Training step number. Must be a scalar.</param>
/// <param name="use_locking">Optional. If true, updates to var and accum are lock-protected; otherwise behavior is undefined but may show less contention.</param>
/// <param name="operName">Optional name for the created operation; defaults to 'ResourceApplyAdagradDA'.</param>
/// <returns>Returns the description of the operation.</returns>
member this.ResourceApplyAdagradDA(var : TF, gradient_accumulator : TF, gradient_squared_accumulator : TF, grad : TF, lr : TF, l1 : TF, l2 : TF, global_step : TF, ?use_locking : Boolean, ?operName : String) =
    // Lift the F# optionals into the Nullable/null shapes the underlying API expects.
    let locking = Option.toNullable use_locking
    let name = Option.toObj operName
    this.TFGraph.ResourceApplyAdagradDA(var.TFOutput, gradient_accumulator.TFOutput, gradient_squared_accumulator.TFOutput, grad.TFOutput, lr.TFOutput, l1.TFOutput, l2.TFOutput, global_step.TFOutput, locking, name)
/// <summary>
/// Update '*var' according to the Adam algorithm:
/// lr_t = learning_rate * sqrt(1 - beta2^t) / (1 - beta1^t);
/// m_t = beta1 * m_{t-1} + (1 - beta1) * g;
/// v_t = beta2 * v_{t-1} + (1 - beta2) * g * g;
/// variable -= lr_t * m_t / (sqrt(v_t) + epsilon).
/// </summary>
/// <param name="var">Should be from a Variable().</param>
/// <param name="m">Should be from a Variable().</param>
/// <param name="v">Should be from a Variable().</param>
/// <param name="beta1_power">Must be a scalar.</param>
/// <param name="beta2_power">Must be a scalar.</param>
/// <param name="lr">Scaling factor. Must be a scalar.</param>
/// <param name="beta1">Momentum factor. Must be a scalar.</param>
/// <param name="beta2">Momentum factor. Must be a scalar.</param>
/// <param name="epsilon">Ridge term. Must be a scalar.</param>
/// <param name="grad">The gradient.</param>
/// <param name="use_locking">Optional. If true, updates to var, m, and v are lock-protected; otherwise behavior is undefined but may show less contention.</param>
/// <param name="use_nesterov">Optional. If true, uses the nesterov update.</param>
/// <param name="operName">Optional name for the created operation; defaults to 'ResourceApplyAdam'.</param>
/// <returns>Returns the description of the operation.</returns>
member this.ResourceApplyAdam(var : TF, m : TF, v : TF, beta1_power : TF, beta2_power : TF, lr : TF, beta1 : TF, beta2 : TF, epsilon : TF, grad : TF, ?use_locking : Boolean, ?use_nesterov : Boolean, ?operName : String) =
    // Lift the F# optionals into the Nullable/null shapes the underlying API expects.
    let locking = Option.toNullable use_locking
    let nesterov = Option.toNullable use_nesterov
    let name = Option.toObj operName
    this.TFGraph.ResourceApplyAdam(var.TFOutput, m.TFOutput, v.TFOutput, beta1_power.TFOutput, beta2_power.TFOutput, lr.TFOutput, beta1.TFOutput, beta2.TFOutput, epsilon.TFOutput, grad.TFOutput, locking, nesterov, name)
/// <summary>
/// Update '*var' according to the AdaMax algorithm:
/// m_t = beta1 * m_{t-1} + (1 - beta1) * g;
/// v_t = max(beta2 * v_{t-1}, abs(g));
/// variable -= learning_rate / (1 - beta1^t) * m_t / (v_t + epsilon).
/// </summary>
/// <param name="var">Should be from a Variable().</param>
/// <param name="m">Should be from a Variable().</param>
/// <param name="v">Should be from a Variable().</param>
/// <param name="beta1_power">Must be a scalar.</param>
/// <param name="lr">Scaling factor. Must be a scalar.</param>
/// <param name="beta1">Momentum factor. Must be a scalar.</param>
/// <param name="beta2">Momentum factor. Must be a scalar.</param>
/// <param name="epsilon">Ridge term. Must be a scalar.</param>
/// <param name="grad">The gradient.</param>
/// <param name="use_locking">Optional. If true, updates to var, m, and v are lock-protected; otherwise behavior is undefined but may show less contention.</param>
/// <param name="operName">Optional name for the created operation; defaults to 'ResourceApplyAdaMax'.</param>
/// <returns>Returns the description of the operation.</returns>
member this.ResourceApplyAdaMax(var : TF, m : TF, v : TF, beta1_power : TF, lr : TF, beta1 : TF, beta2 : TF, epsilon : TF, grad : TF, ?use_locking : Boolean, ?operName : String) =
    // Lift the F# optionals into the Nullable/null shapes the underlying API expects.
    let locking = Option.toNullable use_locking
    let name = Option.toObj operName
    this.TFGraph.ResourceApplyAdaMax(var.TFOutput, m.TFOutput, v.TFOutput, beta1_power.TFOutput, lr.TFOutput, beta1.TFOutput, beta2.TFOutput, epsilon.TFOutput, grad.TFOutput, locking, name)
/// <summary>
/// Update '*var' according to the AddSign update:
/// m_t = beta1 * m_{t-1} + (1 - beta1) * g;
/// update = (alpha + sign_decay * sign(g) * sign(m)) * g;
/// variable -= lr_t * update.
/// </summary>
/// <param name="var">Should be from a Variable().</param>
/// <param name="m">Should be from a Variable().</param>
/// <param name="lr">Scaling factor. Must be a scalar.</param>
/// <param name="alpha">Must be a scalar.</param>
/// <param name="sign_decay">Must be a scalar.</param>
/// <param name="beta">Must be a scalar.</param>
/// <param name="grad">The gradient.</param>
/// <param name="use_locking">Optional. If true, updates to var and m are lock-protected; otherwise behavior is undefined but may show less contention.</param>
/// <param name="operName">Optional name for the created operation; defaults to 'ResourceApplyAddSign'.</param>
/// <returns>Returns the description of the operation.</returns>
member this.ResourceApplyAddSign(var : TF, m : TF, lr : TF, alpha : TF, sign_decay : TF, beta : TF, grad : TF, ?use_locking : Boolean, ?operName : String) =
    // Lift the F# optionals into the Nullable/null shapes the underlying API expects.
    let locking = Option.toNullable use_locking
    let name = Option.toObj operName
    this.TFGraph.ResourceApplyAddSign(var.TFOutput, m.TFOutput, lr.TFOutput, alpha.TFOutput, sign_decay.TFOutput, beta.TFOutput, grad.TFOutput, locking, name)
/// <summary>
/// Update '*var' according to the centered RMSProp algorithm.
/// </summary>
/// <param name="var">Should be from a Variable().</param>
/// <param name="mg">Should be from a Variable().</param>
/// <param name="ms">Should be from a Variable().</param>
/// <param name="mom">Should be from a Variable().</param>
/// <param name="lr">Scaling factor. Must be a scalar.</param>
/// <param name="rho">Decay rate. Must be a scalar.</param>
/// <param name="momentum"></param>
/// <param name="epsilon">Ridge term. Must be a scalar.</param>
/// <param name="grad">The gradient.</param>
/// <param name="use_locking">Optional. If true, updates to var, mg, ms, and mom are lock-protected; otherwise behavior is undefined but may show less contention.</param>
/// <param name="operName">Optional name for the created operation; defaults to 'ResourceApplyCenteredRMSProp'.</param>
/// <returns>Returns the description of the operation.</returns>
/// <remarks>
/// Centered RMSProp normalizes by an estimate of the centered second moment (the variance)
/// instead of the uncentered second moment, which often helps training at slightly higher
/// computation/memory cost. In the dense implementation mg, ms, and mom update even for a
/// zero gradient; in the sparse implementation they skip iterations where grad is zero.
///
/// mg = rho * mg_{t-1} + (1-rho) * grad
/// ms = rho * ms_{t-1} + (1-rho) * grad * grad
/// mom = momentum * mom_{t-1} + lr * grad / sqrt(ms - mg * mg + epsilon)
/// var -= mom
/// </remarks>
member this.ResourceApplyCenteredRMSProp(var : TF, mg : TF, ms : TF, mom : TF, lr : TF, rho : TF, momentum : TF, epsilon : TF, grad : TF, ?use_locking : Boolean, ?operName : String) =
    // Lift the F# optionals into the Nullable/null shapes the underlying API expects.
    let locking = Option.toNullable use_locking
    let name = Option.toObj operName
    this.TFGraph.ResourceApplyCenteredRMSProp(var.TFOutput, mg.TFOutput, ms.TFOutput, mom.TFOutput, lr.TFOutput, rho.TFOutput, momentum.TFOutput, epsilon.TFOutput, grad.TFOutput, locking, name)
/// <summary>
/// Update '*var' according to the Ftrl-proximal scheme:
/// accum_new = accum + grad * grad;
/// linear += grad - (accum_new^(-lr_power) - accum^(-lr_power)) / lr * var;
/// quadratic = 1.0 / (accum_new^(lr_power) * lr) + 2 * l2;
/// var = (sign(linear) * l1 - linear) / quadratic if |linear| &gt; l1 else 0.0;
/// accum = accum_new.
/// </summary>
/// <param name="var">Should be from a Variable().</param>
/// <param name="accum">Should be from a Variable().</param>
/// <param name="linear">Should be from a Variable().</param>
/// <param name="grad">The gradient.</param>
/// <param name="lr">Scaling factor. Must be a scalar.</param>
/// <param name="l1">L1 regularization. Must be a scalar.</param>
/// <param name="l2">L2 regularization. Must be a scalar.</param>
/// <param name="lr_power">Scaling factor. Must be a scalar.</param>
/// <param name="use_locking">Optional. If true, updates to var and accum are lock-protected; otherwise behavior is undefined but may show less contention.</param>
/// <param name="operName">Optional name for the created operation; defaults to 'ResourceApplyFtrl'.</param>
/// <returns>Returns the description of the operation.</returns>
member this.ResourceApplyFtrl(var : TF, accum : TF, linear : TF, grad : TF, lr : TF, l1 : TF, l2 : TF, lr_power : TF, ?use_locking : Boolean, ?operName : String) =
    // Lift the F# optionals into the Nullable/null shapes the underlying API expects.
    let locking = Option.toNullable use_locking
    let name = Option.toObj operName
    this.TFGraph.ResourceApplyFtrl(var.TFOutput, accum.TFOutput, linear.TFOutput, grad.TFOutput, lr.TFOutput, l1.TFOutput, l2.TFOutput, lr_power.TFOutput, locking, name)
/// <summary>
/// Update '*var' according to the Ftrl-proximal scheme (V2, with L2 shrinkage):
/// grad_with_shrinkage = grad + 2 * l2_shrinkage * var;
/// accum_new = accum + grad_with_shrinkage * grad_with_shrinkage;
/// linear += grad_with_shrinkage + (accum_new^(-lr_power) - accum^(-lr_power)) / lr * var;
/// quadratic = 1.0 / (accum_new^(lr_power) * lr) + 2 * l2;
/// var = (sign(linear) * l1 - linear) / quadratic if |linear| &gt; l1 else 0.0;
/// accum = accum_new.
/// </summary>
/// <param name="var">Should be from a Variable().</param>
/// <param name="accum">Should be from a Variable().</param>
/// <param name="linear">Should be from a Variable().</param>
/// <param name="grad">The gradient.</param>
/// <param name="lr">Scaling factor. Must be a scalar.</param>
/// <param name="l1">L1 regularization. Must be a scalar.</param>
/// <param name="l2">L2 shrinkage regularization. Must be a scalar.</param>
/// <param name="l2_shrinkage"></param>
/// <param name="lr_power">Scaling factor. Must be a scalar.</param>
/// <param name="use_locking">Optional. If true, updates to var and accum are lock-protected; otherwise behavior is undefined but may show less contention.</param>
/// <param name="operName">Optional name for the created operation; defaults to 'ResourceApplyFtrlV2'.</param>
/// <returns>Returns the description of the operation.</returns>
member this.ResourceApplyFtrlV2(var : TF, accum : TF, linear : TF, grad : TF, lr : TF, l1 : TF, l2 : TF, l2_shrinkage : TF, lr_power : TF, ?use_locking : Boolean, ?operName : String) =
    // Lift the F# optionals into the Nullable/null shapes the underlying API expects.
    let locking = Option.toNullable use_locking
    let name = Option.toObj operName
    this.TFGraph.ResourceApplyFtrlV2(var.TFOutput, accum.TFOutput, linear.TFOutput, grad.TFOutput, lr.TFOutput, l1.TFOutput, l2.TFOutput, l2_shrinkage.TFOutput, lr_power.TFOutput, locking, name)
/// <summary>
/// Update '*var' by subtracting 'alpha' * 'delta' from it.
/// </summary>
/// <param name="var">Should be from a Variable().</param>
/// <param name="alpha">Scaling factor. Must be a scalar.</param>
/// <param name="delta">The change.</param>
/// <param name="use_locking">Optional. If true, the subtraction is lock-protected; otherwise behavior is undefined but may show less contention.</param>
/// <param name="operName">Optional name for the created operation; defaults to 'ResourceApplyGradientDescent'.</param>
/// <returns>Returns the description of the operation.</returns>
member this.ResourceApplyGradientDescent(var : TF, alpha : TF, delta : TF, ?use_locking : Boolean, ?operName : String) =
    // Lift the F# optionals into the Nullable/null shapes the underlying API expects.
    let locking = Option.toNullable use_locking
    let name = Option.toObj operName
    this.TFGraph.ResourceApplyGradientDescent(var.TFOutput, alpha.TFOutput, delta.TFOutput, locking, name)
/// <summary>
/// Update '*var' according to the momentum scheme:
/// accum = accum * momentum + grad; var -= lr * accum.
/// Set use_nesterov = true if you want to use Nesterov momentum.
/// </summary>
/// <param name="var">Should be from a Variable().</param>
/// <param name="accum">Should be from a Variable().</param>
/// <param name="lr">Scaling factor. Must be a scalar.</param>
/// <param name="grad">The gradient.</param>
/// <param name="momentum">Momentum. Must be a scalar.</param>
/// <param name="use_locking">Optional. If true, updates to var and accum are lock-protected; otherwise behavior is undefined but may show less contention.</param>
/// <param name="use_nesterov">Optional. If true, the tensor passed to compute grad will be var - lr * momentum * accum, so in the end the var you get is actually var - lr * momentum * accum.</param>
/// <param name="operName">Optional name for the created operation; defaults to 'ResourceApplyMomentum'.</param>
/// <returns>Returns the description of the operation.</returns>
member this.ResourceApplyMomentum(var : TF, accum : TF, lr : TF, grad : TF, momentum : TF, ?use_locking : Boolean, ?use_nesterov : Boolean, ?operName : String) =
    // Lift the F# optionals into the Nullable/null shapes the underlying API expects.
    let locking = Option.toNullable use_locking
    let nesterov = Option.toNullable use_nesterov
    let name = Option.toObj operName
    this.TFGraph.ResourceApplyMomentum(var.TFOutput, accum.TFOutput, lr.TFOutput, grad.TFOutput, momentum.TFOutput, locking, nesterov, name)
/// <summary>
/// Update '*var' according to the PowerSign (AddSign-family) update:
/// m_t = beta1 * m_{t-1} + (1 - beta1) * g;
/// update = exp(logbase * sign_decay * sign(g) * sign(m_t)) * g;
/// variable -= lr_t * update.
/// </summary>
/// <param name="var">Should be from a Variable().</param>
/// <param name="m">Should be from a Variable().</param>
/// <param name="lr">Scaling factor. Must be a scalar.</param>
/// <param name="logbase">Must be a scalar.</param>
/// <param name="sign_decay">Must be a scalar.</param>
/// <param name="beta">Must be a scalar.</param>
/// <param name="grad">The gradient.</param>
/// <param name="use_locking">Optional. If true, updates to var and m are lock-protected; otherwise behavior is undefined but may show less contention.</param>
/// <param name="operName">Optional name for the created operation; defaults to 'ResourceApplyPowerSign'.</param>
/// <returns>Returns the description of the operation.</returns>
member this.ResourceApplyPowerSign(var : TF, m : TF, lr : TF, logbase : TF, sign_decay : TF, beta : TF, grad : TF, ?use_locking : Boolean, ?operName : String) =
    // Lift the F# optionals into the Nullable/null shapes the underlying API expects.
    let locking = Option.toNullable use_locking
    let name = Option.toObj operName
    this.TFGraph.ResourceApplyPowerSign(var.TFOutput, m.TFOutput, lr.TFOutput, logbase.TFOutput, sign_decay.TFOutput, beta.TFOutput, grad.TFOutput, locking, name)
/// <summary>
/// Update '*var' and '*accum' according to FOBOS with Adagrad learning rate:
/// accum += grad * grad;
/// prox_v = var - lr * grad * (1 / sqrt(accum));
/// var = sign(prox_v) / (1 + lr * l2) * max(|prox_v| - lr * l1, 0).
/// </summary>
/// <param name="var">Should be from a Variable().</param>
/// <param name="accum">Should be from a Variable().</param>
/// <param name="lr">Scaling factor. Must be a scalar.</param>
/// <param name="l1">L1 regularization. Must be a scalar.</param>
/// <param name="l2">L2 regularization. Must be a scalar.</param>
/// <param name="grad">The gradient.</param>
/// <param name="use_locking">Optional. If true, updates to var and accum are lock-protected; otherwise behavior is undefined but may show less contention.</param>
/// <param name="operName">Optional name for the created operation; defaults to 'ResourceApplyProximalAdagrad'.</param>
/// <returns>Returns the description of the operation.</returns>
member this.ResourceApplyProximalAdagrad(var : TF, accum : TF, lr : TF, l1 : TF, l2 : TF, grad : TF, ?use_locking : Boolean, ?operName : String) =
    // Lift the F# optionals into the Nullable/null shapes the underlying API expects.
    let locking = Option.toNullable use_locking
    let name = Option.toObj operName
    this.TFGraph.ResourceApplyProximalAdagrad(var.TFOutput, accum.TFOutput, lr.TFOutput, l1.TFOutput, l2.TFOutput, grad.TFOutput, locking, name)
/// <summary>
/// Update '*var' as FOBOS algorithm with fixed learning rate:
/// prox_v = var - alpha * delta;
/// var = sign(prox_v) / (1 + alpha * l2) * max(|prox_v| - alpha * l1, 0).
/// </summary>
/// <param name="var">Should be from a Variable().</param>
/// <param name="alpha">Scaling factor. Must be a scalar.</param>
/// <param name="l1">L1 regularization. Must be a scalar.</param>
/// <param name="l2">L2 regularization. Must be a scalar.</param>
/// <param name="delta">The change.</param>
/// <param name="use_locking">Optional. If true, the subtraction is lock-protected; otherwise behavior is undefined but may show less contention.</param>
/// <param name="operName">Optional name for the created operation; defaults to 'ResourceApplyProximalGradientDescent'.</param>
/// <returns>Returns the description of the operation.</returns>
member this.ResourceApplyProximalGradientDescent(var : TF, alpha : TF, l1 : TF, l2 : TF, delta : TF, ?use_locking : Boolean, ?operName : String) =
    // Lift the F# optionals into the Nullable/null shapes the underlying API expects.
    let locking = Option.toNullable use_locking
    let name = Option.toObj operName
    this.TFGraph.ResourceApplyProximalGradientDescent(var.TFOutput, alpha.TFOutput, l1.TFOutput, l2.TFOutput, delta.TFOutput, locking, name)
/// <summary>
/// Update '*var' according to the RMSProp algorithm:
/// ms = rho * ms_{t-1} + (1-rho) * grad * grad;
/// mom = momentum * mom_{t-1} + lr * grad / sqrt(ms + epsilon);
/// var -= mom.
/// </summary>
/// <param name="var">Should be from a Variable().</param>
/// <param name="ms">Should be from a Variable().</param>
/// <param name="mom">Should be from a Variable().</param>
/// <param name="lr">Scaling factor. Must be a scalar.</param>
/// <param name="rho">Decay rate. Must be a scalar.</param>
/// <param name="momentum"></param>
/// <param name="epsilon">Ridge term. Must be a scalar.</param>
/// <param name="grad">The gradient.</param>
/// <param name="use_locking">Optional. If true, updates to var, ms, and mom are lock-protected; otherwise behavior is undefined but may show less contention.</param>
/// <param name="operName">Optional name for the created operation; defaults to 'ResourceApplyRMSProp'.</param>
/// <returns>Returns the description of the operation.</returns>
/// <remarks>
/// In the dense implementation ms and mom update even for a zero gradient; in the sparse
/// implementation they skip iterations where grad is zero.
/// </remarks>
member this.ResourceApplyRMSProp(var : TF, ms : TF, mom : TF, lr : TF, rho : TF, momentum : TF, epsilon : TF, grad : TF, ?use_locking : Boolean, ?operName : String) =
    // Lift the F# optionals into the Nullable/null shapes the underlying API expects.
    let locking = Option.toNullable use_locking
    let name = Option.toObj operName
    this.TFGraph.ResourceApplyRMSProp(var.TFOutput, ms.TFOutput, mom.TFOutput, lr.TFOutput, rho.TFOutput, momentum.TFOutput, epsilon.TFOutput, grad.TFOutput, locking, name)
/// <summary>
/// Increments the variable pointed to by 'resource' until it reaches 'limit'.
/// </summary>
/// <param name="resource">Should be from a scalar <c>Variable</c> node.</param>
/// <param name="limit">If incrementing ref would bring it above limit, an 'OutOfRange' error is generated instead.</param>
/// <param name="T"></param>
/// <param name="operName">Optional name for the created operation; defaults to 'ResourceCountUpTo'.</param>
/// <returns>
/// A copy of the input before the increment. If nothing else modifies the input, the values
/// produced will all be distinct. The TFOperation can be fetched from the result's Operation property.
/// </returns>
member this.ResourceCountUpTo(resource : TF, limit : Int64, T : TFDataType, ?operName : String) =
    // Lift the optional name into the null form the underlying API expects.
    let name = Option.toObj operName
    let output = this.TFGraph.ResourceCountUpTo(resource.TFOutput, limit, T, name)
    TF(this.TFGraph, output)
/// <summary>
/// Gather slices from the variable pointed to by <c>resource</c> according to <c>indices</c>.
/// </summary>
/// <param name="resource"></param>
/// <param name="indices"></param>
/// <param name="dtype"></param>
/// <param name="validate_indices">Optional argument.</param>
/// <param name="operName">Optional name for the created operation; defaults to 'ResourceGather'.</param>
/// <returns>The TFOperation can be fetched from the result's Operation property.</returns>
/// <remarks><c>indices</c> must be an integer tensor of any dimension (usually 0-D or 1-D).
/// Produces an output tensor with shape <c>indices.shape + params.shape[1:]</c> where:
///
/// <code>
/// # Scalar indices
/// output[:, ..., :] = params[indices, :, ... :]
///
/// # Vector indices
/// output[i, :, ..., :] = params[indices[i], :, ... :]
///
/// # Higher rank indices
/// output[i, ..., j, :, ... :] = params[indices[i, ..., j], :, ..., :]
/// </code></remarks>
member this.ResourceGather(resource : TF, indices : TF, dtype : TFDataType, ?validate_indices : Boolean, ?operName : String) =
    // Lift the F# optionals into the Nullable/null shapes the underlying API expects.
    let validate = Option.toNullable validate_indices
    let name = Option.toObj operName
    let output = this.TFGraph.ResourceGather(resource.TFOutput, indices.TFOutput, dtype, validate, name)
    TF(this.TFGraph, output)
/// <summary>
/// Adds sparse updates to the variable referenced by <c>resource</c>.
/// </summary>
/// <param name="resource">Should be from a <c>Variable</c> node.</param>
/// <param name="indices">A tensor of indices into the first dimension of <c>ref</c>.</param>
/// <param name="updates">A tensor of updated values to add to <c>ref</c>.</param>
/// <param name="operName">Optional name for the created operation; defaults to 'ResourceScatterAdd'.</param>
/// <returns>Returns the description of the operation.</returns>
/// <remarks>
/// This operation computes
///
///     # Scalar indices
///     ref[indices, ...] += updates[...]
///
///     # Vector indices (for each i)
///     ref[indices[i], ...] += updates[i, ...]
///
///     # High rank indices (for each i, ..., j)
///     ref[indices[i, ..., j], ...] += updates[i, ..., j, ...]
///
/// Duplicate entries are handled correctly: if multiple <c>indices</c> reference
/// the same location, their contributions add.
///
/// Requires <c>updates.shape = indices.shape + ref.shape[1:]</c> or <c>updates.shape = []</c>.
/// </remarks>
member this.ResourceScatterAdd(resource : TF, indices : TF, updates : TF, ?operName : String) =
    // Lift the optional name into the null form the underlying API expects.
    let name = Option.toObj operName
    this.TFGraph.ResourceScatterAdd(resource.TFOutput, indices.TFOutput, updates.TFOutput, name)
/// <summary>
/// Divides sparse updates into the variable referenced by <c>resource</c>.
/// </summary>
/// <param name="resource">Should be from a <c>Variable</c> node.</param>
/// <param name="indices">A tensor of indices into the first dimension of <c>ref</c>.</param>
/// <param name="updates">A tensor of updated values to divide <c>ref</c> by.</param>
/// <param name="operName">Optional name for the created operation; defaults to 'ResourceScatterDiv'.</param>
/// <returns>Returns the description of the operation.</returns>
/// <remarks>
/// This operation computes
///
///     # Scalar indices
///     ref[indices, ...] /= updates[...]
///
///     # Vector indices (for each i)
///     ref[indices[i], ...] /= updates[i, ...]
///
///     # High rank indices (for each i, ..., j)
///     ref[indices[i, ..., j], ...] /= updates[i, ..., j, ...]
///
/// Duplicate entries are handled correctly: if multiple <c>indices</c> reference
/// the same location, their contributions multiply.
///
/// Requires <c>updates.shape = indices.shape + ref.shape[1:]</c> or <c>updates.shape = []</c>.
/// </remarks>
member this.ResourceScatterDiv(resource : TF, indices : TF, updates : TF, ?operName : String) =
    // Lift the optional name into the null form the underlying API expects.
    let name = Option.toObj operName
    this.TFGraph.ResourceScatterDiv(resource.TFOutput, indices.TFOutput, updates.TFOutput, name)
/// <summary>
/// Reduces sparse updates into the variable referenced by <c>resource</c> using the <c>max</c> operation.
/// </summary>
/// <param name="resource">Should be from a <c>Variable</c> node.</param>
/// <param name="indices">A tensor of indices into the first dimension of <c>ref</c>.</param>
/// <param name="updates">A tensor of values to combine into <c>ref</c> via elementwise max.</param>
/// <param name="operName">Optional name for the created operation; defaults to 'ResourceScatterMax'.</param>
/// <returns>Returns the description of the operation.</returns>
/// <remarks>
/// This operation computes
///
///     # Scalar indices
///     ref[indices, ...] = max(ref[indices, ...], updates[...])
///
///     # Vector indices (for each i)
///     ref[indices[i], ...] = max(ref[indices[i], ...], updates[i, ...])
///
///     # High rank indices (for each i, ..., j)
///     ref[indices[i, ..., j], ...] = max(ref[indices[i, ..., j], ...], updates[i, ..., j, ...])
///
/// Duplicate entries are handled correctly: if multiple <c>indices</c> reference
/// the same location, their contributions are combined.
///
/// Requires <c>updates.shape = indices.shape + ref.shape[1:]</c> or <c>updates.shape = []</c>.
/// </remarks>
member this.ResourceScatterMax(resource : TF, indices : TF, updates : TF, ?operName : String) =
    // Lift the optional name into the null form the underlying API expects.
    let name = Option.toObj operName
    this.TFGraph.ResourceScatterMax(resource.TFOutput, indices.TFOutput, updates.TFOutput, name)
/// <summary>
/// Reduces sparse updates into the variable referenced by <c>resource</c> using the <c>min</c> operation.
/// </summary>
/// <param name="resource">Should be from a <c>Variable</c> node.</param>
/// <param name="indices">A tensor of indices into the first dimension of <c>ref</c>.</param>
/// <param name="updates">A tensor of values to combine element-wise with <c>ref</c>.</param>
/// <param name="operName">If specified, the created operation in the graph will be this one, otherwise it will be named 'ResourceScatterMin'.</param>
/// <returns>Returns the description of the operation</returns>
/// <remarks>
/// For scalar indices this computes ref[indices, ...] = min(ref[indices, ...], updates[...]);
/// for vector indices (for each i), ref[indices[i], ...] = min(ref[indices[i], ...], updates[i, ...]);
/// and analogously for higher-rank indices.
///
/// Duplicate entries are handled correctly: if multiple <c>indices</c> reference
/// the same location, their contributions are combined.
///
/// Requires <c>updates.shape = indices.shape + ref.shape[1:]</c> or <c>updates.shape = []</c>.
/// </remarks>
member this.ResourceScatterMin(resource : TF, indices : TF, updates : TF, ?operName : String) =
    // Unwrap the optional name into the null that the underlying API interprets
    // as "pick the default operation name".
    this.TFGraph.ResourceScatterMin(resource.TFOutput, indices.TFOutput, updates.TFOutput, Option.toObj operName)
/// <summary>
/// Multiplies sparse updates into the variable referenced by <c>resource</c>.
/// </summary>
/// <param name="resource">Should be from a <c>Variable</c> node.</param>
/// <param name="indices">A tensor of indices into the first dimension of <c>ref</c>.</param>
/// <param name="updates">A tensor of values to multiply into <c>ref</c>.</param>
/// <param name="operName">If specified, the created operation in the graph will be this one, otherwise it will be named 'ResourceScatterMul'.</param>
/// <returns>Returns the description of the operation</returns>
/// <remarks>
/// For scalar indices this computes ref[indices, ...] *= updates[...];
/// for vector indices (for each i), ref[indices[i], ...] *= updates[i, ...];
/// and analogously for higher-rank indices.
///
/// Duplicate entries are handled correctly: if multiple <c>indices</c> reference
/// the same location, their contributions multiply.
///
/// Requires <c>updates.shape = indices.shape + ref.shape[1:]</c> or <c>updates.shape = []</c>.
/// </remarks>
member this.ResourceScatterMul(resource : TF, indices : TF, updates : TF, ?operName : String) =
    // Unwrap the optional name into the null that the underlying API interprets
    // as "pick the default operation name".
    this.TFGraph.ResourceScatterMul(resource.TFOutput, indices.TFOutput, updates.TFOutput, Option.toObj operName)
/// <summary>
/// Adds sparse <c>updates</c> to individual values or slices within a given
/// variable according to <c>indices</c>.
/// </summary>
/// <param name="reference">A resource handle. Must be from a VarHandleOp.</param>
/// <param name="indices">A Tensor. Must be one of the following types: int32, int64. A tensor of indices into ref.</param>
/// <param name="updates">A Tensor. Must have the same type as ref. A tensor of values to add to ref.</param>
/// <param name="use_locking">
/// Optional argument.
/// An optional bool. Defaults to True. If True, the assignment will
/// be protected by a lock; otherwise the behavior is undefined,
/// but may exhibit less contention.
/// </param>
/// <param name="operName">If specified, the created operation in the graph will be this one, otherwise it will be named 'ResourceScatterNdAdd'.</param>
/// <returns>Returns the description of the operation</returns>
/// <remarks>
/// With <c>ref</c> a <c>Tensor</c> of rank <c>P</c>, <c>indices</c> is an integer
/// tensor of rank <c>Q</c> and shape <c>[d_0, ..., d_{Q-2}, K]</c> where
/// <c>0 &lt; K &lt;= P</c>. The innermost dimension of <c>indices</c> (length <c>K</c>)
/// selects elements (if <c>K = P</c>) or slices (if <c>K &lt; P</c>) along the
/// <c>K</c>th dimension of <c>ref</c>. <c>updates</c> is a <c>Tensor</c> of rank
/// <c>Q-1+P-K</c> with shape:
///
/// <code>
/// [d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]].
/// </code>
///
/// For example, adding 4 scattered elements to a rank-1 tensor of 8 elements
/// would look like this in Python:
///
/// <code>
/// ref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8], use_resource=True)
/// indices = tf.constant([[4], [3], [1] ,[7]])
/// updates = tf.constant([9, 10, 11, 12])
/// update = tf.scatter_nd_add(ref, indices, updates)
/// with tf.Session() as sess:
/// print sess.run(update)
/// </code>
///
/// The resulting update to ref would be [1, 12, 3, 14, 14, 6, 7, 20].
///
/// See <c>tf.scatter_nd</c> for more details about how to make updates to slices.
/// </remarks>
member this.ResourceScatterNdAdd(reference : TF, indices : TF, updates : TF, ?use_locking : Boolean, ?operName : String) =
    // Map the F# optionals onto the Nullable/null defaults of the underlying graph API.
    this.TFGraph.ResourceScatterNdAdd(reference.TFOutput, indices.TFOutput, updates.TFOutput, Option.toNullable use_locking, Option.toObj operName)
/// <summary>
/// Applies sparse <c>updates</c> to individual values or slices within a given
/// variable according to <c>indices</c>.
/// </summary>
/// <param name="reference">A resource handle. Must be from a VarHandleOp.</param>
/// <param name="indices">A Tensor. Must be one of the following types: int32, int64. A tensor of indices into ref.</param>
/// <param name="updates">A Tensor. Must have the same type as ref. A tensor of updated values to write into ref.</param>
/// <param name="use_locking">
/// Optional argument.
/// An optional bool. Defaults to True. If True, the assignment will
/// be protected by a lock; otherwise the behavior is undefined,
/// but may exhibit less contention.
/// </param>
/// <param name="operName">If specified, the created operation in the graph will be this one, otherwise it will be named 'ResourceScatterNdUpdate'.</param>
/// <returns>Returns the description of the operation</returns>
/// <remarks>
/// With <c>ref</c> a <c>Tensor</c> of rank <c>P</c>, <c>indices</c> is an integer
/// tensor of rank <c>Q</c> and shape <c>[d_0, ..., d_{Q-2}, K]</c> where
/// <c>0 &lt; K &lt;= P</c>. The innermost dimension of <c>indices</c> (length <c>K</c>)
/// selects elements (if <c>K = P</c>) or slices (if <c>K &lt; P</c>) along the
/// <c>K</c>th dimension of <c>ref</c>. <c>updates</c> is a <c>Tensor</c> of rank
/// <c>Q-1+P-K</c> with shape:
///
/// <code>
/// [d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]].
/// </code>
///
/// For example, updating 4 scattered elements of a rank-1 tensor of 8 elements
/// would look like this in Python:
///
/// <code>
/// ref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8])
/// indices = tf.constant([[4], [3], [1] ,[7]])
/// updates = tf.constant([9, 10, 11, 12])
/// update = tf.scatter_nd_update(ref, indices, updates)
/// with tf.Session() as sess:
/// print sess.run(update)
/// </code>
///
/// The resulting update to ref would be [1, 11, 3, 10, 9, 6, 7, 12].
///
/// See <c>tf.scatter_nd</c> for more details about how to make updates to slices.
/// </remarks>
member this.ResourceScatterNdUpdate(reference : TF, indices : TF, updates : TF, ?use_locking : Boolean, ?operName : String) =
    // Map the F# optionals onto the Nullable/null defaults of the underlying graph API.
    this.TFGraph.ResourceScatterNdUpdate(reference.TFOutput, indices.TFOutput, updates.TFOutput, Option.toNullable use_locking, Option.toObj operName)
/// <summary>
/// Subtracts sparse updates from the variable referenced by <c>resource</c>.
/// </summary>
/// <param name="resource">Should be from a <c>Variable</c> node.</param>
/// <param name="indices">A tensor of indices into the first dimension of <c>ref</c>.</param>
/// <param name="updates">A tensor of values to subtract from <c>ref</c>.</param>
/// <param name="operName">If specified, the created operation in the graph will be this one, otherwise it will be named 'ResourceScatterSub'.</param>
/// <returns>Returns the description of the operation</returns>
/// <remarks>
/// For scalar indices this computes ref[indices, ...] -= updates[...];
/// for vector indices (for each i), ref[indices[i], ...] -= updates[i, ...];
/// and analogously for higher-rank indices.
///
/// Duplicate entries are handled correctly: if multiple <c>indices</c> reference
/// the same location, their contributions add.
///
/// Requires <c>updates.shape = indices.shape + ref.shape[1:]</c> or <c>updates.shape = []</c>.
/// </remarks>
member this.ResourceScatterSub(resource : TF, indices : TF, updates : TF, ?operName : String) =
    // Unwrap the optional name into the null that the underlying API interprets
    // as "pick the default operation name".
    this.TFGraph.ResourceScatterSub(resource.TFOutput, indices.TFOutput, updates.TFOutput, Option.toObj operName)
/// <summary>
/// Assigns sparse updates to the variable referenced by <c>resource</c>.
/// </summary>
/// <param name="resource">Should be from a <c>Variable</c> node.</param>
/// <param name="indices">A tensor of indices into the first dimension of <c>ref</c>.</param>
/// <param name="updates">A tensor of values to assign into <c>ref</c>.</param>
/// <param name="operName">If specified, the created operation in the graph will be this one, otherwise it will be named 'ResourceScatterUpdate'.</param>
/// <returns>Returns the description of the operation</returns>
/// <remarks>
/// For scalar indices this computes ref[indices, ...] = updates[...];
/// for vector indices (for each i), ref[indices[i], ...] = updates[i, ...];
/// and analogously for higher-rank indices.
/// </remarks>
member this.ResourceScatterUpdate(resource : TF, indices : TF, updates : TF, ?operName : String) =
    // Unwrap the optional name into the null that the underlying API interprets
    // as "pick the default operation name".
    this.TFGraph.ResourceScatterUpdate(resource.TFOutput, indices.TFOutput, updates.TFOutput, Option.toObj operName)
/// <summary>
/// Update relevant entries in '*var' and '*accum' according to the adadelta scheme.
/// </summary><param name="var">
/// Should be from a Variable().
/// </param><param name="accum">
/// Should be from a Variable().
/// </param><param name="accum_update">
/// Should be from a Variable().
/// </param><param name="lr">
/// Learning rate. Must be a scalar.
/// </param><param name="rho">
/// Decay factor. Must be a scalar.
/// </param><param name="epsilon">
/// Constant factor. Must be a scalar.
/// </param><param name="grad">
/// The gradient.
/// </param><param name="indices">
/// A vector of indices into the first dimension of var and accum.
/// </param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'ResourceSparseApplyAdadelta'.
/// </param><param name="use_locking">
/// Optional argument
/// If True, updating of the var and accum tensors will be protected by
/// a lock; otherwise the behavior is undefined, but may exhibit less contention.
/// </param><returns>
/// Returns the description of the operation
/// </returns>
member this.ResourceSparseApplyAdadelta(var : TF, accum : TF, accum_update : TF, lr : TF, rho : TF, epsilon : TF, grad : TF, indices : TF, ?use_locking : Boolean, ?operName : String) =
    // Convert the F# optionals to the Nullable/null defaults the binding expects.
    let use_locking = defaultArg (use_locking |> Option.map Nullable) (Nullable())
    let operName = defaultArg operName null
    this.TFGraph.ResourceSparseApplyAdadelta(var.TFOutput, accum.TFOutput, accum_update.TFOutput, lr.TFOutput, rho.TFOutput, epsilon.TFOutput, grad.TFOutput, indices.TFOutput, use_locking, operName)
/// <summary>
/// Update relevant entries in '*var' and '*accum' according to the adagrad scheme.
/// </summary>
/// <param name="var">Should be from a Variable().</param>
/// <param name="accum">Should be from a Variable().</param>
/// <param name="lr">Learning rate. Must be a scalar.</param>
/// <param name="grad">The gradient.</param>
/// <param name="indices">A vector of indices into the first dimension of var and accum.</param>
/// <param name="use_locking">
/// Optional argument.
/// If <c>True</c>, updating of the var and accum tensors will be protected
/// by a lock; otherwise the behavior is undefined, but may exhibit less
/// contention.
/// </param>
/// <param name="update_slots">Optional argument.</param>
/// <param name="operName">If specified, the created operation in the graph will be this one, otherwise it will be named 'ResourceSparseApplyAdagrad'.</param>
/// <returns>Returns the description of the operation</returns>
/// <remarks>
/// That is for rows we have grad for, we update var and accum as follows:
///   accum += grad * grad
///   var -= lr * grad * (1 / sqrt(accum))
/// </remarks>
member this.ResourceSparseApplyAdagrad(var : TF, accum : TF, lr : TF, grad : TF, indices : TF, ?use_locking : Boolean, ?update_slots : Boolean, ?operName : String) =
    // Map the F# optionals onto the Nullable/null defaults of the underlying graph API.
    this.TFGraph.ResourceSparseApplyAdagrad(var.TFOutput, accum.TFOutput, lr.TFOutput, grad.TFOutput, indices.TFOutput, Option.toNullable use_locking, Option.toNullable update_slots, Option.toObj operName)
/// <summary>
/// Update entries in '*var' and '*accum' according to the proximal adagrad scheme.
/// </summary>
/// <param name="var">Should be from a Variable().</param>
/// <param name="gradient_accumulator">Should be from a Variable().</param>
/// <param name="gradient_squared_accumulator">Should be from a Variable().</param>
/// <param name="grad">The gradient.</param>
/// <param name="indices">A vector of indices into the first dimension of var and accum.</param>
/// <param name="lr">Learning rate. Must be a scalar.</param>
/// <param name="l1">L1 regularization. Must be a scalar.</param>
/// <param name="l2">L2 regularization. Must be a scalar.</param>
/// <param name="global_step">Training step number. Must be a scalar.</param>
/// <param name="use_locking">
/// Optional argument.
/// If True, updating of the var and accum tensors will be protected by
/// a lock; otherwise the behavior is undefined, but may exhibit less contention.
/// </param>
/// <param name="operName">If specified, the created operation in the graph will be this one, otherwise it will be named 'ResourceSparseApplyAdagradDA'.</param>
/// <returns>Returns the description of the operation</returns>
member this.ResourceSparseApplyAdagradDA(var : TF, gradient_accumulator : TF, gradient_squared_accumulator : TF, grad : TF, indices : TF, lr : TF, l1 : TF, l2 : TF, global_step : TF, ?use_locking : Boolean, ?operName : String) =
    // Map the F# optionals onto the Nullable/null defaults of the underlying graph API.
    this.TFGraph.ResourceSparseApplyAdagradDA(var.TFOutput, gradient_accumulator.TFOutput, gradient_squared_accumulator.TFOutput, grad.TFOutput, indices.TFOutput, lr.TFOutput, l1.TFOutput, l2.TFOutput, global_step.TFOutput, Option.toNullable use_locking, Option.toObj operName)
/// <summary>
/// Update '*var' according to the centered RMSProp algorithm.
/// </summary>
/// <param name="var">Should be from a Variable().</param>
/// <param name="mg">Should be from a Variable().</param>
/// <param name="ms">Should be from a Variable().</param>
/// <param name="mom">Should be from a Variable().</param>
/// <param name="lr">Scaling factor. Must be a scalar.</param>
/// <param name="rho">Decay rate. Must be a scalar.</param>
/// <param name="momentum"></param>
/// <param name="epsilon">Ridge term. Must be a scalar.</param>
/// <param name="grad">The gradient.</param>
/// <param name="indices">A vector of indices into the first dimension of var, ms and mom.</param>
/// <param name="use_locking">
/// Optional argument.
/// If <c>True</c>, updating of the var, mg, ms, and mom tensors is
/// protected by a lock; otherwise the behavior is undefined, but may exhibit less
/// contention.
/// </param>
/// <param name="operName">If specified, the created operation in the graph will be this one, otherwise it will be named 'ResourceSparseApplyCenteredRMSProp'.</param>
/// <returns>Returns the description of the operation</returns>
/// <remarks>
/// The centered RMSProp algorithm uses an estimate of the centered second moment
/// (i.e., the variance) for normalization, as opposed to regular RMSProp, which
/// uses the (uncentered) second moment. This often helps with training, but is
/// slightly more expensive in terms of computation and memory.
///
/// Note that in dense implementation of this algorithm, mg, ms, and mom will
/// update even if the grad is zero, but in this sparse implementation, mg, ms,
/// and mom will not update in iterations during which the grad is zero.
///
/// mean_square = decay * mean_square + (1-decay) * gradient ** 2
/// mean_grad = decay * mean_grad + (1-decay) * gradient
/// Delta = learning_rate * gradient / sqrt(mean_square + epsilon - mean_grad ** 2)
///
/// ms = rho * ms_{t-1} + (1-rho) * grad * grad
/// mom = momentum * mom_{t-1} + lr * grad / sqrt(ms + epsilon)
/// var = var - mom
/// </remarks>
member this.ResourceSparseApplyCenteredRMSProp(var : TF, mg : TF, ms : TF, mom : TF, lr : TF, rho : TF, momentum : TF, epsilon : TF, grad : TF, indices : TF, ?use_locking : Boolean, ?operName : String) =
    // Map the F# optionals onto the Nullable/null defaults of the underlying graph API.
    this.TFGraph.ResourceSparseApplyCenteredRMSProp(var.TFOutput, mg.TFOutput, ms.TFOutput, mom.TFOutput, lr.TFOutput, rho.TFOutput, momentum.TFOutput, epsilon.TFOutput, grad.TFOutput, indices.TFOutput, Option.toNullable use_locking, Option.toObj operName)
/// <summary>
/// Update relevant entries in '*var' according to the Ftrl-proximal scheme.
/// </summary>
/// <param name="var">Should be from a Variable().</param>
/// <param name="accum">Should be from a Variable().</param>
/// <param name="linear">Should be from a Variable().</param>
/// <param name="grad">The gradient.</param>
/// <param name="indices">A vector of indices into the first dimension of var and accum.</param>
/// <param name="lr">Scaling factor. Must be a scalar.</param>
/// <param name="l1">L1 regularization. Must be a scalar.</param>
/// <param name="l2">L2 regularization. Must be a scalar.</param>
/// <param name="lr_power">Scaling factor. Must be a scalar.</param>
/// <param name="use_locking">
/// Optional argument.
/// If <c>True</c>, updating of the var and accum tensors will be protected
/// by a lock; otherwise the behavior is undefined, but may exhibit less
/// contention.
/// </param>
/// <param name="operName">If specified, the created operation in the graph will be this one, otherwise it will be named 'ResourceSparseApplyFtrl'.</param>
/// <returns>Returns the description of the operation</returns>
/// <remarks>
/// That is for rows we have grad for, we update var, accum and linear as follows:
///   accum_new = accum + grad * grad
///   linear += grad + (accum_new^(-lr_power) - accum^(-lr_power)) / lr * var
///   quadratic = 1.0 / (accum_new^(lr_power) * lr) + 2 * l2
///   var = (sign(linear) * l1 - linear) / quadratic if |linear| &gt; l1 else 0.0
///   accum = accum_new
/// </remarks>
member this.ResourceSparseApplyFtrl(var : TF, accum : TF, linear : TF, grad : TF, indices : TF, lr : TF, l1 : TF, l2 : TF, lr_power : TF, ?use_locking : Boolean, ?operName : String) =
    // Map the F# optionals onto the Nullable/null defaults of the underlying graph API.
    this.TFGraph.ResourceSparseApplyFtrl(var.TFOutput, accum.TFOutput, linear.TFOutput, grad.TFOutput, indices.TFOutput, lr.TFOutput, l1.TFOutput, l2.TFOutput, lr_power.TFOutput, Option.toNullable use_locking, Option.toObj operName)
/// <summary>
/// Update relevant entries in '*var' according to the Ftrl-proximal scheme.
/// </summary><param name="var">
/// Should be from a Variable().
/// </param><param name="accum">
/// Should be from a Variable().
/// </param><param name="linear">
/// Should be from a Variable().
/// </param><param name="grad">
/// The gradient.
/// </param><param name="indices">
/// A vector of indices into the first dimension of var and accum.
/// </param><param name="lr">
/// Scaling factor. Must be a scalar.
/// </param><param name="l1">
/// L1 regularization. Must be a scalar.
/// </param><param name="l2">
/// L2 shrinkage regularization. Must be a scalar.
/// </param><param name="l2_shrinkage">
/// Shrinkage applied to the gradient (see the remarks). Must be a scalar.
/// </param><param name="lr_power">
/// Scaling factor. Must be a scalar.
/// </param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'ResourceSparseApplyFtrlV2'.
/// </param><param name="use_locking">
/// Optional argument
/// If <c>True</c>, updating of the var and accum tensors will be protected
/// by a lock; otherwise the behavior is undefined, but may exhibit less
/// contention.
/// </param><returns>
/// Returns the description of the operation
/// </returns><remarks>
/// That is for rows we have grad for, we update var, accum and linear as follows:
/// grad_with_shrinkage = grad + 2 * l2_shrinkage * var
/// accum_new = accum + grad_with_shrinkage * grad_with_shrinkage
/// linear += grad_with_shrinkage +
/// (accum_new^(-lr_power) - accum^(-lr_power)) / lr * var
/// quadratic = 1.0 / (accum_new^(lr_power) * lr) + 2 * l2
/// var = (sign(linear) * l1 - linear) / quadratic if |linear| &gt; l1 else 0.0
/// accum = accum_new
/// </remarks>
member this.ResourceSparseApplyFtrlV2(var : TF, accum : TF, linear : TF, grad : TF, indices : TF, lr : TF, l1 : TF, l2 : TF, l2_shrinkage : TF, lr_power : TF, ?use_locking : Boolean, ?operName : String) =
    // Convert the F# optionals to the Nullable/null defaults the binding expects.
    let use_locking = defaultArg (use_locking |> Option.map Nullable) (Nullable())
    let operName = defaultArg operName null
    this.TFGraph.ResourceSparseApplyFtrlV2(var.TFOutput, accum.TFOutput, linear.TFOutput, grad.TFOutput, indices.TFOutput, lr.TFOutput, l1.TFOutput, l2.TFOutput, l2_shrinkage.TFOutput, lr_power.TFOutput, use_locking, operName)
/// <summary>
/// Update relevant entries in '*var' and '*accum' according to the momentum scheme.
/// </summary>
/// <param name="var">Should be from a Variable().</param>
/// <param name="accum">Should be from a Variable().</param>
/// <param name="lr">Learning rate. Must be a scalar.</param>
/// <param name="grad">The gradient.</param>
/// <param name="indices">A vector of indices into the first dimension of var and accum.</param>
/// <param name="momentum">Momentum. Must be a scalar.</param>
/// <param name="use_locking">
/// Optional argument.
/// If <c>True</c>, updating of the var and accum tensors will be protected
/// by a lock; otherwise the behavior is undefined, but may exhibit less
/// contention.
/// </param>
/// <param name="use_nesterov">
/// Optional argument.
/// If <c>True</c>, the tensor passed to compute grad will be
/// var - lr * momentum * accum, so in the end, the var you get is actually
/// var - lr * momentum * accum.
/// </param>
/// <param name="operName">If specified, the created operation in the graph will be this one, otherwise it will be named 'ResourceSparseApplyMomentum'.</param>
/// <returns>Returns the description of the operation</returns>
/// <remarks>
/// Set use_nesterov = True if you want to use Nesterov momentum.
///
/// That is for rows we have grad for, we update var and accum as follows:
///   accum = accum * momentum + grad
///   var -= lr * accum
/// </remarks>
member this.ResourceSparseApplyMomentum(var : TF, accum : TF, lr : TF, grad : TF, indices : TF, momentum : TF, ?use_locking : Boolean, ?use_nesterov : Boolean, ?operName : String) =
    // Map the F# optionals onto the Nullable/null defaults of the underlying graph API.
    this.TFGraph.ResourceSparseApplyMomentum(var.TFOutput, accum.TFOutput, lr.TFOutput, grad.TFOutput, indices.TFOutput, momentum.TFOutput, Option.toNullable use_locking, Option.toNullable use_nesterov, Option.toObj operName)
/// <summary>
/// Updates the table to associate keys with values.
/// </summary><param name="table_handle">
/// Handle to the table.
/// </param><param name="keys">
/// Any shape. Keys to look up.
/// </param><param name="values">
/// Values to associate with keys.
/// </param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'LookupTableInsertV2'.
/// </param><returns>
/// Returns the description of the operation
/// </returns><remarks>
/// The tensor <c>keys</c> must be of the same type as the keys of the table.
/// The tensor <c>values</c> must be of the type of the table values.
/// </remarks>
member this.LookupTableInsertV2(table_handle : TF, keys : TF, values : TF, ?operName : String) =
    let operName = defaultArg operName null
    this.TFGraph.LookupTableInsertV2(table_handle.TFOutput, keys.TFOutput, values.TFOutput, operName)
/// <summary>
/// Computes the number of elements in the given table.
/// </summary><param name="table_handle">
/// Handle to the table.
/// </param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'LookupTableSize'.
/// </param><returns>
/// Scalar that contains number of elements in the table.
/// The TFOperation can be fetched from the resulting TFOutput, by fetching the Operation property from the result.
/// </returns>
member this.LookupTableSize(table_handle : TF, ?operName : String) =
    let operName = defaultArg operName null
    TF(this.TFGraph, this.TFGraph.LookupTableSize(table_handle.TFOutput, operName))
/// <summary>
/// Computes the number of elements in the given table.
/// </summary><param name="table_handle">
/// Handle to the table.
/// </param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'LookupTableSizeV2'.
/// </param><returns>
/// Scalar that contains number of elements in the table.
/// The TFOperation can be fetched from the resulting TFOutput, by fetching the Operation property from the result.
/// </returns>
member this.LookupTableSizeV2(table_handle : TF, ?operName : String) =
    let operName = defaultArg operName null
    TF(this.TFGraph, this.TFGraph.LookupTableSizeV2(table_handle.TFOutput, operName))
/// <summary>
/// Forwards the input to the output.
/// </summary><param name="input">
/// A boolean scalar, representing the branch predicate of the Switch op.
/// </param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'LoopCond'.
/// </param><returns>
/// The same tensor as <c>input</c>.
/// The TFOperation can be fetched from the resulting TFOutput, by fetching the Operation property from the result.
/// </returns><remarks>
/// This operator represents the loop termination condition used by the
/// "pivot" switches of a loop.
/// </remarks>
member this.LoopCond(input : TF, ?operName : String) =
    let operName = defaultArg operName null
    TF(this.TFGraph, this.TFGraph.LoopCond(input.TFOutput, operName))
/// <summary>
/// Applies lower_bound(sorted_search_values, values) along each row.
/// </summary><param name="sorted_inputs">
/// 2-D Tensor where each row is ordered.
/// </param><param name="values">
/// 2-D Tensor with the same numbers of rows as <c>sorted_search_values</c>. Contains
/// the values that will be searched for in <c>sorted_search_values</c>.
/// </param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'LowerBound'.
/// </param><param name="out_type">
/// Optional argument
/// </param><returns>
/// A <c>Tensor</c> with the same shape as <c>values</c>. It contains the first scalar index
/// into the last dimension where values can be inserted without changing the
/// ordered property.
/// The TFOperation can be fetched from the resulting TFOutput, by fetching the Operation property from the result.
/// </returns><remarks>
/// Each set of rows with the same index in (sorted_inputs, values) is treated
/// independently. The resulting row is the equivalent of calling
/// <c>np.searchsorted(sorted_inputs, values, side='left')</c>.
///
/// The result is not a global index to the entire
/// <c>Tensor</c>, but rather just the index in the last dimension.
///
/// A 2-D example:
/// sorted_sequence = [[0, 3, 9, 9, 10],
/// [1, 2, 3, 4, 5]]
/// values = [[2, 4, 9],
/// [0, 2, 6]]
///
/// result = LowerBound(sorted_sequence, values)
///
/// result == [[1, 2, 2],
/// [0, 1, 5]]
/// </remarks>
member this.LowerBound(sorted_inputs : TF, values : TF, ?out_type : TFDataType, ?operName : String) =
    let out_type = defaultArg (out_type |> Option.map Nullable) (Nullable())
    let operName = defaultArg operName null
    TF(this.TFGraph, this.TFGraph.LowerBound(sorted_inputs.TFOutput, values.TFOutput, out_type, operName))
/// <summary>
/// Local Response Normalization.
/// </summary><param name="input">
/// 4-D.
/// </param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'LRN'.
/// </param><param name="depth_radius">
/// Optional argument
/// 0-D. Half-width of the 1-D normalization window.
/// </param><param name="bias">
/// Optional argument
/// An offset (usually positive to avoid dividing by 0).
/// </param><param name="alpha">
/// Optional argument
/// A scale factor, usually positive.
/// </param><param name="beta">
/// Optional argument
/// An exponent.
/// </param><returns>
/// The TFOperation can be fetched from the resulting TFOutput, by fetching the Operation property from the result.
/// </returns><remarks>
/// The 4-D <c>input</c> tensor is treated as a 3-D array of 1-D vectors (along the last
/// dimension), and each vector is normalized independently. Within a given vector,
/// each component is divided by the weighted, squared sum of inputs within
/// <c>depth_radius</c>. In detail,
///
/// sqr_sum[a, b, c, d] =
/// sum(input[a, b, c, d - depth_radius : d + depth_radius + 1] ** 2)
/// output = input / (bias + alpha * sqr_sum) ** beta
///
/// For details, see [Krizhevsky et al., ImageNet classification with deep
/// convolutional neural networks (NIPS 2012)](http://papers.nips.cc/paper/4824-imagenet-classification-with-deep-convolutional-neural-networks).
/// </remarks>
member this.LRN(input : TF, ?depth_radius : Int64, ?bias : Single, ?alpha : Single, ?beta : Single, ?operName : String) =
    let depth_radius = defaultArg (depth_radius |> Option.map Nullable) (Nullable())
    let bias = defaultArg (bias |> Option.map Nullable) (Nullable())
    let alpha = defaultArg (alpha |> Option.map Nullable) (Nullable())
    let beta = defaultArg (beta |> Option.map Nullable) (Nullable())
    let operName = defaultArg operName null
    TF(this.TFGraph, this.TFGraph.LRN(input.TFOutput, depth_radius, bias, alpha, beta, operName))
/// <summary>
/// Gradients for Local Response Normalization.
/// </summary><param name="input_grads">
/// 4-D with shape <c>[batch, height, width, channels]</c>.
/// </param><param name="input_image">
/// 4-D with shape <c>[batch, height, width, channels]</c>.
/// </param><param name="output_image">
/// 4-D with shape <c>[batch, height, width, channels]</c>.
/// </param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'LRNGrad'.
/// </param><param name="depth_radius">
/// Optional argument
/// A depth radius.
/// </param><param name="bias">
/// Optional argument
/// An offset (usually &amp;gt; 0 to avoid dividing by 0).
/// </param><param name="alpha">
/// Optional argument
/// A scale factor, usually positive.
/// </param><param name="beta">
/// Optional argument
/// An exponent.
/// </param><returns>
/// The gradients for LRN.
/// The TFOperation can be fetched from the resulting TFOutput, by fethching the Operation property from the result.
/// </returns>
member this.LRNGrad(input_grads : TF, input_image : TF, output_image : TF, ?depth_radius : Int64, ?bias : Single, ?alpha : Single, ?beta : Single, ?operName : String) =
let depth_radius = defaultArg (depth_radius |> Option.map Nullable) (Nullable())
let bias = defaultArg (bias |> Option.map Nullable) (Nullable())
let alpha = defaultArg (alpha |> Option.map Nullable) (Nullable())
let beta = defaultArg (beta |> Option.map Nullable) (Nullable())
let operName = defaultArg operName null
TF(this.TFGraph, this.TFGraph.LRNGrad(input_grads.TFOutput, input_image.TFOutput, output_image.TFOutput, depth_radius, bias, alpha, beta, operName))
/// <summary>
/// Makes a new iterator from the given <c>dataset</c> and stores it in <c>iterator</c>.
/// </summary><param name="dataset"></param><param name="iterator"></param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'MakeIterator'.
/// </param><returns>
/// Returns the description of the operation
/// </returns><remarks>
/// This operation may be executed multiple times. Each execution will reset the
/// iterator in <c>iterator</c> to the first element of <c>dataset</c>.
/// </remarks>
member this.MakeIterator(dataset : TF, iterator : TF, ?operName : String) =
let operName = defaultArg operName null
this.TFGraph.MakeIterator(dataset.TFOutput, iterator.TFOutput, operName)
/// <summary>
/// Op removes all elements in the underlying container.
/// </summary><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'MapClear'.
/// </param><param name="capacity">
/// Optional argument
/// </param><param name="memory_limit">
/// Optional argument
/// </param><param name="container">
/// Optional argument
/// </param><param name="shared_name">
/// Optional argument
/// </param><param name="dtypes"></param><returns>
/// Returns the description of the operation
/// </returns>
member this.MapClear(dtypes : TFDataType[], ?capacity : Int64, ?memory_limit : Int64, ?container : String, ?shared_name : String, ?operName : String) =
let capacity = defaultArg (capacity |> Option.map Nullable) (Nullable())
let memory_limit = defaultArg (memory_limit |> Option.map Nullable) (Nullable())
let container = defaultArg container null
let shared_name = defaultArg shared_name null
let operName = defaultArg operName null
this.TFGraph.MapClear(dtypes, capacity, memory_limit, container, shared_name, operName)
/// <summary>
/// Op returns the number of incomplete elements in the underlying container.
/// </summary><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'MapIncompleteSize'.
/// </param><param name="capacity">
/// Optional argument
/// </param><param name="memory_limit">
/// Optional argument
/// </param><param name="container">
/// Optional argument
/// </param><param name="shared_name">
/// Optional argument
/// </param><param name="dtypes"></param><returns>
/// The TFOperation can be fetched from the resulting TFOutput, by fethching the Operation property from the result.
/// </returns>
member this.MapIncompleteSize(dtypes : TFDataType[], ?capacity : Int64, ?memory_limit : Int64, ?container : String, ?shared_name : String, ?operName : String) =
let capacity = defaultArg (capacity |> Option.map Nullable) (Nullable())
let memory_limit = defaultArg (memory_limit |> Option.map Nullable) (Nullable())
let container = defaultArg container null
let shared_name = defaultArg shared_name null
let operName = defaultArg operName null
TF(this.TFGraph, this.TFGraph.MapIncompleteSize(dtypes, capacity, memory_limit, container, shared_name, operName))
/// <summary>
/// Op peeks at the values at the specified key. If the
/// </summary><param name="key"></param><param name="indices"></param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'MapPeek'.
/// </param><param name="capacity">
/// Optional argument
/// </param><param name="memory_limit">
/// Optional argument
/// </param><param name="container">
/// Optional argument
/// </param><param name="shared_name">
/// Optional argument
/// </param><param name="dtypes"></param><returns>
/// The TFOperation can be fetched from the resulting TFOutput, by fethching the Operation property from the result.
/// </returns><remarks>
/// underlying container does not contain this key
/// this op will block until it does.
/// </remarks>
member this.MapPeek(key : TF, indices : TF, dtypes : TFDataType[], ?capacity : Int64, ?memory_limit : Int64, ?container : String, ?shared_name : String, ?operName : String) =
let capacity = defaultArg (capacity |> Option.map Nullable) (Nullable())
let memory_limit = defaultArg (memory_limit |> Option.map Nullable) (Nullable())
let container = defaultArg container null
let shared_name = defaultArg shared_name null
let operName = defaultArg operName null
this.TFGraph.MapPeek(key.TFOutput, indices.TFOutput, dtypes, capacity, memory_limit, container, shared_name, operName) |> Array.map (fun i -> TF(this.TFGraph, i))
/// <summary>
/// Op returns the number of elements in the underlying container.
/// </summary><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'MapSize'.
/// </param><param name="capacity">
/// Optional argument
/// </param><param name="memory_limit">
/// Optional argument
/// </param><param name="container">
/// Optional argument
/// </param><param name="shared_name">
/// Optional argument
/// </param><param name="dtypes"></param><returns>
/// The TFOperation can be fetched from the resulting TFOutput, by fethching the Operation property from the result.
/// </returns>
member this.MapSize(dtypes : TFDataType[], ?capacity : Int64, ?memory_limit : Int64, ?container : String, ?shared_name : String, ?operName : String) =
let capacity = defaultArg (capacity |> Option.map Nullable) (Nullable())
let memory_limit = defaultArg (memory_limit |> Option.map Nullable) (Nullable())
let container = defaultArg container null
let shared_name = defaultArg shared_name null
let operName = defaultArg operName null
TF(this.TFGraph, this.TFGraph.MapSize(dtypes, capacity, memory_limit, container, shared_name, operName))
/// <summary>
/// Stage (key, values) in the underlying container which behaves like a hashtable.
/// </summary><param name="key">
/// int64
/// </param><param name="indices"></param><param name="values">
/// a list of tensors
/// dtypes A list of data types that inserted values should adhere to.
/// </param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'MapStage'.
/// </param><param name="capacity">
/// Optional argument
/// Maximum number of elements in the Staging Area. If &amp;gt; 0, inserts
/// on the container will block when the capacity is reached.
/// </param><param name="memory_limit">
/// Optional argument
/// </param><param name="container">
/// Optional argument
/// If non-empty, this queue is placed in the given container. Otherwise,
/// a default container is used.
/// </param><param name="shared_name">
/// Optional argument
/// It is necessary to match this name to the matching Unstage Op.
/// </param><param name="dtypes"></param><returns>
/// Returns the description of the operation
/// </returns>
member this.MapStage(key : TF, indices : TF, values : TF[], dtypes : TFDataType[], ?capacity : Int64, ?memory_limit : Int64, ?container : String, ?shared_name : String, ?operName : String) =
let capacity = defaultArg (capacity |> Option.map Nullable) (Nullable())
let memory_limit = defaultArg (memory_limit |> Option.map Nullable) (Nullable())
let container = defaultArg container null
let shared_name = defaultArg shared_name null
let operName = defaultArg operName null
this.TFGraph.MapStage(key.TFOutput, indices.TFOutput, values |> Array.map (fun x -> x.TFOutput), dtypes, capacity, memory_limit, container, shared_name, operName)
/// <summary>
/// Op removes and returns the values associated with the key
/// </summary><param name="key"></param><param name="indices"></param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'MapUnstage'.
/// </param><param name="capacity">
/// Optional argument
/// </param><param name="memory_limit">
/// Optional argument
/// </param><param name="container">
/// Optional argument
/// </param><param name="shared_name">
/// Optional argument
/// </param><param name="dtypes"></param><returns>
/// The TFOperation can be fetched from the resulting TFOutput, by fethching the Operation property from the result.
/// </returns><remarks>
/// from the underlying container. If the underlying container
/// does not contain this key, the op will block until it does.
/// </remarks>
member this.MapUnstage(key : TF, indices : TF, dtypes : TFDataType[], ?capacity : Int64, ?memory_limit : Int64, ?container : String, ?shared_name : String, ?operName : String) =
let capacity = defaultArg (capacity |> Option.map Nullable) (Nullable())
let memory_limit = defaultArg (memory_limit |> Option.map Nullable) (Nullable())
let container = defaultArg container null
let shared_name = defaultArg shared_name null
let operName = defaultArg operName null
this.TFGraph.MapUnstage(key.TFOutput, indices.TFOutput, dtypes, capacity, memory_limit, container, shared_name, operName) |> Array.map (fun i -> TF(this.TFGraph, i))
/// <summary>
/// Op removes and returns a random (key, value)
/// </summary><param name="indices"></param><param name="operName">
/// If specified, the created operation in the graph will be this one, otherwise it will be named 'MapUnstageNoKey'.
/// </param><param name="capacity">
/// Optional argument
/// </param><param name="memory_limit">
/// Optional argument
/// </param><param name="container">
/// Optional argument
/// </param><param name="shared_name">
/// Optional argument
/// </param><param name="dtypes"></param><returns>
/// Returns a tuple with multiple values, as follows:
/// key:
/// values:
    /// The TFOperation can be fetched from any of the TFOutputs returned in the tuple values, by fetching the Operation property.
/// </returns><remarks>
/// from the underlying container. If the underlying container
/// does not contain elements, the op will block until it does.
/// </remarks>
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment