Skip to content

Instantly share code, notes, and snippets.

@i418c
Created January 15, 2024 04:20
Show Gist options
  • Save i418c/8e407534558ede92e39e2fbfc5e9c91f to your computer and use it in GitHub Desktop.
Normal Inverse Gaussian Negative log_prob.ipynb
Display the source blob
Display the rendered blob
Raw
{
"nbformat": 4,
"nbformat_minor": 0,
"metadata": {
"colab": {
"provenance": [],
"gpuType": "T4",
"authorship_tag": "ABX9TyP86f9Cgx5gIgnohTzn+PEM",
"include_colab_link": true
},
"kernelspec": {
"name": "python3",
"display_name": "Python 3"
},
"language_info": {
"name": "python"
},
"accelerator": "GPU"
},
"cells": [
{
"cell_type": "markdown",
"metadata": {
"id": "view-in-github",
"colab_type": "text"
},
"source": [
"<a href=\"https://colab.research.google.com/gist/i418c/8e407534558ede92e39e2fbfc5e9c91f/normal-inverse-gaussian-negative-log_prob.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>"
]
},
{
"cell_type": "code",
"execution_count": 1,
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/"
},
"id": "YX9nTG6rJl-K",
"outputId": "d6805aee-c54d-407f-9866-96833d4bdc1b"
},
"outputs": [
{
"output_type": "stream",
"name": "stdout",
"text": [
"Collecting tf-nightly\n",
" Downloading tf_nightly-2.16.0.dev20240110-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (566.9 MB)\n",
"\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m566.9/566.9 MB\u001b[0m \u001b[31m2.0 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
"\u001b[?25hCollecting tfp-nightly\n",
" Downloading tfp_nightly-0.24.0.dev20240114-py2.py3-none-any.whl (6.9 MB)\n",
"\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m6.9/6.9 MB\u001b[0m \u001b[31m39.5 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
"\u001b[?25hRequirement already satisfied: absl-py>=1.0.0 in /usr/local/lib/python3.10/dist-packages (from tf-nightly) (1.4.0)\n",
"Requirement already satisfied: astunparse>=1.6.0 in /usr/local/lib/python3.10/dist-packages (from tf-nightly) (1.6.3)\n",
"Requirement already satisfied: flatbuffers>=23.5.26 in /usr/local/lib/python3.10/dist-packages (from tf-nightly) (23.5.26)\n",
"Requirement already satisfied: gast!=0.5.0,!=0.5.1,!=0.5.2,>=0.2.1 in /usr/local/lib/python3.10/dist-packages (from tf-nightly) (0.5.4)\n",
"Requirement already satisfied: google-pasta>=0.1.1 in /usr/local/lib/python3.10/dist-packages (from tf-nightly) (0.2.0)\n",
"Collecting h5py>=3.10.0 (from tf-nightly)\n",
" Downloading h5py-3.10.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (4.8 MB)\n",
"\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m4.8/4.8 MB\u001b[0m \u001b[31m70.8 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
"\u001b[?25hRequirement already satisfied: libclang>=13.0.0 in /usr/local/lib/python3.10/dist-packages (from tf-nightly) (16.0.6)\n",
"Collecting ml-dtypes~=0.3.1 (from tf-nightly)\n",
" Downloading ml_dtypes-0.3.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (2.2 MB)\n",
"\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m2.2/2.2 MB\u001b[0m \u001b[31m91.8 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
"\u001b[?25hRequirement already satisfied: opt-einsum>=2.3.2 in /usr/local/lib/python3.10/dist-packages (from tf-nightly) (3.3.0)\n",
"Requirement already satisfied: packaging in /usr/local/lib/python3.10/dist-packages (from tf-nightly) (23.2)\n",
"Requirement already satisfied: protobuf!=4.21.0,!=4.21.1,!=4.21.2,!=4.21.3,!=4.21.4,!=4.21.5,<5.0.0dev,>=3.20.3 in /usr/local/lib/python3.10/dist-packages (from tf-nightly) (3.20.3)\n",
"Requirement already satisfied: requests<3,>=2.21.0 in /usr/local/lib/python3.10/dist-packages (from tf-nightly) (2.31.0)\n",
"Requirement already satisfied: setuptools in /usr/local/lib/python3.10/dist-packages (from tf-nightly) (67.7.2)\n",
"Requirement already satisfied: six>=1.12.0 in /usr/local/lib/python3.10/dist-packages (from tf-nightly) (1.16.0)\n",
"Requirement already satisfied: termcolor>=1.1.0 in /usr/local/lib/python3.10/dist-packages (from tf-nightly) (2.4.0)\n",
"Requirement already satisfied: typing-extensions>=3.6.6 in /usr/local/lib/python3.10/dist-packages (from tf-nightly) (4.5.0)\n",
"Requirement already satisfied: wrapt<1.15,>=1.11.0 in /usr/local/lib/python3.10/dist-packages (from tf-nightly) (1.14.1)\n",
"Requirement already satisfied: grpcio<2.0,>=1.24.3 in /usr/local/lib/python3.10/dist-packages (from tf-nightly) (1.60.0)\n",
"Collecting tb-nightly~=2.16.0.a (from tf-nightly)\n",
" Downloading tb_nightly-2.16.0a20240114-py3-none-any.whl (5.5 MB)\n",
"\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m5.5/5.5 MB\u001b[0m \u001b[31m105.5 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
"\u001b[?25hCollecting tf-estimator-nightly~=2.14.0.dev (from tf-nightly)\n",
" Downloading tf_estimator_nightly-2.14.0.dev2023080308-py2.py3-none-any.whl (440 kB)\n",
"\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m440.9/440.9 kB\u001b[0m \u001b[31m39.2 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
"\u001b[?25hCollecting keras-nightly~=3.0.0.dev (from tf-nightly)\n",
" Downloading keras_nightly-3.0.3.dev2024011503-py3-none-any.whl (1.0 MB)\n",
"\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m1.0/1.0 MB\u001b[0m \u001b[31m54.9 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
"\u001b[?25hRequirement already satisfied: tensorflow-io-gcs-filesystem>=0.23.1 in /usr/local/lib/python3.10/dist-packages (from tf-nightly) (0.35.0)\n",
"Requirement already satisfied: numpy<2.0.0,>=1.23.5 in /usr/local/lib/python3.10/dist-packages (from tf-nightly) (1.23.5)\n",
"Requirement already satisfied: decorator in /usr/local/lib/python3.10/dist-packages (from tfp-nightly) (4.4.2)\n",
"Requirement already satisfied: cloudpickle>=1.3 in /usr/local/lib/python3.10/dist-packages (from tfp-nightly) (2.2.1)\n",
"Requirement already satisfied: dm-tree in /usr/local/lib/python3.10/dist-packages (from tfp-nightly) (0.1.8)\n",
"Requirement already satisfied: wheel<1.0,>=0.23.0 in /usr/local/lib/python3.10/dist-packages (from astunparse>=1.6.0->tf-nightly) (0.42.0)\n",
"Requirement already satisfied: rich in /usr/local/lib/python3.10/dist-packages (from keras-nightly~=3.0.0.dev->tf-nightly) (13.7.0)\n",
"Collecting namex (from keras-nightly~=3.0.0.dev->tf-nightly)\n",
" Downloading namex-0.0.7-py3-none-any.whl (5.8 kB)\n",
"Requirement already satisfied: charset-normalizer<4,>=2 in /usr/local/lib/python3.10/dist-packages (from requests<3,>=2.21.0->tf-nightly) (3.3.2)\n",
"Requirement already satisfied: idna<4,>=2.5 in /usr/local/lib/python3.10/dist-packages (from requests<3,>=2.21.0->tf-nightly) (3.6)\n",
"Requirement already satisfied: urllib3<3,>=1.21.1 in /usr/local/lib/python3.10/dist-packages (from requests<3,>=2.21.0->tf-nightly) (2.0.7)\n",
"Requirement already satisfied: certifi>=2017.4.17 in /usr/local/lib/python3.10/dist-packages (from requests<3,>=2.21.0->tf-nightly) (2023.11.17)\n",
"Requirement already satisfied: markdown>=2.6.8 in /usr/local/lib/python3.10/dist-packages (from tb-nightly~=2.16.0.a->tf-nightly) (3.5.1)\n",
"Requirement already satisfied: tensorboard-data-server<0.8.0,>=0.7.0 in /usr/local/lib/python3.10/dist-packages (from tb-nightly~=2.16.0.a->tf-nightly) (0.7.2)\n",
"Collecting tf-keras-nightly (from tb-nightly~=2.16.0.a->tf-nightly)\n",
" Downloading tf_keras_nightly-2.16.0.dev2024011410-py3-none-any.whl (1.7 MB)\n",
"\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m1.7/1.7 MB\u001b[0m \u001b[31m64.4 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
"\u001b[?25hRequirement already satisfied: werkzeug>=1.0.1 in /usr/local/lib/python3.10/dist-packages (from tb-nightly~=2.16.0.a->tf-nightly) (3.0.1)\n",
"Requirement already satisfied: MarkupSafe>=2.1.1 in /usr/local/lib/python3.10/dist-packages (from werkzeug>=1.0.1->tb-nightly~=2.16.0.a->tf-nightly) (2.1.3)\n",
"Requirement already satisfied: markdown-it-py>=2.2.0 in /usr/local/lib/python3.10/dist-packages (from rich->keras-nightly~=3.0.0.dev->tf-nightly) (3.0.0)\n",
"Requirement already satisfied: pygments<3.0.0,>=2.13.0 in /usr/local/lib/python3.10/dist-packages (from rich->keras-nightly~=3.0.0.dev->tf-nightly) (2.16.1)\n",
"Requirement already satisfied: mdurl~=0.1 in /usr/local/lib/python3.10/dist-packages (from markdown-it-py>=2.2.0->rich->keras-nightly~=3.0.0.dev->tf-nightly) (0.1.2)\n",
"Installing collected packages: namex, tfp-nightly, tf-keras-nightly, tf-estimator-nightly, ml-dtypes, h5py, tb-nightly, keras-nightly, tf-nightly\n",
" Attempting uninstall: ml-dtypes\n",
" Found existing installation: ml-dtypes 0.2.0\n",
" Uninstalling ml-dtypes-0.2.0:\n",
" Successfully uninstalled ml-dtypes-0.2.0\n",
" Attempting uninstall: h5py\n",
" Found existing installation: h5py 3.9.0\n",
" Uninstalling h5py-3.9.0:\n",
" Successfully uninstalled h5py-3.9.0\n",
"\u001b[31mERROR: pip's dependency resolver does not currently take into account all the packages that are installed. This behaviour is the source of the following dependency conflicts.\n",
"tensorflow 2.15.0 requires ml-dtypes~=0.2.0, but you have ml-dtypes 0.3.2 which is incompatible.\u001b[0m\u001b[31m\n",
"\u001b[0mSuccessfully installed h5py-3.10.0 keras-nightly-3.0.3.dev2024011503 ml-dtypes-0.3.2 namex-0.0.7 tb-nightly-2.16.0a20240114 tf-estimator-nightly-2.14.0.dev2023080308 tf-keras-nightly-2.16.0.dev2024011410 tf-nightly-2.16.0.dev20240110 tfp-nightly-0.24.0.dev20240114\n"
]
}
],
"source": [
"# Use the %pip magic (not !pip) so the install targets the running\n",
"# kernel's environment rather than whatever `pip` is first on PATH.\n",
"%pip install tf-nightly tfp-nightly"
]
},
{
"cell_type": "code",
"source": [
"import os\n",
"# Set before importing TensorFlow so TF picks up the legacy-Keras flag at\n",
"# import time (the custom layer below targets the Keras 2 / tf_keras API).\n",
"os.environ[\"TF_USE_LEGACY_KERAS\"] = \"1\"\n",
"import numpy as np\n",
"import tensorflow as tf\n",
"import tensorflow_probability as tfp\n",
"from tensorflow_probability import distributions as tfd\n",
"# Private TFP helper; used below for expand_to_vector on event shapes.\n",
"from tensorflow_probability.python.internal import distribution_util as dist_util\n",
"from tensorflow import keras\n"
],
"metadata": {
"id": "Bnl1idP1Lfok"
},
"execution_count": 2,
"outputs": []
},
{
"cell_type": "code",
"source": [
"# Keras layer producing an Independent(NormalInverseGaussian) distribution\n",
"# from a flat params vector (4 * event_size values per event; see\n",
"# params_size). NOTE(review): relies on private TFP helpers\n",
"# (_get_convert_to_tensor_fn, _event_size, _serialize), which may change\n",
"# between tfp-nightly versions -- consider pinning tfp.\n",
"class IndependentNormalInverseGaussian(tfp.layers.DistributionLambda):\n",
" def __init__(self,\n",
" event_shape=(),\n",
" convert_to_tensor_fn=tfd.Distribution.sample,\n",
" validate_args=False,\n",
" **kwargs):\n",
" \"\"\"Initialize the `IndependentNormalInverseGaussian` layer.\n",
"\n",
" Args:\n",
" event_shape: integer vector `Tensor` representing the shape of single\n",
" draw from this distribution.\n",
" convert_to_tensor_fn: Python `callable` that takes a `tfd.Distribution`\n",
" instance and returns a `tf.Tensor`-like object.\n",
" Default value: `tfd.Distribution.sample`.\n",
" validate_args: Python `bool`, default `False`. When `True` distribution\n",
" parameters are checked for validity despite possibly degrading runtime\n",
" performance. When `False` invalid inputs may silently render incorrect\n",
" outputs.\n",
" Default value: `False`.\n",
" **kwargs: Additional keyword arguments passed to `tf.keras.Layer`.\n",
" \"\"\"\n",
" # Normalize string shortcuts ('sample', 'mean', ...) to a callable;\n",
" # the accepted strings are listed in get_config's docstring below.\n",
" convert_to_tensor_fn = tfp.python.layers.distribution_layer._get_convert_to_tensor_fn(\n",
" convert_to_tensor_fn)\n",
"\n",
" # If there is a 'make_distribution_fn' keyword argument (e.g., because we\n",
" # are being called from a `from_config` method), remove it. We pass the\n",
" # distribution function to `DistributionLambda.__init__` below as the first\n",
" # positional argument.\n",
" kwargs.pop('make_distribution_fn', None)\n",
"\n",
" super(IndependentNormalInverseGaussian, self).__init__(\n",
" lambda t: IndependentNormalInverseGaussian.new(\n",
" t, event_shape, validate_args),\n",
" convert_to_tensor_fn,\n",
" **kwargs)\n",
"\n",
" # Stored so get_config() can serialize the layer's constructor args.\n",
" self._event_shape = event_shape\n",
" self._convert_to_tensor_fn = convert_to_tensor_fn\n",
" self._validate_args = validate_args\n",
"\n",
" @staticmethod\n",
" def new(params, event_shape=(), validate_args=False, name=None):\n",
" \"\"\"Create the distribution instance from a `params` vector.\"\"\"\n",
" with tf.name_scope(name or 'IndependentNormalInverseGaussian'):\n",
" params = tf.convert_to_tensor(params, name='params')\n",
" event_shape = dist_util.expand_to_vector(\n",
" tf.convert_to_tensor(\n",
" event_shape, name='event_shape', dtype_hint=tf.int32),\n",
" tensor_name='event_shape')\n",
" output_shape = tf.concat([\n",
" tf.shape(params)[:-1],\n",
" event_shape,\n",
" ],\n",
" axis=0)\n",
" # The last axis packs 4 parameter groups: loc, scale, tailweight, skewness.\n",
" loc_params, scale_params, tailweight_params, skewness_params = tf.split(\n",
" params, 4, axis=-1)\n",
"\n",
" # tailweight must be greater than abs(skewness)\n",
" tailweight_params = tf.abs(\n",
" skewness_params) + tf.math.softplus(tailweight_params) + 1e-6\n",
" # softplus + 1e-6 keeps scale strictly positive.\n",
" return tfd.Independent(\n",
" tfd.NormalInverseGaussian(\n",
" loc=tf.reshape(loc_params, output_shape),\n",
" scale=tf.math.softplus(\n",
" tf.reshape(scale_params, output_shape)) + 1e-6,\n",
" tailweight=tf.reshape(tailweight_params, output_shape),\n",
" skewness=tf.reshape(skewness_params, output_shape),\n",
" validate_args=validate_args,\n",
" allow_nan_stats=False),\n",
" reinterpreted_batch_ndims=tf.size(event_shape),\n",
" validate_args=validate_args)\n",
"\n",
" @staticmethod\n",
" def params_size(event_shape=(), name=None):\n",
" \"\"\"The number of `params` needed to create a single distribution.\"\"\"\n",
" with tf.name_scope(name or 'IndependentNormalInverseGaussian_params_size'):\n",
" event_shape = tf.convert_to_tensor(\n",
" event_shape, name='event_shape', dtype_hint=tf.int32)\n",
" # 4 parameters (loc, scale, tailweight, skewness) per event element.\n",
" return np.int32(4) * tfp.python.layers.distribution_layer._event_size(\n",
" event_shape, name=name or 'IndependentNormalInverseGaussian_params_size')\n",
"\n",
" def get_config(self):\n",
" \"\"\"Returns the config of this layer.\n",
"\n",
" NOTE: At the moment, this configuration can only be serialized if the\n",
" Layer's `convert_to_tensor_fn` is a serializable Keras object (i.e.,\n",
" implements `get_config`) or one of the standard values:\n",
" - `Distribution.sample` (or `\"sample\"`)\n",
" - `Distribution.mean` (or `\"mean\"`)\n",
" - `Distribution.mode` (or `\"mode\"`)\n",
" - `Distribution.stddev` (or `\"stddev\"`)\n",
" - `Distribution.variance` (or `\"variance\"`)\n",
" \"\"\"\n",
" config = {\n",
" 'event_shape': self._event_shape,\n",
" 'convert_to_tensor_fn': tfp.python.layers.distribution_layer._serialize(self._convert_to_tensor_fn),\n",
" 'validate_args': self._validate_args\n",
" }\n",
" base_config = super(\n",
" IndependentNormalInverseGaussian, self).get_config()\n",
" return dict(list(base_config.items()) + list(config.items()))\n",
"\n",
"# Event size of the output distribution (used as event_shape below).\n",
"encoded_size=2\n",
"# Dense(128, relu) -> Dense(params_size) -> distribution layer.\n",
"# NOTE(review): despite the name, this layer only encodes; no decoder path.\n",
"class EncoderDecoder(keras.layers.Layer):\n",
" def __init__(self):\n",
" super().__init__()\n",
" self.dense1=keras.layers.Dense(128, activation='relu')\n",
" self.dense2=keras.layers.Dense(IndependentNormalInverseGaussian.params_size(encoded_size))\n",
" self.dist=IndependentNormalInverseGaussian(event_shape=[encoded_size],validate_args=True)\n",
"\n",
" def call(self,inputs):\n",
" x=self.dense1(inputs)\n",
" x=self.dense2(x)\n",
" return self.dist(x)\n",
"\n",
"# Model wrapper: forwards inputs through the encoder only.\n",
"class AutoEncoder(keras.Model):\n",
" def __init__(self):\n",
" super().__init__()\n",
" self.encoder = EncoderDecoder()\n",
"\n",
" def call(self, x):\n",
" return self.encoder(x)\n",
"\n",
"# Loss: negative log-likelihood of y_true under the predicted distribution.\n",
"def negative_log_likelihood(y_true, y_pred):\n",
" return -y_pred.log_prob(y_true)"
],
"metadata": {
"id": "DRftUPIXLqaP"
},
"execution_count": 28,
"outputs": []
},
{
"cell_type": "code",
"source": [
"batch_size=2048\n",
"output_dim=2\n",
"\n",
"# Random features (batch, 40, 5); constant targets (batch, 40, output_dim).\n",
"# NOTE(review): unseeded RNG -- call np.random.seed(...) first if runs\n",
"# need to be reproducible.\n",
"train=np.random.rand(batch_size,40,5)\n",
"verify=np.ones((batch_size,40,output_dim))"
],
"metadata": {
"id": "UaTpiw7HLrIA"
},
"execution_count": 32,
"outputs": []
},
{
"cell_type": "code",
"source": [
"model=AutoEncoder()\n",
"# Train by minimizing the NLL of the targets under the output distribution.\n",
"# run_eagerly/jit_compile are spelled out explicitly, presumably to make\n",
"# toggling them easy while debugging -- confirm before removing.\n",
"model.compile(optimizer=tf.keras.optimizers.Adam(\n",
" learning_rate=0.01), loss=negative_log_likelihood,\n",
" run_eagerly=False, jit_compile=False)"
],
"metadata": {
"id": "Gt2KklXXL0a6"
},
"execution_count": 33,
"outputs": []
},
{
"cell_type": "code",
"source": [
"# batch_size equals the dataset size, so each epoch is a single step.\n",
"model.fit(train,verify,epochs=100,batch_size=batch_size)"
],
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/"
},
"id": "MyqLeHdEMctH",
"outputId": "0580a287-07bc-46c3-8767-7282fa19ee62"
},
"execution_count": 34,
"outputs": [
{
"output_type": "stream",
"name": "stdout",
"text": [
"Epoch 1/100\n",
"1/1 [==============================] - 66s 66s/step - loss: 3.3655\n",
"Epoch 2/100\n",
"1/1 [==============================] - 0s 37ms/step - loss: 2.3018\n",
"Epoch 3/100\n",
"1/1 [==============================] - 0s 42ms/step - loss: 1.5960\n",
"Epoch 4/100\n",
"1/1 [==============================] - 0s 70ms/step - loss: 1.3450\n",
"Epoch 5/100\n",
"1/1 [==============================] - 0s 49ms/step - loss: 1.5000\n",
"Epoch 6/100\n",
"1/1 [==============================] - 0s 49ms/step - loss: 1.5359\n",
"Epoch 7/100\n",
"1/1 [==============================] - 0s 57ms/step - loss: 1.2714\n",
"Epoch 8/100\n",
"1/1 [==============================] - 0s 54ms/step - loss: 0.9394\n",
"Epoch 9/100\n",
"1/1 [==============================] - 0s 30ms/step - loss: 0.7248\n",
"Epoch 10/100\n",
"1/1 [==============================] - 0s 26ms/step - loss: 0.6414\n",
"Epoch 11/100\n",
"1/1 [==============================] - 0s 31ms/step - loss: 0.5837\n",
"Epoch 12/100\n",
"1/1 [==============================] - 0s 26ms/step - loss: 0.4446\n",
"Epoch 13/100\n",
"1/1 [==============================] - 0s 30ms/step - loss: 0.1935\n",
"Epoch 14/100\n",
"1/1 [==============================] - 0s 26ms/step - loss: -0.0514\n",
"Epoch 15/100\n",
"1/1 [==============================] - 0s 47ms/step - loss: -0.1521\n",
"Epoch 16/100\n",
"1/1 [==============================] - 0s 48ms/step - loss: -0.2901\n",
"Epoch 17/100\n",
"1/1 [==============================] - 0s 52ms/step - loss: -0.4746\n",
"Epoch 18/100\n",
"1/1 [==============================] - 0s 50ms/step - loss: -0.6155\n",
"Epoch 19/100\n",
"1/1 [==============================] - 0s 27ms/step - loss: -0.8489\n",
"Epoch 20/100\n",
"1/1 [==============================] - 0s 28ms/step - loss: -0.9039\n",
"Epoch 21/100\n",
"1/1 [==============================] - 0s 27ms/step - loss: -1.0623\n",
"Epoch 22/100\n",
"1/1 [==============================] - 0s 51ms/step - loss: -1.3401\n",
"Epoch 23/100\n",
"1/1 [==============================] - 0s 51ms/step - loss: -1.2453\n",
"Epoch 24/100\n",
"1/1 [==============================] - 0s 28ms/step - loss: -1.7086\n",
"Epoch 25/100\n",
"1/1 [==============================] - 0s 28ms/step - loss: -1.5859\n",
"Epoch 26/100\n",
"1/1 [==============================] - 0s 27ms/step - loss: -2.1112\n",
"Epoch 27/100\n",
"1/1 [==============================] - 0s 30ms/step - loss: -1.9173\n",
"Epoch 28/100\n",
"1/1 [==============================] - 0s 28ms/step - loss: -2.1606\n",
"Epoch 29/100\n",
"1/1 [==============================] - 0s 27ms/step - loss: -1.6993\n",
"Epoch 30/100\n",
"1/1 [==============================] - 0s 25ms/step - loss: -2.6555\n",
"Epoch 31/100\n",
"1/1 [==============================] - 0s 30ms/step - loss: -1.7219\n",
"Epoch 32/100\n",
"1/1 [==============================] - 0s 30ms/step - loss: -1.0170\n",
"Epoch 33/100\n",
"1/1 [==============================] - 0s 30ms/step - loss: -2.1974\n",
"Epoch 34/100\n",
"1/1 [==============================] - 0s 39ms/step - loss: -2.1088\n",
"Epoch 35/100\n",
"1/1 [==============================] - 0s 25ms/step - loss: -2.3484\n",
"Epoch 36/100\n",
"1/1 [==============================] - 0s 26ms/step - loss: -2.8507\n",
"Epoch 37/100\n",
"1/1 [==============================] - 0s 28ms/step - loss: -2.2692\n",
"Epoch 38/100\n",
"1/1 [==============================] - 0s 27ms/step - loss: -2.9651\n",
"Epoch 39/100\n",
"1/1 [==============================] - 0s 26ms/step - loss: -2.8891\n",
"Epoch 40/100\n",
"1/1 [==============================] - 0s 26ms/step - loss: -2.6888\n",
"Epoch 41/100\n",
"1/1 [==============================] - 0s 24ms/step - loss: -3.1850\n",
"Epoch 42/100\n",
"1/1 [==============================] - 0s 27ms/step - loss: -2.7880\n",
"Epoch 43/100\n",
"1/1 [==============================] - 0s 24ms/step - loss: -2.8748\n",
"Epoch 44/100\n",
"1/1 [==============================] - 0s 24ms/step - loss: -3.1557\n",
"Epoch 45/100\n",
"1/1 [==============================] - 0s 27ms/step - loss: -2.7160\n",
"Epoch 46/100\n",
"1/1 [==============================] - 0s 29ms/step - loss: -3.4120\n",
"Epoch 47/100\n",
"1/1 [==============================] - 0s 27ms/step - loss: -2.5137\n",
"Epoch 48/100\n",
"1/1 [==============================] - 0s 28ms/step - loss: -1.8559\n",
"Epoch 49/100\n",
"1/1 [==============================] - 0s 28ms/step - loss: -2.6253\n",
"Epoch 50/100\n",
"1/1 [==============================] - 0s 24ms/step - loss: -4.0185\n",
"Epoch 51/100\n",
"1/1 [==============================] - 0s 28ms/step - loss: -1.7008\n",
"Epoch 52/100\n",
"1/1 [==============================] - 0s 28ms/step - loss: -1.2227\n",
"Epoch 53/100\n",
"1/1 [==============================] - 0s 25ms/step - loss: -2.3020\n",
"Epoch 54/100\n",
"1/1 [==============================] - 0s 24ms/step - loss: -2.9454\n",
"Epoch 55/100\n",
"1/1 [==============================] - 0s 27ms/step - loss: -1.2098\n",
"Epoch 56/100\n",
"1/1 [==============================] - 0s 49ms/step - loss: -0.0357\n",
"Epoch 57/100\n",
"1/1 [==============================] - 0s 50ms/step - loss: -0.7313\n",
"Epoch 58/100\n",
"1/1 [==============================] - 0s 25ms/step - loss: -2.2866\n",
"Epoch 59/100\n",
"1/1 [==============================] - 0s 23ms/step - loss: -2.7616\n",
"Epoch 60/100\n",
"1/1 [==============================] - 0s 26ms/step - loss: -2.1294\n",
"Epoch 61/100\n",
"1/1 [==============================] - 0s 24ms/step - loss: -1.7551\n",
"Epoch 62/100\n",
"1/1 [==============================] - 0s 28ms/step - loss: -2.6146\n",
"Epoch 63/100\n",
"1/1 [==============================] - 0s 25ms/step - loss: -3.0814\n",
"Epoch 64/100\n",
"1/1 [==============================] - 0s 26ms/step - loss: -3.4430\n",
"Epoch 65/100\n",
"1/1 [==============================] - 0s 28ms/step - loss: -2.4013\n",
"Epoch 66/100\n",
"1/1 [==============================] - 0s 28ms/step - loss: -2.6869\n",
"Epoch 67/100\n",
"1/1 [==============================] - 0s 33ms/step - loss: -3.1297\n",
"Epoch 68/100\n",
"1/1 [==============================] - 0s 26ms/step - loss: -2.9101\n",
"Epoch 69/100\n",
"1/1 [==============================] - 0s 27ms/step - loss: -3.5570\n",
"Epoch 70/100\n",
"1/1 [==============================] - 0s 25ms/step - loss: -3.1159\n",
"Epoch 71/100\n",
"1/1 [==============================] - 0s 26ms/step - loss: -2.4504\n",
"Epoch 72/100\n",
"1/1 [==============================] - 0s 24ms/step - loss: -3.4034\n",
"Epoch 73/100\n",
"1/1 [==============================] - 0s 27ms/step - loss: -2.9141\n",
"Epoch 74/100\n",
"1/1 [==============================] - 0s 26ms/step - loss: -2.6201\n",
"Epoch 75/100\n",
"1/1 [==============================] - 0s 29ms/step - loss: -3.9475\n",
"Epoch 76/100\n",
"1/1 [==============================] - 0s 25ms/step - loss: -1.7778\n",
"Epoch 77/100\n",
"1/1 [==============================] - 0s 27ms/step - loss: -1.6612\n",
"Epoch 78/100\n",
"1/1 [==============================] - 0s 26ms/step - loss: -3.6711\n",
"Epoch 79/100\n",
"1/1 [==============================] - 0s 29ms/step - loss: 0.3769\n",
"Epoch 80/100\n",
"1/1 [==============================] - 0s 50ms/step - loss: 1.1394\n",
"Epoch 81/100\n",
"1/1 [==============================] - 0s 52ms/step - loss: 0.9912\n",
"Epoch 82/100\n",
"1/1 [==============================] - 0s 29ms/step - loss: 0.5777\n",
"Epoch 83/100\n",
"1/1 [==============================] - 0s 27ms/step - loss: -1.2120\n",
"Epoch 84/100\n",
"1/1 [==============================] - 0s 28ms/step - loss: -1.1085\n",
"Epoch 85/100\n",
"1/1 [==============================] - 0s 26ms/step - loss: -0.3907\n",
"Epoch 86/100\n",
"1/1 [==============================] - 0s 27ms/step - loss: -0.8647\n",
"Epoch 87/100\n",
"1/1 [==============================] - 0s 26ms/step - loss: -1.4703\n",
"Epoch 88/100\n",
"1/1 [==============================] - 0s 29ms/step - loss: -1.3610\n",
"Epoch 89/100\n",
"1/1 [==============================] - 0s 28ms/step - loss: -1.8506\n",
"Epoch 90/100\n",
"1/1 [==============================] - 0s 27ms/step - loss: -1.7853\n",
"Epoch 91/100\n",
"1/1 [==============================] - 0s 35ms/step - loss: -2.2071\n",
"Epoch 92/100\n",
"1/1 [==============================] - 0s 38ms/step - loss: -2.9349\n",
"Epoch 93/100\n",
"1/1 [==============================] - 0s 29ms/step - loss: -2.4070\n",
"Epoch 94/100\n",
"1/1 [==============================] - 0s 26ms/step - loss: -1.8796\n",
"Epoch 95/100\n",
"1/1 [==============================] - 0s 26ms/step - loss: -1.7277\n",
"Epoch 96/100\n",
"1/1 [==============================] - 0s 28ms/step - loss: -1.9495\n",
"Epoch 97/100\n",
"1/1 [==============================] - 0s 23ms/step - loss: -2.4179\n",
"Epoch 98/100\n",
"1/1 [==============================] - 0s 27ms/step - loss: -2.8112\n",
"Epoch 99/100\n",
"1/1 [==============================] - 0s 31ms/step - loss: -2.6550\n",
"Epoch 100/100\n",
"1/1 [==============================] - 0s 28ms/step - loss: -2.4226\n"
]
},
{
"output_type": "execute_result",
"data": {
"text/plain": [
"<tf_keras.src.callbacks.History at 0x7ada8f844bb0>"
]
},
"metadata": {},
"execution_count": 34
}
]
}
]
}
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment