diff --git a/.travis.yml b/.travis.yml index 946b4682..7f65c633 100644 --- a/.travis.yml +++ b/.travis.yml @@ -1,21 +1,20 @@ dist: trusty language: python python: - - "2.7" - "3.5" - "3.6" env: matrix: - - TF_VERSION=1.13.2 - - TF_VERSION=1.15 + - TF_VERSION=1.15.2 - TF_VERSION=2.0 # command to install dependencies install: - pip install --upgrade pip - pip install tensorflow==$TF_VERSION - pip install coveralls -# command to run tests script: - nosetests --logging-level=WARNING --with-coverage --cover-package=t3f + # Run eager and no-eager tests separately. + - nosetests t3f/*_no_eager_test.py --logging-level=WARNING --with-coverage --cover-package=t3f + - nosetests -e='*no_eager*' --logging-level=WARNING --with-coverage --cover-package=t3f after_success: coveralls diff --git a/docs/installation.rst b/docs/installation.rst index fa8bb6ff..7b8e26da 100644 --- a/docs/installation.rst +++ b/docs/installation.rst @@ -3,7 +3,7 @@ Installation ============ -T3f assumes you have Python 2.7, 3.5 or 3.6 and a working TensorFlow installation (tested versions are from 1.13 to 2.0 (TF 2.0 is only supported in compat.v1 mode), see here_ for TF installation instructions). +T3f assumes you have Python 3.5 or 3.6 and a working TensorFlow installation (tested versions are from 1.15.2 to 2.0, see here_ for TF installation instructions). .. _here: https://www.tensorflow.org/install/ diff --git a/docs/quick_start.ipynb b/docs/quick_start.ipynb index 58d958d7..d4f93902 100644 --- a/docs/quick_start.ipynb +++ b/docs/quick_start.ipynb @@ -1,283 +1,366 @@ { - "cells": [ - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# Quick start\n", - "\n", - "[Open](https://colab.research.google.com/github/Bihaqo/t3f/blob/develop/docs/quick_start.ipynb) **this page in an interactive mode via Google Colaboratory.**\n", - "\n", - "In this quick starting guide we show the basics of working with t3f library. The main concept of the library is a TensorTrain object -- a compact (factorized) representation of a tensor (=multidimensional array). This is generalization of the matrix low-rank decomposition.\n", - "\n", - "\n", - "To begin, let's import some libraries and enable [eager execution mode](https://www.tensorflow.org/guide/eager) which simplifies workflow with TensorFlow" - ] - }, - { - "cell_type": "code", - "execution_count": 1, - "metadata": {}, - "outputs": [], - "source": [ - "import numpy as np\n", - "import tensorflow.compat.v1 as tf\n", - "tf.disable_v2_behavior()\n", - "tf.enable_resource_variables()\n", - "tf.enable_eager_execution()\n", - "try:\n", - " import t3f\n", - "except ImportError:\n", - " # Install T3F if it's not already installed.\n", - " !git clone https://github.com/Bihaqo/t3f.git\n", - " !cd t3f; pip install .\n", - " import t3f" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Converting to and from TT-format\n", - "------------------------------------------------\n", - "\n", - "Let's start with converting a dense (numpy) matrix into the TT-format, which in this case coincides with the low-rank format." 
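To make the "compact (factorized) representation" claim above concrete, here is a minimal parameter-counting sketch. It is not part of the notebook itself: the shape and TT-rank are arbitrary illustrative choices, and only `t3f.random_tensor` and the `tt_cores` attribute already used elsewhere in this guide are assumed.

.. code-block:: python

    import numpy as np
    import t3f

    shape = (10, 10, 10, 10)  # illustrative 4-dimensional tensor (10**4 dense entries)
    tt = t3f.random_tensor(shape, tt_rank=3)

    dense_params = int(np.prod(shape))
    # Each TT-core is a small 3-dimensional array; sum up their sizes.
    tt_params = sum(int(np.prod(core.shape.as_list())) for core in tt.tt_cores)
    print('dense: %d parameters, TT: %d parameters' % (dense_params, tt_params))

A dense 10 x 10 x 10 x 10 tensor stores 10,000 numbers, while its rank-3 TT representation stores only 30 + 90 + 90 + 30 = 240 core entries.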
- ] - }, - { - "cell_type": "code", - "execution_count": 2, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "factors of the matrix: (, )\n", - "Original matrix: \n", - "[[-0.37987684 2.05245337 -0.35579983 1.2085382 ]\n", - " [ 0.88487745 -0.7914341 0.77149975 0.12811055]\n", - " [-1.02101132 1.93720209 -0.90908913 -0.14740026]]\n", - "Reconstructed matrix: \n", - "tf.Tensor(\n", - "[[-0.37987684 2.05245337 -0.35579983 1.2085382 ]\n", - " [ 0.88487745 -0.7914341 0.77149975 0.12811055]\n", - " [-1.02101132 1.93720209 -0.90908913 -0.14740026]], shape=(3, 4), dtype=float64)\n" - ] + "nbformat": 4, + "nbformat_minor": 0, + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.6.5" + }, + "colab": { + "name": "quick_start.ipynb", + "provenance": [], + "collapsed_sections": [] } - ], - "source": [ - "# Generate a random dense matrix of size 3 x 4.\n", - "a_dense = np.random.randn(3, 4)\n", - "# Convert the matrix into the TT-format with TT-rank = 3 (the larger the TT-rank,\n", - "# the more exactly the tensor will be converted, but the more memory and time\n", - "# everything will take). For matrices, matrix rank coinsides with TT-rank.\n", - "a_tt = t3f.to_tt_tensor(a_dense, max_tt_rank=3)\n", - "# a_tt stores the factorized representation of the matrix, namely it stores the matrix\n", - "# as a product of two smaller matrices which are called TT-cores. You can\n", - "# access the TT-cores directly.\n", - "print('factors of the matrix: ', a_tt.tt_cores)\n", - "# To check that the convertions into the TT-format didn't change the matrix too much,\n", - "# let's convert it back and compare to the original.\n", - "reconstructed_matrix = t3f.full(a_tt)\n", - "print('Original matrix: ')\n", - "print(a_dense)\n", - "print('Reconstructed matrix: ')\n", - "print(reconstructed_matrix)\n" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "The same idea applies to tensors" - ] }, - { - "cell_type": "code", - "execution_count": 3, - "metadata": {}, - "outputs": [ + "cells": [ { - "name": "stdout", - "output_type": "stream", - "text": [ - "The difference between the original tensor and the reconsrtucted one is 0.000001\n" - ] - } - ], - "source": [ - "# Generate a random dense tensor of size 3 x 2 x 2.\n", - "a_dense = np.random.randn(3, 2, 2).astype(np.float32)\n", - "# Convert the tensor into the TT-format with TT-rank = 3.\n", - "a_tt = t3f.to_tt_tensor(a_dense, max_tt_rank=3)\n", - "# The 3 TT-cores are available in a_tt.tt_cores.\n", - "# To check that the convertions into the TT-format didn't change the tensor too much,\n", - "# let's convert it back and compare to the original.\n", - "reconstructed_tensor = t3f.full(a_tt)\n", - "print('The difference between the original tensor and the reconsrtucted '\n", - " 'one is %f' % np.linalg.norm(reconstructed_tensor - a_dense))\n" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Arithmetic operations\n", - "--------------------------------\n", - "\n", - "T3F is a library of different operations that can be applied to the tensors in the TT-format by working directly with the compact representation, i.e. 
without the need to materialize the tensors themself.\n", - "Here are some basic examples" - ] - }, - { - "cell_type": "code", - "execution_count": 4, - "metadata": {}, - "outputs": [ + "cell_type": "markdown", + "metadata": { + "id": "gAdpYMvhjExj", + "colab_type": "text" + }, + "source": [ + "# Quick start\n", + "\n", + "[Open](https://colab.research.google.com/github/Bihaqo/t3f/blob/develop/docs/quick_start.ipynb) **this page in an interactive mode via Google Colaboratory.**\n", + "\n", + "In this quick starting guide we show the basics of working with t3f library. The main concept of the library is a TensorTrain object -- a compact (factorized) representation of a tensor (=multidimensional array). This is generalization of the matrix low-rank decomposition.\n", + "\n", + "\n", + "To begin, let's import some libraries." + ] + }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "Frobenius norm of the tensor is 0.968410\n", - "The TT-ranks of a and b are 3 and 2. The TT-rank of their elementwise product is 6. The TT-rank of their product after rounding is 3. The difference between the exact and the rounded elementwise product is 0.003162.\n" - ] - } - ], - "source": [ - "# Create a random tensor of shape (3, 2, 2) directly in the TT-format\n", - "# (in contrast to generating a dense tensor and then converting it to TT).\n", - "b_tt = t3f.random_tensor((3, 2, 2), tt_rank=2)\n", - "# Compute the Frobenius norm of the tensor.\n", - "norm = t3f.frobenius_norm(b_tt)\n", - "print('Frobenius norm of the tensor is %f' % norm)\n", - "# Compute the TT-representation of the sum or elementwise product of two TT-tensors.\n", - "sum_tt = a_tt + b_tt\n", - "prod_tt = a_tt * b_tt\n", - "twice_a_tt = 2 * a_tt\n", - "# Most operations on TT-tensors increase the TT-rank. After applying a sequence of\n", - "# operations the TT-rank can increase by too much and we may want to reduce it.\n", - "# To do that there is a rounding operation, which finds the tensor that is of\n", - "# a smaller rank but is as close to the original one as possible.\n", - "rounded_prod_tt = t3f.round(prod_tt, max_tt_rank=3)\n", - "a_max_tt_rank = np.max(a_tt.get_tt_ranks())\n", - "b_max_tt_rank = np.max(b_tt.get_tt_ranks())\n", - "exact_prod_max_tt_rank = np.max(prod_tt.get_tt_ranks())\n", - "rounded_prod_max_tt_rank = np.max(rounded_prod_tt.get_tt_ranks())\n", - "difference = t3f.frobenius_norm(prod_tt - rounded_prod_tt)\n", - "print('The TT-ranks of a and b are %d and %d. The TT-rank '\n", - " 'of their elementwise product is %d. The TT-rank of '\n", - " 'their product after rounding is %d. The difference '\n", - " 'between the exact and the rounded elementwise '\n", - " 'product is %f.' % (a_max_tt_rank, b_max_tt_rank,\n", - " exact_prod_max_tt_rank,\n", - " rounded_prod_max_tt_rank,\n", - " difference))\n" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Working with TT-matrices\n", - "------------------------------------\n", - "\n", - "Recall that for 2-dimensional tensors the TT-format coincides with the matrix low-rank format. However, sometimes matrices can have full matrix rank, but some tensor structure (for example a kronecker product of matrices). In this case there is a special object called Matrix TT-format. You can think of it as a sum of kronecker products (although it's a bit more complicated than that).\n", - "\n", - "Let's say that you have a matrix of size 8 x 27. 
You can convert it into the matrix TT-format of tensor shape (2, 2, 2) x (3, 3, 3) (in which case the matrix will be represented with 3 TT-cores) or, for example, into the matrix TT-format of tensor shape (4, 2) x (3, 9) (in which case the matrix will be represented with 2 TT-cores)."
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 5,
-   "metadata": {},
-   "outputs": [
-    {
-     "name": "stdout",
-     "output_type": "stream",
-     "text": [
-      "Frobenius norm of the matrix is 8.127118\n"
-     ]
-    }
-   ],
-   "source": [
-    "a_dense = np.random.rand(8, 27).astype(np.float32)\n",
-    "a_matrix_tt = t3f.to_tt_matrix(a_dense, shape=((2, 2, 2), (3, 3, 3)), max_tt_rank=4)\n",
-    "# Now you can work with 'a_matrix_tt' like with any other TT-object, e.g.\n",
-    "print('Frobenius norm of the matrix is %f' % t3f.frobenius_norm(a_matrix_tt))\n",
-    "twice_a_matrix_tt = 2.0 * a_matrix_tt # multiplication by a number.\n",
-    "prod_tt = a_matrix_tt * a_matrix_tt # Elementwise product of two TT-matrices.\n"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "But, additionally, you can also compute matrix multiplication between TT-matrices"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 6,
-   "metadata": {},
-   "outputs": [
-    {
-     "name": "stdout",
-     "output_type": "stream",
-     "text": [
-      "Difference between multiplying matrix by vector in the TT-format and then converting the result into dense vector and multiplying dense matrix by dense vector is 0.000002.\n"
-     ]
-    }
+ "cells": [
+  {
+   "cell_type": "markdown",
+   "metadata": {
+    "id": "gAdpYMvhjExj",
+    "colab_type": "text"
+   },
+   "source": [
+    "# Quick start\n",
+    "\n",
+    "[Open](https://colab.research.google.com/github/Bihaqo/t3f/blob/develop/docs/quick_start.ipynb) **this page in an interactive mode via Google Colaboratory.**\n",
+    "\n",
+    "In this quick start guide we show the basics of working with the t3f library. The main concept of the library is a TensorTrain object -- a compact (factorized) representation of a tensor (= multidimensional array). This is a generalization of the matrix low-rank decomposition.\n",
+    "\n",
+    "\n",
+    "To begin, let's import some libraries."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "metadata": {
+    "id": "4oUv_JuSjExl",
+    "colab_type": "code",
+    "colab": {
+     "base_uri": "https://localhost:8080/",
+     "height": 34
+    },
+    "outputId": "12d6ddbb-019c-44af-9b0a-4efc74b81948"
+   },
+   "source": [
+    "import numpy as np\n",
+    "\n",
+    "# Import TF 2.\n",
+    "%tensorflow_version 2.x\n",
+    "import tensorflow as tf\n",
+    "\n",
+    "# Fix seed so that the results are reproducible.\n",
+    "tf.random.set_seed(0)\n",
+    "np.random.seed(0)\n",
+    "try:\n",
+    "  import t3f\n",
+    "except ImportError:\n",
+    "  # Install T3F if it's not already installed.\n",
+    "  !git clone https://github.com/Bihaqo/t3f.git\n",
+    "  !cd t3f; pip install .\n",
+    "  import t3f"
+   ],
+   "execution_count": 1,
+   "outputs": [
+    {
+     "output_type": "stream",
+     "text": [
+      "TensorFlow 2.x selected.\n"
+     ],
+     "name": "stdout"
+    }
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {
+    "id": "hUSpAcUNjExo",
+    "colab_type": "text"
+   },
+   "source": [
+    "Converting to and from TT-format\n",
+    "------------------------------------------------\n",
+    "\n",
+    "Let's start with converting a dense (numpy) matrix into the TT-format, which in this case coincides with the low-rank format."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "metadata": {
+    "id": "maLG_cgGjExp",
+    "colab_type": "code",
+    "colab": {
+     "base_uri": "https://localhost:8080/",
+     "height": 476
+    },
+    "outputId": "50136303-f57b-4f42-c4d0-c90bb0e46ee5"
+   },
+   "source": [
+    "# Generate a random dense matrix of size 3 x 4.\n",
+    "a_dense = np.random.randn(3, 4)\n",
+    "# Convert the matrix into the TT-format with TT-rank = 3 (the larger the TT-rank,\n",
+    "# the more accurately the tensor will be converted, but the more memory and time\n",
+    "# everything will take). For matrices, matrix rank coincides with TT-rank.\n",
+    "a_tt = t3f.to_tt_tensor(a_dense, max_tt_rank=3)\n",
+    "# a_tt stores the factorized representation of the matrix, namely it stores the matrix\n",
+    "# as a product of two smaller matrices which are called TT-cores. You can\n",
+    "# access the TT-cores directly.\n",
+    "print('factors of the matrix: ', a_tt.tt_cores)\n",
+    "# To check that the conversion into the TT-format didn't change the matrix too much,\n",
+    "# let's convert it back and compare to the original.\n",
+    "reconstructed_matrix = t3f.full(a_tt)\n",
+    "print('Original matrix: ')\n",
+    "print(a_dense)\n",
+    "print('Reconstructed matrix: ')\n",
+    "print(reconstructed_matrix)\n"
+   ],
+   "execution_count": 2,
+   "outputs": [
+    {
+     "output_type": "stream",
+     "text": [
+      "factors of the matrix: (, )\n",
+      "Original matrix: \n",
+      "[[ 1.76405235 0.40015721 0.97873798 2.2408932 ]\n",
+      " [ 1.86755799 -0.97727788 0.95008842 -0.15135721]\n",
+      " [-0.10321885 0.4105985 0.14404357 1.45427351]]\n",
+      "Reconstructed matrix: \n",
+      "tf.Tensor(\n",
+      "[[ 1.76405235 0.40015721 0.97873798 2.2408932 ]\n",
+      " [ 1.86755799 -0.97727788 0.95008842 -0.15135721]\n",
+      " [-0.10321885 0.4105985 0.14404357 1.45427351]], shape=(3, 4), dtype=float64)\n"
+     ],
+     "name": "stdout"
+    }
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {
+    "id": "0ZTU_7r3jExu",
+    "colab_type": "text"
+   },
+   "source": [
+    "The same idea applies to tensors"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "metadata": {
+    "id": "Yt9bqmSsjExv",
+    "colab_type": "code",
+    "colab": {
+     "base_uri": "https://localhost:8080/",
+     "height": 34
+    },
+    "outputId": "cace78a7-84e9-4d3d-ff0e-b664d5b10dde"
+   },
+   "source": [
+    "# Generate a random dense tensor of size 3 x 2 x 2.\n",
+    "a_dense = np.random.randn(3, 2, 2).astype(np.float32)\n",
+    "# Convert the tensor into the TT-format with TT-rank = 3.\n",
+    "a_tt = t3f.to_tt_tensor(a_dense, max_tt_rank=3)\n",
+    "# The 3 TT-cores are available in a_tt.tt_cores.\n",
+    "# To check that the conversion into the TT-format didn't change the tensor too much,\n",
+    "# let's convert it back and compare to the original.\n",
+    "reconstructed_tensor = t3f.full(a_tt)\n",
+    "print('The difference between the original tensor and the reconstructed '\n",
+    "      'one is %f' % np.linalg.norm(reconstructed_tensor - a_dense))\n"
+   ],
+   "execution_count": 3,
+   "outputs": [
+    {
+     "output_type": "stream",
+     "text": [
+      "The difference between the original tensor and the reconstructed one is 0.000002\n"
+     ],
+     "name": "stdout"
+    }
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {
+    "id": "qKLybqtAjEx1",
+    "colab_type": "text"
+   },
+   "source": [
+    "Arithmetic operations\n",
+    "--------------------------------\n",
+    "\n",
+    "T3F is a library of different operations that can be applied to the tensors in the TT-format by working directly with the compact representation, i.e. without the need to materialize the tensors themselves.\n",
+    "Here are some basic examples"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "metadata": {
+    "id": "kDdvofu8jEx2",
+    "colab_type": "code",
+    "colab": {
+     "base_uri": "https://localhost:8080/",
+     "height": 71
+    },
+    "outputId": "e4bc7e61-1d99-494f-bc8b-258e540ed1ec"
+   },
+   "source": [
+    "# Create a random tensor of shape (3, 2, 2) directly in the TT-format\n",
+    "# (in contrast to generating a dense tensor and then converting it to TT).\n",
+    "b_tt = t3f.random_tensor((3, 2, 2), tt_rank=2)\n",
+    "# Compute the Frobenius norm of the tensor.\n",
+    "norm = t3f.frobenius_norm(b_tt)\n",
+    "print('Frobenius norm of the tensor is %f' % norm)\n",
+    "# Compute the TT-representation of the sum or elementwise product of two TT-tensors.\n",
+    "sum_tt = a_tt + b_tt\n",
+    "prod_tt = a_tt * b_tt\n",
+    "twice_a_tt = 2 * a_tt\n",
+    "# Most operations on TT-tensors increase the TT-rank. After applying a sequence of\n",
+    "# operations the TT-rank can increase by too much and we may want to reduce it.\n",
+    "# To do that there is a rounding operation, which finds the tensor that is of\n",
+    "# a smaller rank but is as close to the original one as possible.\n",
+    "rounded_prod_tt = t3f.round(prod_tt, max_tt_rank=3)\n",
+    "a_max_tt_rank = np.max(a_tt.get_tt_ranks())\n",
+    "b_max_tt_rank = np.max(b_tt.get_tt_ranks())\n",
+    "exact_prod_max_tt_rank = np.max(prod_tt.get_tt_ranks())\n",
+    "rounded_prod_max_tt_rank = np.max(rounded_prod_tt.get_tt_ranks())\n",
+    "difference = t3f.frobenius_norm(prod_tt - rounded_prod_tt)\n",
+    "print('The TT-ranks of a and b are %d and %d. The TT-rank '\n",
+    "      'of their elementwise product is %d. The TT-rank of '\n",
+    "      'their product after rounding is %d. The difference '\n",
+    "      'between the exact and the rounded elementwise '\n",
+    "      'product is %f.' % (a_max_tt_rank, b_max_tt_rank,\n",
+    "                          exact_prod_max_tt_rank,\n",
+    "                          rounded_prod_max_tt_rank,\n",
+    "                          difference))\n"
+   ],
+   "execution_count": 4,
+   "outputs": [
+    {
+     "output_type": "stream",
+     "text": [
+      "Frobenius norm of the tensor is 2.943432\n",
+      "The TT-ranks of a and b are 3 and 2. The TT-rank of their elementwise product is 6. The TT-rank of their product after rounding is 3. The difference between the exact and the rounded elementwise product is 0.003162.\n"
+     ],
+     "name": "stdout"
+    }
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {
+    "id": "2beGpQP8jEx5",
+    "colab_type": "text"
+   },
+   "source": [
+    "Working with TT-matrices\n",
+    "------------------------------------\n",
+    "\n",
+    "Recall that for 2-dimensional tensors the TT-format coincides with the matrix low-rank format. However, sometimes matrices can have full matrix rank, but still have some tensor structure (for example, be a Kronecker product of matrices). In this case there is a special object called Matrix TT-format. You can think of it as a sum of Kronecker products (although it's a bit more complicated than that).\n",
+    "\n",
+    "Let's say that you have a matrix of size 8 x 27. You can convert it into the matrix TT-format of tensor shape (2, 2, 2) x (3, 3, 3) (in which case the matrix will be represented with 3 TT-cores) or, for example, into the matrix TT-format of tensor shape (4, 2) x (3, 9) (in which case the matrix will be represented with 2 TT-cores)."
+   ]
+  },
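Only the first of these two factorizations is demonstrated in the next cell; a minimal sketch of both, assuming nothing beyond the `t3f.to_tt_matrix` call used below:

.. code-block:: python

    import numpy as np
    import t3f

    a_dense = np.random.rand(8, 27).astype(np.float32)

    # 8 x 27 viewed as (2*2*2) x (3*3*3): represented by 3 TT-cores.
    three_core = t3f.to_tt_matrix(a_dense, shape=((2, 2, 2), (3, 3, 3)), max_tt_rank=4)
    # The same matrix viewed as (4*2) x (3*9): represented by 2 TT-cores.
    two_core = t3f.to_tt_matrix(a_dense, shape=((4, 2), (3, 9)), max_tt_rank=4)

    print(len(three_core.tt_cores), len(two_core.tt_cores))  # 3 2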
+  {
+   "cell_type": "code",
+   "metadata": {
+    "id": "bsYbLqLPjEx6",
+    "colab_type": "code",
+    "colab": {
+     "base_uri": "https://localhost:8080/",
+     "height": 34
+    },
+    "outputId": "7ee19827-50d2-4826-b5f6-1a321425a47b"
+   },
+   "source": [
+    "a_dense = np.random.rand(8, 27).astype(np.float32)\n",
+    "a_matrix_tt = t3f.to_tt_matrix(a_dense, shape=((2, 2, 2), (3, 3, 3)), max_tt_rank=4)\n",
+    "# Now you can work with 'a_matrix_tt' like with any other TT-object, e.g.\n",
+    "print('Frobenius norm of the matrix is %f' % t3f.frobenius_norm(a_matrix_tt))\n",
+    "twice_a_matrix_tt = 2.0 * a_matrix_tt  # multiplication by a number.\n",
+    "prod_tt = a_matrix_tt * a_matrix_tt  # Elementwise product of two TT-matrices.\n"
+   ],
+   "execution_count": 5,
+   "outputs": [
+    {
+     "output_type": "stream",
+     "text": [
+      "Frobenius norm of the matrix is 7.805310\n"
+     ],
+     "name": "stdout"
+    }
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {
+    "id": "ZubUrinMjEx9",
+    "colab_type": "text"
+   },
+   "source": [
+    "But, additionally, you can also compute matrix multiplication between TT-matrices"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "metadata": {
+    "id": "eNVmZAe6jEx-",
+    "colab_type": "code",
+    "colab": {
+     "base_uri": "https://localhost:8080/",
+     "height": 54
+    },
+    "outputId": "0e80ee65-0041-4384-a240-fd5091ae59e3"
+   },
+   "source": [
+    "vector_tt = t3f.random_matrix(((3, 3, 3), (1, 1, 1)), tt_rank=3)\n",
+    "matvec_tt = t3f.matmul(a_matrix_tt, vector_tt)\n",
+    "# Check that the result coincides with np.matmul.\n",
+    "matvec_expected = np.matmul(t3f.full(a_matrix_tt), t3f.full(vector_tt))\n",
+    "difference = np.linalg.norm(matvec_expected - t3f.full(matvec_tt))\n",
+    "print('Difference between multiplying matrix by vector in '\n",
+    "      'the TT-format and then converting the result into '\n",
+    "      'dense vector and multiplying dense matrix by '\n",
+    "      'dense vector is %f.' % difference)"
+   ],
+   "execution_count": 6,
+   "outputs": [
+    {
+     "output_type": "stream",
+     "text": [
+      "Difference between multiplying matrix by vector in the TT-format and then converting the result into dense vector and multiplying dense matrix by dense vector is 0.000001.\n"
+     ],
+     "name": "stdout"
+    }
+   ]
+  }
-   ],
-   "source": [
-    "vector_tt = t3f.random_matrix(((3, 3, 3), (1, 1, 1)), tt_rank=3)\n",
-    "matvec_tt = t3f.matmul(a_matrix_tt, vector_tt)\n",
-    "# Check that the result coinsides with np.matmul.\n",
-    "matvec_expected = np.matmul(t3f.full(a_matrix_tt), t3f.full(vector_tt))\n",
-    "difference = np.linalg.norm(matvec_expected - t3f.full(matvec_tt))\n",
-    "print('Difference between multiplying matrix by vector in '\n",
-    "      'the TT-format and then converting the result into '\n",
-    "      'dense vector and multiplying dense matrix by '\n",
-    "      'dense vector is %f.' % difference)"
% difference)" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.6.5" - } - }, - "nbformat": 4, - "nbformat_minor": 2 -} + ] +} \ No newline at end of file diff --git a/docs/requirement.txt b/docs/requirement.txt index 26674f8b..fc635882 100644 --- a/docs/requirement.txt +++ b/docs/requirement.txt @@ -2,6 +2,6 @@ # tf is necessary for building the docs with readthedocs.org: they fetch a fresh # version of the library on each build and it doesn't import properly without # tensorflow being installed. -tensorflow>=1.12,<=1.14 +tensorflow>=1.15.2,<=2.0 ipykernel nbsphinx diff --git a/docs/troubleshooting.rst b/docs/troubleshooting.rst index b6e2a68f..8f17592c 100644 --- a/docs/troubleshooting.rst +++ b/docs/troubleshooting.rst @@ -7,15 +7,7 @@ If something does not work, try * Installing the latest version of the library (see :ref:`InstallationInstructions`) -* Importing TensorFlow in the following way: - -.. code-block:: python - - import tensorflow.compat.v1 as tf - tf.disable_v2_behavior() - tf.enable_resource_variables() - tf.enable_eager_execution() - +* Installing Tensorflow version 2.0 * Creating an issue_ on GitHub diff --git a/docs/tutorials/riemannian.ipynb b/docs/tutorials/riemannian.ipynb index a972549b..c39dbc60 100644 --- a/docs/tutorials/riemannian.ipynb +++ b/docs/tutorials/riemannian.ipynb @@ -1,292 +1,279 @@ { - "cells": [ - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# Riemannian optimization\n", - "\n", - "[Open](https://colab.research.google.com/github/Bihaqo/t3f/blob/develop/docs/tutorials/riemannian.ipynb) **this page in an interactive mode via Google Colaboratory.**" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Riemannian optimization is a framework for solving optimization problems with a constraint that the solution belongs to a manifold. \n", - "\n", - "Let us consider the following problem. Given some TT tensor $A$ with large tt-ranks we would like to find a tensor $X$ (with small prescribed tt-ranks $r$) which is closest to $A$ (in the sense of Frobenius norm). Mathematically it can be written as follows:\n", - "\\begin{equation*}\n", - "\\begin{aligned}\n", - "& \\underset{X}{\\text{minimize}} \n", - "& & \\frac{1}{2}\\|X - A\\|_F^2 \\\\\n", - "& \\text{subject to} \n", - "& & \\text{tt_rank}(X) = r\n", - "\\end{aligned}\n", - "\\end{equation*}\n", - "\n", - "It is known that the set of TT tensors with elementwise fixed TT ranks forms a manifold. Thus we can solve this problem using the so called Riemannian gradient descent. Given some functional $F$ on a manifold $\\mathcal{M}$ it is defined as\n", - "$$\\hat{x}_{k+1} = x_{k} - \\alpha P_{T_{x_k}\\mathcal{M}} \\nabla F(x_k),$$\n", - "$$x_{k+1} = \\mathcal{R}(\\hat{x}_{k+1})$$\n", - "with $P_{T_{x_k}} \\mathcal{M}$ being the projection onto the tangent space of $\\mathcal{M}$ at the point $x_k$ and $\\mathcal{R}$ being a retraction - an operation which projects points to the manifold, and $\\alpha$ is the learning rate.\n", - "\n", - "We can implement this in `t3f` using the `t3f.riemannian` module. As a retraction it is convenient to use the rounding method (`t3f.round`)." 
- ] - }, - { - "cell_type": "code", - "execution_count": 1, - "metadata": {}, - "outputs": [], - "source": [ - "import tensorflow.compat.v1 as tf\n", - "tf.disable_v2_behavior()\n", - "tf.enable_resource_variables()\n", - "import numpy as np\n", - "import matplotlib.pyplot as plt\n", - "tf.set_random_seed(0)\n", - "np.random.seed(0)\n", - "%matplotlib inline\n", - "\n", - "try:\n", - " import t3f\n", - "except ImportError:\n", - " # Install T3F if it's not already installed.\n", - " !git clone https://github.com/Bihaqo/t3f.git\n", - " !cd t3f; pip install .\n", - " import t3f" - ] - }, - { - "cell_type": "code", - "execution_count": 2, - "metadata": { - "collapsed": true - }, - "outputs": [], - "source": [ - "sess = tf.InteractiveSession()" - ] - }, - { - "cell_type": "code", - "execution_count": 3, - "metadata": { - "collapsed": true - }, - "outputs": [], - "source": [ - "# Initialize A randomly, with large tt-ranks\n", - "shape = 10 * [2]\n", - "init_A = t3f.random_tensor(shape, tt_rank=16)\n", - "A = t3f.get_variable('A', initializer=init_A, trainable=False)" - ] - }, - { - "cell_type": "code", - "execution_count": 4, - "metadata": { - "collapsed": true - }, - "outputs": [], - "source": [ - "# Create an X variable and compute the gradient of the functional. Note that it is simply X - A.\n", - "\n", - "init_X = t3f.random_tensor(shape, tt_rank=2)\n", - "X = t3f.get_variable('X', initializer=init_X)\n", - "\n", - "gradF = X - A" - ] - }, - { - "cell_type": "code", - "execution_count": 5, - "metadata": { - "collapsed": true - }, - "outputs": [], - "source": [ - "# Let us compute the projection of the gradient onto the tangent space at X" - ] - }, - { - "cell_type": "code", - "execution_count": 6, - "metadata": { - "collapsed": true - }, - "outputs": [], - "source": [ - "riemannian_grad = t3f.riemannian.project(gradF, X)" - ] - }, - { - "cell_type": "code", - "execution_count": 7, - "metadata": { - "collapsed": true - }, - "outputs": [], - "source": [ - "# Compute the update by subtracting the Riemannian gradient\n", - "# and retracting back to the manifold\n", - "alpha = 1.0\n", - "\n", - "train_step = t3f.assign(X, t3f.round(X - alpha * riemannian_grad, max_tt_rank=2))" - ] - }, - { - "cell_type": "code", - "execution_count": 8, - "metadata": { - "collapsed": true - }, - "outputs": [], - "source": [ - "# let us also compute the value of the functional\n", - "# to see if it is decreasing\n", - "F = 0.5 * t3f.frobenius_norm_squared(X - A)" - ] - }, - { - "cell_type": "code", - "execution_count": 9, - "metadata": { - "collapsed": true - }, - "outputs": [], - "source": [ - "sess.run(tf.global_variables_initializer())" - ] - }, - { - "cell_type": "code", - "execution_count": 10, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "81.622\n", - "58.5347\n", - "56.27\n", - "56.0832\n", - "51.7328\n", - "50.7767\n", - "50.7767\n", - "50.7767\n", - "50.7767\n", - "50.7767\n" - ] + "nbformat": 4, + "nbformat_minor": 0, + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.6.5" + }, + "colab": { + "name": "riemannian.ipynb", + "provenance": [], + "collapsed_sections": [] } - ], - "source": [ - "log = []\n", - "for i in range(100):\n", - " F_v, _ = 
sess.run([F, train_step.op])\n",
-    "  if i % 10 == 0:\n",
-    "    print (F_v)\n",
-    "  log.append(F_v)"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "It is intructive to compare the obtained result with the quasioptimum delivered by the TT-round procedure. "
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 11,
-   "metadata": {},
-   "outputs": [
-    {
-     "name": "stdout",
-     "output_type": "stream",
-     "text": [
-      "52.4074\n"
-     ]
-    }
-   ],
-   "source": [
-    "quasi_sol = t3f.round(A, max_tt_rank=2)\n",
-    "\n",
-    "val = sess.run(0.5 * t3f.frobenius_norm_squared(quasi_sol - A))\n",
-    "print (val)"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "We see that the value is slightly bigger than the exact minimum, but TT-round is faster and cheaper to compute, so it is often used in practice."
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 12,
-   "metadata": {},
-   "outputs": [
-    {
-     "data": {
-      "text/plain": [
-       ""
-      ]
-     },
-     "execution_count": 12,
-     "metadata": {},
-     "output_type": "execute_result"
-    },
+ "cells": [
+  {
+   "cell_type": "markdown",
+   "metadata": {
+    "id": "gqwrcHFAVwgs",
+    "colab_type": "text"
+   },
+   "source": [
+    "# Riemannian optimization\n",
+    "\n",
+    "[Open](https://colab.research.google.com/github/Bihaqo/t3f/blob/develop/docs/tutorials/riemannian.ipynb) **this page in an interactive mode via Google Colaboratory.**"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {
+    "id": "oz-X6ZrGVwgu",
+    "colab_type": "text"
+   },
+   "source": [
+    "Riemannian optimization is a framework for solving optimization problems with a constraint that the solution belongs to a manifold. \n",
+    "\n",
+    "Let us consider the following problem. Given some TT tensor $A$ with large tt-ranks we would like to find a tensor $X$ (with small prescribed tt-ranks $r$) which is closest to $A$ (in the sense of Frobenius norm). Mathematically it can be written as follows:\n",
+    "\\begin{equation*}\n",
+    "\\begin{aligned}\n",
+    "& \\underset{X}{\\text{minimize}} \n",
+    "& & \\frac{1}{2}\\|X - A\\|_F^2 \\\\\n",
+    "& \\text{subject to} \n",
+    "& & \\text{tt_rank}(X) = r\n",
+    "\\end{aligned}\n",
+    "\\end{equation*}\n",
+    "\n",
+    "It is known that the set of TT tensors with elementwise fixed TT ranks forms a manifold. Thus we can solve this problem using the so-called Riemannian gradient descent. Given some functional $F$ on a manifold $\\mathcal{M}$ it is defined as\n",
+    "$$\\hat{x}_{k+1} = x_{k} - \\alpha P_{T_{x_k}\\mathcal{M}} \\nabla F(x_k),$$\n",
+    "$$x_{k+1} = \\mathcal{R}(\\hat{x}_{k+1})$$\n",
+    "with $P_{T_{x_k}\\mathcal{M}}$ being the projection onto the tangent space of $\\mathcal{M}$ at the point $x_k$ and $\\mathcal{R}$ being a retraction - an operation which projects points to the manifold, and $\\alpha$ is the learning rate.\n",
+    "\n",
+    "We can implement this in `t3f` using the `t3f.riemannian` module. As a retraction it is convenient to use the rounding method (`t3f.round`)."
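Stripped of the variable-assignment machinery used in the cells below, a single update of the two equations above can be sketched with plain TT objects; `t3f.riemannian.project` plays the role of $P_{T_{x_k}\mathcal{M}}$ and `t3f.round` the role of the retraction $\mathcal{R}$. The shapes, ranks, and learning rate mirror the cells that follow; the variable names are ours.

.. code-block:: python

    import t3f

    shape = 10 * [2]
    A = t3f.random_tensor(shape, tt_rank=16)  # target with large TT-ranks
    X = t3f.random_tensor(shape, tt_rank=2)   # current iterate on the rank-2 manifold
    alpha = 1.0                               # learning rate

    grad_F = X - A                                    # Euclidean gradient of 0.5 * ||X - A||_F^2
    tangent_grad = t3f.riemannian.project(grad_F, X)  # project onto the tangent space at X
    X_next = t3f.round(X - alpha * tangent_grad, max_tt_rank=2)  # retract back to the manifold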
] - }, - "execution_count": 12, - "metadata": {}, - "output_type": "execute_result" }, { - "data": { - "image/png": "iVBORw0KGgoAAAANSUhEUgAAAZkAAAEKCAYAAADAVygjAAAABHNCSVQICAgIfAhkiAAAAAlwSFlz\nAAALEgAACxIB0t1+/AAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDIuMS4wLCBo\ndHRwOi8vbWF0cGxvdGxpYi5vcmcvpW3flQAAIABJREFUeJzt3Xl8VOXZ//HPlT2EEEjYA0pQFlkT\nJKCCSute0Fp3qrVuULUu1bZP7VOtS+vz68+f2qq1ta7URwtVcYNStSpopVUBBQFRXEAIIGCQsIZs\n1++PmUxDyDIJMxky832/XmM4Z865z3XmxLly3/c5923ujoiISDQkxToAERGJX0oyIiISNUoyIiIS\nNUoyIiISNUoyIiISNUoyIiISNUoyIiISNUoyIiISNUoyIiISNSmxDiDWunbt6v369Yt1GCIi7cqi\nRYu+cvduzW2X8EmmX79+LFy4MNZhiIi0K2b2RTjbqblMRESiRklGRESiRklGRESiJuH7ZEQiobKy\nkpKSEsrLy2MdikhEZWRk0KdPH1JTU1u1v5KMSASUlJSQnZ1Nv379MLNYhyMSEe5OaWkpJSUlFBQU\ntKoMNZeJREB5eTl5eXlKMBJXzIy8vLz9qqEryYhEiBKMxKP9/b1Wkmmlv32wgWnzV8U6DBGRA5qS\nTCu9umIjf3rz81iHIRKSnJxMYWEhw4YN49RTT2Xr1q0ArF+/nrPOOivG0e3rl7/8Ja+++mqsw2hQ\nx44dgf3/7H73u9+xa9euZrebN28ekyZNavVx9tfWrVv5wx/+EJWylWRaaVDPbDaUlVO2qzLWoYgA\nkJmZyeLFi1m2bBm5ubncf//9APTu3ZtnnnkmxtHt67bbbuP4449vs+NVV1e3eJ/9/ezCTTKxpiRz\nABrUIxuAjzduj3EkIvs68sgjWbduHQCrV69m2LBhQOCL9qc//SnFxcWMGDGCP/3pT0DgL+ljjz2W\nc845h4EDB3LDDTfw5JNPMmbMGIYPH85nn30GwKxZsxg7dixFRUUcf/zxbNy4EYBbbrmFSy65hAkT\nJtC/f3/uvffe0LEPO+wwpkyZwtChQznxxBPZvXs3ABdddFHoC/y2226juLiYYcOGMXXqVNwdgAkT\nJvCzn/2MMWPGMHDgQP75z3/uc641NTVceeWVDB06lEmTJvGtb30rVG6/fv247bbbGD9+PE8//TQP\nPfQQxcXFjBw5kjPPPDOUAFatWsWRRx5JcXExN910U6jscD+7CRMmcNZZZzF48GDOP/983J17772X\n9evX841vfINvfOMb+8T90ksvMXjwYMaPH8+zzz4bWr9z504uueQSiouLKSoq4oUXXgBg+fLljBkz\nhsLCQkaMGMEnn3wCwOOPP86IESMYOXIk3/ve9wDYvHkzZ555JsXFxRQXFzN//vwmr9MNN9zAZ599\nRmFhIT/96U+b+/VqEd3C3EqDev4nyYwpyI1xNHIguXXWcj5cvy2iZQ7p3YmbTx0a1rbV1dW89tpr\nXHrppfu898gjj5CTk8OCBQvYs2cP48aN48QTTwRgyZIlrFixgtzcXPr3789ll13Gu+++yz333MN9\n993H7373O8aPH8/bb7+NmfHwww9zxx13cNdddwHw0UcfMXfuXLZv386gQYO44oorAPjkk0+YPn06\nDz30EOeccw4zZ87kggsu2Cuuq666il/+8pcAfO9732P27NmceuqpAFRVVfHuu+8yZ84cbr311n2a\n2J599llWr17N0qVL2bRpE4cddhiXXHJJ6P2MjAzeeustAEpLS5kyZQoAN954I4888ghXX3011157\nLVdccQUXXnhhqAbYks/u/fffZ/ny5fTu3Ztx48Yxf/58rrnmGu6++27mzp1L165d9yqrvLycKVOm\n8Prrr3PooYdy7rnnht67/fbb+eY3v8mjjz7K1q1bGTNmDMcffzwPPPAA1157Leeffz4VFRVUV1ez\nfPlybr/9dubPn0/Xrl3ZsmULANdeey3XXXcd48ePZ82aNZx00kmsWLGi0ev0m9/8hmXLlrF48eIG\nz31/KMm0Uq+cDLIzUvj4y8h+mYi01u7duyksLGT16tUcfvjhnHDCCfts88orr/DBBx+E/tIvKyvj\nk08+IS0tjeLiYnr16gXAIYccEvoCHT58OHPnzgUCzwOde+65bNiwgYqKir2enZg4cSLp6emkp6fT\nvXv3UC2noKCAwsJCAA4//HBWr169T1xz587ljjvuYNeuXWzZsoWhQ4eGkswZZ5zR5L5vvfUWZ599\nNklJSfTs2XOfWkPdL/Bly5Zx4403snXrVnbs2MFJJ50EwPz585k5cyYQSHI/+9nPWvTZjRkzhj59\n+gCErsH48eP3KaPWRx99REFBAQMGDADgggsu4MEHHwwd58UXX+TOO+8EAglpzZo1HHnkkdx+++2U\nlJRwxhlnMGDAAF5//XXOOuusUBLLzQ38wfvqq6/y4Ycfho63bds2tm8PtLo0dp2iJS6TjJn1B34B\n5Lh7VHo8zYzBPbP5+Es1l8newq1xRFptn0xZWRmTJk3i/vvv55prrtlrG3fnvvvuC3251po3bx7p\n6emh5aSkpNByUlISVVVVAFx99dVcf/31nHbaacybN49bbrkltE/d/ZOTk0P71F9f21xWq7y8nCuv\nvJKFCxfSt29fbrnllr2ey6jdv26Z9c+pKVlZWaF/X3TRRTz//POMHDmSadOmMW/evNB7zd2qG+5n\n11ic9TV2PHdn5syZDBo0aK/1hx12GGPHjuVvf/sbJ510Eg8//DDu3mA5NTU1/Pvf/yYzM3Of91oT\n6/6Iap+MmV1nZsvNbJmZTTezjFaW86iZbTKzZQ28d7KZfWxmn5rZDQDu/rm779tWEGEDe2Tz0Zfb\nm/0lF2lLOTk53Hvvvdx5551UVu59Y8pJJ53EH//4x9D6lStXsnPnzrDLLisrIz8/H4A///nPEYm3\nNqF07dqVHTt2tLijffz48cycOZOamho2bty4V+Kob/v27fTq1YvKykqefPLJ0Ppx48YxY8YMgL3W\n19Wazy47OztUg6hr8ODBrFq1KtTXNX369L2Oc99994W+V95//30APv/8c/r3788111zDaaedxgcf\nfMBxxx3HU089RWlpKUCouezEE0/k97//fajM5prBGoszEqKWZMwsH7gGGO3uw4Bk4Lx623Q3s+x6\n6w5toLhpwMkNHCMZuB84BRgCTDazIRE5gTAM7pnN9vIqvtym8arkwFJUVMTIkSNDX5y1LrvsMoYM\nGcKoUaMYNmwYP/jBD1r0l+wtt9zC2WefzdFHH71PP0Nrde7cmSlTpjB8+HBOP/10iouLW7T/mWee\nSZ8+fU
LnM3bsWHJychrc9le/+hVjx47lhBNOYPDgwaH199xzD/fffz/FxcWUlZU1uG9rPrupU6dy\nyimn7NOEl5GRwYMPPsjEiRMZP348Bx98cOi9m266icrKSkaMGMGwYcNCNyL89a9/ZdiwYRQWFvLR\nRx9x4YUXMnToUH7xi19w7LHHMnLkSK6//noA7r33XhYuXMiIESMYMmQIDzzwQJNx5uXlMW7cOIYN\nGxbxjn/cPSovIB9YC+QSaJabDZxYb5uzgdeBjODyFGBOI+X1A5bVW3ck8HKd5Z8DP6+z/ExzcR5+\n+OHeWu98XuoH/2y2v/7RxlaXIfHhww8/jHUICW379u3u7v7VV195//79fcOGDTGOKL409PsNLPQw\nckHUajLuvg64E1gDbADK3P2Vets8DbwEzDCz84FLgHNacJjaRFarBMg3szwzewAoMrOfN7SjmZ1q\nZg829ldLOEK3MatfRiSmJk2aRGFhIUcffTQ33XQTPXv2jHVIEhS1jn8z6wJ8GygAtgJPm9kF7v5E\n3e3c/Q4zmwH8ETjE3Xe05DANrHN3LwUub2pHd58FzBo9evSUFhxvLzkdUunZKUNJRiTGmuqHkdiK\nZsf/8cAqd9/s7pXAs8BR9Tcys6OBYcBzwM0tPEYJ0LfOch9gfevCbZ1BusNMRKRR0Uwya4AjzKyD\nBe6xOw5YUXcDMysCHiJQ47kYyDWzX7fgGAuAAWZWYGZpBG4seDEi0YdpcM9sPt28g6rqmrY8rIhI\nuxDNPpl3gGeA94ClwWM9WG+zDsDZ7v6Zu9cA3we+qF+WmU0H/g0MMrMSM7s0eIwq4CrgZQIJ7Cl3\nXx6lU2rQwB7ZVFTVsLo0/NtARUQSRVQfxnT3m2miCczd59dbriRQs6m/3eQmypgDzNmPMPdL7fAy\nH325nUO7ZzeztYhIYtEAmfvp0O4dSU4yVqpfRmKotLSUwsJCCgsL6dmzJ/n5+QwfPpzCwkKGDBkS\nWle7TUVFRcxirR1GH2DDhg37DHF/7bXXkp+fT03Nf5qgZ8+ezc03t7TLVg4ESjL7KSM1mX55HfhI\nSUZiKC8vj8WLF7N48WIuv/xyrrvuOpYuXcrixYv58MMPQ+tqt0lLS2u0rGgPM1LX3XffHRqwEgLD\noTz33HP07duXN998M7R+4sSJvPjii+1i2HzZm5JMBAzqma0h/6Vdu+WWW5g6dSonnngiF154IeXl\n5Vx88cUMHz6coqKi0ACZ06ZN46qrrgrtN2nSpNDtwx07duQXv/gFI0eO5IgjjggNvNjYMPoAM2fO\n5OST/zOYx9y5cxk2bBhXXHHFXkOtmBkTJkxg9uzZ0foIJEqUZCLg0G4dWbtlFxVVusNM2q9Fixbx\nwgsv8Je//CU03P3SpUuZPn063//+9/catLIhO3fu5IgjjmDJkiUcc8wxPPRQoHu1dhj9BQsW7PWQ\n5KpVq+jSpcteAzZOnz6dyZMn853vfIfZs2fvNfba6NGjG5xPRg5scTkKc1sr6JZFjcOaLbs4tHvH\n5neQuDdv3jzeeOON0HJtk1DtFy/Asccey4QJE7jrrrvYsSPwDHKvXr2YOnUqs2bN4r333gtte/31\n15OdHd0bS0477bTQqL1vvfUWV199NRAYzPHggw9m5cqVTe6flpYW6l85/PDD+cc//gE0Poz+hg0b\n6NatW2j/iooK5syZw29/+1uys7MZO3Ysr7zyChMnTgSge/furF/fpo/BSQQoyURAv7zAUOKrvtqp\nJCNAYEbHCRMm7LO+oc7rH//4x/usO/XUU0PzqUTD/fffH0p4c+YEbs6sOyS+NzKyeEpKyl4d8nVr\nN6mpqaFh5+sPId/QcPSZmZl77f/SSy9RVlbG8OHDAdi1axcdOnQIJZny8vIGh66XA5uayyKgoGvg\nf87VX+lZGWkffvjDH4ZuAujdu/c+7x9zzDGhIe9XrlzJmjVrGDRoEP369WPx4sXU1NSwdu1a3n33\n3WaP1dgw+gMHDtxrErLp06fz8MMPs3r1alavXs2qVat45ZVXQp39K1euDE2FLO2HkkwEdO6QRpcO\nqXyuJCNx4sorr6S6uprhw4dz7rnnMm3aNNLT0xk3bhwFBQUMHz6cn/zkJ4waNarZshobRj8rK4tD\nDjmETz/9lF27dvHyyy+Hai21748fP55Zs2YBgZsC6r4v7YM1Vi1OFKNHj/aFCxfudzln/GE+aSlJ\nzJh6ZASikvZmxYoVHHbYYbEOo9157rnnWLRoEb/+ddOjSW3cuJHvfve7vPbaa20UmdTV0O+3mS1y\n99HN7auaTIQUdO3I6q90D79IS3znO9+hX79+zW63Zs0a7rrrrugHJBGnjv8IKejagZnvlbOroooO\nafpYRcJ12WWXNbtNS2fLlAOHajIRUtA1cFeZajOJK9GbniU+7e/vtZJMhPTr2gEI3MYsiScjI4PS\n0lIlGokr7k5paSkZGRmtLkPtOhFS+6yMhvxPTH369KGkpITNmzfHOhSRiMrIyKBPnz6t3l9JJkKy\n0lPo0SmdzzcrySSi1NRUCgoKYh2GyAFHzWURVNA1SzUZEZE6lGQiqKBrlvpkRETqUJKJoIKuWWzZ\nWUHZrsrmNxYRSQBKMhEUGihTTWYiIoCSTET171Y7GvOOGEciInJgUJKJoL65HUgyWKUHMkVEACWZ\niEpPSSa/S6Y6/0VEgpRkIiwwUKaSjIgIKMlEXEFeB9VkRESClGQirEdOBjv2VLG7ojrWoYiIxJyS\nTITlZaUBULpzT4wjERGJPSWZCOvSIZBktuysiHEkIiKxpyQTYXkdlWRERGopyURYblY6oCQjIgJN\nDPVvZkuBhmZgMsDdfUTUomrHcrNUkxERqdXUfDKT2iyKONIpI4WUJKNUSUZEpPEk4+5ftGUg8cLM\n6JKVxtdKMiIizffJmNkRZrbAzHaYWYWZVZvZtrYIrr3Ky0pTTUZEhPA6/n8PTAY+ATKBy4D7ohlU\ne5eblaY+GRERwry7zN0/BZLdvdrdHwO+Ed2w2rcuSjIiIkDTHf+1dplZGrDYzO4ANgBZ0Q2rfctT\nkhERAcKryXwvuN1VwE6gL3BmNINq73Kz0ijbXUlldU2sQxERialmazJ17jIrB26NbjjxoXb8sq93\nVdA9OyPG0YiIxE44d5eNM7N/mNlKM/u89tUWwbVXXWqTzM7KGEciIhJb4fTJPAJcBywCNH59GHL3\nGok5O7bBiIjEUDhJpszd/x71SOJInsYvExEBwksyc83s/wHPAqFJUtz9vahF1c5p/DIRkYBwkszY\n4M/RddY58M3IhxMfOndIBZRkRETCubtMD162UGpyEjmZqUoyIpLwwrm7LMfM7jazhcHXXWaW0xbB\ntWcav0xEJLyHMR8FtgPnBF/bgMeiGdT+MrP+ZvaImT0Tqxhys9LYskNJRkQSWzhJ5hB3v9ndPw++\nbgX6N7eTmQ0ys8V1XtvM7EetCdLMHjWzTWa2rIH3Tjazj83sUzO
7ASAY56WtOVakdMlK4+tdSjIi\nktjCSTK7zWx87YKZjQN2N7eTu3/s7oXuXggcDuwCnqu7jZl1N7PseusObaC4acDJ9VeaWTJwP3AK\nMASYbGZDmj2jNqDmMhGR8JLMFcD9ZrbazL4gMPT/5S08znHAZw1MhHYs8IKZZQCY2RTg3vo7u/ub\nwJYGyh0DfBqsuVQAM4BvtzC2qMgNTlzm3tAM1iIiiaHZJOPui919JDACGO7uRe6+pIXHOQ+Y3kDZ\nTwMvATPM7HzgEgL9PuHKB9bWWS4B8s0sz8weAIrM7OcN7Whmp5rZg2VlZS04XPhys9KoqnG2lVdF\npXwRkfag0VuYzewCd3/CzK6vtx4Ad787nAMEpwk4DWjwy97d7zCzGcAfCfT/7AgzdgBruEgvpZna\nlrvPAmaNHj16SguOF7a6D2TmZKZG4xAiIge8pmoytXPGZDfw6tiCY5wCvOfuGxt608yOBoYR6K+5\nuQXlQqDm0rfOch9gfQvLiIr/JJk9zWwpIhK/Gq3JuPufgv981d3n130v2Pkfrsk00FQWLKcIeAiY\nCKwCnjCzX7v7jWGWvQAYYGYFwDoCzXLfbUFsUVM7flmpbmMWkQQWTsf/fWGu24eZdQBOIDDuWUM6\nAGe7+2fuXgN8H6h/cwBmNh34NzDIzErM7FIAd68iMJnay8AK4Cl3Xx5ObNHWJSvQRKbbmEUkkTXV\nJ3MkcBTQrV6/TCcgOZzC3X0XkNfE+/PrLVcSqNnU325yE2XMAeaEE09bCtVkdBuziCSwpsYuSyPQ\n95LC3pOibAPOimZQ8SAzLZnM1GQ99S8iCa2pPpk3gDfMbFoDz7dIGHKz0jRIpogktHD6ZB42s861\nC2bWxcxejmJMcSM3K40t6pMRkQQWTpLp6u5baxfc/Wuge/RCih+qyYhIogsnydSY2UG1C2Z2MIFJ\ny6QZeVlpuoVZRBJaODNj/gJ4y8zeCC4fA0yNXkjxI1cjMYtIggtnZsyXzGwUcASBYVyuc/evoh5Z\nHMjtmMauimp2V1STmRbWXd8iInElnOYygHQCoyCXAUPM7JjohRQ/enbKAODLbeUxjkREJDaarcmY\n2f8FzgWWAzXB1Q68GcW44kLvzpkArPt6NwVds5rZWkQk/oTTJ3M6MMjdNdJjC+UHk8z6rc3O8SYi\nEpfCaS77HNBY9a3Qo1MGZrBOSUZEElQ4NZldwGIzew0I1Wbc/ZqoRRUn0lKS6J6drpqMiCSscJLM\ni8GXtELvzpmsL1OSEZHEFM4tzH9ui0DiVX7nTJav3xbrMEREYiKcu8tW0cAT/u7ePyoRxZn8zpm8\n8uFG3D00dbWISKIIp7lsdJ1/ZwBnA7nRCSf+9O6cSUVVDaU7K+jaMT3W4YiItKlm7y5z99I6r3Xu\n/jvgm20QW1zorduYRSSBhdNcNqrOYhKBmk12I5tLPb07B576X/f1bkb06dzM1iIi8SWc5rK76vy7\nClgFnBOdcOJP7QOZelZGRBJRo0nGzK5193uAm9z9rTaMKa7kZKbSIS2Z9Vs1fpmIJJ6m+mQuDv68\nty0CiVdmRn7nTPXJiEhCaqq5bIWZrQa6mdkHddYb4O4+IqqRxRE9kCkiiarRJOPuk82sJ/AycFrb\nhRR/enfOZPn6sliHISLS5prs+Hf3L4GRbRRL3MrvnMFXOyoor6wmI1WTl4lI4gh30jLZD7XPymwo\nU+e/iCQWJZk2UHfyMhGRRBJ2kjEzTe3YSpq8TEQSVbNJxsyOMrMPgRXB5ZFm9oeoRxZHNHmZiCSq\ncGoyvwVOAkoB3H0JcEw0g4o3aSlJ9MjOUE1GRBJOWM1l7r623qrqKMQS13p3ztCzMiKScMJJMmvN\n7CjAzSzNzH5CsOlMwte7c6aGlhGRhBNOkrkc+CGQD5QAhcFlaYH8zpms27ob933mfxMRiVvhTL/8\nFXB+G8QS12onL9u0fQ89OmXEOhwRkTYRznwy3YApQL+627v7JdELK/4U9wtMJjpn6QYuHlcQ42hE\nRNpGOM1lLwA5wKvA3+q8pAWG9O7EiD45zHh3rZrMRCRhhDNpWQd3/1nUI0kA5xUfxH8/t5T3125l\n1EFdYh2OiEjUhVOTmW1m34p6JAngtMLedEhLZsa7a2IdiohIm2g0yZjZdjPbBlxLINHsNrNtddZL\nC3VMT+HUEb2ZtWQD28srYx2OiEjUNZpk3D3b3TsFfya5e2ad5U5tGWQ8OW9MX3ZXVvPikvWxDkVE\nJOrCGbvstXDWSXgK+3ZmcM9sZrxbfxAFEZH401RzWYaZ5QFdzayLmeUGX/2A3m0VYLwxM84r7svS\ndWXMWboh1uGIiERVU3eX/QD4EYGEsgiw4PptwP1RjiuunTfmIF5Ysp4fzVhM5w6pHHVI11iHJCIS\nFU31ydzj7gXAT9y9v7sXBF8j3f33bRhj3MlITeaxi4o5OK8DUx9fxLJ1ZbEOSUQkKprtk3H3+9oi\nkETTuUMaj186hk4ZKVz02Lus+mpnrEMSEYk4Tb8cQ71yMnn80rHUOHz3obdZu2VXrEMSEYmopjr+\nxwV/prddOInn0O4deeLSseyqqGbyQ29r9kwRiStN1WTuDf78d1sEksiG9O7EE5eOpWx3Jd996G02\nbdO8MyISH5pKMpVm9hiQb2b31n+1VYCJYnifHP58yRg2b9/DlP9dRHmlJh8VkfavqSQzCXgZKCdw\nC3P9l0TYqIO6cM95RXxQspWfPL1EozWLSLvX6HMywcnKZpjZCndf0oYxJbQThvTgZycP5jd//4gB\n3bO59vgBsQ5JRKTVwrm7rNTMnjOzTWa20cxmmlmfqEeWwH5wTH/OHNWH3766kjdWbo51OCIirRZO\nknkMeJHAk//5wKzgOokSM+N/zhhGj07p/O+/v4h1OCIirRZOkunu7o+5e1XwNQ3oFuW4El56SjLf\nLsxn3seb2LKzItbhiIi0SjhJZrOZXWBmycHXBUBptAMT+HZhb6pqnL9pIE0RaafCSTKXAOcAXwIb\ngLOC6yTKhvTqxMAeHXnh/XWxDkVEpFXCGbtsjbuf5u7d3L27u5/u7gd0R4GZ9TezR8zsmVjHsj/M\njNOL8ln4xdesKdWQMyLS/kR17DIz62xmz5jZR2a2wsyObGU5jwbvblvWwHsnm9nHZvapmd0A4O6f\nu/ul+xv/geC0kYGpe15YrNqMiLQ/0R4g8x7gJXcfDIwEVtR908y6m1l2vXWHNlDONODk+ivNLJnA\n3DanAEOAyWY2JDKhHxj6dOnAmIJcnl+8Tg9niki7E7UkY2adgGOARwDcvcLdt9bb7FjgBTPLCO4z\nhf+MmRbi7m8CWxo4zBjg02DNpQKYAXw7zPhONbMHy8oO/LlcvlOUz2ebd7Js3bZYhyIi0iLNJhkz\n6xHs3/h7cHmImYXTFNUf2Aw8Zmbvm9nDZpZVdwN3fxp4icDIAufzn5sMwpUPrK2zXEJgrLU8M3sA\nKDKznze0o7vPcvepOT
k5LThcbHxrWC9Sk01NZiLS7oRTk5lGYAyz3sHllQSmZW5OCjAK+KO7FwE7\ngRvqb+TudxAYH+2PwGnuviOMsmtZA+vc3Uvd/XJ3P8Td/08Lyjsg5XRI5ZgB3ZizdIOazESkXQkn\nyXR196eAGgB3rwLCGSK4BChx93eCy88QSDp7MbOjgWHAc8DN4QRd7xh96yz3Ada3sIx24VvDe7G+\nrJz319ZvcRQROXCFk2R2mlke4ABmdgTQbEeGu38JrDWzQcFVxwEf1t3GzIqAhwj0o1wM5JrZr8MP\nnwXAADMrMLM04DwCQ+DEneOH9CA12ZjzgR7MFJH2I5wkcz2BL+5DzGw+8DhwdZjlXw08aWYfAIXA\n/9R7vwNwtrt/5u41wPeBfZ7BMbPpBCZPG2RmJbV9QsFa1VUEmvNWAE+5+/IwY2tXcjJTOVpNZiLS\nzjQ61H8td3/PzI4FBhHoA/nY3SvDKdzdFwOjm3h/fr3lSgI1m/rbTW6ijDnAnHDiae8mDu/F6x9t\n4v21Wxl1UJdYhyMi0qxmk4yZXVhv1Sgzw90fj1JM0oi6TWZKMiLSHoTTXFZc53U0cAtwWhRjkkao\nyUxE2ptwmsv26n8xsxzgf6MWkTTpW2oyE5F2pDVP/O8CNCdwjJwQbDJ7/F+rYx2KiEizwumTmUXw\n9mUCSWkI8FQ0g5LG5WSmcvmxh3Df658y7tCunD26b/M7iYjESLNJBrizzr+rgC/cvSRK8UgYfnT8\nQBau/pqbXljG8D45DO7ZKdYhiYg0KJz5ZN6o85qvBBN7yUnGPZMLyc5I5con3mPHnqpYhyQi0qBG\nk4yZbTezbQ28tpuZhgOOse7LXy3NAAAQy0lEQVTZGdw3uYjVpTuZ8ueFbNlZEeuQRET20WiScfds\nd+/UwCvb3dU+cwA4on8ed50zkkVrvubU+95iacmBP22BiCSWsO8uC04wdlDtK5pBSfi+U9SHmZcf\nBcCZD/yLafNXUVVdE+OoREQCwplP5jQz+wRYBbwBrAb+HuW4pAWG98lh1tXjOaJ/HrfM+pCJ977F\nPz/ZHOuwRETCqsn8CjgCWOnuBQRGU57f9C7S1nKz0vjzxcU8cMEodlVW8b1H3uWSaQv46Et1n4lI\n7ISTZCrdvRRIMrMkd59LYERlOcCYGScP68Wr1x/Lz08ZzILVWzjlnn/y46eWUPL1rliHJyIJKJzn\nZLaaWUfgTQLD9m8i8LyMHKDSU5L5wbGHcG5xX/4w7zOm/Ws1Ly5Zx+mF+Vw+4RAO6dYx1iGKSIKw\n5gZaNLMsAtMjG3A+kAM8GazdtHujR4/2hQsXxjqMqFq/dTd/euMzZixYS0V1DScO6cGl4/tT3K8L\nZg3NYC0i0jQzW+TujU7lEtqusSRjZr8H/uLu/4p0cAeSREgytb7asYfH5q/iibfXULa7kqG9O3HR\nUf2YOKIXHdLCqdSKiAREIslcS2A6417AX4HpwUnI4koiJZlauyuqee79dTw2fxWfbNpBx/QUJg7v\nxVmj+3D4QV1ISlLtRkSatt9Jpk5BBxNINucBGcB0YIa7r4xEoLGWiEmmlruzYPXXPL1wLX9buoFd\nFdX07JTBKcN7MnF4L4oO6kKyEo6INCBiSaZeoUXAo8AId0/ej/gOGImcZOrauaeKf3y4kb8t3cAb\nKzdTUVUTnCStK8cM7MaR/fPo0yVTfTgiAoSfZMIZ6j8VOJlATeY4Ag9k3rrfEcoBJSs9hdOL8jm9\nKJ/t5ZXM+3gzb67czBsrNzP7gw0A9OyUweh+XRjZpzND8zsxtHcOOZmpMY5cRA5kTfXJnABMBiYC\n7wIzgOfdfWfbhRd9qsk0zd1ZuXEH764q5d3VX7Nw9RY2lJWH3u/RKZ3+XTvSv1sWB+V2oHfnTHp3\nzqB7dga5WWl0SEver9pPTY2zdXclpTv2sHnHHjZvD7527GHLjgq27KygbHdlaMKjZDO6d0qnT5cO\n9M3N5JuDu9MrJ3M/PwURqS8SHf9zgb8AM919S4TjO2AoybRc6Y49LF+/jeXrt/Hpph18/tUOPtu0\ng23l+z4+lZGaRKeMVDqmp9AxI4WM1GTSkpNITba9+nuqapyKqhr2VNWwu6Ka7Xsq2V5exfbyKqpr\n9v0dTUtOIjcrjdysNHIyU0NlVVbX8OW2ctZv3U1ltZNkcMzAbpw7ui8nDOlBSnJrJoMVkfqi0icT\nj5RkImdbeSUbtpazvmw3X23fQ+nOQE1j2+5KduypYseeKsorq6msdiqra6iqdmorOclJRnpKEmkp\nSWSkJJOdkUJ2Rio5mankdUwjr2M6XbPS6N4pnW4dM+iUmdJkDammxllVupPn31/H0wtL+HJbOWMK\ncvn95CK6d8poo09EJH4pyYRJSSb+Vdc4z75Xwi9fWE5Wegr3TS7iyEPyYh2WSLsWbpJR24HEveQk\n4+zRfXnhqnF0ykzh/Iff5oXF62IdlkhCUJKRhDGwRzYvXjWeww/uwo3PLWND2e5YhyQS95RkJKF0\nTE/hzrNHUlXj/PezS0n05mKRaFOSkYRzcF4WPz1pEHM/3syz76nZTCSalGQkIV10VD9GH9yFW2ct\nZ9O28uZ3EJFWUZKRhJSUZNxx1gj2VNVw1ytxMQyfyAFJSUYSVv9uHTljVB9eWLKOsl2VsQ5HJC4p\nyUhCu+CIgyivrOGZ90piHYpIXFKSkYQ2tHcORQd15sl3vtCdZiJRoCQjCe+CsQfz+ead/PuzuJhR\nXOSAoiQjCW/iiF507pDKE+98EetQROKOkowkvIzUZM4+vA+vLN+o25lFIkxJRgT47tiDqapxZixY\nG+tQROKKkowIUNA1iyP65zJryfpYhyISV5RkRIJOHtqTTzbt4LPNO2IdikjcUJIRCTpxaE8AXl7+\nZYwjEYkfSjIiQb07ZzKyb2deXqYkIxIpSjIidZw0tAdLSspYv1VzzYhEgpKMSB0nB5vMXlGTmUhE\nKMmI1NG/W0cGdO/Iy8s3xjoUkbigJCNSz8nDevLOqlK27KyIdSgi7Z6SjEg9Jw3tSY3DqytUmxHZ\nX0oyIvUM7d2J/M6Z/H3phliHItLuKcmI1GNmTBrZi39+8hVf7dgT63BE2jUlGZEGnFHUh6oa1zAz\nIvtJSUakAYN6ZjO0dyeee39drEMRaddSYh1ArG3fvp1bb701tDxlyhQAHnroodC6Y489lgkTJnDX\nXXexY0dgXKtevXoxdepUZs2axXvvvRfa9vrrr2f9+vXMmDEjtG7SpEkcfvjhex1n4MCBTJ48menT\np7Ny5crQ+ptvvplFixYxe/bs0LrzzjuP3r17c/fdd4fWjRo1ilNPPZUHH3yQDRsCfQcdO3bkxz/+\nMfPmzeONN97QOe3nOY0B1lTn8OmmkSx4bXZcnFM8XiedU2zOKVyW6FPOjh492hcuXBjrMOQAtGl7\nOUf+n9f5wTH9+a+TB8c6HJEDipktcvfRzW2n5jKRRnTPzuDoAV15/v111NQk9h9
jIq2lJCPShO8U\n5bO+rJy3V5XGOhSRdklJRqQJJw7pScf0FGYu0g0AIq2hJCPShMy0ZM4Ylc/zi9exbF1ZrMMRaXeU\nZESa8eMTBtGlQxo3PPsBVdU1sQ5HpF1RkhFpRk6HVG49bSjL1m3jsfmrYx2OSLuiJCMShm8N78nx\nh/Xgrn98zJrSXbEOR6TdUJIRCYOZ8avTh5KSlMS1f32fDWWaOVMkHEoyImHqlZPJb84czooN2zjh\n7jd54u0v9PyMSDMSflgZkZaYNKI3I/I7c8OzH3Dj88v4yztrGD+gK4V9OzOkVydyO6aRnZ6CmcU6\nVJEDgpKMSAsdlNeBJy8by9OLSvjLO2uYNn81FXXuOktOMjqmp5CanERqspGSbCRZ4GUAwfxjEFYy\nUrqSaPnD+aMY0CM7qsdQkhFpBTPjnNF9OWd0X/ZUVbNiw3ZWbtzOtt2VlO2uZNvuSiprnMqqGqpq\nHHenxqEmOFagh/7TNA9nI5FWSk9JjvoxlGRE9lN6SjKFfTtT2LdzrEMROeCo419ERKImLpOMmfU3\ns0fM7JlYxyIiksiimmTMbLWZLTWzxWbW6klbzOxRM9tkZssaeO9kM/vYzD41sxsA3P1zd790f2IX\nEZH91xY1mW+4e2FDk9uYWXczy6637tAGypgGnNzA/snA/cApwBBgspkNiUjUIiKy32LdXHYs8IKZ\nZQCY2RTg3vobufubwJYG9h8DfBqsuVQAM4Bvh3NgMzvVzB4sK9PIuiIi0RLtJOPAK2a2yMym7vOm\n+9PAS8AMMzsfuAQ4pwXl5wNr6yyXAPlmlmdmDwBFZvbzBgNzn+XuU3NyclpwOBERaYlo38I8zt3X\nm1l34B9m9lGwVhLi7neY2Qzgj8Ah7r6jBeU39Jyau3spcHnrwxYRkUiIak3G3dcHf24CniPQvLUX\nMzsaGBZ8/+YWHqIE6FtnuQ+wvlXBiohIxJl7dJ4oNrMsIMndtwf//Q/gNnd/qc42RcB0YCKwCngC\n+Nzdb2ygvH7AbHcfVmddCrASOA5YBywAvuvuy1sQ52bgixafYEBX4KtW7tueJeJ5J+I5Q2KedyKe\nM7T8vA92927NbRTN5rIewHPBsZlSgL/UTTBBHYCz3f0zADP7PnBR/YLMbDowAehqZiXAze7+iLtX\nmdlVwMtAMvBoSxIMQDgfUmPMbGFDd83Fu0Q870Q8Z0jM807Ec4bonXfUkoy7fw6MbGab+fWWK4GH\nGthuchNlzAHmtDJMERGJoljfwiwiInFMSWb/PBjrAGIkEc87Ec8ZEvO8E/GcIUrnHbWOfxEREdVk\nREQkapRkWqmhgTnjjZn1NbO5ZrbCzJab2bXB9blm9g8z+yT4s0usY400M0s2s/fNbHZwucDM3gme\n81/NLC3WMUaamXU2s2fM7KPgNT8y3q+1mV0X/N1eZmbTzSwjHq91Q4MMN3ZtLeDe4HfbB2Y2an+O\nrSTTCgk0MGcV8GN3Pww4Avhh8DxvAF5z9wHAa8HleHMtsKLO8v8Ffhs856+BeBzl+x7gJXcfTODO\n0BXE8bU2s3zgGmB08Pm7ZOA84vNaT2PfQYYbu7anAAOCr6kERmNpNSWZ1mn1wJztibtvcPf3gv/e\nTuBLJ5/Auf45uNmfgdNjE2F0mFkfAg8IPxxcNuCbQO38RPF4zp2AY4BHANy9wt23EufXmsBjHJnB\nB7s7ABuIw2vdyCDDjV3bbwOPe8DbQGcz69XaYyvJtE6DA3PGKJY2ERxxoQh4B+jh7hsgkIiA7rGL\nLCp+B/wXUBNczgO2untVcDker3d/YDPwWLCZ8OHgSB1xe63dfR1wJ7CGQHIpAxYR/9e6VmPXNqLf\nb0oyrdPgwJxtHkUbMbOOwEzgR+6+LdbxRJOZTQI2ufuiuqsb2DTerncKMAr4o7sXATuJo6axhgT7\nIL4NFAC9gSwCTUX1xdu1bk5Ef9+VZFonYQbmNLNUAgnmSXd/Nrh6Y231OfhzU6zii4JxwGlmtppA\nM+g3CdRsOgebVCA+r3cJUOLu7wSXnyGQdOL5Wh8PrHL3zcHRRp4FjiL+r3Wtxq5tRL/flGRaZwEw\nIHgXShqBzsIXYxxTxAX7Ih4BVrj73XXeehH4fvDf3wdeaOvYosXdf+7ufdy9H4Hr+rq7nw/MBc4K\nbhZX5wzg7l8Ca81sUHDVccCHxPG1JtBMdoSZdQj+rteec1xf6zoau7YvAhcG7zI7AiirbVZrDT2M\n2Upm9i0Cf+HWDsx5e4xDijgzGw/8E1jKf/on/ptAv8xTwEEE/kc9290bmrm0XTOzCcBP3H2SmfUn\nULPJBd4HLnD3PbGML9LMrJDAzQ5pwOfAxQT+EI3ba21mtwLnEriT8n3gMgL9D3F1resOMgxsJDCt\nyvM0cG2DCff3BO5G2wVc7O4LW31sJRkREYkWNZeJiEjUKMmIiEjUKMmIiEjUKMmIiEjUKMmIiEjU\nKMmIRIiZ7Qj+7Gdm341w2f9db/lfkSxfJFqUZEQirx/QoiQTHNm7KXslGXc/qoUxicSEkoxI5P0G\nONrMFgfnK0k2s/9nZguC83P8AAIPewbn6/kLgQdeMbPnzWxRcI6TqcF1vyEwUvBiM3syuK621mTB\nspeZ2VIzO7dO2fPqzA/zZPAhO5E2ldL8JiLSQjcQHCkAIJgsyty92MzSgflm9kpw2zHAMHdfFVy+\nJPjUdSawwMxmuvsNZnaVuxc2cKwzgEIC8790De7zZvC9ImAogXGn5hMYl+2tyJ+uSONUkxGJvhMJ\njAW1mMCQPHkEJoQCeLdOggG4xsyWAG8TGKRwAE0bD0x392p33wi8ARTXKbvE3WuAxQSa8UTalGoy\nItFnwNXu/vJeKwNjo+2st3w8cKS77zKzeUBGGGU3pu54W9Xo/3eJAdVkRCJvO5BdZ/ll4IrgtAmY\n2cDghGD15QBfBxPMYAJTXteqrN2/njeBc4P9Pt0IzG75bkTOQiQC9JeNSOR9AFQFm72mAfcQaKp6\nL9j5vpmGp/R9CbjczD4APibQZFbrQeADM3svOPVAreeAI4ElBCaW+i93/zKYpERiTqMwi4hI1Ki5\nTEREokZJRkREokZJRkREokZJRkREokZJRkREokZJRkREokZJRkREokZJRkREoub/AyBm2qNNuZkD\nAAAAAElFTkSuQmCC\n", - "text/plain": [ - "" + "cell_type": "code", + "metadata": { + "id": "ri9QCNEAVwgw", + "colab_type": "code", + "colab": { + "base_uri": "https://localhost:8080/", + "height": 34 + }, + "outputId": "efd8c64b-836e-449b-a385-dd3a63a5b4a2" + }, + "source": [ + "# Import TF 2.\n", + "%tensorflow_version 2.x\n", + "import 
tensorflow as tf\n",
+    "import numpy as np\n",
+    "import matplotlib.pyplot as plt\n",
+    "\n",
+    "# Fix seed so that the results are reproducible.\n",
+    "tf.random.set_seed(0)\n",
+    "np.random.seed(0)\n",
+    "\n",
+    "try:\n",
+    "  import t3f\n",
+    "except ImportError:\n",
+    "  # Install T3F if it's not already installed.\n",
+    "  !git clone https://github.com/Bihaqo/t3f.git\n",
+    "  !cd t3f; pip install -e .\n",
+    "  import t3f"
+   ],
+   "execution_count": 1,
+   "outputs": [
+    {
+     "output_type": "stream",
+     "text": [
+      "TensorFlow 2.x selected.\n"
+     ],
+     "name": "stdout"
+    }
+   ]
+  },
+  {
+   "cell_type": "code",
+   "metadata": {
+    "id": "5Awp7wdwVwg3",
+    "colab_type": "code",
+    "colab": {}
+   },
+   "source": [
+    "# Initialize A randomly, with large tt-ranks\n",
+    "shape = 10 * [2]\n",
+    "init_A = t3f.random_tensor(shape, tt_rank=16)\n",
+    "A = t3f.get_variable('A', initializer=init_A, trainable=False)\n",
+    "\n",
+    "# Create an X variable.\n",
+    "init_X = t3f.random_tensor(shape, tt_rank=2)\n",
+    "X = t3f.get_variable('X', initializer=init_X)\n",
+    "\n",
+    "def step():\n",
+    "  # Compute the gradient of the functional. Note that it is simply X - A.\n",
+    "  gradF = X - A\n",
+    "\n",
+    "  # Let us compute the projection of the gradient onto the tangent space at X.\n",
+    "  riemannian_grad = t3f.riemannian.project(gradF, X)\n",
+    "\n",
+    "  # Compute the update by subtracting the Riemannian gradient\n",
+    "  # and retracting back to the manifold.\n",
+    "  alpha = 1.0\n",
+    "  t3f.assign(X, t3f.round(X - alpha * riemannian_grad, max_tt_rank=2))\n",
+    "\n",
+    "  # Let us also compute the value of the functional\n",
+    "  # to see if it is decreasing.\n",
+    "  return 0.5 * t3f.frobenius_norm_squared(X - A)"
+   ],
+   "execution_count": 0,
+   "outputs": []
+  },
+  {
+   "cell_type": "code",
+   "metadata": {
+    "id": "us71Ch2hVwhN",
+    "colab_type": "code",
+    "colab": {
+     "base_uri": "https://localhost:8080/",
+     "height": 187
+    },
+    "outputId": "650521b0-ac72-4d26-8fef-a7659b320c81"
+   },
+   "source": [
+    "log = []\n",
+    "for i in range(100):\n",
+    "  F = step()\n",
+    "  if i % 10 == 0:\n",
+    "    print(F)\n",
+    "  log.append(F.numpy())"
+   ],
+   "execution_count": 3,
+   "outputs": [
+    {
+     "output_type": "stream",
+     "text": [
+      "tf.Tensor(749.22894, shape=(), dtype=float32)\n",
+      "tf.Tensor(569.4678, shape=(), dtype=float32)\n",
+      "tf.Tensor(502.00604, shape=(), dtype=float32)\n",
+      "tf.Tensor(490.0112, shape=(), dtype=float32)\n",
+      "tf.Tensor(489.01282, shape=(), dtype=float32)\n",
+      "tf.Tensor(488.71234, shape=(), dtype=float32)\n",
+      "tf.Tensor(488.56543, shape=(), dtype=float32)\n",
+      "tf.Tensor(488.47928, shape=(), dtype=float32)\n",
+      "tf.Tensor(488.4239, shape=(), dtype=float32)\n",
+      "tf.Tensor(488.38593, shape=(), dtype=float32)\n"
+     ],
+     "name": "stdout"
+    }
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {
+    "id": "i2QmXYqeVwhQ",
+    "colab_type": "text"
+   },
+   "source": [
+    "It is instructive to compare the obtained result with the quasi-optimum delivered by the TT-round procedure. 
" + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "FgpVf8qRVwhR", + "colab_type": "code", + "colab": { + "base_uri": "https://localhost:8080/", + "height": 34 + }, + "outputId": "0dce2840-5aca-45d5-c5c1-e34651f00043" + }, + "source": [ + "quasi_sol = t3f.round(A, max_tt_rank=2)\n", + "\n", + "val = 0.5 * t3f.frobenius_norm_squared(quasi_sol - A)\n", + "print(val)" + ], + "execution_count": 4, + "outputs": [ + { + "output_type": "stream", + "text": [ + "tf.Tensor(518.3871, shape=(), dtype=float32)\n" + ], + "name": "stdout" + } + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "Dj5uljryVwhU", + "colab_type": "text" + }, + "source": [ + "We see that the value is slightly bigger than the exact minimum, but TT-round is faster and cheaper to compute, so it is often used in practice." + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "xosyMEx1VwhV", + "colab_type": "code", + "colab": { + "base_uri": "https://localhost:8080/", + "height": 296 + }, + "outputId": "7d49ca8b-1f5a-458f-8abf-1e867e9deb28" + }, + "source": [ + "plt.semilogy(log, label='Riemannian gradient descent')\n", + "plt.axhline(y=val.numpy(), lw=1, ls='--', color='gray', label='TT-round(A)')\n", + "plt.xlabel('Iteration')\n", + "plt.ylabel('Value of the functional')\n", + "plt.legend()" + ], + "execution_count": 5, + "outputs": [ + { + "output_type": "execute_result", + "data": { + "text/plain": [ + "" + ] + }, + "metadata": { + "tags": [] + }, + "execution_count": 5 + }, + { + "output_type": "display_data", + "data": { + "image/png": "iVBORw0KGgoAAAANSUhEUgAAAZUAAAEGCAYAAACtqQjWAAAABHNCSVQICAgIfAhkiAAAAAlwSFlz\nAAALEgAACxIB0t1+/AAAADh0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uMy4xLjMsIGh0\ndHA6Ly9tYXRwbG90bGliLm9yZy+AADFEAAAgAElEQVR4nO3deXxV1bn/8c+TOWEMQxAISpB5DFMU\nQUFq1QqIrXMdalWoWsWqt9XbYtXeen/WW7wVS7WgVnuvF6viBLVoVXCgtQoIyiQOIAYQEGQMIdPz\n++OcHJOQhJPknByS832/XueV7LWnZ2dDnqy99lrL3B0REZFISIh1ACIi0nwoqYiISMQoqYiISMQo\nqYiISMQoqYiISMQkxTqAWOvQoYN379491mGIiDQpy5Yt+8rdO1Ytj/uk0r17d5YuXRrrMEREmhQz\n+7y6cj3+EhGRiFFSERGRiFFSERGRiIn7NhWRSCguLiY/P5/CwsJYhyISUWlpaWRnZ5OcnBzW9koq\nIhGQn59Pq1at6N69O2YW63BEIsLd2blzJ/n5+eTk5IS1jx5/iURAYWEh7du3V0KRZsXMaN++fZ1q\n4EoqIhGihCLNUV3/XSup1NOT727imWX5sQ5DROSooqRST8++v5mnln4R6zBEQhITE8nNzWXgwIFM\nmjSJ3bt3A7BlyxbOO++8GEd3uF/+8pe8+uqrsQ6jWi1btgQa/rP73e9+R0FBwRG3W7x4MRMnTqz3\neRpq9+7d/OEPf4jIsZRU6qlT6zS279WbPnL0SE9PZ8WKFaxatYp27doxa9YsALp06cIzzzwT4+gO\n96tf/YrTTjut0c5XWlpa530a+rMLN6nEmpLKUaBTq1S27zuEZs6Uo9GoUaPYvHkzABs3bmTgwIFA\n4BfrT3/6U0aOHMngwYP54x//CAT+Uh47diyTJ0+mR48e3HbbbTzxxBPk5eUxaNAgPv30UwDmz5/P\nCSecwNChQznttNPYtm0bAHfeeSdXXnkl48aNo0ePHsycOTN07n79+jFlyhQGDBjA6aefzsGDBwG4\n4oorQr+wf/WrXzFy5EgGDhzI1KlTQ/+vxo0bx6233kpeXh69e/fmrbfeOuxay8rKuO666+jbty/f\n/va3Oeuss0LH7d69O7feeivDhg3j6aefZs6cOYwcOZIhQ4Zw7rnnhn7hb9iwgVGjRjFo0CCmT58e\nOna4P7tx48Zx3nnn0bdvXy655BLcnZkzZ7JlyxZOPfVUTj311MPiXrhwIX379mXYsGE8++yzofID\nBw5w5ZVXkpeXx9ChQ3nhhRcAWL16NXl5eeTm5jJ48GA+/vhjAP785z8zePBghgwZwmWXXQbAjh07\nOPfccxk5ciQjR45kyZIltd6n2267jU8//ZTc3Fx++tOfHumfV630SnE9dWqdRkFRKfsPldAqLbz3\ntyU+3DV/NWu27I3oMft3ac0dkwaEtW1paSmvvfYaV1111WHrHnnkEdq0acN7773HoUOHGD16NKef\nfjoAK1euZO3atbRr144ePXpw9dVX8+6773L//ffzwAMP8Lvf/Y4xY8bwzjvvYGY8/PDD3HvvvcyY\nMQOAdevWsWjRIvbt20efPn249tprAfj444+ZO3cuc+bM4YILLmDevHlceumlleK6/vrr+eUvfwnA\nZZddxoIFC5g0aRIAJSUlvPvuu7z00kvcddddhz0ye/bZZ9m4cSNr1qxh+/bt9OvXjyuvvDK0vn37\n9ixfvhyAnTt3MmXKFACmT5/OI488wg033MCNN97Itddey+WXXx6q4dXlZ/f++++zevVqunTpwujR\no1myZAnTpk3jvvvuY9GiRXTo0KHSsQoLC5kyZQqvv/46PXv25MILLwytu/vuuxk/fjyPPvoou3fv\nJi8vj9NOO42HHnqIG2+8kUsuuYSioiJKS0tZvXo1v/71r/nHP
[... base64-encoded PNG data for the notebook's plot output omitted ...]
" + ] + }, + "metadata": { + "tags": [] + } + } ] - }, - "metadata": {}, - "output_type": "display_data" } - ], - "source": [ - "plt.semilogy(log, label='Riemannian gradient descent')\n", - "plt.axhline(y=val, lw=1, ls='--', color='gray', label='TT-round(A)')\n", - "plt.xlabel('Iteration')\n", - "plt.ylabel('Value of the functional')\n", - "plt.legend()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": true - }, - "outputs": [], - "source": [] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.6.5" - } - }, - "nbformat": 4, - "nbformat_minor": 2 -} + ] +} \ No newline at end of file diff --git a/docs/tutorials/tensor_completion.ipynb b/docs/tutorials/tensor_completion.ipynb index d2d4e203..81540e55 100644 --- a/docs/tutorials/tensor_completion.ipynb +++ b/docs/tutorials/tensor_completion.ipynb @@ -1,434 +1,581 @@ { - "cells": [ - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# Tensor completion (example of minimizing a loss w.r.t. TT-tensor)\n", - "\n", - "[Open](https://colab.research.google.com/github/Bihaqo/t3f/blob/develop/docs/tutorials/tensor_completion.ipynb) **this page in an interactive mode via Google Colaboratory.**\n", - "\n", - "In this example we will see how can we do tensor completion with t3f, i.e. observe a fraction of values in a tensor and recover the rest by assuming that the original tensor has low TT-rank.\n", - "Mathematically it means that we have a binary mask $P$ and a ground truth tensor $A$, but we observe only a noisy and sparsified version of $A$: $P \\odot (\\hat{A})$, where $\\odot$ is the elementwise product (applying the binary mask) and $\\hat{A} = A + \\text{noise}$. In this case our task reduces to the following optimization problem:\n", - "\n", - "$$\n", - "\\begin{aligned}\n", - "& \\underset{X}{\\text{minimize}} \n", - "& & \\|P \\odot (X - \\hat{A})\\|_F^2 \\\\\n", - "& \\text{subject to} \n", - "& & \\text{tt_rank}(X) \\leq r_0\n", - "\\end{aligned}\n", - "$$" - ] - }, - { - "cell_type": "code", - "execution_count": 1, - "metadata": { - "collapsed": true - }, - "outputs": [], - "source": [ - "import numpy as np\n", - "import tensorflow.compat.v1 as tf\n", - "tf.disable_v2_behavior()\n", - "tf.enable_resource_variables()\n", - "tf.set_random_seed(0)\n", - "np.random.seed(0)\n", - "%matplotlib inline\n", - "import matplotlib.pyplot as plt\n", - "\n", - "try:\n", - " import t3f\n", - "except ImportError:\n", - " # Install T3F if it's not already installed.\n", - " !git clone https://github.com/Bihaqo/t3f.git\n", - " !cd t3f; pip install .\n", - " import t3f" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "**Generating problem instance**\n", - "\n", - "Lets generate a random matrix $A$, noise, and mask $P$." - ] - }, - { - "cell_type": "code", - "execution_count": 2, - "metadata": { - "collapsed": true - }, - "outputs": [], - "source": [ - "shape = (3, 4, 4, 5, 7, 5)\n", - "# Fix random seed so the results are comparable between runs.\n", - "tf.set_random_seed(0)\n", - "# Generate ground truth tensor A. 
To make sure that it has low TT-rank,\n", - "# let's generate a random tt-rank 5 tensor and apply t3f.full to it to convert to actual tensor.\n", - "ground_truth = t3f.full(t3f.random_tensor(shape, tt_rank=5))\n", - "# Make a (non trainable) variable out of ground truth. Otherwise, it will be randomly regenerated on each sess.run.\n", - "ground_truth = tf.get_variable('ground_truth', initializer=ground_truth, trainable=False)\n", - "noise = 1e-2 * tf.get_variable('noise', initializer=tf.random_normal(shape), trainable=False)\n", - "noisy_ground_truth = ground_truth + noise\n", - "# Observe 25% of the tensor values.\n", - "sparsity_mask = tf.cast(tf.random_uniform(shape) <= 0.25, tf.float32)\n", - "sparsity_mask = tf.get_variable('sparsity_mask', initializer=sparsity_mask, trainable=False)\n", - "sparse_observation = noisy_ground_truth * sparsity_mask" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "**Initialize the variable and compute the loss**" - ] - }, - { - "cell_type": "code", - "execution_count": 3, - "metadata": { - "collapsed": true - }, - "outputs": [], - "source": [ - "observed_total = tf.reduce_sum(sparsity_mask)\n", - "total = np.prod(shape)\n", - "initialization = t3f.random_tensor(shape, tt_rank=5)\n", - "estimated = t3f.get_variable('estimated', initializer=initialization)\n", - "# Loss is MSE between the estimated and ground-truth tensor as computed in the observed cells.\n", - "loss = 1.0 / observed_total * tf.reduce_sum((sparsity_mask * t3f.full(estimated) - sparse_observation)**2)\n", - "# Test loss is MSE between the estimated tensor and full (and not noisy) ground-truth tensor A.\n", - "test_loss = 1.0 / total * tf.reduce_sum((t3f.full(estimated) - ground_truth)**2)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "SGD optimization\n", - "-------------------------\n", - "The simplest way to solve the optimization problem is Stochastic Gradient Descent: let TensorFlow differentiate the loss w.r.t. the factors (cores) of the TensorTrain decomposition of the estimated tensor and minimize the loss with your favourite SGD variation." 
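(For contrast with the session-based TF1 code in the surrounding cells, here is a minimal eager-mode sketch of the same masked-MSE gradient step. It assumes the `shape`, `sparsity_mask`, and `sparse_observation` tensors defined in the cells above; the use of `tf.GradientTape` with a Keras optimizer is illustrative and not part of the original notebook.)

import tensorflow as tf
import t3f

# TT-variable to be fitted; its TT-cores are the trainable tensors.
estimated = t3f.get_variable('estimated',
                             initializer=t3f.random_tensor(shape, tt_rank=5))
optimizer = tf.keras.optimizers.Adam(learning_rate=0.01)

def train_step():
    with tf.GradientTape() as tape:
        # MSE computed over the observed entries only.
        diff = sparsity_mask * t3f.full(estimated) - sparse_observation
        loss = tf.reduce_sum(diff ** 2) / tf.reduce_sum(sparsity_mask)
    # Differentiate w.r.t. the TT-cores and take one optimizer step.
    grads = tape.gradient(loss, estimated.tt_cores)
    optimizer.apply_gradients(zip(grads, estimated.tt_cores))
    return loss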
- ] - }, - { - "cell_type": "code", - "execution_count": 4, - "metadata": { - "collapsed": true - }, - "outputs": [], - "source": [ - "optimizer = tf.train.AdamOptimizer(learning_rate=0.01)\n", - "step = optimizer.minimize(loss)" - ] - }, - { - "cell_type": "code", - "execution_count": 5, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "0 2.20282 2.26653\n", - "1000 0.00158865 0.00234495\n", - "2000 0.000334849 0.00045755\n", - "3000 9.98362e-05 5.27664e-05\n", - "4000 8.28005e-05 2.14205e-05\n", - "5000 8.17184e-05 2.07301e-05\n", - "6000 8.57184e-05 2.59403e-05\n", - "7000 8.1738e-05 2.07167e-05\n", - "8000 0.000102493 4.31596e-05\n", - "9000 8.5987e-05 2.29819e-05\n" - ] + "nbformat": 4, + "nbformat_minor": 0, + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.6.5" + }, + "colab": { + "name": "tensor_completion.ipynb", + "provenance": [], + "collapsed_sections": [] } - ], - "source": [ - "sess = tf.Session()\n", - "sess.run(tf.global_variables_initializer())\n", - "train_loss_hist = []\n", - "test_loss_hist = []\n", - "for i in range(10000):\n", - " _, tr_loss_v, test_loss_v = sess.run([step, loss, test_loss])\n", - " train_loss_hist.append(tr_loss_v)\n", - " test_loss_hist.append(test_loss_v)\n", - " if i % 1000 == 0:\n", - " print(i, tr_loss_v, test_loss_v)" - ] }, - { - "cell_type": "code", - "execution_count": 6, - "metadata": {}, - "outputs": [ + "cells": [ { - "data": { - "text/plain": [ - "" + "cell_type": "markdown", + "metadata": { + "id": "c644IyT2b6fN", + "colab_type": "text" + }, + "source": [ + "# Tensor completion (example of minimizing a loss w.r.t. TT-tensor)\n", + "\n", + "[Open](https://colab.research.google.com/github/Bihaqo/t3f/blob/develop/docs/tutorials/tensor_completion.ipynb) **this page in an interactive mode via Google Colaboratory.**\n", + "\n", + "In this example we will see how can we do tensor completion with t3f, i.e. observe a fraction of values in a tensor and recover the rest by assuming that the original tensor has low TT-rank.\n", + "Mathematically it means that we have a binary mask $P$ and a ground truth tensor $A$, but we observe only a noisy and sparsified version of $A$: $P \\odot (\\hat{A})$, where $\\odot$ is the elementwise product (applying the binary mask) and $\\hat{A} = A + \\text{noise}$. 
In this case our task reduces to the following optimization problem:\n", + "\n", + "$$\n", + "\\begin{aligned}\n", + "& \\underset{X}{\\text{minimize}} \n", + "& & \\|P \\odot (X - \\hat{A})\\|_F^2 \\\\\n", + "& \\text{subject to} \n", + "& & \\text{tt_rank}(X) \\leq r_0\n", + "\\end{aligned}\n", + "$$" ] - }, - "execution_count": 6, - "metadata": {}, - "output_type": "execute_result" }, { - "data": { - "image/png": "iVBORw0KGgoAAAANSUhEUgAAAY4AAAEaCAYAAAAG87ApAAAABHNCSVQICAgIfAhkiAAAAAlwSFlz\nAAALEgAACxIB0t1+/AAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDIuMS4wLCBo\ndHRwOi8vbWF0cGxvdGxpYi5vcmcvpW3flQAAIABJREFUeJzt3Xd4VGX2wPHvSSchCSF0Qu9VEKSI\nKDYEBREbFuyKZXV3f2vvuurqrquurm2t2BV17SiwCqL0gPSWUBNaQkIqSUg5vz/uAAGTMAOZ3GRy\nPs8zz9y59cwV5+Qt931FVTHGGGO8FeR2AMYYY+oWSxzGGGN8YonDGGOMTyxxGGOM8YklDmOMMT6x\nxGGMMcYnljiMqUNEZLOInHGUx14uItOrOyZT/1jiMHWOiJwkInNFJFtEMkVkjoicUG57SxF5XUS2\ni0ieiGwUkcki0t2zvb2IqGdbnojsEpFvReRM975V9Sr3HUP2r1PVD1R1pJtxmcBgicPUKSISA3wL\n/BtoDLQGHgWKPNvjgblAJDAciAaOB34GDk8MjVS1IXAcMAP4QkSu9v+3MKZus8Rh6pquAKr6kaqW\nqmqBqk5X1eWe7f8H5ABXqOoGdWSp6tuq+u+KTqiqO1X1eeAR4O8iUuH/FyLSS0RmeEo5u0TkPs/6\ncBH5l6eEs92zHO7ZNkJEUkXkLhFJE5EdInKeiJwtIus957qv3DUeEZHPROQTEckVkSUiclwl8QSJ\nyD0iskFEMkRkiog09mye7XnP8pSqhorI1SLya7njTxSRRZ6S2yIRObHctlki8pinNJcrItNFpMmR\n/uOY+sESh6lr1gOlIvKOiIwWkbjDtp8BfKGqZUdx7v8CzYBuh28QkWjgf8APQCugM/CjZ/P9wBCg\nH07pZRDwQLnDWwAROKWjh4DXgYnAAJxS0UMi0rHc/uOAT3FKVB8CX4pIaAXx/hE4DzjFE9Me4CXP\ntpM9741UtaGqzjvs+zQGvgNeAOKBZ4HvPCW2/S4DrvHckzDgjgpiMPWQJQ5Tp6hqDnASoDg/wOki\n8rWINPfs0gTYuX9/ETlXRLL2/9V8hNNv97w3rmDbGGCnqj6jqoWqmquqCzzbLgf+qqppqpqOU3V2\nRblji4EnVLUY+NgT4/Oec6wCVgF9y+2/WFU/8+z/LE7SGVJBTDcC96tqqqoW4ZSYLizfrlGFc4Ak\nVX1PVUtU9SNgLTC23D5vq+p6VS0ApuAkRmMscZi6R1XXqOrVqpoA9Mb5a/tfns0ZQMty+36tqo1w\nqrDCjnDq1p73zAq2tQE2VHJcK2BLuc9bPOv2y1DVUs9yged9V7ntBUDDcp9T9i94Sk6ph51vv3Y4\n7TJZIpIFrAFKgeYV7HukmPfH3brc553llvceFqOpxyxxmDpNVdcCk3ESCDjVR+dV1k5xBOOBNGBd\nBdtSgE6VHLcd50d8v7YcLL0cjTb7FzzfI6GS86UAo1W1UblXhKpuwymRVeXwmPfHve0Y4jb1hCUO\nU6eISHcRuV1EEjyf2wCXAvM9uzwLxAHviUgncURTRTWLiDQXkVuBh4F7K2kf+RZoISJ/9jSGR4vI\nYM+2j4AHRKSppwH5IeD9Y/iaA0TkfE+V059xeozNr2C/V4EnRKSd53s0FZFxnm3pQBnQsYLjAKYC\nXUXkMhEJEZEJQE/P9zSmSpY4TF2TCwwGFohIPs4P6krgdgBV3Y3THlAI/OrZfylOt9ybDztXlucc\nK4CzgYtU9a2KLqqquTjdecfiVOEkAad6Nj8OJALLPeda4ll3tL4CJuA0dl8BnO9p7zjc88DXwHQR\nycW5F4M98e4FngDmeKqyDmkjUdUMnHab23Gq9+4CxnjunzFVEpvIyZjaQ0QeATqr6kS3YzGmMlbi\nMMYY4xNLHMYYY3xiVVXGGGN8YiUOY4wxPrHEYYwxxifeDE1QZ4jIWGBsdHT0DV27dnU7HGOMqVMW\nL168W1WbHmm/gGzjGDhwoCYmJrodhjHG1CkislhVBx5pP6uqMsYY4xNLHMYYY3xiicMYY4xPArJx\nvHPnzm6HYoypY4qLi0lNTaWwsNDtUPwuIiKChIQEQkMrmh/syKxx3BhjgE2bNhEdHU18fDwi4nY4\nfqOqZGRkkJubS4cOHQ7ZZo3jxhjjg8LCwoBPGgAiQnx8/DGVrCxxGGOMR6Anjf2O9Xta4jDGmFoi\nKyuLl19+2efjzj77bLKysvwQUcUscRhjTC1RWeIoLS2tYO+Dpk6dSqNGjfwV1u8EVK+qY1W2+B2C\ndiyDmFYQ0/rQ97BIt8MzxgS4e+65hw0bNtCvXz9CQ0Np2LAhLVu2ZOnSpaxevZrzzjuPlJQUCgsL\n+dOf/sSkSZMAaN++PYmJieTl5TF69GhOOukk5s6dS+vWrfnqq69o0KBBtcYZUInjWLvjzpk/l97p\n3xFH7u+2lYTFojGtCG6UQFBs63KJpVxyCY8+xm9gjKkNHv1mFau351TrOXu2iuHhsb2q3Oepp55i\n5cqVLF26lFmzZnHOOeewcuXKA72f3nrrLRo3bkxBQQEnnHACF1xwAfHx8YecIykpiY8++ojXX3+d\niy++mM8//5yJE6t3QsmAShyq+g3wzcCBA284muNzT36EZzfcxu49Wezbk4rkbCd6XxotJZMWJZm0\nLMikZVoyrYIW0pjs3x1fGhYDMa0Iim2NVFRqiWsHYVHH+jWNMfXEoEGDDuky+8ILL/DFF18AkJKS\nQlJS0u8SR4cOHejXrx8AAwYMYPPmzdUeV0AljmN1dp+WnN2n5SHr8opK2JFVwLasArZnFbIsq4Dt\n2QWk7cmmeM8OgvO20aQs45Dk0jp9M62CfiNO9xDEoc/JlDVqT1DzntCsBzTr6bziO0NIWE1+VWNM\nFY5UMqgpUVEH/9CcNWsW//vf/5g3bx6RkZGMGDGiwi614eHhB5aDg4MpKCio9rgscRxBw/AQujSP\npkvziquhysqU3XlFBxJLSlYB87MK2JFdwK49eRRnbSe8YBetJIP2spNuGSn0zl5B23U/EEwZABoU\nAvFdkEMSSg9o1B
[... base64-encoded PNG data for the 'SGD completion' train/test loss plot omitted ...]
JuqTjtkpTPRUP5hn88AhqrqXhGZBUR4ce7KFJVbLsX+HzYusBKHMd7JBaLLfZ4G3CwioQAi\n0lVEoio4LhbY40ka3XGm8dyveP/xh5kNTPC0ozQFTgYWVsu3MKYa2F8rxnhnOVDiqXKaDDyPU020\nxNNAnQ6cV8FxPwA3ichyYB1OddV+rwHLRWSJqpafP/sLYCiwDGcWt7tUdacn8RjjOhtW3RhjjE+s\nqsoYY4xPLHEYY4zxiSUOY4wxPrHEYYwxxieWOIwxxvjEEocxxhifWOIwxhjjE0scxhhjfPL/H7+N\nLtns6cUAAAAASUVORK5CYII=\n", - "text/plain": [ - "" + "cell_type": "code", + "metadata": { + "id": "XKpnAlb2b6fP", + "colab_type": "code", + "colab": { + "base_uri": "https://localhost:8080/", + "height": 34 + }, + "outputId": "53ef39b2-9fc4-474a-c7fe-4706cea33e09" + }, + "source": [ + "import numpy as np\n", + "import matplotlib.pyplot as plt\n", + "# Import TF 2.\n", + "%tensorflow_version 2.x\n", + "import tensorflow as tf\n", + "\n", + "# Fix seed so that the results are reproducable.\n", + "tf.random.set_seed(0)\n", + "np.random.seed(0)\n", + "\n", + "try:\n", + " import t3f\n", + "except ImportError:\n", + " # Install T3F if it's not already installed.\n", + " !git clone https://github.com/Bihaqo/t3f.git\n", + " !cd t3f; pip install .\n", + " import t3f" + ], + "execution_count": 1, + "outputs": [ + { + "output_type": "stream", + "text": [ + "TensorFlow 2.x selected.\n" + ], + "name": "stdout" + } ] - }, - "metadata": {}, - "output_type": "display_data" - } - ], - "source": [ - "plt.loglog(train_loss_hist, label='train')\n", - "plt.loglog(test_loss_hist, label='test')\n", - "plt.xlabel('Iteration')\n", - "plt.ylabel('MSE Loss value')\n", - "plt.title('SGD completion')\n", - "plt.legend()\n" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Speeding it up\n", - "--------------------\n", - "The simple solution we have so far assumes that loss is computed by materializing the full estimated tensor and then zeroing out unobserved elements. If the tensors are really large and the fraction of observerd values is small (e.g. less than 1%), it may be much more efficient to directly work only with the observed elements." 
- ] - }, - { - "cell_type": "code", - "execution_count": 7, - "metadata": { - "collapsed": true - }, - "outputs": [], - "source": [ - "tf.reset_default_graph()\n", - "\n", - "shape = (10, 10, 10, 10, 10, 10, 10)\n", - "\n", - "total_observed = np.prod(shape)\n", - "# Since now the tensor is too large to work with explicitly,\n", - "# we don't want to generate binary mask,\n", - "# but we would rather generate indecies of observed cells.\n", - "\n", - "ratio = 0.001\n", - "\n", - "# Let us simply randomly pick some indecies (it may happen\n", - "# that we will get duplicates but probability of that\n", - "# is 10^(-14) so lets not bother for now)\n", - "\n", - "num_observed = int(ratio * total_observed)\n", - "observation_idx = np.random.randint(0, 10, size=(num_observed, len(shape)))\n", - "# and let us generate some values of the tensor to be approximated\n", - "observations = np.random.randn(num_observed)\n" - ] - }, - { - "cell_type": "code", - "execution_count": 8, - "metadata": { - "collapsed": true - }, - "outputs": [], - "source": [ - "# Our strategy is to feed the observation_idx\n", - "# into the tensor in the Tensor Train format and compute MSE between\n", - "# the obtained values and the desired values" - ] - }, - { - "cell_type": "code", - "execution_count": 9, - "metadata": { - "collapsed": true - }, - "outputs": [], - "source": [ - "initialization = t3f.random_tensor(shape, tt_rank=16)\n", - "estimated = t3f.get_variable('estimated', initializer=initialization)" - ] - }, - { - "cell_type": "code", - "execution_count": 10, - "metadata": { - "collapsed": true - }, - "outputs": [], - "source": [ - "# To collect the values of a TT tensor (withour forming the full tensor)\n", - "# we use the function t3f.gather_nd" - ] - }, - { - "cell_type": "code", - "execution_count": 11, - "metadata": { - "collapsed": true - }, - "outputs": [], - "source": [ - "estimated_vals = t3f.gather_nd(estimated, observation_idx)\n", - "loss = tf.reduce_mean((estimated_vals - observations) ** 2)" - ] - }, - { - "cell_type": "code", - "execution_count": 12, - "metadata": { - "collapsed": true - }, - "outputs": [], - "source": [ - "optimizer = tf.train.AdamOptimizer(learning_rate=0.01)\n", - "step = optimizer.minimize(loss)" - ] - }, - { - "cell_type": "code", - "execution_count": 13, - "metadata": {}, - "outputs": [ + }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "0 1.64438\n", - "100 0.0472497\n", - "200 0.00913698\n", - "300 0.00306178\n", - "400 0.00155388\n", - "500 0.000971667\n", - "600 0.000669613\n", - "700 0.000499607\n", - "800 0.000437507\n", - "900 0.000346848\n", - "1000 0.000325652\n", - "1100 0.000275839\n", - "1200 0.000247506\n", - "1300 0.000410816\n", - "1400 0.000331641\n", - "1500 0.000296677\n", - "1600 0.00025365\n", - "1700 0.000210029\n", - "1800 0.000216491\n", - "1900 0.000312779\n" - ] - } - ], - "source": [ - "sess = tf.Session()\n", - "sess.run(tf.global_variables_initializer())\n", - "loss_hist = []\n", - "\n", - "for i in range(2000):\n", - " _, loss_v = sess.run([step, loss])\n", - " loss_hist.append(loss_v)\n", - "\n", - " if i % 100 == 0:\n", - " print(i, loss_v)" - ] - }, - { - "cell_type": "code", - "execution_count": 14, - "metadata": {}, - "outputs": [ + "cell_type": "markdown", + "metadata": { + "id": "jS_8PA1ub6fS", + "colab_type": "text" + }, + "source": [ + "**Generating problem instance**\n", + "\n", + "Lets generate a random matrix $A$, noise, and mask $P$." 
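(Before the generation cell that follows, a small sketch of the masked objective this setup leads to. It only uses primitives shown elsewhere in these notebooks, namely `t3f.random_tensor` and `t3f.full`; the helper `masked_loss` is hypothetical and not part of the notebook.)

import tensorflow as tf
import t3f

shape = (3, 4, 4, 5, 7, 5)
# Random TT-rank-5 ground truth A, densified with t3f.full.
ground_truth = t3f.full(t3f.random_tensor(shape, tt_rank=5))
# Binary mask P keeping roughly 25% of the entries.
mask = tf.cast(tf.random.uniform(shape) <= 0.25, tf.float32)

def masked_loss(x):
    # ||P . (X - A)||_F^2: only the observed cells contribute.
    return tf.reduce_sum((mask * (x - ground_truth)) ** 2)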
+ ] + }, + { + "cell_type": "code", + "metadata": { + "id": "kTe5tB3Kb6fT", + "colab_type": "code", + "colab": {} + }, + "source": [ + "shape = (3, 4, 4, 5, 7, 5)\n", + "# Generate ground truth tensor A. To make sure that it has low TT-rank,\n", + "# let's generate a random tt-rank 5 tensor and apply t3f.full to it to convert to actual tensor.\n", + "ground_truth = t3f.full(t3f.random_tensor(shape, tt_rank=5))\n", + "# Make a (non trainable) variable out of ground truth. Otherwise, it will be randomly regenerated on each sess.run.\n", + "ground_truth = tf.Variable(ground_truth, trainable=False)\n", + "noise = 1e-2 * tf.Variable(tf.random.normal(shape), trainable=False)\n", + "noisy_ground_truth = ground_truth + noise\n", + "# Observe 25% of the tensor values.\n", + "sparsity_mask = tf.cast(tf.random.uniform(shape) <= 0.25, tf.float32)\n", + "sparsity_mask = tf.Variable(sparsity_mask, trainable=False)\n", + "sparse_observation = noisy_ground_truth * sparsity_mask" + ], + "execution_count": 0, + "outputs": [] + }, { - "data": { - "image/png": "iVBORw0KGgoAAAANSUhEUgAAAY4AAAEaCAYAAAAG87ApAAAABHNCSVQICAgIfAhkiAAAAAlwSFlz\nAAALEgAACxIB0t1+/AAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDIuMS4wLCBo\ndHRwOi8vbWF0cGxvdGxpYi5vcmcvpW3flQAAIABJREFUeJzt3Xl8FPX9x/HXJwlJCAn3fYMgHqCI\nEe9WWy9UPNt61PtuFbWt9afWX2tb7U97eh9Y7wPvtmK97wuFACL3jRCQm3AFEpJ8fn/sgEvIsZtk\nM5vN+/l4zCM7M9+Z+eyw7Ge/x8yYuyMiIhKrtLADEBGRpkWJQ0RE4qLEISIicVHiEBGRuChxiIhI\nXJQ4REQkLkocIrITMzvCzArrsf0bZnZ+Q8YkyUWJQ5q8+n7R1bDfm8xsoZltMrNCM3u+0vqjzewD\nM9toZmvM7Csz+x8zyw7W32Jm24L1G81sjpnda2bdGjrWsATv8enoZe4+wt2fCCsmSTwlDmnSzCwj\nEfsIfjGfCxzl7rlAPvBe1PofAy8BzwJ93L0DcAbQE+gVtavn3T0PaA+cCnQFJqZS8pDmR4lDGkzw\na3tp8Ot6tpn9MFh+i5m9aGZPB+ummtnuZnajma00syVmdkzUfi40s5lB2QVmdnnUuiOCX///Y2bL\ngTHAG0D3oGawycy6m1mamd1gZvOD2sALZtY+2EdfM3Mzu9jMFgPvV/F2DgDecvf5AO6+3N1HB9sb\n8HfgD+7+sLuvDcrMdvdR7j638s7cfZu7TyeSXFYBv6rhPF4a9f5nmNmwYPmeZvahmRWZ2XQzOylq\nm8fN7P6gmWiTmX1mZl3N7E4zW2dms8xsv6jyi4LzPyNY/9j2mlIV8XQ3s5fNbFVQA7s6WH4ccBNw\nRnDMKcHyD83skuB1mpndbGbfBP/WT5pZm0r/Dueb2WIzW21mv6nuvEjyUOKQBmFmg4CrgAOCX9jH\nAouiiowEngLaAZOBt4h8/noAfwAeiiq7EjgRaA1cCPxj+5dnoCuRX/B9gPOAEcAyd88NpmXA1cAp\nwPeB7sA64L5KYX8f2DOItbIvgPPM7Ndmlm9m6VHrBhGpWbxcy2nZhbuXA/8BDq9qfVCTuSV4X62B\nk4A1ZtYCGAu8DXQGRgHPBOd9u58ANwMdgRJgHDApmH+JSLKL9lMi7303YPdg28rxpAXHnULk3+qH\nwLVmdqy7vwn8iUitKtfd963iLV0QTEcC/YFc4N5KZQ4jck5/CPzWzPas6txIEnF3TZrqPQEDiHzh\nHwW0qLTuFuCdqPmRwCYgPZjPAxxoW82+/w1cE7w+AigFsqPWHwEUVtpmJvDDqPluwDYgA+gbHK9/\nLe/pp8C7wGZgDXBDsPywYPvoGJ4DioBi4Nyo9/10Ffu9AphbzTHf2v5eKy0/HFgOpEUtGwPcErx+\nHHg4at0oYGbU/BCgKGp+EXBF1PzxwPzK5xM4EFhcKZYbgceqe4/Ah8Alwev3gJ9HrRtUxb9Dz6j1\n44Ezw/48a6p5qnf7sAiAu88zs2uJfJHsbWZvAb/0yK9/gBVRxbcAqz3y63v7PER+jRaZ2Qjgd0R+\nBacBOcDUqO1XufvWWkLqA/zLzCqilpUDXaLml9Tynp4h8qu+BZHayzNmNhlYHBTpBiwMyp4JYGaf\nAulV7C5aD2BtNet6AfOrWN4dWOLu0e/nm2Bf21U+x5XncyvtM/r9fxMco7I+RJoBi6KWpQOfVBl9\n1XF/U+k4Gez877A86nVxFXFKklFTlTQYd3/W3Q8j8mXjwB3x7sPMsog0Af0V6OLubYHXAYs+VOVD\nV7GrJcAId28bNWW7+9JattuFR/onXgS+BgYDs4ClwGkxvakoQdPPSKr/4l1CpOmosmVAr2D77XoH\ncdRVdCd+7+AYVcWzsNJ5zHP344P1tZ3DZUQ+D9HHKWPnpCZNjBKHNAgzG2RmPwi++LcS+YVbXstm\nVckEsoh0IJcFtY9jat6EFUCH7Z2ugQeB28ysTxBfJzM7OdYgzOwCMzvBzPKCDt4RwN7Al+7uRDq3\nfxd0ZLeziIHs/Es6en8tgrb7MUT6aCr3N2z3T+A6M9s/2OeA4D18SaTJ7PpgX0cQSUDPxfqeqnCl\nmfUMBg3cBDxfRZnxwIZgMEJLM0s3s8FmdkCwfgXQt1JCizYG+IWZ9TOzXL7rEymrR9wSMiUOaShZ\nwO3AaiJND52JfBnFxd03EunYfoFIh/bZwKu1bDOLyBfUgmDEUXfgrmC7t81sI5HO7gPjCGVDEP9i\nIn0XfwZ+5u6fBsd8nkhn9DlEfpWvDmIeDbwYtZ8zzGxTsI9XifSV7B/VhFf5vbwI3EZkmO9GIv07\n7d29lEhH+YjgWPcD5wXvva6eJdLZviCYbq0innIiCWookWa51USS2/Ykvf29rjGzSVUc41EigyI+\nD
[... base64-encoded PNG plot data omitted ...]
[... base64-encoded PNG data (loss plot output from the old version of the notebook) omitted for readability ...]
-     "metadata": {},
-     "output_type": "display_data"
-    }
-   ],
-   "source": [
"plt.loglog(loss_hist)\n", - "plt.xlabel('Iteration')\n", - "plt.ylabel('MSE Loss value')\n", - "plt.title('smarter SGD completion')\n", - "plt.legend()" - ] - }, - { - "cell_type": "code", - "execution_count": 15, - "metadata": {}, - "outputs": [ + }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "[-0.10904536 -1.38673067 -0.53866088 ..., -0.0059163 0.22185898\n", - " 0.74981755]\n" - ] - } - ], - "source": [ - "print(sess.run(t3f.gather_nd(estimated, observation_idx)))" - ] - }, - { - "cell_type": "code", - "execution_count": 16, - "metadata": {}, - "outputs": [ + "cell_type": "code", + "metadata": { + "id": "C4h03v5Kb6fY", + "colab_type": "code", + "colab": {} + }, + "source": [ + "observed_total = tf.reduce_sum(sparsity_mask)\n", + "total = np.prod(shape)\n", + "initialization = t3f.random_tensor(shape, tt_rank=5)\n", + "estimated = t3f.get_variable('estimated', initializer=initialization)" + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "8QWYA3a7b6fb", + "colab_type": "text" + }, + "source": [ + "SGD optimization\n", + "-------------------------\n", + "The simplest way to solve the optimization problem is Stochastic Gradient Descent: let TensorFlow differentiate the loss w.r.t. the factors (cores) of the TensorTrain decomposition of the estimated tensor and minimize the loss with your favourite SGD variation." + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "6JyoT6uVb6fc", + "colab_type": "code", + "colab": {} + }, + "source": [ + "optimizer = tf.keras.optimizers.Adam(learning_rate=0.01)\n", + "\n", + "def step():\n", + " with tf.GradientTape() as tape:\n", + " # Loss is MSE between the estimated and ground-truth tensor as computed in the observed cells.\n", + " loss = 1.0 / observed_total * tf.reduce_sum((sparsity_mask * t3f.full(estimated) - sparse_observation)**2)\n", + "\n", + " gradients = tape.gradient(loss, estimated.tt_cores)\n", + " optimizer.apply_gradients(zip(gradients, estimated.tt_cores))\n", + "\n", + " # Test loss is MSE between the estimated tensor and full (and not noisy) ground-truth tensor A.\n", + " test_loss = 1.0 / total * tf.reduce_sum((t3f.full(estimated) - ground_truth)**2)\n", + " return loss, test_loss" + ], + "execution_count": 0, + "outputs": [] + }, { - "name": "stdout", - "output_type": "stream", - "text": [ - "[ -1.27225139e-01 -1.37794858e+00 -5.42469328e-01 ..., -1.30643336e-03\n", - " 2.35629296e-01 7.53320726e-01]\n" - ] + "cell_type": "code", + "metadata": { + "id": "e6VWyInAb6ff", + "colab_type": "code", + "colab": { + "base_uri": "https://localhost:8080/", + "height": 187 + }, + "outputId": "9ff1647e-2e1e-46be-d0c0-fcc70e25f632" + }, + "source": [ + "train_loss_hist = []\n", + "test_loss_hist = []\n", + "for i in range(5000):\n", + " tr_loss_v, test_loss_v = step()\n", + " tr_loss_v, test_loss_v = tr_loss_v.numpy(), test_loss_v.numpy()\n", + " train_loss_hist.append(tr_loss_v)\n", + " test_loss_hist.append(test_loss_v)\n", + " if i % 1000 == 0:\n", + " print(i, tr_loss_v, test_loss_v)" + ], + "execution_count": 7, + "outputs": [ + { + "output_type": "stream", + "text": [ + "0 1.768507 1.6856995\n", + "1000 0.0011041266 0.001477238\n", + "2000 9.759675e-05 3.4615714e-05\n", + "3000 8.749525e-05 2.0825255e-05\n", + "4000 9.1277245e-05 2.188003e-05\n", + "5000 9.666496e-05 3.5304438e-05\n", + "6000 8.7534434e-05 2.1069698e-05\n", + "7000 8.753277e-05 2.1103975e-05\n", + "8000 9.058935e-05 2.6075113e-05\n", + "9000 8.8796776e-05 2.2456348e-05\n" + ], + "name": 
"stdout" + } + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "0lPHsSveb6fj", + "colab_type": "code", + "colab": { + "base_uri": "https://localhost:8080/", + "height": 316 + }, + "outputId": "60f23a2d-3961-477a-d7bf-6451f8a523af" + }, + "source": [ + "plt.loglog(train_loss_hist, label='train')\n", + "plt.loglog(test_loss_hist, label='test')\n", + "plt.xlabel('Iteration')\n", + "plt.ylabel('MSE Loss value')\n", + "plt.title('SGD completion')\n", + "plt.legend()\n" + ], + "execution_count": 8, + "outputs": [ + { + "output_type": "execute_result", + "data": { + "text/plain": [ + "" + ] + }, + "metadata": { + "tags": [] + }, + "execution_count": 8 + }, + { + "output_type": "display_data", + "data": { + "image/png": "iVBORw0KGgoAAAANSUhEUgAAAYoAAAEaCAYAAAAPGBBTAAAABHNCSVQICAgIfAhkiAAAAAlwSFlz\nAAALEgAACxIB0t1+/AAAADh0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uMy4xLjMsIGh0\ndHA6Ly9tYXRwbG90bGliLm9yZy+AADFEAAAgAElEQVR4nO3dd5xU1fn48c8zs7M7W1lYisBSlipN\n6UUBsYOIGjUoWGJiRJOY5JvYE01M8s03JvnFWGKJRkKiBiXYaBE0gqigUkR6L7LUZWHZ3p/fH3fA\nBXZnZ8vszM4879drXjP33HvPfeaK8+w5595zRVUxxhhjauIKdQDGGGPCmyUKY4wxflmiMMYY45cl\nCmOMMX5ZojDGGOOXJQpjjDF+WaIwphkRkd0ickk9971JRBY1dkwm8lmiMM2OiIwWkWUiclxEjorI\nJyIyrMr69iLyoojsF5F8EdkpIjNE5Gzf+q4ior51+SJySETmiciloftWjavKd4w5Uaaqr6rqZaGM\nyzRPlihMsyIiKcA84GmgFdAR+BVQ4lufBiwDEoAxQDIwGPgQOD0RpKpqEnAu8B7wlojcFvxvYUzz\nYonCNDe9AFR1pqpWqGqRqi5S1bW+9T8BcoFbVHWHOnJU9e+q+nR1FarqQVV9EngU+L2IVPv/hYj0\nE5H3fK2YQyLyM195nIg84WvB7Pd9jvOtGycimSJyv4gcFpEDInKNiFwhIlt9df2syjEeFZHZIvK6\niOSJyGoRObeGeFwi8qCI7BCRbBGZJSKtfKuX+t5zfK2mUSJym4h8XGX/80Rkha9ltkJEzquybomI\n/MbXWssTkUUi0rq2/zgmMlmiMM3NVqBCRP4hIhNEpOVp6y8B3lLVynrU/SbQFuh9+goRSQbeB94F\nOgA9gP/6Vv8cGAkMxGmdDAcerrL7WYAXp/XzC+BF4GZgCE6r5xERyaiy/dXAv3FaTP8C3hYRTzXx\n/hC4BrjAF9Mx4BnfurG+91RVTVLV5ad9n1bAfOApIA14HJjva5GdMBX4tu+cxAL3VhODiQKWKEyz\noqq5wGhAcX5ws0Rkjoi0823SGjh4YnsRuUpEck78VVxL9ft9762qWXclcFBV/6Sqxaqap6qf+dbd\nBPxaVQ+rahZOV9gtVfYtA36rqmXAa74Yn/TVsQHYiJNgTlilqrN92z+Ok2RGVhPTXcDPVTVTVUtw\nWkTXVx2X8GMisE1VX1bVclWdCWwGJlXZ5u+qulVVi4BZOInQRCFLFKbZUdVNqnqbqqYD/XH+mn7C\ntzobaF9l2zmqmorTJRVbS9Udfe9Hq1nXCdhRw34dgD1Vlvf4yk7IVtUK3+ci3/uhKuuLgKQqy3tP\nfPC1jDJPq++ELjjjKjkikgNsAiqAdtVsW1vMJ+LuWGX5YJXPhafFaKKIJQrTrKnqZmAGTsIApzvo\nmprGGWrxDeAwsKWadXuBbjXstx/nR/uEznzdOqmPTic++L5Heg317QUmqGpqlZdXVffhtLj8OT3m\nE3Hva0DcJkJZojDNioicLSL3iEi6b7kTMAX41LfJ40BL4GUR6S6OZPx0m4hIOxG5G/gl8FAN4xvz\ngPYi8j++wetkERnhWzcTeFhE2vgGfH8BvNKArzlERK71dSH9D84VXZ9Ws93zwG9FpIvve7QRkat9\n67KASmpObguAXiIyVURiROQGoK/vexpzCksUprnJA0YAn4lIAc4P6HrgHgBVPYLTn18MfOzbfg3O\nZbLfO62uHF8d64ArgG+q6vTqDqqqeTiX107C6ZLZBlzoW/2/wEpgra+u1b6y+noHuAFncPoW4Frf\neMXpngTmAItEJA/nXIzwxVsI/Bb4xNc1dcoYh6pm44y73IPTXXc/cKXv/BlzCrEHFxkTPkTkUaCH\nqt4c6liMOcFaFMYYY/yyRGGMMcYv63oyxhjjl7UojDHG+GWJwhhjjF+B3OrfbIjIJGBScnLyHb16\n9Qp1OMYY06ysWrXqiKq2Ob08Iscohg4dqitXrgx1GMYY06yIyCpVHXp6uXU9GWOM8csShTHGGL8s\nURhjjPErogazjTGmvsrKysjMzKS4uDjUoQSd1+slPT0dj6e652GdyRKFMcYAmZmZJCcn07VrV0Qk\n1OEEjaqSnZ1NZmYmGRkZte9AhHU9icgkEXnh+PHjoQ7FGNPMFBcXk5aWFtFJAkBESEtLq1PLKaIS\nharOVdVpLVq0CHUoxphmKNKTxAl1/Z4RlSiMMaY5y8nJ4dlnn63zfldccQU5OTlBiMgRkWMU+7Oy\nefTZGfXaV1xu3N5kYrxJxCYkERefTEJ8PEleD8leD8neGJLiYkjyxpDsjSE5zoPX44qav0SMMcFz\nIlF8//vfP6W8vLycmJiaf64XLFgQ1LgiMlF0KPuKRw//uNHqK1cXhXgpJI5CjaOIOLLxkqlxFBJH\nMV7K3PGUx8RT6U5AYxNQTyISm4grLpEYbyIx3mTiElKIb9GKpJTWpKS2pFWSl5YJscTGWMPOGAMP\nPvggO3bsYODAgXg8HrxeLy1btmTz5s1s3bqVa665hr1791JcXMyPf/xjpk2bBkDXrl1ZuXIl+fn5\nTJgwgdGjR7Ns2TI6duzIO++8Q3x8fIPiishEQVp3uOmZ+u1bWQ6lBc6rrJCKknzKCvOQ4nziiguI\nKcknubQASguRsgJcZdm4K4qIqSjCU1FEbHmJ84Tj2g6jQh7xHNJE8iWRYnciJTHJlHuSqYxrAd4W\nuOJT8SSmEpvUiv
" + ] + }, + "metadata": { + "tags": [] + } + } + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "Ouhl6kvBb6fn", + "colab_type": "text" + }, + "source": [ + "Speeding it up\n", + "--------------------\n", + "The simple solution we have so far assumes that loss is computed by materializing the full estimated tensor and then zeroing out unobserved elements. If the tensors are really large and the fraction of observerd values is small (e.g. less than 1%), it may be much more efficient to directly work only with the observed elements." + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "88t0Auu3b6fo", + "colab_type": "code", + "colab": {} + }, + "source": [ + "shape = (10, 10, 10, 10, 10, 10, 10)\n", + "\n", + "total_observed = np.prod(shape)\n", + "# Since now the tensor is too large to work with explicitly,\n", + "# we don't want to generate binary mask,\n", + "# but we would rather generate indecies of observed cells.\n", + "\n", + "ratio = 0.001\n", + "\n", + "# Let us simply randomly pick some indecies (it may happen\n", + "# that we will get duplicates but probability of that\n", + "# is 10^(-14) so lets not bother for now).\n", + "\n", + "num_observed = int(ratio * total_observed)\n", + "observation_idx = np.random.randint(0, 10, size=(num_observed, len(shape)))\n", + "# and let us generate some values of the tensor to be approximated\n", + "observations = np.random.randn(num_observed)\n" + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "code", + "metadata": { + "id": "5e3CGCQGb6fr", + "colab_type": "code", + "colab": {} + }, + "source": [ + "# Our strategy is to feed the observation_idx\n", + "# into the tensor in the Tensor Train format and compute MSE between\n", + "# the obtained values and the desired values" + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "code", + "metadata": { + "id": "o_nTVpYpb6fu", + "colab_type": "code", + "colab": {} + }, + "source": [ + "initialization = t3f.random_tensor(shape, tt_rank=16)\n", + "estimated = t3f.get_variable('estimated', initializer=initialization)" + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "code", + "metadata": { + "id": "y9vjUa3Xb6fx", + "colab_type": "code", + "colab": {} + }, + "source": [ + "# To collect the values of a TT tensor (withour forming the full tensor)\n", + "# we use the function t3f.gather_nd" + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "code", + "metadata": { + "id": "U1-IJdByb6fz", + "colab_type": "code", + "colab": {} + }, + "source": [ + "def loss():\n", + " estimated_vals = t3f.gather_nd(estimated, observation_idx)\n", + " return tf.reduce_mean((estimated_vals - observations) ** 2)" + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "code", + "metadata": { + "id": "Lg_-VK80b6f3", + "colab_type": "code", + "colab": {} + }, + "source": [ + "optimizer = tf.keras.optimizers.Adam(learning_rate=0.01)\n", + "def step():\n", + " with tf.GradientTape() as tape:\n", + " loss_value = loss()\n", + " gradients = tape.gradient(loss_value, estimated.tt_cores)\n", + " optimizer.apply_gradients(zip(gradients, estimated.tt_cores))\n", + " return loss_value" + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "A7b8xOBSgDc7", + "colab_type": "text" + }, + "source": [ + "#### Compiling the function to additionally speed things up\n" + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "33RRJ_pogCJ7", + "colab_type": "code", + "colab": {} + }, + 
"source": [ + "# In TF eager mode you're supposed to first implement and debug\n", + "# a function, and then compile it to make it faster.\n", + "faster_step = tf.function(step)" + ], + "execution_count": 0, + "outputs": [] + }, + { + "cell_type": "code", + "metadata": { + "id": "ua3MnN_Yb6f6", + "colab_type": "code", + "colab": { + "base_uri": "https://localhost:8080/", + "height": 357 + }, + "outputId": "c1634c70-7d26-45c1-85a7-7128dac42731" + }, + "source": [ + "loss_hist = []\n", + "for i in range(2000):\n", + " loss_v = faster_step().numpy()\n", + " loss_hist.append(loss_v)\n", + " if i % 100 == 0:\n", + " print(i, loss_v)" + ], + "execution_count": 16, + "outputs": [ + { + "output_type": "stream", + "text": [ + "0 2.513642\n", + "100 0.09261158\n", + "200 0.016660467\n", + "300 0.0062909224\n", + "400 0.0030982601\n", + "500 0.0018596936\n", + "600 0.0012290174\n", + "700 0.00086869544\n", + "800 0.00065623457\n", + "900 0.00052747165\n", + "1000 0.00044029654\n", + "1100 0.00038606362\n", + "1200 0.00033268757\n", + "1300 0.0002910529\n", + "1400 0.00028836995\n", + "1500 0.00023541097\n", + "1600 0.00022489333\n", + "1700 0.00022316887\n", + "1800 0.00039261775\n", + "1900 0.0003216249\n" + ], + "name": "stdout" + } + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "q5hrWqCTb6gA", + "colab_type": "code", + "colab": { + "base_uri": "https://localhost:8080/", + "height": 333 + }, + "outputId": "f0b4326c-9413-4b5b-cf71-c2fdae9325b5" + }, + "source": [ + "plt.loglog(loss_hist)\n", + "plt.xlabel('Iteration')\n", + "plt.ylabel('MSE Loss value')\n", + "plt.title('smarter SGD completion')\n", + "plt.legend()" + ], + "execution_count": 17, + "outputs": [ + { + "output_type": "stream", + "text": [ + "No handles with labels found to put in legend.\n" + ], + "name": "stderr" + }, + { + "output_type": "execute_result", + "data": { + "text/plain": [ + "" + ] + }, + "metadata": { + "tags": [] + }, + "execution_count": 17 + }, + { + "output_type": "display_data", + "data": { + "image/png": 
"iVBORw0KGgoAAAANSUhEUgAAAYoAAAEaCAYAAAAPGBBTAAAABHNCSVQICAgIfAhkiAAAAAlwSFlz\nAAALEgAACxIB0t1+/AAAADh0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uMy4xLjMsIGh0\ndHA6Ly9tYXRwbG90bGliLm9yZy+AADFEAAAgAElEQVR4nO3deXxU5dn/8c+VPSQhgYQ9rCagLLIF\nrEtdnrpAFaX0UXHDWqrVn9razVpbre2jj/bpTkURN9yXamvFDW2VIlqVgOxbkDUgEAKEhEAI4f79\nMSc4xGQyWSYnmXzfr9e8nHPPOWeuOcS55l7OfZtzDhERkbrE+B2AiIi0bkoUIiISkhKFiIiEpEQh\nIiIhKVGIiEhIShQiIhKSEoWIHMPMzjSzwiYc/6aZXd2cMYm/lCikzWvqF1uI895uZhvMrMzMCs3s\nhRqvn2Nm75lZqZkVm9liM/upmSV5r99lZpXe66VmttbM7jezHs0dq1+8z/h0cJlzbrxz7gm/YpLm\np0QhbZqZxUXiHN4v4quAs51zqUAe8K+g1y8GXgKeBfo65zKBS4FsoHfQqV5wzqUBnYFvAN2BhdGU\nLCT6KVFIs/F+TW/1fj2vMbOveeV3mdlfzexp77VlZjbQzH5mZjvNbIuZnRt0nmvMbJW373oz+27Q\na2d6v+5/ambbgeeAN4Ge3i//MjPraWYxZnabmX3m/dp/0cw6e+foZ2bOzKaa2Wbg3Vo+zhhgjnPu\nMwDn3Hbn3EzveAP+APzaOfewc263t88a59zNzrmCmidzzlU651YQSCZFwI9CXMdrgz7/SjMb5ZWf\nYGZzzWyvma0wswuDjpllZg94zT5lZvaBmXU3sz+Z2R4zW21mI4P23+hd/5Xe649X14Rqiaenmb1s\nZkVeDet7Xvk44HbgUu89l3jlc83sO97zGDP7hZlt8v6tnzSz9Br/Dleb2WYz22VmP6/ruoh/lCik\nWZjZIOAmYIz3C/o8YGPQLhOAp4BOwKfAHAJ/f72AXwMPBe27E7gA6AhcA/yx+svS053AL/S+wBRg\nPLDNOZfqPbYBNwMTgTOAnsAeYHqNsM8ATvBirekjYIqZ/cTM8swsNui1QQRqDi/Xc1m+xDlXBfwD\n+Gptr3s1lbu8z9URuBAoNrN4YDbwNtDV+3zPeNe92iXAL4AsoAL4D7DI236JQHILdgWBz34cMNA7\ntmY8Md77LiHwb/U14BYzO8859xbwvwRqTanOueG1fKRveY+zgAFAKnB/jX1OI3BNvwbcaWYn1HZt\nxEfOOT30aPIDyCHwBX82EF/jtbuAd4K2JwBlQKy3nQY4IKOOc78CfN97fiZwCEgKev1MoLDGMauA\nrwVt9wAqgTign/d+A+r5TFcA/wT2A8XAT73y07zjg2N4HtgLlANXBX3up2s57/VAQR3vOaf6s9Yo\n/yqwHYgJKnsOuMt7Pgt4OOi1m4FVQdvDgL1B2xuB64O2vw58VvN6AicBm2vE8jPg8bo+IzAX+I73\n/F/A/wt6bVAt/w7ZQa9/Akz2++9Zj2MfTW7fFQFwzq0zs1sIfHEMMbM5wA9d4Nc9wI6g3Q8Au1zg\n13X1NgR+be41s/HALwn8yo0BOgDLgo4vcs4drCekvsDfzexIUFkV0C1oe0s9n+kZAr/a4wnUTp4x\ns8XAZm+XHsAGb9/JAGY2H4it5XTBegG763itN/BZLeU9gS3OueDPs8k7V7Wa17jmdmqNcwZ//k3e\ne9TUl0Cz3t6gsljg/Vqjrz3uTTXeJ45j/x22Bz0vryVO8ZmanqTZOOeedc6dRuDLxQG/aeg5zCyR\nQJPO74BuzrkM4A3Agt+q5lvXcqotwHjnXEbQI8k5t7We477EBfoX/gosBYYCa4CtwKSwPlQQryln\nAnV/0W4h0BRU0zagt3d8tT5eHI0V3Onex3uP2uLZUOM6pjnnvu69Xt813Ebg7yH4fQ5zbBKTVk6J\nQpqFmQ0ys//yvugPEvgFe6Sew2qTACQS6PA97NUuzg19CDuAzOpOUs8M4B4z6+vF18XMLgo3CDP7\nlpmdb2ZpXofseGAI8LH3q/5HwC+9judOFpDLsb+Ug88X57W9P0egj6Vmf0G1R4Afm9lo75w53mf4\nmMCv7VvNLN7MziSQcJ4P9zPV4kYzy/Y6+X8OvFDLPp8Apd7ggWQzizWzoWY2xnt9B9CvRgIL9hzw\nAzPrb2apfNGncbgJcUsLU6KQ5pII3AfsItCU0JVAW3aDOOdKge8BLxLogL4ceLWeY1YT+EJa740I\n6gn82TvubTMrJdA5fVIDQtlHYETPZgJ9D/8H3OCcm++95wsEOo+vJPCre5cX80zgr0HnudTMyoAS\nL55iYHRQk1zNz/JX4B4Cw25LCfTPdHbOHSKQGMZ77/UAMMX77I31LIHO8fUEmrvuriWeKgIDC0YQ\naGbbRSCZVSfl6s9abGaLanmPxwgMYpjnHX+QQP+JtCHmnBYuEmlvzGwjgQ7nf/odi7R+qlGIiEhI\nShQiIhKSmp5ERCQk1ShERCQkJQoREQkpqu7MNrMJwIS0tLRrBw4c6Hc4IiJtysKFC3c557rULI/K\nPoq8vDyXn5/vdxgiIm2KmS10zuXVLFfTk4iIhKREISIiISlRiIhISFHVmS0iIgGVlZUUFhZy8OCX\nZ+RPSkoiOzub+Pj4sM4VVYmietRTTk6O36GIiPiqsLCQtLQ0+vXrR2D13gDnHMXFxRQWFtK/f/+w\nzhVVTU/OudnOuevS09Pr31lEJIodPHiQzMzMY5IEgJmRmZlZa02jLlGVKERE5As1k0R95XVRohAR\nkZCUKEREJCQlChGRKFXXzBsNnZFDiUJEJAolJSVRXFz8paRQPeopKSkp7HNF1fBYEREJyM7OprCw\nkKKioi+9Vn0fRbiiKlFU30fRpU8O979b0KhzpCfHc1zXVHK7ppGVmtDg0QEiIq1BfHx82PdJ1Ccq\nZ49N7JHrelz9pyafJ6NDPDldUsntlkpO1zRyuqaS2zWVHulJSiAiEnXqmj02qmoU1Yb2Sueje8Y3\n+DjnYPf+Q6zbWUbBzlIKdpaxbmcZby3fzp7yLUf3S0mIJadrIHnkdks9mkyyO3UgNkYJRESiS1Qm\nCgPiYxvXT989PYnu6Umclpt1THlxWcXRxFGdSOavK+LlRYVH90mMi2FAl0CtI7draqAG0i2Vvpkp\njY5HRMRvUZkoIiEzNZHM1ES+MiDzmPKSA5Ws21nGZ0G1kEWb9/Dqkm1H94mLMfplpRxNINV9IAO6\npJAUH9vSH0VEpEGUKJooPTme0X07Mbpvp2PKyw8d5rOd+1lXVErBjjIKdpaxenspc1Zs54jXLRRj\nMKBLKidmp3Nir3RO7J3B4B4dlTxEpFVRooiQDglxDMtOZ1j2sRMUHqysYmPx/qPJY8XWEuat3cXf\nFm0FArWPgd3SAskjO4MTs9MZ2C2NhDg1XYmIP5QoWlhS
fCzHd+/I8d07Hi1zzrF930GWFpawtHAv\nSwtLeHP5dp5fEOhAT4iL4YQeHQO1Di+B5HRNVce5iLSIqBwem5eX5/Lz8/0Oo0mcc2zZfYAlhXtZ\ntrWEJVv2snxrCfsPVQGQHB/L0F4dj9Y6TszOoG/nDsQoeYhII7Wr4bHRwMzok9mBPpkdmDC8JwBH\njjjW7yrzah6B2sfTH22i4vARANKS4jgxO51hvTIY7jV79cpI1j0fItIkShRtSEyMeTf+pTFpVOD2\n+8qqIxTsKAs0WW0tYVlhCY/OX09lVaCmmJmSwLDq/o5e6Qzp1ZHuHXXDoIiEr9UnCjNLAR4ADgFz\nnXPP+BxSqxIfG8Pgnh0Z3LMjk72yg5VVrNleerS/Y2lhCfPWFhwdbZXRIZ7BPToyuEdHTugRODan\na6ru9RCRWvmSKMzsMeACYKdzbmhQ+Tjgz0As8Ihz7j5gEvCSc262mb0AKFHUIyk+luG9MxjeO+No\nWfmhw6zcto+Vn+9j1ef7WLltH08FNVslxMaQ0zWVE3p0JLfbFzcM6m5zEfGrRjELuB94srrAzGKB\n6cA5QCGwwMxeBbKBZd5uVS0bZvTokBBHXr/O5PXrfLTscNURNhbvZ8W2faz6vJSVn+9jXsGxd5sn\nxMUwICvFm7IklX6ZKWR3SqZ35w50SU1U57lIO+BLonDOzTOzfjWKxwLrnHPrAczseeAiAkkjG1iM\n1s9oVnGxMUf7PC4a8UV58N3m64oCU5YsKdzL68s+J3iQXEJcDNkZyfTyEkd2p2R6dwr8N7tTB82+\nKxIlWlMfRS9gS9B2IXASMA2438zOB2bXdbCZXQdcB9CnT58Ihhn96rrb/GBlFYV7DrBlTzmFew5Q\nuLv86PaK5dvZvf/QMfsnxsXQJS2RrmmJdPEeXdOSAs9TE+naMZGs1EQyUxNIjNPd6CKtVWtKFLVy\nzu0Hrgljv5nATAjcRxHpuNqjpPjYo01QtSmrOMzWPQfYsrucwj3lbN17gKLSCorKKtiwaz+fbNjN\nnvLKWo9NS4oLJI2UBDJTE8hMTSQrJcGbYyuBzJREsrzyjOR4NXmJtKDWlCi2Ar2DtrO9srBVL1yU\nk5PTnHFJmFIT4xjUPY1B3dPq3OfQ4SPsKqsIJBAviRSXVbCr7BDF+w9RXFbBxl3lLNy0h937Dx0d\nqRUsNsbo1CHBSxyBJJKZmkC3jknHNH91TlHTl0hzaE2JYgGQa2b9CSSIycDlDTmBc242MDsvL+/a\nCMQnzSAhLoaeGcn0zEiud9+qI4695YEEsqusguKyQ0FJxftvWQVL9uxlV2nF0bvWq3VIiD3aX9I/\nK4VB3dIY2D2N3K6ppCS2pj99kdbNr+GxzwFnAllmVgj80jn3qJndBMwhMDz2MefcigaeVzWKKBIb\nY0endx/Yre5aSrXSg5WBvpOjzV8HKNxTzpY9B/jws10crDxydN/+WSmM6J1x9DGkZ0fidB+JSK00\n15O0C1VHHFt2l7NmRylrtpeyfGsJi7fsZWdpBQBpiXGcfFwmX83N4rTcLvTL7KBmK2l3NNeTtGux\n3uJR/bJSOG9IdyAw8eLnJQdZtHkPH6zbxby1u3h75Q4AsjslM25Id84/sQcjemcoaUi7FlU1iqCm\np2sLCgr8DkfaGOccG4vLmV9QxHtrini/oIjKKkevjGTOP7EHE0f0YnDPjvWfSKSNqqtGEVWJopqa\nnqQ5lByo5J2VO3hj2edHk8bw7HQmj+3DhOE9SVWHuEQZJQqRJthbfoi/f7qV5z/ZwpodpaQkxPLf\no7P5zlcH0LtzB7/DE2kW7SJRqOlJIs05x6LNe3n24828umQrVUcc55/Yk++ePoChvdLrP4FIK9Yu\nEkU11SikJWwvOcjjH27g2Y82U1pxmPOGdOPH5w4iN4yhvCKtkRKFSITsO1jJ4/M38vD76yk/dJhv\njsrmlnMG0iuMmwpFWhMlCpEI273/EA+8t44nP9oEDq45rR83/1euOr2lzWgXiUJ9FNIabNt7gN+/\nvZaXFxXSrWMit3/9BC4c3lP3Ykir1y4SRTXVKKQ1WLR5D7/8xwqWbS1hbP/O3DNxqPovpFWrK1Fo\nchuRCBnVpxOv3Hgq904aRsGOUs6fNp/p762jsupI/QeLtCJKFCIRFBtjXDa2D+/88AzOGdKN385Z\nw8TpH7By2z6/QxMJmxKFSAvISk1k+uWjmHHlKHbsq+Ci6fN55P31RGPTr0SfqEoUZjbBzGaWlJT4\nHYpIrcYN7cE7PzidswZ15e7XVzH1iXyKyyr8DkskpKhKFM652c6569LTdYestF6dUhJ46KrR/Pqi\nIcwv2MXXp73Pwk17/A5LpE5RlShE2gozY8rJ/XjlxlNJjIvlsoc/4u+fFvodlkitlChEfDS4Z0f+\nceOpjOqTwQ9eWMJv3lrNkdoWChfxkRKFiM86pSTw5LdP4rKxfXhw7mf84MXFGkIrrYrmFhBpBRLi\nYvjfbwwlu1Myv52zhpIDlTx4xWiSE2L9Dk0kumoUGvUkbZmZceNZOdw7aRjz1hZx5aMfs7f8kN9h\niURXotCoJ4kGl43tw/TLR7GssITJMz9i934lC/FXVCUKkWgxflgPHrk6jw279nP5wx/pXgvxlRKF\nSCt1+sAuPHr1GDbs2s8Vj3ysZCG+UaIQacVOy83isW+NYWPxfi5/+GN2KVmID5QoRFq5U3OyeOzq\nMWzarWYo8YcShUgbcEpOFo9/ayybd5dz1aOfUHKg0u+QpB1RohBpI04+LpOHrsqjYGcp1zz+Cfsr\nDvsdkrQTUZUodB+FRLszBnbhL5eNYklhCd95Ip+DlVV+hyTtQFQlCt1HIe3BuKHd+d3FJ/LRhmL+\n3zOLOHRY031IZEVVohBpL74xMpu7Jw7l3dU7+dFfl2giQYkozfUk0kZdcVJf9h04zG/eWk3P9CR+\n9vUT/A5JopQShUgbdv0ZA9i29wAPzVtPz4xkrj6ln98hSRRSohBpw8yMuy4cwvZ9B7lr9gq6pydx\n3pDufoclUUZ9FCJtXGyMMW3ySIZnZ/C95z7VsqrS7JQoRKJAckIsj16dR4/0JL7zxAI27trvd0gS\nRZQoRKJEZmois64ZC8DUJxbo7m1pNmElCjNLNrNBkQ5GRJqmX1YKM64czebd5dz07CIOa0lVaQb1\nJgozmwAsBt7ytkeY2auRDkxEGuekAZnc841hvF+wi1/NXul3OBIFwqlR3AWMBfYCOOcWA/0jGNMx\nzGyAmT1qZi+11HuKtHWX5PXmu6cP4KmPNvHEhxv9DkfauHASRaVzrubkSWHdBmpmj5nZTjNbXqN8\nnJmtMbN1ZnZbqHM459Y756aG834i8oVbxx3P2Sd041ezV/DvtUV+hyNtWDiJYoWZXQ7Emlmumf0F\n+DDM888CxgUXmFksMB0YDwwGLjOzwWY2zMxeq/HoGv5HEZFgsTHGnyePYFD3jtz0zCIKdpT6HZK0\nUeEkipuBIUA
F8BywD7glnJM75+YBu2sUjwXWeTWFQ8DzwEXOuWXOuQtqPHaG/UlE5EtSEuN45Oo8\nEuNj+fYTC9iz/5DfIUkbVG+icM6VO+d+7pwb45zL854fbMJ79gK2BG0XemW1MrNMM5sBjDSzn4XY\n7zozyzez/KIiVbNFqvXKSObhKaPZUVLB957/VCOhpMHCGfX0npm9W/PREsEBOOeKnXPXO+eOc87d\nG2K/mV4iy+vSpUtLhSfSJozs04n/mTiE9wt28ds5a/wOR9qYcOZ6+nHQ8yTgm0BTltbaCvQO2s72\nyprMG8o7IScnpzlOJxJVLh3Th2VbS3ho3nqG9ErnwuE9/Q5J2ohwmp4WBj0+cM79EDizCe+5AMg1\ns/5mlgBMBprlvgwtXCQS2p0XDGFMv07c+tISVm7b53c40kaE0/TUOeiRZWbnAWF9E5vZc8B/gEFm\nVmhmU51zh4GbgDnAKuBF59yKJnyG4PfTUqgiISTExTD9ilFkJCdw3VP56tyWsJhzoW+JMLMNBO6b\nMAJNThuAXzvn5kc+vMbJy8tz+fn5foch0mp9unkPlz70EWP7d2bWNWOIi9W0bwJmttA5l1ezPJym\np/7OuQHef3Odc+e25iQhIvUb2acTd08cyvx1u/g/dW5LPerszDazSaEOdM79rfnDaRp1ZouE75Ix\nvVm2tYSZ89YzpGdHLhpR5yh1aedCjXqaEOI1B7S6ROGcmw3MzsvLu9bvWETagjsuGMzq7fv46ctL\nyemaypCeGggiX1ZvH0VbpD4KkfAVlVYw4S/ziYs1Zt90Gp1SEvwOSXzS6D4K7+DzzexWM7uz+tH8\nITadRj2JNFyXtERmXDWanaUV3PSc1rCQLwtneOwM4FICcz4ZcDHQN8JxNYruoxBpnBG9M7h74lA+\nWFfMb95a7Xc40sqEU6M4xTk3BdjjnPsVcDIwMLJhiUhLuySvN1NO7svD72/gH4ubZbIEiRLhJIoD\n3n/LzawnUAn0iFxIIuKXOy4YzNh+nfnpy0tZsU1NuBIQTqJ4zcwygN8Ci4CNwLORDKqx1Ech0jTx\nsV/cuX390wspKa/0OyRpBRo06snMEoGkWla8a1U06kmkaRZt3sOlD/2Hr+Z24ZEpecTEmN8hSQto\n9KgnM1tqZreb2XHOuYrWniREpOlG9enEnRcM5t3VO7n/vXV+hyM+C6fpaQKBOZ5eNLMFZvZjM+sT\n4bhExGdXfqUvk0b24o//XMvcNVpssj0LZ66nTc65/3POjQYuB04kMDGgiEQxM+OebwxjULc0bnlh\nMVt2l/sdkvgk3Bvu+prZrQTWtz4euDWiUTWSOrNFmldyQiwzrhxN1RHHDc8s5GBlld8hiQ/C6aP4\nGPi7t+/FzrmxzrnfRzyyRtANdyLNr19WCn+8ZATLt+7jl/9olqVjpI0JZynUKc45zUMs0o6dPbgb\nN52Vw/3vrWNknwwmj1U3ZXsSTh+FkoSI8INzBvLV3CzufHUFSwv3+h2OtCAtayUiYYmNMf48eSRd\nUhO54elF7NYyqu2GEoWIhK1zSgIPXDGKotIKvv/8p1Qdib5lCuTLwunMvtjM0rznvzCzv5nZqMiH\n1nAa9SQSecN7Z/Cri4bwfsEupv2rwO9wpAWEU6O4wzlXamanAWcDjwIPRjasxtGoJ5GWMXlMbyaN\n6sW0dwt4v6DI73AkwsJJFNUDp88HZjrnXge0BJZIO2Zm3D1xKLldU7nl+cVsLznod0gSQeEkiq1m\n9hCBxYve8CYGVN+GSDvXISGOB64YxYHKKm5+bhGVWhkvaoXzhX8JMAc4zzm3F+gM/CSiUYlIm5DT\nNY17Jw1jwcY9/G6ORtJHq3ASRQ/gdedcgZmdSWAp1E8iGpWItBkXjejFFSf14aF563ln5Q6/w5EI\nCCdRvAxUmVkOMBPoTStduEhE/HHHBYMZ2qsjP3pRkwdGo3ASxRHn3GFgEvAX59xP0FKoIhIkKT6W\nBy4fjQNufHYRFYc1eWA0CSdRVJrZZcAU4DWvLD5yITWe7qMQ8U+fzA787uLhLC0s4Z7XV/kdjjSj\ncBLFNcDJwD3OuQ1m1h94KrJhNY7uoxDx13lDunPtV/vz5H82MXvJNr/DkWYSzqSAK4EfA8vMbChQ\n6Jz7TcQjE5E26dZxxzO6bydue3kpnxWV+R2ONINwpvA4EygApgMPAGvN7PQIxyUibVR8bAz3Xz6S\nhLgY/t/TizhwSP0VbV04TU+/B851zp3hnDsdOA/4Y2TDEpG2rEd6Mn+aPJK1O0u58x/L/Q5Hmiic\nRBEfvCaFc24trbQzW0RajzMGduHms3L468JCXszf4nc40gThJIp8M3vEzM70Hg8D+ZEOTETavu+f\nPZBTjsvkjleWs+rzfX6HI40UTqK4AVgJfM97rASuj2RQIhIdqhc7Sk+O58ZnFlF6sNLvkKQRwhn1\nVOGc+4NzbpL3+COtdHisiLQ+XdIS+ctlI9lYvJ+f/W0Zzmmxo7amsbPAntysUYhIVDtpQCY/Pm8Q\nry39nKc+2uR3ONJAmi5cRFrE9acfx38d35X/eW0lS7bs9TscaYA6E4WZjarjMZoWHPVkZhPN7GEz\ne8HMzm2p9xWR5hUTY/zhkuF0TUvihqcXUlxW4XdIEiarq73QzN4LdaBz7qx6T272GHABsNM5NzSo\nfBzwZyAWeMQ5d18Y5+oE/M45N7W+ffPy8lx+vgZmibRGy7eW8M0HP2RknwyennoScbFq2GgtzGyh\ncy6vZnlcXQeEkwjCMAu4H3gyKJBYAnd5nwMUAgvM7FUCSePeGsd/2zm303v+C+84EWnDhvZK595J\nw/jhi0u4983V3HHBYL9DknrUmSiag3Nunpn1q1E8FljnnFsPYGbPAxc55+4lUPs4hpkZcB/wpnNu\nUSTjFZGWMWlUNksLS3h0/gaG9Upn4shefockIfhR5+sFBN+mWeiV1eVm4Gzgv82szvs3zOw6M8s3\ns/yioqLmiVREIubn55/A2P6due1vS1mxTUsDtGatvnHQOTfNOTfaOXe9c25GiP1mOufynHN5Xbp0\nackQRaQR4mNjmH75KDKSE/juUwvZs/+Q3yFJHUKNeroy6PmpNV67qQnvuZXAcqrVsr2yJtPCRSJt\nS5e0RGZcNZqd+yq4+blPOVx1xO+QpBahahQ/DHr+lxqvfbsJ77kAyDWz/maWAEwGXm3C+Y7SwkUi\nbc+I3hncPXEo89ft4rdvr6n/AGlxoRKF1fG8tu3aT2D2HPAfYJCZFZrZVG/97ZuAOcAq4EXn3IoG\nxBzq/VSjEGmDLhnTmyu/0oeH/r2e15ZqZbzWJtSoJ1fH89q2az+Bc5fVUf4G8EY452gI59xsYHZe\nXt61zX1uEYmsOy8YwqrPS/nJX5eS0zWV47t39Dsk8YSqURxvZkvNbFnQ8+rtQS0Un4i0EwlxMTx4\nxSjSkuL47lMLKSnXTLOtRahEcQIwgcC9DdXPq7db5R0yanoSadu6dkzi
wStHs23vAb7/wqdUHdFM\ns61BnYnCObcp+AGUAaOALG+71VFntkjbN7pvJ+66cAhz1xTxx3fW+h2OEHp47GtmNtR73gNYTmC0\n01NmdksLxSci7dDlY/sweUxv7n9vHW8t/9zvcNq9UE1P/Z1z1auiXwO845ybAJxE04bHRoyankSi\ng5nxq4uGMKJ3Bj96cQkFO0r9DqldC5UognuSvoY3Ssk5Vwq0yrti1PQkEj0S42KZceVokhMCndv7\ntIyqb0Ilii1mdrOZfYNA38RbAGaWTAuuRyEi7Vf39CQeuGIUm3eX88MXFnNEndu+CJUopgJDgG8B\nlzrnqpek+grweITjEhEBYGz/ztxxwWD+uWon094t8DucdinUehQ7gS/N1uqcew8IuaiRX8xsAjAh\nJyfH71BEpBlNObkvSwtL+NM/CxjSM51zBnfzO6R2JdQKdyHnX3LOXRiRiJqBVrgTiT4HK6u4eMZ/\n2LhrP6/cdCrHdUn1O6So0+AV7oCTCawb8RzwMWHO7yQiEglJ8bHMuGo0E/4yn+8+tZB/3HgqKYkR\nXXtNPKH6KLoDtwNDCaxvfQ6wyzn3b+fcv1siOBGRYL0ykrn/spGsLyrj1peWUleLiDSvUHdmVznn\n3nLOXU2gA3sdMLeJa1GIiDTJKTlZ/HTc8by+7HMeeX+D3+G0CyHrbWaWCJwPXAb0A6YBf498WI2j\nzmyR9uG60wewpHAv9721mn3ysG8AAA3ySURBVKG90jn5uEy/Q4pqoTqznyTQ7PQG8HzQXdqtnjqz\nRaJfWcVhJk7/gD37D/Ha906jR3qy3yG1eXV1Zofqo7gSyAW+D3xoZvu8R6mZ7YtUoCIi4UhNjGPG\nlaOpOHyEG55eRMXhKr9Dilqh+ihinHNp3qNj0CPNOacVRUTEdzldU/ndxSeyeMtefj17pd/hRK1Q\nNQoRkVZv3NAeXH/GcTzz8Wb+mr/F73CikhKFiLR5Pz53IKfmZPLzV5azfKtmj25uUZUoNM24SPsU\nFxvDtMkjyUpJ4Non89lZetDvkKJKVCUKTTMu0n5lpiby8NV57C2v5LonF3KwUp3bzSWqEoWItG9D\neqbzp8kjWLxlr+7cbkZKFCISVc4b0p1bxw3i1SXbuP/ddX6HExU0o5aIRJ0bzjiOdTvK+P07azmu\naypfH9bD75DaNNUoRCTqmBn3fnMYo/t24ocvLmZZoQa4NIUShYhEpcS4WB66ajSZKYlMfWIB2/Ye\n8DukNkuJQkSiVlZqIo99awwHDlXx7VkLKKs47HdIbVJUJQrdRyEiNQ3qnsb0K0ZRsLOMm55dxOGq\nI36H1OZEVaLQfRQiUpvTB3bhfy4aytw1Rfxq9koNm20gjXoSkXbh8pP6sKl4Pw/NW0+/rBSmntbf\n75DaDCUKEWk3fjrueDbvLufu11fSu1My5w7p7ndIbUJUNT2JiIQSE2P84ZIRnJidwfefX8zSwr1+\nh9QmKFGISLuSnBDLI1Py6JySwNQn8tmqYbP1UqIQkXanS1oij18zhoOHqpg6awGlByv9DqlVU6IQ\nkXZpYLc0HrxyNOt2lnHjs59q2GwIShQi0m6dlpvF3ROHMm9tEXe+ukLDZuugUU8i0q5NHtuHTbvL\neXDuZ/TPTOHa0wf4HVKro0QhIu3eT84dxObicv73zVX07pzMuKGabTZYq296MrMTzGyGmb1kZjf4\nHY+IRJ+YGOP3lwxneHYGt7ywmCVbNGw2WEQThZk9ZmY7zWx5jfJxZrbGzNaZ2W2hzuGcW+Wcux64\nBDg1kvGKSPuVFB/LI1fnkZWayNQn8incU+53SK1GpGsUs4BxwQVmFgtMB8YDg4HLzGywmQ0zs9dq\nPLp6x1wIvA68EeF4RaQdy0pNZNY1Y6g4HJhtdp+GzQIRThTOuXnA7hrFY4F1zrn1zrlDwPPARc65\nZc65C2o8dnrnedU5Nx64IpLxiojkdE3joStHs75oPzc+s4hKDZv1pY+iF7AlaLvQK6uVmZ1pZtPM\n7CFC1CjM7Dozyzez/KKiouaLVkTanVNysvjfScN4v2AXd7yyvN0Pm231o56cc3OBuWHsNxOYCZCX\nl9e+/1VFpMkuyevNpuL9TH/vM/plpXD9Gcf5HZJv/KhRbAV6B21ne2VNpoWLRKQ5/eicQVxwYg/u\ne3M1byz73O9wfONHolgA5JpZfzNLACYDrzbHibVwkYg0p5gY43cXD2d030784IXFfLp5j98h+SLS\nw2OfA/4DDDKzQjOb6pw7DNwEzAFWAS8651Y00/upRiEizSopPpaZV42mW8ckrn0yny2729+wWYvG\nTpq8vDyXn5/vdxgiEkXW7Sxj0gMf0LVjEi/fcArpyfF+h9TszGyhcy6vZnmrvzNbRKQ1yOmaykNX\n5bGpeD9THv2YsorDfofUYqIqUajpSUQi6eTjMpk2eSTLtpZw07Pt5x6LqEoU6swWkUgbP6wHd08c\nxtw1Rdz4zCIOVlb5HVLERVWiEBFpCZef1IdfXTiEd1btYMqjn1ByILqn+oiqRKGmJxFpKVef0o9p\nk0fy6ZY9TPjLfAp2lPodUsREVaJQ05OItKQJw3vy+LfGsnl3Od97fnHUdnBHVaIQEWlpp+Vm8fi3\nxrB2RylTHv2YveWH/A6p2SlRiIg00VnHd+X+y0ayfOs+Rvz6Hcb9aR6HDkfPiKioShTqoxARv4wf\n1oNZ14wBYPX2UmZ9uMHniJpPVCUK9VGIiJ9Oycli0R3n0CsjmT+8s5b3C4r4wztrWfX5Pr9Da5Ko\nShQiIn7rnJLAKzeeSr/MFK569BOm/auA655q3JRCJQcqufGZRb73eyhRiIg0sy5pibx0wymcNagL\nAFt2H2hwn8U3H/yQ4b96m9eXfc5tLy+jYEcpK7bV3az+Yv4W+t32ekT6RpQoREQiIDUxjkevHsM5\ng7sB8N8zPuSe11fyxIcba73nYsvucj5Yt+vo9sJNX0xp/taK7Zzzx3mcP20+AMVlFfzxnbV8+NkX\n+//mzdUAEbn5r9WvcNcQZjYBmJCTk+N3KCIixMQYD0/JY/p76/jtnDUsLQzUCPpldmBjcTm3nJ3L\nLWcPBGDi9A8o3n+IjfedH/Kcg+98i9xuaSzZshf+Rb37N8vniPg7tCB1ZotIa3TjWTk8PfWko9sb\niwNrWvzpnwVHy4r3B/ohxv1pHmu2132Xd/mhqkCS8DjnOHLEUX4oMOeUo/mXjoiqRCEi0lqdlpvF\nP2489Uvl1z2Zf8wX/+rtpfzu7TVhn3fFtn0MuP0NDlRPThiBJYaUKEREWsjw3hm8f+tZx5S9vXIH\nF03/4Jiyd1buCPuc97+77pjtI0oUIiJtW+/OHfj49q812/kO1Jjm/EgEVi1VohARaWHdOiZRcM94\nzhjYpcnn+vfaomO2lSjqoSk8RKStiI+N4Ylvj+XD2/6rWc8bgTwRXYlCo55EpK3pmZHMJ7d/jd6d\nk5vlfFUR6KSIqkQhItIWde2
YxLyfnFX/jmFQ05OISJQyM9bcPY61d49v0nk6dUhopoi+oEQhItJK\nJMbFkhAXw79/ciYDu6U26hydUpQoRESiXt/MFN78/ul+h3GUEoWISCsUG2NsuPfrLLvrXL9DUaIQ\nEWmtzIy0pHhW/884Ts3JJP8XZ4fc/6mpYyMTh4vEoFufBM0ee21BQUG9+4uItDWbi8sp3FtOYlwM\ng7p3JCkuhpyfvwk0fSZZM1vonMurWR5V04w752YDs/Py8q71OxYRkUjok9mBPpkdjimbNLIXp+Vm\nRew9oypRiIi0R3+4dEREz68+ChERCUmJQkREQlKiEBGRkJQoREQkJCUKEREJSYlCRERCUqIQEZGQ\nlChERCSkqJrCo5qZFQF7gZproqaHUZYF7IpcdF9SW0yRPL6+/Rv7ejjXtray9n6969unIde7tnJd\n74bt09zXG1r2mjf1evd1zn15IW/nXFQ+gJmNKQPy/Y4zksfXt39jX9f1bvz+ofZpyPWu4/rqevt4\nvVv6mjf1etf1iOamp9lNKGtJTX3/hh5f3/6NfV3Xu/H7h9qnIde7tnJd74bt096vd62isumpKcws\n39Uye6JEhq53y9L1bnnRcM2juUbRWDP9DqCd0fVuWbreLa/NX3PVKEREJCTVKEREJCQlChERCUmJ\nQkREQlKiqIeZpZjZE2b2sJld4Xc80c7MBpjZo2b2kt+xtAdmNtH7237BzM71O55oZ2YnmNkMM3vJ\nzG7wO55wtctEYWaPmdlOM1teo3ycma0xs3VmdptXPAl4yTl3LXBhiwcbBRpyvZ1z651zU/2JNDo0\n8Hq/4v1tXw9c6ke8bV0Dr/cq59z1wCXAqX7E2xjtMlEAs4BxwQVmFgtMB8YDg4HLzGwwkA1s8Xar\nasEYo8kswr/e0nSzaPj1/oX3ujTcLBpwvc3sQuB14I2WDbPx2mWicM7NA3bXKB4LrPN+0R4Cngcu\nAgoJJAtop9erqRp4vaWJGnK9LeA3wJvOuUUtHWs0aOjft3PuVefceKDNNGXri+8Lvfii5gCBBNEL\n+BvwTTN7EP9vz48mtV5vM8s0sxnASDP7mT+hRaW6/r5vBs4G/tvMrvcjsChV19/3mWY2zcweog3V\nKOL8DqC1c87tB67xO472wjlXTKC9XFqAc24aMM3vONoL59xcYK7PYTSYahRf2Ar0DtrO9sokMnS9\nW5aud8uKquutRPGFBUCumfU3swRgMvCqzzFFM13vlqXr3bKi6nq3y0RhZs8B/wEGmVmhmU11zh0G\nbgLmAKuAF51zK/yMM1roercsXe+W1R6utyYFFBGRkNpljUJERMKnRCEiIiEpUYiISEhKFCIiEpIS\nhYiIhKREISIiISlRiIRgZmXef/uZ2eXNfO7ba2x/2JznF2kuShQi4ekHNChRmFl9c6kdkyicc6c0\nMCaRFqFEIRKe+4CvmtliM/uBmcWa2W/NbIGZLTWz7wJ4s4O+b2avAiu9slfMbKGZrTCz67yy+4Bk\n73zPeGXVtRfzzr3czJaZ2aVB557rrY622syeMTPz4VpIO6PZY0XCcxvwY+fcBQDeF36Jc26MmSUC\nH5jZ296+o4ChzrkN3va3nXO7zSwZWGBmLzvnbjOzm5xzI2p5r0nACGA4kOUdM897bSQwBNgGfEBg\nlbT5zf9xRb6gGoVI45wLTDGzxcDHQCaQ6732SVCSAPiemS0BPiIwo2guoZ0GPOecq3LO7QD+DYwJ\nOnehc+4IsJhAk5hIRKlGIdI4BtzsnJtzTKHZmcD+GttnAyc758rNbC6Q1IT3rQh6XoX+H5YWoBqF\nSHhKgbSg7TnADWYWD2BmA80spZbj0oE9XpI4HvhK0GuV1cfX8D5wqdcP0gU4HfikWT6FSCPo14hI\neJYCVV4T0izgzwSafRZ5HcpFwMRajnsLuN7MVgFrCDQ/VZsJLDWzRc654PWT/w6cDCwBHHCrc267\nl2hEWpymGRcRkZDU9CQiIiEpUYiISEhKFCIiEpIShYiIhKREISIiISlRiIhISEoUIiISkhKFiIiE\n9P8BOWNMSgNRV5gAAAAASUVORK5CYII=\n", + "text/plain": [ + "
" + ] + }, + "metadata": { + "tags": [] + } + } + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "VWVwnhc7b6gD", + "colab_type": "code", + "colab": { + "base_uri": "https://localhost:8080/", + "height": 68 + }, + "outputId": "b9e46705-3e7e-40db-bc85-ac2087538ed1" + }, + "source": [ + "print(t3f.gather_nd(estimated, observation_idx))" + ], + "execution_count": 18, + "outputs": [ + { + "output_type": "stream", + "text": [ + "tf.Tensor(\n", + "[-0.12139133 -1.3777294 -0.5469675 ... -0.00776806 0.23622975\n", + " 0.7571926 ], shape=(10000,), dtype=float32)\n" + ], + "name": "stdout" + } + ] + }, + { + "cell_type": "code", + "metadata": { + "id": "5bvmcc-Jb6gI", + "colab_type": "code", + "colab": { + "base_uri": "https://localhost:8080/", + "height": 51 + }, + "outputId": "569577a1-3714-4ca8-ba2f-24e2ddcbd061" + }, + "source": [ + "print(observations)" + ], + "execution_count": 19, + "outputs": [ + { + "output_type": "stream", + "text": [ + "[-1.27225139e-01 -1.37794858e+00 -5.42469328e-01 ... -1.30643336e-03\n", + " 2.35629296e-01 7.53320726e-01]\n" + ], + "name": "stdout" + } + ] } - ], - "source": [ - "print(observations)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": true - }, - "outputs": [], - "source": [] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.6.5" - } - }, - "nbformat": 4, - "nbformat_minor": 2 -} + ] +} \ No newline at end of file diff --git a/t3f/approximate.py b/t3f/approximate.py index 1121edd1..cafbee03 100644 --- a/t3f/approximate.py +++ b/t3f/approximate.py @@ -1,6 +1,6 @@ import itertools import numpy as np -import tensorflow.compat.v1 as tf +import tensorflow as tf from t3f.tensor_train_batch import TensorTrainBatch from t3f import decompositions from t3f import batch_ops @@ -28,7 +28,7 @@ def add_n(tt_objects, max_tt_rank, name='t3f_approximate_add_n'): """ list_of_cores_lists = [tt.tt_cores for tt in tt_objects] all_cores = tuple(itertools.chain.from_iterable(list_of_cores_lists)) - with tf.name_scope(name, values=all_cores): + with tf.name_scope(name): prev_level = tt_objects while len(prev_level) > 1: next_level = [] @@ -80,7 +80,7 @@ def reduce_sum_batch(tt_batch, max_tt_rank, coef=None, all_tensors = tt_batch.tt_cores if coef is not None: all_tensors += (coef, ) - with tf.name_scope(name, values=all_tensors): + with tf.name_scope(name): is_batch_output = False if coef is not None: coef = tf.convert_to_tensor(coef, dtype=tt_batch.dtype) @@ -101,7 +101,7 @@ def reduce_sum_batch(tt_batch, max_tt_rank, coef=None, curr_core = tt_batch.tt_cores[core_idx] curr_shape = curr_core.get_shape().as_list() new_shape = np.insert(curr_shape, 1, 1) - tiling = np.ones(len(new_shape)) + tiling = np.ones(len(new_shape), dtype=int) tiling[1] = output_size curr_core = tf.tile(tf.reshape(curr_core, new_shape), tiling) if core_idx == 0: diff --git a/t3f/approximate_test.py b/t3f/approximate_test.py index 372bd88e..a6e05c3f 100644 --- a/t3f/approximate_test.py +++ b/t3f/approximate_test.py @@ -1,6 +1,6 @@ import numpy as np -import tensorflow.compat.v1 as tf -tf.disable_v2_behavior() +import tensorflow as tf +tf.compat.v1.enable_eager_execution() from t3f import ops from t3f import approximate @@ -22,21 +22,20 @@ def 
desired(tt_objects): res += tt return res - with self.test_session() as sess: - res_actual = ops.full(approximate.add_n([tt_a, tt_b], 6)) - res_desired = ops.full(desired([tt_a, tt_b])) - res_desired_val, res_actual_val = sess.run([res_desired, res_actual]) - self.assertAllClose(res_desired_val, res_actual_val, atol=1e-5, rtol=1e-5) + res_actual = ops.full(approximate.add_n([tt_a, tt_b], 6)) + res_desired = ops.full(desired([tt_a, tt_b])) + res_desired_val, res_actual_val = self.evaluate([res_desired, res_actual]) + self.assertAllClose(res_desired_val, res_actual_val, atol=1e-5, rtol=1e-5) - res_actual = ops.full(approximate.add_n([tt_a, tt_b, tt_a], 8)) - res_desired = ops.full(desired([tt_a, tt_b, tt_a])) - res_desired_val, res_actual_val = sess.run([res_desired, res_actual]) - self.assertAllClose(res_desired_val, res_actual_val, atol=1e-5, rtol=1e-5) + res_actual = ops.full(approximate.add_n([tt_a, tt_b, tt_a], 8)) + res_desired = ops.full(desired([tt_a, tt_b, tt_a])) + res_desired_val, res_actual_val = self.evaluate([res_desired, res_actual]) + self.assertAllClose(res_desired_val, res_actual_val, atol=1e-5, rtol=1e-5) - res_actual = ops.full(approximate.add_n([tt_a, tt_b, tt_a, tt_a, tt_a], 12)) - res_desired = ops.full(desired([tt_a, tt_b, tt_a, tt_a, tt_a])) - res_desired_val, res_actual_val = sess.run([res_desired, res_actual]) - self.assertAllClose(res_desired_val, res_actual_val, atol=1e-5, rtol=1e-5) + res_actual = ops.full(approximate.add_n([tt_a, tt_b, tt_a, tt_a, tt_a], 12)) + res_desired = ops.full(desired([tt_a, tt_b, tt_a, tt_a, tt_a])) + res_desired_val, res_actual_val = self.evaluate([res_desired, res_actual]) + self.assertAllClose(res_desired_val, res_actual_val, atol=1e-5, rtol=1e-5) def testReduceSumBatch(self): # Sum a batch of TT-tensors. @@ -47,15 +46,14 @@ def desired(tt_batch): res += tt_batch[i] return res for batch_size in [2, 3, 4, 5]: - with self.test_session() as sess: - tt_batch = initializers.random_tensor_batch((4, 3, 5), - tt_rank=2, - batch_size=batch_size, - dtype=self.dtype) - res_actual = ops.full(approximate.reduce_sum_batch(tt_batch, 10)) - res_desired = ops.full(desired(tt_batch)) - res_desired_val, res_actual_val = sess.run([res_desired, res_actual]) - self.assertAllClose(res_desired_val, res_actual_val, atol=1e-5, rtol=1e-5) + tt_batch = initializers.random_tensor_batch((4, 3, 5), + tt_rank=2, + batch_size=batch_size, + dtype=self.dtype) + res_actual = ops.full(approximate.reduce_sum_batch(tt_batch, 10)) + res_desired = ops.full(desired(tt_batch)) + res_desired_val, res_actual_val = self.evaluate([res_desired, res_actual]) + self.assertAllClose(res_desired_val, res_actual_val, atol=1e-5, rtol=1e-5) def testReduceSumBatchWeighted(self): # Weighted sum of a batch of TT-tensors. 
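Note on the test-migration pattern used in the hunks above and below (a minimal runnable sketch, not part of the diff; the test class and constants here are illustrative): tf.test.TestCase.evaluate returns numpy values directly under eager execution and falls back to a session run in graph mode, which is what lets these rewrites drop the explicit "with self.test_session() as sess: ... sess.run(...)" blocks without losing graph-mode compatibility.

# Sketch of the sess.run -> self.evaluate migration (illustrative test, not t3f code).
import tensorflow as tf
tf.compat.v1.enable_eager_execution()


class EvaluateSketchTest(tf.test.TestCase):

  def testAdd(self):
    a = tf.constant([1.0, 2.0])
    b = tf.constant([3.0, 4.0])
    # Previously: with self.test_session() as sess: val = sess.run(a + b)
    val = self.evaluate(a + b)  # works in both eager and graph mode
    self.assertAllClose(val, [4.0, 6.0])


if __name__ == "__main__":
  tf.test.main()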
@@ -65,16 +63,15 @@ def desired(tt_batch, coef):
       for i in range(1, tt_batch.batch_size):
         res += coef[i] * tt_batch[i]
       return res
-    with self.test_session() as sess:
-      tt_batch = initializers.random_tensor_batch((4, 3, 5),
-                                                  tt_rank=3,
-                                                  batch_size=3,
-                                                  dtype=self.dtype)
-      res_actual = ops.full(approximate.reduce_sum_batch(tt_batch, 9,
-                                                         [1.2, -0.2, 1]))
-      res_desired = ops.full(desired(tt_batch, [1.2, -0.2, 1]))
-      res_desired_val, res_actual_val = sess.run([res_desired, res_actual])
-      self.assertAllClose(res_desired_val, res_actual_val, atol=1e-5, rtol=1e-5)
+    tt_batch = initializers.random_tensor_batch((4, 3, 5),
+                                                tt_rank=3,
+                                                batch_size=3,
+                                                dtype=self.dtype)
+    res_actual = ops.full(approximate.reduce_sum_batch(tt_batch, 9,
+                                                       [1.2, -0.2, 1]))
+    res_desired = ops.full(desired(tt_batch, [1.2, -0.2, 1]))
+    res_desired_val, res_actual_val = self.evaluate([res_desired, res_actual])
+    self.assertAllClose(res_desired_val, res_actual_val, atol=1e-5, rtol=1e-5)

   def testReduceSumBatchMultipleWeighted(self):
     # Multiple weighted sums of a batch of TT-tensors.
@@ -84,21 +81,20 @@ def desired(tt_batch, coef):
       for i in range(1, tt_batch.batch_size):
         res += coef[i] * tt_batch[i]
       return res
-    with self.test_session() as sess:
-      tt_batch = initializers.random_tensor_batch((4, 3, 5), tt_rank=2,
-                                                  batch_size=3,
-                                                  dtype=self.dtype)
-      coef = [[1., 0.1],
-              [0.9, -0.2],
-              [0.3, 0.3]]
-      coef = np.array(coef)
-      res_actual = ops.full(approximate.reduce_sum_batch(tt_batch, 6,
-                                                         coef))
-      res_desired_1 = ops.full(desired(tt_batch, coef[:, 0]))
-      res_desired_2 = ops.full(desired(tt_batch, coef[:, 1]))
-      res_desired = tf.stack((res_desired_1, res_desired_2))
-      res_desired_val, res_actual_val = sess.run([res_desired, res_actual])
-      self.assertAllClose(res_desired_val, res_actual_val, atol=1e-5, rtol=1e-5)
+    tt_batch = initializers.random_tensor_batch((4, 3, 5), tt_rank=2,
+                                                batch_size=3,
+                                                dtype=self.dtype)
+    coef = [[1., 0.1],
+            [0.9, -0.2],
+            [0.3, 0.3]]
+    coef = np.array(coef)
+    res_actual = ops.full(approximate.reduce_sum_batch(tt_batch, 6,
+                                                       coef))
+    res_desired_1 = ops.full(desired(tt_batch, coef[:, 0]))
+    res_desired_2 = ops.full(desired(tt_batch, coef[:, 1]))
+    res_desired = tf.stack((res_desired_1, res_desired_2))
+    res_desired_val, res_actual_val = self.evaluate([res_desired, res_actual])
+    self.assertAllClose(res_desired_val, res_actual_val, atol=1e-5, rtol=1e-5)


 class ApproximateTestFloat32(tf.test.TestCase, _ApproximateTest):
diff --git a/t3f/autodiff.py b/t3f/autodiff.py
index a41623de..4e2ee301 100644
--- a/t3f/autodiff.py
+++ b/t3f/autodiff.py
@@ -1,8 +1,21 @@
-import tensorflow.compat.v1 as tf
+import tensorflow as tf
 from t3f import shapes
 from t3f import decompositions
 from t3f import riemannian
+from t3f import utils
+
+
+def value_and_grad(f, x):
+  """Value and gradient of the given function w.r.t. x.
Works in eager and graph mode.""" + if utils.in_eager_mode(): + with tf.GradientTape() as tape: + tape.watch(x) + v = f(x) + return v, tape.gradient(v, x) + else: + v = f(x) + return v, tf.gradients(v, x) def _enforce_gauge_conditions(deltas, left): @@ -90,20 +103,23 @@ def gradients(func, x, name='t3f_gradients', runtime_check=True): See also: t3f.hessian_vector_product """ - with tf.name_scope(name, values=x.tt_cores): + with tf.name_scope(name): left = decompositions.orthogonalize_tt_cores(x) right = decompositions.orthogonalize_tt_cores(left, left_to_right=False) deltas = [right.tt_cores[0]] deltas += [tf.zeros_like(cc) for cc in right.tt_cores[1:]] - x_projection = riemannian.deltas_to_tangent_space(deltas, x, left, right) - function_value = func(x_projection) + + def augmented_func(d): + x_projection = riemannian.deltas_to_tangent_space(d, x, left, right) + return func(x_projection) + + function_value, cores_grad = value_and_grad(augmented_func, deltas) if runtime_check: assert_op = _is_invariant_to_input_transforms(function_value, func(x)) else: assert_op = tf.no_op() with tf.control_dependencies([assert_op]): - cores_grad = tf.gradients(function_value, deltas) - deltas = _enforce_gauge_conditions(cores_grad, left) + deltas = _enforce_gauge_conditions(cores_grad, left) return riemannian.deltas_to_tangent_space(deltas, x, left, right) @@ -152,23 +168,30 @@ def hessian_vector_product(func, x, vector, name='t3f_hessian_vector_product', t3f.gradients """ all_cores = list(x.tt_cores) + list(vector.tt_cores) - with tf.name_scope(name, values=all_cores): + with tf.name_scope(name): left = decompositions.orthogonalize_tt_cores(x) right = decompositions.orthogonalize_tt_cores(left, left_to_right=False) deltas = [right.tt_cores[0]] deltas += [tf.zeros_like(cc) for cc in right.tt_cores[1:]] - x_projection = riemannian.deltas_to_tangent_space(deltas, x, left, right) - function_value = func(x_projection) - if runtime_check: - assert_op = _is_invariant_to_input_transforms(function_value, func(x)) - else: - assert_op = tf.no_op() - with tf.control_dependencies([assert_op]): - vector_projected = riemannian.project(vector, x) - cores_grad = tf.gradients(function_value, deltas) - vec_deltas = riemannian.tangent_space_to_deltas(vector_projected) - products = [tf.reduce_sum(a * b) for a, b in zip(cores_grad, vec_deltas)] - grad_times_vec = tf.add_n(products) - second_cores_grad = tf.gradients(grad_times_vec, deltas) + + def augmented_outer_func(deltas_outer): + + def augmented_inner_func(deltas_inner): + x_projection = riemannian.deltas_to_tangent_space(deltas_inner, x, left, + right) + return func(x_projection) + + function_value, cores_grad = value_and_grad(augmented_inner_func, deltas_outer) + if runtime_check: + assert_op = _is_invariant_to_input_transforms(function_value, func(x)) + else: + assert_op = tf.no_op() + with tf.control_dependencies([assert_op]): + vector_projected = riemannian.project(vector, x) + vec_deltas = riemannian.tangent_space_to_deltas(vector_projected) + products = [tf.reduce_sum(a * b) for a, b in zip(cores_grad, vec_deltas)] + return tf.add_n(products) + + _, second_cores_grad = value_and_grad(augmented_outer_func, deltas) final_deltas = _enforce_gauge_conditions(second_cores_grad, left) return riemannian.deltas_to_tangent_space(final_deltas, x, left, right) \ No newline at end of file diff --git a/t3f/autodiff_test.py b/t3f/autodiff_test.py index 3821ab84..10052b64 100644 --- a/t3f/autodiff_test.py +++ b/t3f/autodiff_test.py @@ -1,5 +1,6 @@ import numpy as np -import 
tensorflow.compat.v1 as tf +import tensorflow as tf +tf.compat.v1.enable_eager_execution() from t3f import ops from t3f import initializers @@ -13,10 +14,9 @@ def _TestSingleGradient(self, func, x, desired): actual1 = ops.full(autodiff.gradients(func, x, runtime_check=False)) actual2 = ops.full(autodiff.gradients(func, x, runtime_check=True)) - with self.test_session() as sess: - desired_v, actual1_v, actual2_v = sess.run([desired, actual1, actual2]) - self.assertAllClose(desired_v, actual1_v, rtol=1e-4) - self.assertAllClose(desired_v, actual2_v, rtol=1e-4) + desired_v, actual1_v, actual2_v = self.evaluate([desired, actual1, actual2]) + self.assertAllClose(desired_v, actual1_v, rtol=1e-4) + self.assertAllClose(desired_v, actual2_v, rtol=1e-4) def testGradients(self): w = initializers.random_matrix(([5] * 3, None), dtype=self.dtype) @@ -25,7 +25,8 @@ def testGradients(self): def func1(x): return 0.5 * ops.flat_inner(x, w) ** 2 - desired1 = ops.full(ops.flat_inner(x, w) * riemannian.project(w, x)) + desired1 = ops.full(riemannian.project(w, x) * ops.flat_inner(x, w)) + self._TestSingleGradient(func1, x, desired1) def func2(x): @@ -38,10 +39,9 @@ def func3(x): # A function which is not invariant to different representations of the # same tensor, i.e. it does not even have a Riemannian gradient. return tf.add_n([tf.reduce_sum(c) for c in x.tt_cores]) ** 2 - actual3 = ops.full(autodiff.gradients(func3, x)) with self.assertRaises(tf.errors.InvalidArgumentError): - with self.test_session() as sess: - sess.run(actual3) + actual3 = ops.full(autodiff.gradients(func3, x)) + self.evaluate(actual3) def _TestSingleHessianByVector(self, func, x, z, desired): actual1 = ops.full(autodiff.hessian_vector_product( @@ -49,10 +49,9 @@ def _TestSingleHessianByVector(self, func, x, z, desired): actual2 = ops.full(autodiff.hessian_vector_product(func, x, z, runtime_check=True)) - with self.test_session() as sess: - desired_v, actual1_v, actual2_v = sess.run([desired, actual1, actual2]) - self.assertAllClose(desired_v, actual1_v, rtol=1e-4) - self.assertAllClose(desired_v, actual2_v, rtol=1e-4) + desired_v, actual1_v, actual2_v = self.evaluate([desired, actual1, actual2]) + self.assertAllClose(desired_v, actual1_v, rtol=1e-4) + self.assertAllClose(desired_v, actual2_v, rtol=1e-4) def testHessianVectorProduct(self): w = initializers.random_matrix(([5] * 3, None), dtype=self.dtype) @@ -66,7 +65,7 @@ def func1(x): # Grad: w # Hessian: w w.T # Hessian by vector: w - desired1 = riemannian.project(ops.flat_inner(projected_vector, w) * w, x) + desired1 = riemannian.project(w * ops.flat_inner(projected_vector, w), x) desired1 = ops.full(desired1) self._TestSingleHessianByVector(func1, x, z, desired1) @@ -82,10 +81,9 @@ def func3(x): # same tensor, i.e. it does not even have a Riemannian gradient or # hessian. 
     return tf.add_n([tf.reduce_sum(c) for c in x.tt_cores]) ** 2
-    actual3 = ops.full(autodiff.hessian_vector_product(func3, x, z))
     with self.assertRaises(tf.errors.InvalidArgumentError):
-      with self.test_session() as sess:
-        sess.run(actual3)
+      actual3 = ops.full(autodiff.hessian_vector_product(func3, x, z))
+      self.evaluate(actual3)


 class AutodiffTestFloat32(tf.test.TestCase, _AutodiffTest):
diff --git a/t3f/batch_ops.py b/t3f/batch_ops.py
index d1ba7db6..f062e0e1 100644
--- a/t3f/batch_ops.py
+++ b/t3f/batch_ops.py
@@ -1,5 +1,5 @@
 import itertools
-import tensorflow.compat.v1 as tf
+import tensorflow as tf
 from t3f.tensor_train_base import TensorTrainBase
 from t3f.tensor_train_batch import TensorTrainBatch
@@ -38,7 +38,7 @@ def concat_along_batch_dim(tt_list, name='t3f_concat_along_batch_dim'):
   list_of_cores_lists = [tt.tt_cores for tt in tt_list]
   all_cores = tuple(itertools.chain.from_iterable(list_of_cores_lists))
-  with tf.name_scope(name, values=all_cores):
+  with tf.name_scope(name):
     res_cores = []
     for core_idx in range(ndims):
       curr_core = tf.concat([tt.tt_cores[core_idx] for tt in tt_list], axis=0)
@@ -67,7 +67,7 @@ def multiply_along_batch_dim(batch_tt, weights,
   Returns:
     TensorTrainBatch
   """
-  with tf.name_scope(name, values=batch_tt.tt_cores+(weights,)):
+  with tf.name_scope(name):
     weights = tf.convert_to_tensor(weights, dtype=batch_tt.dtype)
     tt_cores = list(batch_tt.tt_cores)
     if batch_tt.is_tt_matrix():
@@ -161,7 +161,7 @@ def pairwise_flat_inner(tt_1, tt_2, matrix=None,
   all_cores = tt_1.tt_cores + tt_2.tt_cores
   if matrix is not None:
     all_cores += matrix.tt_cores
-  with tf.name_scope(name, values=all_cores):
+  with tf.name_scope(name):
     ndims = tt_1.ndims()
     if matrix is None:
       curr_core_1 = tt_1.tt_cores[0]
diff --git a/t3f/batch_ops_no_eager_test.py b/t3f/batch_ops_no_eager_test.py
new file mode 100644
index 00000000..b3b453a7
--- /dev/null
+++ b/t3f/batch_ops_no_eager_test.py
@@ -0,0 +1,47 @@
+# Graph mode tests.
+import numpy as np
+import tensorflow.compat.v1 as tf
+tf.disable_eager_execution()
+
+from t3f import ops
+from t3f import batch_ops
+from t3f import initializers
+
+
+class _BatchOpsTest():
+
+  def testConcatTensorPlaceholders(self):
+    # Test concatenating TTTensors of unknown batch sizes along batch dimension.
+    number_of_objects = tf.placeholder(tf.int32)
+    all = initializers.random_tensor_batch((2, 3), batch_size=5,
+                                           dtype=self.dtype)
+    actual = batch_ops.concat_along_batch_dim((all[:number_of_objects],
+                                               all[number_of_objects:]))
+    with tf.Session() as sess:
+      desired_val, actual_val = sess.run((ops.full(all), ops.full(actual)),
+                                         feed_dict={number_of_objects: 2})
+      self.assertAllClose(desired_val, actual_val)
+
+  def testConcatMatrixPlaceholders(self):
+    # Test concatenating TTMatrices of unknown batch sizes along batch dimension.
+ number_of_objects = tf.placeholder(tf.int32) + all = initializers.random_matrix_batch(((2, 3), (2, 3)), batch_size=5, + dtype=self.dtype) + actual = batch_ops.concat_along_batch_dim((all[:number_of_objects], + all[number_of_objects:])) + with tf.Session() as sess: + desired_val, actual_val = sess.run((ops.full(all), ops.full(actual)), + feed_dict={number_of_objects: 2}) + self.assertAllClose(desired_val, actual_val) + + +class BatchOpsTestFloat32(tf.test.TestCase, _BatchOpsTest): + dtype = tf.float32 + + +class BatchOpsTestFloat64(tf.test.TestCase, _BatchOpsTest): + dtype = tf.float64 + + +if __name__ == "__main__": + tf.test.main() diff --git a/t3f/batch_ops_test.py b/t3f/batch_ops_test.py index f0719627..c04b72a0 100644 --- a/t3f/batch_ops_test.py +++ b/t3f/batch_ops_test.py @@ -1,5 +1,6 @@ import numpy as np -import tensorflow.compat.v1 as tf +import tensorflow as tf +tf.compat.v1.enable_eager_execution() from t3f import ops from t3f import batch_ops @@ -31,43 +32,18 @@ def testConcatMatrix(self): first_second_desired = tf.concat((first_full, second_full), axis=0) first_second_third_desired = tf.concat((first_full, second_full, third_full), axis=0) - with self.test_session() as sess: - res = sess.run((first_res, first_second_res, first_second_third_res, - first_desired, first_second_desired, - first_second_third_desired)) - first_res_val = res[0] - first_second_res_val = res[1] - first_second_third_res_val = res[2] - first_desired_val = res[3] - first_second_desired_val = res[4] - first_second_third_desired_val = res[5] - self.assertAllClose(first_res_val, first_desired_val) - self.assertAllClose(first_second_res_val, first_second_desired_val) - self.assertAllClose(first_second_third_res_val, first_second_third_desired_val) - - def testConcatTensorPlaceholders(self): - # Test concating TTTensors of unknown batch sizes along batch dimension. - number_of_objects = tf.placeholder(tf.int32) - all = initializers.random_tensor_batch((2, 3), batch_size=5, - dtype=self.dtype) - actual = batch_ops.concat_along_batch_dim((all[:number_of_objects], - all[number_of_objects:])) - with self.test_session() as sess: - desired_val, actual_val = sess.run((ops.full(all), ops.full(actual)), - feed_dict={number_of_objects: 2}) - self.assertAllClose(desired_val, actual_val) - - def testConcatMatrixPlaceholders(self): - # Test concating TTMatrices of unknown batch sizes along batch dimension. - number_of_objects = tf.placeholder(tf.int32) - all = initializers.random_matrix_batch(((2, 3), (2, 3)), batch_size=5, - dtype=self.dtype) - actual = batch_ops.concat_along_batch_dim((all[:number_of_objects], - all[number_of_objects:])) - with self.test_session() as sess: - desired_val, actual_val = sess.run((ops.full(all), ops.full(actual)), - feed_dict={number_of_objects: 2}) - self.assertAllClose(desired_val, actual_val) + res = self.evaluate((first_res, first_second_res, first_second_third_res, + first_desired, first_second_desired, + first_second_third_desired)) + first_res_val = res[0] + first_second_res_val = res[1] + first_second_third_res_val = res[2] + first_desired_val = res[3] + first_second_desired_val = res[4] + first_second_third_desired_val = res[5] + self.assertAllClose(first_res_val, first_desired_val) + self.assertAllClose(first_second_res_val, first_second_desired_val) + self.assertAllClose(first_second_third_res_val, first_second_third_desired_val) def testBatchMultiply(self): # Test multiplying batch of TTMatrices by individual numbers. 
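Context for the split above (an illustrative sketch under TF 1.15/2.0 compat semantics; the shapes are made up): tf.placeholder exists only in graph mode and raises a RuntimeError when eager execution is enabled, so the placeholder-based tests cannot live next to the eager tests and move into the *_no_eager_test.py files, which disable eager execution at import time.

# Sketch: feeding a placeholder requires graph mode (not t3f-specific).
import tensorflow.compat.v1 as tf
tf.disable_eager_execution()

x = tf.placeholder(tf.float32, shape=(None,))  # graph-mode only construct
y = 2.0 * x
with tf.Session() as sess:
  print(sess.run(y, feed_dict={x: [1.0, 2.0]}))  # prints [2. 4.]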
@@ -77,9 +53,8 @@ def testBatchMultiply(self):
     actual = batch_ops.multiply_along_batch_dim(tt, weights)
     individual_desired = [weights[i] * tt[i:i+1] for i in range(3)]
     desired = batch_ops.concat_along_batch_dim(individual_desired)
-    with self.test_session() as sess:
-      desired_val, acutual_val = sess.run((ops.full(desired), ops.full(actual)))
-      self.assertAllClose(desired_val, acutual_val)
+    desired_val, actual_val = self.evaluate((ops.full(desired), ops.full(actual)))
+    self.assertAllClose(desired_val, actual_val)

   def testGramMatrix(self):
     # Test Gram Matrix of a batch of TT vectors.
@@ -89,9 +64,8 @@ def testGramMatrix(self):
     full_vectors = tf.reshape(ops.full(tt_vectors), (5, 6))
     res_desired = tf.matmul(full_vectors, tf.transpose(full_vectors))
     res_desired = tf.squeeze(res_desired)
-    with self.test_session() as sess:
-      res_actual_val, res_desired_val = sess.run((res_actual, res_desired))
-      self.assertAllClose(res_desired_val, res_actual_val)
+    res_actual_val, res_desired_val = self.evaluate((res_actual, res_desired))
+    self.assertAllClose(res_desired_val, res_actual_val)

   def testGramMatrixWithMatrix(self):
     # Test Gram Matrix of a batch of TT vectors with providing a matrix, so we
@@ -102,16 +76,15 @@ def testGramMatrixWithMatrix(self):
     matrix = initializers.random_matrix(((2, 3), (2, 3)), dtype=self.dtype)
     res_actual = batch_ops.gram_matrix(tt_vectors, matrix)
     full_vectors = tf.reshape(ops.full(tt_vectors), (4, 6))
-    with self.test_session() as sess:
-      res = sess.run((res_actual, full_vectors, ops.full(matrix)))
-      res_actual_val, vectors_val, matrix_val = res
-      res_desired_val = np.zeros((4, 4))
-      for i in range(4):
-        for j in range(4):
-          curr_val = np.dot(vectors_val[i], matrix_val)
-          curr_val = np.dot(curr_val, vectors_val[j])
-          res_desired_val[i, j] = curr_val
-      self.assertAllClose(res_desired_val, res_actual_val, atol=1e-5, rtol=1e-5)
+    res = self.evaluate((res_actual, full_vectors, ops.full(matrix)))
+    res_actual_val, vectors_val, matrix_val = res
+    res_desired_val = np.zeros((4, 4))
+    for i in range(4):
+      for j in range(4):
+        curr_val = np.dot(vectors_val[i], matrix_val)
+        curr_val = np.dot(curr_val, vectors_val[j])
+        res_desired_val[i, j] = curr_val
+    self.assertAllClose(res_desired_val, res_actual_val, atol=1e-5, rtol=1e-5)

   def testPairwiseFlatInnerTensor(self):
     # Test pairwise_flat_inner of a batch of TT tensors.
@@ -124,9 +97,8 @@ def testPairwiseFlatInnerTensor(self):
     full_tensors_2 = tf.reshape(ops.full(tt_tensors_2), (5, 12))
     res_desired = tf.matmul(full_tensors_1, tf.transpose(full_tensors_2))
     res_desired = tf.squeeze(res_desired)
-    with self.test_session() as sess:
-      res_actual_val, res_desired_val = sess.run((res_actual, res_desired))
-      self.assertAllClose(res_desired_val, res_actual_val)
+    res_actual_val, res_desired_val = self.evaluate((res_actual, res_desired))
+    self.assertAllClose(res_desired_val, res_actual_val)

   def testPairwiseFlatInnerMatrix(self):
     # Test pairwise_flat_inner of a batch of TT matrices.
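For reference, the quantity testGramMatrix checks, written densely in numpy (a sketch using the same shapes as the test; batch_ops.gram_matrix is the real API, everything else here is illustrative): the Gram matrix of a batch of vectors v_1, ..., v_5 has entries G[i, j] = <v_i, v_j>, i.e. V V^T for the matrix V of flattened vectors.

# Dense numpy reference for the Gram-matrix computation tested above.
import numpy as np

vectors = np.random.randn(5, 6)  # batch of 5 flattened vectors of dimension 6
gram = vectors @ vectors.T       # G[i, j] = <v_i, v_j>
assert np.allclose(gram[1, 2], vectors[1] @ vectors[2])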
@@ -141,9 +113,8 @@ def testPairwiseFlatInnerMatrix(self): full_vectors_2 = tf.reshape(ops.full(tt_vectors_2), (5, 36)) res_desired = tf.matmul(full_vectors_1, tf.transpose(full_vectors_2)) res_desired = tf.squeeze(res_desired) - with self.test_session() as sess: - res_actual_val, res_desired_val = sess.run((res_actual, res_desired)) - self.assertAllClose(res_desired_val, res_actual_val, atol=1e-5, rtol=1e-5) + res_actual_val, res_desired_val = self.evaluate((res_actual, res_desired)) + self.assertAllClose(res_desired_val, res_actual_val, atol=1e-5, rtol=1e-5) def testPairwiseFlatInnerVectorsWithMatrix(self): # Test pairwise_flat_inner of a batch of TT vectors with providing a matrix, @@ -160,17 +131,16 @@ def testPairwiseFlatInnerVectorsWithMatrix(self): matrix) full_vectors_1 = tf.reshape(ops.full(tt_vectors_1), (2, 6)) full_vectors_2 = tf.reshape(ops.full(tt_vectors_2), (3, 6)) - with self.test_session() as sess: - res = sess.run((res_actual, full_vectors_1, full_vectors_2, - ops.full(matrix))) - res_actual_val, vectors_1_val, vectors_2_val, matrix_val = res - res_desired_val = np.zeros((2, 3)) - for i in range(2): - for j in range(3): - curr_val = np.dot(vectors_1_val[i], matrix_val) - curr_val = np.dot(curr_val, vectors_2_val[j]) - res_desired_val[i, j] = curr_val - self.assertAllClose(res_desired_val, res_actual_val) + res = self.evaluate((res_actual, full_vectors_1, full_vectors_2, + ops.full(matrix))) + res_actual_val, vectors_1_val, vectors_2_val, matrix_val = res + res_desired_val = np.zeros((2, 3)) + for i in range(2): + for j in range(3): + curr_val = np.dot(vectors_1_val[i], matrix_val) + curr_val = np.dot(curr_val, vectors_2_val[j]) + res_desired_val[i, j] = curr_val + self.assertAllClose(res_desired_val, res_actual_val) class BatchOpsTestFloat32(tf.test.TestCase, _BatchOpsTest): diff --git a/t3f/decompositions.py b/t3f/decompositions.py index 9a2658fe..4d6b0267 100644 --- a/t3f/decompositions.py +++ b/t3f/decompositions.py @@ -1,5 +1,5 @@ import numpy as np -import tensorflow.compat.v1 as tf +import tensorflow as tf from t3f.tensor_train import TensorTrain from t3f.tensor_train_batch import TensorTrainBatch @@ -57,7 +57,7 @@ def to_tt_matrix(mat, shape, max_tt_rank=10, epsilon=None, not a vector of length d + 1 where d is the number of dimensions (rank) of the input tensor, if epsilon is less than 0. """ - with tf.name_scope(name, values=(mat,)): + with tf.name_scope(name): mat = tf.convert_to_tensor(mat) # In case the shape is immutable. shape = list(shape) @@ -79,14 +79,14 @@ def to_tt_matrix(mat, shape, max_tt_rank=10, epsilon=None, tens = tf.reshape(tens, new_shape) tt_tens = to_tt_tensor(tens, max_tt_rank, epsilon) tt_cores = [] - static_tt_ranks = tt_tens.get_tt_ranks() + static_tt_ranks = tt_tens.get_tt_ranks().as_list() dynamic_tt_ranks = shapes.tt_ranks(tt_tens) for core_idx in range(d): curr_core = tt_tens.tt_cores[core_idx] - curr_rank = static_tt_ranks[core_idx].value + curr_rank = static_tt_ranks[core_idx] if curr_rank is None: curr_rank = dynamic_tt_ranks[core_idx] - next_rank = static_tt_ranks[core_idx + 1].value + next_rank = static_tt_ranks[core_idx + 1] if next_rank is None: next_rank = dynamic_tt_ranks[core_idx + 1] curr_core_new_shape = (curr_rank, shape[0, core_idx], @@ -137,9 +137,9 @@ def to_tt_tensor(tens, max_tt_rank=10, epsilon=None, and not a vector of length d + 1 where d is the number of dimensions (rank) of the input tensor, if epsilon is less than 0. 
""" - with tf.name_scope(name, values=(tens,)): + with tf.name_scope(name): tens = tf.convert_to_tensor(tens) - static_shape = tens.get_shape() + static_shape = tens.shape.as_list() dynamic_shape = tf.shape(tens) # Raises ValueError if ndims is not defined. d = static_shape.__len__() @@ -158,15 +158,15 @@ def to_tt_tensor(tens, max_tt_rank=10, epsilon=None, tt_cores = [] are_tt_ranks_defined = True for core_idx in range(d - 1): - curr_mode = static_shape[core_idx].value + curr_mode = static_shape[core_idx] if curr_mode is None: curr_mode = dynamic_shape[core_idx] rows = ranks[core_idx] * curr_mode tens = tf.reshape(tens, [rows, -1]) - columns = tens.get_shape()[1].value + columns = tens.get_shape()[1] if columns is None: columns = tf.shape(tens)[1] - s, u, v = tf.svd(tens, full_matrices=False) + s, u, v = tf.linalg.svd(tens, full_matrices=False) if max_tt_rank[core_idx + 1] == 1: ranks[core_idx + 1] = 1 else: @@ -183,8 +183,8 @@ def to_tt_tensor(tens, max_tt_rank=10, epsilon=None, v = v[:, 0:ranks[core_idx + 1]] core_shape = (ranks[core_idx], curr_mode, ranks[core_idx + 1]) tt_cores.append(tf.reshape(u, core_shape)) - tens = tf.matmul(tf.diag(s), tf.transpose(v)) - last_mode = static_shape[-1].value + tens = tf.matmul(tf.linalg.diag(s), tf.transpose(v)) + last_mode = static_shape[-1] if last_mode is None: last_mode = dynamic_shape[-1] core_shape = (ranks[d - 1], last_mode, ranks[d]) @@ -234,7 +234,7 @@ def round(tt, max_tt_rank=None, epsilon=None, name='t3f_round'): the input tensor, if epsilon is less than 0. """ # TODO: add epsilon to the name_scope dependencies. - with tf.name_scope(name, values=tt.tt_cores): + with tf.name_scope(name): if isinstance(tt, TensorTrainBatch): return _round_batch_tt(tt, max_tt_rank, epsilon) else: @@ -277,7 +277,7 @@ def _round_tt(tt, max_tt_rank, epsilon): columns = curr_mode * ranks[core_idx + 1] curr_core = tf.reshape(curr_core, [-1, columns]) - rows = curr_core.get_shape()[0].value + rows = curr_core.shape.as_list()[0] if rows is None: rows = tf.shape(curr_core)[0] if max_tt_rank[core_idx] == 1: @@ -291,7 +291,7 @@ def _round_tt(tt, max_tt_rank, epsilon): min_dim = tf.minimum(rows, columns) ranks[core_idx] = tf.minimum(max_tt_rank[core_idx], min_dim) are_tt_ranks_defined = False - s, u, v = tf.svd(curr_core, full_matrices=False) + s, u, v = tf.linalg.svd(curr_core, full_matrices=False) u = u[:, 0:ranks[core_idx]] s = s[0:ranks[core_idx]] v = v[:, 0:ranks[core_idx]] @@ -304,7 +304,7 @@ def _round_tt(tt, max_tt_rank, epsilon): prev_core_shape = (-1, rows) tt_cores[core_idx - 1] = tf.reshape(tt_cores[core_idx - 1], prev_core_shape) tt_cores[core_idx - 1] = tf.matmul(tt_cores[core_idx - 1], u) - tt_cores[core_idx - 1] = tf.matmul(tt_cores[core_idx - 1], tf.diag(s)) + tt_cores[core_idx - 1] = tf.matmul(tt_cores[core_idx - 1], tf.linalg.diag(s)) if tt.is_tt_matrix(): core_shape = (ranks[0], raw_shape[0][0], raw_shape[1][0], ranks[1]) @@ -353,7 +353,7 @@ def _round_batch_tt(tt, max_tt_rank, epsilon): columns = curr_mode * ranks[core_idx + 1] curr_core = tf.reshape(curr_core, (batch_size, -1, columns)) - rows = curr_core.get_shape()[1].value + rows = curr_core.shape.as_list()[1] if rows is None: rows = tf.shape(curr_core)[1] if max_tt_rank[core_idx] == 1: @@ -367,7 +367,7 @@ def _round_batch_tt(tt, max_tt_rank, epsilon): min_dim = tf.minimum(rows, columns) ranks[core_idx] = tf.minimum(max_tt_rank[core_idx], min_dim) are_tt_ranks_defined = False - s, u, v = tf.svd(curr_core, full_matrices=False) + s, u, v = tf.linalg.svd(curr_core, full_matrices=False) u = u[:, 
:, 0:ranks[core_idx]] s = s[:, 0:ranks[core_idx]] v = v[:, :, 0:ranks[core_idx]] @@ -380,7 +380,7 @@ def _round_batch_tt(tt, max_tt_rank, epsilon): prev_core_shape = (batch_size, -1, rows) tt_cores[core_idx - 1] = tf.reshape(tt_cores[core_idx - 1], prev_core_shape) tt_cores[core_idx - 1] = tf.matmul(tt_cores[core_idx - 1], u) - tt_cores[core_idx - 1] = tf.matmul(tt_cores[core_idx - 1], tf.matrix_diag(s)) + tt_cores[core_idx - 1] = tf.matmul(tt_cores[core_idx - 1], tf.linalg.diag(s)) if tt.is_tt_matrix(): core_shape = (batch_size, ranks[0], raw_shape[0][0], raw_shape[1][0], ranks[1]) @@ -404,7 +404,7 @@ def orthogonalize_tt_cores(tt, left_to_right=True, Returns: The same type as the input `tt` (TenosorTrain or a TensorTrainBatch). """ - with tf.name_scope(name, values=tt.tt_cores): + with tf.name_scope(name): if isinstance(tt, TensorTrainBatch): if left_to_right: return _orthogonalize_batch_tt_cores_left_to_right(tt) @@ -460,7 +460,7 @@ def _orthogonalize_tt_cores_left_to_right(tt): qr_shape = (curr_rank * curr_mode, next_rank) curr_core = tf.reshape(curr_core, qr_shape) - curr_core, triang = tf.qr(curr_core) + curr_core, triang = tf.linalg.qr(curr_core) if triang.get_shape().is_fully_defined(): triang_shape = triang.get_shape().as_list() else: @@ -521,7 +521,7 @@ def _orthogonalize_batch_tt_cores_left_to_right(tt): qr_shape = (batch_size, curr_rank * curr_mode, next_rank) curr_core = tf.reshape(curr_core, qr_shape) - curr_core, triang = tf.qr(curr_core) + curr_core, triang = tf.linalg.qr(curr_core) if triang.get_shape().is_fully_defined(): triang_shape = triang.get_shape().as_list() else: @@ -583,7 +583,7 @@ def _orthogonalize_tt_cores_right_to_left(tt): qr_shape = (prev_rank, curr_mode * curr_rank) curr_core = tf.reshape(curr_core, qr_shape) - curr_core, triang = tf.qr(tf.transpose(curr_core)) + curr_core, triang = tf.linalg.qr(tf.transpose(curr_core)) curr_core = tf.transpose(curr_core) triang = tf.transpose(triang) if triang.get_shape().is_fully_defined(): diff --git a/t3f/decompositions_no_eager_test.py b/t3f/decompositions_no_eager_test.py new file mode 100644 index 00000000..4374c3e3 --- /dev/null +++ b/t3f/decompositions_no_eager_test.py @@ -0,0 +1,43 @@ +# Graph mode tests. +import numpy as np +import tensorflow.compat.v1 as tf +tf.disable_eager_execution() + +from t3f import ops +from t3f import shapes +from t3f import decompositions + + +class _DecompositionsTest(): + + def testTTTensor(self): + # Test that a tensor of ones and of zeros can be converted into TT with + # TT-rank 1. + shape = (2, 1, 4, 3) + tens_arr = (np.zeros(shape).astype(self.dtype.as_numpy_dtype), + np.ones(shape).astype(self.dtype.as_numpy_dtype)) + for tens in tens_arr: + with tf.Session() as sess: + tf_tens = tf.constant(tens) + tt_tens = decompositions.to_tt_tensor(tf_tens, max_tt_rank=1) + static_tt_ranks = tt_tens.get_tt_ranks().as_list() + + # Try to decompose the same tensor with unknown shape. 
+ tf_tens_pl = tf.placeholder(self.dtype, (None, None, None, None)) + tt_tens = decompositions.to_tt_tensor(tf_tens_pl, max_tt_rank=1) + tt_val = ops.full(tt_tens).eval({tf_tens_pl: tens}) + self.assertAllClose(tens, tt_val) + dynamic_tt_ranks = shapes.tt_ranks(tt_tens).eval({tf_tens_pl: tens}) + self.assertAllEqual(dynamic_tt_ranks, static_tt_ranks) + + +class DecompositionsTestFloat32(tf.test.TestCase, _DecompositionsTest): + dtype = tf.float32 + + +class DecompositionsTestFloat64(tf.test.TestCase, _DecompositionsTest): + dtype = tf.float64 + + +if __name__ == "__main__": + tf.test.main() diff --git a/t3f/decompositions_test.py b/t3f/decompositions_test.py index bc3bb510..4f49d20b 100644 --- a/t3f/decompositions_test.py +++ b/t3f/decompositions_test.py @@ -1,5 +1,6 @@ import numpy as np -import tensorflow.compat.v1 as tf +import tensorflow as tf +tf.compat.v1.enable_eager_execution() from t3f import ops from t3f import shapes @@ -15,19 +16,10 @@ def testTTTensor(self): tens = np.random.rand(*shape).astype(self.dtype.as_numpy_dtype) tf_tens = tf.constant(tens) tt_tens = decompositions.to_tt_tensor(tf_tens, max_tt_rank=3) - with self.test_session(): - self.assertAllClose(tens, ops.full(tt_tens).eval()) - dynamic_tt_ranks = shapes.tt_ranks(tt_tens).eval() - static_tt_ranks = tt_tens.get_tt_ranks().as_list() - self.assertAllEqual(dynamic_tt_ranks, static_tt_ranks) - - # Try to decompose the same tensor with unknown shape. - tf_tens_pl = tf.placeholder(self.dtype, (None, None, 4, None)) - tt_tens = decompositions.to_tt_tensor(tf_tens_pl, max_tt_rank=3) - tt_val = ops.full(tt_tens).eval({tf_tens_pl: tens}) - self.assertAllClose(tens, tt_val) - dynamic_tt_ranks = shapes.tt_ranks(tt_tens).eval({tf_tens_pl: tens}) - self.assertAllEqual(dynamic_tt_ranks, static_tt_ranks) + self.assertAllClose(tens, self.evaluate(ops.full(tt_tens))) + dynamic_tt_ranks = self.evaluate(shapes.tt_ranks(tt_tens)) + static_tt_ranks = tt_tens.get_tt_ranks().as_list() + self.assertAllEqual(dynamic_tt_ranks, static_tt_ranks) def testTTTensorSimple(self): # Test that a tensor of ones and of zeros can be converted into TT with @@ -38,19 +30,10 @@ def testTTTensorSimple(self): for tens in tens_arr: tf_tens = tf.constant(tens) tt_tens = decompositions.to_tt_tensor(tf_tens, max_tt_rank=1) - with self.test_session(): - self.assertAllClose(tens, ops.full(tt_tens).eval()) - dynamic_tt_ranks = shapes.tt_ranks(tt_tens).eval() - static_tt_ranks = tt_tens.get_tt_ranks().as_list() - self.assertAllEqual(dynamic_tt_ranks, static_tt_ranks) - - # Try to decompose the same tensor with unknown shape. 
- tf_tens_pl = tf.placeholder(self.dtype, (None, None, None, None)) - tt_tens = decompositions.to_tt_tensor(tf_tens_pl, max_tt_rank=1) - tt_val = ops.full(tt_tens).eval({tf_tens_pl: tens}) - self.assertAllClose(tens, tt_val) - dynamic_tt_ranks = shapes.tt_ranks(tt_tens).eval({tf_tens_pl: tens}) - self.assertAllEqual(dynamic_tt_ranks, static_tt_ranks) + self.assertAllClose(tens, self.evaluate(ops.full(tt_tens))) + dynamic_tt_ranks = self.evaluate(shapes.tt_ranks(tt_tens)) + static_tt_ranks = tt_tens.get_tt_ranks().as_list() + self.assertAllEqual(dynamic_tt_ranks, static_tt_ranks) def testTTVector(self): vec_shape = (2, 1, 4, 3) @@ -59,8 +42,7 @@ def testTTVector(self): vec = np.random.rand(rows, 1).astype(self.dtype.as_numpy_dtype) tf_vec = tf.constant(vec) tt_vec = decompositions.to_tt_matrix(tf_vec, (vec_shape, None)) - with self.test_session(): - self.assertAllClose(vec, ops.full(tt_vec).eval()) + self.assertAllClose(vec, self.evaluate(ops.full(tt_vec))) def testTTCompositeRankTensor(self): # Test if a composite rank (list of ranks) can be used for decomposition @@ -71,8 +53,7 @@ def testTTCompositeRankTensor(self): tt_ranks = [1, 2, 3, 3, 1] tt_tensor = decompositions.to_tt_tensor(tf_tensor, max_tt_rank=tt_ranks) - with self.test_session(): - self.assertAllClose(np_tensor, ops.full(tt_tensor).eval()) + self.assertAllClose(np_tensor, self.evaluate(ops.full(tt_tensor))) def testTTCompositeRankMatrix(self): # Test if a composite rank (list of ranks) can be used for decomposition @@ -86,9 +67,8 @@ def testTTCompositeRankMatrix(self): tt_ranks = [10, 20, 30, 40, 30] tt_mat = decompositions.to_tt_matrix(tf_mat, (out_shape, inp_shape), max_tt_rank=tt_ranks) - with self.test_session(): - self.assertAllClose(mat, ops.full(tt_mat).eval(), atol=1e-5, rtol=1e-5) - + self.assertAllClose(mat, self.evaluate(ops.full(tt_mat)), atol=1e-5, rtol=1e-5) + def testTTMatrix(self): # Convert a np.prod(out_shape) x np.prod(in_shape) matrix into TT-matrix # and back. @@ -100,9 +80,8 @@ def testTTMatrix(self): tf_mat = tf.constant(mat) tt_mat = decompositions.to_tt_matrix(tf_mat, (out_shape, inp_shape), max_tt_rank=90) - with self.test_session(): - # TODO: why so bad accuracy? - self.assertAllClose(mat, ops.full(tt_mat).eval(), atol=1e-5, rtol=1e-5) + # TODO: why so bad accuracy? + self.assertAllClose(mat, self.evaluate(ops.full(tt_mat)), atol=1e-5, rtol=1e-5) def testRoundTensor(self): shape = (2, 1, 4, 3, 3) @@ -110,13 +89,12 @@ def testRoundTensor(self): tens = initializers.random_tensor(shape, tt_rank=15, dtype=self.dtype) rounded_tens = decompositions.round(tens, max_tt_rank=9) - with self.test_session() as sess: - vars = [ops.full(tens), ops.full(rounded_tens)] - tens_value, rounded_tens_value = sess.run(vars) - # TODO: why so bad accuracy? - self.assertAllClose(tens_value, rounded_tens_value, atol=1e-4, rtol=1e-4) - dynamic_tt_ranks = shapes.tt_ranks(rounded_tens).eval() - self.assertAllEqual([1, 2, 2, 8, 3, 1], dynamic_tt_ranks) + vars = [ops.full(tens), ops.full(rounded_tens)] + tens_value, rounded_tens_value = self.evaluate(vars) + # TODO: why so bad accuracy? 
+ self.assertAllClose(tens_value, rounded_tens_value, atol=1e-4, rtol=1e-4) + dynamic_tt_ranks = self.evaluate(shapes.tt_ranks(rounded_tens)) + self.assertAllEqual([1, 2, 2, 8, 3, 1], dynamic_tt_ranks) def testOrthogonalizeLeftToRight(self): shape = (2, 4, 3, 3) @@ -125,20 +103,19 @@ def testOrthogonalizeLeftToRight(self): tens = initializers.random_tensor(shape, tt_rank=tt_ranks, dtype=self.dtype) orthogonal = decompositions.orthogonalize_tt_cores(tens) - with self.test_session() as sess: - tens_val, orthogonal_val = sess.run([ops.full(tens), ops.full(orthogonal)]) - self.assertAllClose(tens_val, orthogonal_val, atol=1e-5, rtol=1e-5) - dynamic_tt_ranks = shapes.tt_ranks(orthogonal).eval() - self.assertAllEqual(updated_tt_ranks, dynamic_tt_ranks) - # Check that the TT-cores are orthogonal. - for core_idx in range(4 - 1): - core = orthogonal.tt_cores[core_idx] - core = tf.reshape(core, (updated_tt_ranks[core_idx] * shape[core_idx], - updated_tt_ranks[core_idx + 1])) - should_be_eye = tf.matmul(tf.transpose(core), core) - should_be_eye_val = sess.run(should_be_eye) - self.assertAllClose(np.eye(updated_tt_ranks[core_idx + 1]), - should_be_eye_val) + tens_val, orthogonal_val = self.evaluate([ops.full(tens), ops.full(orthogonal)]) + self.assertAllClose(tens_val, orthogonal_val, atol=1e-5, rtol=1e-5) + dynamic_tt_ranks = self.evaluate(shapes.tt_ranks(orthogonal)) + self.assertAllEqual(updated_tt_ranks, dynamic_tt_ranks) + # Check that the TT-cores are orthogonal. + for core_idx in range(4 - 1): + core = orthogonal.tt_cores[core_idx] + core = tf.reshape(core, (updated_tt_ranks[core_idx] * shape[core_idx], + updated_tt_ranks[core_idx + 1])) + should_be_eye = tf.matmul(tf.transpose(core), core) + should_be_eye_val = self.evaluate(should_be_eye) + self.assertAllClose(np.eye(updated_tt_ranks[core_idx + 1]), + should_be_eye_val) def testOrthogonalizeRightToLeft(self): shape = (2, 4, 3, 3) @@ -147,20 +124,19 @@ def testOrthogonalizeRightToLeft(self): tens = initializers.random_tensor(shape, tt_rank=tt_ranks, dtype=self.dtype) orthogonal = decompositions.orthogonalize_tt_cores(tens, left_to_right=False) - with self.test_session() as sess: - tens_val, orthogonal_val = sess.run([ops.full(tens), ops.full(orthogonal)]) - self.assertAllClose(tens_val, orthogonal_val, atol=1e-5, rtol=1e-5) - dynamic_tt_ranks = shapes.tt_ranks(orthogonal).eval() - self.assertAllEqual(updated_tt_ranks, dynamic_tt_ranks) - # Check that the TT-cores are orthogonal. - for core_idx in range(1, 4): - core = orthogonal.tt_cores[core_idx] - core = tf.reshape(core, (updated_tt_ranks[core_idx], shape[core_idx] * - updated_tt_ranks[core_idx + 1])) - should_be_eye = tf.matmul(core, tf.transpose(core)) - should_be_eye_val = sess.run(should_be_eye) - self.assertAllClose(np.eye(updated_tt_ranks[core_idx]), - should_be_eye_val) + tens_val, orthogonal_val = self.evaluate([ops.full(tens), ops.full(orthogonal)]) + self.assertAllClose(tens_val, orthogonal_val, atol=1e-5, rtol=1e-5) + dynamic_tt_ranks = self.evaluate(shapes.tt_ranks(orthogonal)) + self.assertAllEqual(updated_tt_ranks, dynamic_tt_ranks) + # Check that the TT-cores are orthogonal. 
+ for core_idx in range(1, 4): + core = orthogonal.tt_cores[core_idx] + core = tf.reshape(core, (updated_tt_ranks[core_idx], shape[core_idx] * + updated_tt_ranks[core_idx + 1])) + should_be_eye = tf.matmul(core, tf.transpose(core)) + should_be_eye_val = self.evaluate(should_be_eye) + self.assertAllClose(np.eye(updated_tt_ranks[core_idx]), + should_be_eye_val) class _DecompositionsBatchTest(): @@ -172,35 +148,33 @@ def testOrthogonalizeLeftToRight(self): tens = initializers.random_tensor_batch(shape, tt_rank=tt_ranks, batch_size=2, dtype=self.dtype) orthogonal = decompositions.orthogonalize_tt_cores(tens) - with self.test_session() as sess: - tens_val, orthogonal_val = sess.run([ops.full(tens), ops.full(orthogonal)]) - self.assertAllClose(tens_val, orthogonal_val, atol=1e-5, rtol=1e-5) - dynamic_tt_ranks = shapes.tt_ranks(orthogonal).eval() - self.assertAllEqual(updated_tt_ranks, dynamic_tt_ranks) - # Check that the TT-cores are orthogonal. - for core_idx in range(4 - 1): - core_shape = (updated_tt_ranks[core_idx] * shape[core_idx], - updated_tt_ranks[core_idx + 1]) - for i in range(2): - core = tf.reshape(orthogonal.tt_cores[core_idx][i], core_shape) - should_be_eye = tf.matmul(tf.transpose(core), core) - should_be_eye_val = sess.run(should_be_eye) - self.assertAllClose(np.eye(updated_tt_ranks[core_idx + 1]), - should_be_eye_val) + tens_val, orthogonal_val = self.evaluate([ops.full(tens), ops.full(orthogonal)]) + self.assertAllClose(tens_val, orthogonal_val, atol=1e-5, rtol=1e-5) + dynamic_tt_ranks = self.evaluate(shapes.tt_ranks(orthogonal)) + self.assertAllEqual(updated_tt_ranks, dynamic_tt_ranks) + # Check that the TT-cores are orthogonal. + for core_idx in range(4 - 1): + core_shape = (updated_tt_ranks[core_idx] * shape[core_idx], + updated_tt_ranks[core_idx + 1]) + for i in range(2): + core = tf.reshape(orthogonal.tt_cores[core_idx][i], core_shape) + should_be_eye = tf.matmul(tf.transpose(core), core) + should_be_eye_val = self.evaluate(should_be_eye) + self.assertAllClose(np.eye(updated_tt_ranks[core_idx + 1]), + should_be_eye_val) def testRoundTensor(self): shape = (2, 1, 4, 3, 3) tens = initializers.random_tensor_batch(shape, tt_rank=15, batch_size=3, dtype=self.dtype) rounded_tens = decompositions.round(tens, max_tt_rank=9) - with self.test_session() as sess: - vars = [ops.full(tens), ops.full(rounded_tens)] - tens_value, rounded_tens_value = sess.run(vars) - # TODO: why so bad accuracy? - self.assertAllClose(tens_value, rounded_tens_value, atol=1e-4, - rtol=1e-4) - dynamic_tt_ranks = shapes.tt_ranks(rounded_tens).eval() - self.assertAllEqual([1, 2, 2, 8, 3, 1], dynamic_tt_ranks) + vars = [ops.full(tens), ops.full(rounded_tens)] + tens_value, rounded_tens_value = self.evaluate(vars) + # TODO: why so bad accuracy? + self.assertAllClose(tens_value, rounded_tens_value, atol=1e-4, + rtol=1e-4) + dynamic_tt_ranks = self.evaluate(shapes.tt_ranks(rounded_tens)) + self.assertAllEqual([1, 2, 2, 8, 3, 1], dynamic_tt_ranks) class DecompositionsTestFloat32(tf.test.TestCase, _DecompositionsTest): diff --git a/t3f/examples_tests.py b/t3f/examples_tests.py deleted file mode 100644 index b5c0b1de..00000000 --- a/t3f/examples_tests.py +++ /dev/null @@ -1,43 +0,0 @@ -"""Tests from the README examples and the paper.""" - -import tensorflow.compat.v1 as tf -import t3f - -class ExamplesTest(tf.test.TestCase): - - def testMainReadme(self): - # Just check that the readme examples do not raise exceptions. - # Create a random tensor of shape (3, 2, 2). 
- a = t3f.random_tensor((3, 2, 2), tt_rank=3) - norm = t3f.frobenius_norm(a) - # Convert TT-tensor into a dense tensor for printing. - a_full = t3f.full(a) - # Run a tensorflow session to run the operations. - with tf.Session() as sess: - # Run the operations. Note that if you run these - # two operations separetly (sess.run(a_full), sess.run(norm)) - # the result will be different, since sess.run will - # generate a new random tensor a on each run because `a' is - # an operation 'generate me a random tensor'. - a_val, norm_val = sess.run([a_full, norm]) - a = t3f.random_tensor((3, 2, 2), tt_rank=3) - b_dense = tf.random_normal((3, 2, 2)) - # Use TT-SVD on b_dense. - b_tt = t3f.to_tt_tensor(b_dense, max_tt_rank=4) - sum_round = t3f.round(t3f.add(a, b_tt), max_tt_rank=2) - # Inner product (sum of products of all elements). - a = t3f.random_tensor((3, 2, 2), tt_rank=3) - b = t3f.random_tensor((3, 2, 2), tt_rank=4) - inner_prod = t3f.flat_inner(a, b) - A = t3f.random_matrix(((3, 2, 2), (2, 3, 3)), tt_rank=3) - b = t3f.random_matrix(((2, 3, 3), None), tt_rank=3) - # Matrix-by-vector - matvec = t3f.matmul(A, b) - - # Matrix-by-dense matrix - b_dense = tf.random_normal((18, 1)) - matvec2 = t3f.matmul(A, b_dense) - - -if __name__ == "__main__": - tf.test.main() diff --git a/t3f/initializers.py b/t3f/initializers.py index a840877f..59c3e971 100644 --- a/t3f/initializers.py +++ b/t3f/initializers.py @@ -1,5 +1,5 @@ import numpy as np -import tensorflow.compat.v1 as tf +import tensorflow as tf from t3f.tensor_train import TensorTrain from t3f.tensor_train_batch import TensorTrainBatch @@ -270,7 +270,7 @@ def tensor_with_random_cores(shape, tt_rank=2, mean=0., stddev=1., with tf.name_scope(name): for i in range(num_dims): curr_core_shape = (tt_rank[i], shape[i], tt_rank[i + 1]) - tt_cores[i] = tf.random_normal(curr_core_shape, mean=mean, stddev=stddev, + tt_cores[i] = tf.random.normal(curr_core_shape, mean=mean, stddev=stddev, dtype=dtype) return TensorTrain(tt_cores, shape, tt_rank) @@ -312,7 +312,7 @@ def tensor_batch_with_random_cores(shape, tt_rank=2, batch_size=1, with tf.name_scope(name): for i in range(num_dims): curr_core_shape = (batch_size, tt_rank[i], shape[i], tt_rank[i + 1]) - tt_cores[i] = tf.random_normal(curr_core_shape, mean=mean, stddev=stddev, + tt_cores[i] = tf.random.normal(curr_core_shape, mean=mean, stddev=stddev, dtype=dtype) return TensorTrainBatch(tt_cores, shape, tt_rank, batch_size) @@ -368,7 +368,7 @@ def matrix_with_random_cores(shape, tt_rank=2, mean=0., stddev=1., for i in range(num_dims): curr_core_shape = (tt_rank[i], shape[0][i], shape[1][i], tt_rank[i + 1]) - tt_cores[i] = tf.random_normal(curr_core_shape, mean=mean, stddev=stddev, + tt_cores[i] = tf.random.normal(curr_core_shape, mean=mean, stddev=stddev, dtype=dtype) return TensorTrain(tt_cores, shape, tt_rank) @@ -426,7 +426,7 @@ def matrix_batch_with_random_cores(shape, tt_rank=2, batch_size=1, for i in range(num_dims): curr_core_shape = (batch_size, tt_rank[i], shape[0][i], shape[1][i], tt_rank[i + 1]) - tt_cores[i] = tf.random_normal(curr_core_shape, mean=mean, stddev=stddev, + tt_cores[i] = tf.random.normal(curr_core_shape, mean=mean, stddev=stddev, dtype=dtype) return TensorTrainBatch(tt_cores, shape, tt_rank, batch_size) diff --git a/t3f/initializers_test.py b/t3f/initializers_test.py index 0b474830..47cac5ff 100644 --- a/t3f/initializers_test.py +++ b/t3f/initializers_test.py @@ -1,5 +1,6 @@ import numpy as np -import tensorflow.compat.v1 as tf +import tensorflow as tf 
+tf.compat.v1.enable_eager_execution() from t3f import initializers from t3f import ops @@ -13,13 +14,12 @@ def testTensorOnesAndZeros(self): ones_desired = np.ones((2, 3, 4), dtype=self.dtype.as_numpy_dtype) zeros_desired = np.zeros((2, 3, 4), dtype=self.dtype.as_numpy_dtype) - with self.test_session() as sess: - tt_ones_full = sess.run(ops.full(tt_ones)) - tt_zeros_full = sess.run(ops.full(tt_zeros)) - self.assertAllClose(tt_ones_full, ones_desired) - self.assertEqual(tt_ones_full.dtype, ones_desired.dtype) - self.assertAllClose(tt_zeros_full, zeros_desired) - self.assertEqual(tt_zeros_full.dtype, zeros_desired.dtype) + tt_ones_full = self.evaluate(ops.full(tt_ones)) + tt_zeros_full = self.evaluate(ops.full(tt_zeros)) + self.assertAllClose(tt_ones_full, ones_desired) + self.assertEqual(tt_ones_full.dtype, ones_desired.dtype) + self.assertAllClose(tt_zeros_full, zeros_desired) + self.assertEqual(tt_zeros_full.dtype, zeros_desired.dtype) bad_shapes = [[[2, 3]], [-1, 3], [0.1, 4]] for shape in bad_shapes: with self.assertRaises(ValueError): @@ -38,13 +38,12 @@ def testMatrixOnesAndZeros(self): bad_shapes = [[[-1, 2, 3], [3, 4, 6]], [[1.5, 2, 4], [2, 5, 6]], [[1], [2, 3]], [2, 3, 4]] - with self.test_session() as sess: - tt_ones_full = sess.run(ops.full(tt_ones)) - tt_zeros_full = sess.run(ops.full(tt_zeros)) - self.assertAllClose(tt_ones_full, ones_desired) - self.assertEqual(tt_ones_full.dtype, ones_desired.dtype) - self.assertAllClose(tt_zeros_full, zeros_desired) - self.assertEqual(tt_zeros_full.dtype, zeros_desired.dtype) + tt_ones_full = self.evaluate(ops.full(tt_ones)) + tt_zeros_full = self.evaluate(ops.full(tt_zeros)) + self.assertAllClose(tt_ones_full, ones_desired) + self.assertEqual(tt_ones_full.dtype, ones_desired.dtype) + self.assertAllClose(tt_zeros_full, zeros_desired) + self.assertEqual(tt_zeros_full.dtype, zeros_desired.dtype) for shape in bad_shapes: with self.assertRaises(ValueError): initializers.matrix_ones(shape) @@ -54,9 +53,8 @@ def testMatrixOnesAndZeros(self): def testEye(self): tt_eye = initializers.eye([4, 5, 6], dtype=self.dtype) eye_desired = np.eye(120) - with self.test_session() as sess: - eye_full = sess.run(ops.full(tt_eye)) - self.assertAllClose(eye_full, eye_desired) + eye_full = self.evaluate(ops.full(tt_eye)) + self.assertAllClose(eye_full, eye_desired) bad_shapes = [[[2, 3]], [-1, 3], [0.1, 4]] for shape in bad_shapes: with self.assertRaises(ValueError): @@ -67,12 +65,11 @@ def testOnesLikeAndZerosLike(self): b = initializers.ones_like(a) c = initializers.zeros_like(a) var_list = [ops.full(b), ops.full(c)] - with self.test_session() as sess: - bf, cf = sess.run(var_list) - self.assertAllClose(bf, np.ones((2, 3, 4))) - self.assertEqual(self.dtype.as_numpy_dtype, bf.dtype) - self.assertAllClose(cf, np.zeros((2, 3, 4))) - self.assertEqual(self.dtype.as_numpy_dtype, cf.dtype) + bf, cf = self.evaluate(var_list) + self.assertAllClose(bf, np.ones((2, 3, 4))) + self.assertEqual(self.dtype.as_numpy_dtype, bf.dtype) + self.assertAllClose(cf, np.zeros((2, 3, 4))) + self.assertEqual(self.dtype.as_numpy_dtype, cf.dtype) with self.assertRaises(ValueError): initializers.ones_like(1) with self.assertRaises(ValueError): diff --git a/t3f/kronecker.py b/t3f/kronecker.py index 97e361f3..e1f4e2dd 100644 --- a/t3f/kronecker.py +++ b/t3f/kronecker.py @@ -1,4 +1,4 @@ -import tensorflow.compat.v1 as tf +import tensorflow as tf from t3f.tensor_train import TensorTrain from t3f.tensor_train_batch import TensorTrainBatch @@ -30,11 +30,11 @@ def determinant(kron_a, 
name='t3f_kronecker_determinant'): shapes_defined = kron_a.get_shape().is_fully_defined() if shapes_defined: - i_shapes = kron_a.get_raw_shape()[0] - j_shapes = kron_a.get_raw_shape()[1] + i_shapes = kron_a.get_raw_shape()[0].as_list() + j_shapes = kron_a.get_raw_shape()[1].as_list() else: - i_shapes = ops.raw_shape(kron_a)[0] - j_shapes = ops.raw_shape(kron_a)[1] + i_shapes = ops.raw_shape(kron_a)[0].as_list() + j_shapes = ops.raw_shape(kron_a)[1].as_list() if shapes_defined: if i_shapes != j_shapes: @@ -42,17 +42,17 @@ def determinant(kron_a, name='t3f_kronecker_determinant'): 'matrices (tt-cores must be square)') is_batch = isinstance(kron_a, TensorTrainBatch) - with tf.name_scope(name, values=kron_a.tt_cores): + with tf.name_scope(name): pows = tf.cast(tf.reduce_prod(i_shapes), kron_a.dtype) cores = kron_a.tt_cores det = 1 for core_idx in range(kron_a.ndims()): core = cores[core_idx] if is_batch: - core_det = tf.matrix_determinant(core[:, 0, :, :, 0]) + core_det = tf.linalg.det(core[:, 0, :, :, 0]) else: - core_det = tf.matrix_determinant(core[0, :, :, 0]) - core_pow = pows / i_shapes[core_idx].value + core_det = tf.linalg.det(core[0, :, :, 0]) + core_pow = pows / i_shapes[core_idx] det *= tf.pow(core_det, core_pow) return det @@ -83,11 +83,11 @@ def slog_determinant(kron_a, name='t3f_kronecker_slog_determinant'): shapes_defined = kron_a.get_shape().is_fully_defined() if shapes_defined: - i_shapes = kron_a.get_raw_shape()[0] - j_shapes = kron_a.get_raw_shape()[1] + i_shapes = kron_a.get_raw_shape()[0].as_list() + j_shapes = kron_a.get_raw_shape()[1].as_list() else: - i_shapes = ops.raw_shape(kron_a)[0] - j_shapes = ops.raw_shape(kron_a)[1] + i_shapes = ops.raw_shape(kron_a)[0].as_list() + j_shapes = ops.raw_shape(kron_a)[1].as_list() if shapes_defined: if i_shapes != j_shapes: @@ -95,7 +95,7 @@ def slog_determinant(kron_a, name='t3f_kronecker_slog_determinant'): 'matrices (tt-cores must be square)') is_batch = isinstance(kron_a, TensorTrainBatch) - with tf.name_scope(name, values=kron_a.tt_cores): + with tf.name_scope(name): pows = tf.cast(tf.reduce_prod(i_shapes), kron_a.dtype) logdet = 0. det_sign = 1. 
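The identity behind determinant and slog_determinant above (a worked note; A and B below are illustrative dense matrices, not t3f objects): for square factors, det(A ⊗ B) = det(A)^m * det(B)^n with A of size n x n and B of size m x m, and more generally det(⊗_k A_k) = ∏_k det(A_k)^(N / n_k) with N = ∏_k n_k. That quotient N / n_k is exactly the core_pow = pows / i_shapes[core_idx] exponent applied to each core's determinant in the loop.

# Numeric check of the Kronecker determinant identity.
import numpy as np

n, m = 2, 3
A = np.random.randn(n, n)
B = np.random.randn(m, m)
lhs = np.linalg.det(np.kron(A, B))
rhs = np.linalg.det(A) ** m * np.linalg.det(B) ** n
assert np.allclose(lhs, rhs)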
@@ -103,13 +103,13 @@ def slog_determinant(kron_a, name='t3f_kronecker_slog_determinant'): for core_idx in range(kron_a.ndims()): core = kron_a.tt_cores[core_idx] if is_batch: - core_det = tf.matrix_determinant(core[:, 0, :, :, 0]) + core_det = tf.linalg.det(core[:, 0, :, :, 0]) else: - core_det = tf.matrix_determinant(core[0, :, :, 0]) + core_det = tf.linalg.det(core[0, :, :, 0]) core_abs_det = tf.abs(core_det) core_det_sign = tf.sign(core_det) - core_pow = pows / i_shapes[core_idx].value - logdet += tf.log(core_abs_det) * core_pow + core_pow = pows / i_shapes[core_idx] + logdet += tf.math.log(core_abs_det) * core_pow det_sign *= core_det_sign**(core_pow) return det_sign, logdet @@ -151,15 +151,15 @@ def inv(kron_a, name='t3f_kronecker_inv'): 'matrices (tt-cores must be square)') is_batch = isinstance(kron_a, TensorTrainBatch) - with tf.name_scope(name, values=kron_a.tt_cores): + with tf.name_scope(name): inv_cores = [] for core_idx in range(kron_a.ndims()): core = kron_a.tt_cores[core_idx] if is_batch: - core_inv = tf.matrix_inverse(core[:, 0, :, :, 0]) + core_inv = tf.linalg.inv(core[:, 0, :, :, 0]) core_inv = tf.expand_dims(tf.expand_dims(core_inv, 1), -1) else: - core_inv = tf.matrix_inverse(core[0, :, :, 0]) + core_inv = tf.linalg.inv(core[0, :, :, 0]) core_inv = tf.expand_dims(tf.expand_dims(core_inv, 0), -1) inv_cores.append(core_inv) @@ -209,15 +209,15 @@ def cholesky(kron_a, name='t3f_kronecker_cholesky'): 'matrices (tt-cores must be square)') is_batch = isinstance(kron_a, TensorTrainBatch) - with tf.name_scope(name, values=kron_a.tt_cores): + with tf.name_scope(name): cho_cores = [] for core_idx in range(kron_a.ndims()): core = kron_a.tt_cores[core_idx] if is_batch: - core_cho = tf.cholesky(core[:, 0, :, :, 0]) + core_cho = tf.linalg.cholesky(core[:, 0, :, :, 0]) core_cho = tf.expand_dims(tf.expand_dims(core_cho, 1), -1) else: - core_cho = tf.cholesky(core[0, :, :, 0]) + core_cho = tf.linalg.cholesky(core[0, :, :, 0]) core_cho = tf.expand_dims(tf.expand_dims(core_cho, 0), -1) cho_cores.append(core_cho) diff --git a/t3f/kronecker_test.py b/t3f/kronecker_test.py index 1b5bc13f..6795d1e8 100644 --- a/t3f/kronecker_test.py +++ b/t3f/kronecker_test.py @@ -1,5 +1,6 @@ import numpy as np -import tensorflow.compat.v1 as tf +import tensorflow as tf +tf.compat.v1.enable_eager_execution() from t3f.tensor_train import TensorTrain from t3f.tensor_train_batch import TensorTrainBatch @@ -16,7 +17,7 @@ def testIsKronNonKron(self): dtype=self.dtype) tt_mat = variables.get_variable('tt_mat', initializer=initializer) self.assertFalse(kr._is_kron(tt_mat)) - + def testIsKronKron(self): # Tests _is_kron on a Kronecker matrix initializer = initializers.random_matrix(((2, 3), (3, 2)), tt_rank=1, @@ -29,12 +30,11 @@ def testDet(self): initializer = initializers.random_matrix(((2, 3, 2), (2, 3, 2)), tt_rank=1, dtype=self.dtype) kron_mat = variables.get_variable('kron_mat', initializer=initializer) - init_op = tf.global_variables_initializer() - with self.test_session() as sess: - sess.run(init_op) - desired = np.linalg.det(ops.full(kron_mat).eval()) - actual = kr.determinant(kron_mat).eval() - self.assertAllClose(desired, actual) + init_op = tf.compat.v1.global_variables_initializer() + self.evaluate(init_op) + desired = np.linalg.det(self.evaluate(ops.full(kron_mat))) + actual = self.evaluate(kr.determinant(kron_mat)) + self.assertAllClose(desired, actual) def testSlogDet(self): # Tests the slog_determinant function @@ -42,43 +42,41 @@ def testSlogDet(self): # TODO: use kron and -1 * kron matrices, when 
mul is implemented
     # the current version is platform-dependent
-    tf.set_random_seed(5)  # negative derminant
+    tf.compat.v1.set_random_seed(5)  # negative determinant
     initializer = initializers.random_matrix(((2, 3), (2, 3)), tt_rank=1,
                                              dtype=self.dtype)
     kron_neg = variables.get_variable('kron_neg', initializer=initializer)
-    tf.set_random_seed(1)  # positive determinant
+    tf.compat.v1.set_random_seed(1)  # positive determinant
     initializer = initializers.random_matrix(((2, 3), (2, 3)), tt_rank=1,
                                              dtype=self.dtype)
     kron_pos = variables.get_variable('kron_pos', initializer=initializer)
-    init_op = tf.global_variables_initializer()
-    with self.test_session() as sess:
-      # negative derminant
-      sess.run(init_op)
-      desired_sign, desired_det = np.linalg.slogdet(ops.full(kron_neg).eval())
-      actual_sign, actual_det = sess.run(kr.slog_determinant(kron_neg))
-      self.assertEqual(desired_sign, actual_sign)
-      self.assertAllClose(desired_det, actual_det)
-
-      # positive determinant
-      desired_sign, desired_det = np.linalg.slogdet(ops.full(kron_pos).eval())
-      actual_sign, actual_det = sess.run(kr.slog_determinant(kron_pos))
-      self.assertEqual(desired_sign, actual_sign)
-      self.assertAllClose(desired_det, actual_det)
+    init_op = tf.compat.v1.global_variables_initializer()
+    # negative determinant
+    self.evaluate(init_op)
+    desired_sign, desired_det = np.linalg.slogdet(self.evaluate(ops.full(kron_neg)))
+    actual_sign, actual_det = self.evaluate(kr.slog_determinant(kron_neg))
+    self.assertEqual(desired_sign, actual_sign)
+    self.assertAllClose(desired_det, actual_det)
+
+    # positive determinant
+    desired_sign, desired_det = np.linalg.slogdet(self.evaluate(ops.full(kron_pos)))
+    actual_sign, actual_det = self.evaluate(kr.slog_determinant(kron_pos))
+    self.assertEqual(desired_sign, actual_sign)
+    self.assertAllClose(desired_det, actual_det)

   def testInv(self):
     # Tests the inv function
     initializer = initializers.random_matrix(((2, 3, 2), (2, 3, 2)), tt_rank=1,
                                              dtype=self.dtype)
     kron_mat = variables.get_variable('kron_mat', initializer=initializer)
-    init_op = tf.global_variables_initializer()
-    with self.test_session() as sess:
-      sess.run(init_op)
-      desired = np.linalg.inv(ops.full(kron_mat).eval())
-      actual = ops.full(kr.inv(kron_mat)).eval()
-      self.assertAllClose(desired, actual)
-
+    init_op = tf.compat.v1.global_variables_initializer()
+    self.evaluate(init_op)
+    desired = np.linalg.inv(self.evaluate(ops.full(kron_mat)))
+    actual = self.evaluate(ops.full(kr.inv(kron_mat)))
+    self.assertAllClose(desired, actual)
+
   def testCholesky(self):
     # Tests the cholesky function
     np.random.seed(8)
@@ -95,12 +93,11 @@ def testCholesky(self):
                                                K_2[None, :, :, None]],
                                     tt_ranks=7*[1])
     kron_mat = variables.get_variable('kron_mat', initializer=initializer)
-    init_op = tf.global_variables_initializer()
-    with self.test_session() as sess:
-      sess.run(init_op)
-      desired = np.linalg.cholesky(K)
-      actual = ops.full(kr.cholesky(kron_mat)).eval()
-      self.assertAllClose(desired, actual, atol=1e-5, rtol=1e-5)
+    init_op = tf.compat.v1.global_variables_initializer()
+    self.evaluate(init_op)
+    desired = np.linalg.cholesky(K)
+    actual = self.evaluate(ops.full(kr.cholesky(kron_mat)))
+    self.assertAllClose(desired, actual, atol=1e-5, rtol=1e-5)


 class _BatchKroneckerTest():

@@ -112,7 +109,7 @@ def testIsKronNonKron(self):
     tt_mat_batch = variables.get_variable('tt_mat_batch',
                                           initializer=initializer)
     self.assertFalse(kr._is_kron(tt_mat_batch))
-
+
   def testIsKronKron(self):
     # Tests _is_kron on a Kronecker matrix batch
     initializer = initializers.random_matrix_batch(((2, 3), (3, 2)), tt_rank=1,
initializers.random_matrix_batch(((2, 3), (3, 2)), tt_rank=1, @@ -129,32 +126,30 @@ def testDet(self): dtype=self.dtype) kron_mat_batch = variables.get_variable('kron_mat_batch', initializer=initializer) - init_op = tf.global_variables_initializer() - with self.test_session() as sess: - sess.run(init_op) - desired = tf.matrix_determinant(ops.full(kron_mat_batch)).eval() - actual = kr.determinant(kron_mat_batch).eval() - self.assertAllClose(desired, actual) + init_op = tf.compat.v1.global_variables_initializer() + self.evaluate(init_op) + desired = self.evaluate(tf.linalg.det(ops.full(kron_mat_batch))) + actual = self.evaluate(kr.determinant(kron_mat_batch)) + self.assertAllClose(desired, actual) def testSlogDet(self): # Tests the slog_determinant function - tf.set_random_seed(1) # negative and positive determinants + tf.compat.v1.set_random_seed(1) # negative and positive determinants initializer = initializers.random_matrix_batch(((2, 3), (2, 3)), tt_rank=1, batch_size=3, dtype=self.dtype) kron_mat_batch = variables.get_variable('kron_mat_batch', initializer=initializer) - init_op = tf.global_variables_initializer() - with self.test_session() as sess: - # negative derminant - sess.run(init_op) - desired_sign, desired_det = np.linalg.slogdet( - ops.full(kron_mat_batch).eval()) - actual_sign, actual_det = sess.run(kr.slog_determinant(kron_mat_batch)) - self.assertAllEqual(desired_sign, actual_sign) - self.assertAllClose(desired_det, actual_det) + init_op = tf.compat.v1.global_variables_initializer() + # negative determinant + self.evaluate(init_op) + desired_sign, desired_det = np.linalg.slogdet( + self.evaluate(ops.full(kron_mat_batch))) + actual_sign, actual_det = self.evaluate(kr.slog_determinant(kron_mat_batch)) + self.assertAllEqual(desired_sign, actual_sign) + self.assertAllClose(desired_det, actual_det) def testInv(self): # Tests the inv function @@ -163,13 +158,12 @@ def testInv(self): dtype=self.dtype) kron_mat_batch = variables.get_variable('kron_mat_batch', initializer=initializer) - init_op = tf.global_variables_initializer() - with self.test_session() as sess: - sess.run(init_op) - desired = np.linalg.inv(ops.full(kron_mat_batch).eval()) - actual = ops.full(kr.inv(kron_mat_batch)).eval() - self.assertAllClose(desired, actual, atol=1e-4) - + init_op = tf.compat.v1.global_variables_initializer() + self.evaluate(init_op) + desired = np.linalg.inv(self.evaluate(ops.full(kron_mat_batch))) + actual = self.evaluate(ops.full(kr.inv(kron_mat_batch))) + self.assertAllClose(desired, actual, atol=1e-4) + def testCholesky(self): # Tests the cholesky function np.random.seed(8) @@ -185,12 +179,11 @@ def testCholesky(self): K_2[:, None, :, :, None]], tt_ranks=7*[1]) kron_mat_batch = variables.get_variable('kron_mat_batch', initializer=initializer) - init_op = tf.global_variables_initializer() - with self.test_session() as sess: - sess.run(init_op) - desired = np.linalg.cholesky(ops.full(kron_mat_batch).eval()) - actual = ops.full(kr.cholesky(kron_mat_batch)).eval() - self.assertAllClose(desired, actual) + init_op = tf.compat.v1.global_variables_initializer() + self.evaluate(init_op) + desired = np.linalg.cholesky(self.evaluate(ops.full(kron_mat_batch))) + actual = self.evaluate(ops.full(kr.cholesky(kron_mat_batch))) + self.assertAllClose(desired, actual) class KroneckerTestFloat32(tf.test.TestCase, _KroneckerTest): diff --git a/t3f/nn_test.py b/t3f/nn_test.py index cf167495..ca802577 100644 --- a/t3f/nn_test.py +++ b/t3f/nn_test.py @@ -1,5 +1,6 @@ import numpy as np -import tensorflow.compat.v1
as tf +import tensorflow as tf +tf.compat.v1.enable_eager_execution() from t3f import nn @@ -9,7 +10,7 @@ class _NeuralTest(): def testKerasDense(self): # Try to create the layer twice to check that it won't crush saying the # variable already exist. - x = tf.random_normal((20, 28*28)) + x = tf.random.normal((20, 28*28)) layer = nn.KerasDense(input_dims=[7, 4, 7, 4], output_dims=[5, 5, 5, 5]) layer(x) layer = nn.KerasDense(input_dims=[7, 4, 7, 4], output_dims=[5, 5, 5, 5]) diff --git a/t3f/ops.py b/t3f/ops.py index e292369e..86be42ec 100644 --- a/t3f/ops.py +++ b/t3f/ops.py @@ -1,4 +1,4 @@ -import tensorflow.compat.v1 as tf +import tensorflow as tf import numpy as np from t3f.tensor_train_base import TensorTrainBase from t3f.tensor_train import TensorTrain @@ -21,7 +21,7 @@ def full(tt, name='t3f_full'): Returns: tf.Tensor. """ - with tf.name_scope(name, values=tt.tt_cores): + with tf.name_scope(name): if isinstance(tt, TensorTrainBatch): # Batch of Tensor Trains. return _full_tt_batch(tt) @@ -199,8 +199,8 @@ def tt_dense_matmul(tt_matrix_a, matrix_b): raise ValueError('The first argument should be a TT-matrix') ndims = tt_matrix_a.ndims() - a_columns = tt_matrix_a.get_shape()[1].value - b_rows = matrix_b.get_shape()[0].value + a_columns = tt_matrix_a.get_shape().as_list()[1] + b_rows = matrix_b.get_shape().as_list()[0] if a_columns is not None and b_rows is not None: if a_columns != b_rows: raise ValueError('Arguments shapes should align got %d and %d instead.' % @@ -299,19 +299,19 @@ def matmul(a, b, name='t3f_matmul'): """ # TODO: is it safe to check types? What if a class is derived from TT? if isinstance(a, TensorTrainBase) and isinstance(b, TensorTrainBase): - with tf.name_scope(name, values=a.tt_cores+b.tt_cores): + with tf.name_scope(name): return tt_tt_matmul(a, b) elif isinstance(a, TensorTrain) and isinstance(b, tf.Tensor): - with tf.name_scope(name, values=a.tt_cores+(b,)): + with tf.name_scope(name): return tt_dense_matmul(a, b) elif isinstance(a, tf.Tensor) and isinstance(b, TensorTrain): - with tf.name_scope(name, values=(a,)+b.tt_cores): + with tf.name_scope(name): return dense_tt_matmul(a, b) elif isinstance(a, TensorTrain) and isinstance(b, tf.SparseTensor): - with tf.name_scope(name, values=a.tt_cores+(b,)): + with tf.name_scope(name): return tt_sparse_matmul(a, b) elif isinstance(a, tf.SparseTensor) and isinstance(b, TensorTrain): - with tf.name_scope(name, values=(a,)+b.tt_cores): + with tf.name_scope(name): return sparse_tt_matmul(a, b) else: raise ValueError('Argument types are not supported in matmul: %s x %s' % @@ -520,19 +520,19 @@ def flat_inner(a, b, name='t3f_flat_inner'): """ # TODO: is it safe to check types? What if a class is derived from TT? 
if isinstance(a, TensorTrainBase) and isinstance(b, TensorTrainBase): - with tf.name_scope(name, values=a.tt_cores+b.tt_cores): + with tf.name_scope(name): return tt_tt_flat_inner(a, b) elif isinstance(a, TensorTrain) and isinstance(b, tf.Tensor): - with tf.name_scope(name, values=a.tt_cores+(b,)): + with tf.name_scope(name): return tt_dense_flat_inner(a, b) elif isinstance(a, tf.Tensor) and isinstance(b, TensorTrain): - with tf.name_scope(name, values=(a,)+b.tt_cores): + with tf.name_scope(name): return dense_tt_flat_inner(a, b) elif isinstance(a, TensorTrain) and isinstance(b, tf.SparseTensor): - with tf.name_scope(name, values=a.tt_cores+(b,)): + with tf.name_scope(name): return tt_sparse_flat_inner(a, b) elif isinstance(a, tf.SparseTensor) and isinstance(b, TensorTrain): - with tf.name_scope(name, values=(a,)+b.tt_cores): + with tf.name_scope(name): return sparse_tt_flat_inner(a, b) else: raise ValueError('Argument types are not supported in flat_inner: %s x %s' % @@ -717,7 +717,7 @@ def add(tt_a, tt_b, name='t3f_add'): raise ValueError('The batch sizes are different and not 1, broadcasting is ' 'not available.') - with tf.name_scope(name, values=tt_a.tt_cores+tt_b.tt_cores): + with tf.name_scope(name): is_a_batch = isinstance(tt_a, TensorTrainBatch) is_b_batch = isinstance(tt_b, TensorTrainBatch) is_batch_case = is_a_batch or is_b_batch @@ -777,7 +777,7 @@ def multiply(tt_left, right, name='t3f_multiply'): is_batch_case = is_left_batch or is_right_batch ndims = tt_left.ndims() if not isinstance(right, TensorTrainBase): - with tf.name_scope(name, values=tt_left.tt_cores+(right,)): + with tf.name_scope(name): # Assume right is a number, not TensorTrain. # To squash right uniformly across TT-cores we pull its absolute value # and raise to the power 1/ndims. 
First TT-core is multiplied by the sign @@ -793,7 +793,7 @@ def multiply(tt_left, right, name='t3f_multiply'): if is_left_batch: out_batch_size = tt_left.batch_size else: - with tf.name_scope(name, values=tt_left.tt_cores+right.tt_cores): + with tf.name_scope(name): if tt_left.is_tt_matrix() != right.is_tt_matrix(): raise ValueError('The arguments should be both TT-tensors or both ' @@ -828,7 +828,7 @@ def multiply(tt_left, right, name='t3f_multiply'): data = [message, shapes.lazy_batch_size(tt_left), ' x ', shapes.lazy_batch_size(right)] bs_eq = tf.assert_equal(shapes.lazy_batch_size(tt_left), - shapes.lazy_batch_size(right), data=data) + shapes.lazy_batch_size(right)) dependencies.append(bs_eq) @@ -937,7 +937,7 @@ def frobenius_norm_squared(tt, differentiable=False, a Tensor of size tt.batch_size, consisting of the Frobenius norms squared of each TensorTrain in `tt`, if it is `TensorTrainBatch` """ - with tf.name_scope(name, values=tt.tt_cores): + with tf.name_scope(name): if differentiable: if hasattr(tt, 'batch_size'): bs_str = 'n' @@ -993,7 +993,7 @@ def frobenius_norm(tt, epsilon=1e-5, differentiable=False, a Tensor of size tt.batch_size, consisting of the Frobenius norms of each TensorTrain in `tt`, if it is `TensorTrainBatch` """ - with tf.name_scope(name, values=tt.tt_cores): + with tf.name_scope(name): return tf.sqrt(frobenius_norm_squared(tt, differentiable) + epsilon) @@ -1015,7 +1015,7 @@ def transpose(tt_matrix, name='t3f_transpose'): if not isinstance(tt_matrix, TensorTrainBase) or not tt_matrix.is_tt_matrix(): raise ValueError('The argument should be a TT-matrix.') - with tf.name_scope(name, values=tt_matrix.tt_cores): + with tf.name_scope(name): transposed_tt_cores = [] for core_idx in range(tt_matrix.ndims()): curr_core = tt_matrix.tt_cores[core_idx] @@ -1088,7 +1088,7 @@ def bilinear_form(A, b, c, name='t3f_bilinear_form'): c_bs_str = 'p' if c_is_batch else '' out_bs_str = 'p' if b_is_batch or c_is_batch else '' - with tf.name_scope(name, values=A.tt_cores+b.tt_cores+c.tt_cores): + with tf.name_scope(name): ndims = A.ndims() curr_core_1 = b.tt_cores[0] curr_core_2 = c.tt_cores[0] @@ -1153,7 +1153,7 @@ def bilinear_form_two_mat(x, A, B, y, name='t3f_bilinear_xaby'): y_bs_str = 'p' if y_is_batch else '' out_bs_str = 'p' if x_is_batch or y_is_batch else '' all_cores = x.tt_cores + A.tt_cores + B.tt_cores + y.tt_cores - with tf.name_scope(name, values=all_cores): + with tf.name_scope(name): ndims = A.ndims() curr_core_1 = x.tt_cores[0] curr_core_2 = y.tt_cores[0] @@ -1199,7 +1199,7 @@ def cast(tt, dtype, name='t3f_cast'): TypeError: If `tt` cannot be cast to the `dtype`. ValueError: If `tt` is not a `TensorTrain` or `TensorTrainBatch`. """ - with tf.name_scope(name, values=tt.tt_cores): + with tf.name_scope(name): res_cores = [] cores = tt.tt_cores for core_idx in range(tt.ndims()): @@ -1241,7 +1241,7 @@ def gather_nd(tt, indices, name='t3f_gather_nd'): ValueError if `indices` have wrong shape. NotImplementedError if `tt` is a TT-matrix. """ - with tf.name_scope(name, values=tt.tt_cores+(indices,)): + with tf.name_scope(name): if tt.is_tt_matrix(): raise NotImplementedError('gather_nd doesnt support TT-matrices yet ' '(got %s)' % tt) @@ -1290,7 +1290,7 @@ def renormalize_tt_cores(tt, epsilon=1e-8, name='t3f_renormalize_tt_cores'): case applies to each TT in `TensorTrainBatch`. """ # TODO: bad way to check if batch or not. 
- with tf.name_scope(name, values=tt.tt_cores): + with tf.name_scope(name): epsilon = tf.convert_to_tensor(epsilon, dtype=tt.dtype) if isinstance(tt, TensorTrain): new_cores = [] @@ -1299,7 +1299,7 @@ def renormalize_tt_cores(tt, epsilon=1e-8, name='t3f_renormalize_tt_cores'): for core in tt.tt_cores: cur_core_norm = tf.sqrt(tf.maximum(tf.reduce_sum(core ** 2), epsilon)) core_norms.append(cur_core_norm) - running_log_norm += tf.log(cur_core_norm) + running_log_norm += tf.math.log(cur_core_norm) running_log_norm = running_log_norm / tt.ndims() fact = tf.exp(running_log_norm) @@ -1313,10 +1313,10 @@ def renormalize_tt_cores(tt, epsilon=1e-8, name='t3f_renormalize_tt_cores'): ax = np.arange(len(tt.tt_cores[0].shape))[1:] fact_list = [] for core in tt.tt_cores: - cur_core_norm_sq = tf.reduce_sum(core**2, axis=ax, keep_dims=True) + cur_core_norm_sq = tf.reduce_sum(core**2, axis=ax, keepdims=True) cur_core_norm = tf.sqrt(tf.maximum(epsilon, cur_core_norm_sq)) fact_list.append(cur_core_norm) - running_core_log_norms += tf.log(cur_core_norm) + running_core_log_norms += tf.math.log(cur_core_norm) new_cores = [] exp_fact = tf.exp(running_core_log_norms / tt.ndims()) diff --git a/t3f/ops_no_eager_test.py b/t3f/ops_no_eager_test.py new file mode 100644 index 00000000..073cdced --- /dev/null +++ b/t3f/ops_no_eager_test.py @@ -0,0 +1,170 @@ +import numpy as np +import tensorflow.compat.v1 as tf +tf.disable_v2_behavior() + +from t3f.tensor_train import TensorTrain +from t3f.tensor_train_batch import TensorTrainBatch +from t3f import ops +from t3f import shapes +from t3f import initializers + + +class _TTMatrixTest(): + + def testUnknownRanksTTMatmul(self): + # Tests tt_tt_matmul for matrices with unknown ranks + K_1 = tf.placeholder(self.dtype, (1, 2, 2, None)) + K_2 = tf.placeholder(self.dtype, (None, 3, 3, 1)) + tt_mat = TensorTrain([K_1, K_2]) + res_actual = ops.full(ops.matmul(tt_mat, tt_mat)) + res_desired = tf.matmul(ops.full(tt_mat), ops.full(tt_mat)) + np.random.seed(1) + K_1_val = np.random.rand(1, 2, 2, 2) + K_2_val = np.random.rand(2, 3, 3, 1) + with tf.Session() as sess: + res_actual_val = sess.run(res_actual, {K_1: K_1_val, K_2: K_2_val}) + res_desired_val = sess.run(res_desired, {K_1: K_1_val, K_2: K_2_val}) + self.assertAllClose(res_desired_val, res_actual_val) + + def testHalfKnownRanksTTMatmul(self): + # Tests tt_tt_matmul for the case when one matrix has known ranks + # and the other one doesn't + np.random.seed(1) + K_1 = tf.placeholder(self.dtype, (1, 2, 2, None)) + K_2 = tf.placeholder(self.dtype, (None, 3, 3, 1)) + tt_mat_known_ranks = TensorTrain([K_1, K_2], tt_ranks=[1, 3, 1]) + tt_mat = TensorTrain([K_1, K_2]) + res_actual = ops.full(ops.matmul(tt_mat_known_ranks, tt_mat)) + res_desired = tf.matmul(ops.full(tt_mat_known_ranks), ops.full(tt_mat)) + np.random.seed(1) + K_1_val = np.random.rand(1, 2, 2, 3) + K_2_val = np.random.rand(3, 3, 3, 1) + with tf.Session() as sess: + res_actual_val = sess.run(res_actual, {K_1: K_1_val, K_2: K_2_val}) + res_desired_val = sess.run(res_desired, {K_1: K_1_val, K_2: K_2_val}) + self.assertAllClose(res_desired_val, res_actual_val) + + +class _TTTensorBatchTest(): + + def testMultiplyUnknownBatchSizeBroadcasting(self): + c1 = tf.placeholder(self.dtype, [None, 1, 3, 2]) + c2 = tf.placeholder(self.dtype, [None, 2, 3, 1]) + tt_a = TensorTrainBatch([c1, c2]) + tt_b = initializers.random_tensor_batch((3, 3), tt_rank=3, batch_size=1, + dtype=self.dtype) + tt_c = initializers.random_tensor((3, 3), tt_rank=3, + dtype=self.dtype) + res_ab =
ops.full(ops.multiply(tt_a, tt_b)) + res_ba = ops.full(ops.multiply(tt_b, tt_a)) + res_ac = ops.full(ops.multiply(tt_a, tt_c)) + res_ca = ops.full(ops.multiply(tt_c, tt_a)) + res_desired_ab = ops.full(tt_a) * ops.full(tt_b) + res_desired_ac = ops.full(tt_a) * ops.full(tt_c) + to_run = [res_ab, res_ba, res_ac, res_ca, res_desired_ab, res_desired_ac] + feed_dict = {c1:np.random.rand(7, 1, 3, 2), + c2:np.random.rand(7, 2, 3, 1)} + with tf.Session() as sess: + ab, ba, ac, ca, des_ab, des_ac = sess.run(to_run, feed_dict=feed_dict) + self.assertAllClose(ab, des_ab) + self.assertAllClose(ba, des_ab) + self.assertAllClose(ac, des_ac) + self.assertAllClose(ca, des_ac) + + def testMultiplyTwoBatchesUnknownSize(self): + c1 = tf.placeholder(self.dtype, [None, 1, 3, 2]) + c2 = tf.placeholder(self.dtype, [None, 2, 3, 1]) + c3 = tf.placeholder(self.dtype, [None, 1, 3, 2]) + c4 = tf.placeholder(self.dtype, [None, 2, 3, 1]) + tt_a = TensorTrainBatch([c1, c2]) + tt_b = TensorTrainBatch([c3, c4]) + res_ab = ops.full(ops.multiply(tt_a, tt_b)) + res_ba = ops.full(ops.multiply(tt_b, tt_a)) + res_desired = ops.full(tt_a) * ops.full(tt_b) + to_run = [res_ab, res_ba, res_desired] + feed_dict = {c1:np.random.rand(7, 1, 3, 2), + c2:np.random.rand(7, 2, 3, 1), + c3:np.random.rand(7, 1, 3, 2), + c4:np.random.rand(7, 2, 3, 1)} + + feed_dict_err = {c1:np.random.rand(7, 1, 3, 2), + c2:np.random.rand(7, 2, 3, 1), + c3:np.random.rand(1, 1, 3, 2), + c4:np.random.rand(1, 2, 3, 1)} + + with tf.Session() as sess: + ab_full, ba_full, des_full = sess.run(to_run, feed_dict=feed_dict) + self.assertAllClose(ab_full, des_full) + self.assertAllClose(ba_full, des_full) + with self.assertRaises(tf.errors.InvalidArgumentError): + sess.run(to_run, feed_dict=feed_dict_err) + + def testMultiplyUnknownSizeBatchAndBatch(self): + c1 = tf.placeholder(self.dtype, [None, 1, 3, 2]) + c2 = tf.placeholder(self.dtype, [None, 2, 3, 1]) + tt_b = initializers.random_tensor_batch((3, 3), tt_rank=2, batch_size=8, + dtype=self.dtype) + tt_a = TensorTrainBatch([c1, c2]) + res_ab = ops.full(ops.multiply(tt_a, tt_b)) + res_ba = ops.full(ops.multiply(tt_b, tt_a)) + res_desired = ops.full(tt_a) * ops.full(tt_b) + to_run = [res_ab, res_ba, res_desired] + feed_dict = {c1:np.random.rand(8, 1, 3, 2), + c2:np.random.rand(8, 2, 3, 1)} + + feed_dict_err = {c1:np.random.rand(1, 1, 3, 2), + c2:np.random.rand(1, 2, 3, 1)} + + with tf.Session() as sess: + ab_full, ba_full, des_full = sess.run(to_run, feed_dict=feed_dict) + self.assertAllClose(ab_full, des_full) + self.assertAllClose(ba_full, des_full) + with self.assertRaises(tf.errors.InvalidArgumentError): + sess.run(to_run, feed_dict=feed_dict_err) + + def testGatherND(self): + idx = [[0, 0, 0], [0, 1, 2], [0, 1, 0]] + pl_idx = tf.placeholder(tf.int32, [None, 3]) + tt = initializers.random_tensor((3, 4, 5), tt_rank=2, dtype=self.dtype) + res_np = ops.gather_nd(tt, idx) + res_pl = ops.gather_nd(tt, pl_idx) + res_desired = tf.gather_nd(ops.full(tt), idx) + to_run = [res_np, res_pl, res_desired] + with tf.Session() as sess: + res_np_v, res_pl_v, des_v = sess.run(to_run, feed_dict={pl_idx: idx}) + self.assertAllClose(res_np_v, des_v) + self.assertAllClose(res_pl_v, des_v) + + def testGatherNDBatch(self): + idx = [[0, 0, 0, 0], [1, 0, 1, 2], [0, 0, 1, 0]] + pl_idx = tf.placeholder(tf.int32, [None, 4]) + tt = initializers.random_tensor_batch((3, 4, 5), tt_rank=2, batch_size=2, + dtype=self.dtype) + res_np = ops.gather_nd(tt, idx) + res_pl = ops.gather_nd(tt, pl_idx) + res_desired = tf.gather_nd(ops.full(tt), idx) +
to_run = [res_np, res_pl, res_desired] + with tf.Session() as sess: + res_np_v, res_pl_v, des_v = sess.run(to_run, feed_dict={pl_idx: idx}) + self.assertAllClose(res_np_v, des_v) + self.assertAllClose(res_pl_v, des_v) + + +class TTMatrixTestFloat32(tf.test.TestCase, _TTMatrixTest): + dtype = tf.float32 + + +class TTMatrixTestFloat64(tf.test.TestCase, _TTMatrixTest): + dtype = tf.float64 + + +class TTTensorBatchTestFloat32(tf.test.TestCase, _TTTensorBatchTest): + dtype = tf.float32 + + +class TTTensorBatchTestFloat64(tf.test.TestCase, _TTTensorBatchTest): + dtype = tf.float64 + + +if __name__ == "__main__": + tf.test.main() diff --git a/t3f/ops_test.py b/t3f/ops_test.py index 1d650824..dbad6cf5 100644 --- a/t3f/ops_test.py +++ b/t3f/ops_test.py @@ -1,5 +1,6 @@ import numpy as np -import tensorflow.compat.v1 as tf +import tensorflow as tf +tf.compat.v1.enable_eager_execution() from t3f.tensor_train import TensorTrain from t3f.tensor_train_batch import TensorTrainBatch @@ -17,10 +18,9 @@ def testFullTensor2d(self): b = np.random.rand(rank, 9).astype(self.dtype.as_numpy_dtype) tt_cores = (a.reshape(1, 10, rank), b.reshape(rank, 9, 1)) desired = np.dot(a, b) - with self.test_session(): - tf_tens = TensorTrain(tt_cores) - actual = ops.full(tf_tens) - self.assertAllClose(desired, actual.eval()) + tf_tens = TensorTrain(tt_cores) + actual = self.evaluate(ops.full(tf_tens)) + self.assertAllClose(desired, actual) def testFullTensor3d(self): np.random.seed(1) @@ -33,10 +33,9 @@ def testFullTensor3d(self): desired = a.dot(b.reshape((rank_1, -1))) desired = desired.reshape((-1, 3)).dot(c) desired = desired.reshape(10, 9, 8) - with self.test_session(): - tf_tens = TensorTrain(tt_cores) - actual = ops.full(tf_tens) - self.assertAllClose(desired, actual.eval()) + tf_tens = TensorTrain(tt_cores) + actual = self.evaluate(ops.full(tf_tens)) + self.assertAllClose(desired, actual) def testFlatInnerTTTensbyTTTens(self): # Inner product between two TT-tensors. @@ -44,20 +43,19 @@ (2, 3, 4), (4, 2, 5, 2)) rank_list = (1, 2) - with self.test_session() as sess: - for shape in shape_list: - for rank in rank_list: - tt_1 = initializers.random_tensor(shape, tt_rank=rank, - dtype=self.dtype) - tt_2 = initializers.random_tensor(shape, tt_rank=rank, - dtype=self.dtype) - res_actual = ops.flat_inner(tt_1, tt_2) - tt_1_full = tf.reshape(ops.full(tt_1), (1, -1)) - tt_2_full = tf.reshape(ops.full(tt_2), (-1, 1)) - res_desired = tf.matmul(tt_1_full, tt_2_full) - res_actual_val, res_desired_val = sess.run([res_actual, res_desired]) - self.assertAllClose(res_actual_val, np.squeeze(res_desired_val), - rtol=1e-5) + for shape in shape_list: + for rank in rank_list: + tt_1 = initializers.random_tensor(shape, tt_rank=rank, + dtype=self.dtype) + tt_2 = initializers.random_tensor(shape, tt_rank=rank, + dtype=self.dtype) + res_actual = ops.flat_inner(tt_1, tt_2) + tt_1_full = tf.reshape(ops.full(tt_1), (1, -1)) + tt_2_full = tf.reshape(ops.full(tt_2), (-1, 1)) + res_desired = tf.matmul(tt_1_full, tt_2_full) + res_actual_val, res_desired_val = self.evaluate([res_actual, res_desired]) + self.assertAllClose(res_actual_val, np.squeeze(res_desired_val), + rtol=1e-5) def testFlatInnerTTTensbySparseTens(self): # Inner product between a TT-tensor and a sparse tensor.
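Every ops_test.py hunk in this file follows the same recipe: `with self.test_session() as sess: ... sess.run(...)` and `.eval()` become `self.evaluate(...)`, which returns numpy values under eager execution and falls back to running a session under graph mode. A minimal self-contained sketch of the recipe; `EvaluateRecipeTest` is an illustrative case, not part of this patch:

import numpy as np
import tensorflow as tf
tf.compat.v1.enable_eager_execution()


class EvaluateRecipeTest(tf.test.TestCase):

  def testReshapeRoundTrip(self):
    # self.evaluate works in both eager and graph modes, so no
    # test_session() block or .eval() call is needed.
    x = tf.constant(np.arange(6.0).reshape(2, 3))
    y = tf.reshape(x, (3, 2))
    x_val, y_val = self.evaluate([x, tf.reshape(y, (2, 3))])
    self.assertAllClose(x_val, y_val)


if __name__ == "__main__":
  tf.test.main()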
@@ -66,24 +64,23 @@ def testFlatInnerTTTensbySparseTens(self): (4, 2, 5, 2)) rank_list = (1, 2) np.random.seed(1) - with self.test_session() as sess: - for shape in shape_list: - for rank in rank_list: - for num_elements in [1, 10]: - tt_1 = initializers.random_tensor(shape, tt_rank=rank, - dtype=self.dtype) - sparse_flat_indices = np.random.choice(np.prod(shape), num_elements) - sparse_flat_indices = sparse_flat_indices.astype(int) - sparse_indices = np.unravel_index(sparse_flat_indices, shape) - sparse_indices = np.vstack(sparse_indices).transpose() - values = np.random.randn(num_elements) - values = values.astype(self.dtype.as_numpy_dtype) - sparse_2 = tf.SparseTensor(indices=sparse_indices, values=values, - dense_shape=shape) - res_actual = ops.flat_inner(tt_1, sparse_2) - res_actual_val, tt_1_val = sess.run([res_actual, ops.full(tt_1)]) - res_desired_val = tt_1_val.flatten()[sparse_flat_indices].dot(values) - self.assertAllClose(res_actual_val, res_desired_val) + for shape in shape_list: + for rank in rank_list: + for num_elements in [1, 10]: + tt_1 = initializers.random_tensor(shape, tt_rank=rank, + dtype=self.dtype) + sparse_flat_indices = np.random.choice(np.prod(shape), num_elements) + sparse_flat_indices = sparse_flat_indices.astype(int) + sparse_indices = np.unravel_index(sparse_flat_indices, shape) + sparse_indices = np.vstack(sparse_indices).transpose() + values = np.random.randn(num_elements) + values = values.astype(self.dtype.as_numpy_dtype) + sparse_2 = tf.SparseTensor(indices=sparse_indices, values=values, + dense_shape=shape) + res_actual = ops.flat_inner(tt_1, sparse_2) + res_actual_val, tt_1_val = self.evaluate([res_actual, ops.full(tt_1)]) + res_desired_val = tt_1_val.flatten()[sparse_flat_indices].dot(values) + self.assertAllClose(res_actual_val, res_desired_val) def testAdd(self): # Sum two TT-tensors. @@ -91,14 +88,13 @@ def testAdd(self): dtype=self.dtype) tt_b = initializers.random_tensor((2, 1, 3, 4), tt_rank=[1, 2, 4, 3, 1], dtype=self.dtype) - with self.test_session() as sess: - res_actual = ops.full(ops.add(tt_a, tt_b)) - res_actual2 = ops.full(tt_a + tt_b) - res_desired = ops.full(tt_a) + ops.full(tt_b) - to_run = [res_actual, res_actual2, res_desired] - res_actual_val, res_actual2_val, res_desired_val = sess.run(to_run) - self.assertAllClose(res_actual_val, res_desired_val) - self.assertAllClose(res_actual2_val, res_desired_val) + res_actual = ops.full(ops.add(tt_a, tt_b)) + res_actual2 = ops.full(tt_a + tt_b) + res_desired = ops.full(tt_a) + ops.full(tt_b) + to_run = [res_actual, res_actual2, res_desired] + res_actual_val, res_actual2_val, res_desired_val = self.evaluate(to_run) + self.assertAllClose(res_actual_val, res_desired_val) + self.assertAllClose(res_actual2_val, res_desired_val) def testMultiply(self): # Multiply two TT-tensors. 
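The `tf.name_scope(name, values=...)` → `tf.name_scope(name)` rewrites in the ops.py hunks above reflect the TF2 endpoint, which accepts only the scope name; the `values` argument existed only on the v1 op. A toy illustration of the new signature (`scaled_sum` is illustrative, not library code):

import tensorflow as tf


def scaled_sum(x, name='scaled_sum'):
  # TF2-style scope: pass only the name; the v1-only `values`
  # argument is gone.
  with tf.name_scope(name):
    return 2.0 * tf.reduce_sum(x)


print(scaled_sum(tf.constant([1.0, 2.0])))  # 6.0 in eager mode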
@@ -106,27 +102,25 @@ def testMultiply(self): dtype=self.dtype) tt_b = initializers.random_tensor((1, 2, 3, 4), tt_rank=[1, 1, 4, 3, 1], dtype=self.dtype) - with self.test_session() as sess: - res_actual = ops.full(ops.multiply(tt_a, tt_b)) - res_actual2 = ops.full(tt_a * tt_b) - res_desired = ops.full(tt_a) * ops.full(tt_b) - to_run = [res_actual, res_actual2, res_desired] - res_actual_val, res_actual2_val, res_desired_val = sess.run(to_run) - self.assertAllClose(res_actual_val, res_desired_val) - self.assertAllClose(res_actual2_val, res_desired_val) + res_actual = ops.full(ops.multiply(tt_a, tt_b)) + res_actual2 = ops.full(tt_a * tt_b) + res_desired = ops.full(tt_a) * ops.full(tt_b) + to_run = [res_actual, res_actual2, res_desired] + res_actual_val, res_actual2_val, res_desired_val = self.evaluate(to_run) + self.assertAllClose(res_actual_val, res_desired_val) + self.assertAllClose(res_actual2_val, res_desired_val) def testMultiplyByNumber(self): # Multiply a tensor by a number. tt = initializers.random_tensor((1, 2, 3), tt_rank=(1, 2, 3, 1), dtype=self.dtype) - with self.test_session() as sess: - res_actual = ops.full(ops.multiply(tt, 4)) - res_actual2 = ops.full(4.0 * tt) - res_desired = 4.0 * ops.full(tt) - to_run = [res_actual, res_actual2, res_desired] - res_actual_val, res_actual2_val, res_desired_val = sess.run(to_run) - self.assertAllClose(res_actual_val, res_desired_val) - self.assertAllClose(res_actual2_val, res_desired_val) + res_actual = ops.full(ops.multiply(tt, 4)) + res_actual2 = ops.full(4.0 * tt) + res_desired = 4.0 * ops.full(tt) + to_run = [res_actual, res_actual2, res_desired] + res_actual_val, res_actual2_val, res_desired_val = self.evaluate(to_run) + self.assertAllClose(res_actual_val, res_desired_val) + self.assertAllClose(res_actual2_val, res_desired_val) def testFrobeniusNormTens(self): # Frobenius norm of a TT-tensor. @@ -134,31 +128,29 @@ def testFrobeniusNormTens(self): (2, 3, 4), (4, 2, 5, 2)) rank_list = (1, 2) - with self.test_session() as sess: - for shape in shape_list: - for rank in rank_list: - tt = initializers.random_tensor(shape, tt_rank=rank, - dtype=self.dtype) - norm_sq_actual = ops.frobenius_norm_squared(tt) - norm_actual = ops.frobenius_norm(tt) - vars = [norm_sq_actual, norm_actual, ops.full(tt)] - norm_sq_actual_val, norm_actual_val, tt_val = sess.run(vars) - tt_val = tt_val.flatten() - norm_sq_desired_val = tt_val.dot(tt_val) - norm_desired_val = np.linalg.norm(tt_val) - self.assertAllClose(norm_sq_actual_val, norm_sq_desired_val) - self.assertAllClose(norm_actual_val, norm_desired_val, atol=1e-5, - rtol=1e-5) + for shape in shape_list: + for rank in rank_list: + tt = initializers.random_tensor(shape, tt_rank=rank, + dtype=self.dtype) + norm_sq_actual = ops.frobenius_norm_squared(tt) + norm_actual = ops.frobenius_norm(tt, epsilon=0.0) + vars = [norm_sq_actual, norm_actual, ops.full(tt)] + norm_sq_actual_val, norm_actual_val, tt_val = self.evaluate(vars) + tt_val = tt_val.flatten() + norm_sq_desired_val = tt_val.dot(tt_val) + norm_desired_val = np.linalg.norm(tt_val) + self.assertAllClose(norm_sq_actual_val, norm_sq_desired_val) + self.assertAllClose(norm_actual_val, norm_desired_val, atol=1e-5, + rtol=1e-5) def testCastFloat(self): # Test cast function for float tt-tensors. 
tt_x = initializers.random_tensor((2, 3, 2), tt_rank=2) - with self.test_session() as sess: - casted = ops.cast(tt_x, self.dtype) - casted_val = sess.run(ops.full(casted)) - self.assertEqual(self.dtype, casted.dtype) - self.assertTrue(self.dtype, casted_val.dtype) + casted = ops.cast(tt_x, self.dtype) + casted_val = self.evaluate(ops.full(casted)) + self.assertEqual(self.dtype, casted.dtype) + self.assertTrue(self.dtype, casted_val.dtype) def testCastIntFloat(self): # Tests cast function from int to float for tensors. @@ -168,26 +160,24 @@ def testCastIntFloat(self): K_3 = np.random.randint(0, high=100, size=(2, 2, 1)) tt_int = TensorTrain([K_1, K_2, K_3], tt_ranks=[1, 2, 2, 1]) - with self.test_session() as sess: - casted = ops.cast(tt_int, self.dtype) - casted_val = sess.run(ops.full(casted)) - self.assertEqual(self.dtype, casted.dtype) - self.assertTrue(self.dtype, casted_val.dtype) + casted = ops.cast(tt_int, self.dtype) + casted_val = self.evaluate(ops.full(casted)) + self.assertEqual(self.dtype, casted.dtype) + self.assertTrue(self.dtype, casted_val.dtype) def testCoreRenorm(self): a = initializers.random_tensor(3 * (10,), tt_rank=7, dtype=self.dtype) b = ops.renormalize_tt_cores(a) var_list = [ops.full(a), ops.full(b)] - with self.test_session() as sess: - af, bf = sess.run(var_list) - b_cores = sess.run(b.tt_cores) - b_cores_norms = [] - for cr in b_cores: - b_cores_norms.append(np.linalg.norm(cr)) - self.assertAllClose(af, bf, atol=1e-5, rtol=1e-5) - self.assertAllClose(b_cores_norms, b_cores_norms[0] - * np.ones((len(b_cores)))) + af, bf = self.evaluate(var_list) + b_cores = self.evaluate(b.tt_cores) + b_cores_norms = [] + for cr in b_cores: + b_cores_norms.append(np.linalg.norm(cr)) + self.assertAllClose(af, bf, atol=1e-5, rtol=1e-5) + self.assertAllClose(b_cores_norms, b_cores_norms[0] + * np.ones((len(b_cores)))) class _TTMatrixTest(): @@ -203,10 +193,9 @@ def testFullMatrix2d(self): desired = desired.reshape((2, 3, 4, 5)) desired = desired.transpose((0, 2, 1, 3)) desired = desired.reshape((2 * 4, 3 * 5)) - with self.test_session(): - tf_mat = TensorTrain(tt_cores) - actual = ops.full(tf_mat) - self.assertAllClose(desired, actual.eval()) + tf_mat = TensorTrain(tt_cores) + actual = self.evaluate(ops.full(tf_mat)) + self.assertAllClose(desired, actual) def testFullMatrix3d(self): np.random.seed(1) @@ -222,27 +211,25 @@ def testFullMatrix3d(self): desired = desired.reshape((2, 3, 4, 5, 2, 2)) desired = desired.transpose((0, 2, 4, 1, 3, 5)) desired = desired.reshape((2 * 4 * 2, 3 * 5 * 2)) - with self.test_session(): - tf_mat = TensorTrain(tt_cores) - actual = ops.full(tf_mat) - self.assertAllClose(desired, actual.eval()) + tf_mat = TensorTrain(tt_cores) + actual = self.evaluate(ops.full(tf_mat)) + self.assertAllClose(desired, actual) def testTTMatTimesTTMat(self): # Multiply a TT-matrix by another TT-matrix. left_shape = (2, 3, 4) sum_shape = (4, 3, 5) right_shape = (4, 4, 4) - with self.test_session() as sess: - tt_mat_1 = initializers.random_matrix((left_shape, sum_shape), tt_rank=3, - dtype=self.dtype) - tt_mat_2 = initializers.random_matrix((sum_shape, right_shape), - dtype=self.dtype) - res_actual = ops.matmul(tt_mat_1, tt_mat_2) - res_actual = ops.full(res_actual) - res_desired = tf.matmul(ops.full(tt_mat_1), ops.full(tt_mat_2)) - res_actual_val, res_desired_val = sess.run([res_actual, res_desired]) - # TODO: why so bad accuracy? 
- self.assertAllClose(res_actual_val, res_desired_val, atol=1e-4, rtol=1e-4) + tt_mat_1 = initializers.random_matrix((left_shape, sum_shape), tt_rank=3, + dtype=self.dtype) + tt_mat_2 = initializers.random_matrix((sum_shape, right_shape), + dtype=self.dtype) + res_actual = ops.matmul(tt_mat_1, tt_mat_2) + res_actual = ops.full(res_actual) + res_desired = tf.matmul(ops.full(tt_mat_1), ops.full(tt_mat_2)) + res_actual_val, res_desired_val = self.evaluate([res_actual, res_desired]) + # TODO: why so bad accuracy? + self.assertAllClose(res_actual_val, res_desired_val, atol=1e-4, rtol=1e-4) def testTTMatTimesDenseVec(self): # Multiply a TT-matrix by a dense vector. @@ -250,15 +237,14 @@ def testTTMatTimesDenseVec(self): out_shape = (3, 4, 3) np.random.seed(1) vec = np.random.rand(np.prod(inp_shape), 1).astype(self.dtype.as_numpy_dtype) - with self.test_session() as sess: - tf_vec = tf.constant(vec) - tf.set_random_seed(1) - tt_mat = initializers.random_matrix((out_shape, inp_shape), - dtype=self.dtype) - res_actual = ops.matmul(tt_mat, tf_vec) - res_desired = tf.matmul(ops.full(tt_mat), tf_vec) - res_actual_val, res_desired_val = sess.run([res_actual, res_desired]) - self.assertAllClose(res_actual_val, res_desired_val) + tf_vec = tf.constant(vec) + tf.compat.v1.set_random_seed(1) + tt_mat = initializers.random_matrix((out_shape, inp_shape), + dtype=self.dtype) + res_actual = ops.matmul(tt_mat, tf_vec) + res_desired = tf.matmul(ops.full(tt_mat), tf_vec) + res_actual_val, res_desired_val = self.evaluate([res_actual, res_desired]) + self.assertAllClose(res_actual_val, res_desired_val) def testDenseMatTimesTTVec(self): # Multiply a TT-matrix by a dense vector. @@ -267,35 +253,33 @@ def testDenseMatTimesTTVec(self): np.random.seed(1) mat = np.random.rand(np.prod(out_shape), np.prod(inp_shape)) mat = mat.astype(self.dtype.as_numpy_dtype) - with self.test_session() as sess: - tf_mat = tf.constant(mat) - tf.set_random_seed(1) - tt_vec = initializers.random_matrix((inp_shape, None), - dtype=self.dtype) - res_actual = ops.matmul(tf_mat, tt_vec) - res_desired = tf.matmul(tf_mat, ops.full(tt_vec)) - res_actual_val, res_desired_val = sess.run([res_actual, res_desired]) - self.assertAllClose(res_actual_val, res_desired_val, atol=1e-4, rtol=1e-4) + tf_mat = tf.constant(mat) + tf.compat.v1.set_random_seed(1) + tt_vec = initializers.random_matrix((inp_shape, None), + dtype=self.dtype) + res_actual = ops.matmul(tf_mat, tt_vec) + res_desired = tf.matmul(tf_mat, ops.full(tt_vec)) + res_actual_val, res_desired_val = self.evaluate([res_actual, res_desired]) + self.assertAllClose(res_actual_val, res_desired_val, atol=1e-4, rtol=1e-4) def testFlatInnerTTMatbyTTMat(self): # Inner product between two TT-Matrices. 
shape_list = (((2, 2), (3, 4)), ((2, 3, 4), (2, 2, 2))) rank_list = (1, 2) - with self.test_session() as sess: - for shape in shape_list: - for rank in rank_list: - tt_1 = initializers.random_matrix(shape, tt_rank=rank, - dtype=self.dtype) - tt_2 = initializers.random_matrix(shape, tt_rank=rank, - dtype=self.dtype) - res_actual = ops.flat_inner(tt_1, tt_2) - tt_1_full = tf.reshape(ops.full(tt_1), (1, -1)) - tt_2_full = tf.reshape(ops.full(tt_2), (-1, 1)) - res_desired = tf.matmul(tt_1_full, tt_2_full) - res_actual_val, res_desired_val = sess.run([res_actual, res_desired]) - self.assertAllClose(res_actual_val, np.squeeze(res_desired_val), - rtol=1e-5, atol=1e-5) + for shape in shape_list: + for rank in rank_list: + tt_1 = initializers.random_matrix(shape, tt_rank=rank, + dtype=self.dtype) + tt_2 = initializers.random_matrix(shape, tt_rank=rank, + dtype=self.dtype) + res_actual = ops.flat_inner(tt_1, tt_2) + tt_1_full = tf.reshape(ops.full(tt_1), (1, -1)) + tt_2_full = tf.reshape(ops.full(tt_2), (-1, 1)) + res_desired = tf.matmul(tt_1_full, tt_2_full) + res_actual_val, res_desired_val = self.evaluate([res_actual, res_desired]) + self.assertAllClose(res_actual_val, np.squeeze(res_desired_val), + rtol=1e-5, atol=1e-5) def testFlatInnerTTMatbySparseMat(self): # Inner product between a TT-matrix and a sparse matrix. @@ -303,127 +287,121 @@ def testFlatInnerTTMatbySparseMat(self): ((2, 3, 4), (2, 2, 2))) rank_list = (1, 2) np.random.seed(1) - with self.test_session() as sess: - for tensor_shape in shape_list: - for rank in rank_list: - for num_elements in [1, 9]: - tt_1 = initializers.random_matrix(tensor_shape, tt_rank=rank, - dtype=self.dtype) - matrix_shape = np.prod(tensor_shape[0]), np.prod(tensor_shape[1]) - sparse_flat_indices = np.random.choice(np.prod(matrix_shape), num_elements) - sparse_flat_indices = sparse_flat_indices.astype(int) - sparse_indices = np.unravel_index(sparse_flat_indices, matrix_shape) - sparse_indices = np.vstack(sparse_indices).transpose() - values = np.random.randn(num_elements).astype(self.dtype.as_numpy_dtype) - sparse_2 = tf.SparseTensor(indices=sparse_indices, values=values, - dense_shape=matrix_shape) - res_actual = ops.flat_inner(tt_1, sparse_2) - res_actual_val, tt_1_val = sess.run([res_actual, ops.full(tt_1)]) - res_desired_val = tt_1_val.flatten()[sparse_flat_indices].dot(values) - self.assertAllClose(res_actual_val, res_desired_val) + for tensor_shape in shape_list: + for rank in rank_list: + for num_elements in [1, 9]: + tt_1 = initializers.random_matrix(tensor_shape, tt_rank=rank, + dtype=self.dtype) + matrix_shape = np.prod(tensor_shape[0]), np.prod(tensor_shape[1]) + sparse_flat_indices = np.random.choice(np.prod(matrix_shape), num_elements) + sparse_flat_indices = sparse_flat_indices.astype(int) + sparse_indices = np.unravel_index(sparse_flat_indices, matrix_shape) + sparse_indices = np.vstack(sparse_indices).transpose() + values = np.random.randn(num_elements).astype(self.dtype.as_numpy_dtype) + sparse_2 = tf.SparseTensor(indices=sparse_indices, values=values, + dense_shape=matrix_shape) + res_actual = ops.flat_inner(tt_1, sparse_2) + res_actual_val, tt_1_val = self.evaluate([res_actual, ops.full(tt_1)]) + res_desired_val = tt_1_val.flatten()[sparse_flat_indices].dot(values) + self.assertAllClose(res_actual_val, res_desired_val) def testFrobeniusNormMatrix(self): # Frobenius norm of a TT-matrix. 
shape_list = (((2, 2), (3, 4)), ((2, 3, 4), (2, 2, 2))) rank_list = (1, 2) - with self.test_session() as sess: - for tensor_shape in shape_list: - for rank in rank_list: - tt = initializers.random_matrix(tensor_shape, tt_rank=rank, - dtype=self.dtype) - norm_sq_actual = ops.frobenius_norm_squared(tt) - norm_actual = ops.frobenius_norm(tt) - vars = [norm_sq_actual, norm_actual, ops.full(tt)] - norm_sq_actual_val, norm_actual_val, tt_val = sess.run(vars) - tt_val = tt_val.flatten() - norm_sq_desired_val = tt_val.dot(tt_val) - norm_desired_val = np.linalg.norm(tt_val) - self.assertAllClose(norm_sq_actual_val, norm_sq_desired_val) - self.assertAllClose(norm_actual_val, norm_desired_val, atol=1e-5, - rtol=1e-5) + for tensor_shape in shape_list: + for rank in rank_list: + tt = initializers.random_matrix(tensor_shape, tt_rank=rank, + dtype=self.dtype) + norm_sq_actual = ops.frobenius_norm_squared(tt) + norm_actual = ops.frobenius_norm(tt) + vars = [norm_sq_actual, norm_actual, ops.full(tt)] + norm_sq_actual_val, norm_actual_val, tt_val = self.evaluate(vars) + tt_val = tt_val.flatten() + norm_sq_desired_val = tt_val.dot(tt_val) + norm_desired_val = np.linalg.norm(tt_val) + self.assertAllClose(norm_sq_actual_val, norm_sq_desired_val) + self.assertAllClose(norm_actual_val, norm_desired_val, atol=1e-5, + rtol=1e-5) def testTranspose(self): # Transpose a TT-matrix. shape_list = (((2, 2), (3, 4)), ((2, 3, 4), (2, 2, 2))) rank_list = (1, 2) - with self.test_session() as sess: - for tensor_shape in shape_list: - for rank in rank_list: - tt = initializers.random_matrix(tensor_shape, tt_rank=rank, - dtype=self.dtype) - res_actual = ops.full(ops.transpose(tt)) - res_actual_val, tt_val = sess.run([res_actual, ops.full(tt)]) - self.assertAllClose(tt_val.transpose(), res_actual_val) + for tensor_shape in shape_list: + for rank in rank_list: + tt = initializers.random_matrix(tensor_shape, tt_rank=rank, + dtype=self.dtype) + res_actual = ops.full(ops.transpose(tt)) + res_actual_val, tt_val = self.evaluate([res_actual, ops.full(tt)]) + self.assertAllClose(tt_val.transpose(), res_actual_val) def testBilinearForm(self): # Test bilinear form. 
shape_list = (((2, 2), (3, 4)), ((2, 3, 4), (2, 2, 2))) rank_list = (1, 2) - with self.test_session() as sess: - for tensor_shape in shape_list: - for rank in rank_list: - A = initializers.random_matrix(tensor_shape, tt_rank=rank, - dtype=self.dtype) - b = initializers.random_matrix((tensor_shape[0], None), tt_rank=rank, - dtype=self.dtype) - c = initializers.random_matrix((tensor_shape[1], None), tt_rank=rank, - dtype=self.dtype) - res_actual = ops.bilinear_form(A, b, c) - vars = [res_actual, ops.full(A), ops.full(b), ops.full(c)] - res_actual_val, A_val, b_val, c_val = sess.run(vars) - res_desired = b_val.T.dot(A_val).dot(c_val) - self.assertAllClose(res_actual_val, np.squeeze(res_desired), - atol=1e-5, rtol=1e-5) + for tensor_shape in shape_list: + for rank in rank_list: + A = initializers.random_matrix(tensor_shape, tt_rank=rank, + dtype=self.dtype) + b = initializers.random_matrix((tensor_shape[0], None), tt_rank=rank, + dtype=self.dtype) + c = initializers.random_matrix((tensor_shape[1], None), tt_rank=rank, + dtype=self.dtype) + res_actual = ops.bilinear_form(A, b, c) + vars = [res_actual, ops.full(A), ops.full(b), ops.full(c)] + res_actual_val, A_val, b_val, c_val = self.evaluate(vars) + res_desired = b_val.T.dot(A_val).dot(c_val) + self.assertAllClose(res_actual_val, np.squeeze(res_desired), + atol=1e-5, rtol=1e-5) def testBilinearFormBatch(self): # Test bilinear form for batch of tensors. shape_list = (((2, 2), (3, 4)), ((2, 3, 4), (2, 2, 2))) rank_list = (1, 2) - with self.test_session() as sess: - for tensor_shape in shape_list: - for rank in rank_list: - A = initializers.random_matrix(tensor_shape, tt_rank=rank, - dtype=self.dtype) - b = initializers.random_matrix_batch((tensor_shape[0], None), - tt_rank=rank, batch_size=5, - dtype=self.dtype) - c = initializers.random_matrix_batch((tensor_shape[1], None), - tt_rank=rank, batch_size=5, - dtype=self.dtype) - res_actual = ops.bilinear_form(A, b, c) - vars = [res_actual, ops.full(A), ops.full(b), ops.full(c)] - res_actual_val, A_val, b_val, c_val = sess.run(vars) - res_desired = np.diag(b_val[:, :, 0].dot(A_val).dot(c_val[:, :, 0].T)) - self.assertAllClose(res_actual_val, np.squeeze(res_desired), - atol=1e-5, rtol=1e-5) + for tensor_shape in shape_list: + for rank in rank_list: + A = initializers.random_matrix(tensor_shape, tt_rank=rank, + dtype=self.dtype) + b = initializers.random_matrix_batch((tensor_shape[0], None), + tt_rank=rank, batch_size=5, + dtype=self.dtype) + c = initializers.random_matrix_batch((tensor_shape[1], None), + tt_rank=rank, batch_size=5, + dtype=self.dtype) + res_actual = ops.bilinear_form(A, b, c) + vars = [res_actual, ops.full(A), ops.full(b), ops.full(c)] + res_actual_val, A_val, b_val, c_val = self.evaluate(vars) + res_desired = np.diag(b_val[:, :, 0].dot(A_val).dot(c_val[:, :, 0].T)) + self.assertAllClose(res_actual_val, np.squeeze(res_desired), + atol=1e-5, rtol=1e-5) def testBilinearFormTwoMat(self): # Test bilinear_form_two_mat. 
shape_list = (((2, 2), (3, 4)), ((2, 3, 4), (2, 2, 2))) rank_list = (1, 2) - with self.test_session() as sess: - for tensor_shape in shape_list: - for rank in rank_list: - A = initializers.random_matrix(tensor_shape, tt_rank=rank, - dtype=self.dtype) - B = initializers.random_matrix(tensor_shape, tt_rank=rank, - dtype=self.dtype) - B = ops.transpose(B) - x = initializers.random_matrix((tensor_shape[0], None), tt_rank=rank, - dtype=self.dtype) - y = initializers.random_matrix((tensor_shape[0], None), tt_rank=rank, - dtype=self.dtype) - res_actual = ops.bilinear_form_two_mat(x, A, B, y) - vars = [res_actual, ops.full(x), ops.full(A), ops.full(B), ops.full(y)] - res_actual_val, x_val, A_val, B_val, y_val = sess.run(vars) - res_desired = x_val.T.dot(A_val).dot(B_val).dot(y_val) - self.assertAllClose(res_actual_val, np.squeeze(res_desired), - atol=1e-5, rtol=1e-5) + for tensor_shape in shape_list: + for rank in rank_list: + A = initializers.random_matrix(tensor_shape, tt_rank=rank, + dtype=self.dtype) + B = initializers.random_matrix(tensor_shape, tt_rank=rank, + dtype=self.dtype) + B = ops.transpose(B) + x = initializers.random_matrix((tensor_shape[0], None), tt_rank=rank, + dtype=self.dtype) + y = initializers.random_matrix((tensor_shape[0], None), tt_rank=rank, + dtype=self.dtype) + res_actual = ops.bilinear_form_two_mat(x, A, B, y) + vars = [res_actual, ops.full(x), ops.full(A), ops.full(B), ops.full(y)] + res_actual_val, x_val, A_val, B_val, y_val = self.evaluate(vars) + res_desired = x_val.T.dot(A_val).dot(B_val).dot(y_val) + self.assertAllClose(res_actual_val, np.squeeze(res_desired), + atol=1e-5, rtol=1e-5) def testCastFloat(self): # Test cast function for float tt-matrices and vectors. @@ -431,12 +409,11 @@ def testCastFloat(self): tt_mat = initializers.random_matrix(((2, 3), (3, 2)), tt_rank=2) tt_vec = initializers.random_matrix(((2, 3), None), tt_rank=2) - with self.test_session() as sess: - for tt in [tt_mat, tt_vec]: - casted = ops.cast(tt, self.dtype) - casted_val = sess.run(ops.full(casted)) - self.assertEqual(self.dtype, casted.dtype) - self.assertTrue(self.dtype, casted_val.dtype) + for tt in [tt_mat, tt_vec]: + casted = ops.cast(tt, self.dtype) + casted_val = self.evaluate(ops.full(casted)) + self.assertEqual(self.dtype, casted.dtype) + self.assertTrue(self.dtype, casted_val.dtype) def testCastIntFloat(self): # Tests cast function from int to float for matrices. 
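The two placeholder-based matmul tests deleted in the next hunk are not lost: they reappear verbatim in the new t3f/ops_no_eager_test.py above, since `tf.placeholder` and `tf.Session` are only available with v2 behavior disabled. A minimal skeleton of that graph-mode-only style; the test body is illustrative, not taken from this patch:

import numpy as np
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()


class PlaceholderStyleTest(tf.test.TestCase):

  def testFeedUnknownBatch(self):
    # A placeholder with an unknown leading dimension requires graph
    # mode; its value is supplied through feed_dict at run time.
    x = tf.placeholder(tf.float64, (None, 2))
    y = tf.reduce_sum(x)
    with tf.Session() as sess:
      self.assertAllClose(10.0, sess.run(y, {x: np.ones((5, 2))}))


if __name__ == "__main__":
  tf.test.main()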
@@ -446,45 +423,10 @@ def testCastIntFloat(self): K_3 = np.random.randint(0, high=100, size=(2, 2, 2, 1)) tt_int = TensorTrain([K_1, K_2, K_3], tt_ranks=[1, 2, 2, 1]) - with self.test_session() as sess: - casted = ops.cast(tt_int, self.dtype) - casted_val = sess.run(ops.full(casted)) - self.assertEqual(self.dtype, casted.dtype) - self.assertTrue(self.dtype, casted_val.dtype) - - def testUnknownRanksTTMatmul(self): - # Tests tt_tt_matmul for matrices with unknown ranks - K_1 = tf.placeholder(self.dtype, (1, 2, 2, None)) - K_2 = tf.placeholder(self.dtype, (None, 3, 3, 1)) - tt_mat = TensorTrain([K_1, K_2]) - res_actual = ops.full(ops.matmul(tt_mat, tt_mat)) - res_desired = tf.matmul(ops.full(tt_mat), ops.full(tt_mat)) - np.random.seed(1) - K_1_val = np.random.rand(1, 2, 2, 2) - K_2_val = np.random.rand(2, 3, 3, 1) - with self.test_session() as sess: - res_actual_val = sess.run(res_actual, {K_1: K_1_val, K_2: K_2_val}) - res_desired_val = sess.run(res_desired, {K_1: K_1_val, K_2: K_2_val}) - self.assertAllClose(res_desired_val, res_actual_val) - - - def testHalfKnownRanksTTMatmul(self): - # Tests tt_tt_matmul for the case when one matrice has known ranks - # and the other one doesn't - np.random.seed(1) - K_1 = tf.placeholder(self.dtype, (1, 2, 2, None)) - K_2 = tf.placeholder(self.dtype, (None, 3, 3, 1)) - tt_mat_known_ranks = TensorTrain([K_1, K_2], tt_ranks=[1, 3, 1]) - tt_mat = TensorTrain([K_1, K_2]) - res_actual = ops.full(ops.matmul(tt_mat_known_ranks, tt_mat)) - res_desired = tf.matmul(ops.full(tt_mat_known_ranks), ops.full(tt_mat)) - np.random.seed(1) - K_1_val = np.random.rand(1, 2, 2, 3) - K_2_val = np.random.rand(3, 3, 3, 1) - with self.test_session() as sess: - res_actual_val = sess.run(res_actual, {K_1: K_1_val, K_2: K_2_val}) - res_desired_val = sess.run(res_desired, {K_1: K_1_val, K_2: K_2_val}) - self.assertAllClose(res_desired_val, res_actual_val) + casted = ops.cast(tt_int, self.dtype) + casted_val = self.evaluate(ops.full(casted)) + self.assertEqual(self.dtype, casted.dtype) + self.assertTrue(self.dtype, casted_val.dtype) class _TTTensorBatchTest(): @@ -496,10 +438,9 @@ def testFullTensor2d(self): b = np.random.rand(3, rank, 9).astype(self.dtype.as_numpy_dtype) tt_cores = (a.reshape(3, 1, 10, rank), b.reshape(3, rank, 9, 1)) desired = np.einsum('oib,obj->oij', a, b) - with self.test_session(): - tf_tens = TensorTrainBatch(tt_cores) - actual = ops.full(tf_tens) - self.assertAllClose(desired, actual.eval()) + tf_tens = TensorTrainBatch(tt_cores) + actual = self.evaluate(ops.full(tf_tens)) + self.assertAllClose(desired, actual) def testFullTensor3d(self): np.random.seed(1) @@ -510,31 +451,29 @@ def testFullTensor3d(self): tt_cores = (a.reshape(3, 1, 10, rank_1), b, c.reshape((3, 3, 8, 1))) # Basically do full by hand. desired = np.einsum('oia,oajb,obk->oijk', a, b, c) - with self.test_session(): - tf_tens = TensorTrainBatch(tt_cores) - actual = ops.full(tf_tens) - self.assertAllClose(desired, actual.eval()) + tf_tens = TensorTrainBatch(tt_cores) + actual = self.evaluate(ops.full(tf_tens)) + self.assertAllClose(desired, actual) def testFlatInnerTTTensbyTTTensSameBatchSize(self): # Inner product between two batch TT-tensors of the same batch_size. 
shape_list = ((2, 2), (2, 3, 4)) rank_list = (1, 2) - with self.test_session() as sess: - for shape in shape_list: - for rank in rank_list: - tt_1 = initializers.random_tensor_batch(shape, tt_rank=rank, - batch_size=2, - dtype=self.dtype) - tt_2 = initializers.random_tensor_batch(shape, tt_rank=rank, - batch_size=2, - dtype=self.dtype) - res_actual = ops.flat_inner(tt_1, tt_2) - tt_1_full = tf.reshape(ops.full(tt_1), (2, 1, -1)) - tt_2_full = tf.reshape(ops.full(tt_2), (2, -1, 1)) - res_desired = tf.matmul(tt_1_full, tt_2_full) - res_actual_val, res_desired_val = sess.run([res_actual, res_desired]) - self.assertAllClose(res_actual_val, np.squeeze(res_desired_val)) + for shape in shape_list: + for rank in rank_list: + tt_1 = initializers.random_tensor_batch(shape, tt_rank=rank, + batch_size=2, + dtype=self.dtype) + tt_2 = initializers.random_tensor_batch(shape, tt_rank=rank, + batch_size=2, + dtype=self.dtype) + res_actual = ops.flat_inner(tt_1, tt_2) + tt_1_full = tf.reshape(ops.full(tt_1), (2, 1, -1)) + tt_2_full = tf.reshape(ops.full(tt_2), (2, -1, 1)) + res_desired = tf.matmul(tt_1_full, tt_2_full) + res_actual_val, res_desired_val = self.evaluate([res_actual, res_desired]) + self.assertAllClose(res_actual_val, np.squeeze(res_desired_val)) def testFlatInnerTTTensbyTTTensBroadcasting(self): # Inner product between two batch TT-tensors with broadcasting. @@ -545,11 +484,10 @@ def testFlatInnerTTTensbyTTTensBroadcasting(self): res_actual_1 = ops.flat_inner(tt_1, tt_2) res_actual_2 = ops.flat_inner(tt_2, tt_1) res_desired = tf.einsum('ijk,oijk->o', ops.full(tt_1[0]), ops.full(tt_2)) - with self.test_session() as sess: - res = sess.run([res_actual_1, res_actual_2, res_desired]) - res_actual_1_val, res_actual_2_val, res_desired_val = res - self.assertAllClose(res_actual_1_val, res_desired_val) - self.assertAllClose(res_actual_2_val, res_desired_val) + res = self.evaluate([res_actual_1, res_actual_2, res_desired]) + res_actual_1_val, res_actual_2_val, res_desired_val = res + self.assertAllClose(res_actual_1_val, res_desired_val) + self.assertAllClose(res_actual_2_val, res_desired_val) tt_1 = initializers.random_tensor_batch((2, 3, 4), batch_size=2, dtype=self.dtype) @@ -563,14 +501,13 @@ def testAddSameBatchSize(self): dtype=self.dtype) tt_b = initializers.random_tensor_batch((2, 1, 4), tt_rank=[1, 2, 4, 1], batch_size=3, dtype=self.dtype) - with self.test_session() as sess: - res_actual = ops.full(ops.add(tt_a, tt_b)) - res_actual2 = ops.full(tt_a + tt_b) - res_desired = ops.full(tt_a) + ops.full(tt_b) - to_run = [res_actual, res_actual2, res_desired] - res_actual_val, res_actual2_val, res_desired_val = sess.run(to_run) - self.assertAllClose(res_actual_val, res_desired_val) - self.assertAllClose(res_actual2_val, res_desired_val) + res_actual = ops.full(ops.add(tt_a, tt_b)) + res_actual2 = ops.full(tt_a + tt_b) + res_desired = ops.full(tt_a) + ops.full(tt_b) + to_run = [res_actual, res_actual2, res_desired] + res_actual_val, res_actual2_val, res_desired_val = self.evaluate(to_run) + self.assertAllClose(res_actual_val, res_desired_val) + self.assertAllClose(res_actual2_val, res_desired_val) def testAddBroadcasting(self): # Sum two TT-tensors with broadcasting. 
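testAddBroadcasting in the next hunk checks that a TensorTrainBatch with batch_size 1 broadcasts against one with batch_size 3, mirroring numpy semantics along the batch axis. In dense form the expectation is roughly the following (plain numpy, illustrative only):

import numpy as np

a = np.random.rand(1, 2, 1, 4)  # batch_size 1
b = np.random.rand(3, 2, 1, 4)  # batch_size 3
# The singleton batch axis broadcasts, just as ops.add / ops.multiply
# broadcast a batch_size-1 argument against a larger batch.
assert (a + b).shape == (3, 2, 1, 4)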
@@ -578,66 +515,62 @@ def testAddBroadcasting(self): dtype=self.dtype) tt_b = initializers.random_tensor_batch((2, 1, 4), tt_rank=[1, 2, 4, 1], batch_size=3, dtype=self.dtype) - with self.test_session() as sess: - res_actual = ops.full(ops.add(tt_a, tt_b)) - res_actual2 = ops.full(tt_b + tt_a) - res_desired = ops.full(tt_a) + ops.full(tt_b) - to_run = [res_actual, res_actual2, res_desired] - res_actual_val, res_actual2_val, res_desired_val = sess.run(to_run) - self.assertAllClose(res_actual_val, res_desired_val) - self.assertAllClose(res_actual2_val, res_desired_val) + res_actual = ops.full(ops.add(tt_a, tt_b)) + res_actual2 = ops.full(tt_b + tt_a) + res_desired = ops.full(tt_a) + ops.full(tt_b) + to_run = [res_actual, res_actual2, res_desired] + res_actual_val, res_actual2_val, res_desired_val = self.evaluate(to_run) + self.assertAllClose(res_actual_val, res_desired_val) + self.assertAllClose(res_actual2_val, res_desired_val) def testMultiplyByNumber(self): # Multiply batch of tensors by a number. tt = initializers.random_tensor_batch((1, 2, 3), tt_rank=(1, 2, 3, 1), batch_size=3, dtype=self.dtype) - with self.test_session() as sess: - res_actual = ops.full(ops.multiply(tt, 4)) - res_actual2 = ops.full(4.0 * tt) - res_desired = 4.0 * ops.full(tt) - to_run = [res_actual, res_actual2, res_desired] - res_actual_val, res_actual2_val, res_desired_val = sess.run(to_run) - self.assertAllClose(res_actual_val, res_desired_val) - self.assertAllClose(res_actual2_val, res_desired_val) + res_actual = ops.full(ops.multiply(tt, 4)) + res_actual2 = ops.full(4.0 * tt) + res_desired = 4.0 * ops.full(tt) + to_run = [res_actual, res_actual2, res_desired] + res_actual_val, res_actual2_val, res_desired_val = self.evaluate(to_run) + self.assertAllClose(res_actual_val, res_desired_val) + self.assertAllClose(res_actual2_val, res_desired_val) def testFrobeniusNormDifferentiableBatch(self): - with self.test_session() as sess: - tt = initializers.random_tensor_batch((3, 3, 3), tt_rank=2, batch_size=5, - dtype=self.dtype) - norm_sq_diff = ops.frobenius_norm_squared(tt, differentiable=True) - variables = [norm_sq_diff, ops.full(tt)] - norm_sq_diff_val, tt_full = sess.run(variables) - desired_norm = np.linalg.norm(tt_full.reshape((5, -1)), axis=1)**2 - self.assertAllClose(norm_sq_diff_val, desired_norm, atol=1e-5, rtol=1e-5) + tt = initializers.random_tensor_batch((3, 3, 3), tt_rank=2, batch_size=5, + dtype=self.dtype) + norm_sq_diff = ops.frobenius_norm_squared(tt, differentiable=True) + variables = [norm_sq_diff, ops.full(tt)] + norm_sq_diff_val, tt_full = self.evaluate(variables) + desired_norm = np.linalg.norm(tt_full.reshape((5, -1)), axis=1)**2 + self.assertAllClose(norm_sq_diff_val, desired_norm, atol=1e-5, rtol=1e-5) def testFrobeniusNormTens(self): # Frobenius norm of a batch of TT-tensors. 
- with self.test_session() as sess: - tt = initializers.tensor_batch_with_random_cores((2, 1, 3), batch_size=3, - dtype=self.dtype) - norm_sq_actual = ops.frobenius_norm_squared(tt) - norm_actual = ops.frobenius_norm(tt) - vars = [norm_sq_actual, norm_actual, ops.full(tt)] - norm_sq_actual_val, norm_actual_val, tt_val = sess.run(vars) - tt_val = tt_val.reshape((3, -1)) - norm_sq_desired_val = np.sum(tt_val * tt_val, axis=1) - norm_desired_val = np.sqrt(norm_sq_desired_val) - self.assertAllClose(norm_sq_actual_val, norm_sq_desired_val) - self.assertAllClose(norm_actual_val, norm_desired_val, atol=1e-5, - rtol=1e-5) + tt = initializers.tensor_batch_with_random_cores((2, 2, 3), batch_size=3, + tt_rank=2, + dtype=self.dtype) + norm_sq_actual = ops.frobenius_norm_squared(tt) + norm_actual = ops.frobenius_norm(tt, epsilon=0.0) + vars = [norm_sq_actual, norm_actual, ops.full(tt)] + norm_sq_actual_val, norm_actual_val, tt_val = self.evaluate(vars) + tt_val = tt_val.reshape((3, -1)) + norm_sq_desired_val = np.sum(tt_val * tt_val, axis=1) + norm_desired_val = np.sqrt(norm_sq_desired_val) + self.assertAllClose(norm_sq_actual_val, norm_sq_desired_val) + self.assertAllClose(norm_actual_val, norm_desired_val, atol=1e-5, + rtol=1e-5) def testMultiplyBatchByTensor(self): tt_a = initializers.random_tensor((3, 3, 3), tt_rank=2, dtype=self.dtype) tt_b = initializers.random_tensor_batch((3, 3, 3), tt_rank=2, batch_size=5, dtype=self.dtype) - with self.test_session() as sess: - res_actual = ops.full(ops.multiply(tt_a, tt_b)) - res_actual2 = ops.full(ops.multiply(tt_b, tt_a)) - res_desired = ops.full(tt_a) * ops.full(tt_b) - to_run = [res_actual, res_actual2, res_desired] - res_actual_val, res_actual2_val, res_desired_val = sess.run(to_run) - self.assertAllClose(res_actual_val, res_desired_val) - self.assertAllClose(res_actual2_val, res_desired_val) + res_actual = ops.full(ops.multiply(tt_a, tt_b)) + res_actual2 = ops.full(ops.multiply(tt_b, tt_a)) + res_desired = ops.full(tt_a) * ops.full(tt_b) + to_run = [res_actual, res_actual2, res_desired] + res_actual_val, res_actual2_val, res_desired_val = self.evaluate(to_run) + self.assertAllClose(res_actual_val, res_desired_val) + self.assertAllClose(res_actual2_val, res_desired_val) def testMultiplyBatchByBatch(self): tt_a = initializers.random_tensor_batch((3, 3, 3), tt_rank=2, batch_size=5, @@ -648,130 +581,45 @@ def testMultiplyBatchByBatch(self): res_actual2 = ops.full(ops.multiply(tt_b, tt_a)) res_desired = ops.full(tt_a) * ops.full(tt_b) to_run = [res_actual, res_actual2, res_desired] - with self.test_session() as sess: - res_actual = ops.full(ops.multiply(tt_a, tt_b)) - res_actual2 = ops.full(ops.multiply(tt_b, tt_a)) - res_desired = ops.full(tt_a) * ops.full(tt_b) - to_run = [res_actual, res_actual2, res_desired] - res_actual_val, res_actual2_val, res_desired_val = sess.run(to_run) - self.assertAllClose(res_actual_val, res_desired_val) - self.assertAllClose(res_actual2_val, res_desired_val) + res_actual = ops.full(ops.multiply(tt_a, tt_b)) + res_actual2 = ops.full(ops.multiply(tt_b, tt_a)) + res_desired = ops.full(tt_a) * ops.full(tt_b) + to_run = [res_actual, res_actual2, res_desired] + res_actual_val, res_actual2_val, res_desired_val = self.evaluate(to_run) + self.assertAllClose(res_actual_val, res_desired_val) + self.assertAllClose(res_actual2_val, res_desired_val) def testMultiplyBroadcasting(self): tt_a = initializers.random_tensor_batch((3, 3, 3), tt_rank=2, batch_size=1, dtype=self.dtype) tt_b = initializers.random_tensor_batch((3, 3, 3), tt_rank=2, 
batch_size=5, dtype=self.dtype) - with self.test_session() as sess: - res_actual = ops.full(ops.multiply(tt_a, tt_b)) - res_actual2 = ops.full(ops.multiply(tt_b, tt_a)) - res_desired = ops.full(tt_a) * ops.full(tt_b) - to_run = [res_actual, res_actual2, res_desired] - res_actual_val, res_actual2_val, res_desired_val = sess.run(to_run) - self.assertAllClose(res_actual_val, res_desired_val) - self.assertAllClose(res_actual2_val, res_desired_val) - - def testMultiplyUnknownBatchSizeBroadcasting(self): - c1 = tf.placeholder(self.dtype, [None, 1, 3, 2]) - c2 = tf.placeholder(self.dtype, [None, 2, 3, 1]) - tt_a = TensorTrainBatch([c1, c2]) - tt_b = initializers.random_tensor_batch((3, 3), tt_rank=3, batch_size=1, - dtype=self.dtype) - tt_c = initializers.random_tensor((3, 3), tt_rank=3, - dtype=self.dtype) - res_ab = ops.full(ops.multiply(tt_a, tt_b)) - res_ba = ops.full(ops.multiply(tt_b, tt_a)) - res_ac = ops.full(ops.multiply(tt_a, tt_c)) - res_ca = ops.full(ops.multiply(tt_c, tt_a)) - res_desired_ab = ops.full(tt_a) * ops.full(tt_b) - res_desired_ac = ops.full(tt_a) * ops.full(tt_c) - to_run = [res_ab, res_ba, res_ac, res_ca, res_desired_ab, res_desired_ac] - feed_dict = {c1:np.random.rand(7, 1, 3, 2), - c2:np.random.rand(7, 2, 3, 1)} - with self.test_session() as sess: - ab, ba, ac, ca, des_ab, des_ac = sess.run(to_run, feed_dict=feed_dict) - self.assertAllClose(ab, des_ab) - self.assertAllClose(ba, des_ab) - self.assertAllClose(ac, des_ac) - self.assertAllClose(ca, des_ac) - - def testMultiplyTwoBatchesUnknownSize(self): - c1 = tf.placeholder(self.dtype, [None, 1, 3, 2]) - c2 = tf.placeholder(self.dtype, [None, 2, 3, 1]) - c3 = tf.placeholder(self.dtype, [None, 1, 3, 2]) - c4 = tf.placeholder(self.dtype, [None, 2, 3, 1]) - tt_a = TensorTrainBatch([c1, c2]) - tt_b = TensorTrainBatch([c3, c4]) - res_ab = ops.full(ops.multiply(tt_a, tt_b)) - res_ba = ops.full(ops.multiply(tt_b, tt_a)) - res_desired = ops.full(tt_a) * ops.full(tt_b) - to_run = [res_ab, res_ba, res_desired] - feed_dict = {c1:np.random.rand(7, 1, 3, 2), - c2:np.random.rand(7, 2, 3, 1), - c3:np.random.rand(7, 1, 3, 2), - c4:np.random.rand(7, 2, 3, 1)} - - feed_dict_err = {c1:np.random.rand(7, 1, 3, 2), - c2:np.random.rand(7, 2, 3, 1), - c3:np.random.rand(1, 1, 3, 2), - c4:np.random.rand(1, 2, 3, 1)} - - with self.test_session() as sess: - ab_full, ba_full, des_full = sess.run(to_run, feed_dict=feed_dict) - self.assertAllClose(ab_full, des_full) - self.assertAllClose(ba_full, des_full) - with self.assertRaises(tf.errors.InvalidArgumentError): - sess.run(to_run, feed_dict=feed_dict_err) - - def testMultiplyUnknownSizeBatchAndBatch(self): - c1 = tf.placeholder(self.dtype, [None, 1, 3, 2]) - c2 = tf.placeholder(self.dtype, [None, 2, 3, 1]) - tt_b = initializers.random_tensor_batch((3, 3), tt_rank=2, batch_size=8, - dtype=self.dtype) - tt_a = TensorTrainBatch([c1, c2]) - res_ab = ops.full(ops.multiply(tt_a, tt_b)) - res_ba = ops.full(ops.multiply(tt_b, tt_a)) + res_actual = ops.full(ops.multiply(tt_a, tt_b)) + res_actual2 = ops.full(ops.multiply(tt_b, tt_a)) res_desired = ops.full(tt_a) * ops.full(tt_b) - to_run = [res_ab, res_ba, res_desired] - feed_dict = {c1:np.random.rand(8, 1, 3, 2), - c2:np.random.rand(8, 2, 3, 1)} - - feed_dict_err = {c1:np.random.rand(1, 1, 3, 2), - c2:np.random.rand(1, 2, 3, 1)} - - with self.test_session() as sess: - ab_full, ba_full, des_full = sess.run(to_run, feed_dict=feed_dict) - self.assertAllClose(ab_full, des_full) - self.assertAllClose(ba_full, des_full) - with 
self.assertRaises(tf.errors.InvalidArgumentError): - sess.run(to_run, feed_dict=feed_dict_err) + to_run = [res_actual, res_actual2, res_desired] + res_actual_val, res_actual2_val, res_desired_val = self.evaluate(to_run) + self.assertAllClose(res_actual_val, res_desired_val) + self.assertAllClose(res_actual2_val, res_desired_val) def testGatherND(self): idx = [[0, 0, 0], [0, 1, 2], [0, 1, 0]] - pl_idx = tf.placeholder(tf.int32, [None, 3]) tt = initializers.random_tensor((3, 4, 5), tt_rank=2, dtype=self.dtype) res_np = ops.gather_nd(tt, idx) - res_pl = ops.gather_nd(tt, pl_idx) res_desired = tf.gather_nd(ops.full(tt), idx) - to_run = [res_np, res_pl, res_desired] - with self.test_session() as sess: - res_np_v, res_pl_v, des_v = sess.run(to_run, feed_dict={pl_idx: idx}) - self.assertAllClose(res_np_v, des_v) - self.assertAllClose(res_pl_v, res_pl_v) + to_run = [res_np, res_desired] + res_np_v, des_v = self.evaluate(to_run) + self.assertAllClose(res_np_v, des_v) def testGatherNDBatch(self): idx = [[0, 0, 0, 0], [1, 0, 1, 2], [0, 0, 1, 0]] - pl_idx = tf.placeholder(tf.int32, [None, 4]) tt = initializers.random_tensor_batch((3, 4, 5), tt_rank=2, batch_size=2, dtype=self.dtype) res_np = ops.gather_nd(tt, idx) - res_pl = ops.gather_nd(tt, pl_idx) res_desired = tf.gather_nd(ops.full(tt), idx) - to_run = [res_np, res_pl, res_desired] - with self.test_session() as sess: - res_np_v, res_pl_v, des_v = sess.run(to_run, feed_dict={pl_idx: idx}) - self.assertAllClose(res_np_v, des_v) - self.assertAllClose(res_pl_v, res_pl_v) + to_run = [res_np, res_desired] + res_np_v, des_v = self.evaluate(to_run) + self.assertAllClose(res_np_v, des_v) def testCoreRenormBatch(self): a = initializers.random_tensor_batch(3 * (10,), tt_rank=7, batch_size=5, @@ -779,15 +627,14 @@ def testCoreRenormBatch(self): b = ops.renormalize_tt_cores(a) var_list = [ops.full(a), ops.full(b)] - with self.test_session() as sess: - af, bf = sess.run(var_list) - b_cores = sess.run(b.tt_cores) - b_cores_norms = [] - for cr in b_cores: - b_cores_norms.append(np.linalg.norm(cr)) - self.assertAllClose(af, bf, atol=1e-5, rtol=1e-5) - self.assertAllClose(b_cores_norms, b_cores_norms[0] - * np.ones((len(b_cores)))) + af, bf = self.evaluate(var_list) + b_cores = self.evaluate(b.tt_cores) + b_cores_norms = [] + for cr in b_cores: + b_cores_norms.append(np.linalg.norm(cr)) + self.assertAllClose(af, bf, atol=1e-5, rtol=1e-5) + self.assertAllClose(b_cores_norms, b_cores_norms[0] + * np.ones((len(b_cores)))) class _TTMatrixTestBatch(): @@ -802,10 +649,9 @@ def testFullMatrix2d(self): desired = desired.reshape((3, 2, 3, 4, 5)) desired = desired.transpose((0, 1, 3, 2, 4)) desired = desired.reshape((3, 2 * 4, 3 * 5)) - with self.test_session(): - tf_mat = TensorTrainBatch(tt_cores) - actual = ops.full(tf_mat) - self.assertAllClose(desired, actual.eval()) + tf_mat = TensorTrainBatch(tt_cores) + actual = self.evaluate(ops.full(tf_mat)) + self.assertAllClose(desired, actual) def testFullMatrix3d(self): np.random.seed(1) @@ -820,10 +666,9 @@ def testFullMatrix3d(self): desired = desired.reshape((3, 2, 3, 4, 5, 2, 2)) desired = desired.transpose((0, 1, 3, 5, 2, 4, 6)) desired = desired.reshape((3, 2 * 4 * 2, 3 * 5 * 2)) - with self.test_session(): - tf_mat = TensorTrainBatch(tt_cores) - actual = ops.full(tf_mat) - self.assertAllClose(desired, actual.eval()) + tf_mat = TensorTrainBatch(tt_cores) + actual = self.evaluate(ops.full(tf_mat)) + self.assertAllClose(desired, actual) def testTTMatTimesTTMatSameBatchSize(self): # Multiply a batch of TT-matrices by another 
batch of TT-matrices with the @@ -831,19 +676,18 @@ def testTTMatTimesTTMatSameBatchSize(self): left_shape = (2, 3) sum_shape = (4, 3) right_shape = (4, 4) - with self.test_session() as sess: - tt_mat_1 = initializers.random_matrix_batch((left_shape, sum_shape), - tt_rank=3, batch_size=3, - dtype=self.dtype) - tt_mat_2 = initializers.random_matrix_batch((sum_shape, right_shape), - batch_size=3, - dtype=self.dtype) - res_actual = ops.matmul(tt_mat_1, tt_mat_2) - res_actual = ops.full(res_actual) - res_desired = tf.matmul(ops.full(tt_mat_1), ops.full(tt_mat_2)) - res_actual_val, res_desired_val = sess.run([res_actual, res_desired]) - # TODO: why so bad accuracy? - self.assertAllClose(res_actual_val, res_desired_val, atol=1e-5, rtol=1e-5) + tt_mat_1 = initializers.random_matrix_batch((left_shape, sum_shape), + tt_rank=3, batch_size=3, + dtype=self.dtype) + tt_mat_2 = initializers.random_matrix_batch((sum_shape, right_shape), + batch_size=3, + dtype=self.dtype) + res_actual = ops.matmul(tt_mat_1, tt_mat_2) + res_actual = ops.full(res_actual) + res_desired = tf.matmul(ops.full(tt_mat_1), ops.full(tt_mat_2)) + res_actual_val, res_desired_val = self.evaluate([res_actual, res_desired]) + # TODO: why is the accuracy so bad? + self.assertAllClose(res_actual_val, res_desired_val, atol=1e-5, rtol=1e-5) def testTTMatTimesTTMatBroadcasting(self): # Multiply a batch of TT-matrices by another batch of TT-matrices with @@ -851,34 +695,32 @@ left_shape = (2, 3) sum_shape = (4, 3) right_shape = (4, 4) - with self.test_session() as sess: - tt_mat_1 = initializers.random_matrix_batch((left_shape, sum_shape), - tt_rank=3, batch_size=3, - dtype=self.dtype) - tt_mat_2 = initializers.random_matrix_batch((sum_shape, right_shape), - dtype=self.dtype) - # TT-batch by one element TT-batch - res_actual = ops.matmul(tt_mat_1, tt_mat_2) - res_actual = ops.full(res_actual) - # TT by TT-batch. - res_actual2 = ops.matmul(ops.transpose(tt_mat_2[0]), ops.transpose(tt_mat_1)) - res_actual2 = ops.full(ops.transpose(res_actual2)) - res_desired = tf.einsum('oij,jk->oik', ops.full(tt_mat_1), - ops.full(tt_mat_2[0])) - to_run = [res_actual, res_actual2, res_desired] - res_actual_val, res_actual2_val, res_desired_val = sess.run(to_run) - self.assertAllClose(res_actual_val, res_desired_val, atol=1e-5, rtol=1e-5) - self.assertAllClose(res_actual2_val, res_desired_val, atol=1e-5, - rtol=1e-5) + tt_mat_1 = initializers.random_matrix_batch((left_shape, sum_shape), + tt_rank=3, batch_size=3, + dtype=self.dtype) + tt_mat_2 = initializers.random_matrix_batch((sum_shape, right_shape), + dtype=self.dtype) + # TT-batch by a one-element TT-batch. + res_actual = ops.matmul(tt_mat_1, tt_mat_2) + res_actual = ops.full(res_actual) + # TT by TT-batch. + res_actual2 = ops.matmul(ops.transpose(tt_mat_2[0]), ops.transpose(tt_mat_1)) + res_actual2 = ops.full(ops.transpose(res_actual2)) + res_desired = tf.einsum('oij,jk->oik', ops.full(tt_mat_1), + ops.full(tt_mat_2[0])) + to_run = [res_actual, res_actual2, res_desired] + res_actual_val, res_actual2_val, res_desired_val = self.evaluate(to_run) + self.assertAllClose(res_actual_val, res_desired_val, atol=1e-5, rtol=1e-5) + self.assertAllClose(res_actual2_val, res_desired_val, atol=1e-5, + rtol=1e-5) def testTranspose(self): # Transpose a batch of TT-matrices. 
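The broadcasting check above uses tf.einsum('oij,jk->oik', ...) as its dense reference: each matrix in the batch, indexed by o, is multiplied by the same single matrix. A standalone sketch with made-up shapes:

    import tensorflow as tf

    batch = tf.random.normal((3, 2, 4))   # a batch of three 2 x 4 matrices
    single = tf.random.normal((4, 5))     # one shared 4 x 5 matrix
    # 'oij,jk->oik' contracts over j and keeps the batch index o.
    ref = tf.einsum('oij,jk->oik', batch, single)
    assert ref.shape == (3, 2, 5)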
- with self.test_session() as sess: - tt = initializers.random_matrix_batch(((2, 3, 4), (2, 2, 2)), - batch_size=2, dtype=self.dtype) - res_actual = ops.full(ops.transpose(tt)) - res_actual_val, tt_val = sess.run([res_actual, ops.full(tt)]) - self.assertAllClose(tt_val.transpose((0, 2, 1)), res_actual_val) + tt = initializers.random_matrix_batch(((2, 3, 4), (2, 2, 2)), + batch_size=2, dtype=self.dtype) + res_actual = ops.full(ops.transpose(tt)) + res_actual_val, tt_val = self.evaluate([res_actual, ops.full(tt)]) + self.assertAllClose(tt_val.transpose((0, 2, 1)), res_actual_val) def testAddSameBatchSize(self): # Sum two TT-matrices with the same batch size. @@ -887,14 +729,13 @@ def testAddSameBatchSize(self): tt_a = initializers.random_matrix_batch(((2, 1, 4), None), tt_rank=2, batch_size=3, dtype=self.dtype) tt_b = initializers.random_matrix_batch(((2, 1, 4), None), tt_rank=[1, 2, 4, 1], batch_size=3, dtype=self.dtype) - with self.test_session() as sess: - res_actual = ops.full(ops.add(tt_a, tt_b)) - res_actual2 = ops.full(tt_a + tt_b) - res_desired = ops.full(tt_a) + ops.full(tt_b) - to_run = [res_actual, res_actual2, res_desired] - res_actual_val, res_actual2_val, res_desired_val = sess.run(to_run) - self.assertAllClose(res_actual_val, res_desired_val) - self.assertAllClose(res_actual2_val, res_desired_val) + res_actual = ops.full(ops.add(tt_a, tt_b)) + res_actual2 = ops.full(tt_a + tt_b) + res_desired = ops.full(tt_a) + ops.full(tt_b) + to_run = [res_actual, res_actual2, res_desired] + res_actual_val, res_actual2_val, res_desired_val = self.evaluate(to_run) + self.assertAllClose(res_actual_val, res_desired_val) + self.assertAllClose(res_actual2_val, res_desired_val) def testAddBroadcasting(self): # Sum two TT-matrices with broadcasting. @@ -903,25 +744,23 @@ tt_a = initializers.random_matrix_batch(((2, 1, 4), (2, 2, 2)), tt_rank=2, batch_size=3, dtype=self.dtype) tt_b = initializers.random_matrix_batch(((2, 1, 4), (2, 2, 2)), tt_rank=[1, 2, 4, 1], batch_size=1, dtype=self.dtype) - with self.test_session() as sess: - res_actual = ops.full(ops.add(tt_a, tt_b)) - res_actual2 = ops.full(tt_b + tt_a) - res_desired = ops.full(tt_a) + ops.full(tt_b) - to_run = [res_actual, res_actual2, res_desired] - res_actual_val, res_actual2_val, res_desired_val = sess.run(to_run) - self.assertAllClose(res_actual_val, res_desired_val) - self.assertAllClose(res_actual2_val, res_desired_val) + res_actual = ops.full(ops.add(tt_a, tt_b)) + res_actual2 = ops.full(tt_b + tt_a) + res_desired = ops.full(tt_a) + ops.full(tt_b) + to_run = [res_actual, res_actual2, res_desired] + res_actual_val, res_actual2_val, res_desired_val = self.evaluate(to_run) + self.assertAllClose(res_actual_val, res_desired_val) + self.assertAllClose(res_actual2_val, res_desired_val) def testCastFloat(self): # Test cast function for float tt-matrices and vectors. tt_mat = initializers.random_matrix_batch(((2, 3), (3, 2)), tt_rank=2, batch_size=3) - with self.test_session() as sess: - casted = ops.cast(tt_mat, self.dtype) - casted_val = sess.run(ops.full(casted)) - self.assertEqual(self.dtype, casted.dtype) - self.assertTrue(self.dtype, casted_val.dtype) + casted = ops.cast(tt_mat, self.dtype) + casted_val = self.evaluate(ops.full(casted)) + self.assertEqual(self.dtype, casted.dtype) + self.assertEqual(self.dtype, casted_val.dtype) def testCastIntFloat(self): # Tests cast function from int to float for matrices. 
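The two cast tests check dtype at both levels: the dtype attribute of the TensorTrain object and the dtype of the evaluated numpy values. The dense analogue of that check, with plain tensors and no t3f involved:

    import tensorflow as tf

    x = tf.constant([1, 2, 3], dtype=tf.int32)
    y = tf.cast(x, tf.float64)
    assert y.dtype == tf.float64           # tensor-level dtype
    assert y.numpy().dtype == 'float64'    # dtype of the concrete values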
@@ -932,11 +771,10 @@ def testCastIntFloat(self): tt_int = TensorTrain([K_1, K_2, K_3], tt_ranks=[1, 2, 2, 1]) tt_int_batch = shapes.expand_batch_dim(tt_int) - with self.test_session() as sess: - casted = ops.cast(tt_int_batch, self.dtype) - casted_val = sess.run(ops.full(casted)) - self.assertEqual(self.dtype, casted.dtype) - self.assertTrue(self.dtype, casted_val.dtype) + casted = ops.cast(tt_int_batch, self.dtype) + casted_val = self.evaluate(ops.full(casted)) + self.assertEqual(self.dtype, casted.dtype) + self.assertEqual(self.dtype, casted_val.dtype) def _random_sparse(shape, non_zeros): diff --git a/t3f/regularizers.py b/t3f/regularizers.py index 449ba7ca..6bfd87c2 100644 --- a/t3f/regularizers.py +++ b/t3f/regularizers.py @@ -1,6 +1,6 @@ import numbers -import tensorflow.compat.v1 as tf +import tensorflow as tf from t3f import ops @@ -30,7 +30,7 @@ def l2_regularizer(scale, scope=None): def l2(tt): """Applies l2 regularization to TensorTrain object.""" - with tf.name_scope(scope, 'l2_regularizer', values=tt.tt_cores) as name: + with tf.name_scope(scope, 'l2_regularizer') as name: my_scale = tf.convert_to_tensor(scale, dtype=tt.dtype, name='scale') return tf.multiply(my_scale, ops.frobenius_norm_squared(tt), name=name) @@ -65,7 +65,7 @@ def cores_regularizer(core_regularizer, scale, scope=None): def regularizer(tt): """Applies the regularization to TensorTrain object.""" - with tf.name_scope(scope, 'l2_regularizer', values=tt.tt_cores) as name: + with tf.name_scope(scope, 'cores_regularizer') as name: my_scale = tf.convert_to_tensor(scale, dtype=tt.dtype, name='scale') penalty = 0.0 for i in range(tt.ndims()): diff --git a/t3f/riemannian.py b/t3f/riemannian.py index f7317e2c..649d3537 100644 --- a/t3f/riemannian.py +++ b/t3f/riemannian.py @@ -1,4 +1,4 @@ -import tensorflow.compat.v1 as tf +import tensorflow as tf from t3f.tensor_train import TensorTrain from t3f.tensor_train_batch import TensorTrainBatch @@ -86,7 +86,7 @@ def project_sum(what, where, weights=None): if output_is_batch: right_rank_dim += 1 left_rank_dim += 1 - output_batch_size = weights.get_shape()[1].value + output_batch_size = weights.get_shape().as_list()[1] # Prepare rhs vectors. # rhs[core_idx] is of size @@ -668,8 +668,8 @@ def add_n_projected(tt_objects, coef=None): # the TT-cores number of dimensions. 
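The get_shape()[i].value rewrites in riemannian.py track a TensorShape API change: TF1 shape entries were Dimension objects carrying a .value attribute, while TF2 returns plain ints (or None) and .value is gone; shape.as_list() behaves the same under both. A sketch of the two ways to read a dimension:

    import tensorflow as tf

    x = tf.zeros((5, 7))
    static_rows = x.shape.as_list()[0]   # 5, or None if unknown statically
    dynamic_rows = tf.shape(x)[0]        # a Tensor, always defined at run time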
some_core = tt_objects[0].tt_cores[0] dim_array = [1] * (some_core.get_shape().ndims + 1) - dim_array[0] = coef.get_shape()[0].value - dim_array[1] = coef.get_shape()[1].value + dim_array[0] = coef.get_shape().as_list()[0] + dim_array[1] = coef.get_shape().as_list()[1] coef = tf.reshape(coef, dim_array) ndims = tt_objects[0].ndims() @@ -780,7 +780,7 @@ def tangent_space_to_deltas(tt, name='t3f_tangent_space_to_deltas'): if int(tt_ranks[i] / 2) != tt_ranks[i] / 2: raise ValueError('tt argument is supposed to be a projection, but its ' 'ranks are not even.') - with tf.name_scope(name, values=tt.tt_cores): + with tf.name_scope(name): for i in range(1, num_dims - 1): r1, r2 = tt_ranks[i], tt_ranks[i + 1] curr_core = tt.tt_cores[i] @@ -832,7 +832,7 @@ def deltas_to_tangent_space(deltas, tt, left=None, right=None, input_tensors += list(left.tt_cores) if right is not None: input_tensors += list(right.tt_cores) - with tf.name_scope(name, values=input_tensors): + with tf.name_scope(name): if left is None: left = decompositions.orthogonalize_tt_cores(tt) if right is None: diff --git a/t3f/riemannian_test.py b/t3f/riemannian_test.py index 1531573b..2d81e4aa 100644 --- a/t3f/riemannian_test.py +++ b/t3f/riemannian_test.py @@ -1,6 +1,7 @@ import numpy as np -import tensorflow.compat.v1 as tf -tf.enable_resource_variables() +import tensorflow as tf +tf.compat.v1.enable_eager_execution() +tf.compat.v1.enable_resource_variables() from t3f.tensor_train import TensorTrain from t3f import ops @@ -17,9 +18,8 @@ def testProjectOnItself(self): # Projection of X into the tangent space of itself is X: P_x(x) = x. tens = initializers.random_tensor((2, 3, 4), dtype=self.dtype) proj = riemannian.project_sum(tens, tens) - with self.test_session() as sess: - actual_val, desired_val = sess.run((ops.full(proj), ops.full(tens))) - self.assertAllClose(desired_val, actual_val) + actual_val, desired_val = self.evaluate((ops.full(proj), ops.full(tens))) + self.assertAllClose(desired_val, actual_val) def testProject(self): # Compare our projection with the results obtained (and precomputed) from @@ -59,10 +59,9 @@ def testProject(self): [ 0.34431125, -0.20935516, -1.15864246]] proj = riemannian.project_sum(tens, tangent_tens) proj_full = ops.full(proj) - with self.test_session() as sess: - proj_v = proj_full.eval() - self.assertAllClose(desired_projection, proj_v) - self.assertEqual(self.dtype.as_numpy_dtype, proj_v.dtype) + proj_v = self.evaluate(proj_full) + self.assertAllClose(desired_projection, proj_v) + self.assertEqual(self.dtype.as_numpy_dtype, proj_v.dtype) def testProjectSum(self): # Test projecting a batch of TT-tensors. @@ -73,10 +72,9 @@ def testProjectSum(self): weighted_sum = tens[0] + tens[1] + tens[2] direct_proj = riemannian.project_sum(weighted_sum, tangent_tens) actual_proj = riemannian.project_sum(tens, tangent_tens) - with self.test_session() as sess: - res = sess.run((ops.full(direct_proj), ops.full(actual_proj))) - desired_val, actual_val = res - self.assertAllClose(desired_val, actual_val) + res = self.evaluate((ops.full(direct_proj), ops.full(actual_proj))) + desired_val, actual_val = res + self.assertAllClose(desired_val, actual_val) def testProjectWeightedSum(self): # Test projecting a batch of TT-tensors with providing coefs. 
@@ -89,10 +87,9 @@ def testProjectWeightedSum(self): weighted_sum += coef[3] * tens[3] direct_proj = riemannian.project_sum(weighted_sum, tangent_tens) actual_proj = riemannian.project_sum(tens, tangent_tens, coef) - with self.test_session() as sess: - res = sess.run((ops.full(direct_proj), ops.full(actual_proj))) - desired_val, actual_val = res - self.assertAllClose(desired_val, actual_val) + res = self.evaluate((ops.full(direct_proj), ops.full(actual_proj))) + desired_val, actual_val = res + self.assertAllClose(desired_val, actual_val) def testProjectWeightedSumMultipleOutputs(self): # Test projecting a batch of TT-tensors with providing weights and outputing @@ -113,10 +110,9 @@ def testProjectWeightedSumMultipleOutputs(self): direct_proj_2 = shapes.expand_batch_dim(direct_proj_2) direct_projs = batch_ops.concat_along_batch_dim((direct_proj_1, direct_proj_2)) actual_proj = riemannian.project_sum(tens, tangent_tens, weights) - with self.test_session() as sess: - res = sess.run((ops.full(direct_projs), ops.full(actual_proj))) - desired_val, actual_val = res - self.assertAllClose(desired_val, actual_val, atol=1e-5, rtol=1e-5) + res = self.evaluate((ops.full(direct_projs), ops.full(actual_proj))) + desired_val, actual_val = res + self.assertAllClose(desired_val, actual_val, atol=1e-5, rtol=1e-5) def testProjectWeightedSumDtypeBug(self): # Test that project_sum(TensorTrain, TensorTrain variable, np.array) works. @@ -134,9 +130,8 @@ def testProjectMatrixOnItself(self): tt_mat = initializers.random_matrix(((2, 3, 4), (2, 3, 4)), dtype=self.dtype) proj = riemannian.project_sum(tt_mat, tt_mat) - with self.test_session() as sess: - actual_val, desired_val = sess.run((ops.full(proj), ops.full(tt_mat))) - self.assertAllClose(desired_val, actual_val) + actual_val, desired_val = self.evaluate((ops.full(proj), ops.full(tt_mat))) + self.assertAllClose(desired_val, actual_val) def testCompareProjectSumAndProject(self): # Compare results of project_sum and project. @@ -146,10 +141,9 @@ def testCompareProjectSumAndProject(self): dtype=self.dtype) project_sum = riemannian.project_sum(tens, tangent_tens, np.eye(4)) project = riemannian.project(tens, tangent_tens) - with self.test_session() as sess: - res = sess.run((ops.full(project_sum), ops.full(project))) - project_sum_val, project_val = res - self.assertAllClose(project_sum_val, project_val) + res = self.evaluate((ops.full(project_sum), ops.full(project))) + project_sum_val, project_val = res + self.assertAllClose(project_sum_val, project_val) def testProjectMatmul(self): # Project a TT-matrix times TT-vector on a TT-vector. @@ -163,9 +157,8 @@ def testProjectMatmul(self): proj = riemannian.project_matmul(tt_vec_what, tt_vec_where, tt_mat) matvec = ops.matmul(tt_mat, tt_vec_what) proj_desired = riemannian.project(matvec, tt_vec_where) - with self.test_session() as sess: - actual_val, desired_val = sess.run((ops.full(proj), ops.full(proj_desired))) - self.assertAllClose(desired_val, actual_val, atol=1e-5, rtol=1e-5) + actual_val, desired_val = self.evaluate((ops.full(proj), ops.full(proj_desired))) + self.assertAllClose(desired_val, actual_val, atol=1e-5, rtol=1e-5) def testPairwiseFlatInnerTensor(self): # Compare pairwise_flat_inner_projected against naive implementation. 
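pairwise_flat_inner, used as the reference here, is conceptually the Gram matrix of inner products between two batches: result[i, j] = <a_i, b_j>. The dense version of that idea, with illustrative shapes:

    import tensorflow as tf

    a = tf.random.normal((5, 3, 3, 3))   # batch of 5 tensors
    b = tf.random.normal((4, 3, 3, 3))   # batch of 4 tensors
    # Sum over all tensor entries for every pair (i, j).
    pairwise = tf.einsum('aijk,bijk->ab', a, b)
    assert pairwise.shape == (5, 4)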
@@ -178,9 +171,8 @@ def testPairwiseFlatInnerTensor(self): projected2 = riemannian.project(what2, where) desired = batch_ops.pairwise_flat_inner(projected1, projected2) actual = riemannian.pairwise_flat_inner_projected(projected1, projected2) - with self.test_session() as sess: - desired_val, actual_val = sess.run((desired, actual)) - self.assertAllClose(desired_val, actual_val, atol=1e-5, rtol=1e-5) + desired_val, actual_val = self.evaluate((desired, actual)) + self.assertAllClose(desired_val, actual_val, atol=1e-5, rtol=1e-5) with self.assertRaises(ValueError): # Second argument is not a projection on the tangent space. @@ -203,9 +195,8 @@ def testPairwiseFlatInnerMatrix(self): projected2 = riemannian.project(what2, where) desired = batch_ops.pairwise_flat_inner(projected1, projected2) actual = riemannian.pairwise_flat_inner_projected(projected1, projected2) - with self.test_session() as sess: - desired_val, actual_val = sess.run((desired, actual)) - self.assertAllClose(desired_val, actual_val, atol=1e-5, rtol=1e-5) + desired_val, actual_val = self.evaluate((desired, actual)) + self.assertAllClose(desired_val, actual_val, atol=1e-5, rtol=1e-5) with self.assertRaises(ValueError): # Second argument is not a projection on the tangent space. @@ -228,9 +219,8 @@ def testAddNProjected(self): projected2 = riemannian.project(what2, where) desired = ops.full(projected1 + projected2) actual = ops.full(riemannian.add_n_projected((projected1, projected2))) - with self.test_session() as sess: - desired_val, actual_val = sess.run((desired, actual)) - self.assertAllClose(desired_val, actual_val, atol=1e-5, rtol=1e-5) + desired_val, actual_val = self.evaluate((desired, actual)) + self.assertAllClose(desired_val, actual_val, atol=1e-5, rtol=1e-5) with self.assertRaises(ValueError): # Second argument is not a projection on the tangent space. @@ -251,9 +241,8 @@ def testWeightedAddNProjected(self): desired = ops.full(1.2 * projected1 + -2.0 * projected2) actual = ops.full(riemannian.add_n_projected((projected1, projected2), coef=[1.2, -2.0])) - with self.test_session() as sess: - desired_val, actual_val = sess.run((desired, actual)) - self.assertAllClose(desired_val, actual_val) + desired_val, actual_val = self.evaluate((desired, actual)) + self.assertAllClose(desired_val, actual_val) with self.assertRaises(ValueError): # Second argument is not a projection on the tangent space. @@ -282,9 +271,8 @@ def testWeightedAddNProjectedBatch(self): actual = ops.full(riemannian.add_n_projected((projected1, projected2), coef=[[1.2, 1.9, 0.0], [-2.0, 2.0, 1.0]])) - with self.test_session() as sess: - desired_val, actual_val = sess.run((desired, actual)) - self.assertAllClose(desired_val, actual_val, atol=1e-5, rtol=1e-5) + desired_val, actual_val = self.evaluate((desired, actual)) + self.assertAllClose(desired_val, actual_val, atol=1e-5, rtol=1e-5) def testToAndFromDeltas(self): # Test converting to and from deltas representation of the tangent space @@ -298,13 +286,12 @@ def testToAndFromDeltas(self): # Tangent space element norm can be computed from deltas norm. 
projected_normsq_desired = ops.frobenius_norm_squared(projected) projected_normsq_actual = tf.add_n([tf.reduce_sum(c * c) for c in deltas]) - with self.test_session() as sess: - desired_val, actual_val = sess.run((ops.full(projected), - ops.full(reconstructed_projected))) - self.assertAllClose(desired_val, actual_val) - desired_val, actual_val = sess.run((projected_normsq_desired, - projected_normsq_actual)) - self.assertAllClose(desired_val, actual_val) + desired_val, actual_val = self.evaluate((ops.full(projected), + ops.full(reconstructed_projected))) + self.assertAllClose(desired_val, actual_val) + desired_val, actual_val = self.evaluate((projected_normsq_desired, + projected_normsq_actual)) + self.assertAllClose(desired_val, actual_val) def testToAndFromDeltasBatch(self): # Test converting to and from deltas representation of the tangent space @@ -322,13 +309,12 @@ def testToAndFromDeltasBatch(self): d_normssq = [tf.reduce_sum(tf.reshape(c, (3, -1)) ** 2, 1) for c in deltas] projected_normsq_actual = tf.add_n(d_normssq) - with self.test_session() as sess: - desired_val, actual_val = sess.run((ops.full(projected), - ops.full(reconstructed_projected))) - self.assertAllClose(desired_val, actual_val) - desired_val, actual_val = sess.run((projected_normsq_desired, - projected_normsq_actual)) - self.assertAllClose(desired_val, actual_val) + desired_val, actual_val = self.evaluate((ops.full(projected), + ops.full(reconstructed_projected))) + self.assertAllClose(desired_val, actual_val) + desired_val, actual_val = self.evaluate((projected_normsq_desired, + projected_normsq_actual)) + self.assertAllClose(desired_val, actual_val) class RiemannianTestFloat32(tf.test.TestCase, _RiemannianTest): diff --git a/t3f/shapes.py b/t3f/shapes.py index 717474f9..de13d942 100644 --- a/t3f/shapes.py +++ b/t3f/shapes.py @@ -1,5 +1,5 @@ import numpy as np -import tensorflow.compat.v1 as tf +import tensorflow as tf # TODO: test all these functions. @@ -18,7 +18,7 @@ def tt_ranks(tt, name='t3f_tt_ranks'): """ num_dims = tt.ndims() ranks = [] - with tf.name_scope(name, values=tt.tt_cores): + with tf.name_scope(name): for i in range(num_dims): ranks.append(tf.shape(tt.tt_cores[i])[tt.left_tt_rank_dim]) ranks.append(tf.shape(tt.tt_cores[-1])[-1]) @@ -41,7 +41,7 @@ def shape(tt, name='t3f_shape'): Returns: A `Tensor` """ - with tf.name_scope(name, values=tt.tt_cores): + with tf.name_scope(name): tt_raw_shape = raw_shape(tt) if tt.is_tt_matrix(): res = tf.reduce_prod(tt_raw_shape, axis=1) @@ -75,7 +75,7 @@ def raw_shape(tt, name='t3f_raw_shape'): num_dims = tt.ndims() num_tensor_axis = len(tt.get_raw_shape()) final_raw_shape = [] - with tf.name_scope(name, values=tt.tt_cores): + with tf.name_scope(name): # TODO: ugly. from t3f.tensor_train import TensorTrain axes_shift = 1 if isinstance(tt, TensorTrain) else 2 @@ -104,7 +104,7 @@ def batch_size(tt, name='t3f_batch_size'): raise ValueError('batch size is not available for a TensorTrain object.') first_core = tt.tt_cores[0] # The first dimension of any TT-core in TensorTrainBatch is the batch size. 
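The name_scope edits in the shapes.py hunks that follow all drop the values= argument: TF2's tf.name_scope takes a single name and no longer accepts default_name or values, which only affected graph construction. The surviving form, sketched:

    import tensorflow as tf

    def tt_ranks_like(core):
      # In graph mode, ops created here are grouped under 't3f_tt_ranks/';
      # under eager execution the scope has essentially no effect.
      with tf.name_scope('t3f_tt_ranks'):
        return tf.shape(core)[0]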
- with tf.name_scope(name, values=tt.tt_cores): + with tf.name_scope(name): return tf.shape(first_core)[0] @@ -122,7 +122,7 @@ def lazy_tt_ranks(tt, name='t3f_lazy_tt_ranks'): Returns: A 1-D numpy array or `tf.Tensor` """ - with tf.name_scope(name, values=tt.tt_cores): + with tf.name_scope(name): static_tt_ranks = tt.get_tt_ranks() if static_tt_ranks.is_fully_defined(): return np.array(static_tt_ranks.as_list()) @@ -144,7 +144,7 @@ def lazy_shape(tt, name='t3f_lazy_shape'): Returns: A 1-D numpy array or `tf.Tensor` """ - with tf.name_scope(name, values=tt.tt_cores): + with tf.name_scope(name): static_shape = tt.get_shape() if static_shape.is_fully_defined(): return np.array(static_shape.as_list()) @@ -171,7 +171,7 @@ def lazy_raw_shape(tt, name='t3f_lazy_raw_shape'): """ # If get_shape is fully defined, it guaranties that all elements of raw shape # are defined. - with tf.name_scope(name, values=tt.tt_cores): + with tf.name_scope(name): if tt.get_shape().is_fully_defined(): return np.array([s.as_list() for s in tt.get_raw_shape()]) else: @@ -192,7 +192,7 @@ def lazy_batch_size(tt, name='t3f_lazy_batch_size'): ValueError if got `TensorTrain` which doesn't have batch_size as input.""" if not hasattr(tt, 'batch_size'): raise ValueError('batch size is not available for a TensorTrain object.') - with tf.name_scope(name, values=tt.tt_cores): + with tf.name_scope(name): if tt.batch_size is not None: return tt.batch_size else: @@ -274,7 +274,7 @@ def squeeze_batch_dim(tt, name='t3f_squeeze_batch_dim'): at compilation stage) or a TensorTrain. TensorTrainBatch otherwise. """ - with tf.name_scope(name, values=tt.tt_cores): + with tf.name_scope(name): try: if tt.batch_size == 1: return tt[0] @@ -296,7 +296,7 @@ def expand_batch_dim(tt, name='t3f_expand_batch_dim'): Returns: TensorTrainBatch """ - with tf.name_scope(name, values=tt.tt_cores): + with tf.name_scope(name): if hasattr(tt, 'batch_size'): return tt else: diff --git a/t3f/shapes_test.py b/t3f/shapes_test.py index 8253fd17..3981d98a 100644 --- a/t3f/shapes_test.py +++ b/t3f/shapes_test.py @@ -1,4 +1,5 @@ -import tensorflow.compat.v1 as tf +import tensorflow as tf +tf.compat.v1.enable_eager_execution() from t3f import initializers from t3f import shapes diff --git a/t3f/tensor_train.py b/t3f/tensor_train.py index d1128894..79807004 100644 --- a/t3f/tensor_train.py +++ b/t3f/tensor_train.py @@ -1,4 +1,4 @@ -import tensorflow.compat.v1 as tf +import tensorflow as tf from t3f.tensor_train_base import TensorTrainBase from t3f import shapes diff --git a/t3f/tensor_train_base.py b/t3f/tensor_train_base.py index 6ea95335..f4c25b79 100644 --- a/t3f/tensor_train_base.py +++ b/t3f/tensor_train_base.py @@ -1,6 +1,6 @@ from functools import reduce import numpy as np -import tensorflow.compat.v1 as tf +import tensorflow as tf # TODO: check the methods of _TensorLike diff --git a/t3f/tensor_train_batch.py b/t3f/tensor_train_batch.py index fcabdcc6..976582ac 100644 --- a/t3f/tensor_train_batch.py +++ b/t3f/tensor_train_batch.py @@ -1,5 +1,5 @@ import numpy as np -import tensorflow.compat.v1 as tf +import tensorflow as tf from t3f.tensor_train_base import TensorTrainBase from t3f.tensor_train import TensorTrain @@ -55,7 +55,7 @@ def __init__(self, tt_cores, shape=None, tt_ranks=None, batch_size=None, self._tt_cores = tuple(tt_cores) if batch_size is None: - self._batch_size = tt_cores[0].get_shape()[0].value + self._batch_size = tt_cores[0].shape.as_list()[0] else: self._batch_size = batch_size self._raw_shape = shapes.clean_raw_shape(shape) @@ -171,7 +171,7 
@@ def _batch_dim_getitem(self, element_spec): return TensorTrain(new_tt_cores, self.get_raw_shape(), self.get_tt_ranks()) else: - batch_size = new_tt_cores[0].get_shape()[0].value + batch_size = new_tt_cores[0].shape.as_list()[0] return TensorTrainBatch(new_tt_cores, self.get_raw_shape(), self.get_tt_ranks(), batch_size) @@ -295,17 +295,18 @@ def _are_batch_tt_cores_valid(tt_cores, shape, tt_ranks, batch_size): return False try: for core_idx in range(num_dims): - curr_core_shape = tt_cores[core_idx].get_shape() + curr_core_shape = tt_cores[core_idx].shape.as_list() if len(curr_core_shape) != len(tt_cores[0].get_shape()): # Shapes are inconsistent. return False - if batch_size is not None and curr_core_shape[0].value is not None: - if curr_core_shape[0].value != batch_size: + if batch_size is not None and curr_core_shape[0] is not None: + if curr_core_shape[0] != batch_size: # The TT-cores are not aligned with the given batch_size. return False if shape is not None: for i in range(len(shape)): - if curr_core_shape[i + 2] != shape[i][core_idx]: + dim_a, dim_b = curr_core_shape[i + 2], shape[i][core_idx] + if dim_a is not None and dim_b is not None and dim_a != dim_b: # The TT-cores are not aligned with the given shape. return False if core_idx >= 1: diff --git a/t3f/tensor_train_batch_no_eager_test.py b/t3f/tensor_train_batch_no_eager_test.py new file mode 100644 index 00000000..a6ed84a5 --- /dev/null +++ b/t3f/tensor_train_batch_no_eager_test.py @@ -0,0 +1,48 @@ +import numpy as np +import tensorflow.compat.v1 as tf +tf.disable_eager_execution() + +from t3f import initializers +from t3f import ops + + +class _TensorTrainBatchTest(): + + def testPlaceholderTensorIndexing(self): + tens = initializers.random_tensor_batch((3, 3, 4), batch_size=3, + dtype=self.dtype) + with tf.Session() as sess: + start = tf.placeholder(tf.int32) + end = tf.placeholder(tf.int32) + + desired = ops.full(tens)[0:-1] + actual = ops.full(tens[start:end]) + desired, actual = sess.run([desired, actual], {start: 0, end: -1}) + self.assertAllClose(desired, actual) + + desired = ops.full(tens)[0:1] + actual = ops.full(tens[start:end]) + desired, actual = sess.run([desired, actual], {start: 0, end: 1}) + self.assertAllClose(desired, actual) + + desired = ops.full(tens)[1] + actual = ops.full(tens[start]) + desired, actual = sess.run([desired, actual], {start: 1}) + self.assertAllClose(desired, actual) + + desired = ops.full(tens)[1, 1:3, 1, :3] + actual = ops.full(tens[start, start:end, start, :end]) + desired, actual = sess.run([desired, actual], {start: 1, end: 3}) + self.assertAllClose(desired, actual) + + +class TensorTrainBatchTestFloat32(tf.test.TestCase, _TensorTrainBatchTest): + dtype = tf.float32 + + +class TensorTrainBatchTestFloat64(tf.test.TestCase, _TensorTrainBatchTest): + dtype = tf.float64 + + +if __name__ == "__main__": + tf.test.main() \ No newline at end of file diff --git a/t3f/tensor_train_batch_test.py b/t3f/tensor_train_batch_test.py index d9f558db..0b63a188 100644 --- a/t3f/tensor_train_batch_test.py +++ b/t3f/tensor_train_batch_test.py @@ -1,5 +1,6 @@ import numpy as np -import tensorflow.compat.v1 as tf +import tensorflow as tf +tf.compat.v1.enable_eager_execution() from t3f import initializers from t3f import ops @@ -10,70 +11,42 @@ class _TensorTrainBatchTest(): def testTensorIndexing(self): tens = initializers.random_tensor_batch((3, 3, 4), batch_size=3, dtype=self.dtype) - with self.test_session() as sess: - desired = ops.full(tens)[:, :, :, :] - actual = ops.full(tens[:, :, :, :]) - desired, 
actual = sess.run([desired, actual]) - self.assertAllClose(desired, actual) - desired = ops.full(tens)[1:3, :, :, :] - actual = ops.full(tens[1:3]) - desired, actual = sess.run([desired, actual]) - self.assertAllClose(desired, actual) - desired = ops.full(tens)[1, :, :, :] - actual = ops.full(tens[1]) - desired, actual = sess.run([desired, actual]) - self.assertAllClose(desired, actual) - desired = ops.full(tens)[2, 1, :, :] - actual = ops.full(tens[2, 1, :, :]) - desired, actual = sess.run([desired, actual]) - self.assertAllClose(desired, actual) - desired = ops.full(tens)[2, 1:2, 1, :] - actual = ops.full(tens[2, 1:2, 1, :]) - desired, actual = sess.run([desired, actual]) - self.assertAllClose(desired, actual) - desired = ops.full(tens)[1:2, 0:3, :, 3] - actual = ops.full(tens[1:2, 0:3, :, 3]) - desired, actual = sess.run([desired, actual]) - self.assertAllClose(desired, actual) - desired = ops.full(tens)[:, 1, :, 3] - actual = ops.full(tens[:, 1, :, 3]) - desired, actual = sess.run([desired, actual]) - self.assertAllClose(desired, actual) - - # Wrong number of dims. - with self.assertRaises(ValueError): - tens[1, :, 3] - with self.assertRaises(ValueError): - tens[1, :, 3, 1:2, 1:3] - with self.assertRaises(ValueError): - tens[1, 1] - - def testPlaceholderTensorIndexing(self): - tens = initializers.random_tensor_batch((3, 3, 4), batch_size=3, - dtype=self.dtype) - with self.test_session() as sess: - start = tf.placeholder(tf.int32) - end = tf.placeholder(tf.int32) - - desired = ops.full(tens)[0:-1] - actual = ops.full(tens[start:end]) - desired, actual = sess.run([desired, actual], {start: 0, end: -1}) - self.assertAllClose(desired, actual) - - desired = ops.full(tens)[0:1] - actual = ops.full(tens[start:end]) - desired, actual = sess.run([desired, actual], {start: 0, end: 1}) - self.assertAllClose(desired, actual) - - desired = ops.full(tens)[1] - actual = ops.full(tens[start]) - desired, actual = sess.run([desired, actual], {start: 1}) - self.assertAllClose(desired, actual) - - desired = ops.full(tens)[1, 1:3, 1, :3] - actual = ops.full(tens[start, start:end, start, :end]) - desired, actual = sess.run([desired, actual], {start: 1, end: 3}) - self.assertAllClose(desired, actual) + desired = ops.full(tens)[:, :, :, :] + actual = ops.full(tens[:, :, :, :]) + desired, actual = self.evaluate([desired, actual]) + self.assertAllClose(desired, actual) + desired = ops.full(tens)[1:3, :, :, :] + actual = ops.full(tens[1:3]) + desired, actual = self.evaluate([desired, actual]) + self.assertAllClose(desired, actual) + desired = ops.full(tens)[1, :, :, :] + actual = ops.full(tens[1]) + desired, actual = self.evaluate([desired, actual]) + self.assertAllClose(desired, actual) + desired = ops.full(tens)[2, 1, :, :] + actual = ops.full(tens[2, 1, :, :]) + desired, actual = self.evaluate([desired, actual]) + self.assertAllClose(desired, actual) + desired = ops.full(tens)[2, 1:2, 1, :] + actual = ops.full(tens[2, 1:2, 1, :]) + desired, actual = self.evaluate([desired, actual]) + self.assertAllClose(desired, actual) + desired = ops.full(tens)[1:2, 0:3, :, 3] + actual = ops.full(tens[1:2, 0:3, :, 3]) + desired, actual = self.evaluate([desired, actual]) + self.assertAllClose(desired, actual) + desired = ops.full(tens)[:, 1, :, 3] + actual = ops.full(tens[:, 1, :, 3]) + desired, actual = self.evaluate([desired, actual]) + self.assertAllClose(desired, actual) + + # Wrong number of dims. 
+ with self.assertRaises(ValueError): + tens[1, :, 3] + with self.assertRaises(ValueError): + tens[1, :, 3, 1:2, 1:3] + with self.assertRaises(ValueError): + tens[1, 1] def testShapeOverflow(self): large_shape = [10] * 20 diff --git a/t3f/tensor_train_no_eager_test.py b/t3f/tensor_train_no_eager_test.py new file mode 100644 index 00000000..ef1f2ef0 --- /dev/null +++ b/t3f/tensor_train_no_eager_test.py @@ -0,0 +1,32 @@ +import numpy as np +import tensorflow.compat.v1 as tf +tf.disable_eager_execution() + +from t3f import tensor_train +from t3f import initializers +from t3f import ops + + +class _TensorTrainTest(): + + def testPlaceholderTensorIndexing(self): + tens = initializers.random_tensor((3, 3, 4), dtype=self.dtype) + with tf.Session() as sess: + start = tf.placeholder(tf.int32) + end = tf.placeholder(tf.int32) + desired = ops.full(tens)[1:3, 1, :3] + actual = ops.full(tens[start:end, start, :end]) + desired, actual = sess.run([desired, actual], {start: 1, end: 3}) + self.assertAllClose(desired, actual) + + +class TensorTrainTestFloat32(tf.test.TestCase, _TensorTrainTest): + dtype = tf.float32 + + +class TensorTrainTestFloat64(tf.test.TestCase, _TensorTrainTest): + dtype = tf.float64 + + +if __name__ == "__main__": + tf.test.main() \ No newline at end of file diff --git a/t3f/tensor_train_test.py b/t3f/tensor_train_test.py index f4d05344..507b267d 100644 --- a/t3f/tensor_train_test.py +++ b/t3f/tensor_train_test.py @@ -1,5 +1,6 @@ import numpy as np -import tensorflow.compat.v1 as tf +import tensorflow as tf +tf.compat.v1.enable_eager_execution() from t3f import tensor_train from t3f import initializers @@ -27,27 +28,26 @@ def testValidateTTCores2d(self): ((1, 2, 1, 1), (1, 2, 1), False)) for tt_ranks, claimed_tt_ranks, desired in schedule: - a = tf.random_normal((tt_ranks[0], 10, tt_ranks[1]), dtype=self.dtype) - b = tf.random_normal((tt_ranks[2], 9, tt_ranks[3]), dtype=self.dtype) - with self.test_session(): - actual = tensor_train._are_tt_cores_valid((a, b), (10, 9), - claimed_tt_ranks) - self.assertEqual(desired, actual) - # Wrong shape. - actual = tensor_train._are_tt_cores_valid((a, b), (9, 9), - claimed_tt_ranks) - self.assertEqual(False, actual) - if not desired: - with self.assertRaises(ValueError): - tensor_train.TensorTrain((a, b), (10, 9), claimed_tt_ranks) - - # Make dtypes inconsistent. - b_new = tf.cast(b, tf.float16) - actual = tensor_train._are_tt_cores_valid((a, b_new), (10, 9), - claimed_tt_ranks) - self.assertEqual(False, actual) + a = tf.random.normal((tt_ranks[0], 10, tt_ranks[1]), dtype=self.dtype) + b = tf.random.normal((tt_ranks[2], 9, tt_ranks[3]), dtype=self.dtype) + actual = tensor_train._are_tt_cores_valid((a, b), (10, 9), + claimed_tt_ranks) + self.assertEqual(desired, actual) + # Wrong shape. + actual = tensor_train._are_tt_cores_valid((a, b), (9, 9), + claimed_tt_ranks) + self.assertEqual(False, actual) + if not desired: with self.assertRaises(ValueError): - tensor_train.TensorTrain((a, b_new), (10, 9), claimed_tt_ranks) + tensor_train.TensorTrain((a, b), (10, 9), claimed_tt_ranks) + + # Make dtypes inconsistent. 
+ b_new = tf.cast(b, tf.float16) + actual = tensor_train._are_tt_cores_valid((a, b_new), (10, 9), + claimed_tt_ranks) + self.assertEqual(False, actual) + with self.assertRaises(ValueError): + tensor_train.TensorTrain((a, b_new), (10, 9), claimed_tt_ranks) def testValidateTTCores3d(self): schedule = (((1, 1, 1, 1, 1, 1), (1, 1, 1, 1), True), @@ -72,68 +72,56 @@ def testValidateTTCores3d(self): ((1, 2, 2, 3, 3, 1), None, True)) for tt_ranks, claimed_tt_ranks, desired in schedule: - a = tf.random_normal((tt_ranks[0], 10, tt_ranks[1]), dtype=self.dtype) - b = tf.random_normal((tt_ranks[2], 1, tt_ranks[3]), dtype=self.dtype) - c = tf.random_normal((tt_ranks[4], 2, tt_ranks[5]), dtype=self.dtype) - with self.test_session(): - actual = tensor_train._are_tt_cores_valid((a, b, c), (10, 1, 2), - claimed_tt_ranks) - self.assertEqual(desired, actual) - # Wrong shape. - actual = tensor_train._are_tt_cores_valid((a, b, c), (10, 1, 1), - claimed_tt_ranks) - self.assertEqual(False, actual) - if not desired: - with self.assertRaises(ValueError): - tensor_train.TensorTrain((a, b, c), (10, 1, 2), claimed_tt_ranks) - - # Make dtypes inconsistent. - b_new = tf.cast(b, tf.float16) - actual = tensor_train._are_tt_cores_valid((a, b_new, c), (10, 1, 2), - claimed_tt_ranks) - self.assertEqual(False, actual) + a = tf.random.normal((tt_ranks[0], 10, tt_ranks[1]), dtype=self.dtype) + b = tf.random.normal((tt_ranks[2], 1, tt_ranks[3]), dtype=self.dtype) + c = tf.random.normal((tt_ranks[4], 2, tt_ranks[5]), dtype=self.dtype) + actual = tensor_train._are_tt_cores_valid((a, b, c), (10, 1, 2), + claimed_tt_ranks) + self.assertEqual(desired, actual) + # Wrong shape. + actual = tensor_train._are_tt_cores_valid((a, b, c), (10, 1, 1), + claimed_tt_ranks) + self.assertEqual(False, actual) + if not desired: with self.assertRaises(ValueError): - tensor_train.TensorTrain((a, b_new, c), (10, 1, 2), claimed_tt_ranks) + tensor_train.TensorTrain((a, b, c), (10, 1, 2), claimed_tt_ranks) - def testTensorIndexing(self): - tens = initializers.random_tensor((3, 3, 4), dtype=self.dtype) - with self.test_session() as sess: - desired = ops.full(tens)[:, :, :] - actual = ops.full(tens[:, :, :]) - desired, actual = sess.run([desired, actual]) - self.assertAllClose(desired, actual) - desired = ops.full(tens)[1, :, :] - actual = ops.full(tens[1, :, :]) - desired, actual = sess.run([desired, actual]) - self.assertAllClose(desired, actual) - desired = ops.full(tens)[1:2, 1, :] - actual = ops.full(tens[1:2, 1, :]) - desired, actual = sess.run([desired, actual]) - self.assertAllClose(desired, actual) - desired = ops.full(tens)[0:3, :, 3] - actual = ops.full(tens[0:3, :, 3]) - desired, actual = sess.run([desired, actual]) - self.assertAllClose(desired, actual) - desired = ops.full(tens)[1, :, 3] - actual = ops.full(tens[1, :, 3]) - desired, actual = sess.run([desired, actual]) - self.assertAllClose(desired, actual) - - # Wrong number of dims. - with self.assertRaises(ValueError): - tens[1, :, 3, :] + # Make dtypes inconsistent. 
+ b_new = tf.cast(b, tf.float16) + actual = tensor_train._are_tt_cores_valid((a, b_new, c), (10, 1, 2), + claimed_tt_ranks) + self.assertEqual(False, actual) with self.assertRaises(ValueError): - tens[1, 1] + tensor_train.TensorTrain((a, b_new, c), (10, 1, 2), claimed_tt_ranks) - def testPlaceholderTensorIndexing(self): + def testTensorIndexing(self): tens = initializers.random_tensor((3, 3, 4), dtype=self.dtype) - with self.test_session() as sess: - start = tf.placeholder(tf.int32) - end = tf.placeholder(tf.int32) - desired = ops.full(tens)[1:3, 1, :3] - actual = ops.full(tens[start:end, start, :end]) - desired, actual = sess.run([desired, actual], {start: 1, end: 3}) - self.assertAllClose(desired, actual) + desired = ops.full(tens)[:, :, :] + actual = ops.full(tens[:, :, :]) + desired, actual = self.evaluate([desired, actual]) + self.assertAllClose(desired, actual) + desired = ops.full(tens)[1, :, :] + actual = ops.full(tens[1, :, :]) + desired, actual = self.evaluate([desired, actual]) + self.assertAllClose(desired, actual) + desired = ops.full(tens)[1:2, 1, :] + actual = ops.full(tens[1:2, 1, :]) + desired, actual = self.evaluate([desired, actual]) + self.assertAllClose(desired, actual) + desired = ops.full(tens)[0:3, :, 3] + actual = ops.full(tens[0:3, :, 3]) + desired, actual = self.evaluate([desired, actual]) + self.assertAllClose(desired, actual) + desired = ops.full(tens)[1, :, 3] + actual = ops.full(tens[1, :, 3]) + desired, actual = self.evaluate([desired, actual]) + self.assertAllClose(desired, actual) + + # Wrong number of dims. + with self.assertRaises(ValueError): + tens[1, :, 3, :] + with self.assertRaises(ValueError): + tens[1, 1] def testShapeOverflow(self): large_shape = [10] * 20 diff --git a/t3f/utils.py b/t3f/utils.py index caa9e03f..906664a0 100644 --- a/t3f/utils.py +++ b/t3f/utils.py @@ -1,5 +1,5 @@ import numpy as np -import tensorflow.compat.v1 as tf +import tensorflow as tf # TODO: substitute with native implementation when it's ready. @@ -8,7 +8,7 @@ def unravel_index(indices, shape): with tf.name_scope('unravel_index'): indices = tf.expand_dims(indices, 0) shape = tf.expand_dims(shape, 1) - strides_shifted = tf.cumprod(shape, exclusive=True, reverse=True) + strides_shifted = tf.math.cumprod(shape, exclusive=True, reverse=True) res = (indices // strides_shifted) % shape return tf.transpose(res, (1, 0)) @@ -20,25 +20,18 @@ def replace_tf_svd_with_np_svd(): if hasattr(tf, 'original_svd'): # This function has been already called and tf.svd is already replaced. return - tf.original_svd = tf.svd + tf.original_svd = tf.linalg.svd def my_svd(tensor, full_matrices=False, compute_uv=True): dtype = tensor.dtype - u, s, v = tf.py_func(np.linalg.svd, [tensor, full_matrices, compute_uv], - [dtype, dtype, dtype]) - s_, u_, v_ = tf.original_svd(tensor, full_matrices, compute_uv) - s = tf.reshape(s, s_.get_shape()) - u = tf.reshape(u, u_.get_shape()) - v_shape = v_.get_shape().as_list() - v_shape[-2], v_shape[-1] = v_shape[-1], v_shape[-2] - v = tf.reshape(v, v_shape) + u, s, v = np.linalg.svd(tensor, full_matrices, compute_uv) # Converting numpy order of v dims to TF order. 
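np.linalg.svd and tf.linalg.svd differ in both return order and orientation: numpy yields (u, s, vh) with right singular vectors as rows, TF yields (s, u, v) with them as columns, hence the transpose applied just below. A sketch of the correspondence (a random matrix, so the singular values are distinct almost surely):

    import numpy as np
    import tensorflow as tf

    a = np.random.randn(4, 3)
    u, s, vh = np.linalg.svd(a, full_matrices=False)
    s_tf, u_tf, v_tf = tf.linalg.svd(tf.constant(a))
    np.testing.assert_allclose(s, s_tf.numpy(), atol=1e-10)
    # Right singular vectors agree up to per-vector sign flips.
    np.testing.assert_allclose(np.abs(v_tf.numpy().T @ vh.T), np.eye(3),
                               atol=1e-10)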
order = list(range(tensor.get_shape().ndims)) order[-2], order[-1] = order[-1], order[-2] v = tf.transpose(v, order) - return s, u, v + return tf.constant(s), tf.constant(u), v - tf.svd = my_svd + tf.linalg.svd = my_svd def in_eager_mode(): diff --git a/t3f/utils_test.py b/t3f/utils_test.py index 54b30136..44d9b3c1 100644 --- a/t3f/utils_test.py +++ b/t3f/utils_test.py @@ -1,5 +1,6 @@ import numpy as np -import tensorflow.compat.v1 as tf +import tensorflow as tf +tf.compat.v1.enable_eager_execution() from t3f import utils @@ -7,29 +8,27 @@ class UtilsTest(tf.test.TestCase): def testUnravelIndex(self): - with self.test_session(): - # 2D. - shape = (7, 6) - linear_idx = [22, 41, 37] - desired = [[3, 4], [6, 5], [6, 1]] - actual = utils.unravel_index(linear_idx, shape) - self.assertAllEqual(desired, actual.eval()) - # 3D. - shape = (2, 3, 4) - linear_idx = [19, 17, 0, 23] - desired = [[1, 1, 3], [1, 1, 1], [0, 0, 0], [1, 2, 3]] - actual = utils.unravel_index(linear_idx, shape) - self.assertAllEqual(desired, actual.eval()) + # 2D. + shape = (7, 6) + linear_idx = [22, 41, 37] + desired = [[3, 4], [6, 5], [6, 1]] + actual = utils.unravel_index(linear_idx, shape) + self.assertAllEqual(desired, self.evaluate(actual)) + # 3D. + shape = (2, 3, 4) + linear_idx = [19, 17, 0, 23] + desired = [[1, 1, 3], [1, 1, 1], [0, 0, 0], [1, 2, 3]] + actual = utils.unravel_index(linear_idx, shape) + self.assertAllEqual(desired, self.evaluate(actual)) def testReplaceTfSvdWithNpSvd(self): - with self.test_session() as sess: - mat = tf.constant([[3., 4], [5, 6]]) - desired = sess.run(tf.svd(mat)) - utils.replace_tf_svd_with_np_svd() - actual = sess.run(tf.svd(mat)) - self.assertAllClose(actual[0], desired[0]) - self.assertAllClose(np.abs(np.dot(actual[1].T, desired[1])), np.eye(2)) - self.assertAllClose(np.abs(np.dot(actual[2].T, desired[2])), np.eye(2)) + mat = tf.constant([[3., 4], [5, 6]]) + desired = self.evaluate(tf.linalg.svd(mat)) + utils.replace_tf_svd_with_np_svd() + actual = self.evaluate(tf.linalg.svd(mat)) + self.assertAllClose(actual[0], desired[0]) + self.assertAllClose(np.abs(np.dot(actual[1].T, desired[1])), np.eye(2)) + self.assertAllClose(np.abs(np.dot(actual[2].T, desired[2])), np.eye(2)) if __name__ == "__main__": diff --git a/t3f/variables_no_eager_test.py b/t3f/variables_no_eager_test.py new file mode 100644 index 00000000..e04c2a4e --- /dev/null +++ b/t3f/variables_no_eager_test.py @@ -0,0 +1,62 @@ +import numpy as np +import tensorflow.compat.v1 as tf +tf.disable_eager_execution() + +from t3f import variables +from t3f import ops +from t3f import initializers + + +class _VariablesTest(): + + def testGetExistingVariable(self): + init = initializers.random_tensor([2, 3, 2], tt_rank=2, dtype=self.dtype) + tt_1 = variables.get_variable('tt_1', initializer=init) + with tf.variable_scope('test'): + tt_2 = variables.get_variable('tt_2', initializer=init) + with tf.Session() as sess: + sess.run(tf.global_variables_initializer()) + with self.assertRaises(ValueError): + # The variable already exists and scope.reuse is False by default. + variables.get_variable('tt_1') + with self.assertRaises(ValueError): + with tf.variable_scope('', reuse=True): + # The variable doesn't exist. 
+ variables.get_variable('tt_3') + + with tf.variable_scope('', reuse=True): + tt_1_copy = variables.get_variable('tt_1', dtype=self.dtype) + self.assertAllClose(ops.full(tt_1).eval(), ops.full(tt_1_copy).eval()) + + with tf.variable_scope('', reuse=True): + # Again try to retrieve an existing variable, but pass an initializer + # and check that it still works. + tt_1_copy = variables.get_variable('tt_1', initializer=0 * init, + dtype=self.dtype) + self.assertAllClose(ops.full(tt_1).eval(), ops.full(tt_1_copy).eval()) + + with self.assertRaises(ValueError): + with tf.variable_scope('', reuse=True): + # The variable is defined in a different scope + variables.get_variable('tt_2') + + with self.assertRaises(ValueError): + with tf.variable_scope('nottest', reuse=True): + # The variable is defined in a different scope + variables.get_variable('tt_2') + + with tf.variable_scope('test', reuse=True): + tt_2_copy = variables.get_variable('tt_2', dtype=self.dtype) + self.assertAllClose(ops.full(tt_2).eval(), ops.full(tt_2_copy).eval()) + + +class VariablesTestFloat32(tf.test.TestCase, _VariablesTest): + dtype = tf.float32 + + +class VariablesTestFloat64(tf.test.TestCase, _VariablesTest): + dtype = tf.float64 + + +if __name__ == "__main__": + tf.test.main() diff --git a/t3f/variables_test.py b/t3f/variables_test.py index c7ecc653..e83cb2aa 100644 --- a/t3f/variables_test.py +++ b/t3f/variables_test.py @@ -1,5 +1,6 @@ import numpy as np -import tensorflow.compat.v1 as tf +import tensorflow as tf +tf.compat.v1.enable_eager_execution() from t3f import variables from t3f import ops @@ -8,46 +9,6 @@ class _VariablesTest(): - def testGetExistingVariable(self): - init = initializers.random_tensor([2, 3, 2], tt_rank=2, dtype=self.dtype) - tt_1 = variables.get_variable('tt_1', initializer=init) - with tf.variable_scope('test'): - tt_2 = variables.get_variable('tt_2', initializer=init) - with self.test_session(): - tf.global_variables_initializer().run() - with self.assertRaises(ValueError): - # The variable already exists and scope.reuse is False by default. - variables.get_variable('tt_1') - with self.assertRaises(ValueError): - with tf.variable_scope('', reuse=True): - # The variable doesn't exist. - variables.get_variable('tt_3') - - with tf.variable_scope('', reuse=True): - tt_1_copy = variables.get_variable('tt_1', dtype=self.dtype) - self.assertAllClose(ops.full(tt_1).eval(), ops.full(tt_1_copy).eval()) - - with tf.variable_scope('', reuse=True): - # Again try to retrieve an existing variable, but pass an initializer - # and check that it still works. - tt_1_copy = variables.get_variable('tt_1', initializer=0 * init, - dtype=self.dtype) - self.assertAllClose(ops.full(tt_1).eval(), ops.full(tt_1_copy).eval()) - - with self.assertRaises(ValueError): - with tf.variable_scope('', reuse=True): - # The variable is defined in a different scope - variables.get_variable('tt_2') - - with self.assertRaises(ValueError): - with tf.variable_scope('nottest', reuse=True): - # The variable is defined in a different scope - variables.get_variable('tt_2') - - with tf.variable_scope('test', reuse=True): - tt_2_copy = variables.get_variable('tt_2', dtype=self.dtype) - self.assertAllClose(ops.full(tt_2).eval(), ops.full(tt_2_copy).eval()) - def testAttributes(self): # Test that after converting an initializer into a variable all the # attributes stays the same. 
@@ -69,18 +30,17 @@ def testAssign(self): tt = variables.get_variable('tt', initializer=old_init) new_init = initializers.random_tensor([2, 3, 2], tt_rank=2, dtype=self.dtype) + self.evaluate(tf.compat.v1.global_variables_initializer()) + init_value = self.evaluate(ops.full(tt)) assigner = variables.assign(tt, new_init) - with self.test_session(): - tf.global_variables_initializer().run() - init_value = ops.full(tt).eval() - assigner_value = ops.full(assigner).eval() - after_value = ops.full(tt) - after_value = after_value.eval() - self.assertAllClose(assigner_value, after_value) - # Assert that the value actually changed: - abs_diff = np.linalg.norm((init_value - after_value).flatten()) - rel_diff = abs_diff / np.linalg.norm((init_value).flatten()) - self.assertGreater(rel_diff, 0.2) + assigner_value = self.evaluate(ops.full(assigner)) + after_value = ops.full(tt) + after_value = self.evaluate(after_value) + self.assertAllClose(assigner_value, after_value) + # Assert that the value actually changed: + abs_diff = np.linalg.norm((init_value - after_value).flatten()) + rel_diff = abs_diff / np.linalg.norm((init_value).flatten()) + self.assertGreater(rel_diff, 0.2) class VariablesTestFloat32(tf.test.TestCase, _VariablesTest):
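Taken together, the migration splits the suite in two: tests that can run eagerly enable eager execution at import time and go through self.evaluate, while the placeholder-driven tests that genuinely need feed_dict now live in the *_no_eager_test.py files above, which disable eager execution and run as a separate test pass. A minimal sketch of such a file; the class, test, and tensors are illustrative:

    import tensorflow.compat.v1 as tf
    tf.disable_eager_execution()

    class PlaceholderStyleTest(tf.test.TestCase):

      def testFeedDict(self):
        x = tf.placeholder(tf.float32, [None])
        doubled = 2 * x
        with tf.Session() as sess:
          self.assertAllClose([2.0, 4.0], sess.run(doubled, {x: [1.0, 2.0]}))

    if __name__ == "__main__":
      tf.test.main()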