diff --git a/.gitignore b/.gitignore index ccc3fd8d..970830c2 100644 --- a/.gitignore +++ b/.gitignore @@ -6,6 +6,7 @@ dist/ tests/__pycache__ pyttb/__pycache__ build/ +_build/ .coverage .ipynb_checkpoints htmlcov diff --git a/docs/source/index.rst b/docs/source/index.rst index 4a5ec16a..db038161 100644 --- a/docs/source/index.rst +++ b/docs/source/index.rst @@ -4,9 +4,9 @@ pyttb: Python Tensor Toolbox **************************** Tensors (also known as multidimensional arrays or N-way arrays) are used in a variety of applications ranging from chemometrics to network -analysis. +analysis. This Python package is an adaptation of the +`Tensor Toolbox for MATLAB `_. -- Install the latest release from pypi (``pip install pyttb``). - This is open source software. Please see `LICENSE`_ for the terms of the license (2-clause BSD). - For more information or for feedback on this project, please `contact us`_. @@ -14,6 +14,15 @@ analysis. .. _`LICENSE`: ../../../LICENSE .. _contact us: #contact +Installing +========== + +* Via pypi + - Install the latest release from pypi (``pip install pyttb``). +* From source + - Clone the repository from `github `_. + - Install the package with ``pip install .`` from the pyttb root directory. + Functionality ============== pyttb provides the following classes and functions diff --git a/docs/source/tensor.rst b/docs/source/tensor.rst index 8edf6b3e..c02a2232 100644 --- a/docs/source/tensor.rst +++ b/docs/source/tensor.rst @@ -1,8 +1,8 @@ pyttb.tensor -------------------- -.. autoclass:: pyttb.tensor +.. 
automodule:: pyttb.tensor :members: :special-members: - :exclude-members: __dict__, __weakref__, __slots__, __init__ + :exclude-members: __dict__, __weakref__, __slots__, __init__, mttv_left, mttv_mid, mttv_right, min_split :show-inheritance: \ No newline at end of file diff --git a/docs/source/tensor_classes.rst b/docs/source/tensor_classes.rst index 240627c8..a24ffce2 100644 --- a/docs/source/tensor_classes.rst +++ b/docs/source/tensor_classes.rst @@ -2,14 +2,14 @@ Tensor Classes ============== .. toctree:: - :maxdepth: 2 + :maxdepth: 3 - ktensor.rst - sptenmat.rst - sptensor.rst - sumtensor.rst - tensor.rst - ttensor.rst - tenmat.rst - pyttb_utils.rst + tensor + sptensor + ktensor + ttensor + sumtensor + tenmat + sptenmat + pyttb_utils diff --git a/docs/source/tutorial/class_tensor.ipynb b/docs/source/tutorial/class_tensor.ipynb index 3a8b75ac..9f944c6c 100644 --- a/docs/source/tutorial/class_tensor.ipynb +++ b/docs/source/tutorial/class_tensor.ipynb @@ -5,7 +5,7 @@ "metadata": {}, "source": [ "# Tensors\n", - "```\n", + "``` text\n", "Copyright 2022 National Technology & Engineering Solutions of Sandia,\n", "LLC (NTESS). Under the terms of Contract DE-NA0003525 with NTESS, the\n", "U.S. Government retains certain rights in this software.\n", @@ -16,52 +16,97 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "Tensors are extensions of multidimensial arrays with additional operations defined on them. Here we explain the basics for creating and working with tensors." + "Tensors are extensions of multidimensial arrays with additional operations defined on them. Here we explain the basics for creating and working with dense tensors." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "For more details, see the {class}`pyttb.tensor` class documentation." 
] }, { "cell_type": "code", - "execution_count": null, + "execution_count": 195, "metadata": {}, "outputs": [], "source": [ "import pyttb as ttb\n", "import numpy as np\n", - "import sys" + "import sys\n", + "from pyttb.matlab.matlab_support import matlab_print" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "## Creating a `tensor` from an array" + "## Creating a tensor from an array" ] }, { - "cell_type": "code", - "execution_count": null, + "cell_type": "markdown", "metadata": {}, - "outputs": [], "source": [ - "M = np.ones((2, 4, 3)) # A 2x4x3 array.\n", - "X = ttb.tensor(M) # Convert to a tensor object\n", - "X" + "The {class}`pyttb.tensor` command creates a (multidimensional) array as a tensor object. By default, it creates a deep copy of the input object. It also reorders that copy to be F-ordered if it isn't already. For a tensor of size $m \\times n \\times p$, the shape is `(m,n,p)`." ] }, { - "cell_type": "markdown", + "cell_type": "code", + "execution_count": 196, "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "tensor of shape (4, 3, 2) with order F\n", + "data[:, :, 0] =\n", + "[[1. 1. 1.]\n", + " [1. 1. 1.]\n", + " [1. 1. 1.]\n", + " [1. 1. 1.]]\n", + "data[:, :, 1] =\n", + "[[1. 1. 1.]\n", + " [1. 1. 1.]\n", + " [1. 1. 1.]\n", + " [1. 1. 1.]]" + ] + }, + "execution_count": 196, + "metadata": {}, + "output_type": "execute_result" + } + ], "source": [ - "Optionally, you can specify a different shape for the `tensor`, so long as the input array has the right number of elements. " + "M = np.ones((4, 3, 2)) # Create numpy 4 x 3 x 2 array of ones.\n", + "X = ttb.tensor(M) # Convert to 4 x 3 x 2 tensor object.\n", + "X" ] }, { "cell_type": "code", - "execution_count": null, + "execution_count": 197, "metadata": {}, - "outputs": [], + "outputs": [ + { + "data": { + "text/plain": [ + "tensor of shape (4, 6) with order F\n", + "data[:, :] =\n", + "[[1. 1. 1. 1. 1. 1.]\n", + " [1. 1. 1. 1. 1. 1.]\n", + " [1. 1. 1. 1. 1. 
1.]\n", + " [1. 1. 1. 1. 1. 1.]]" + ] + }, + "execution_count": 197, + "metadata": {}, + "output_type": "execute_result" + } + ], "source": [ - "X = X.reshape((4, 2, 3))\n", + "X = ttb.tensor(M, (4, 6)) # Reshape to 4 x 6 tensor.\n", "X" ] }, @@ -69,18 +114,35 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "## Creating a one-dimensional `tensor`\n", - "`np.random.rand(m,n)` creates a two-dimensional tensor with `m` rows and `n` columns." + "There is an option to only do a shallow copy the input data, but it must be F-ordered. This can be useful for larger data. (A vector is both C- and F-ordered, which is useful for functions that don't support alternative orderings.)" ] }, { "cell_type": "code", - "execution_count": null, + "execution_count": 198, "metadata": {}, - "outputs": [], + "outputs": [ + { + "data": { + "text/plain": [ + "tensor of shape (2, 3, 2) with order F\n", + "data[:, :, 0] =\n", + "[[0.5488135 0.60276338 0.4236548 ]\n", + " [0.71518937 0.54488318 0.64589411]]\n", + "data[:, :, 1] =\n", + "[[0.43758721 0.96366276 0.79172504]\n", + " [0.891773 0.38344152 0.52889492]]" + ] + }, + "execution_count": 198, + "metadata": {}, + "output_type": "execute_result" + } + ], "source": [ "np.random.seed(0)\n", - "X = ttb.tensor(np.random.rand(5, 1)) # Creates a 2-way tensor.\n", + "v = np.random.rand(12) # length-12 vector of uniform random numbers.\n", + "X = ttb.tensor(v, (2, 3, 2), copy=False) # Converted to 2 x 3 x 2 tensor.\n", "X" ] }, @@ -88,200 +150,661 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "To specify a 1-way `tensor`, use `(m,)` syntax, signifying a vector with `m` elements." + "## A Note on Display of Tensors\n", + "The display of a tensor is by _frontal slice_ where the first two indices range and the remainder stay fixed. This is different than how Python normal displays multidimensional arrays where the last two indices range and the remainder stay fixed." 
] }, { "cell_type": "code", - "execution_count": null, + "execution_count": 279, "metadata": {}, - "outputs": [], + "outputs": [ + { + "data": { + "text/plain": [ + "array([[[0.5488135 , 0.71518937],\n", + " [0.60276338, 0.54488318],\n", + " [0.4236548 , 0.64589411]],\n", + "\n", + " [[0.43758721, 0.891773 ],\n", + " [0.96366276, 0.38344152],\n", + " [0.79172504, 0.52889492]],\n", + "\n", + " [[0.56804456, 0.92559664],\n", + " [0.07103606, 0.0871293 ],\n", + " [0.0202184 , 0.83261985]]])" + ] + }, + "execution_count": 279, + "metadata": {}, + "output_type": "execute_result" + } + ], "source": [ - "np.random.seed(0)\n", - "X = ttb.tensor(np.random.rand(5), shape=(5,)) # Creates a 1-way tensor.\n", - "X" + "# Display of the above tensor object in the usual Python way.\n", + "X.data" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "## Specifying trailing singleton dimensions in a `tensor`\n", - "Likewise, trailing singleton dimensions must be explicitly specified." + "## Printing similar to MATLAB \n", + "It is possible to print similar to MATLAB using {func}`matlab_print` which has the optional arguments `name` and `format` to further customize the outputs. You will need \n", + "\n", + "``` python\n", + "from pyttb.matlab.matlab_support import matlab_print\n", + "```\n", + "\n", + "in your code for this to work as shown here." ] }, { "cell_type": "code", - "execution_count": null, + "execution_count": 199, "metadata": {}, - "outputs": [], + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "X is a tensor of shape 2 x 3 x 2\n", + "\tX(:,:, 0) =\n", + "\t\t0.5488 0.6028 0.4237\n", + "\t\t0.7152 0.5449 0.6459\n", + "\n", + "\tX(:,:, 1) =\n", + "\t\t0.4376 0.9637 0.7917\n", + "\t\t0.8918 0.3834 0.5289\n", + "\n" + ] + } + ], "source": [ - "np.random.seed(0)\n", - "Y = ttb.tensor(np.random.rand(4, 3)) # Creates a 2-way tensor.\n", - "Y" + "matlab_print(X,name='X',format='7.4f') # Print tensor X in MATLAB format." 
+ ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Optionally, you can specify a different shape for the tensor, so long as the input array has the right number of elements. " + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Creating a tensor with elements generated by a function\n", + "\n", + "Using {meth}`pyttb.tensor.from_function` takes another function that is used to generate entries of the tensor. The returned array should be in Fortran order to avoid unnecessary copies and rearrangement. Since the data will be reshape in any case, returning a vector is recommended. Alternatively, ensure the function returns in F-order for those methods that support it." ] }, { "cell_type": "code", - "execution_count": null, + "execution_count": 200, "metadata": {}, - "outputs": [], + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "X is a tensor of shape 2 x 3 x 2\n", + "\tX(:,:, 0) =\n", + "\t\t1.6243 -0.5282 0.8654\n", + "\t\t-0.6118 -1.0730 -2.3015\n", + "\n", + "\tX(:,:, 1) =\n", + "\t\t1.7448 0.3190 1.4621\n", + "\t\t-0.7612 -0.2494 -2.0601\n", + "\n" + ] + } + ], "source": [ - "np.random.seed(0)\n", - "Y = ttb.tensor(np.random.rand(3, 4, 1), (3, 4, 1)) # Creates a 3-way tensor.\n", - "Y" + "# Ensure reproducibility of random numbers.\n", + "np.random.seed(1) \n", + "# Function to generate normally distributed random numbers.\n", + "randn = lambda s: np.random.randn(np.prod(s))\n", + "# Create 2 x 3 x 2 tensor of normally distributed random numbers.\n", + "X = ttb.tensor.from_function(randn, (2, 3, 2)) \n", + "# Print tensor X in MATLAB format.\n", + "matlab_print(X,name='X',format='7.4f') " ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "## The constituent parts of a `tensor`" + "We show how to use {meth}`pyttb.tensor.from_function` in the next example to create a tensor of all ones, but it's even easier to use {meth}`pyttb.tenones` described below." 
] }, { "cell_type": "code", - "execution_count": null, + "execution_count": 201, "metadata": {}, - "outputs": [], + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "X is a tensor of shape 3 x 4 x 2\n", + "\tX(:,:, 0) =\n", + "\t\t 1 1 1 1\n", + "\t\t 1 1 1 1\n", + "\t\t 1 1 1 1\n", + "\n", + "\tX(:,:, 1) =\n", + "\t\t 1 1 1 1\n", + "\t\t 1 1 1 1\n", + "\t\t 1 1 1 1\n", + "\n" + ] + } + ], "source": [ - "np.random.seed(0)\n", - "X = ttb.tenrand((2, 4, 3)) # Create data.\n", - "X.data # The array." + "# Function to generate tensor of ones. Uses explicit Fortran order.\n", + "ones = lambda s: np.ones(s,order='F') \n", + "# Create 3 x 4 x 2 tensor of ones.\n", + "X = ttb.tensor.from_function(ones, (3, 4, 2))\n", + "# Print tensor X in MATLAB format.\n", + "matlab_print(X,name='X',format='2.0f')" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Create a tensor of all ones.\n", + "Using {func}`pyttb.tenones` to create a tensor of all ones." ] }, { "cell_type": "code", - "execution_count": null, + "execution_count": 202, "metadata": {}, - "outputs": [], + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "X is a tensor of shape 3 x 4 x 2\n", + "\tX(:,:, 0) =\n", + "\t\t 1 1 1 1\n", + "\t\t 1 1 1 1\n", + "\t\t 1 1 1 1\n", + "\n", + "\tX(:,:, 1) =\n", + "\t\t 1 1 1 1\n", + "\t\t 1 1 1 1\n", + "\t\t 1 1 1 1\n", + "\n" + ] + } + ], "source": [ - "X.shape # The shape." 
+ "\n", + "X = ttb.tenones((3, 4, 2)) \n", + "matlab_print(X,name='X',format='2.0f')" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "## Creating a `tensor` from its constituent parts" + "## Create a tensor of all zeros\n", + "Use {func}`pyttb.tenzeros` to create a tensor of all zeros.\n" ] }, { "cell_type": "code", - "execution_count": null, + "execution_count": 203, "metadata": {}, - "outputs": [], + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "X is a tensor of shape 1 x 4 x 2\n", + "\tX(:,:, 0) =\n", + "\t\t 0 0 0 0\n", + "\n", + "\tX(:,:, 1) =\n", + "\t\t 0 0 0 0\n", + "\n" + ] + } + ], "source": [ - "np.random.seed(0)\n", - "X = ttb.tenrand((2, 4, 3)) # Create data.\n", - "Y = X.copy() # Copies X.\n", - "Y" + "X = ttb.tenzeros((1, 4, 2)) # Creates a 1 x 4 x 2 tensor of zeroes.\n", + "matlab_print(X,name='X',format='2.0f')" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "## Creating an empty `tensor`\n", - "An empty constructor exists." + "## Create a random tensor\n", + "Use {func}`pyttb.tenrand` to create a tensor with uniform random values from [0,1]." 
] }, { "cell_type": "code", - "execution_count": null, + "execution_count": 204, "metadata": {}, - "outputs": [], + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "X is a tensor of shape 5 x 4 x 2\n", + "\tX(:,:, 0) =\n", + "\t\t0.4360 0.5497 0.4204 0.2046\n", + "\t\t0.2997 0.6211 0.1346 0.1844\n", + "\t\t0.8540 0.8466 0.5052 0.4281\n", + "\t\t0.1272 0.2260 0.2203 0.4678\n", + "\t\t0.6404 0.5052 0.7936 0.1623\n", + "\n", + "\tX(:,:, 1) =\n", + "\t\t0.0259 0.4353 0.3303 0.6193\n", + "\t\t0.2668 0.5291 0.5136 0.7853\n", + "\t\t0.4942 0.0796 0.0653 0.0965\n", + "\t\t0.5967 0.1069 0.3498 0.2017\n", + "\t\t0.4831 0.3869 0.5800 0.7008\n", + "\n" + ] + } + ], "source": [ - "X = ttb.tensor() # Creates an empty tensor\n", - "X" + "# Creates a 5 x 4 x 2 tensor of uniform [0,1] random numbers\n", + "np.random.seed(2) # Reproducible random numbers\n", + "X = ttb.tenrand((5, 4, 2))\n", + "matlab_print(X,name='X',format='7.4f')" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "## Use `tenones` to create a `tensor` of all ones" + "## Creating a one-dimensional tensor\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "To specify a 1-way tensor of size $m$, the shape should be of the form `(m,)`." 
] }, { "cell_type": "code", - "execution_count": null, + "execution_count": 205, "metadata": {}, - "outputs": [], + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "X is a tensor of shape 5\n", + "\tX(:) =\n", + "\t\t0.5508\n", + "\t\t0.7081\n", + "\t\t0.2909\n", + "\t\t0.5108\n", + "\t\t0.8929\n" + ] + } + ], "source": [ - "np.random.seed(0)\n", - "X = ttb.tensor(np.random.rand(5), shape=(5,)) # Creates a 1-way tensor.\n", - "X" + "np.random.seed(3)\n", + "X = ttb.tenrand((5,)) # Creates a 1-way tensor.\n", + "matlab_print(X,name='X',format='7.4f')" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "## Specifying trailing singleton dimensions in a `tensor`\n", - "Likewise, trailing singleton dimensions must be explicitly specified." + "## Specifying trailing singleton dimensions in a tensor\n", + "Likewise, trailing singleton dimensions must be explicitly specified." ] }, { "cell_type": "code", - "execution_count": null, + "execution_count": 206, "metadata": {}, - "outputs": [], + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Y is a tensor of shape 3 x 4\n", + "\tY(:,:) =\n", + "\t\t0.9670 0.5472 0.9727 0.7148\n", + "\t\t0.6977 0.2161 0.9763 0.0062\n", + "\t\t0.2530 0.4348 0.7794 0.1977\n" + ] + } + ], "source": [ - "np.random.seed(0)\n", - "Y = ttb.tensor(np.random.rand(4, 3)) # Creates a 2-way tensor.\n", - "Y" + "np.random.seed(4)\n", + "Y = ttb.tenrand((3, 4)) # Creates a 2-way tensor of size 3 x 4.\n", + "matlab_print(Y,name='Y',format='7.4f')" + ] + }, + { + "cell_type": "code", + "execution_count": 207, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Y is a tensor of shape 3 x 4 x 1\n", + "\tY(:,:, 0) =\n", + "\t\t0.9670 0.5472 0.9727 0.7148\n", + "\t\t0.6977 0.2161 0.9763 0.0062\n", + "\t\t0.2530 0.4348 0.7794 0.1977\n", + "\n" + ] + } + ], + "source": [ + "np.random.seed(4)\n", + "Y = ttb.tenrand((3, 4, 1)) # Creates a 3-way tensor of size 3 x 4 x 1.\n", + "matlab_print(Y,name='Y',format='7.4f')" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "## Use 
`tenrand` to create a random `tensor`" + "## The constituent parts of a tensor\n", + "A tensor has two parts: `data` (a multidimensional array) and `shape` (a tuple of integers)." + ] + }, + { + "cell_type": "code", + "execution_count": 208, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "X is a tensor of shape 2 x 4 x 3\n", + "\tX(:,:, 0) =\n", + "\t\t0.2220 0.9186 0.7659 0.1877\n", + "\t\t0.4413 0.2741 0.6288 0.2658\n", + "\n", + "\tX(:,:, 1) =\n", + "\t\t0.8707 0.4884 0.5184 0.0807\n", + "\t\t0.1583 0.4142 0.5798 0.2847\n", + "\n", + "\tX(:,:, 2) =\n", + "\t\t0.2067 0.6117 0.2968 0.7384\n", + "\t\t0.8799 0.2961 0.5999 0.2536\n", + "\n" + ] + } + ], + "source": [ + "np.random.seed(5)\n", + "X = ttb.tenrand((2, 4, 3)) # Create tensor of size 2 x 4 x 3 with random numbers.\n", + "matlab_print(X,name='X',format='7.4f')" + ] + }, + { + "cell_type": "code", + "execution_count": 280, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "array([[[0.5488135 , 0.71518937],\n", + " [0.60276338, 0.54488318],\n", + " [0.4236548 , 0.64589411]],\n", + "\n", + " [[0.43758721, 0.891773 ],\n", + " [0.96366276, 0.38344152],\n", + " [0.79172504, 0.52889492]],\n", + "\n", + " [[0.56804456, 0.92559664],\n", + " [0.07103606, 0.0871293 ],\n", + " [0.0202184 , 0.83261985]]])" + ] + }, + "execution_count": 280, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "# The array (note that its display order is different from the tensor).\n", + "X.data # The array." 
] }, { "cell_type": "code", - "execution_count": null, + "execution_count": 210, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + " C_CONTIGUOUS : False\n", + " F_CONTIGUOUS : True\n", + " OWNDATA : False\n", + " WRITEABLE : True\n", + " ALIGNED : True\n", + " WRITEBACKIFCOPY : False" + ] + }, + "execution_count": 210, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "# Note that it is stored in Fortran format (F_CONTIGUOUS = True).\n", + "X.data.flags" + ] + }, + { + "cell_type": "code", + "execution_count": 211, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "(2, 4, 3)" + ] + }, + "execution_count": 211, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "# The shape\n", + "X.shape" + ] + }, + { + "cell_type": "markdown", "metadata": {}, - "outputs": [], + "source": [ + "## Creating a tensor from its constituent parts\n", + "_This is not the most efficient way to create a tensor copy, but it illustrates the role of the parts. 
A more efficient way is to use `Y = X` (shallow copy) or `Y = X.copy()` (deep copy)._" + ] + }, + { + "cell_type": "code", + "execution_count": 212, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "X is a tensor of shape 2 x 4 x 3\n", + "\tX(:,:, 0) =\n", + "\t\t0.5488 0.5449 0.4376 0.3834\n", + "\t\t0.5680 0.0871 0.7782 0.7992\n", + "\n", + "\tX(:,:, 1) =\n", + "\t\t0.7152 0.4237 0.8918 0.7917\n", + "\t\t0.9256 0.0202 0.8700 0.4615\n", + "\n", + "\tX(:,:, 2) =\n", + "\t\t0.6028 0.6459 0.9637 0.5289\n", + "\t\t0.0710 0.8326 0.9786 0.7805\n", + "\n", + "Y is a tensor of shape 2 x 4 x 3\n", + "\tY(:,:, 0) =\n", + "\t\t0.5488 0.5449 0.4376 0.3834\n", + "\t\t0.5680 0.0871 0.7782 0.7992\n", + "\n", + "\tY(:,:, 1) =\n", + "\t\t0.7152 0.4237 0.8918 0.7917\n", + "\t\t0.9256 0.0202 0.8700 0.4615\n", + "\n", + "\tY(:,:, 2) =\n", + "\t\t0.6028 0.6459 0.9637 0.5289\n", + "\t\t0.0710 0.8326 0.9786 0.7805\n", + "\n" + ] + } + ], "source": [ "np.random.seed(0)\n", - "X = ttb.tenrand((2, 5, 4))\n", - "X" + "X = ttb.tenrand((2, 4, 3)) # Create data.\n", + "matlab_print(X,name='X',format='7.4f') # Print tensor X in MATLAB format.\n", + "Y = ttb.tensor(X.data, X.shape) # Creates a (deep) copy of X from its parts.\n", + "matlab_print(Y,name='Y',format='7.4f')" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "## Use `squeeze` to remove singleton dimensions from a `tensor`" + "## Creating an empty tensor\n", + "An empty constructor exists." 
] }, { "cell_type": "code", - "execution_count": null, + "execution_count": 213, "metadata": {}, - "outputs": [], + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "X is a tensor of shape \n", + "\tX(:) =\n", + "\n" + ] + } + ], + "source": [ + "X = ttb.tensor() # Creates an empty tensor\n", + "matlab_print(X,name='X',format='7.4f')" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Removing singleton dimensions from a tensor\n", + "Use {meth}`pyttb.tensor.squeeze` to remove singleton dimensions from a tensor." + ] + }, + { + "cell_type": "code", + "execution_count": 214, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Y is a tensor of shape 4 x 3 x 1\n", + "\tY(:,:, 0) =\n", + "\t\t0.5488 0.7152 0.6028\n", + "\t\t0.5449 0.4237 0.6459\n", + "\t\t0.4376 0.8918 0.9637\n", + "\t\t0.3834 0.7917 0.5289\n", + "\n" + ] + } + ], "source": [ "np.random.seed(0)\n", - "X = ttb.tenrand((2, 5, 4)) # Create the data.\n", - "Y = X.copy()\n", - "# Add singleton dimension.\n", - "Y[0, 0, 0, 0] = Y[0, 0, 0]\n", - "# Remove singleton dimension.\n", - "Y.squeeze().isequal(X)" + "Y = ttb.tenrand((4, 3, 1)) # Create the data.\n", + "matlab_print(Y,name='Y',format='7.4f') # Print tensor Y in MATLAB format." + ] + }, + { + "cell_type": "code", + "execution_count": 215, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Z is a tensor of shape 4 x 3\n", + "\tZ(:,:) =\n", + "\t\t0.5488 0.7152 0.6028\n", + "\t\t0.5449 0.4237 0.6459\n", + "\t\t0.4376 0.8918 0.9637\n", + "\t\t0.3834 0.7917 0.5289\n" + ] + } + ], + "source": [ + "Z = Y.squeeze() # Squeeze out the singleton dimension.\n", + "matlab_print(Z,name='Z',format='7.4f') # Print tensor Z in MATLAB format. 
" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "## Use `double` to convert a `tensor` to a (multidimensional) array" + "## Convert a tensor to a (multidimensional) array\n", + "Use {meth}`pyttb.tensor.double` to convert a tensor to a numpy array; this is identical to extracting the `data` member." ] }, { "cell_type": "code", - "execution_count": null, + "execution_count": 216, "metadata": {}, - "outputs": [], + "outputs": [ + { + "data": { + "text/plain": [ + "array([[[0.5488135 , 0.71518937, 0.60276338, 0.54488318],\n", + " [0.4236548 , 0.64589411, 0.43758721, 0.891773 ],\n", + " [0.96366276, 0.38344152, 0.79172504, 0.52889492],\n", + " [0.56804456, 0.92559664, 0.07103606, 0.0871293 ],\n", + " [0.0202184 , 0.83261985, 0.77815675, 0.87001215]],\n", + "\n", + " [[0.97861834, 0.79915856, 0.46147936, 0.78052918],\n", + " [0.11827443, 0.63992102, 0.14335329, 0.94466892],\n", + " [0.52184832, 0.41466194, 0.26455561, 0.77423369],\n", + " [0.45615033, 0.56843395, 0.0187898 , 0.6176355 ],\n", + " [0.61209572, 0.616934 , 0.94374808, 0.6818203 ]]])" + ] + }, + "execution_count": 216, + "metadata": {}, + "output_type": "execute_result" + } + ], "source": [ "np.random.seed(0)\n", "X = ttb.tenrand((2, 5, 4)) # Create the data.\n", @@ -290,9 +813,30 @@ }, { "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], + "execution_count": 217, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "array([[[0.5488135 , 0.71518937, 0.60276338, 0.54488318],\n", + " [0.4236548 , 0.64589411, 0.43758721, 0.891773 ],\n", + " [0.96366276, 0.38344152, 0.79172504, 0.52889492],\n", + " [0.56804456, 0.92559664, 0.07103606, 0.0871293 ],\n", + " [0.0202184 , 0.83261985, 0.77815675, 0.87001215]],\n", + "\n", + " [[0.97861834, 0.79915856, 0.46147936, 0.78052918],\n", + " [0.11827443, 0.63992102, 0.14335329, 0.94466892],\n", + " [0.52184832, 0.41466194, 0.26455561, 0.77423369],\n", + " [0.45615033, 0.56843395, 0.0187898 , 0.6176355 ],\n", + " 
[0.61209572, 0.616934 , 0.94374808, 0.6818203 ]]])" + ] + }, + "execution_count": 217, + "metadata": {}, + "output_type": "execute_result" + } + ], "source": [ "X.data # Same thing." ] @@ -301,51 +845,104 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "## Use `ndims` and `shape` to get the shape of a `tensor`" + "## Use `ndims` and `shape` to get the shape of a tensor" ] }, { "cell_type": "code", - "execution_count": null, + "execution_count": 282, "metadata": {}, "outputs": [], + "source": [ + "X = ttb.tenrand((4,3,2)) # Create a 4 x 3 x 2 tensor of random numbers." + ] + }, + { + "cell_type": "code", + "execution_count": 283, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "3" + ] + }, + "execution_count": 283, + "metadata": {}, + "output_type": "execute_result" + } + ], "source": [ "X.ndims # Number of dimensions (or ways)." ] }, { "cell_type": "code", - "execution_count": null, + "execution_count": 284, "metadata": {}, - "outputs": [], + "outputs": [ + { + "data": { + "text/plain": [ + "(4, 3, 2)" + ] + }, + "execution_count": 284, + "metadata": {}, + "output_type": "execute_result" + } + ], "source": [ - "X.shape # Row vector with the shapes of all dimensions." + "X.shape # Tuple with the sizes of all dimensions." ] }, { "cell_type": "code", - "execution_count": null, + "execution_count": 285, "metadata": {}, - "outputs": [], + "outputs": [ + { + "data": { + "text/plain": [ + "2" + ] + }, + "execution_count": 285, + "metadata": {}, + "output_type": "execute_result" + } + ], "source": [ - "X.shape[2] # shape of a single dimension." + "X.shape[2] # Size of a single dimension." 
] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "## Subscripted reference for a `tensor`" + "## Subscripted reference for a tensor" ] }, { "cell_type": "code", - "execution_count": null, + "execution_count": 287, "metadata": {}, - "outputs": [], + "outputs": [ + { + "data": { + "text/plain": [ + "0.5488135039273248" + ] + }, + "execution_count": 287, + "metadata": {}, + "output_type": "execute_result" + } + ], "source": [ "np.random.seed(0)\n", - "X = ttb.tenrand((2, 3, 4, 1)) # Create a 3x4x2x1 random tensor.\n", + "X = ttb.tenrand((3, 4, 2, 1)) # Create a 3x4x2x1 random tensor.\n", "X[0, 0, 0, 0] # Extract a single element." ] }, @@ -358,20 +955,48 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 288, "metadata": {}, - "outputs": [], + "outputs": [ + { + "data": { + "text/plain": [ + "tensor of shape (1,) with order F\n", + "data[:] =\n", + "[0.5488135]" + ] + }, + "execution_count": 288, + "metadata": {}, + "output_type": "execute_result" + } + ], "source": [ "X[0, 0, 0, :] # Produces a tensor of order 1 and shape 1." ] }, { "cell_type": "code", - "execution_count": null, + "execution_count": 289, "metadata": {}, - "outputs": [], + "outputs": [ + { + "data": { + "text/plain": [ + "tensor of shape (3, 1) with order F\n", + "data[:, :] =\n", + "[[0.5488135 ]\n", + " [0.96366276]\n", + " [0.0202184 ]]" + ] + }, + "execution_count": 289, + "metadata": {}, + "output_type": "execute_result" + } + ], "source": [ - "X[0, :, 0, :] # Produces a tensor of shape 3x1." + "X[:, 0, 0, :] # Produces a tensor of shape 3x1." 
] }, { @@ -383,11 +1008,25 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 292, "metadata": {}, - "outputs": [], + "outputs": [ + { + "data": { + "text/plain": [ + "tensor of shape (2, 2, 1) with order F\n", + "data[:, :, 0] =\n", + "[[0.60276338 0.43758721]\n", + " [0.79172504 0.07103606]]" + ] + }, + "execution_count": 292, + "metadata": {}, + "output_type": "execute_result" + } + ], "source": [ - "X[0:2, 0, [1, 3], :] # Produces a tensor of shape 2x2x1." + "X[0:2, [1, 3], 0, :] # Produces a tensor of shape 2x2x1." ] }, { @@ -399,19 +1038,41 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 293, "metadata": {}, - "outputs": [], + "outputs": [ + { + "data": { + "text/plain": [ + "array([0.5488135 , 0.78052918])" + ] + }, + "execution_count": 293, + "metadata": {}, + "output_type": "execute_result" + } + ], "source": [ - "subs = np.array([[0, 0, 0, 0], [1, 2, 3, 0]])\n", + "subs = np.array([[0, 0, 0, 0], [2, 3, 1, 0]])\n", "X[subs] # Extract 2 values by subscript." ] }, { "cell_type": "code", - "execution_count": null, + "execution_count": 226, "metadata": {}, - "outputs": [], + "outputs": [ + { + "data": { + "text/plain": [ + "array([0.5488135 , 0.78052918])" + ] + }, + "execution_count": 226, + "metadata": {}, + "output_type": "execute_result" + } + ], "source": [ "inds = np.array([0, 23])\n", "X[inds] # Same thing with linear indices." @@ -419,7 +1080,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 227, "metadata": {}, "outputs": [], "source": [ @@ -429,9 +1090,20 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 228, "metadata": {}, - "outputs": [], + "outputs": [ + { + "data": { + "text/plain": [ + "array([0.5488135 , 0.71518937, 0.60276338, 0.54488318, 0.4236548 ])" + ] + }, + "execution_count": 228, + "metadata": {}, + "output_type": "execute_result" + } + ], "source": [ "X[0:5] # Extract a subtensor." 
] @@ -440,66 +1112,168 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "## Subscripted assignment for a `tensor\n", - "We can assign a single element, an entire subtensor, or a list of values for a `tensor`.`" + "## Subscripted assignment for a tensor\n", + "We can assign a single element, an entire subtensor, or a list of values for a tensor." ] }, { "cell_type": "code", - "execution_count": null, + "execution_count": 331, "metadata": {}, - "outputs": [], + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "X is a tensor of shape 3 x 4 x 2\n", + "\tX(:,:, 0) =\n", + "\t\t0.0000 0.6028 0.4237 0.4376\n", + "\t\t0.9637 0.7917 0.5680 0.0710\n", + "\t\t0.0202 0.7782 0.9786 0.4615\n", + "\n", + "\tX(:,:, 1) =\n", + "\t\t0.7152 0.5449 0.6459 0.8918\n", + "\t\t0.3834 0.5289 0.9256 0.0871\n", + "\t\t0.8326 0.8700 0.7992 0.7805\n", + "\n" + ] + } + ], "source": [ "np.random.seed(0)\n", - "X = ttb.tenrand((2, 3, 4)) # Create some data.\n", + "X = ttb.tenrand((3,4,2)) # Create some data.\n", "X[0, 0, 0] = 0 # Replaces the [0,0,0] element.\n", - "X" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "X[0, 0:2, 0:2] = np.ones((2, 2)) # Replaces a subtensor.\n", - "X" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "X[(0, 0, 0)], X[1, 0, 0] = [5, 7] # Replaces the (0,0,0) and (1,0,0) elements." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "X[[0, 1]] = [5, 7] # Same as above using linear indices.\n", - "X" + "matlab_print(X,name='X',format='7.4f') # Print tensor X in MATLAB format." 
+ ] }, + { + "cell_type": "code", + "execution_count": 332, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "X is a tensor of shape 3 x 4 x 2\n", + "\tX(:,:, 0) =\n", + "\t\t1.0000 1.0000 0.4237 0.4376\n", + "\t\t1.0000 1.0000 0.5680 0.0710\n", + "\t\t0.0202 0.7782 0.9786 0.4615\n", + "\n", + "\tX(:,:, 1) =\n", + "\t\t0.7152 0.5449 0.6459 0.8918\n", + "\t\t0.3834 0.5289 0.9256 0.0871\n", + "\t\t0.8326 0.8700 0.7992 0.7805\n", + "\n" + ] + } + ], + "source": [ + "X[0:2, 0:2,0] = np.ones((2, 2)) # Replaces a subtensor.\n", + "matlab_print(X,name='X',format='7.4f') # Print tensor X in MATLAB format." + ] + }, + { + "cell_type": "code", + "execution_count": 333, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "X is a tensor of shape 3 x 4 x 2\n", + "\tX(:,:, 0) =\n", + "\t\t5.0000 1.0000 0.4237 0.4376\n", + "\t\t1.0000 1.0000 0.5680 0.0710\n", + "\t\t0.0202 0.7782 0.9786 0.4615\n", + "\n", + "\tX(:,:, 1) =\n", + "\t\t7.0000 0.5449 0.6459 0.8918\n", + "\t\t0.3834 0.5289 0.9256 0.0871\n", + "\t\t0.8326 0.8700 0.7992 0.7805\n", + "\n" + ] + } + ], + "source": [ + "subs = np.array([[0, 0, 0], [0,0,1]])\n", + "X[subs] = [5, 7] # Replaces the (0,0,0) and (0,0,1) elements.\n", + "matlab_print(X,name='X',format='7.4f') # Print tensor X in MATLAB format." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 339, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "X is a tensor of shape 4 x 3 x 2\n", + "\tX(:,:, 0) =\n", + "\t\t 5.0000 0.6028 0.4237\n", + "\t\t 0.4376 0.9637 0.7917\n", + "\t\t 0.5680 0.0710 0.0202\n", + "\t\t 0.7782 0.9786 0.4615\n", + "\n", + "\tX(:,:, 1) =\n", + "\t\t 7.0000 0.5449 0.6459\n", + "\t\t 0.8918 0.3834 0.5289\n", + "\t\t 0.9256 0.0871 0.8326\n", + "\t\t 0.8700 0.7992 0.7805\n", + "\n" + ] + } + ], + "source": [ + "X[[0, 12]] = [5, 7] # Same as above using linear indices.\n", + "matlab_print(X,name='X',format='7.4f') # Print tensor X in MATLAB format." ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "It is possible to **grow** the `tensor` automatically by assigning elements outside the original range of the `tensor`." + "It is possible to **grow** the tensor automatically by assigning elements outside the original range of the tensor." ] }, { "cell_type": "code", - "execution_count": null, + "execution_count": 340, "metadata": {}, - "outputs": [], + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "X is a tensor of shape 4 x 3 x 3\n", + "\tX(:,:, 0) =\n", + "\t\t 5.0000 0.6028 0.4237\n", + "\t\t 0.4376 0.9637 0.7917\n", + "\t\t 0.5680 0.0710 0.0202\n", + "\t\t 0.7782 0.9786 0.4615\n", + "\n", + "\tX(:,:, 1) =\n", + "\t\t 7.0000 0.5449 0.6459\n", + "\t\t 0.8918 0.3834 0.5289\n", + "\t\t 0.9256 0.0871 0.8326\n", + "\t\t 0.8700 0.7992 0.7805\n", + "\n", + "\tX(:,:, 2) =\n", + "\t\t 1.0000 0.0000 0.0000\n", + "\t\t 0.0000 0.0000 0.0000\n", + "\t\t 0.0000 0.0000 0.0000\n", + "\t\t 0.0000 0.0000 0.0000\n", + "\n" + ] + } + ], "source": [ - "X[2, 1, 1] = 1 # Grows the shape of the tensor.\n", - "X" + "X[0,0,2] = 1 # Grows the shape of the tensor.\n", + "matlab_print(X,name='X',format='7.4f') # Print tensor X in MATLAB format." 
] }, { @@ -511,55 +1285,95 @@ }, { "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], + "execution_count": 341, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "X is a tensor of shape 4 x 3 x 2\n", + "\tX(:,:, 0) =\n", + "\t\t 0.5488 0.6028 0.4237\n", + "\t\t 0.4376 0.9637 0.7917\n", + "\t\t 0.5680 0.0710 0.0202\n", + "\t\t 0.7782 0.9786 0.4615\n", + "\n", + "\tX(:,:, 1) =\n", + "\t\t 0.7152 0.5449 0.6459\n", + "\t\t 0.8918 0.3834 0.5289\n", + "\t\t 0.9256 0.0871 0.8326\n", + "\t\t 0.8700 0.7992 0.7805\n", + "\n" + ] + } + ], "source": [ "np.random.seed(0)\n", - "X = ttb.tenrand((2, 3, 4)) # Create some data.\n", - "np.prod(X.shape) - 1 # The index of the last element of the flattened tensor." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "X[2, 2, 3] = 99 # Inserting 99 into last element\n", - "X[-1] # Same as X[2,2,3]" + "X = ttb.tenrand((4,3,2)) # Create some data.\n", + "matlab_print(X,name='X',format='7.4f') # Print tensor X in MATLAB format." ] }, { "cell_type": "code", - "execution_count": null, + "execution_count": 344, "metadata": {}, - "outputs": [], + "outputs": [ + { + "data": { + "text/plain": [ + "'Last value in array is 0.7805'" + ] + }, + "execution_count": 344, + "metadata": {}, + "output_type": "execute_result" + } + ], "source": [ - "X[0:-1]" + "f\"Last value in array is {X[-1]:.4f}\" # Same as X(3,2,1)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "## Use `find` for subscripts of nonzero elements of a `tensor`" + "## Extracting subscripts of nonzero elements of a tensor\n", + "Use {meth}`pyttb.tensor.find` to get nonzero elements and values from a tensor." 
] }, { "cell_type": "code", - "execution_count": null, + "execution_count": 385, "metadata": {}, - "outputs": [], + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "X is a tensor of shape 2 x 2 x 2\n", + "\tX(:,:, 0) =\n", + "\t\t 2 2\n", + "\t\t 1 2\n", + "\n", + "\tX(:,:, 1) =\n", + "\t\t 0 0\n", + "\t\t 1 0\n", + "\n" + ] + } + ], "source": [ - "np.random.seed(0)\n", - "X = ttb.tensor(3 * np.random.rand(2, 2, 2)) # Generate some data.\n", - "X" + "# Create a tensor that's about 33% zeros.\n", + "np.random.seed(5)\n", + "randint = lambda s: np.random.randint(0, 3, np.prod(s))\n", + "X = ttb.tensor.from_function(randint, (2, 2, 2)) # Create a tensor.\n", + "matlab_print(X,name='X',format='2.0f') # Print tensor X in MATLAB format." ] }, { "cell_type": "code", - "execution_count": null, + "execution_count": 386, "metadata": {}, "outputs": [], "source": [ @@ -568,38 +1382,95 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 387, "metadata": {}, - "outputs": [], + "outputs": [ + { + "data": { + "text/plain": [ + "array([[0, 0, 0],\n", + " [1, 0, 0],\n", + " [0, 1, 0],\n", + " [1, 1, 0],\n", + " [1, 0, 1]])" + ] + }, + "execution_count": 387, + "metadata": {}, + "output_type": "execute_result" + } + ], "source": [ "S # Nonzero subscripts" ] }, { "cell_type": "code", - "execution_count": null, + "execution_count": 388, "metadata": {}, - "outputs": [], + "outputs": [ + { + "data": { + "text/plain": [ + "array([[2],\n", + " [1],\n", + " [2],\n", + " [2],\n", + " [1]], dtype=int32)" + ] + }, + "execution_count": 388, + "metadata": {}, + "output_type": "execute_result" + } + ], "source": [ "V # Values" ] }, { "cell_type": "code", - "execution_count": null, + "execution_count": 390, "metadata": {}, - "outputs": [], + "outputs": [ + { + "data": { + "text/plain": [ + "(array([[0, 0, 0],\n", + " [0, 1, 0],\n", + " [1, 1, 0]]),\n", + " array([[ True],\n", + " [ True],\n", + " [ True]]))" + ] + }, + "execution_count": 390, 
+ "metadata": {}, + "output_type": "execute_result" + } + ], "source": [ - "larger_entries = X >= 2\n", + "larger_entries = X >= 2 # Find entries >= 2.\n", "larger_subs, larger_vals = larger_entries.find() # Find subscripts of values >= 2.\n", "larger_subs, larger_vals" ] }, { "cell_type": "code", - "execution_count": null, + "execution_count": 379, "metadata": {}, - "outputs": [], + "outputs": [ + { + "data": { + "text/plain": [ + "array([2., 2., 2.])" + ] + }, + "execution_count": 379, + "metadata": {}, + "output_type": "execute_result" + } + ], "source": [ "V = X[larger_subs]\n", "V" @@ -609,18 +1480,29 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "## Computing the Frobenius norm of a `tensor`\n", - "`norm` computes the Frobenius norm of a tensor. This corresponds to the Euclidean norm of the vectorized tensor." + "## Computing the Frobenius norm of a tensor\n", + "The method {meth}`pyttb.tensor.norm` computes the Frobenius norm of a tensor. This corresponds to the Euclidean norm of the vectorized tensor." ] }, { "cell_type": "code", - "execution_count": null, + "execution_count": 391, "metadata": {}, - "outputs": [], + "outputs": [ + { + "data": { + "text/plain": [ + "2.631397990238147" + ] + }, + "execution_count": 391, + "metadata": {}, + "output_type": "execute_result" + } + ], "source": [ "np.random.seed(0)\n", - "X = ttb.tensor(np.ones((3, 2, 3)))\n", + "X = ttb.tenrand((2,3,3))\n", "X.norm()" ] }, @@ -628,32 +1510,70 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "## Using `reshape` to rearrange elements in a `tensor`\n", - "`reshape` reshapes a tensor into a given shape array. The total number of elements in the tensor cannot change." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], + "## Reshaping a tensor\n", + "The method {meth}`pyttb.tensor.reshape` reshapes a tensor into a given shape array. 
The total number of elements in the tensor cannot change.\n", + "_Currently, this method creates a **copy** of the tensor, and this needs to be fixed._" + ] + }, + { + "cell_type": "code", + "execution_count": 395, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "X is a tensor of shape 3 x 2 x 3\n", + "\tX(:,:, 0) =\n", + "\t\t 5 3\n", + "\t\t 0 7\n", + "\t\t 3 9\n", + "\n", + "\tX(:,:, 1) =\n", + "\t\t 3 4\n", + "\t\t 5 7\n", + "\t\t 2 6\n", + "\n", + "\tX(:,:, 2) =\n", + "\t\t 8 6\n", + "\t\t 8 7\n", + "\t\t 1 7\n", + "\n", + "Y is a tensor of shape 3 x 3 x 2\n", + "\tY(:,:, 0) =\n", + "\t\t 5 3 3\n", + "\t\t 0 7 5\n", + "\t\t 3 9 2\n", + "\n", + "\tY(:,:, 1) =\n", + "\t\t 4 8 6\n", + "\t\t 7 8 7\n", + "\t\t 6 1 7\n", + "\n" + ] + } + ], "source": [ "np.random.seed(0)\n", - "X = ttb.tensor(np.random.rand(3, 2, 3, 10))\n", - "X.reshape((6, 30))" + "randint = lambda s: np.random.randint(0, 10, np.prod(s))\n", + "X = ttb.tensor.from_function(randint, (3, 2, 3))\n", + "matlab_print(X,name='X',format='2.0f')\n", + "Y = X.reshape((3,3,2))\n", + "matlab_print(Y,name='Y',format='2.0f')" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "## Basic operations (plus, minus, and, or, etc.) on a `tensor`\n", - "`tensor`s support plus, minus, times, divide, power, equals, and not-equals operators. `tensor`s can use their operators with another `tensor` or a scalar (with the exception of equalities which only takes `tensor`s). All mathematical operators are elementwise operations." + "## Basic operations (plus, minus, and, or, etc.) on a tensor\n", + "tensors support plus, minus, times, divide, power, equals, and not-equals operators. tensors can use their operators with another tensor or a scalar (with the exception of equalities which only takes tensors). All mathematical operators are elementwise operations." 
] }, { "cell_type": "code", - "execution_count": null, + "execution_count": 245, "metadata": {}, "outputs": [], "source": [ @@ -664,180 +1584,580 @@ }, { "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], + "execution_count": 246, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "tensor of shape (2, 2, 3) with order F\n", + "data[:, :, 0] =\n", + "[[1. 0.]\n", + " [1. 1.]]\n", + "data[:, :, 1] =\n", + "[[1. 0.]\n", + " [1. 1.]]\n", + "data[:, :, 2] =\n", + "[[0. 1.]\n", + " [1. 1.]]" + ] + }, + "execution_count": 246, + "metadata": {}, + "output_type": "execute_result" + } + ], "source": [ "A.logical_and(B) # Calls and." ] }, { "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], + "execution_count": 247, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "tensor of shape (2, 2, 3) with order F\n", + "data[:, :, 0] =\n", + "[[1. 1.]\n", + " [1. 1.]]\n", + "data[:, :, 1] =\n", + "[[1. 1.]\n", + " [1. 1.]]\n", + "data[:, :, 2] =\n", + "[[1. 1.]\n", + " [1. 1.]]" + ] + }, + "execution_count": 247, + "metadata": {}, + "output_type": "execute_result" + } + ], "source": [ "A.logical_or(B)" ] }, { "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], + "execution_count": 248, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "tensor of shape (2, 2, 3) with order F\n", + "data[:, :, 0] =\n", + "[[0. 1.]\n", + " [0. 0.]]\n", + "data[:, :, 1] =\n", + "[[0. 1.]\n", + " [0. 0.]]\n", + "data[:, :, 2] =\n", + "[[1. 0.]\n", + " [0. 
0.]]" + ] + }, + "execution_count": 248, + "metadata": {}, + "output_type": "execute_result" + } + ], "source": [ "A.logical_xor(B)" ] }, { "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], + "execution_count": 249, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "tensor of shape (2, 2, 3) with order F\n", + "data[:, :, 0] =\n", + "[[ True False]\n", + " [False False]]\n", + "data[:, :, 1] =\n", + "[[ True False]\n", + " [ True False]]\n", + "data[:, :, 2] =\n", + "[[False False]\n", + " [ True False]]" + ] + }, + "execution_count": 249, + "metadata": {}, + "output_type": "execute_result" + } + ], "source": [ "A == B # Calls eq." ] }, { "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], + "execution_count": 250, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "tensor of shape (2, 2, 3) with order F\n", + "data[:, :, 0] =\n", + "[[False True]\n", + " [ True True]]\n", + "data[:, :, 1] =\n", + "[[False True]\n", + " [False True]]\n", + "data[:, :, 2] =\n", + "[[ True True]\n", + " [False True]]" + ] + }, + "execution_count": 250, + "metadata": {}, + "output_type": "execute_result" + } + ], "source": [ "A != B # Calls neq." ] }, { "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], + "execution_count": 251, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "tensor of shape (2, 2, 3) with order F\n", + "data[:, :, 0] =\n", + "[[False True]\n", + " [False False]]\n", + "data[:, :, 1] =\n", + "[[False True]\n", + " [False True]]\n", + "data[:, :, 2] =\n", + "[[ True False]\n", + " [False False]]" + ] + }, + "execution_count": 251, + "metadata": {}, + "output_type": "execute_result" + } + ], "source": [ "A > B # Calls gt." 
] }, { "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], + "execution_count": 252, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "tensor of shape (2, 2, 3) with order F\n", + "data[:, :, 0] =\n", + "[[ True True]\n", + " [False False]]\n", + "data[:, :, 1] =\n", + "[[ True True]\n", + " [ True True]]\n", + "data[:, :, 2] =\n", + "[[ True False]\n", + " [ True False]]" + ] + }, + "execution_count": 252, + "metadata": {}, + "output_type": "execute_result" + } + ], "source": [ "A >= B # Calls ge." ] }, { "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], + "execution_count": 253, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "tensor of shape (2, 2, 3) with order F\n", + "data[:, :, 0] =\n", + "[[False False]\n", + " [ True True]]\n", + "data[:, :, 1] =\n", + "[[False False]\n", + " [False False]]\n", + "data[:, :, 2] =\n", + "[[False True]\n", + " [False True]]" + ] + }, + "execution_count": 253, + "metadata": {}, + "output_type": "execute_result" + } + ], "source": [ "A < B # Calls lt." ] }, { "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], + "execution_count": 254, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "tensor of shape (2, 2, 3) with order F\n", + "data[:, :, 0] =\n", + "[[ True False]\n", + " [ True True]]\n", + "data[:, :, 1] =\n", + "[[ True False]\n", + " [ True False]]\n", + "data[:, :, 2] =\n", + "[[False True]\n", + " [ True True]]" + ] + }, + "execution_count": 254, + "metadata": {}, + "output_type": "execute_result" + } + ], "source": [ "A <= B # Calls le." ] }, { "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], + "execution_count": 255, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "tensor of shape (2, 2, 3) with order F\n", + "data[:, :, 0] =\n", + "[[0. 0.]\n", + " [0. 0.]]\n", + "data[:, :, 1] =\n", + "[[0. 0.]\n", + " [0. 
0.]]\n", + "data[:, :, 2] =\n", + "[[0. 0.]\n", + " [0. 0.]]" + ] + }, + "execution_count": 255, + "metadata": {}, + "output_type": "execute_result" + } + ], "source": [ "A.logical_not() # Calls not." ] }, { "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], + "execution_count": 256, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "tensor of shape (2, 2, 3) with order F\n", + "data[:, :, 0] =\n", + "[[1. 1.]\n", + " [1. 1.]]\n", + "data[:, :, 1] =\n", + "[[2. 1.]\n", + " [2. 2.]]\n", + "data[:, :, 2] =\n", + "[[1. 1.]\n", + " [2. 1.]]" + ] + }, + "execution_count": 256, + "metadata": {}, + "output_type": "execute_result" + } + ], "source": [ "+A # Calls uplus." ] }, { "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], + "execution_count": 257, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "tensor of shape (2, 2, 3) with order F\n", + "data[:, :, 0] =\n", + "[[-1. -1.]\n", + " [-1. -1.]]\n", + "data[:, :, 1] =\n", + "[[-2. -1.]\n", + " [-2. -2.]]\n", + "data[:, :, 2] =\n", + "[[-1. -1.]\n", + " [-2. -1.]]" + ] + }, + "execution_count": 257, + "metadata": {}, + "output_type": "execute_result" + } + ], "source": [ "-A # Calls uminus." ] }, { "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], + "execution_count": 258, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "tensor of shape (2, 2, 3) with order F\n", + "data[:, :, 0] =\n", + "[[2. 1.]\n", + " [3. 3.]]\n", + "data[:, :, 1] =\n", + "[[4. 1.]\n", + " [4. 3.]]\n", + "data[:, :, 2] =\n", + "[[1. 3.]\n", + " [4. 3.]]" + ] + }, + "execution_count": 258, + "metadata": {}, + "output_type": "execute_result" + } + ], "source": [ "A + B # Calls plus." 
] }, { "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], + "execution_count": 259, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "tensor of shape (2, 2, 3) with order F\n", + "data[:, :, 0] =\n", + "[[ 0. 1.]\n", + " [-1. -1.]]\n", + "data[:, :, 1] =\n", + "[[0. 1.]\n", + " [0. 1.]]\n", + "data[:, :, 2] =\n", + "[[ 1. -1.]\n", + " [ 0. -1.]]" + ] + }, + "execution_count": 259, + "metadata": {}, + "output_type": "execute_result" + } + ], "source": [ "A - B # Calls minus." ] }, { "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], + "execution_count": 260, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "tensor of shape (2, 2, 3) with order F\n", + "data[:, :, 0] =\n", + "[[1. 0.]\n", + " [2. 2.]]\n", + "data[:, :, 1] =\n", + "[[4. 0.]\n", + " [4. 2.]]\n", + "data[:, :, 2] =\n", + "[[0. 2.]\n", + " [4. 2.]]" + ] + }, + "execution_count": 260, + "metadata": {}, + "output_type": "execute_result" + } + ], "source": [ "A * B # Calls times." ] }, { "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], + "execution_count": 261, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "tensor of shape (2, 2, 3) with order F\n", + "data[:, :, 0] =\n", + "[[5. 5.]\n", + " [5. 5.]]\n", + "data[:, :, 1] =\n", + "[[10. 5.]\n", + " [10. 10.]]\n", + "data[:, :, 2] =\n", + "[[ 5. 5.]\n", + " [10. 5.]]" + ] + }, + "execution_count": 261, + "metadata": {}, + "output_type": "execute_result" + } + ], "source": [ "5 * A # Calls mtimes." ] }, { "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], + "execution_count": 262, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "tensor of shape (2, 2, 3) with order F\n", + "data[:, :, 0] =\n", + "[[1. 1.]\n", + " [1. 1.]]\n", + "data[:, :, 1] =\n", + "[[4. 1.]\n", + " [4. 2.]]\n", + "data[:, :, 2] =\n", + "[[1. 1.]\n", + " [4. 
1.]]" + ] + }, + "execution_count": 262, + "metadata": {}, + "output_type": "execute_result" + } + ], "source": [ "A**B # Calls power." ] }, { "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], + "execution_count": 263, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "tensor of shape (2, 2, 3) with order F\n", + "data[:, :, 0] =\n", + "[[1. 1.]\n", + " [1. 1.]]\n", + "data[:, :, 1] =\n", + "[[4. 1.]\n", + " [4. 4.]]\n", + "data[:, :, 2] =\n", + "[[1. 1.]\n", + " [4. 1.]]" + ] + }, + "execution_count": 263, + "metadata": {}, + "output_type": "execute_result" + } + ], "source": [ "A**2 # Calls power." ] }, { "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], + "execution_count": 264, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "tensor of shape (2, 2, 3) with order F\n", + "data[:, :, 0] =\n", + "[[1. inf]\n", + " [0.5 0.5]]\n", + "data[:, :, 1] =\n", + "[[ 1. inf]\n", + " [ 1. 2.]]\n", + "data[:, :, 2] =\n", + "[[inf 0.5]\n", + " [1. 0.5]]" + ] + }, + "execution_count": 264, + "metadata": {}, + "output_type": "execute_result" + } + ], "source": [ "A / B # Calls ldivide." ] }, { "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], + "execution_count": 265, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "tensor of shape (2, 2, 3) with order F\n", + "data[:, :, 0] =\n", + "[[2. 2.]\n", + " [2. 2.]]\n", + "data[:, :, 1] =\n", + "[[1. 2.]\n", + " [1. 1.]]\n", + "data[:, :, 2] =\n", + "[[2. 2.]\n", + " [1. 2.]]" + ] + }, + "execution_count": 265, + "metadata": {}, + "output_type": "execute_result" + } + ], "source": [ "2 / A # Calls rdivide." ] @@ -846,15 +2166,35 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "## Using `tenfun` for elementwise operations on one or more `tensor`s\n", - "The method `tenfun` applies a specified function to a number of `tensor`s. 
This can be used for any function that is not predefined for `tensor`s." + "## Using `tenfun` for elementwise operations on one or more tensors\n", + "The method `tenfun` applies a specified function to a number of tensors. This can be used for any function that is not predefined for tensors." ] }, { "cell_type": "code", - "execution_count": null, + "execution_count": 266, "metadata": {}, - "outputs": [], + "outputs": [ + { + "data": { + "text/plain": [ + "tensor of shape (2, 2, 3) with order F\n", + "data[:, :, 0] =\n", + "[[2. 2.]\n", + " [2. 2.]]\n", + "data[:, :, 1] =\n", + "[[3. 2.]\n", + " [3. 3.]]\n", + "data[:, :, 2] =\n", + "[[2. 2.]\n", + " [3. 2.]]" + ] + }, + "execution_count": 266, + "metadata": {}, + "output_type": "execute_result" + } + ], "source": [ "np.random.seed(0)\n", "A = ttb.tensor(np.floor(3 * np.random.rand(2, 2, 3), order=\"F\")) # Generate some data.\n", @@ -863,9 +2203,29 @@ }, { "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], + "execution_count": 267, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "tensor of shape (2, 2, 3) with order F\n", + "data[:, :, 0] =\n", + "[[1. 1.]\n", + " [2. 2.]]\n", + "data[:, :, 1] =\n", + "[[2. 1.]\n", + " [2. 2.]]\n", + "data[:, :, 2] =\n", + "[[1. 2.]\n", + " [2. 2.]]" + ] + }, + "execution_count": 267, + "metadata": {}, + "output_type": "execute_result" + } + ], "source": [ "# Wrap np.maximum in a function with a function signature that Python's inspect.signature can handle.\n", "def max_elements(a, b):\n", @@ -877,9 +2237,29 @@ }, { "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], + "execution_count": 268, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "tensor of shape (2, 2, 3) with order F\n", + "data[:, :, 0] =\n", + "[[1. 1.]\n", + " [1. 1.]]\n", + "data[:, :, 1] =\n", + "[[2. 1.]\n", + " [2. 2.]]\n", + "data[:, :, 2] =\n", + "[[1. 2.]\n", + " [2. 
1.]]" + ] + }, + "execution_count": 268, + "metadata": {}, + "output_type": "execute_result" + } + ], "source": [ "np.random.seed(0)\n", "C = ttb.tensor(\n", @@ -899,14 +2279,34 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "## Use `permute` to reorder the modes of a `tensor`" + "## Use `permute` to reorder the modes of a tensor" ] }, { "cell_type": "code", - "execution_count": null, + "execution_count": 269, "metadata": {}, - "outputs": [], + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "X is a tensor of shape (2, 3, 4) with order F\n", + "data[:, :, 0] =\n", + "[[1 3 5]\n", + " [2 4 6]]\n", + "data[:, :, 1] =\n", + "[[ 7 9 11]\n", + " [ 8 10 12]]\n", + "data[:, :, 2] =\n", + "[[13 15 17]\n", + " [14 16 18]]\n", + "data[:, :, 3] =\n", + "[[19 21 23]\n", + " [20 22 24]]\n" + ] + } + ], "source": [ "X = ttb.tensor(np.arange(1, 25), shape=(2, 3, 4))\n", "print(f\"X is a {X}\")" @@ -914,9 +2314,30 @@ }, { "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], + "execution_count": 270, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "tensor of shape (4, 3, 2) with order F\n", + "data[:, :, 0] =\n", + "[[ 1 3 5]\n", + " [ 7 9 11]\n", + " [13 15 17]\n", + " [19 21 23]]\n", + "data[:, :, 1] =\n", + "[[ 2 4 6]\n", + " [ 8 10 12]\n", + " [14 16 18]\n", + " [20 22 24]]" + ] + }, + "execution_count": 270, + "metadata": {}, + "output_type": "execute_result" + } + ], "source": [ "X.permute(np.array((2, 1, 0))) # Reverse the modes." 
] @@ -930,9 +2351,22 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 271, "metadata": {}, - "outputs": [], + "outputs": [ + { + "data": { + "text/plain": [ + "tensor of shape (4,) with order F\n", + "data[:] =\n", + "[1 2 3 4]" + ] + }, + "execution_count": 271, + "metadata": {}, + "output_type": "execute_result" + } + ], "source": [ "X = ttb.tensor(np.arange(1, 5), (4,))\n", "X.permute(\n", @@ -946,13 +2380,13 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "## Symmetrizing and checking for symmetry in a `tensor`\n", - "A `tensor` can be symmetrized in a collection of modes with the command `symmetrize`. The new, symmetric `tensor` is formed by averaging over all elements in the `tensor` which are required to be equal." + "## Symmetrizing and checking for symmetry in a tensor\n", + "A tensor can be symmetrized in a collection of modes with the command `symmetrize`. The new, symmetric tensor is formed by averaging over all elements in the tensor which are required to be equal." ] }, { "cell_type": "code", - "execution_count": null, + "execution_count": 272, "metadata": {}, "outputs": [], "source": [ @@ -966,12 +2400,12 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "An optional argument `grps` can also be passed to `symmetrize` which specifies an array of modes with respect to which the `tensor` should be symmetrized." + "An optional argument `grps` can also be passed to `symmetrize` which specifies an array of modes with respect to which the tensor should be symmetrized." 
] }, { "cell_type": "code", - "execution_count": null, + "execution_count": 273, "metadata": {}, "outputs": [], "source": [ @@ -989,18 +2423,40 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 274, "metadata": {}, - "outputs": [], + "outputs": [ + { + "data": { + "text/plain": [ + "True" + ] + }, + "execution_count": 274, + "metadata": {}, + "output_type": "execute_result" + } + ], "source": [ "Y.issymmetric()" ] }, { "cell_type": "code", - "execution_count": null, + "execution_count": 275, "metadata": {}, - "outputs": [], + "outputs": [ + { + "data": { + "text/plain": [ + "False" + ] + }, + "execution_count": 275, + "metadata": {}, + "output_type": "execute_result" + } + ], "source": [ "Z.issymmetric(np.array((1, 2)))" ] @@ -1009,29 +2465,82 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "## Displaying a `tensor`" + "## Displaying a tensor" ] }, { "cell_type": "code", - "execution_count": null, + "execution_count": 276, "metadata": {}, - "outputs": [], + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "tensor of shape (3, 3, 2) with order F\n", + "data[:, :, 0] =\n", + "[[0.5488135 0.60276338 0.4236548 ]\n", + " [0.43758721 0.96366276 0.79172504]\n", + " [0.56804456 0.07103606 0.0202184 ]]\n", + "data[:, :, 1] =\n", + "[[0.71518937 0.54488318 0.64589411]\n", + " [0.891773 0.38344152 0.52889492]\n", + " [0.92559664 0.0871293 0.83261985]]\n" + ] + } + ], "source": [ "print(X)" ] }, { "cell_type": "code", - "execution_count": null, + "execution_count": 277, "metadata": {}, - "outputs": [], + "outputs": [ + { + "data": { + "text/plain": [ + "tensor of shape (3, 3, 2) with order F\n", + "data[:, :, 0] =\n", + "[[0.5488135 0.60276338 0.4236548 ]\n", + " [0.43758721 0.96366276 0.79172504]\n", + " [0.56804456 0.07103606 0.0202184 ]]\n", + "data[:, :, 1] =\n", + "[[0.71518937 0.54488318 0.64589411]\n", + " [0.891773 0.38344152 0.52889492]\n", + " [0.92559664 0.0871293 0.83261985]]" + ] + }, + 
"execution_count": 277, + "metadata": {}, + "output_type": "execute_result" + } + ], "source": [ "X # In the python interface" ] } ], - "metadata": {}, + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.11.11" + } + }, "nbformat": 4, "nbformat_minor": 1 } diff --git a/pyttb/tensor.py b/pyttb/tensor.py index db178277..9192a6bc 100644 --- a/pyttb/tensor.py +++ b/pyttb/tensor.py @@ -52,25 +52,16 @@ class tensor: - """ - TENSOR Class for dense tensors. - - Contains the following data members: + """Class for dense tensors. - ``data``: :class:`numpy.ndarray` dense array containing the data elements - of the tensor. + **Members** - Instances of :class:`pyttb.tensor` can be created using `__init__()` or - the following method: + * ``data``: :class:`numpy.ndarray` containing the data elements of the tensor + stored, by default, in Fortran order - * :meth:`from_function` + * ``shape``: :class:`tuple` of integers containing the size of each mode of + the tensor - Examples - -------- - For all examples listed below, the following module imports are assumed: - - >>> import pyttb as ttb - >>> import numpy as np """ __slots__ = ("data", "shape") @@ -81,68 +72,118 @@ def __init__( shape: Optional[Shape] = None, copy: bool = True, ): - """Create a :class:`pyttb.tensor` from a :class:`numpy.ndarray`. - - Note that 1D tensors (i.e., when len(shape)==1) contains a data - array that follow the Numpy convention of being a row vector. + """ + Construct a :class:`pyttb.tensor`. Parameters ---------- - data: - Tensor source data. - shape: - Shape of resulting tensor if not the same as data shape. - copy: - Whether to make a copy of provided data or just reference it. 
+ data : optional + Source data as :class:`numpy.ndarray` (default: empty). + shape : optional + Shape of the tensor as a :class:`tuple` (default: ``data.shape``). + copy : optional + Whether to copy (versus reference) the data (default: True). Examples -------- - Create an empty :class:`pyttb.tensor`: - - >>> T = ttb.tensor() - >>> print(T) - empty tensor of shape () - data = [] - - Create a :class:`pyttb.tensor` from a :class:`numpy.ndarray`: - - >>> T = ttb.tensor(np.array([[1, 2], [3, 4]])) - >>> print(T) - tensor of shape (2, 2) with order F - data[:, :] = - [[1 2] - [3 4]] - """ + For *all* examples in this document, the following module imports are assumed:: + + >>> import pyttb as ttb + >>> import numpy as np + + Create a :class:`pyttb.tensor` from a three-way :class:`numpy.ndarray`:: + + >>> data = np.array([[[1,13],[5,17],[9,21]], + ... [[2,14],[6,18],[10,22]], + ... [[3,15],[7,19],[11,23]], + ... [[4,16],[8,20],[12,24]]]) + >>> T = ttb.tensor(data) + >>> print(T) + tensor of shape (4, 3, 2) with order F + data[:, :, 0] = + [[ 1 5 9] + [ 2 6 10] + [ 3 7 11] + [ 4 8 12]] + data[:, :, 1] = + [[13 17 21] + [14 18 22] + [15 19 23] + [16 20 24]] + + Create a :class:`pyttb.tensor` from a :class:`numpy.ndarray` vector and + reshape it:: + + >>> data = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, + ... 
17, 18, 19, 20, 21, 22, 23, 24]) + >>> T = ttb.tensor(data, shape=(4, 3, 2)) + >>> print(T) + tensor of shape (4, 3, 2) with order F + data[:, :, 0] = + [[ 1 5 9] + [ 2 6 10] + [ 3 7 11] + [ 4 8 12]] + data[:, :, 1] = + [[13 17 21] + [14 18 22] + [15 19 23] + [16 20 24]] + + Create an empty :class:`pyttb.tensor`:: + + >>> T = ttb.tensor() + >>> print(T) + empty tensor of shape () + data = [] + + See Also + -------- + * :doc:`/tutorial/class_tensor` - Getting started with the tensor class + * :meth:`from_function` - Create a tensor from a function + such as :meth:`numpy.ones` + * :meth:`copy` - Make a deep copy of a tensor + * :meth:`pyttb.sptensor.to_tensor` - Convert a sparse tensor to a dense tensor + * :meth:`pyttb.ktensor.to_tensor` - Convert a Kruskal tensor to a dense tensor + * :meth:`pyttb.ttensor.to_tensor` - Convert a Tucker tensor to a dense tensor + * :meth:`pyttb.tenmat.to_tensor` - Convert a tenmat to a dense tensor + """ + # EMPTY / DEFAULT CONSTRUCTOR if data is None: - # EMPTY / DEFAULT CONSTRUCTOR self.data: np.ndarray = np.array([], order=self.order) self.shape: Tuple = () return - # CONVERT A MULTIDIMENSIONAL ARRAY + # Check that data is an numpy number or boolean array if not issubclass(data.dtype.type, np.number) and not issubclass( data.dtype.type, np.bool_ ): - assert False, "First argument must be a multidimensional array." 
+ raise AssertionError("Data (1st argument) must be a numpy ndarray") - # Create or check second argument + # Create or check second argument (can be a variety of things) if shape is None: shape = data.shape + shape = parse_shape(shape) # Make sure the number of elements matches what's been specified if len(shape) == 0: if data.size > 0: - assert False, "Empty tensor cannot contain any elements" + raise AssertionError( + "Shape (2nd argument) has zero length, " + "but data (1st argument) was not empty" + ) elif prod(shape) != data.size: - assert ( - False - ), "TTB:WrongSize, Size of data does not match specified size of tensor" + raise AssertionError( + "Shape (2nd argument) does not match number of " + "elements in data (1st argument)" + ) # Make sure the data is indeed the right shape if data.size > 0 and len(shape) > 0: # reshaping using Fortran ordering to match Matlab conventions + # TODO: Check if there is a reordering of the data that is less expensive. data = np.reshape(data, np.array(shape), order=self.order) # Create the tensor @@ -151,8 +192,8 @@ def __init__( else: if not self._matches_order(data): logging.warning( - f"Selected no copy, but input data isn't {self.order} ordered " - "so must copy." + "Tensor Constructor: Selected no copy, but input data isn't " + f"{self.order} ordered so must copy" ) self.data = to_memory_order(data, self.order) self.shape = shape @@ -160,7 +201,10 @@ @property def order(self) -> Literal["F"]: - """Return the data layout of the underlying storage.""" + """Return the data layout of the underlying storage. + + The data layout is required to be Fortran. + """ return "F" def _matches_order(self, array: np.ndarray) -> bool: @@ -177,16 +221,17 @@ def from_function( function_handle: Callable[[Tuple[int, ...]], np.ndarray], shape: Shape, ) -> tensor: - """Construct a :class:`pyttb.tensor` with data from a function. + """Construct :class:`pyttb.tensor` with data generated by given function. 
Parameters ---------- function_handle: - A function that can accept a shape (i.e., :class:`tuple` of - dimension sizes) and return a :class:`numpy.ndarray` of that shape. - `numpy.zeros`, `numpy.ones`. + A function that takes a tuple of integers and returns a + :class:`numpy.ndarray`. The array should be in Fortran order to avoid + data being copied. The data will be reshaped to the shape, + so returning a vector of length equal to the product of the shape is fine. shape: - Shape of the resulting tensor. + Shape of the resulting tensor; e.g., a tuple of integers. Returns ------- @@ -194,25 +239,48 @@ Examples -------- - Create a :class:`pyttb.tensor` with entries equal to 1: + Create a :class:`pyttb.tensor` with entries drawn from a normal distribution + using :func:`numpy.random.randn`. Observe that we actually generate a vector to + avoid having a C-ordered array (the default if we had provided the shape array) + be rearranged as an F-ordered array:: + + >>> randn = lambda s : np.random.randn(np.prod(s)) + >>> np.random.seed(0) # reproducibility + >>> T = ttb.tensor.from_function(randn, (4, 3, 2)) + >>> print(T) + tensor of shape (4, 3, 2) with order F + data[:, :, 0] = + [[ 1.76405235 1.86755799 -0.10321885] + [ 0.40015721 -0.97727788 0.4105985 ] + [ 0.97873798 0.95008842 0.14404357] + [ 2.2408932 -0.15135721 1.45427351]] + data[:, :, 1] = + [[ 0.76103773 1.49407907 -2.55298982] + [ 0.12167502 -0.20515826 0.6536186 ] + [ 0.44386323 0.3130677 0.8644362 ] + [ 0.33367433 -0.85409574 -0.74216502]] + + Create a :class:`pyttb.tensor` with all entries equal to 1 using + :func:`numpy.ones`. Observe that we explicitly specify Fortran order:: + + >>> T = ttb.tensor.from_function(lambda s: np.ones(s,order='F'), (2, 3, 4)) + >>> print(T) + tensor of shape (2, 3, 4) with order F + data[:, :, 0] = + [[1. 1. 1.] + [1. 1. 1.]] + data[:, :, 1] = + [[1. 1. 1.] + [1. 1. 1.]] + data[:, :, 2] = + [[1. 1. 1.] + [1. 1. 1.]] + data[:, :, 3] = + [[1. 1. 
1.]] + """ + # TODO Create documentation page for collapsing and scaling tensors - >>> fortran_order_ones = lambda shape: np.ones(shape=shape, order="F") - >>> T = ttb.tensor.from_function(fortran_order_ones, (2, 3, 4)) - >>> print(T) - tensor of shape (2, 3, 4) with order F - data[:, :, 0] = - [[1. 1. 1.] - [1. 1. 1.]] - data[:, :, 1] = - [[1. 1. 1.] - [1. 1. 1.]] - data[:, :, 2] = - [[1. 1. 1.] - [1. 1. 1.]] - data[:, :, 3] = - [[1. 1. 1.] - [1. 1. 1.]] - """ # Check size shape = parse_shape(shape) @@ -225,25 +293,37 @@ def from_function( def copy(self) -> tensor: """Make a deep copy of a :class:`pyttb.tensor`. + The standard copy of a tensor creates a shallow copy of the data. + Any changes to the old or new tensor will affect the other. + In contrast, the copy method creates a deep copy of the tensor which + is totally independent of what it was copied from. + Returns ------- - Copy of original tensor. + Deep copy of original tensor. Examples -------- - >>> T1 = ttb.tensor(np.ones((3, 2))) - >>> T2 = T1 - >>> T3 = T2.copy() - >>> T1[0, 0] = 3 - >>> T1[0, 0] == T2[0, 0] - True - >>> T1[0, 0] == T3[0, 0] - False + Observing the difference between a shallow copy and a deep copy. When the + original tensor changes, so does the shallow copy, but the deep copy does not:: + + >>> T = ttb.tensor(np.ones((3, 2))) + >>> T_shallow = T + >>> T_deep = T.copy() + >>> T[0, 0] = 3 + >>> T[0, 0] == T_shallow[0, 0] + True + >>> T[0, 0] == T_deep[0, 0] + False """ return ttb.tensor(self.data, self.shape, copy=True) def __deepcopy__(self, memo): - """Return deep copy of this tensor.""" + """Return deep copy of this tensor. + + This a python construct to support copy operations; + see https://docs.python.org/3/library/copy.html for details. + """ return self.copy() def collapse( @@ -252,32 +332,69 @@ def collapse( fun: Callable[[np.ndarray], Union[float, np.ndarray]] = np.sum, ) -> Union[float, np.ndarray, tensor]: """ - Collapse tensor along specified dimensions. 
+ Collapse tensor along specified dimensions using a function. Parameters ---------- - dims: - Dimensions to collapse. - fun: - Method used to collapse dimensions. + dims: optional + Dimensions to collapse (default: all). + fun: optional + Method used to collapse dimensions (default: :meth:`numpy.sum`). Returns ------- - Collapsed value. + Scalar (if all dimensions collapsed) or tensor. Examples -------- - >>> T = ttb.tensor(np.ones((2, 2))) - >>> T.collapse() - 4.0 - >>> T.collapse(np.array([0])) - tensor of shape (2,) with order F - data[:] = - [2. 2.] - >>> T.collapse(np.arange(T.ndims), sum) - 4.0 - >>> T.collapse(np.arange(T.ndims), np.prod) - 1.0 + Sum all elements of tensor:: + + >>> T = ttb.tensor(np.ones((4,3,2),order='F')) + >>> T.collapse() + 24.0 + + Compute the sum for each mode-0 fiber (output is a tensor):: + + >>> T.collapse(0) + tensor of shape (3, 2) with order F + data[:, :] = + [[4. 4.] + [4. 4.] + [4. 4.]] + + Compute the sum of the entries in each mode-0 slice (output is a tensor):: + + >>> T.collapse([1, 2]) + tensor of shape (4,) with order F + data[:] = + [6. 6. 6. 6.] + + Compute the max entry in each mode-2 slice (output is a tensor):: + + >>> T.collapse([0, 1], np.max) + tensor of shape (2,) with order F + data[:] = + [1. 1.] 
+ + Find the maximum and minimum values in a tensor:: + + >>> randn = lambda s : np.random.randn(np.prod(s)) + >>> np.random.seed(0) # reproducibility + >>> T = ttb.tensor.from_function(randn, (2, 2, 2)) + >>> print(T) + tensor of shape (2, 2, 2) with order F + data[:, :, 0] = + [[1.76405235 0.97873798] + [0.40015721 2.2408932 ]] + data[:, :, 1] = + [[ 1.86755799 0.95008842] + [-0.97727788 -0.15135721]] + >>> max_val = T.collapse(fun=np.max) + >>> min_val = T.collapse(fun=np.min) + >>> print(f"Max value: {max_val}") + Max value: 2.240893199201458 + >>> print(f"Min value: {min_val}") + Min value: -0.977277879876411 """ if self.data.size == 0: return np.array([], order=self.order) @@ -1102,6 +1219,7 @@ def nvecs(self, n: int, r: int, flipsign: bool = True) -> np.ndarray: Xn = self.to_tenmat(rdims=np.array([n])).double() y = Xn @ Xn.T + # TODO (TK) RandSVD would probably be better. if r < y.shape[0] - 1: w, v = scipy.sparse.linalg.eigsh(y, r) v = v[:, (-np.abs(w)).argsort()] @@ -1191,7 +1309,7 @@ def reshape(self, shape: Shape) -> tensor: shape = parse_shape(shape) if prod(self.shape) != prod(shape): assert False, "Reshaping a tensor cannot change number of elements" - + # TODO: This is a copy, but it should be a view return ttb.tensor(np.reshape(self.data, shape, order=self.order), shape) def scale( @@ -2766,8 +2884,9 @@ def tenones(shape: Shape, order: MemoryLayout = "F") -> tensor: ---------- shape: Shape of resulting tensor. - order: - Memory layout for resulting tensor. + order: optional + Memory layout for resulting tensor (default: F). + *Note: C order is not recommended.* Returns ------- @@ -2787,6 +2906,10 @@ def tenones(shape: Shape, order: MemoryLayout = "F") -> tensor: [[1. 1. 1.] [1. 1. 1.] [1. 1. 1.]] + + See Also + -------- + * :meth:`pyttb.tensor.from_function` - Create a tensor from a function. 
""" def ones(shape: Tuple[int, ...]) -> np.ndarray: @@ -2802,8 +2925,9 @@ def tenzeros(shape: Shape, order: MemoryLayout = "F") -> tensor: ---------- shape: Shape of resulting tensor. - order: - Memory layout for resulting tensor. + order: optional + Memory layout for resulting tensor (default: F). + *Note: C order is not recommended.* Returns ------- @@ -2838,8 +2962,9 @@ def tenrand(shape: Shape, order: MemoryLayout = "F") -> tensor: ---------- shape: Shape of resulting tensor. - order: - Memory layout for resulting tensor. + order: optional + Memory layout for resulting tensor (default: F). + *Note: C order is not recommended.* Returns ------- @@ -2858,8 +2983,7 @@ def tenrand(shape: Shape, order: MemoryLayout = "F") -> tensor: # Typing doesn't play nice with partial # mypy issue: 1484 def unit_uniform(pass_through_shape: Tuple[int, ...]) -> np.ndarray: - data = np.random.uniform(low=0, high=1, size=pass_through_shape) - to_memory_order(data, order) + data = np.random.uniform(low=0, high=1, size=np.prod(pass_through_shape)) return data return tensor.from_function(unit_uniform, shape) diff --git a/tests/test_package.py b/tests/test_package.py index ba6301bc..97f28c0b 100644 --- a/tests/test_package.py +++ b/tests/test_package.py @@ -21,7 +21,7 @@ def test_linting(): root_dir = os.path.dirname(os.path.dirname(__file__)) toml_file = os.path.join(root_dir, "pyproject.toml") subprocess.run( - f"ruff check {root_dir} --config {toml_file}", + f'ruff check "{root_dir}" --config "{toml_file}"', check=True, shell=True, ) @@ -32,7 +32,7 @@ def test_formatting(): root_dir = os.path.dirname(os.path.dirname(__file__)) toml_file = os.path.join(root_dir, "pyproject.toml") subprocess.run( - f"ruff format --check {root_dir} --config {toml_file}", + f'ruff format --check "{root_dir}" --config "{toml_file}"', check=True, shell=True, ) @@ -50,7 +50,18 @@ def test_typing(): root_dir = os.path.dirname(os.path.dirname(__file__)) toml_file = os.path.join(root_dir, "pyproject.toml") 
subprocess.run( - f"mypy -p pyttb --config-file {toml_file} {skip_untyped}", + f'mypy -p pyttb --config-file "{toml_file}" {skip_untyped}', + check=True, + shell=True, + ) + + +def test_spelling(): + """Confirm spelling is enforced""" + root_dir = os.path.dirname(os.path.dirname(__file__)) + toml_file = os.path.join(root_dir, "pyproject.toml") + subprocess.run( + f'codespell --toml "{toml_file}"', check=True, shell=True, ) diff --git a/tests/test_tensor.py b/tests/test_tensor.py index 20f913a8..6b7c5630 100644 --- a/tests/test_tensor.py +++ b/tests/test_tensor.py @@ -41,19 +41,23 @@ def test_tensor_initialization_from_data(sample_tensor_2way): with pytest.raises(AssertionError) as excinfo: ttb.tensor(params["data"], ()) - assert "Empty tensor cannot contain any elements" in str(excinfo) + assert ( + "Shape (2nd argument) has zero length, but data (1st argument) was not empty" + in str(excinfo) + ) with pytest.raises(AssertionError) as excinfo: ttb.tensor(params["data"], (2, 4)) - assert "TTB:WrongSize, Size of data does not match specified size of tensor" in str( - excinfo + assert ( + "Shape (2nd argument) does not match number of elements in data (1st argument)" + in str(excinfo) ) # TODO how else to break this logical statement? data = np.array([["a", 2, 3], [4, 5, 6]]) with pytest.raises(AssertionError) as excinfo: ttb.tensor(data, (2, 3)) - assert "First argument must be a multidimensional array." in str(excinfo) + assert "Data (1st argument) must be a numpy ndarray" in str(excinfo) # 1D tensors # no shape specified