From 9eba56d091cd6bc81b5c6ccc83478aabf7ed785a Mon Sep 17 00:00:00 2001
From: Nick Johnson <24689722+ntjohnson1@users.noreply.github.com>
Date: Sat, 14 Dec 2024 09:08:47 -0500
Subject: [PATCH 01/33] Internal: Try new link checker

---
 .github/workflows/markdown-check.yml | 12 +++++++++---
 1 file changed, 9 insertions(+), 3 deletions(-)

diff --git a/.github/workflows/markdown-check.yml b/.github/workflows/markdown-check.yml
index 6e0dcbe3..2014353d 100644
--- a/.github/workflows/markdown-check.yml
+++ b/.github/workflows/markdown-check.yml
@@ -7,8 +7,14 @@ on:
     branches: [ "main" ]
 
 jobs:
-  markdown-link-check:
+  check-links:
+    name: runner / linkspector
     runs-on: ubuntu-latest
     steps:
-    - uses: actions/checkout@v4
-    - uses: gaurav-nelson/github-action-markdown-link-check@v1
+      - uses: actions/checkout@v4
+      - name: Run linkspector
+        uses: umbrelladocs/action-linkspector@v1
+        with:
+          github_token: ${{ secrets.github_token }}
+          reporter: github-pr-review
+          fail_on_error: true

From fd1bf9128c8c6630e56a29c3d1d60ebb6c8776d4 Mon Sep 17 00:00:00 2001
From: Nick Johnson <24689722+ntjohnson1@users.noreply.github.com>
Date: Sat, 14 Dec 2024 09:40:28 -0500
Subject: [PATCH 02/33] Internal: Add codespell and fix typos.

---
 .pre-commit-config.yaml                      |  4 ++++
 CHANGELOG.md                                 |  4 ++--
 CONTRIBUTING.md                              | 16 +++++++++++-----
 README.md                                    |  2 +-
 docs/source/tutorial/algorithm_cp_als.ipynb  |  4 ++--
 docs/source/tutorial/algorithm_gcp_opt.ipynb |  2 +-
 docs/source/tutorial/algorithm_hosvd.ipynb   |  2 +-
 docs/source/tutorial/class_sptensor.ipynb    |  2 +-
 docs/source/tutorial/class_sumtensor.ipynb   |  2 +-
 docs/source/tutorial/class_tenmat.ipynb      |  2 +-
 docs/source/tutorial/class_tensor.ipynb      |  4 ++--
 docs/source/tutorial/class_ttensor.ipynb     |  2 +-
 profiling/algorithms_profiling.ipynb         |  8 ++++----
 pyproject.toml                               | 20 ++++++++++++++++++++
 pyttb/cp_als.py                              |  2 +-
 pyttb/cp_apr.py                              | 20 ++++++++++----------
 pyttb/gcp/optimizers.py                      |  2 +-
 pyttb/hosvd.py                               |  2 +-
 pyttb/ktensor.py                             |  6 +++---
 pyttb/pyttb_utils.py                         |  6 +++---
 pyttb/sptenmat.py                            |  2 +-
 pyttb/sptensor.py                            |  8 ++++----
 pyttb/tensor.py                              | 10 +++++-----
 tests/gcp/test_fg_est.py                     |  2 +-
 tests/test_cp_als.py                         |  2 +-
 tests/test_cp_apr.py                         |  2 +-
 tests/test_ktensor.py                        |  6 +++---
 tests/test_package.py                        | 11 +++++++++++
 tests/test_sptensor.py                       |  6 +++---
 tests/test_tensor.py                         |  2 +-
 30 files changed, 102 insertions(+), 61 deletions(-)

diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index bdc356f4..a53b97a9 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -13,3 +13,7 @@ repos:
       --extra-keys=metadata.language_info metadata.vscode metadata.kernelspec cell.metadata.vscode,
       --drop-empty-cells
     ]
+  - repo: https://github.com/codespell-project/codespell
+    rev: v2.3.0
+    hooks:
+      - id: codespell
\ No newline at end of file
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 317f9849..12dfcfe2 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -3,7 +3,7 @@
   - Aligning comparison operator output for data classes (https://github.com/sandialabs/pyttb/pull/331)
 - Improved:
   - Getting starting documentation (https://github.com/sandialabs/pyttb/pull/324)
-  - Development enviroment (https://github.com/sandialabs/pyttb/pull/329, https://github.com/sandialabs/pyttb/pull/330)
+  - Development environment (https://github.com/sandialabs/pyttb/pull/329, https://github.com/sandialabs/pyttb/pull/330)
   - Documentation (https://github.com/sandialabs/pyttb/pull/328, https://github.com/sandialabs/pyttb/pull/334)
 
 # v1.8.0 (2024-10-23)
@@ -84,7 +84,7 @@
     - Addresses ambiguity of -0 by using `exclude_dims` (`numpy.ndarray`) parameter
   - `ktensor.ttv`, `sptensor.ttv`, `tensor.ttv`, `ttensor.ttv`
     - Use `exlude_dims` parameter instead of `-dims`
-    - Explicit nameing of dimensions to exclude
+    - Explicit naming of dimensions to exclude
   - `tensor.ttsv`
     - Use `skip_dim` (`int`) parameter instead of `-dims`
     - Exclude all dimensions up to and including `skip_dim`
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index 4aaf4c8e..9250cd63 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -35,12 +35,12 @@ current or filing a new [issue](https://github.com/sandialabs/pyttb/issues).
    ```
    git checkout -b my-new-feature-branch
    ```
-1. Formatters and linting
+1. Formatters and linting (These are checked in the full test suite as well)
    1. Run autoformatters and linting from root of project (they will change your code)
-      ```commandline
-      ruff check . --fix
-      ruff format
-      ```
+       ```commandline
+       ruff check . --fix
+       ruff format
+       ```
    1. Ruff's `--fix` won't necessarily address everything and may point out issues that need manual attention
    1. [We](./.pre-commit-config.yaml) optionally support [pre-commit hooks](https://pre-commit.com/) for this
       1. Alternatively, you can run `pre-commit run --all-files` from the command line if you don't want to install the hooks.
@@ -48,6 +48,12 @@ current or filing a new [issue](https://github.com/sandialabs/pyttb/issues).
       ```commandline
       mypy pyttb/
       ```
+      1. Not included in our pre-commit hooks because of slow runtime.
+1. Check spelling
+   ```commandline
+   codespell
+   ```
+   1. This is also included in the optional pre-commit hooks.
 1. Run tests (at desired fidelity)
    1. Just doctests (enabled by default)
diff --git a/README.md b/README.md
index 3382d6f1..9551b025 100644
--- a/README.md
+++ b/README.md
@@ -32,7 +32,7 @@ low-rank tensor decompositions:
 [`cp_apr`](https://pyttb.readthedocs.io/en/stable/cpapr.html "CP decomposition via Alternating Poisson Regression"),
 [`gcp_opt`](https://pyttb.readthedocs.io/en/stable/gcpopt.html "Generalized CP decomposition"),
 [`hosvd`](https://pyttb.readthedocs.io/en/stable/hosvd.html "Tucker decomposition via Higher Order Singular Value Decomposition"),
-[`tucker_als`](https://pyttb.readthedocs.io/en/stable/tuckerals.html "Tucker decompostion via Alternating Least Squares")
+[`tucker_als`](https://pyttb.readthedocs.io/en/stable/tuckerals.html "Tucker decomposition via Alternating Least Squares")
 
 ## Quick Start
diff --git a/docs/source/tutorial/algorithm_cp_als.ipynb b/docs/source/tutorial/algorithm_cp_als.ipynb
index 4ccc5975..a74c9436 100644
--- a/docs/source/tutorial/algorithm_cp_als.ipynb
+++ b/docs/source/tutorial/algorithm_cp_als.ipynb
@@ -122,7 +122,7 @@
    "cell_type": "markdown",
    "metadata": {},
    "source": [
-    "## Increase the maximium number of iterations\n",
+    "## Increase the maximum number of iterations\n",
     "Note that the previous run kicked out at only 10 iterations, before reaching the specified convegence tolerance. Let's increase the maximum number of iterations and try again, using the same initial guess."
    ]
   },
@@ -337,7 +337,7 @@
    "source": [
     "## Recommendations\n",
     "* Run multiple times with different guesses and select the solution with the best fit.\n",
-    "* Try different ranks and choose the solution that is the best descriptor for your data based on the combination of the fit and the interpretaton of the factors, e.g., by visualizing the results."
+ "* Try different ranks and choose the solution that is the best descriptor for your data based on the combination of the fit and the interpretation of the factors, e.g., by visualizing the results." ] } ], diff --git a/docs/source/tutorial/algorithm_gcp_opt.ipynb b/docs/source/tutorial/algorithm_gcp_opt.ipynb index ee5b5d04..78488d16 100644 --- a/docs/source/tutorial/algorithm_gcp_opt.ipynb +++ b/docs/source/tutorial/algorithm_gcp_opt.ipynb @@ -19,7 +19,7 @@ "tags": [] }, "source": [ - "This document outlines usage and examples for the generalized CP (GCP) tensor decomposition implmented in `pyttb.gcp_opt`. GCP allows alternate objective functions besides sum of squared errors, which is the standard for CP. The code support both dense and sparse input tensors, but the sparse input tensors require randomized optimization methods.\n", + "This document outlines usage and examples for the generalized CP (GCP) tensor decomposition implemented in `pyttb.gcp_opt`. GCP allows alternate objective functions besides sum of squared errors, which is the standard for CP. The code support both dense and sparse input tensors, but the sparse input tensors require randomized optimization methods.\n", "\n", "GCP is described in greater detail in the manuscripts:\n", "* D. Hong, T. G. Kolda, J. A. Duersch, Generalized Canonical Polyadic Tensor Decomposition, SIAM Review, 62:133-163, 2020, https://doi.org/10.1137/18M1203626\n", diff --git a/docs/source/tutorial/algorithm_hosvd.ipynb b/docs/source/tutorial/algorithm_hosvd.ipynb index c17c790d..e8973c0d 100644 --- a/docs/source/tutorial/algorithm_hosvd.ipynb +++ b/docs/source/tutorial/algorithm_hosvd.ipynb @@ -94,7 +94,7 @@ "metadata": {}, "source": [ "## Generate a core with different accuracies for different shapes\n", - "We will create a core `tensor` that has is nearly block diagonal. The blocks are expontentially decreasing in norm, with the idea that we can pick off one block at a time as we increate the prescribed accuracy of the HOSVD. To do this, we define and use a function `tenrandblk()`." + "We will create a core `tensor` that has is nearly block diagonal. The blocks are expontentially decreasing in norm, with the idea that we can pick off one block at a time as we increase the prescribed accuracy of the HOSVD. To do this, we define and use a function `tenrandblk()`." ] }, { diff --git a/docs/source/tutorial/class_sptensor.ipynb b/docs/source/tutorial/class_sptensor.ipynb index bd2b072a..75c10ce6 100644 --- a/docs/source/tutorial/class_sptensor.ipynb +++ b/docs/source/tutorial/class_sptensor.ipynb @@ -17,7 +17,7 @@ "metadata": {}, "source": [ "## Creating a `sptensor`\n", - "The `sptensor` class stores the data in coordinate format. A sparse `sptensor` can be created by passing in a list of subscripts and values. For example, here we pass in three subscripts and a scalar value. The resuling sparse `sptensor` has three nonzero entries, and the `shape` is the size of the largest subscript in each dimension." + "The `sptensor` class stores the data in coordinate format. A sparse `sptensor` can be created by passing in a list of subscripts and values. For example, here we pass in three subscripts and a scalar value. The resulting sparse `sptensor` has three nonzero entries, and the `shape` is the size of the largest subscript in each dimension." 
   ]
  },
 {
diff --git a/docs/source/tutorial/class_sumtensor.ipynb b/docs/source/tutorial/class_sumtensor.ipynb
index 8230044f..4ca273ae 100644
--- a/docs/source/tutorial/class_sumtensor.ipynb
+++ b/docs/source/tutorial/class_sumtensor.ipynb
@@ -54,7 +54,7 @@
   "metadata": {},
   "source": [
    "## Creating sumtensors\n",
-   "A sumtensor `T` can only be delared as a sum of same-shaped tensors T1, T2,...,TN. The summand tensors are stored internally, which define the \"parts\" of the `sumtensor`. The parts of a `sumtensor` can be (dense) tensors (`tensor`), sparse tensors (` sptensor`), Kruskal tensors (`ktensor`), or Tucker tensors (`ttensor`). An example of the use of the sumtensor constructor follows."
+   "A sumtensor `T` can only be declared as a sum of same-shaped tensors T1, T2,...,TN. The summand tensors are stored internally, which define the \"parts\" of the `sumtensor`. The parts of a `sumtensor` can be (dense) tensors (`tensor`), sparse tensors (` sptensor`), Kruskal tensors (`ktensor`), or Tucker tensors (`ttensor`). An example of the use of the sumtensor constructor follows."
   ]
  },
 {
diff --git a/docs/source/tutorial/class_tenmat.ipynb b/docs/source/tutorial/class_tenmat.ipynb
index 034c0ce9..4b5e9b22 100644
--- a/docs/source/tutorial/class_tenmat.ipynb
+++ b/docs/source/tutorial/class_tenmat.ipynb
@@ -16,7 +16,7 @@
  "cell_type": "markdown",
  "metadata": {},
  "source": [
-  "We show how to convert a `tensor` to a 2D numpy array stored with extra information so that it can be converted back to a `tensor`. Converting to a 2D numpy array requies an ordered mapping of the `tensor` indices to the rows and the columns of the 2D numpy array."
+  "We show how to convert a `tensor` to a 2D numpy array stored with extra information so that it can be converted back to a `tensor`. Converting to a 2D numpy array requires an ordered mapping of the `tensor` indices to the rows and the columns of the 2D numpy array."
  ]
 },
 {
diff --git a/docs/source/tutorial/class_tensor.ipynb b/docs/source/tutorial/class_tensor.ipynb
index bc1b1fb6..b8db6cdf 100644
--- a/docs/source/tutorial/class_tensor.ipynb
+++ b/docs/source/tutorial/class_tensor.ipynb
@@ -107,7 +107,7 @@
   "metadata": {},
   "source": [
    "## Specifying trailing singleton dimensions in a `tensor`\n",
-   "Likewise, trailing singleton dimensions must be explictly specified."
+   "Likewise, trailing singleton dimensions must be explicitly specified."
   ]
  },
 {
@@ -136,7 +136,7 @@
  "cell_type": "markdown",
  "metadata": {},
  "source": [
-  "## The constitutent parts of a `tensor`"
+  "## The constituent parts of a `tensor`"
  ]
 },
 {
diff --git a/docs/source/tutorial/class_ttensor.ipynb b/docs/source/tutorial/class_ttensor.ipynb
index 0b00f4db..86c25055 100644
--- a/docs/source/tutorial/class_ttensor.ipynb
+++ b/docs/source/tutorial/class_ttensor.ipynb
@@ -630,7 +630,7 @@
   "metadata": {},
   "source": [
    "### Compare visualizations\n",
-   "We can compare the results of reconstruction. There is no degredation in doing only a partial reconstruction. Downsampling is obviously lower resolution, but the same result as first doing the full reconstruction and then downsampling."
+   "We can compare the results of reconstruction. There is no degradation in doing only a partial reconstruction. Downsampling is obviously lower resolution, but the same result as first doing the full reconstruction and then downsampling."
   ]
  },
 {
diff --git a/profiling/algorithms_profiling.ipynb b/profiling/algorithms_profiling.ipynb
index 18e34179..9e955727 100644
--- a/profiling/algorithms_profiling.ipynb
+++ b/profiling/algorithms_profiling.ipynb
@@ -90,7 +90,7 @@
    "    label:\n",
    "        The user-supplied label to distinguish a test run.\n",
    "    params:\n",
-   "        Paramters passed to the algorithm function.\n",
+   "        Parameters passed to the algorithm function.\n",
    "        'rank' may be given to the CP algorithms; 'tol' and 'verbosity' to hosvd.\n",
    "    \"\"\"\n",
    "\n",
@@ -108,7 +108,7 @@
    "    # stop collecting data, and send data to Stats object and sort\n",
    "    profiler.disable()\n",
    "\n",
-   "    # save profiling ouput to sub-directory specific to the function being tested.\n",
+   "    # save profiling output to sub-directory specific to the function being tested.\n",
    "    output_directory = f\"./pstats_files/{algorithm_name}\"\n",
    "    if not os.path.exists(output_directory):\n",
    "        os.makedirs(output_directory)  # create directory if it doesn't exist\n",
@@ -155,7 +155,7 @@
    "    label:\n",
    "        The user-supplied label to distinguish a test run. This will be used in the output file name.\n",
    "    params:\n",
-   "        Paramters passed to the algorithm function.\n",
+   "        Parameters passed to the algorithm function.\n",
    "        'rank' may be given to the CP algorithms; 'tol' and 'verbosity' to hosvd.\n",
    "    \"\"\"\n",
    "\n",
@@ -410,7 +410,7 @@
   "source": [
    "### Generating all algorithms' profiling images\n",
    " \n",
-   "The cell bellow will generate all profiling images for all algorithms in `./gprof2dot_images/`"
+   "The cell below will generate all profiling images for all algorithms in `./gprof2dot_images/`"
   ]
  },
 {
diff --git a/pyproject.toml b/pyproject.toml
index bf5eb0b5..2d0f1c55 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -40,6 +40,7 @@ dev = [
     # Also in pre-commit
     "ruff>=0.7,<0.8",
     "pre-commit>=4.0,<5.0",
+    "codespell>=2.3.0,<2.4.0"
 ]
 doc = [
     "sphinx >= 4.0",
@@ -120,3 +121,22 @@ addopts = "--doctest-modules pyttb"
 filterwarnings = [
   "ignore:.*deprecated.*:"
 ]
+
+[tool.codespell]
+skip = [
+  # Built documentation
+  "./docs/build",
+  "./docs/jupyter_execute",
+  # Project build artifacts
+  "./build"
+]
+count = true
+ignore-words-list = [
+  # Conventions carried from MATLAB ttb (consider changing)
+  "ans",
+  "siz",
+  # Tensor/repo Nomenclature
+  "COO",
+  "nd",
+  "als",
+]
\ No newline at end of file
diff --git a/pyttb/cp_als.py b/pyttb/cp_als.py
index 68842228..94616547 100644
--- a/pyttb/cp_als.py
+++ b/pyttb/cp_als.py
@@ -76,7 +76,7 @@ def cp_als(  # noqa: PLR0912,PLR0913,PLR0915
 
     Example
     -------
-    Random initialization causes slight pertubation in intermediate results.
+    Random initialization causes slight perturbation in intermediate results.
     `...` is our place holder for these numeric values.
 
     Example using default values ("random" initialization):
 
diff --git a/pyttb/cp_apr.py b/pyttb/cp_apr.py
index 6ae00b62..d87454ee 100644
--- a/pyttb/cp_apr.py
+++ b/pyttb/cp_apr.py
@@ -104,7 +104,7 @@ def cp_apr(  # noqa: PLR0913
     assert init.ndims == N, "Initial guess does not have the right number of modes"
     assert (
         init.ncomponents == rank
-    ), "Initial guess does not have the right number of componenets"
+    ), "Initial guess does not have the right number of components"
     for n in range(N):
         if init.shape[n] != input_tensor.shape[n]:
             assert False, f"Mode {n} of the initial guess is the wrong size"
@@ -256,7 +256,7 @@ def tt_cp_apr_mu(  # noqa: PLR0912,PLR0913,PLR0915
     M.normalize(normtype=1)
     Phi = []  # np.zeros((N,))#cell(N,1)
     for n in range(N):
-        # TODO prepopulation Phi instead of appen should be faster
+        # TODO prepopulating Phi instead of append should be faster
         Phi.append(np.zeros(M.factor_matrices[n].shape))
 
     kktModeViolations = np.zeros((N,))
@@ -488,7 +488,7 @@ def tt_cp_apr_pdnr(  # noqa: PLR0912,PLR0913,PLR0915
 
     if isinstance(input_tensor, ttb.sptensor) and isSparse and precompinds:
         # Precompute sparse index sets for all the row subproblems.
-        # Takes more memory but can cut exectuion time significantly in some cases.
+        # Takes more memory but can cut execution time significantly in some cases.
         if printitn > 0:
             print("\tPrecomuting sparse index sets...")
         sparseIx = []
@@ -847,7 +847,7 @@ def tt_cp_apr_pqnr(  # noqa: PLR0912,PLR0913,PLR0915
 
     if isinstance(input_tensor, ttb.sptensor) and precompinds:
         # Precompute sparse index sets for all the row subproblems.
-        # Takes more memory but can cut exectuion time significantly in some cases.
+        # Takes more memory but can cut execution time significantly in some cases.
         if printitn > 0:
             print("\tPrecomuting sparse index sets...")
         sparseIx = []
@@ -989,12 +989,12 @@ def tt_cp_apr_pqnr(  # noqa: PLR0912,PLR0913,PLR0915
                     delg[:, lbfgsPos] = tmp_delg
                     rho[lbfgsPos] = tmp_rho
                 else:
-                    # Rho is required to be postive; if not, then skip the L-BFGS
+                    # Rho is required to be positive; if not, then skip the L-BFGS
                     # update pair. The recommended safeguard for full BFGS is
                     # Powell damping, but not clear how to damp in 2-loop L-BFGS
                     if dispLineWarn:
                         warnings.warn(
-                            "WARNING: skipping L-BFGS update, rho whould be "
+                            "WARNING: skipping L-BFGS update, rho would be "
                             f"1 / {tmp_delm * tmp_delg}"
                         )
                     # Roll back lbfgsPos since it will increment later.
@@ -1384,7 +1384,7 @@ def tt_linesearch_prowsubprob(  # noqa: PLR0913
     max_steps:
         maximum number of steps to try (suggest 10)
     suff_decr:
-        sufficent decrease for convergence (suggest 1.0e-4)
+        sufficient decrease for convergence (suggest 1.0e-4)
     isSparse:
         sparsity flag for computing the objective
     data_row:
@@ -1414,7 +1414,7 @@ def tt_linesearch_prowsubprob(  # noqa: PLR0913
 
     stepSize = step_len
 
-    # Evalute the current objective value
+    # Evaluate the current objective value
     f_old = -tt_loglikelihood_row(isSparse, data_row, model_old, Pi)
     num_evals = 1
     count = 1
@@ -1613,7 +1613,7 @@ def get_search_dir_pqnr(  # noqa: PLR0913
     lbfgsSize = delta_model.shape[1]
 
     # Determine active and free variables.
-    # TODO: is the bellow relevant?
+    # TODO: is the below relevant?
     # If epsActSet is zero, then the following works:
     #   fixedVars = find((m_row == 0) & (grad' > 0));
     # For the general case this works but is less clear and assumes m_row > 0:
@@ -1747,7 +1747,7 @@ def calculate_phi(  # noqa: PLR0913
     Pi: np.ndarray,
     epsilon: float,
 ) -> np.ndarray:
-    """Calcualte Phi.
+    """Calculate Phi.
 
     Parameters
     ----------
diff --git a/pyttb/gcp/optimizers.py b/pyttb/gcp/optimizers.py
index 300a7fcf..d41e1989 100644
--- a/pyttb/gcp/optimizers.py
+++ b/pyttb/gcp/optimizers.py
@@ -512,7 +512,7 @@ def lbfgsb_func_grad(vector: np.ndarray):
         lbfgsb_info["final_f"] = final_f
         lbfgsb_info["callback"] = vars(monitor)
 
-        # Unregister monitor in case of re-use
+        # Unregister monitor in case of reuse
         self._solver_kwargs["callback"] = monitor.callback
 
         # TODO big print output
diff --git a/pyttb/hosvd.py b/pyttb/hosvd.py
index e999c95a..47807f58 100644
--- a/pyttb/hosvd.py
+++ b/pyttb/hosvd.py
@@ -116,7 +116,7 @@ def hosvd(  # noqa: PLR0912,PLR0913,PLR0915
             ranks[k] = np.where(eigsum > eigsumthresh)[0][-1]
 
             if verbosity > 5:
-                print("Reverse cummulative sum of evals of Gram matrix:")
+                print("Reverse cumulative sum of evals of Gram matrix:")
                 for i, a_sum in enumerate(eigsum):
                     print_msg = f"{i: d}: {a_sum: 6.4f}"
                     if i == ranks[k]:
diff --git a/pyttb/ktensor.py b/pyttb/ktensor.py
index 5c5f5571..51aa0d57 100644
--- a/pyttb/ktensor.py
+++ b/pyttb/ktensor.py
@@ -945,7 +945,7 @@ def to_tenmat(
             Mapping of column indices.
         cdims_cyclic:
             When only rdims is specified maps a single rdim to the rows and
-            the remaining dimensons span the columns. _fc_ (forward cyclic)
+            the remaining dimensions span the columns. _fc_ (forward cyclic)
             in the order range(rdims,self.ndims()) followed by range(0, rdims).
             _bc_ (backward cyclic) range(rdims-1, -1, -1) then
             range(self.ndims(), rdims, -1).
@@ -1378,7 +1378,7 @@ def normalize(
 
         if sort:
             if self.ncomponents > 1:
-                # indices of srting in descending order
+                # indices for sorting in descending order
                 p = np.argsort(self.weights)[::-1]
                 self.arrange(permutation=p)
 
@@ -2300,7 +2300,7 @@ def vis(  # noqa: PLR0912, PLR0913
         >>> fig, axs = K.vis(show_figure=False)  # doctest: +ELLIPSIS
         >>> plt.close(fig)
 
-        Define a more realistic plot fuctions with x labels,
+        Define more realistic plot functions with x labels,
         control relative widths of each plot,
         and set mode titles.
diff --git a/pyttb/pyttb_utils.py b/pyttb/pyttb_utils.py
index 2718dd93..b7fe0109 100644
--- a/pyttb/pyttb_utils.py
+++ b/pyttb/pyttb_utils.py
@@ -190,7 +190,7 @@ def tt_dimscheck(  # noqa: PLR0912
     # Check sizes to determine how to index multiplicands
     if P == M:
         # Case 1: Number of items in dims and number of multiplicands are equal;
-        # therfore, index in order of sdims
+        # therefore, index in order of sdims
         vidx = sidx
     else:
         # Case 2: Number of multiplicands is equal to the number of dimensions of
@@ -545,7 +545,7 @@ def tt_sizecheck(shape: Tuple[int, ...], nargout: bool = True) -> bool:
     TT_SIZECHECK Checks that the shape is valid.
 
     TT_SIZECHECK(S) throws an error if S is not a valid shape tuple,
-    which means that it is a row vector with strictly postitive,
+    which means that it is a row vector with strictly positive,
     real-valued, finite integer values.
 
     Parameters
@@ -820,7 +820,7 @@ def gather_wrap_dims(
             Mapping of column indices.
         cdims_cyclic:
             When only rdims is specified maps a single rdim to the rows and
-            the remaining dimensons span the columns. _fc_ (forward cyclic[1]_)
+            the remaining dimensions span the columns. _fc_ (forward cyclic[1]_)
             in the order range(rdims,self.ndims()) followed by range(0, rdims).
             _bc_ (backward cyclic[2]_) range(rdims-1, -1, -1) then
             range(self.ndims(), rdims, -1).
diff --git a/pyttb/sptenmat.py b/pyttb/sptenmat.py
index 7b374c62..3a3c13d5 100644
--- a/pyttb/sptenmat.py
+++ b/pyttb/sptenmat.py
@@ -253,7 +253,7 @@ def __deepcopy__(self, memo):
         return self.copy()
 
     def to_sptensor(self) -> ttb.sptensor:
-        """Contruct a :class:`pyttb.sptensor` from `:class:pyttb.sptenmat`.
+        """Construct a :class:`pyttb.sptensor` from `:class:pyttb.sptenmat`.
 
         Examples
         --------
diff --git a/pyttb/sptensor.py b/pyttb/sptensor.py
index 509f2795..26a7fd5a 100644
--- a/pyttb/sptensor.py
+++ b/pyttb/sptensor.py
@@ -738,7 +738,7 @@ def to_sptenmat(
             Mapping of column indices.
         cdims_cyclic:
             When only rdims is specified maps a single rdim to the rows and
-            the remaining dimensons span the columns. _fc_ (forward cyclic[1]_)
+            the remaining dimensions span the columns. _fc_ (forward cyclic[1]_)
             in the order range(rdims,self.ndims()) followed by range(0, rdims).
             _bc_ (backward cyclic[2]_) range(rdims-1, -1, -1) then
             range(self.ndims(), rdims, -1).
@@ -2029,7 +2029,7 @@ def __getitem__(self, item):  # noqa: PLR0912, PLR0915
         linear subscripts, returns a vector of `p` values.
 
         Any ambiguity results in executing the first valid case. This
-        is particularily an issue if `self.ndims == 1`.
+        is particularly an issue if `self.ndims == 1`.
 
         Examples
         --------
@@ -2368,7 +2368,7 @@ def _set_subscripts(self, key, value):  # noqa: PLR0912
         # Process Group A: Changing values
         if np.sum(idxa) > 0:
             self.vals[tf[idxa]] = newvals[idxa]
-        # Proces Group B: Removing Values
+        # Process Group B: Removing Values
         if np.sum(idxb) > 0:
             removesubs = loc[idxb]
             keepsubs = np.setdiff1d(range(0, self.nnz), removesubs)
@@ -3119,7 +3119,7 @@ def __le__(self, other):
         Parameters
         ----------
         other:
-            Oject to compare with.
+            Object to compare with.
 
         Examples
         --------
diff --git a/pyttb/tensor.py b/pyttb/tensor.py
index 465522d9..f16994b3 100644
--- a/pyttb/tensor.py
+++ b/pyttb/tensor.py
@@ -456,7 +456,7 @@ def find(self) -> Tuple[np.ndarray, np.ndarray]:
         return subs, vals
 
     def to_sptensor(self) -> ttb.sptensor:
-        """Contruct a :class:`pyttb.sptensor` from `:class:pyttb.tensor`.
+        """Construct a :class:`pyttb.sptensor` from `:class:pyttb.tensor`.
 
         Returns
         -------
@@ -509,7 +509,7 @@ def to_tenmat(
             Mapping of column indices.
         cdims_cyclic:
             When only rdims is specified maps a single rdim to the rows and
-            the remaining dimensons span the columns. _fc_ (forward cyclic)
+            the remaining dimensions span the columns. _fc_ (forward cyclic)
             in the order range(rdims,self.ndims()) followed by range(0, rdims).
             _bc_ (backward cyclic) range(rdims-1, -1, -1) then
             range(self.ndims(), rdims, -1).
@@ -1384,7 +1384,7 @@ def symmetrize(  # noqa: PLR0912,PLR0915
                 combos.append(np.array(list(permutations(grps[i, :]))))
             combos = np.stack(combos)
 
-        # Create all the permuations to be averaged
+        # Create all the permutations to be averaged
         combo_lengths = [len(perm) for perm in combos]
         total_perms = prod(combo_lengths)
         sym_perms = np.tile(np.arange(0, n), [total_perms, 1])
@@ -2023,7 +2023,7 @@ def _set_subtensor(self, key, value):  # noqa: PLR0912
                     not isinstance(entry, (float, int, np.generic))
                     for entry in element
                 ):
                     raise ValueError(
-                        f"Entries for setitem must be numeric but recieved, {element}"
+                        f"Entries for setitem must be numeric but received, {element}"
                     )
                 sliceCheck.append(max(element))
             else:
@@ -2838,7 +2838,7 @@ def tendiag(
 ) -> tensor:
     """Create a tensor with elements along super diagonal.
 
-    If provided shape is too small the tensor will be enlarged to accomodate.
+    If provided shape is too small the tensor will be enlarged to accommodate.
 
     Parameters
     ----------
diff --git a/tests/gcp/test_fg_est.py b/tests/gcp/test_fg_est.py
index 04f900f9..9699adc1 100644
--- a/tests/gcp/test_fg_est.py
+++ b/tests/gcp/test_fg_est.py
@@ -27,7 +27,7 @@ def test_estimate_helper():
     all_indices = np.array(all_indices)
     values, _ = estimate_helper(factor_matrices, np.array(all_indices))
     np.testing.assert_array_equal(full[all_indices], values)
-    # TODO should probably test Zexploded but thats a pain
+    # TODO should probably test Zexploded but that's a pain
 
     values, Z = estimate_helper(factor_matrices, np.array([]))
     assert values.size == 0
diff --git a/tests/test_cp_als.py b/tests/test_cp_als.py
index 49d0cb2a..79c0a513 100644
--- a/tests/test_cp_als.py
+++ b/tests/test_cp_als.py
@@ -82,7 +82,7 @@ def test_cp_als_incorrect_init(capsys, sample_tensor):
         (M, Minit, output) = ttb.cp_als(T, 2, init="init")
     assert "The selected initialization method is not supported" in str(excinfo)
 
-    # incorrect size of intial ktensor
+    # incorrect size of initial ktensor
     Tshape_incorrect = list(T.shape)
     Tshape_incorrect[0] = Tshape_incorrect[0] + 1
     Tshape_incorrect = tuple(Tshape_incorrect)
diff --git a/tests/test_cp_apr.py b/tests/test_cp_apr.py
index 1197d001..c6d88bf7 100644
--- a/tests/test_cp_apr.py
+++ b/tests/test_cp_apr.py
@@ -192,7 +192,7 @@ def test_cpapr_mu(capsys, sample_tensor1, default_init_ktensor):
     capsys.readouterr()
     assert np.isclose(M.full().data, ktensorSolnInstance.full().data).all()
 
-    # Assert given an inital guess of the final answer yields immediate convergence
+    # Assert given an initial guess of the final answer yields immediate convergence
     M, _, output = ttb.cp_apr(
         input_tensor=tensorInstance, rank=2, init=ktensorSolnInstance
     )
diff --git a/tests/test_ktensor.py b/tests/test_ktensor.py
index a31f30a8..0dba3efb 100644
--- a/tests/test_ktensor.py
+++ b/tests/test_ktensor.py
@@ -212,7 +212,7 @@ def test_ktensor_arrange(sample_ktensor_2way):
     assert np.linalg.norm(K1.factor_matrices[0] - fm0) < 1e-8
     assert np.linalg.norm(K1.factor_matrices[1] - fm1) < 1e-8
 
-    # error, cannot shoft weight and permute simultaneously
+    # error, cannot shift weight and permute simultaneously
    with pytest.raises(AssertionError) as excinfo:
         K1.arrange(weight_factor=0, permutation=p)
     assert (
@@ -459,7 +459,7 @@ def test_ktensor_issymetric(sample_ktensor_2way, sample_ktensor_symmetric):
     assert np.array_equal(diffs, np.array([[0.0, 8.0], [0.0, 0]]))
 
     # should be symmetric
-    (datas, K1) = sample_ktensor_symmetric
+    _, K1 = sample_ktensor_symmetric
     assert K1.issymmetric()
     issym1, diffs1 = K1.issymmetric(return_diffs=True)
     assert np.array_equal(diffs1, np.array([[0.0, 0.0], [0.0, 0]]))
@@ -849,7 +849,7 @@ def test_ktensor_score():
         score, Aperm, flag, best_perm = A.score(B)
     assert "Size mismatch" in str(excinfo)
 
-    # invalid: number of compnents of first ktensor must be greater than or
+    # invalid: number of components of first ktensor must be greater than or
     # equal to number of components of second ktensor
     with pytest.raises(AssertionError) as excinfo:
         B = ttb.ktensor(
diff --git a/tests/test_package.py b/tests/test_package.py
index bde6b5c8..ba6301bc 100644
--- a/tests/test_package.py
+++ b/tests/test_package.py
@@ -54,3 +54,14 @@ def test_typing():
         check=True,
         shell=True,
     )
+
+
+def test_spelling():
+    """Confirm spelling is enforced"""
+    root_dir = os.path.dirname(os.path.dirname(__file__))
+    toml_file = os.path.join(root_dir, "pyproject.toml")
+    subprocess.run(
+        f"codespell --toml {toml_file}",
+        check=True,
+        shell=True,
+    )
diff --git a/tests/test_sptensor.py b/tests/test_sptensor.py
index 2417eb0a..4b7054d6 100644
--- a/tests/test_sptensor.py
+++ b/tests/test_sptensor.py
@@ -89,7 +89,7 @@ def function_handle(*args):
     assert sptensorInstance.shape == shape
     assert len(sptensorInstance.subs) == nz
 
-    # NZ as a propotion in [0,1)
+    # NZ as a proportion in [0,1)
     nz = 0.09375
     sptensorInstance = ttb.sptensor.from_function(function_handle, shape, nz)
     assert np.array_equal(sptensorInstance.vals, function_handle())
@@ -487,7 +487,7 @@ def test_subtensor_growth(self, sample_sptensor):
 
         # Set empty tensor with sptensor via ambiguous slice
         emptyTensor = ttb.sptensor()
-        # TODO revist this after setitem cleanup. Probably won't support arbitrary slice on empty tensor
+        # TODO revisit this after setitem cleanup. Probably won't support arbitrary slice on empty tensor
         emptyTensor[:, :, :] = sptensorInstance
         assert emptyTensor.isequal(sptensorInstance)
 
@@ -1165,7 +1165,7 @@ def test_sptensor__gt__(sample_sptensor):
     # Test comparison to tensor
     assert (sptensorInstance > sptensorInstance.full()).vals.size == 0
 
-    # Test comparison to tensor of different sparsity patter
+    # Test comparison to tensor of different sparsity pattern
     denseTensor = sptensorInstance.full()
     denseTensor[1, 1, 2] = -1
     assert np.array_equal(
diff --git a/tests/test_tensor.py b/tests/test_tensor.py
index 04e22b51..a7ab0abf 100644
--- a/tests/test_tensor.py
+++ b/tests/test_tensor.py
@@ -367,7 +367,7 @@ def test_subtensor(self, sample_tensor_2way):
             ttb.tensor(three_way_data)[two_slices].double(),
             three_way_data[two_slices],
         )
-        # Combining slice with (multi-)integer indicies
+        # Combining slice with (multi-)integer indices
         assert np.array_equal(
             tensorInstance[np.array([0, 1]), :].data, tensorInstance.data[[0, 1], :]
         )

From 87ea5f5562befcc271931c575aea6a1f6e3e3dd4 Mon Sep 17 00:00:00 2001
From: Nick Johnson <24689722+ntjohnson1@users.noreply.github.com>
Date: Sat, 14 Dec 2024 09:53:52 -0500
Subject: [PATCH 03/33] Internal: See if codespell precommit finds config.

---
 .pre-commit-config.yaml | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index a53b97a9..f6dec37b 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -16,4 +16,5 @@ repos:
   - repo: https://github.com/codespell-project/codespell
     rev: v2.3.0
     hooks:
-      - id: codespell
\ No newline at end of file
+      - id: codespell
+        args: [ --toml, "pyproject.toml"]
\ No newline at end of file

From 244069ed7b718ae5506cdada32809616352e1b37 Mon Sep 17 00:00:00 2001
From: Nick Johnson <24689722+ntjohnson1@users.noreply.github.com>
Date: Sat, 14 Dec 2024 09:55:19 -0500
Subject: [PATCH 04/33] Internal: Found config. Now enable reading it

---
 .pre-commit-config.yaml | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index f6dec37b..32bbcf63 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -17,4 +17,6 @@ repos:
     rev: v2.3.0
     hooks:
       - id: codespell
-        args: [ --toml, "pyproject.toml"]
\ No newline at end of file
+        args: [ --toml, "pyproject.toml"]
+        additional_dependencies:
+          - tomli
\ No newline at end of file

From 81ed5fede4bb7be850b9bb8f0ba4f07d3cedcd1d Mon Sep 17 00:00:00 2001
From: Nick Johnson <24689722+ntjohnson1@users.noreply.github.com>
Date: Sat, 14 Dec 2024 12:00:09 -0500
Subject: [PATCH 05/33] MATLAB: Add initial support for more matlab support.
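
A rough usage sketch of the new helper, mirroring the added tests (the
exact printed layout is illustrative, not verbatim output):

    import numpy as np
    import pyttb as ttb

    # Dense tensors and plain numpy arrays share one entry point.
    T = ttb.tensor(np.arange(16), shape=(2, 2, 2, 2))
    ttb.matlab_support.matlab_print(T, format="5.1f", name="X")

    # numpy arrays print through the same helper.
    ttb.matlab_support.matlab_print(np.ones((2, 3)), name="A")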
---
 pyttb/__init__.py                   |  2 +
 pyttb/matlab/__init__.py            |  1 +
 pyttb/matlab/matlab_support.py      | 38 ++++++++++++++++
 pyttb/matlab/matlab_utilities.py    | 68 +++++++++++++++++++++++++++++
 pyttb/tensor.py                     | 19 ++++++++
 tests/matlab/test_matlab_support.py | 44 +++++++++++++++++++
 6 files changed, 172 insertions(+)
 create mode 100644 pyttb/matlab/__init__.py
 create mode 100644 pyttb/matlab/matlab_support.py
 create mode 100644 pyttb/matlab/matlab_utilities.py
 create mode 100644 tests/matlab/test_matlab_support.py

diff --git a/pyttb/__init__.py b/pyttb/__init__.py
index 6df27a82..0e8c2afd 100644
--- a/pyttb/__init__.py
+++ b/pyttb/__init__.py
@@ -19,6 +19,7 @@
 from pyttb.import_data import import_data
 from pyttb.khatrirao import khatrirao
 from pyttb.ktensor import ktensor
+from pyttb.matlab import matlab_support
 from pyttb.sptenmat import sptenmat
 from pyttb.sptensor import sptendiag, sptenrand, sptensor
 from pyttb.sptensor3 import sptensor3
@@ -51,6 +52,7 @@ def ignore_warnings(ignore=True):
     import_data.__name__,
     khatrirao.__name__,
     ktensor.__name__,
+    matlab_support.__name__,
     sptenmat.__name__,
     sptendiag.__name__,
     sptenrand.__name__,
diff --git a/pyttb/matlab/__init__.py b/pyttb/matlab/__init__.py
new file mode 100644
index 00000000..245159d9
--- /dev/null
+++ b/pyttb/matlab/__init__.py
@@ -0,0 +1 @@
+"""Partial support of MATLAB users in PYTTB."""
diff --git a/pyttb/matlab/matlab_support.py b/pyttb/matlab/matlab_support.py
new file mode 100644
index 00000000..8118a043
--- /dev/null
+++ b/pyttb/matlab/matlab_support.py
@@ -0,0 +1,38 @@
+"""A limited number of utilities to support users coming from MATLAB."""
+
+# Copyright 2024 National Technology & Engineering Solutions of Sandia,
+# LLC (NTESS). Under the terms of Contract DE-NA0003525 with NTESS, the
+# U.S. Government retains certain rights in this software.
+
+from typing import Optional, Union
+
+import numpy as np
+
+from pyttb.tensor import tensor
+
+from .matlab_utilities import _matlab_array_str
+
+PRINT_CLASSES = Union[tensor, np.ndarray]
+
+
+def matlab_print(
+    data: Union[tensor, np.ndarray],
+    format: Optional[str] = None,
+    name: Optional[str] = None,
+):
+    """Print data in a format more similar to MATLAB.
+
+    Arguments
+    ---------
+    data: Object to print
+    format: Numerical formatting
+    """
+    if not isinstance(data, (tensor, np.ndarray)):
+        raise ValueError(
+            f"matlab_print only supports inputs of type {PRINT_CLASSES} but got"
+            f" {type(data)}."
+        )
+    if isinstance(data, np.ndarray):
+        print(_matlab_array_str(data, format, name))
+        return
+    print(data._matlab_str(format, name))
diff --git a/pyttb/matlab/matlab_utilities.py b/pyttb/matlab/matlab_utilities.py
new file mode 100644
index 00000000..eb163594
--- /dev/null
+++ b/pyttb/matlab/matlab_utilities.py
@@ -0,0 +1,68 @@
+"""Internal tools to aid in building MATLAB support.
+
+Tensor classes can use these common tools, where matlab_support uses tensors.
+matlab_support can depend on this, but tensors and this shouldn't depend on it.
+Probably best for everything here to be private functions.
+"""
+
+# Copyright 2024 National Technology & Engineering Solutions of Sandia,
+# LLC (NTESS). Under the terms of Contract DE-NA0003525 with NTESS, the
+# U.S. Government retains certain rights in this software.
+
+import textwrap
+from typing import Optional, Tuple, Union
+
+import numpy as np
+
+
+def _matlab_array_str(
+    array: np.ndarray,
+    format: Optional[str] = None,
+    name: Optional[str] = None,
+    skip_name: bool = False,
+) -> str:
+    """Convert numpy array to string more similar to MATLAB."""
+    if name is None:
+        name = type(array).__name__
+    header_str = ""
+    body_str = ""
+    if len(array.shape) > 2:
+        matlab_str = ""
+        # Iterate over all possible slices (in Fortran order)
+        for index in np.ndindex(
+            array.shape[2:][::-1]
+        ):  # Skip the first two dimensions and reverse the order
+            original_index = index[::-1]  # Reverse the order back to the original
+            # Construct the slice indices
+            slice_indices: Tuple[Union[int, slice], ...] = (
+                slice(None),
+                slice(None),
+                *original_index,
+            )
+            slice_data = array[slice_indices]
+            matlab_str += f"{name}(:,:, {', '.join(map(str, original_index))}) ="
+            matlab_str += "\n"
+            array_str = _matlab_array_str(slice_data, format, name, skip_name=True)
+            matlab_str += textwrap.indent(array_str, "\t")
+            matlab_str += "\n"
+        return matlab_str[:-1]  # Trim extra newline
+    elif len(array.shape) == 2:
+        header_str += f"{name}(:,:) ="
+        for row in array:
+            if format is None:
+                body_str += " ".join(f"{val}" for val in row)
+            else:
+                body_str += " ".join(f"{val:{format}}" for val in row)
+            body_str += "\n"
+    else:
+        header_str += f"{name}(:) ="
+        for val in array:
+            if format is None:
+                body_str += f"{val}"
+            else:
+                body_str += f"{val:{format}}"
+            body_str += "\n"
+
+    if skip_name:
+        return body_str
+    return header_str + "\n" + textwrap.indent(body_str[:-1], "\t")
diff --git a/pyttb/tensor.py b/pyttb/tensor.py
index f16994b3..903c4094 100644
--- a/pyttb/tensor.py
+++ b/pyttb/tensor.py
@@ -7,6 +7,7 @@
 from __future__ import annotations
 
 import logging
+import textwrap
 from collections.abc import Iterable
 from inspect import signature
 from itertools import combinations_with_replacement, permutations
@@ -30,6 +31,7 @@
 from scipy import sparse
 
 import pyttb as ttb
+from pyttb.matlab.matlab_utilities import _matlab_array_str
 from pyttb.pyttb_utils import (
     IndexVariant,
     OneDArray,
@@ -2723,6 +2725,23 @@ def __repr__(self):
 
     __str__ = __repr__
 
+    def _matlab_str(
+        self, format: Optional[str] = None, name: Optional[str] = None
+    ) -> str:
+        """Non-standard representation to be more similar to MATLAB."""
+        header = name
+        if name is None:
+            name = "data"
+        if header is None:
+            header = "This"
+
+        matlab_str = f"{header} is a tensor of shape " + " x ".join(
+            map(str, self.shape)
+        )
+
+        array_str = _matlab_array_str(self.data, format, name)
+        return matlab_str + "\n" + textwrap.indent(array_str, "\t")
+
 
 def tenones(shape: Shape, order: Union[Literal["F"], Literal["C"]] = "F") -> tensor:
     """Create a tensor of all ones.
diff --git a/tests/matlab/test_matlab_support.py b/tests/matlab/test_matlab_support.py
new file mode 100644
index 00000000..98d2b5c6
--- /dev/null
+++ b/tests/matlab/test_matlab_support.py
@@ -0,0 +1,44 @@
+# Copyright 2024 National Technology & Engineering Solutions of Sandia,
+# LLC (NTESS). Under the terms of Contract DE-NA0003525 with NTESS, the
+# U.S. Government retains certain rights in this software.
+
+import numpy as np
+import pytest
+
+from pyttb import matlab_support, tensor
+
+
+def test_matlab_printing_negative():
+    with pytest.raises(ValueError):
+        matlab_support.matlab_print("foo")
+
+
+def test_np_printing():
+    """These are just smoke tests since formatting needs manual style verification."""
+    # Check different dimensionality support
+    one_d_array = np.ones((1,))
+    matlab_support.matlab_print(one_d_array)
+    two_d_array = np.ones((1, 1))
+    matlab_support.matlab_print(two_d_array)
+    three_d_array = np.ones((1, 1, 1))
+    matlab_support.matlab_print(three_d_array)
+
+    # Check name and format
+    matlab_support.matlab_print(one_d_array, format="5.1f", name="X")
+    matlab_support.matlab_print(two_d_array, format="5.1f", name="X")
+    matlab_support.matlab_print(three_d_array, format="5.1f", name="X")
+
+
+def test_dense_printing():
+    """These are just smoke tests since formatting needs manual style verification."""
+    # Check different dimensionality support
+    example = tensor(np.arange(16), shape=(2, 2, 2, 2))
+    # 4D
+    matlab_support.matlab_print(example)
+    # 2D
+    matlab_support.matlab_print(example[:, :, 0, 0])
+    # 1D
+    matlab_support.matlab_print(example[:, 0, 0, 0])
+
+    # Check name and format
+    matlab_support.matlab_print(example, format="5.1f", name="X")

From b8e183bef765412e49e372bc498d1a751b2867c8 Mon Sep 17 00:00:00 2001
From: Nick Johnson <24689722+ntjohnson1@users.noreply.github.com>
Date: Tue, 17 Dec 2024 09:37:34 -0500
Subject: [PATCH 06/33] DEV: Updating contributing doc for more details on
 adding a tutorial

---
 CONTRIBUTING.md | 22 ++++++++++++++++++++++
 1 file changed, 22 insertions(+)

diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index 9250cd63..54708a6b 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -76,6 +76,28 @@ current or filing a new [issue](https://github.com/sandialabs/pyttb/issues).
       ```
    2. Clear notebook outputs if run locally see `nbstripout` in our [pre-commit configuration](.pre-commit-config.yaml)
 
+### Adding tutorials
+
+1. Follow general setup from above
+   1. Checkout a branch to make your changes
+   1. Install from source with dev and doc dependencies
+   1. Verify you can build the existing docs with sphinx
+
+1. Create a new Jupyter notebook in [./docs/source/tutorial](./docs/source/tutorial)
+   1. Our current convention is to prefix the filename with the type of tutorial and use all lower case
+
+1. Add a reference to your notebook in [./docs/source/tutorials.rst](./docs/source/tutorials.rst)
+
+1. Rebuild the docs, review locally, and iterate on changes until ready for review
+
+#### Tutorial References
+Generally, inspecting existing documentation or tutorials should provide a reasonable starting point for capabilities,
+but the following links may be useful if that's not sufficient.
+
+1. We use [sphinx](https://www.sphinx-doc.org/) to automatically build our docs, which may be useful for `.rst` issues
+
+1. We use [myst-nb](https://myst-nb.readthedocs.io/) to render our notebooks to documentation
+
 ## GitHub Workflow
 
 ### Proposing Changes

From 1526f4c7f706f159d119ac2524927f23a9508161 Mon Sep 17 00:00:00 2001
From: Nick Johnson <24689722+ntjohnson1@users.noreply.github.com>
Date: Fri, 20 Dec 2024 09:35:25 -0500
Subject: [PATCH 07/33] Fix internal calls to avoid fortran warnings in
 tutorials

---
 pyttb/cp_apr.py      | 14 +++++++++-----
 pyttb/ktensor.py     | 29 ++++++++++++++++++++++-------
 pyttb/sptensor.py    |  7 ++++++-
 pyttb/tenmat.py      |  8 +++++++-
 pyttb/tensor.py      | 35 +++++++++++++++++++++++------------
 tests/test_tenmat.py | 10 +++++++---
 6 files changed, 74 insertions(+), 29 deletions(-)

diff --git a/pyttb/cp_apr.py b/pyttb/cp_apr.py
index d87454ee..35b7b4e9 100644
--- a/pyttb/cp_apr.py
+++ b/pyttb/cp_apr.py
@@ -521,7 +521,9 @@ def tt_cp_apr_pdnr(  # noqa: PLR0912,PLR0913,PLR0915
     if isinstance(input_tensor, ttb.tensor) and isSparse is False:
         # Data is not a sparse tensor.
         Pi = tt_calcpi_prowsubprob(input_tensor, M, rank, n, N, isSparse)
-        X_mat = input_tensor.to_tenmat(np.array([n]), copy=False).data
+        X_mat = input_tensor.to_tenmat(
+            np.array([n], order=input_tensor.order), copy=False
+        ).data
 
     num_rows = M.factor_matrices[n].shape[0]
     isRowNOTconverged = np.zeros((num_rows,))
@@ -876,7 +878,9 @@ def tt_cp_apr_pqnr(  # noqa: PLR0912,PLR0913,PLR0915
     if not isinstance(input_tensor, ttb.sptensor) and not isSparse:
         # Data is not a sparse tensor.
         Pi = tt_calcpi_prowsubprob(input_tensor, M, rank, n, N, isSparse)
-        X_mat = input_tensor.to_tenmat(np.array([n]), copy=False).data
+        X_mat = input_tensor.to_tenmat(
+            np.array([n], order=input_tensor.order), copy=False
+        ).data
 
     num_rows = M.factor_matrices[n].shape[0]
     isRowNOTconverged = np.zeros((num_rows,))
@@ -1772,7 +1776,7 @@ def calculate_phi(  # noqa: PLR0913
             )
             Phi[:, r] = Yr
     else:
-        Xn = Data.to_tenmat(np.array([factorIndex]), copy=False).data
+        Xn = Data.to_tenmat(np.array([factorIndex], order=Data.order), copy=False).data
         V = Model.factor_matrices[factorIndex].dot(Pi.transpose())
         W = Xn / np.maximum(V, epsilon)
         Y = W.dot(Pi)
@@ -1817,8 +1821,8 @@ def tt_loglikelihood(
             np.sum(Data.vals * np.log(np.sum(A, axis=1))[:, None])
             - np.sum(Model.factor_matrices[0])
         )
-    dX = Data.to_tenmat(np.array([1]), copy=False).data
-    dM = Model.to_tenmat(np.array([1]), copy=False).data
+    dX = Data.to_tenmat(np.array([1], order=Data.order), copy=False).data
+    dM = Model.to_tenmat(np.array([1], order=Model.order), copy=False).data
     f = 0
     for i in range(dX.shape[0]):
         for j in range(dX.shape[1]):
diff --git a/pyttb/ktensor.py b/pyttb/ktensor.py
index 51aa0d57..3288d52f 100644
--- a/pyttb/ktensor.py
+++ b/pyttb/ktensor.py
@@ -74,7 +74,7 @@ class ktensor:
 
     __slots__ = ("weights", "factor_matrices")
 
-    def __init__(
+    def __init__(  # noqa: PLR0912
         self,
         factor_matrices: Optional[Sequence[np.ndarray]] = None,
         weights: Optional[np.ndarray] = None,
@@ -147,7 +147,7 @@ def __init__(
 
         # Empty constructor
         if factor_matrices is None and weights is None:
-            self.weights = np.array([])
+            self.weights = np.array([], order=self.order)
             self.factor_matrices: List[np.ndarray] = []
             return
 
@@ -183,12 +183,17 @@ def __init__(
             )
         # make copy or use reference
         if copy:
-            self.weights = weights.copy()
+            self.weights = weights.copy(self.order)
         else:
-            self.weights = weights
+            if not self._matches_order(weights):
+                logging.warning(
+                    f"Selected no copy, but input data isn't {self.order} ordered "
+                    "so must copy."
+                )
+            self.weights = np.asfortranarray(weights)
         else:
             # create weights if not provided
-            self.weights = np.ones(num_components)
+            self.weights = np.ones(num_components, order=self.order)
 
         # process factor_matrices
         if copy:
@@ -419,6 +424,14 @@ def order(self) -> Literal["F"]:
         """Return the data layout of the underlying storage."""
         return "F"
 
+    def _matches_order(self, array: np.ndarray) -> bool:
+        """Check if provided array matches tensor memory layout."""
+        if array.flags["C_CONTIGUOUS"] and self.order == "C":
+            return True
+        if array.flags["F_CONTIGUOUS"] and self.order == "F":
+            return True
+        return False
+
     def arrange(
         self,
         weight_factor: Optional[int] = None,
@@ -924,7 +937,9 @@ def min_split_dims(dims: Tuple[int, ...]):
         data = (
             ttb.khatrirao(*self.factor_matrices[:i_split], reverse=True) * self.weights
         ) @ ttb.khatrirao(*self.factor_matrices[i_split:], reverse=True).T
-        return ttb.tensor(data, self.shape, copy=False)
+        # Copy needed to ensure F order. Transpose above means both elements are
+        # different layout. If originally in C order can save on this copy.
+        return ttb.tensor(data, self.shape, copy=True)
 
     def to_tenmat(
         self,
@@ -1678,7 +1693,7 @@ def score(
         # Compute all possible vector-vector congruences.
 
         # Compute every pair for each mode
-        Cbig = ttb.tensor.from_function(np.zeros, (RA, RB, N))
+        Cbig = ttb.tensor(np.zeros((RA, RB, N), order=self.order))
         for n in range(N):
             Cbig[:, :, n] = np.abs(A.factor_matrices[n].T @ B.factor_matrices[n])
 
diff --git a/pyttb/sptensor.py b/pyttb/sptensor.py
index 26a7fd5a..864e159a 100644
--- a/pyttb/sptensor.py
+++ b/pyttb/sptensor.py
@@ -386,6 +386,11 @@ def copy(self) -> sptensor:
         """
         return ttb.sptensor(self.subs, self.vals, self.shape, copy=True)
 
+    @property
+    def order(self) -> Literal["F"]:
+        """Return the data layout of the underlying storage."""
+        return "F"
+
     def __deepcopy__(self, memo):
         """Return deep copy of this sptensor."""
         return self.copy()
@@ -708,7 +713,7 @@ def full(self) -> ttb.tensor:
             return ttb.tensor()
 
         # Create a dense zero tensor B that is the same shape as A
-        B = ttb.tensor(np.zeros(shape=self.shape), copy=False)
+        B = ttb.tensor(np.zeros(shape=self.shape, order=self.order), copy=False)
 
         if self.subs.size == 0:
             return B
diff --git a/pyttb/tenmat.py b/pyttb/tenmat.py
index 482cbca5..076e2730 100644
--- a/pyttb/tenmat.py
+++ b/pyttb/tenmat.py
@@ -6,6 +6,7 @@
 
 from __future__ import annotations
 
+import logging
 from math import prod
 from typing import Literal, Optional, Tuple, Union
 
@@ -255,7 +256,12 @@ def to_tensor(self, copy: bool = True) -> ttb.tensor:
             data = self.data.copy()
         data = np.reshape(data, np.array(shape)[order], order=self.order)
         if order.size > 1:
-            data = np.transpose(data, np.argsort(order))
+            if not copy:
+                logging.warning(
+                    "This tenmat cannot be trivially unwrapped into tensor "
+                    "so must copy."
+                )
+            data = np.asfortranarray(np.transpose(data, np.argsort(order)))
         return ttb.tensor(data, shape, copy=False)
 
     def ctranspose(self) -> tenmat:
diff --git a/pyttb/tensor.py b/pyttb/tensor.py
index 903c4094..db3c79c8 100644
--- a/pyttb/tensor.py
+++ b/pyttb/tensor.py
@@ -152,7 +152,7 @@ def __init__(
                     f"Selected no copy, but input data isn't {self.order} ordered "
                     "so must copy."
                 )
-            self.data = data
+            self.data = np.asfortranarray(data)
         self.shape = shape
         return
 
@@ -611,6 +611,7 @@ def to_tenmat(
             (rprod, cprod),
             order=self.order,
         )
+        assert data.flags["F_CONTIGUOUS"]
         return ttb.tenmat(data, rdims, cdims, tshape=tshape, copy=copy)
 
     def innerprod(
@@ -1161,7 +1162,8 @@ def permute(self, order: OneDArray) -> tensor:
             return self.copy()
 
         # Np transpose does error checking on order, acts as permutation
-        return ttb.tensor(np.transpose(self.data, order), copy=False)
+
+        return ttb.tensor(np.asfortranarray(np.transpose(self.data, order)), copy=False)
 
     def reshape(self, shape: Shape) -> tensor:
         """
@@ -1361,7 +1363,7 @@ def symmetrize(  # noqa: PLR0912,PLR0915
             avg = classSum / classNum
 
         newdata = avg[linclassidx]
-        data = np.reshape(newdata, self.shape)
+        data = np.reshape(newdata, self.shape, order=self.order)
 
         return ttb.tensor(data, copy=False)
 
@@ -1521,7 +1523,7 @@ def ttm(
         )
         Y_data: np.ndarray = np.reshape(newdata, newshape, order=self.order)
         Y_data = np.transpose(Y_data, np.argsort(order))
-        return ttb.tensor(Y_data, copy=False)
+        return ttb.tensor(Y_data, copy=True)
 
     def ttt(
         self,
@@ -1880,12 +1882,20 @@ def tenfun_binary(
         if not isinstance(other, (float, int)):
             Y = other.data
         else:
-            Y = np.array(other)
+            Y = np.array(other, order=self.order)
         if not first:
             Y, X = X, Y
         data = function_handle(X, Y)
-        Z = ttb.tensor(data, copy=False)
+        copy = False
+        if not self._matches_order(data):
+            copy = True
+            logging.warning(
+                f"Tenfun function expects data of order {self.order}."
+                " Update function to return data in that order to avoid an "
+                "extra data copy."
+            )
+        Z = ttb.tensor(data, copy=copy)
         return Z
 
     def tenfun_unary(
@@ -1913,14 +1923,14 @@ def tenfun_unary(
             ), f"Tensor {i} is not the same size as the first tensor input"
         if len(inputs) == 0:
             X = self.data
-            X = np.reshape(X, (1, -1))
+            X = np.reshape(X, (1, -1), order=self.order)
         else:
-            X = np.zeros((len(inputs) + 1, np.prod(sz)))
-            X[0, :] = np.reshape(self.data, (np.prod(sz)))
+            X = np.zeros((len(inputs) + 1, np.prod(sz)), order=self.order)
+            X[0, :] = np.reshape(self.data, (np.prod(sz)), order=self.order)
             for i, an_input in enumerate(inputs):
-                X[i + 1, :] = np.reshape(an_input.data, (np.prod(sz)))
+                X[i + 1, :] = np.reshape(an_input.data, (np.prod(sz)), order=self.order)
         data = function_handle(X)
-        data = np.reshape(data, sz)
+        data = np.reshape(data, sz, order=self.order)
         Z = ttb.tensor(data, copy=False)
         return Z
 
@@ -2170,7 +2180,8 @@ def __getitem__(self, item):  # noqa: PLR0912
         if newsiz.size == 0:
             a = newdata.item()
         else:
-            a = ttb.tensor(newdata, copy=False)
+            # Copy data to ensure correct data ordering
+            a = ttb.tensor(newdata, copy=True)
         return a
 
     # *** CASE 2a: Subscript indexing ***
diff --git a/tests/test_tenmat.py b/tests/test_tenmat.py
index 150b644b..bfbdd3e5 100644
--- a/tests/test_tenmat.py
+++ b/tests/test_tenmat.py
@@ -2,6 +2,7 @@
 # LLC (NTESS). Under the terms of Contract DE-NA0003525 with NTESS, the
 # U.S. Government retains certain rights in this software.
 
+import logging
 from copy import deepcopy
 
 import numpy as np
@@ -324,7 +325,7 @@ def test_tenmat_initialization_from_tensor_type(
     assert exc in str(excinfo)
 
 
-def test_tenmat_to_tensor():
+def test_tenmat_to_tensor(caplog):
     tensorInstance = ttb.tenrand((4, 3))
     tensorInstance4 = ttb.tenrand((4, 3, 2, 2))
     # tenmat
@@ -345,8 +346,11 @@ def test_tenmat_to_tensor(caplog):
     assert not np.may_share_memory(tensorTenmatInstance4.data, tenmatInstance4.data)
 
     # Reference instead of copy
-    tensorTenmatInstance4_ref = tenmatInstance4.to_tensor(copy=False)
-    assert np.may_share_memory(tensorTenmatInstance4_ref.data, tenmatInstance4.data)
+    with caplog.at_level(logging.WARNING):
+        tensorTenmatInstance4_ref = tenmatInstance4.to_tensor(copy=False)
+    assert not np.may_share_memory(
+        tensorTenmatInstance4_ref.data, tenmatInstance4.data
+    )
 
 
 def test_tenmat_ctranspose(sample_tenmat_4way):

From 8c84a01503f937a8a19d30659658fc57304cce4b Mon Sep 17 00:00:00 2001
From: Nick Johnson <24689722+ntjohnson1@users.noreply.github.com>
Date: Fri, 20 Dec 2024 09:43:18 -0500
Subject: [PATCH 08/33] Update one tutorial with "F" order to avoid warnings.

---
 docs/source/tutorial/class_tensor.ipynb | 8 +++++---
 1 file changed, 5 insertions(+), 3 deletions(-)

diff --git a/docs/source/tutorial/class_tensor.ipynb b/docs/source/tutorial/class_tensor.ipynb
index b8db6cdf..3a8b75ac 100644
--- a/docs/source/tutorial/class_tensor.ipynb
+++ b/docs/source/tutorial/class_tensor.ipynb
@@ -857,7 +857,7 @@
    "outputs": [],
    "source": [
     "np.random.seed(0)\n",
-    "A = ttb.tensor(np.floor(3 * np.random.rand(2, 2, 3)))  # Generate some data.\n",
+    "A = ttb.tensor(np.floor(3 * np.random.rand(2, 2, 3), order=\"F\"))  # Generate some data.\n",
     "A.tenfun(lambda x: x + 1)  # Increment every element of A by one."
    ]
   },
@@ -882,12 +882,14 @@
    "outputs": [],
    "source": [
     "np.random.seed(0)\n",
-    "C = ttb.tensor(np.floor(5 * np.random.rand(2, 2, 3)))  # Create another tensor.\n",
+    "C = ttb.tensor(\n",
+    "    np.floor(5 * np.random.rand(2, 2, 3), order=\"F\")\n",
+    ")  # Create another tensor.\n",
     "\n",
     "\n",
     "def elementwise_mean(X):\n",
     "    # finding mean for the columns\n",
-    "    return np.floor(np.mean(X, axis=0))\n",
+    "    return np.floor(np.mean(X, axis=0), order=\"F\")\n",
     "\n",
     "\n",
     "A.tenfun(elementwise_mean, B, C)  # Elementwise means for A, B, and C."

From 5d94cac07af75e57c7f6ac348df4228e5a256820 Mon Sep 17 00:00:00 2001
From: Tammy Kolda
Date: Fri, 27 Dec 2024 07:25:32 -0800
Subject: [PATCH 09/33] Various documentation updates, on top of Nick's
 Fortran fixes.

---
 .gitignore            |  1 +
 docs/source/index.rst | 13 +++++++++++--
 pyttb/tensor.py       | 41 ++++++++++++++++++++---------------------
 3 files changed, 32 insertions(+), 23 deletions(-)

diff --git a/.gitignore b/.gitignore
index ccc3fd8d..970830c2 100644
--- a/.gitignore
+++ b/.gitignore
@@ -6,6 +6,7 @@ dist/
 tests/__pycache__
 pyttb/__pycache__
 build/
+_build/
 .coverage
 .ipynb_checkpoints
 htmlcov
diff --git a/docs/source/index.rst b/docs/source/index.rst
index 292af598..bb03127b 100644
--- a/docs/source/index.rst
+++ b/docs/source/index.rst
@@ -4,9 +4,9 @@ pyttb: Python Tensor Toolbox
 ****************************
 Tensors (also known as multidimensional arrays or N-way arrays) are used
 in a variety of applications ranging from chemometrics to network
-analysis.
+analysis. This Python package is an adaptation of the
+`Tensor Toolbox for MATLAB <https://www.tensortoolbox.org>`_.
 
-- Install the latest release from pypi (``pip install pyttb``).
 - This is open source software. Please see `LICENSE`_ for the terms of
   the license (2-clause BSD).
- For more information or for feedback on this project, please `contact us`_. @@ -14,6 +14,15 @@ analysis. .. _`LICENSE`: ../../../LICENSE .. _contact us: #contact +Installing +========== + +* Via pypi + - Install the latest release from pypi (``pip install pyttb``). +* From source + - Clone the repository from `github `_. + - Install the package with ``pip install .`` from the pyttb root directory. + Functionality ============== pyttb provides the following classes and functions diff --git a/pyttb/tensor.py b/pyttb/tensor.py index db3c79c8..3e0568e8 100644 --- a/pyttb/tensor.py +++ b/pyttb/tensor.py @@ -50,25 +50,15 @@ class tensor: - """ - TENSOR Class for dense tensors. - - Contains the following data members: + """Class for dense tensors. + + **Members** - ``data``: :class:`numpy.ndarray` dense array containing the data elements - of the tensor. + * ``data``: :class:`numpy.ndarray` containing the data elements of the tensor stored, by default, in Fortran order. - Instances of :class:`pyttb.tensor` can be created using `__init__()` or - the following method: + * ``shape``: :class:`tuple` of integers containing the size of each mode of the tensor. *Technically, this is redudant since the shape can be inferred from the data. This is an artifact of the transfer from the MATLAB Tensor Toolbox because MATLAB does not propertly store the size of a 1D tensor.* - * :meth:`from_function` - - Examples - -------- - For all examples listed below, the following module imports are assumed: - - >>> import pyttb as ttb - >>> import numpy as np + """ __slots__ = ("data", "shape") @@ -79,11 +69,9 @@ def __init__( shape: Optional[Shape] = None, copy: bool = True, ): - """Create a :class:`pyttb.tensor` from a :class:`numpy.ndarray`. - - Note that 1D tensors (i.e., when len(shape)==1) contains a data - array that follow the Numpy convention of being a row vector. - + """ + **Constructor** + Parameters ---------- data: @@ -95,6 +83,12 @@ def __init__( Examples -------- + + For *all* examples in this document, the following module imports are assumed: + + >>> import pyttb as ttb + >>> import numpy as np + Create an empty :class:`pyttb.tensor`: >>> T = ttb.tensor() @@ -110,6 +104,10 @@ def __init__( data[:, :] = [[1 2] [3 4]] + + See Also + -------- + :meth:`from_function` """ if data is None: # EMPTY / DEFAULT CONSTRUCTOR @@ -1099,6 +1097,7 @@ def nvecs(self, n: int, r: int, flipsign: bool = True) -> np.ndarray: Xn = self.to_tenmat(rdims=np.array([n])).double() y = Xn @ Xn.T + # TODO (TK) We shouldn't use sparse library functions. RandSVD would probably be better. if r < y.shape[0] - 1: w, v = scipy.sparse.linalg.eigsh(y, r) v = v[:, (-np.abs(w)).argsort()] From c3d672896e1ac32c04500dcc4591dab4536c036f Mon Sep 17 00:00:00 2001 From: Tammy Kolda Date: Sun, 29 Dec 2024 13:22:28 -0800 Subject: [PATCH 10/33] Fighting with the tensor class. --- pyttb/tensor.py | 114 +++++++++++++++++++++++++++--------------------- 1 file changed, 64 insertions(+), 50 deletions(-) diff --git a/pyttb/tensor.py b/pyttb/tensor.py index 3e0568e8..7f38d79a 100644 --- a/pyttb/tensor.py +++ b/pyttb/tensor.py @@ -56,9 +56,8 @@ class tensor: * ``data``: :class:`numpy.ndarray` containing the data elements of the tensor stored, by default, in Fortran order. - * ``shape``: :class:`tuple` of integers containing the size of each mode of the tensor. *Technically, this is redudant since the shape can be inferred from the data. 
This is an artifact of the transfer from the MATLAB Tensor Toolbox because MATLAB does not propertly store the size of a 1D tensor.* - - + * ``shape``: :class:`tuple` of integers containing the size of each mode of the tensor. *Technically, this is (mostly?) redudant since the shape can be inferred from the data.* + """ __slots__ = ("data", "shape") @@ -69,45 +68,66 @@ def __init__( shape: Optional[Shape] = None, copy: bool = True, ): - """ - **Constructor** - + """Constructor for :class:`pyttb.tensor`. + Parameters ---------- - data: - Tensor source data. - shape: - Shape of resulting tensor if not the same as data shape. - copy: - Whether to make a copy of provided data or just reference it. + data : + Source data as :class:`numpy.ndarray` (default: empty) + shape : + Shape of the tensor as a :class:`tuple` (default: ``data.shape()``) + copy : bool + Whether to copy the data or reference it (default: True) Examples -------- - For *all* examples in this document, the following module imports are assumed: - - >>> import pyttb as ttb - >>> import numpy as np - - Create an empty :class:`pyttb.tensor`: - - >>> T = ttb.tensor() - >>> print(T) - empty tensor of shape () - data = [] - - Create a :class:`pyttb.tensor` from a :class:`numpy.ndarray`: - - >>> T = ttb.tensor(np.array([[1, 2], [3, 4]])) - >>> print(T) - tensor of shape (2, 2) with order F - data[:, :] = - [[1 2] - [3 4]] - - See Also - -------- - :meth:`from_function` + For *all* examples in this document, the following module imports are assumed:: + + >>> import pyttb as ttb + >>> import numpy as np + + Create a :class:`pyttb.tensor` from a 3D :class:`numpy.ndarray`:: + + >>> data = np.array([[[1,13],[5,17],[9,21]], + [[2,14],[6,18],[10,22]], + [[3,15],[7,19],[11,23]], + [[4,16],[8,20],[12,24]]]) + >>> T = ttb.tensor(data) + >>> print(T) + tensor of shape (4, 3, 2) with order F + data[:, :, 0] = + [[ 1 5 9] + [ 2 6 10] + [ 3 7 11] + [ 4 8 12]] + data[:, :, 1] = + [[13 17 21] + [14 18 22] + [15 19 23] + [16 20 24]] + + Create a :class:`pyttb.tensor` from a 1D :class:`numpy.ndarray` and reshape it:: + + >>> data = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24]) + >>> T = ttb.tensor(data, shape=(4, 3, 2)) + >>> print(T) + tensor of shape (4, 3, 2) with order F + data[:, :, 0] = + [[ 1 5 9] + [ 2 6 10] + [ 3 7 11] + [ 4 8 12]] + data[:, :, 1] = + [[13 17 21] + [14 18 22] + [15 19 23] + [16 20 24]] + + See Also + -------- + * :meth:`pyttb.tensor.from_function` - Create a tensor from a function + * :class:`pyttb.sptensor` - Sparse tensor class """ if data is None: # EMPTY / DEFAULT CONSTRUCTOR @@ -116,25 +136,18 @@ def __init__( return # CONVERT A MULTIDIMENSIONAL ARRAY - if not issubclass(data.dtype.type, np.number) and not issubclass( - data.dtype.type, np.bool_ - ): - assert False, "First argument must be a multidimensional array." + assert issubclass(data.dtype.type, np.number) or issubclass(data.dtype.type, np.bool_), "Data (1st argument) must be a numpy ndarray." 
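The dtype guard introduced just above admits any numeric or boolean NumPy array and rejects everything else. A small illustration of the same predicate in isolation (plain NumPy; `is_valid` is a throwaway name used only here):

    import numpy as np

    def is_valid(data):
        # Same test as the constructor's assert above.
        return issubclass(data.dtype.type, np.number) or issubclass(
            data.dtype.type, np.bool_
        )

    print(is_valid(np.array([1, 2, 3])))      # True  (integers)
    print(is_valid(np.array([1.5, 2.5])))     # True  (floats)
    print(is_valid(np.array([True, False])))  # True  (booleans)
    print(is_valid(np.array(["a", "b"])))     # False (strings are rejected)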
# Create or check second argument if shape is None: shape = data.shape - shape = parse_shape(shape) + shape = parse_shape(shape) # parse variety of inputs into a tuple # Make sure the number of elements matches what's been specified if len(shape) == 0: - if data.size > 0: - assert False, "Empty tensor cannot contain any elements" - - elif prod(shape) != data.size: - assert ( - False - ), "TTB:WrongSize, Size of data does not match specified size of tensor" + assert data.size == 0, "Shape (2nd argument) has zero length, but data (1st argument) was not empty." + else: + assert prod(shape) == data.size, "Shape (2nd argument) does not match number of elements in data (1st argument)" # Make sure the data is indeed the right shape if data.size > 0 and len(shape) > 0: @@ -143,12 +156,13 @@ def __init__( # Create the tensor if copy: + # TODO This may break later if the data is C-ordered self.data = data.copy(self.order) else: + # TODO This is a strange hack to if not self._matches_order(data): logging.warning( - f"Selected no copy, but input data isn't {self.order} ordered " - "so must copy." + f"Tensor Constructor: Selected no copy, but input data isn't {self.order} ordered so must copy." ) self.data = np.asfortranarray(data) self.shape = shape From 8e26de2da227fa82d809895460b776deb1b091d5 Mon Sep 17 00:00:00 2001 From: Tammy Kolda Date: Mon, 30 Dec 2024 14:07:08 -0800 Subject: [PATCH 11/33] Continuing to work on documentation to make it match TensorToolbox. --- pyttb/tensor.py | 72 +++++++++++++++++++++++++++++-------------------- 1 file changed, 43 insertions(+), 29 deletions(-) diff --git a/pyttb/tensor.py b/pyttb/tensor.py index 7f38d79a..d1e67226 100644 --- a/pyttb/tensor.py +++ b/pyttb/tensor.py @@ -50,13 +50,13 @@ class tensor: - """Class for dense tensors. + """Class for dense tensors **Members** - * ``data``: :class:`numpy.ndarray` containing the data elements of the tensor stored, by default, in Fortran order. + * ``data``: :class:`numpy.ndarray` containing the data elements of the tensor stored, by default, in Fortran order - * ``shape``: :class:`tuple` of integers containing the size of each mode of the tensor. *Technically, this is (mostly?) redudant since the shape can be inferred from the data.* + * ``shape``: :class:`tuple` of integers containing the size of each mode of the tensor """ @@ -72,11 +72,11 @@ def __init__( Parameters ---------- - data : + data : optional Source data as :class:`numpy.ndarray` (default: empty) - shape : - Shape of the tensor as a :class:`tuple` (default: ``data.shape()``) - copy : bool + shape : optional + Shape of the tensor as a :class:`tuple` (default: ``data.shape()``) + copy : optional bool Whether to copy the data or reference it (default: True) Examples @@ -87,12 +87,12 @@ def __init__( >>> import pyttb as ttb >>> import numpy as np - Create a :class:`pyttb.tensor` from a 3D :class:`numpy.ndarray`:: + Create a :class:`pyttb.tensor` from a three-way :class:`numpy.ndarray`:: >>> data = np.array([[[1,13],[5,17],[9,21]], - [[2,14],[6,18],[10,22]], - [[3,15],[7,19],[11,23]], - [[4,16],[8,20],[12,24]]]) + ... [[2,14],[6,18],[10,22]], + ... [[3,15],[7,19],[11,23]], + ... 
[[4,16],[8,20],[12,24]]]) >>> T = ttb.tensor(data) >>> print(T) tensor of shape (4, 3, 2) with order F @@ -107,7 +107,7 @@ def __init__( [15 19 23] [16 20 24]] - Create a :class:`pyttb.tensor` from a 1D :class:`numpy.ndarray` and reshape it:: + Create a :class:`pyttb.tensor` from a :class:`numpy.ndarray` vector and reshape it:: >>> data = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24]) >>> T = ttb.tensor(data, shape=(4, 3, 2)) @@ -120,34 +120,50 @@ def __init__( [ 4 8 12]] data[:, :, 1] = [[13 17 21] - [14 18 22] - [15 19 23] - [16 20 24]] + [14 18 22] + [15 19 23] + [16 20 24]] + + Create an empty :class:`ppytb.tensor`:: + >>> T = ttb.tensor() + >>> print(T) + empty tensor of shape () + data = [] See Also -------- - * :meth:`pyttb.tensor.from_function` - Create a tensor from a function - * :class:`pyttb.sptensor` - Sparse tensor class - """ + * :meth:`pyttb.tensor.from_function` - Create a tensor from a function such as np.random.rand or np.ones + * :meth:`pyttb.tensor.copy` - Make a deep copy of a tensor + * :meth:`pyttb.sptensor.to_tensor` - Convert a sparse tensor to a dense tensor + * :meth:`pyttb.ktensor.to_tensor` - Convert a Kruskal tensor to a dense tensor + * :meth:`pyttb.ttensor.to_tensor` - Convert a Tucker tensor to a dense tensor + * :meth:`pyttb.tenmat.to_tensor` - Convert a tenmat to a dense tensor + """ + # EMPTY / DEFAULT CONSTRUCTOR if data is None: - # EMPTY / DEFAULT CONSTRUCTOR self.data: np.ndarray = np.array([], order=self.order) self.shape: Tuple = () return - # CONVERT A MULTIDIMENSIONAL ARRAY - assert issubclass(data.dtype.type, np.number) or issubclass(data.dtype.type, np.bool_), "Data (1st argument) must be a numpy ndarray." + # Check that data is an numpy number or boolean array + if not issubclass(data.dtype.type, np.number) and not issubclass( + data.dtype.type, np.bool_ + ): + raise AssertionError("Data (1st argument) must be a numpy ndarray") - # Create or check second argument + # Create or check second argument (can be a variety of things) if shape is None: shape = data.shape - shape = parse_shape(shape) # parse variety of inputs into a tuple + else: + shape = parse_shape(shape) # Make sure the number of elements matches what's been specified if len(shape) == 0: - assert data.size == 0, "Shape (2nd argument) has zero length, but data (1st argument) was not empty." - else: - assert prod(shape) == data.size, "Shape (2nd argument) does not match number of elements in data (1st argument)" + if data.size > 0: + raise AssertionError("Shape (2nd argument) has zero length, but data (1st argument) was not empty") + + elif prod(shape) != data.size: + raise AssertionError("Shape (2nd argument) does not match number of elements in data (1st argument)") # Make sure the data is indeed the right shape if data.size > 0 and len(shape) > 0: @@ -156,13 +172,11 @@ def __init__( # Create the tensor if copy: - # TODO This may break later if the data is C-ordered self.data = data.copy(self.order) else: - # TODO This is a strange hack to if not self._matches_order(data): logging.warning( - f"Tensor Constructor: Selected no copy, but input data isn't {self.order} ordered so must copy." 
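The warning being reworded in this hunk exists because `numpy.asfortranarray` has to copy whenever its input is not already Fortran-ordered, so a `copy=False` request cannot be honored for C-ordered data. A stand-alone NumPy illustration of that behavior:

    import numpy as np

    c_ordered = np.arange(6).reshape(2, 3)         # C-contiguous
    f_copy = np.asfortranarray(c_ordered)          # forced to copy
    print(np.may_share_memory(c_ordered, f_copy))  # False

    f_ordered = np.asfortranarray(np.ones((2, 3)))
    f_view = np.asfortranarray(f_ordered)          # already F-ordered: no copy
    print(np.may_share_memory(f_ordered, f_view))  # True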
+                    f"Tensor Constructor: Selected no copy, but input data isn't {self.order} ordered so must copy."
                 )
             self.data = np.asfortranarray(data)
         self.shape = shape

From eb5d8e3808b3209a6d44a2f1d0ec93cb0510b9dc Mon Sep 17 00:00:00 2001
From: Tammy Kolda
Date: Mon, 30 Dec 2024 14:22:18 -0800
Subject: [PATCH 12/33] Had to put quotes around some arguments for file names
 that have a space in the path.

---
 tests/test_package.py | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/tests/test_package.py b/tests/test_package.py
index bde6b5c8..d0986137 100644
--- a/tests/test_package.py
+++ b/tests/test_package.py
@@ -21,7 +21,7 @@ def test_linting():
     root_dir = os.path.dirname(os.path.dirname(__file__))
     toml_file = os.path.join(root_dir, "pyproject.toml")
     subprocess.run(
-        f"ruff check {root_dir} --config {toml_file}",
+        f'ruff check "{root_dir}" --config "{toml_file}"',
         check=True,
         shell=True,
     )
@@ -32,7 +32,7 @@ def test_formatting():
     root_dir = os.path.dirname(os.path.dirname(__file__))
     toml_file = os.path.join(root_dir, "pyproject.toml")
     subprocess.run(
-        f"ruff format --check {root_dir} --config {toml_file}",
+        f'ruff format --check "{root_dir}" --config "{toml_file}"',
         check=True,
         shell=True,
     )
@@ -50,7 +50,7 @@ def test_typing():
     root_dir = os.path.dirname(os.path.dirname(__file__))
     toml_file = os.path.join(root_dir, "pyproject.toml")
     subprocess.run(
-        f"mypy -p pyttb --config-file {toml_file} {skip_untyped}",
+        f'mypy -p pyttb --config-file "{toml_file}" {skip_untyped}',
        check=True,
        shell=True,
    )

From 28482530155ddb83ec375d90fda8c7b1d95c8a24 Mon Sep 17 00:00:00 2001
From: Tammy Kolda
Date: Mon, 30 Dec 2024 14:33:50 -0800
Subject: [PATCH 13/33] Fixing tests to match updated error messages.

---
 tests/test_tensor.py | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/tests/test_tensor.py b/tests/test_tensor.py
index a7ab0abf..5ce85e80 100644
--- a/tests/test_tensor.py
+++ b/tests/test_tensor.py
@@ -54,11 +54,11 @@ def test_tensor_initialization_from_data(sample_tensor_2way):

     with pytest.raises(AssertionError) as excinfo:
         ttb.tensor(params["data"], ())
-    assert "Empty tensor cannot contain any elements" in str(excinfo)
+    assert "Shape (2nd argument) has zero length, but data (1st argument) was not empty" in str(excinfo)

     with pytest.raises(AssertionError) as excinfo:
         ttb.tensor(params["data"], (2, 4))
-    assert "TTB:WrongSize, Size of data does not match specified size of tensor" in str(
+    assert "Shape (2nd argument) does not match number of elements in data (1st argument)" in str(
         excinfo
     )

@@ -66,7 +66,7 @@ def test_tensor_initialization_from_data(sample_tensor_2way):
     data = np.array([["a", 2, 3], [4, 5, 6]])
     with pytest.raises(AssertionError) as excinfo:
         ttb.tensor(data, (2, 3))
-    assert "First argument must be a multidimensional array." in str(excinfo)
+    assert "Data (1st argument) must be a numpy ndarray" in str(excinfo)

     # 1D tensors
     # no shape specified

From 5c24317651bacfa6d844effd3970b3e0aca7bcde Mon Sep 17 00:00:00 2001
From: Tammy Kolda
Date: Mon, 30 Dec 2024 15:22:20 -0800
Subject: [PATCH 14/33] Resolving various errors from checks.

---
 pyttb/tensor.py      | 51 +++++++++++++++++++++++++++-----------------
 tests/test_tensor.py | 10 ++++++---
 2 files changed, 39 insertions(+), 22 deletions(-)

diff --git a/pyttb/tensor.py b/pyttb/tensor.py
index d1e67226..8dbd93c5 100644
--- a/pyttb/tensor.py
+++ b/pyttb/tensor.py
@@ -50,14 +50,16 @@ class tensor:
-    """Class for dense tensors
-
+    """Class for dense tensors.
+ **Members** - * ``data``: :class:`numpy.ndarray` containing the data elements of the tensor stored, by default, in Fortran order + * ``data``: :class:`numpy.ndarray` containing the data elements of the tensor + stored, by default, in Fortran order + + * ``shape``: :class:`tuple` of integers containing the size of each mode of + the tensor - * ``shape``: :class:`tuple` of integers containing the size of each mode of the tensor - """ __slots__ = ("data", "shape") @@ -68,7 +70,8 @@ def __init__( shape: Optional[Shape] = None, copy: bool = True, ): - """Constructor for :class:`pyttb.tensor`. + """ + Create a :class:`pyttb.tensor`. Parameters ---------- @@ -81,7 +84,6 @@ def __init__( Examples -------- - For *all* examples in this document, the following module imports are assumed:: >>> import pyttb as ttb @@ -106,10 +108,12 @@ def __init__( [14 18 22] [15 19 23] [16 20 24]] - - Create a :class:`pyttb.tensor` from a :class:`numpy.ndarray` vector and reshape it:: - >>> data = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24]) + Create a :class:`pyttb.tensor` from a :class:`numpy.ndarray` vector and + reshape it:: + + >>> data = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, + ... 17, 18, 19, 20, 21, 22, 23, 24]) >>> T = ttb.tensor(data, shape=(4, 3, 2)) >>> print(T) tensor of shape (4, 3, 2) with order F @@ -124,15 +128,17 @@ def __init__( [15 19 23] [16 20 24]] - Create an empty :class:`ppytb.tensor`:: + Create an empty :class:`pyttb.tensor`:: + >>> T = ttb.tensor() >>> print(T) empty tensor of shape () data = [] - + See Also -------- - * :meth:`pyttb.tensor.from_function` - Create a tensor from a function such as np.random.rand or np.ones + * :meth:`pyttb.tensor.from_function` - Create a tensor from a function + such as :meth:`numpy.ones` * :meth:`pyttb.tensor.copy` - Make a deep copy of a tensor * :meth:`pyttb.sptensor.to_tensor` - Convert a sparse tensor to a dense tensor * :meth:`pyttb.ktensor.to_tensor` - Convert a Kruskal tensor to a dense tensor @@ -154,16 +160,22 @@ def __init__( # Create or check second argument (can be a variety of things) if shape is None: shape = data.shape - else: - shape = parse_shape(shape) + + shape = parse_shape(shape) # Make sure the number of elements matches what's been specified if len(shape) == 0: if data.size > 0: - raise AssertionError("Shape (2nd argument) has zero length, but data (1st argument) was not empty") + raise AssertionError( + "Shape (2nd argument) has zero length," + "but data (1st argument) was not empty" + ) elif prod(shape) != data.size: - raise AssertionError("Shape (2nd argument) does not match number of elements in data (1st argument)") + raise AssertionError( + "Shape (2nd argument) does not match number of" + "elements in data (1st argument)" + ) # Make sure the data is indeed the right shape if data.size > 0 and len(shape) > 0: @@ -176,7 +188,8 @@ def __init__( else: if not self._matches_order(data): logging.warning( - f"Tensor Constructor: Selected no copy, but input data isn't {self.order} ordered so must copy" + "Tensor Constructor: Selected no copy, but input data isn't " + f"{self.order} ordered so must copy" ) self.data = np.asfortranarray(data) self.shape = shape @@ -1125,7 +1138,7 @@ def nvecs(self, n: int, r: int, flipsign: bool = True) -> np.ndarray: Xn = self.to_tenmat(rdims=np.array([n])).double() y = Xn @ Xn.T - # TODO (TK) We shouldn't use sparse library functions. RandSVD would probably be better. + # TODO (TK) RandSVD would probably be better. 
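# Sketch of the trade-off behind the TODO above, assuming SciPy's documented
# scipy.sparse.linalg.eigsh signature and (as one possible randomized
# alternative) scikit-learn's randomized_svd; the sizes below are hypothetical:
#
#     import numpy as np
#     import scipy.sparse.linalg
#
#     Xn = np.random.rand(10, 40)              # stand-in for a mode-n unfolding
#     y = Xn @ Xn.T                            # dense Gram matrix
#     w, v = scipy.sparse.linalg.eigsh(y, 3)   # 3 leading eigenpairs
#     v = v[:, (-np.abs(w)).argsort()]         # sort by |eigenvalue|, descending
#
#     # A randomized SVD of Xn would avoid forming y explicitly:
#     # from sklearn.utils.extmath import randomized_svd
#     # u, s, vt = randomized_svd(Xn, n_components=3)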
if r < y.shape[0] - 1: w, v = scipy.sparse.linalg.eigsh(y, r) v = v[:, (-np.abs(w)).argsort()] diff --git a/tests/test_tensor.py b/tests/test_tensor.py index 5ce85e80..aea01c51 100644 --- a/tests/test_tensor.py +++ b/tests/test_tensor.py @@ -54,12 +54,16 @@ def test_tensor_initialization_from_data(sample_tensor_2way): with pytest.raises(AssertionError) as excinfo: ttb.tensor(params["data"], ()) - assert "Shape (2nd argument) has zero length, but data (1st argument) was not empty" in str(excinfo) + assert ( + "Shape (2nd argument) has zero length, but data (1st argument) was not empty" + in str(excinfo) + ) with pytest.raises(AssertionError) as excinfo: ttb.tensor(params["data"], (2, 4)) - assert "Shape (2nd argument) does not match number of elements in data (1st argument)" in str( - excinfo + assert ( + "Shape (2nd argument) does not match number of elements in data (1st argument)" + in str(excinfo) ) # TODO how else to break this logical statement? From d3c2d2e30b3e0e5d1d01fabf26b380ddd6545b2e Mon Sep 17 00:00:00 2001 From: Tammy Kolda Date: Mon, 30 Dec 2024 15:27:33 -0800 Subject: [PATCH 15/33] Extra fix related to #378. --- tests/test_package.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/test_package.py b/tests/test_package.py index db507436..860a91f6 100644 --- a/tests/test_package.py +++ b/tests/test_package.py @@ -61,7 +61,7 @@ def test_spelling(): root_dir = os.path.dirname(os.path.dirname(__file__)) toml_file = os.path.join(root_dir, "pyproject.toml") subprocess.run( - f"codespell --toml {toml_file}", + f'codespell --toml "{toml_file}"', check=True, shell=True, ) From 97d22292f419345efa87087311990213d3357bf0 Mon Sep 17 00:00:00 2001 From: Tammy Kolda Date: Mon, 30 Dec 2024 15:27:47 -0800 Subject: [PATCH 16/33] More fixes to pass tests. --- pyttb/tensor.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pyttb/tensor.py b/pyttb/tensor.py index 8dbd93c5..655670cd 100644 --- a/pyttb/tensor.py +++ b/pyttb/tensor.py @@ -167,13 +167,13 @@ def __init__( if len(shape) == 0: if data.size > 0: raise AssertionError( - "Shape (2nd argument) has zero length," + "Shape (2nd argument) has zero length, " "but data (1st argument) was not empty" ) elif prod(shape) != data.size: raise AssertionError( - "Shape (2nd argument) does not match number of" + "Shape (2nd argument) does not match number of " "elements in data (1st argument)" ) From f6c8de7f0860ceeedd4a155742a4e8250f9b3b97 Mon Sep 17 00:00:00 2001 From: Tammy Kolda Date: Mon, 30 Dec 2024 15:50:13 -0800 Subject: [PATCH 17/33] Fixes one of the many problems caused by #368. Just create an array of the appropriate length using the function, and then reshape it. --- pyttb/tensor.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/pyttb/tensor.py b/pyttb/tensor.py index 655670cd..6627e724 100644 --- a/pyttb/tensor.py +++ b/pyttb/tensor.py @@ -253,7 +253,8 @@ def from_function( shape = parse_shape(shape) # Generate data - data = function_handle(shape) + totalsize = prod(shape) + data = function_handle((totalsize,)) # Create the tensor return cls(data, shape, copy=False) From f0e40edeaf86adc04ec0664ad32a7fe2e3d60143 Mon Sep 17 00:00:00 2001 From: Tammy Kolda Date: Mon, 30 Dec 2024 16:09:28 -0800 Subject: [PATCH 18/33] Make it so that from_function works with np.random. 
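A note on the convention this patch settles on: the handle passed to `from_function` is called with the total number of elements and returns a flat vector, which the tensor constructor then reshapes in Fortran order. A minimal stand-alone sketch of that contract as of this patch (plain NumPy; the names are illustrative only):

    import numpy as np

    shape = (4, 3, 2)
    totalsize = int(np.prod(shape))    # what from_function hands to the callable
    flat = np.random.randn(totalsize)  # e.g., np.random.randn as the handle
    data = np.reshape(flat, shape, order="F")

Since a flat vector is both C- and F-contiguous, handing it back avoids the Fortran-ordering warning that a C-ordered multidimensional array would trigger.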
--- pyttb/tensor.py | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/pyttb/tensor.py b/pyttb/tensor.py index 6627e724..a68a044e 100644 --- a/pyttb/tensor.py +++ b/pyttb/tensor.py @@ -219,9 +219,8 @@ def from_function( Parameters ---------- function_handle: - A function that can accept a shape (i.e., :class:`tuple` of - dimension sizes) and return a :class:`numpy.ndarray` of that shape. - `numpy.zeros`, `numpy.ones`. + A function that can accept an integer length and + return a :class:`numpy.ndarray` vector of that length. shape: Shape of the resulting tensor. @@ -231,7 +230,7 @@ def from_function( Examples -------- - Create a :class:`pyttb.tensor` with entries equal to 1: + Create a :class:`pyttb.tensor` with all entries equal to 1:: >>> T = ttb.tensor.from_function(np.ones, (2, 3, 4)) >>> print(T) @@ -254,7 +253,7 @@ def from_function( # Generate data totalsize = prod(shape) - data = function_handle((totalsize,)) + data = function_handle(totalsize) # Create the tensor return cls(data, shape, copy=False) From d127d65442642a3ba81c7fb37f2587e39caf7c95 Mon Sep 17 00:00:00 2001 From: Tammy Kolda Date: Mon, 30 Dec 2024 16:09:55 -0800 Subject: [PATCH 19/33] Adding a comment about the (current) requirement of Fortran ordering. --- pyttb/tensor.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/pyttb/tensor.py b/pyttb/tensor.py index a68a044e..d2840129 100644 --- a/pyttb/tensor.py +++ b/pyttb/tensor.py @@ -197,7 +197,10 @@ def __init__( @property def order(self) -> Literal["F"]: - """Return the data layout of the underlying storage.""" + """Return the data layout of the underlying storage. + + The data layout is required to be Fortran. + """ return "F" def _matches_order(self, array: np.ndarray) -> bool: From e60a2a7f8af10594bd0241b817d0a56aac9ecae2 Mon Sep 17 00:00:00 2001 From: Tammy Kolda Date: Mon, 30 Dec 2024 16:28:26 -0800 Subject: [PATCH 20/33] Adding help to show how to use from_function to work with randn. --- pyttb/tensor.py | 53 +++++++++++++++++++++++++++++++++---------------- 1 file changed, 36 insertions(+), 17 deletions(-) diff --git a/pyttb/tensor.py b/pyttb/tensor.py index d2840129..f784f316 100644 --- a/pyttb/tensor.py +++ b/pyttb/tensor.py @@ -217,7 +217,7 @@ def from_function( function_handle: Callable[[Tuple[int, ...]], np.ndarray], shape: Shape, ) -> tensor: - """Construct a :class:`pyttb.tensor` with data from a function. + """Construct a :class:`pyttb.tensor` with data generated by function. Parameters ---------- @@ -233,23 +233,42 @@ def from_function( Examples -------- - Create a :class:`pyttb.tensor` with all entries equal to 1:: + Create a :class:`pyttb.tensor` with entries drawn from a normal distribution + using :func:`numpy.random.randn`:: - >>> T = ttb.tensor.from_function(np.ones, (2, 3, 4)) - >>> print(T) - tensor of shape (2, 3, 4) with order F - data[:, :, 0] = - [[1. 1. 1.] - [1. 1. 1.]] - data[:, :, 1] = - [[1. 1. 1.] - [1. 1. 1.]] - data[:, :, 2] = - [[1. 1. 1.] - [1. 1. 1.]] - data[:, :, 3] = - [[1. 1. 1.] - [1. 1. 
1.]] + >>> np.random.seed(0) + >>> T = ttb.tensor.from_function(np.random.randn, (4, 3, 2)) + >>> print(T) + tensor of shape (4, 3, 2) with order F + data[:, :, 0] = + [[ 1.76405235 1.86755799 -0.10321885] + [ 0.40015721 -0.97727788 0.4105985 ] + [ 0.97873798 0.95008842 0.14404357] + [ 2.2408932 -0.15135721 1.45427351]] + data[:, :, 1] = + [[ 0.76103773 1.49407907 -2.55298982] + [ 0.12167502 -0.20515826 0.6536186 ] + [ 0.44386323 0.3130677 0.8644362 ] + [ 0.33367433 -0.85409574 -0.74216502]] + + Create a :class:`pyttb.tensor` with all entries equal to 1 + using :func:`numpy.ones`:: + + >>> T = ttb.tensor.from_function(np.ones, (2, 3, 4)) + >>> print(T) + tensor of shape (2, 3, 4) with order F + data[:, :, 0] = + [[1. 1. 1.] + [1. 1. 1.]] + data[:, :, 1] = + [[1. 1. 1.] + [1. 1. 1.]] + data[:, :, 2] = + [[1. 1. 1.] + [1. 1. 1.]] + data[:, :, 3] = + [[1. 1. 1.] + [1. 1. 1.]] """ # Check size shape = parse_shape(shape) From d4d378ef48b74a0577a3445b50b49a3ec0cca6e2 Mon Sep 17 00:00:00 2001 From: Tammy Kolda Date: Mon, 30 Dec 2024 18:02:32 -0800 Subject: [PATCH 21/33] Better solution for #380 and also #368. The function isn't changed but the instructions are. --- pyttb/tensor.py | 40 +++++++++++++++++++++------------------- 1 file changed, 21 insertions(+), 19 deletions(-) diff --git a/pyttb/tensor.py b/pyttb/tensor.py index f784f316..61e82250 100644 --- a/pyttb/tensor.py +++ b/pyttb/tensor.py @@ -217,15 +217,17 @@ def from_function( function_handle: Callable[[Tuple[int, ...]], np.ndarray], shape: Shape, ) -> tensor: - """Construct a :class:`pyttb.tensor` with data generated by function. + """Construct :class:`pyttb.tensor` with data generated by given function. Parameters ---------- function_handle: - A function that can accept an integer length and - return a :class:`numpy.ndarray` vector of that length. + A function that takes a tuple of integers and returns a + :class:`numpy.ndarray`. The array should be in Fortran order to avoid + warnings of data being copied. The data will be reshaped to the shape, + so returning a vector of length equal to the product of the shape is fine. shape: - Shape of the resulting tensor. + Shape of the resulting tensor; e.g., a tuple of integers. Returns ------- @@ -236,46 +238,46 @@ def from_function( Create a :class:`pyttb.tensor` with entries drawn from a normal distribution using :func:`numpy.random.randn`:: - >>> np.random.seed(0) - >>> T = ttb.tensor.from_function(np.random.randn, (4, 3, 2)) + >>> randn = lambda s : np.random.randn(np.prod(s)) + >>> np.random.seed(0) # reproducibility + >>> T = ttb.tensor.from_function(randn, (4, 3, 2)) >>> print(T) tensor of shape (4, 3, 2) with order F data[:, :, 0] = [[ 1.76405235 1.86755799 -0.10321885] - [ 0.40015721 -0.97727788 0.4105985 ] - [ 0.97873798 0.95008842 0.14404357] - [ 2.2408932 -0.15135721 1.45427351]] + [ 0.40015721 -0.97727788 0.4105985 ] + [ 0.97873798 0.95008842 0.14404357] + [ 2.2408932 -0.15135721 1.45427351]] data[:, :, 1] = [[ 0.76103773 1.49407907 -2.55298982] - [ 0.12167502 -0.20515826 0.6536186 ] - [ 0.44386323 0.3130677 0.8644362 ] - [ 0.33367433 -0.85409574 -0.74216502]] + [ 0.12167502 -0.20515826 0.6536186 ] + [ 0.44386323 0.3130677 0.8644362 ] + [ 0.33367433 -0.85409574 -0.74216502]] Create a :class:`pyttb.tensor` with all entries equal to 1 using :func:`numpy.ones`:: - >>> T = ttb.tensor.from_function(np.ones, (2, 3, 4)) + >>> T = ttb.tensor.from_function(lambda s: np.ones(s,order='F'), (2, 3, 4)) >>> print(T) tensor of shape (2, 3, 4) with order F data[:, :, 0] = [[1. 1. 1.] - [1. 1. 
1.]] + [1. 1. 1.]] data[:, :, 1] = [[1. 1. 1.] - [1. 1. 1.]] + [1. 1. 1.]] data[:, :, 2] = [[1. 1. 1.] - [1. 1. 1.]] + [1. 1. 1.]] data[:, :, 3] = [[1. 1. 1.] - [1. 1. 1.]] + [1. 1. 1.]] """ # Check size shape = parse_shape(shape) # Generate data - totalsize = prod(shape) - data = function_handle(totalsize) + data = function_handle(shape) # Create the tensor return cls(data, shape, copy=False) From 023346f8c80b72d8abb3ee00ac7492e358b6cb25 Mon Sep 17 00:00:00 2001 From: Tammy Kolda Date: Mon, 30 Dec 2024 18:03:29 -0800 Subject: [PATCH 22/33] Being more consistent with periods and language. --- pyttb/tensor.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/pyttb/tensor.py b/pyttb/tensor.py index 61e82250..c3a502e9 100644 --- a/pyttb/tensor.py +++ b/pyttb/tensor.py @@ -71,16 +71,16 @@ def __init__( copy: bool = True, ): """ - Create a :class:`pyttb.tensor`. + Construct a :class:`pyttb.tensor`. Parameters ---------- data : optional - Source data as :class:`numpy.ndarray` (default: empty) + Source data as :class:`numpy.ndarray` (default: empty). shape : optional - Shape of the tensor as a :class:`tuple` (default: ``data.shape()``) + Shape of the tensor as a :class:`tuple` (default: ``data.shape()``). copy : optional bool - Whether to copy the data or reference it (default: True) + Whether to copy the data or reference it (default: True). Examples -------- From a5ca9be01e7971252b2c60a810284e66ea525ee0 Mon Sep 17 00:00:00 2001 From: Tammy Kolda Date: Mon, 30 Dec 2024 19:31:43 -0800 Subject: [PATCH 23/33] More documentation cleaning. --- pyttb/tensor.py | 110 +++++++++++++++++++++++++++++++++++------------- 1 file changed, 80 insertions(+), 30 deletions(-) diff --git a/pyttb/tensor.py b/pyttb/tensor.py index c3a502e9..b7159f40 100644 --- a/pyttb/tensor.py +++ b/pyttb/tensor.py @@ -224,7 +224,7 @@ def from_function( function_handle: A function that takes a tuple of integers and returns a :class:`numpy.ndarray`. The array should be in Fortran order to avoid - warnings of data being copied. The data will be reshaped to the shape, + data being copied. The data will be reshaped to the shape, so returning a vector of length equal to the product of the shape is fine. shape: Shape of the resulting tensor; e.g., a tuple of integers. @@ -236,7 +236,9 @@ def from_function( Examples -------- Create a :class:`pyttb.tensor` with entries drawn from a normal distribution - using :func:`numpy.random.randn`:: + using :func:`numpy.random.randn`. Observe that we actually generate a vector to + avoid having a C-ordered array (the default if we had provided the shape array) + be rearranged as a F-ordered array:: >>> randn = lambda s : np.random.randn(np.prod(s)) >>> np.random.seed(0) # reproducibility @@ -254,8 +256,8 @@ def from_function( [ 0.44386323 0.3130677 0.8644362 ] [ 0.33367433 -0.85409574 -0.74216502]] - Create a :class:`pyttb.tensor` with all entries equal to 1 - using :func:`numpy.ones`:: + Create a :class:`pyttb.tensor` with all entries equal to 1 using + :func:`numpy.ones`. Observe that we specifically specify Fortran order:: >>> T = ttb.tensor.from_function(lambda s: np.ones(s,order='F'), (2, 3, 4)) >>> print(T) @@ -273,6 +275,8 @@ def from_function( [[1. 1. 1.] [1. 1. 1.]] """ + #TODO Create documentation page for collapsing and scaling tensors + # Check size shape = parse_shape(shape) @@ -285,20 +289,28 @@ def from_function( def copy(self) -> tensor: """Make a deep copy of a :class:`pyttb.tensor`. + The standard copy of a tensor creates a shallow copy of the data. 
+ Any changes to the old or new tensor will affect the other. + In contrast, the copy method creates a deep copy of the tensor which + is totally independent of what it was copied from. + Returns ------- - Copy of original tensor. + Deep copy of original tensor. Examples -------- - >>> T1 = ttb.tensor(np.ones((3, 2))) - >>> T2 = T1 - >>> T3 = T2.copy() - >>> T1[0, 0] = 3 - >>> T1[0, 0] == T2[0, 0] - True - >>> T1[0, 0] == T3[0, 0] - False + Observing the difference between a shallow copy and a deep copy. When the + original tensor changes, so does the shallow copy, but the deep copy does not:: + + >>> T = ttb.tensor(np.ones((3, 2))) + >>> T_shallow = T + >>> T_deep = T.copy() + >>> T[0, 0] = 3 + >>> T[0, 0] == T_shallow[0, 0] + True + >>> T[0, 0] == T_deep[0, 0] + False """ return ttb.tensor(self.data, self.shape, copy=True) @@ -312,32 +324,70 @@ def collapse( fun: Callable[[np.ndarray], Union[float, np.ndarray]] = np.sum, ) -> Union[float, np.ndarray, tensor]: """ - Collapse tensor along specified dimensions. + Collapse tensor along specified dimensions using a function. Parameters ---------- - dims: - Dimensions to collapse. - fun: - Method used to collapse dimensions. + dims: optional + Dimensions to collapse (default: all). + fun: optional + Method used to collapse dimensions (default: :meth:`numpy.sum`). Returns ------- - Collapsed value. + Scalar (if all dimensions collapsed) or tensor. Examples -------- - >>> T = ttb.tensor(np.ones((2, 2))) - >>> T.collapse() - 4.0 - >>> T.collapse(np.array([0])) - tensor of shape (2,) with order F - data[:] = - [2. 2.] - >>> T.collapse(np.arange(T.ndims), sum) - 4.0 - >>> T.collapse(np.arange(T.ndims), np.prod) - 1.0 + Sum all elements of tensor:: + + >>> T = ttb.tensor(np.ones((4,3,2),order='F')) + >>> T.collapse() + 24.0 + + Compute the sum for each mode-0 fiber (output is a tensor):: + + >>> T.collapse(0) + tensor of shape (3, 2) with order F + data[:, :] = + [[4. 4.] + [4. 4.] + [4. 4.]] + + Compute the sum of the entries in each mode-0 slice (output is a tensor):: + + >>> T.collapse([1, 2]) + tensor of shape (4,) with order F + data[:] = + [6. 6. 6. 6.] + + Compute the max entry in each mode-2 slice (output is a tensor):: + + >>> T.collapse([0 1], np.max) + tensor of shape (2,) with order F + data[:] = + [1. 1.] + + Find the maximum and minimum values in a tensor:: + + >>> randn = lambda s : np.random.randn(np.prod(s)) + >>> np.random.seed(0) # reproducibility + >>> T = ttb.tensor.from_function(randn, (2, 2, 2)) + >>> print(T) + >>> max_val = T.collapse(fun=np.max) + >>> min_val = T.collapse(fun=np.min) + >>> print(f"Max value: {max_val}") + >>> print(f"Min value: {min_val}") + tensor of shape (2, 2, 2) with order F + data[:, :, 0] = + [[1.76405235 0.97873798] + [0.40015721 2.2408932 ]] + data[:, :, 1] = + [[ 1.86755799 0.95008842] + [-0.97727788 -0.15135721]] + Max value: 2.240893199201458 + Min value: -0.977277879876411 + """ if self.data.size == 0: return np.array([], order=self.order) From 38655fc63f4cdd5bc11243c9b796af8f420fa983 Mon Sep 17 00:00:00 2001 From: Tammy Kolda Date: Tue, 31 Dec 2024 09:17:48 -0800 Subject: [PATCH 24/33] Aligning docs more with original docs. Adding stuff specific to Python. Still missing links to methods. Only partly done. 
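Before the notebook diff, one more worked example of the `collapse` API documented in the previous patch; this is a sketch assuming the `dims`/`fun` signature shown in that docstring:

    import numpy as np
    import pyttb as ttb

    T = ttb.tenones((4, 3, 2))
    total = T.collapse()                       # sum of all entries -> 24.0
    slice_sums = T.collapse(np.array([0, 1]))  # sum out modes 0 and 1 -> shape (2,), entries 12.0
    slice_max = T.collapse(np.array([0, 1]), np.max)  # max per mode-2 slice -> entries 1.0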
--- docs/source/tutorial/class_tensor.ipynb | 347 +++++++++++++++++++----- 1 file changed, 285 insertions(+), 62 deletions(-) diff --git a/docs/source/tutorial/class_tensor.ipynb b/docs/source/tutorial/class_tensor.ipynb index 3a8b75ac..b8c338df 100644 --- a/docs/source/tutorial/class_tensor.ipynb +++ b/docs/source/tutorial/class_tensor.ipynb @@ -5,7 +5,7 @@ "metadata": {}, "source": [ "# Tensors\n", - "```\n", + "``` text\n", "Copyright 2022 National Technology & Engineering Solutions of Sandia,\n", "LLC (NTESS). Under the terms of Contract DE-NA0003525 with NTESS, the\n", "U.S. Government retains certain rights in this software.\n", @@ -19,9 +19,24 @@ "Tensors are extensions of multidimensial arrays with additional operations defined on them. Here we explain the basics for creating and working with tensors." ] }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "For more details, see the {class}`pyttb.tensor` class documentation." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "```{eval-rst}\n", + "For more details, see the :class:`pyttb.tensor` class documentation." + ] + }, { "cell_type": "code", - "execution_count": null, + "execution_count": 1, "metadata": {}, "outputs": [], "source": [ @@ -34,17 +49,45 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "## Creating a `tensor` from an array" + "## Creating a tensor from an array" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "The {class}`pyttb.tensor` command creates a deep copy of a (multidimensional) array as a tensor object. It also reorders that copy to be F-ordered if it isn't already." ] }, { "cell_type": "code", - "execution_count": null, + "execution_count": 15, "metadata": {}, - "outputs": [], + "outputs": [ + { + "data": { + "text/plain": [ + "tensor of shape (4, 3, 2) with order F\n", + "data[:, :, 0] =\n", + "[[1. 1. 1.]\n", + " [1. 1. 1.]\n", + " [1. 1. 1.]\n", + " [1. 1. 1.]]\n", + "data[:, :, 1] =\n", + "[[1. 1. 1.]\n", + " [1. 1. 1.]\n", + " [1. 1. 1.]\n", + " [1. 1. 1.]]" + ] + }, + "execution_count": 15, + "metadata": {}, + "output_type": "execute_result" + } + ], "source": [ - "M = np.ones((2, 4, 3)) # A 2x4x3 array.\n", - "X = ttb.tensor(M) # Convert to a tensor object\n", + "M = np.ones((4,3,2)) # A 4 x 3 x 2 array.\n", + "X = ttb.tensor(M) # Convert to a tensor object.\n", "X" ] }, @@ -52,16 +95,32 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "Optionally, you can specify a different shape for the `tensor`, so long as the input array has the right number of elements. " + "Optionally, you can specify a different shape for the tensor, so long as the input array has the right number of elements. " ] }, { "cell_type": "code", - "execution_count": null, + "execution_count": 16, "metadata": {}, - "outputs": [], + "outputs": [ + { + "data": { + "text/plain": [ + "tensor of shape (4, 6) with order F\n", + "data[:, :] =\n", + "[[1. 1. 1. 1. 1. 1.]\n", + " [1. 1. 1. 1. 1. 1.]\n", + " [1. 1. 1. 1. 1. 1.]\n", + " [1. 1. 1. 1. 1. 1.]]" + ] + }, + "execution_count": 16, + "metadata": {}, + "output_type": "execute_result" + } + ], "source": [ - "X = X.reshape((4, 2, 3))\n", + "X = ttb.tensor(M, (4,6))\n", "X" ] }, @@ -69,18 +128,32 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "## Creating a one-dimensional `tensor`\n", - "`np.random.rand(m,n)` creates a two-dimensional tensor with `m` rows and `n` columns." + "There is an option to only do a shallow copy the input data, but it must be F-ordered. 
This can be useful for larger data. (A vector is both C- and F-ordered, which is useful for functions that don't support alternative orderings.)" ] }, { "cell_type": "code", - "execution_count": null, + "execution_count": 24, "metadata": {}, - "outputs": [], + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "tensor of shape (2, 3, 2) with order F\n", + "data[:, :, 0] =\n", + "[[0.5488135 0.60276338 0.4236548 ]\n", + " [0.71518937 0.54488318 0.64589411]]\n", + "data[:, :, 1] =\n", + "[[0.43758721 0.96366276 0.79172504]\n", + " [0.891773 0.38344152 0.52889492]]\n" + ] + } + ], "source": [ "np.random.seed(0)\n", - "X = ttb.tensor(np.random.rand(5, 1)) # Creates a 2-way tensor.\n", + "M = np.random.rand(12)\n", + "X = ttb.tensor(M,(2,3,2),copy=False)\n", "X" ] }, @@ -88,14 +161,34 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "To specify a 1-way `tensor`, use `(m,)` syntax, signifying a vector with `m` elements." + "## Creating a one-dimensional tensor\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "To specify a 1-way tensor, the shape should be of the form `(m,)`." ] }, { "cell_type": "code", - "execution_count": null, + "execution_count": 25, "metadata": {}, - "outputs": [], + "outputs": [ + { + "data": { + "text/plain": [ + "tensor of shape (5,) with order F\n", + "data[:] =\n", + "[0.5488135 0.71518937 0.60276338 0.54488318 0.4236548 ]" + ] + }, + "execution_count": 25, + "metadata": {}, + "output_type": "execute_result" + } + ], "source": [ "np.random.seed(0)\n", "X = ttb.tensor(np.random.rand(5), shape=(5,)) # Creates a 1-way tensor.\n", @@ -106,15 +199,31 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "## Specifying trailing singleton dimensions in a `tensor`\n", + "## Specifying trailing singleton dimensions in a tensor\n", "Likewise, trailing singleton dimensions must be explicitly specified." 
] }, { "cell_type": "code", - "execution_count": null, + "execution_count": 30, "metadata": {}, - "outputs": [], + "outputs": [ + { + "data": { + "text/plain": [ + "tensor of shape (4, 3) with order F\n", + "data[:, :] =\n", + "[[0.5488135 0.71518937 0.60276338]\n", + " [0.54488318 0.4236548 0.64589411]\n", + " [0.43758721 0.891773 0.96366276]\n", + " [0.38344152 0.79172504 0.52889492]]" + ] + }, + "execution_count": 30, + "metadata": {}, + "output_type": "execute_result" + } + ], "source": [ "np.random.seed(0)\n", "Y = ttb.tensor(np.random.rand(4, 3)) # Creates a 2-way tensor.\n", @@ -123,9 +232,24 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 31, "metadata": {}, - "outputs": [], + "outputs": [ + { + "data": { + "text/plain": [ + "tensor of shape (3, 4, 1) with order F\n", + "data[:, :, 0] =\n", + "[[0.5488135 0.71518937 0.60276338 0.54488318]\n", + " [0.4236548 0.64589411 0.43758721 0.891773 ]\n", + " [0.96366276 0.38344152 0.79172504 0.52889492]]" + ] + }, + "execution_count": 31, + "metadata": {}, + "output_type": "execute_result" + } + ], "source": [ "np.random.seed(0)\n", "Y = ttb.tensor(np.random.rand(3, 4, 1), (3, 4, 1)) # Creates a 3-way tensor.\n", @@ -136,14 +260,33 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "## The constituent parts of a `tensor`" + "## The constituent parts of a tensor" ] }, { "cell_type": "code", - "execution_count": null, + "execution_count": 32, "metadata": {}, - "outputs": [], + "outputs": [ + { + "data": { + "text/plain": [ + "array([[[0.5488135 , 0.71518937, 0.60276338],\n", + " [0.54488318, 0.4236548 , 0.64589411],\n", + " [0.43758721, 0.891773 , 0.96366276],\n", + " [0.38344152, 0.79172504, 0.52889492]],\n", + "\n", + " [[0.56804456, 0.92559664, 0.07103606],\n", + " [0.0871293 , 0.0202184 , 0.83261985],\n", + " [0.77815675, 0.87001215, 0.97861834],\n", + " [0.79915856, 0.46147936, 0.78052918]]])" + ] + }, + "execution_count": 32, + "metadata": {}, + "output_type": "execute_result" + } + ], "source": [ "np.random.seed(0)\n", "X = ttb.tenrand((2, 4, 3)) # Create data.\n", @@ -152,9 +295,20 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 33, "metadata": {}, - "outputs": [], + "outputs": [ + { + "data": { + "text/plain": [ + "(2, 4, 3)" + ] + }, + "execution_count": 33, + "metadata": {}, + "output_type": "execute_result" + } + ], "source": [ "X.shape # The shape." 
] @@ -163,18 +317,38 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "## Creating a `tensor` from its constituent parts" + "## Creating a tensor from its constituent parts" ] }, { "cell_type": "code", - "execution_count": null, + "execution_count": 35, "metadata": {}, - "outputs": [], + "outputs": [ + { + "data": { + "text/plain": [ + "tensor of shape (2, 4, 3) with order F\n", + "data[:, :, 0] =\n", + "[[0.5488135 0.54488318 0.43758721 0.38344152]\n", + " [0.56804456 0.0871293 0.77815675 0.79915856]]\n", + "data[:, :, 1] =\n", + "[[0.71518937 0.4236548 0.891773 0.79172504]\n", + " [0.92559664 0.0202184 0.87001215 0.46147936]]\n", + "data[:, :, 2] =\n", + "[[0.60276338 0.64589411 0.96366276 0.52889492]\n", + " [0.07103606 0.83261985 0.97861834 0.78052918]]" + ] + }, + "execution_count": 35, + "metadata": {}, + "output_type": "execute_result" + } + ], "source": [ "np.random.seed(0)\n", "X = ttb.tenrand((2, 4, 3)) # Create data.\n", - "Y = X.copy() # Copies X.\n", + "Y = ttb.tensor(X.data,X.shape) # Creates a (deep) copy of X from its parts.\n", "Y" ] }, @@ -182,15 +356,27 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "## Creating an empty `tensor`\n", + "## Creating an empty tensor\n", "An empty constructor exists." ] }, { "cell_type": "code", - "execution_count": null, + "execution_count": 36, "metadata": {}, - "outputs": [], + "outputs": [ + { + "data": { + "text/plain": [ + "empty tensor of shape ()\n", + "data = []" + ] + }, + "execution_count": 36, + "metadata": {}, + "output_type": "execute_result" + } + ], "source": [ "X = ttb.tensor() # Creates an empty tensor\n", "X" @@ -200,16 +386,35 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "## Use `tenones` to create a `tensor` of all ones" + "## Use {func}`pyttb.tenones` to create a tensor of all ones.\n" ] }, { "cell_type": "code", - "execution_count": null, + "execution_count": 37, "metadata": {}, - "outputs": [], + "outputs": [ + { + "data": { + "text/plain": [ + "tensor of shape (3, 4, 2) with order F\n", + "data[:, :, 0] =\n", + "[[1. 1. 1. 1.]\n", + " [1. 1. 1. 1.]\n", + " [1. 1. 1. 1.]]\n", + "data[:, :, 1] =\n", + "[[1. 1. 1. 1.]\n", + " [1. 1. 1. 1.]\n", + " [1. 1. 1. 
1.]]" + ] + }, + "execution_count": 37, + "metadata": {}, + "output_type": "execute_result" + } + ], "source": [ - "X = ttb.tenones((2, 3, 4)) # Creates a 2x3x4 tensor of ones.\n", + "X = ttb.tenones((3,4,2)) # Creates a 3 x 4 x 2 tensor of ones.\n", "X" ] }, @@ -217,7 +422,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "## Use `tenzeros` to create a `tensor` of all zeros" + "## Use `tenzeros` to create a tensor of all zeros" ] }, { @@ -234,7 +439,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "## Use `tenrand` to create a random `tensor`" + "## Use `tenrand` to create a random tensor" ] }, { @@ -252,7 +457,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "## Use `squeeze` to remove singleton dimensions from a `tensor`" + "## Use `squeeze` to remove singleton dimensions from a tensor" ] }, { @@ -274,7 +479,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "## Use `double` to convert a `tensor` to a (multidimensional) array" + "## Use `double` to convert a tensor to a (multidimensional) array" ] }, { @@ -301,7 +506,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "## Use `ndims` and `shape` to get the shape of a `tensor`" + "## Use `ndims` and `shape` to get the shape of a tensor" ] }, { @@ -335,7 +540,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "## Subscripted reference for a `tensor`" + "## Subscripted reference for a tensor" ] }, { @@ -440,8 +645,8 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "## Subscripted assignment for a `tensor\n", - "We can assign a single element, an entire subtensor, or a list of values for a `tensor`.`" + "## Subscripted assignment for a tensor\n", + "We can assign a single element, an entire subtensor, or a list of values for a tensor.`" ] }, { @@ -489,7 +694,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "It is possible to **grow** the `tensor` automatically by assigning elements outside the original range of the `tensor`." + "It is possible to **grow** the tensor automatically by assigning elements outside the original range of the tensor." ] }, { @@ -543,7 +748,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "## Use `find` for subscripts of nonzero elements of a `tensor`" + "## Use `find` for subscripts of nonzero elements of a tensor" ] }, { @@ -609,7 +814,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "## Computing the Frobenius norm of a `tensor`\n", + "## Computing the Frobenius norm of a tensor\n", "`norm` computes the Frobenius norm of a tensor. This corresponds to the Euclidean norm of the vectorized tensor." ] }, @@ -628,7 +833,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "## Using `reshape` to rearrange elements in a `tensor`\n", + "## Using `reshape` to rearrange elements in a tensor\n", "`reshape` reshapes a tensor into a given shape array. The total number of elements in the tensor cannot change." ] }, @@ -647,8 +852,8 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "## Basic operations (plus, minus, and, or, etc.) on a `tensor`\n", - "`tensor`s support plus, minus, times, divide, power, equals, and not-equals operators. `tensor`s can use their operators with another `tensor` or a scalar (with the exception of equalities which only takes `tensor`s). All mathematical operators are elementwise operations." + "## Basic operations (plus, minus, and, or, etc.) on a tensor\n", + "tensors support plus, minus, times, divide, power, equals, and not-equals operators. 
tensors can use their operators with another tensor or a scalar (with the exception of equalities which only takes tensors). All mathematical operators are elementwise operations." ] }, { @@ -846,8 +1051,8 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "## Using `tenfun` for elementwise operations on one or more `tensor`s\n", - "The method `tenfun` applies a specified function to a number of `tensor`s. This can be used for any function that is not predefined for `tensor`s." + "## Using `tenfun` for elementwise operations on one or more tensors\n", + "The method `tenfun` applies a specified function to a number of tensors. This can be used for any function that is not predefined for tensors." ] }, { @@ -899,7 +1104,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "## Use `permute` to reorder the modes of a `tensor`" + "## Use `permute` to reorder the modes of a tensor" ] }, { @@ -946,8 +1151,8 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "## Symmetrizing and checking for symmetry in a `tensor`\n", - "A `tensor` can be symmetrized in a collection of modes with the command `symmetrize`. The new, symmetric `tensor` is formed by averaging over all elements in the `tensor` which are required to be equal." + "## Symmetrizing and checking for symmetry in a tensor\n", + "A tensor can be symmetrized in a collection of modes with the command `symmetrize`. The new, symmetric tensor is formed by averaging over all elements in the tensor which are required to be equal." ] }, { @@ -966,7 +1171,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "An optional argument `grps` can also be passed to `symmetrize` which specifies an array of modes with respect to which the `tensor` should be symmetrized." + "An optional argument `grps` can also be passed to `symmetrize` which specifies an array of modes with respect to which the tensor should be symmetrized." ] }, { @@ -1009,7 +1214,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "## Displaying a `tensor`" + "## Displaying a tensor" ] }, { @@ -1031,7 +1236,25 @@ ] } ], - "metadata": {}, + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.11.11" + } + }, "nbformat": 4, "nbformat_minor": 1 } From 54e3eb4b0c09da6502abc78efd3b1f581521c6f4 Mon Sep 17 00:00:00 2001 From: Tammy Kolda Date: Tue, 31 Dec 2024 09:18:03 -0800 Subject: [PATCH 25/33] Linking to tutorial. --- pyttb/tensor.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/pyttb/tensor.py b/pyttb/tensor.py index b7159f40..7013209f 100644 --- a/pyttb/tensor.py +++ b/pyttb/tensor.py @@ -137,6 +137,7 @@ def __init__( See Also -------- + * :doc:`/tutorial/class_tensor` - Getting started with the tensor class * :meth:`pyttb.tensor.from_function` - Create a tensor from a function such as :meth:`numpy.ones` * :meth:`pyttb.tensor.copy` - Make a deep copy of a tensor @@ -180,6 +181,7 @@ def __init__( # Make sure the data is indeed the right shape if data.size > 0 and len(shape) > 0: # reshaping using Fortran ordering to match Matlab conventions + #TODO: Check if there is a reordering of the data that is expense. 
data = np.reshape(data, np.array(shape), order=self.order) # Create the tensor From 327acaf88a24a0c6fb9aa6678e9986c3d6107efb Mon Sep 17 00:00:00 2001 From: Tammy Kolda Date: Tue, 31 Dec 2024 09:28:21 -0800 Subject: [PATCH 26/33] Fixing spacing to satisfy tests. Fixing outputs to satisfy tests. --- docs/source/tutorial/class_tensor.ipynb | 10 +++++----- pyttb/tensor.py | 15 +++++++-------- 2 files changed, 12 insertions(+), 13 deletions(-) diff --git a/docs/source/tutorial/class_tensor.ipynb b/docs/source/tutorial/class_tensor.ipynb index b8c338df..5aaada67 100644 --- a/docs/source/tutorial/class_tensor.ipynb +++ b/docs/source/tutorial/class_tensor.ipynb @@ -86,7 +86,7 @@ } ], "source": [ - "M = np.ones((4,3,2)) # A 4 x 3 x 2 array.\n", + "M = np.ones((4, 3, 2)) # A 4 x 3 x 2 array.\n", "X = ttb.tensor(M) # Convert to a tensor object.\n", "X" ] @@ -120,7 +120,7 @@ } ], "source": [ - "X = ttb.tensor(M, (4,6))\n", + "X = ttb.tensor(M, (4, 6))\n", "X" ] }, @@ -153,7 +153,7 @@ "source": [ "np.random.seed(0)\n", "M = np.random.rand(12)\n", - "X = ttb.tensor(M,(2,3,2),copy=False)\n", + "X = ttb.tensor(M, (2, 3, 2), copy=False)\n", "X" ] }, @@ -348,7 +348,7 @@ "source": [ "np.random.seed(0)\n", "X = ttb.tenrand((2, 4, 3)) # Create data.\n", - "Y = ttb.tensor(X.data,X.shape) # Creates a (deep) copy of X from its parts.\n", + "Y = ttb.tensor(X.data, X.shape) # Creates a (deep) copy of X from its parts.\n", "Y" ] }, @@ -414,7 +414,7 @@ } ], "source": [ - "X = ttb.tenones((3,4,2)) # Creates a 3 x 4 x 2 tensor of ones.\n", + "X = ttb.tenones((3, 4, 2)) # Creates a 3 x 4 x 2 tensor of ones.\n", "X" ] }, diff --git a/pyttb/tensor.py b/pyttb/tensor.py index 7013209f..e4a43f70 100644 --- a/pyttb/tensor.py +++ b/pyttb/tensor.py @@ -181,7 +181,7 @@ def __init__( # Make sure the data is indeed the right shape if data.size > 0 and len(shape) > 0: # reshaping using Fortran ordering to match Matlab conventions - #TODO: Check if there is a reordering of the data that is expense. + # TODO: Check if there is a reordering of the data that is expense. data = np.reshape(data, np.array(shape), order=self.order) # Create the tensor @@ -277,7 +277,7 @@ def from_function( [[1. 1. 1.] [1. 1. 1.]] """ - #TODO Create documentation page for collapsing and scaling tensors + # TODO Create documentation page for collapsing and scaling tensors # Check size shape = parse_shape(shape) @@ -365,7 +365,7 @@ def collapse( Compute the max entry in each mode-2 slice (output is a tensor):: - >>> T.collapse([0 1], np.max) + >>> T.collapse([0, 1], np.max) tensor of shape (2,) with order F data[:] = [1. 1.] 
@@ -376,10 +376,6 @@ def collapse( >>> np.random.seed(0) # reproducibility >>> T = ttb.tensor.from_function(randn, (2, 2, 2)) >>> print(T) - >>> max_val = T.collapse(fun=np.max) - >>> min_val = T.collapse(fun=np.min) - >>> print(f"Max value: {max_val}") - >>> print(f"Min value: {min_val}") tensor of shape (2, 2, 2) with order F data[:, :, 0] = [[1.76405235 0.97873798] @@ -387,9 +383,12 @@ def collapse( data[:, :, 1] = [[ 1.86755799 0.95008842] [-0.97727788 -0.15135721]] + >>> max_val = T.collapse(fun=np.max) + >>> min_val = T.collapse(fun=np.min) + >>> print(f"Max value: {max_val}") Max value: 2.240893199201458 + >>> print(f"Min value: {min_val}") Min value: -0.977277879876411 - """ if self.data.size == 0: return np.array([], order=self.order) From cc899bf5934d504cc7b3ca91bd5c4b19c32ea62c Mon Sep 17 00:00:00 2001 From: Tammy Kolda Date: Tue, 31 Dec 2024 11:40:30 -0800 Subject: [PATCH 27/33] Rearrange order of tensor classes --- docs/source/tensor_classes.rst | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/docs/source/tensor_classes.rst b/docs/source/tensor_classes.rst index 240627c8..a24ffce2 100644 --- a/docs/source/tensor_classes.rst +++ b/docs/source/tensor_classes.rst @@ -2,14 +2,14 @@ Tensor Classes ============== .. toctree:: - :maxdepth: 2 + :maxdepth: 3 - ktensor.rst - sptenmat.rst - sptensor.rst - sumtensor.rst - tensor.rst - ttensor.rst - tenmat.rst - pyttb_utils.rst + tensor + sptensor + ktensor + ttensor + sumtensor + tenmat + sptenmat + pyttb_utils From deca97d1735b951729b615f68e345bb37728c73b Mon Sep 17 00:00:00 2001 From: Tammy Kolda Date: Tue, 31 Dec 2024 11:42:38 -0800 Subject: [PATCH 28/33] Making sure methods are also included in documentation. --- docs/source/tensor.rst | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/source/tensor.rst b/docs/source/tensor.rst index 8edf6b3e..c02a2232 100644 --- a/docs/source/tensor.rst +++ b/docs/source/tensor.rst @@ -1,8 +1,8 @@ pyttb.tensor -------------------- -.. autoclass:: pyttb.tensor +.. automodule:: pyttb.tensor :members: :special-members: - :exclude-members: __dict__, __weakref__, __slots__, __init__ + :exclude-members: __dict__, __weakref__, __slots__, __init__, mttv_left, mttv_mid, mttv_right, min_split :show-inheritance: \ No newline at end of file From b2b15a59a99066cb1b1d41978040b598f5251482 Mon Sep 17 00:00:00 2001 From: Tammy Kolda Date: Tue, 31 Dec 2024 13:23:13 -0800 Subject: [PATCH 29/33] Updating comments and avoiding asfortranarray call for tenrand. Partially addresses #368. 
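The core idea of the tenrand change below, sketched with plain NumPy: draw a flat vector of `prod(shape)` uniform samples (a 1-D array is both C- and F-contiguous), then let the constructor reshape it in Fortran order, so no `asfortranarray` copy is needed:

    import numpy as np

    shape = (5, 4, 2)
    flat = np.random.uniform(low=0, high=1, size=int(np.prod(shape)))
    X = np.reshape(flat, shape, order="F")   # a view, already F-contiguous
    print(X.flags["F_CONTIGUOUS"])           # True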
--- pyttb/tensor.py | 27 ++++++++++++++++----------- 1 file changed, 16 insertions(+), 11 deletions(-) diff --git a/pyttb/tensor.py b/pyttb/tensor.py index e4a43f70..8433e004 100644 --- a/pyttb/tensor.py +++ b/pyttb/tensor.py @@ -138,9 +138,9 @@ def __init__( See Also -------- * :doc:`/tutorial/class_tensor` - Getting started with the tensor class - * :meth:`pyttb.tensor.from_function` - Create a tensor from a function + * :meth:`from_function` - Create a tensor from a function such as :meth:`numpy.ones` - * :meth:`pyttb.tensor.copy` - Make a deep copy of a tensor + * :meth:`copy` - Make a deep copy of a tensor * :meth:`pyttb.sptensor.to_tensor` - Convert a sparse tensor to a dense tensor * :meth:`pyttb.ktensor.to_tensor` - Convert a Kruskal tensor to a dense tensor * :meth:`pyttb.ttensor.to_tensor` - Convert a Tucker tensor to a dense tensor @@ -2876,8 +2876,9 @@ def tenones(shape: Shape, order: Union[Literal["F"], Literal["C"]] = "F") -> ten ---------- shape: Shape of resulting tensor. - order: - Memory layout for resulting tensor. + order: optional + Memory layout for resulting tensor (default: F). + *Note: C order is not recommended.* Returns ------- @@ -2897,6 +2898,10 @@ def tenones(shape: Shape, order: Union[Literal["F"], Literal["C"]] = "F") -> ten [[1. 1. 1.] [1. 1. 1.] [1. 1. 1.]] + + See Also + -------- + * :meth:`pyttb.tensor.from_function` - Create a tensor from a function. """ def ones(shape: Tuple[int, ...]) -> np.ndarray: @@ -2912,8 +2917,9 @@ def tenzeros(shape: Shape, order: Union[Literal["F"], Literal["C"]] = "F") -> te ---------- shape: Shape of resulting tensor. - order: - Memory layout for resulting tensor. + order: optional + Memory layout for resulting tensor (default: F). + *Note: C order is not recommended.* Returns ------- @@ -2948,8 +2954,9 @@ def tenrand(shape: Shape, order: Union[Literal["F"], Literal["C"]] = "F") -> ten ---------- shape: Shape of resulting tensor. - order: - Memory layout for resulting tensor. + order: optional + Memory layout for resulting tensor (default: F). + *Note: C order is not recommended.* Returns ------- @@ -2968,9 +2975,7 @@ def tenrand(shape: Shape, order: Union[Literal["F"], Literal["C"]] = "F") -> ten # Typing doesn't play nice with partial # mypy issue: 1484 def unit_uniform(pass_through_shape: Tuple[int, ...]) -> np.ndarray: - data = np.random.uniform(low=0, high=1, size=pass_through_shape) - if order == "F": - return np.asfortranarray(data) + data = np.random.uniform(low=0, high=1, size=np.prod(pass_through_shape)) return data return tensor.from_function(unit_uniform, shape) From 5f9c0f7b5a3e0039e2592fe8d042ab8b8c641bf2 Mon Sep 17 00:00:00 2001 From: Tammy Kolda Date: Tue, 31 Dec 2024 13:48:32 -0800 Subject: [PATCH 30/33] Working more on the documentation. --- docs/source/tutorial/class_tensor.ipynb | 358 +++++++++++++++++------- 1 file changed, 259 insertions(+), 99 deletions(-) diff --git a/docs/source/tutorial/class_tensor.ipynb b/docs/source/tutorial/class_tensor.ipynb index 5aaada67..1d7ef334 100644 --- a/docs/source/tutorial/class_tensor.ipynb +++ b/docs/source/tutorial/class_tensor.ipynb @@ -26,14 +26,6 @@ "For more details, see the {class}`pyttb.tensor` class documentation." ] }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "```{eval-rst}\n", - "For more details, see the :class:`pyttb.tensor` class documentation." 
- ]
- },
 {
 "cell_type": "code",
 "execution_count": 1,
 "metadata": {},
 "outputs": [],
 "source": [
 "import pyttb as ttb\n",
 "import numpy as np\n",
 "import sys"
 ]
 },
 {
 "cell_type": "markdown",
 "metadata": {},
 "source": [
 "The {class}`pyttb.tensor` command creates a (multidimensional) array as a tensor object. By default, it creates a deep copy of the input object. It also reorders that copy to be F-ordered if it isn't already. For a tensor of size $m \\times n \\times p$, the shape is `(m,n,p)`."
 ]
 },
 {
 "cell_type": "code",
 "execution_count": 15,
 "metadata": {},
 "outputs": [
 {
 "data": {
 "text/plain": [
 "tensor of shape (4, 3, 2) with order F\n",
 "data[:, :, 0] =\n",
 "[[1. 1. 1.]\n",
 " [1. 1. 1.]\n",
 " [1. 1. 1.]\n",
 " [1. 1. 1.]]\n",
 "data[:, :, 1] =\n",
 "[[1. 1. 1.]\n",
 " [1. 1. 1.]\n",
 " [1. 1. 1.]\n",
 " [1. 1. 1.]]"
 ]
 },
 "execution_count": 15,
 "metadata": {},
 "output_type": "execute_result"
 }
 ],
 "source": [
 "M = np.ones((4, 3, 2)) # <-- 4 x 3 x 2 multidimensional array of ones.\n",
 "X = ttb.tensor(M) # <-- Convert to 4 x 3 x 2 tensor object.\n",
 "X"
 ]
 },
 {
 "cell_type": "code",
 "execution_count": 16,
 "metadata": {},
 "outputs": [
 {
 "data": {
 "text/plain": [
 "tensor of shape (4, 6) with order F\n",
 "data[:, :] =\n",
 "[[1. 1. 1. 1. 1. 1.]\n",
 " [1. 1. 1. 1. 1. 1.]\n",
 " [1. 1. 1. 1. 1. 1.]\n",
 " [1. 1. 1. 1. 1. 1.]]"
 ]
 },
 "execution_count": 16,
 "metadata": {},
 "output_type": "execute_result"
 }
 ],
 "source": [
 "X = ttb.tensor(M, (4, 6)) # <-- Reshape to 4 x 6 tensor.\n",
 "X"
 ]
 },
 {
 "cell_type": "code",
 "execution_count": 24,
 "metadata": {},
 "outputs": [
 {
 "name": "stdout",
 "output_type": "stream",
 "text": [
 "tensor of shape (2, 3, 2) with order F\n",
 "data[:, :, 0] =\n",
 "[[0.5488135 0.60276338 0.4236548 ]\n",
 " [0.71518937 0.54488318 0.64589411]]\n",
 "data[:, :, 1] =\n",
 "[[0.43758721 0.96366276 0.79172504]\n",
 " [0.891773 0.38344152 0.52889492]]\n"
 ]
 }
 ],
 "source": [
 "np.random.seed(0)\n",
 "v = np.random.rand(12) # <-- length-12 vector of uniform random numbers.\n",
 "X = ttb.tensor(v, (2, 3, 2), copy=False) # <-- Converted to 2 x 3 x 2 tensor.\n",
 "X"
 ]
 },
 {
 "cell_type": "markdown",
 "metadata": {},
 "source": [
 "## Using {meth}`pyttb.tensor.from_function` to create a tensor with elements generated by a function\n",
 "\n",
 "This function takes another function that is used to generate entries of the tensor. The returned array should be in Fortran order to avoid unnecessary copies and rearrangement. Since the data will be reshaped in any case, returning an array is fine. Alternatively, ensure the function returns in F-order for those methods that support it."
 ]
 },
 {
 "cell_type": "code",
 "execution_count": 44,
 "metadata": {},
 "outputs": [
 {
 "data": {
 "text/plain": [
 "tensor of shape (2, 3, 2) with order F\n",
 "data[:, :, 0] =\n",
 "[[ 1.76405235 0.97873798 1.86755799]\n",
 " [ 0.40015721 2.2408932 -0.97727788]]\n",
 "data[:, :, 1] =\n",
 "[[ 0.95008842 -0.10321885 0.14404357]\n",
 " [-0.15135721 0.4105985 1.45427351]]"
 ]
 },
 "execution_count": 44,
 "metadata": {},
 "output_type": "execute_result"
 }
 ],
 "source": [
 "np.random.seed(1)\n",
 "randn = lambda s: np.random.randn(np.prod(s))\n",
 "X = ttb.tensor.from_function(randn, (2, 3, 2)) # <-- 2 x 3 x 2 tensor of normally distributed random numbers.\n",
 "X"
 ]
 },
 {
 "cell_type": "markdown",
 "metadata": {},
 "source": [
 "## Use {func}`pyttb.tenones` to create a tensor of all ones.\n"
 ]
 },
 {
 "cell_type": "code",
 "execution_count": null,
 "metadata": {},
 "outputs": [
 {
 "data": {
 "text/plain": [
 "tensor of shape (3, 4, 2) with order F\n",
 "data[:, :, 0] =\n",
 "[[1. 1. 1. 1.]\n",
 " [1. 1. 1. 1.]\n",
 " [1. 1. 1. 1.]]\n",
 "data[:, :, 1] =\n",
 "[[1. 1. 1. 1.]\n",
 " [1. 1. 1. 1.]\n",
 " [1. 1. 1. 1.]]"
 ]
 },
 "metadata": {},
 "output_type": "display_data"
 }
 ],
 "source": [
 "X = ttb.tenones((3, 4, 2)) # <-- Creates a 3 x 4 x 2 tensor of ones.\n",
 "X"
 ]
 },
 {
 "cell_type": "markdown",
 "metadata": {},
 "source": [
 "## Use {func}`pyttb.tenzeros` to create a tensor of all zeros"
 ]
 },
 {
 "cell_type": "code",
 "execution_count": 38,
 "metadata": {},
 "outputs": [
 {
 "data": {
 "text/plain": [
 "tensor of shape (1, 4, 2) with order F\n",
 "data[:, :, 0] =\n",
 "[[0. 0. 0. 0.]]\n",
 "data[:, :, 1] =\n",
 "[[0. 0. 0. 0.]]"
 ]
 },
 "execution_count": 38,
 "metadata": {},
 "output_type": "execute_result"
 }
 ],
 "source": [
 "X = ttb.tenzeros((1,4,2)) # <-- Creates a 1 x 4 x 2 tensor of zeroes.\n",
 "X"
 ]
 },
 {
 "cell_type": "markdown",
 "metadata": {},
 "source": [
 "## Use {func}`pyttb.tenrand` to create a random tensor"
 ]
 },
 {
 "cell_type": "code",
 "execution_count": 39,
 "metadata": {},
 "outputs": [
 {
 "data": {
 "text/plain": [
 "tensor of shape (5, 4, 2) with order F\n",
 "data[:, :, 0] =\n",
 "[[0.5488135 0.60276338 0.4236548 0.43758721]\n",
 " [0.96366276 0.79172504 0.56804456 0.07103606]\n",
 " [0.0202184 0.77815675 0.97861834 0.46147936]\n",
 " [0.11827443 0.14335329 0.52184832 0.26455561]\n",
 " [0.45615033 0.0187898 0.61209572 0.94374808]]\n",
 "data[:, :, 1] =\n",
 "[[0.71518937 0.54488318 0.64589411 0.891773 ]\n",
 " [0.38344152 0.52889492 0.92559664 0.0871293 ]\n",
 " [0.83261985 0.87001215 0.79915856 0.78052918]\n",
 " [0.63992102 0.94466892 0.41466194 0.77423369]\n",
 " [0.56843395 0.6176355 0.616934 0.6818203 ]]"
 ]
 },
 "execution_count": 39,
 "metadata": {},
 "output_type": "execute_result"
 }
 ],
 "source": [
 "np.random.seed(2)\n",
 "X = ttb.tenrand((5,4,2)) # <-- Creates a 5 x 4 x 2 tensor of uniform [0,1] random numbers.\n",
 "X"
 ]
 },
 {
 "cell_type": "markdown",
 "metadata": {},
 "source": [
 "## Specifying trailing singleton dimensions in a tensor"
 ]
 },
 {
 "cell_type": "markdown",
 "metadata": {},
 "source": [
 "To specify a 1-way tensor of size $m$, the shape should be of the form `(m,)`."
]
 },
 {
 "cell_type": "code",
 "execution_count": 25,
 "metadata": {},
 "outputs": [
 {
 "data": {
 "text/plain": [
 "tensor of shape (5,) with order F\n",
 "data[:] =\n",
 "[0.5488135 0.71518937 0.60276338 0.54488318 0.4236548 ]"
 ]
 },
 "execution_count": 25,
 "metadata": {},
 "output_type": "execute_result"
 }
 ],
 "source": [
 "np.random.seed(3)\n",
 "X = ttb.tensor(np.random.rand(5), (5,)) # Creates a 1-way tensor.\n",
 "X"
 ]
 },
 {
 "cell_type": "code",
 "execution_count": 46,
 "metadata": {},
 "outputs": [
 {
 "data": {
 "text/plain": [
 "tensor of shape (3, 4) with order F\n",
 "data[:, :] =\n",
 "[[0.96702984 0.71481599 0.97627445 0.43479153]\n",
 " [0.54723225 0.69772882 0.00623026 0.77938292]\n",
 " [0.97268436 0.2160895 0.25298236 0.19768507]]"
 ]
 },
 "execution_count": 46,
 "metadata": {},
 "output_type": "execute_result"
 }
 ],
 "source": [
 "np.random.seed(4)\n",
 "Y = ttb.tensor(np.random.rand(12), (3, 4)) # <-- Creates a 2-way tensor of size 3 x 4.\n",
 "Y"
 ]
 },
 {
 "cell_type": "code",
 "execution_count": 31,
 "metadata": {},
 "outputs": [
 {
 "data": {
 "text/plain": [
 "tensor of shape (3, 4, 1) with order F\n",
 "data[:, :, 0] =\n",
 "[[0.5488135 0.71518937 0.60276338 0.54488318]\n",
 " [0.4236548 0.64589411 0.43758721 0.891773 ]\n",
 " [0.96366276 0.38344152 0.79172504 0.52889492]]"
 ]
 },
 "execution_count": 31,
 "metadata": {},
 "output_type": "execute_result"
 }
 ],
 "source": [
 "np.random.seed(4)\n",
 "Y = ttb.tensor(np.random.rand(12), (3, 4, 1)) # <-- Creates a 3-way tensor of size 3 x 4 x 1.\n",
 "Y"
 ]
 },
 {
 "cell_type": "markdown",
 "metadata": {},
 "source": [
 "## The constituent parts of a tensor\n",
 "A tensor has two parts: `data` (a multidimensional array) and `shape` (a tuple of integers)."
 ]
 },
 {
 "cell_type": "code",
 "execution_count": 32,
 "metadata": {},
 "outputs": [
 {
 "data": {
 "text/plain": [
 "array([[[0.5488135 , 0.71518937, 0.60276338],\n",
 " [0.54488318, 0.4236548 , 0.64589411],\n",
 " [0.43758721, 0.891773 , 0.96366276],\n",
 " [0.38344152, 0.79172504, 0.52889492]],\n",
 "\n",
 " [[0.56804456, 0.92559664, 0.07103606],\n",
 " [0.0871293 , 0.0202184 , 0.83261985],\n",
 " [0.77815675, 0.87001215, 0.97861834],\n",
 " [0.79915856, 0.46147936, 0.78052918]]])"
 ]
 },
 "execution_count": 32,
 "metadata": {},
 "output_type": "execute_result"
 }
 ],
 "source": [
 "np.random.seed(0)\n",
 "X = ttb.tenrand((2, 4, 3)) # Create data.\n",
 "X.data # The array."
 ]
 },
 {
 "cell_type": "code",
 "execution_count": 33,
 "metadata": {},
 "outputs": [
 {
 "data": {
 "text/plain": [
 "(2, 4, 3)"
 ]
 },
 "execution_count": 33,
 "metadata": {},
 "output_type": "execute_result"
 }
 ],
 "source": [
 "X.shape # The shape."
 ]
 },
 {
 "cell_type": "markdown",
 "metadata": {},
 "source": [
 "## Creating a tensor from its constituent parts\n",
 "*This is an efficient way to create a tensor copy, but it illustrates the role of the parts. A more efficient way is to use `Y = X` (shallow copy) or `Y = X.copy()` (deep copy).*"
 ]
 },
 {
 "cell_type": "code",
 "execution_count": 35,
 "metadata": {},
 "outputs": [
 {
 "data": {
 "text/plain": [
 "tensor of shape (2, 4, 3) with order F\n",
 "data[:, :, 0] =\n",
 "[[0.5488135 0.54488318 0.43758721 0.38344152]\n",
 " [0.56804456 0.0871293 0.77815675 0.79915856]]\n",
 "data[:, :, 1] =\n",
 "[[0.71518937 0.4236548 0.891773 0.79172504]\n",
 " [0.92559664 0.0202184 0.87001215 0.46147936]]\n",
 "data[:, :, 2] =\n",
 "[[0.60276338 0.64589411 0.96366276 0.52889492]\n",
 " [0.07103606 0.83261985 0.97861834 0.78052918]]"
 ]
 },
 "execution_count": 35,
 "metadata": {},
 "output_type": "execute_result"
 }
 ],
 "source": [
 "np.random.seed(0)\n",
 "X = ttb.tenrand((2, 4, 3)) # Create data.\n",
 "Y = ttb.tensor(X.data, X.shape) # Creates a (deep) copy of X from its parts.\n",
 "Y"
 ]
 },
 {
 "cell_type": "markdown",
 "metadata": {},
 "source": [
 "## Creating an empty tensor"
 ]
 },
 {
 "cell_type": "code",
 "execution_count": 36,
 "metadata": {},
 "outputs": [
 {
 "data": {
 "text/plain": [
 "empty tensor of shape ()\n",
 "data = []"
 ]
 },
 "execution_count": 36,
 "metadata": {},
 "output_type": "execute_result"
 }
 ],
 "source": [
 "X = ttb.tensor() # Creates an empty tensor\n",
 "X"
 ]
 },
From a0d08d78228394013e47f0c45cdef8d3313726e7 Mon Sep 17 00:00:00 2001
From: Tammy Kolda
Date: Thu, 2 Jan 2025 11:39:01 -0800
Subject: [PATCH 31/33] Adding more explanation about __deepcopy__ per #381.

---
 pyttb/tensor.py | 8 ++++++--
 1 file changed, 6 insertions(+), 2 deletions(-)

diff --git a/pyttb/tensor.py b/pyttb/tensor.py
index 8433e004..fe22de6b 100644
--- a/pyttb/tensor.py
+++ b/pyttb/tensor.py
@@ -317,7 +317,11 @@ def copy(self) -> tensor:
 return ttb.tensor(self.data, self.shape, copy=True)

 def __deepcopy__(self, memo):
- """Return deep copy of this tensor."""
+ """Return deep copy of this tensor.
+
+ This is a Python construct to support copy operations;
+ see https://docs.python.org/3/library/copy.html for details.
+ """ return self.copy() def collapse( @@ -1301,7 +1305,7 @@ def reshape(self, shape: Shape) -> tensor: shape = parse_shape(shape) if prod(self.shape) != prod(shape): assert False, "Reshaping a tensor cannot change number of elements" - + # TODO: This is a copy, but it should be a view return ttb.tensor(np.reshape(self.data, shape, order=self.order), shape) def scale( From aa3bd96373dcac0251a2bceffed458ff5baf1d27 Mon Sep 17 00:00:00 2001 From: Tammy Kolda Date: Thu, 2 Jan 2025 13:27:11 -0800 Subject: [PATCH 32/33] Removing type and minor rewording so that it's clear what True/False actually means. --- pyttb/tensor.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pyttb/tensor.py b/pyttb/tensor.py index fe22de6b..454f1d33 100644 --- a/pyttb/tensor.py +++ b/pyttb/tensor.py @@ -79,8 +79,8 @@ def __init__( Source data as :class:`numpy.ndarray` (default: empty). shape : optional Shape of the tensor as a :class:`tuple` (default: ``data.shape()``). - copy : optional bool - Whether to copy the data or reference it (default: True). + copy : optional + Whether to copy (versus reference) the data (default: True). Examples -------- From 130d95a5f8f2f4519aa0ad3e310223642fd6195f Mon Sep 17 00:00:00 2001 From: Tammy Kolda Date: Fri, 31 Jan 2025 11:45:07 -0800 Subject: [PATCH 33/33] Updates to tensor tutorial. --- docs/source/tutorial/class_tensor.ipynb | 1936 ++++++++++++++++++----- 1 file changed, 1531 insertions(+), 405 deletions(-) diff --git a/docs/source/tutorial/class_tensor.ipynb b/docs/source/tutorial/class_tensor.ipynb index 1d7ef334..9f944c6c 100644 --- a/docs/source/tutorial/class_tensor.ipynb +++ b/docs/source/tutorial/class_tensor.ipynb @@ -16,7 +16,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "Tensors are extensions of multidimensial arrays with additional operations defined on them. Here we explain the basics for creating and working with tensors." + "Tensors are extensions of multidimensial arrays with additional operations defined on them. Here we explain the basics for creating and working with dense tensors." ] }, { @@ -28,13 +28,14 @@ }, { "cell_type": "code", - "execution_count": 1, + "execution_count": 195, "metadata": {}, "outputs": [], "source": [ "import pyttb as ttb\n", "import numpy as np\n", - "import sys" + "import sys\n", + "from pyttb.matlab.matlab_support import matlab_print" ] }, { @@ -53,7 +54,7 @@ }, { "cell_type": "code", - "execution_count": 15, + "execution_count": 196, "metadata": {}, "outputs": [ { @@ -72,27 +73,20 @@ " [1. 1. 1.]]" ] }, - "execution_count": 15, + "execution_count": 196, "metadata": {}, "output_type": "execute_result" } ], "source": [ - "M = np.ones((4, 3, 2)) # <-- 4 x 3 x 2 mutlidimensional array of ones.\n", - "X = ttb.tensor(M) # <-- Convert to 4 x 3 x 2 tensor object.\n", + "M = np.ones((4, 3, 2)) # Create numpy 4 x 3 x 2 array of ones.\n", + "X = ttb.tensor(M) # Convert to 4 x 3 x 2 tensor object.\n", "X" ] }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Optionally, you can specify a different shape for the tensor, so long as the input array has the right number of elements. " - ] - }, { "cell_type": "code", - "execution_count": 16, + "execution_count": 197, "metadata": {}, "outputs": [ { @@ -106,13 +100,13 @@ " [1. 1. 1. 1. 1. 
1.]]" ] }, - "execution_count": 16, + "execution_count": 197, "metadata": {}, "output_type": "execute_result" } ], "source": [ - "X = ttb.tensor(M, (4, 6)) # <-- Reshape to 4 x 6 tensor.\n", + "X = ttb.tensor(M, (4, 6)) # Reshape to 4 x 6 tensor.\n", "X" ] }, @@ -125,27 +119,30 @@ }, { "cell_type": "code", - "execution_count": 24, + "execution_count": 198, "metadata": {}, "outputs": [ { - "name": "stdout", - "output_type": "stream", - "text": [ - "tensor of shape (2, 3, 2) with order F\n", - "data[:, :, 0] =\n", - "[[0.5488135 0.60276338 0.4236548 ]\n", - " [0.71518937 0.54488318 0.64589411]]\n", - "data[:, :, 1] =\n", - "[[0.43758721 0.96366276 0.79172504]\n", - " [0.891773 0.38344152 0.52889492]]\n" - ] + "data": { + "text/plain": [ + "tensor of shape (2, 3, 2) with order F\n", + "data[:, :, 0] =\n", + "[[0.5488135 0.60276338 0.4236548 ]\n", + " [0.71518937 0.54488318 0.64589411]]\n", + "data[:, :, 1] =\n", + "[[0.43758721 0.96366276 0.79172504]\n", + " [0.891773 0.38344152 0.52889492]]" + ] + }, + "execution_count": 198, + "metadata": {}, + "output_type": "execute_result" } ], "source": [ "np.random.seed(0)\n", - "v = np.random.rand(12) # <-- length-12 vector of uniform random numbers.\n", - "X = ttb.tensor(v, (2, 3, 2), copy=False) # <-- Converted to 2 x 3 x 2 tensor.\n", + "v = np.random.rand(12) # length-12 vector of uniform random numbers.\n", + "X = ttb.tensor(v, (2, 3, 2), copy=False) # Converted to 2 x 3 x 2 tensor.\n", "X" ] }, @@ -153,146 +150,276 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "## Using {meth}`pyttb.tensors.from_function` to create a tensor with elements generated by a function\n", - "\n", - "This function takes another function that is used to generate entries of the tensor. The returned array should be in Fortran order to avoid unneccesary copies and rearrangment. Since the data will be reshape in any case, returning an array is fine. Alternatively, ensure the function returns in F-order for those methods that support it." + "## A Note on Display of Tensors\n", + "The display of a tensor is by _frontal slice_ where the first two indices range and the remainder stay fixed. This is different than how Python normal displays multidimensional arrays where the last two indices range and the remainder stay fixed." 
]
 },
 {
 "cell_type": "code",
 "execution_count": 279,
 "metadata": {},
 "outputs": [
 {
 "data": {
 "text/plain": [
 "array([[[0.5488135 , 0.71518937],\n",
 " [0.60276338, 0.54488318],\n",
 " [0.4236548 , 0.64589411]],\n",
 "\n",
 " [[0.43758721, 0.891773 ],\n",
 " [0.96366276, 0.38344152],\n",
 " [0.79172504, 0.52889492]],\n",
 "\n",
 " [[0.56804456, 0.92559664],\n",
 " [0.07103606, 0.0871293 ],\n",
 " [0.0202184 , 0.83261985]]])"
 ]
 },
 "execution_count": 279,
 "metadata": {},
 "output_type": "execute_result"
 }
 ],
 "source": [
 "# Display of the above tensor object in the usual Python way.\n",
 "X.data"
 ]
 },
 {
 "cell_type": "markdown",
 "metadata": {},
 "source": [
 "## Printing similar to MATLAB \n",
 "It is possible to print similarly to MATLAB using {func}`matlab_print` which has the optional arguments `name` and `format` to further customize the outputs. You will need \n",
 "\n",
 "``` python\n",
 "from pyttb.matlab.matlab_support import matlab_print\n",
 "```\n",
 "\n",
 "in your code for this to work as shown here."
 ]
 },
 {
 "cell_type": "code",
 "execution_count": 199,
 "metadata": {},
 "outputs": [
 {
 "name": "stdout",
 "output_type": "stream",
 "text": [
 "X is a tensor of shape 2 x 3 x 2\n",
 "\tX(:,:, 0) =\n",
 "\t\t0.5488 0.6028 0.4237\n",
 "\t\t0.7152 0.5449 0.6459\n",
 "\n",
 "\tX(:,:, 1) =\n",
 "\t\t0.4376 0.9637 0.7917\n",
 "\t\t0.8918 0.3834 0.5289\n",
 "\n"
 ]
 }
 ],
 "source": [
 "matlab_print(X,name='X',format='7.4f') # Print tensor X in MATLAB format."
 ]
 },
 {
 "cell_type": "markdown",
 "metadata": {},
 "source": [
 "Optionally, you can specify a different shape for the tensor, so long as the input array has the right number of elements. "
 ]
 },
 {
 "cell_type": "markdown",
 "metadata": {},
 "source": [
 "## Creating a tensor with elements generated by a function\n",
 "\n",
 "Using {meth}`pyttb.tensor.from_function` takes another function that is used to generate entries of the tensor. The returned array should be in Fortran order to avoid unnecessary copies and rearrangement. Since the data will be reshaped in any case, returning a vector is recommended. Alternatively, ensure the function returns in F-order for those methods that support it."
 ]
 },
 {
 "cell_type": "code",
 "execution_count": 200,
 "metadata": {},
 "outputs": [
 {
 "name": "stdout",
 "output_type": "stream",
 "text": [
 "X is a tensor of shape 2 x 3 x 2\n",
 "\tX(:,:, 0) =\n",
 "\t\t1.6243 -0.5282 0.8654\n",
 "\t\t-0.6118 -1.0730 -2.3015\n",
 "\n",
 "\tX(:,:, 1) =\n",
 "\t\t1.7448 0.3190 1.4621\n",
 "\t\t-0.7612 -0.2494 -2.0601\n",
 "\n"
 ]
 }
 ],
 "source": [
 "# Ensure reproducibility of random numbers.\n",
 "np.random.seed(1) \n",
 "# Function to generate normally distributed random numbers.\n",
 "randn = lambda s: np.random.randn(np.prod(s))\n",
 "# Create 2 x 3 x 2 tensor of normally distributed random numbers.\n",
 "X = ttb.tensor.from_function(randn, (2, 3, 2)) \n",
 "# Print tensor X in MATLAB format.\n",
 "matlab_print(X,name='X',format='7.4f') "
 ]
 },
 {
 "cell_type": "markdown",
 "metadata": {},
 "source": [
 "We show how to use {meth}`pyttb.tensor.from_function` in the next example to create a tensor of all ones, but it's even easier to use {meth}`pyttb.tenones` described below."
 ]
 },
 {
 "cell_type": "code",
 "execution_count": 201,
 "metadata": {},
 "outputs": [
 {
 "name": "stdout",
 "output_type": "stream",
 "text": [
 "X is a tensor of shape 3 x 4 x 2\n",
 "\tX(:,:, 0) =\n",
 "\t\t 1 1 1 1\n",
 "\t\t 1 1 1 1\n",
 "\t\t 1 1 1 1\n",
 "\n",
 "\tX(:,:, 1) =\n",
 "\t\t 1 1 1 1\n",
 "\t\t 1 1 1 1\n",
 "\t\t 1 1 1 1\n",
 "\n"
 ]
 }
 ],
 "source": [
 "# Function to generate tensor of ones. Uses explicit Fortran order.\n",
 "ones = lambda s: np.ones(s,order='F') \n",
 "# Create 3 x 4 x 2 tensor of ones.\n",
 "X = ttb.tensor.from_function(ones, (3, 4, 2))\n",
 "# Print tensor X in MATLAB format.\n",
 "matlab_print(X,name='X',format='2.0f')"
 ]
 },
 {
 "cell_type": "markdown",
 "metadata": {},
 "source": [
 "## Create a tensor of all ones\n",
 "Use {func}`pyttb.tenones` to create a tensor of all ones."
+ ] + }, + { + "cell_type": "code", + "execution_count": 202, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "X is a tensor of shape 3 x 4 x 2\n", + "\tX(:,:, 0) =\n", + "\t\t 1 1 1 1\n", + "\t\t 1 1 1 1\n", + "\t\t 1 1 1 1\n", + "\n", + "\tX(:,:, 1) =\n", + "\t\t 1 1 1 1\n", + "\t\t 1 1 1 1\n", + "\t\t 1 1 1 1\n", + "\n" + ] + } + ], + "source": [ + "\n", + "X = ttb.tenones((3, 4, 2)) \n", + "matlab_print(X,name='X',format='2.0f')" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Create a tensor of all zeros\n", + "Use {func}`pyttb.tenzeros` to create a tensor of all zeros.\n" + ] + }, + { + "cell_type": "code", + "execution_count": 203, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "X is a tensor of shape 1 x 4 x 2\n", + "\tX(:,:, 0) =\n", + "\t\t 0 0 0 0\n", + "\n", + "\tX(:,:, 1) =\n", + "\t\t 0 0 0 0\n", + "\n" + ] + } + ], + "source": [ + "X = ttb.tenzeros((1, 4, 2)) # Creates a 1 x 4 x 2 tensor of zeroes.\n", + "matlab_print(X,name='X',format='2.0f')" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Create a random tensor\n", + "Use {func}`pyttb.tenrand` to create a tensor with uniform random values from [0,1]." + ] + }, + { + "cell_type": "code", + "execution_count": 204, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "X is a tensor of shape 5 x 4 x 2\n", + "\tX(:,:, 0) =\n", + "\t\t0.4360 0.5497 0.4204 0.2046\n", + "\t\t0.2997 0.6211 0.1346 0.1844\n", + "\t\t0.8540 0.8466 0.5052 0.4281\n", + "\t\t0.1272 0.2260 0.2203 0.4678\n", + "\t\t0.6404 0.5052 0.7936 0.1623\n", + "\n", + "\tX(:,:, 1) =\n", + "\t\t0.0259 0.4353 0.3303 0.6193\n", + "\t\t0.2668 0.5291 0.5136 0.7853\n", + "\t\t0.4942 0.0796 0.0653 0.0965\n", + "\t\t0.5967 0.1069 0.3498 0.2017\n", + "\t\t0.4831 0.3869 0.5800 0.7008\n", + "\n" + ] + } + ], + "source": [ + "# Creates a 5 x 4 x 2 tensor of uniform [0,1] random numbers\n", + "np.random.seed(2) # Reproducible random numbers\n", + "X = ttb.tenrand((5, 4, 2))\n", + "matlab_print(X,name='X',format='7.4f')" ] }, { @@ -311,26 +438,27 @@ }, { "cell_type": "code", - "execution_count": 25, + "execution_count": 205, "metadata": {}, "outputs": [ { - "data": { - "text/plain": [ - "tensor of shape (5,) with order F\n", - "data[:] =\n", - "[0.5488135 0.71518937 0.60276338 0.54488318 0.4236548 ]" - ] - }, - "execution_count": 25, - "metadata": {}, - "output_type": "execute_result" + "name": "stdout", + "output_type": "stream", + "text": [ + "X is a tensor of shape 5\n", + "\tX(:) =\n", + "\t\t0.5508\n", + "\t\t0.7081\n", + "\t\t0.2909\n", + "\t\t0.5108\n", + "\t\t0.8929\n" + ] } ], "source": [ "np.random.seed(3)\n", - "X = ttb.tensor(np.random.rand(5), (5,)) # Creates a 1-way tensor.\n", - "X" + "X = ttb.tenrand((5,)) # Creates a 1-way tensor.\n", + "matlab_print(X,name='X',format='7.4f')" ] }, { @@ -343,54 +471,49 @@ }, { "cell_type": "code", - "execution_count": 46, + "execution_count": 206, "metadata": {}, "outputs": [ { - "data": { - "text/plain": [ - "tensor of shape (3, 4) with order F\n", - "data[:, :] =\n", - "[[0.96702984 0.71481599 0.97627445 0.43479153]\n", - " [0.54723225 0.69772882 0.00623026 0.77938292]\n", - " [0.97268436 0.2160895 0.25298236 0.19768507]]" - ] - }, - "execution_count": 46, - "metadata": {}, - "output_type": "execute_result" + "name": "stdout", + "output_type": "stream", + "text": [ + "Y is a tensor of shape 3 x 4\n", + "\tY(:,:) =\n", + 
"\t\t0.9670 0.5472 0.9727 0.7148\n", + "\t\t0.6977 0.2161 0.9763 0.0062\n", + "\t\t0.2530 0.4348 0.7794 0.1977\n" + ] } ], "source": [ "np.random.seed(4)\n", - "Y = ttb.tensor(np.random.rand(12),(3, 4)) # <-- Creates a 2-way tensor of size 4 x 3.\n", - "Y" + "Y = ttb.tenrand((3, 4)) # Creates a 2-way tensor of size 4 x 3.\n", + "matlab_print(Y,name='Y',format='7.4f')" ] }, { "cell_type": "code", - "execution_count": 31, + "execution_count": 207, "metadata": {}, "outputs": [ { - "data": { - "text/plain": [ - "tensor of shape (3, 4, 1) with order F\n", - "data[:, :, 0] =\n", - "[[0.5488135 0.71518937 0.60276338 0.54488318]\n", - " [0.4236548 0.64589411 0.43758721 0.891773 ]\n", - " [0.96366276 0.38344152 0.79172504 0.52889492]]" - ] - }, - "execution_count": 31, - "metadata": {}, - "output_type": "execute_result" + "name": "stdout", + "output_type": "stream", + "text": [ + "Y is a tensor of shape 3 x 4 x 1\n", + "\tY(:,:, 0) =\n", + "\t\t0.9670 0.5472 0.9727 0.7148\n", + "\t\t0.6977 0.2161 0.9763 0.0062\n", + "\t\t0.2530 0.4348 0.7794 0.1977\n", + "\n" + ] } ], "source": [ "np.random.seed(4)\n", - "Y = ttb.tensor(np.random.rand(12), (3, 4, 1)) # <-- Creates a 3-way tensor of size 3 x 4 x 1.\n", - "Y" + "Y = ttb.tenrand((3, 4, 1)) # Creates a 3-way tensor of size 3 x 4 x 1.\n", + "matlab_print(Y,name='Y',format='7.4f')" ] }, { @@ -403,64 +526,69 @@ }, { "cell_type": "code", - "execution_count": 32, + "execution_count": 208, "metadata": {}, "outputs": [ { - "data": { - "text/plain": [ - "array([[[0.5488135 , 0.71518937, 0.60276338],\n", - " [0.54488318, 0.4236548 , 0.64589411],\n", - " [0.43758721, 0.891773 , 0.96366276],\n", - " [0.38344152, 0.79172504, 0.52889492]],\n", - "\n", - " [[0.56804456, 0.92559664, 0.07103606],\n", - " [0.0871293 , 0.0202184 , 0.83261985],\n", - " [0.77815675, 0.87001215, 0.97861834],\n", - " [0.79915856, 0.46147936, 0.78052918]]])" - ] - }, - "execution_count": 32, - "metadata": {}, - "output_type": "execute_result" + "name": "stdout", + "output_type": "stream", + "text": [ + "X is a tensor of shape 2 x 4 x 3\n", + "\tX(:,:, 0) =\n", + "\t\t0.2220 0.9186 0.7659 0.1877\n", + "\t\t0.4413 0.2741 0.6288 0.2658\n", + "\n", + "\tX(:,:, 1) =\n", + "\t\t0.8707 0.4884 0.5184 0.0807\n", + "\t\t0.1583 0.4142 0.5798 0.2847\n", + "\n", + "\tX(:,:, 2) =\n", + "\t\t0.2067 0.6117 0.2968 0.7384\n", + "\t\t0.8799 0.2961 0.5999 0.2536\n", + "\n" + ] } ], "source": [ "np.random.seed(5)\n", - "X = ttb.tenrand((2, 4, 3)) # <-- Create tensor of size 2 x 4 x 3 with random numbers.\n", - "X" + "X = ttb.tenrand((2, 4, 3)) # Create tensor of size 2 x 4 x 3 with random numbers.\n", + "matlab_print(X,name='X',format='7.4f')" ] }, { "cell_type": "code", - "execution_count": 47, + "execution_count": 280, "metadata": {}, "outputs": [ { "data": { "text/plain": [ - "array([[[ 1.76405235, 0.95008842],\n", - " [ 0.97873798, -0.10321885],\n", - " [ 1.86755799, 0.14404357]],\n", + "array([[[0.5488135 , 0.71518937],\n", + " [0.60276338, 0.54488318],\n", + " [0.4236548 , 0.64589411]],\n", + "\n", + " [[0.43758721, 0.891773 ],\n", + " [0.96366276, 0.38344152],\n", + " [0.79172504, 0.52889492]],\n", "\n", - " [[ 0.40015721, -0.15135721],\n", - " [ 2.2408932 , 0.4105985 ],\n", - " [-0.97727788, 1.45427351]]])" + " [[0.56804456, 0.92559664],\n", + " [0.07103606, 0.0871293 ],\n", + " [0.0202184 , 0.83261985]]])" ] }, - "execution_count": 47, + "execution_count": 280, "metadata": {}, "output_type": "execute_result" } ], "source": [ - "# The array\n", + "# The array (note that its display order is different 
from the tensor).\n", "X.data # The array." ] }, { "cell_type": "code", - "execution_count": 48, + "execution_count": 210, "metadata": {}, "outputs": [ { @@ -474,35 +602,35 @@ " WRITEBACKIFCOPY : False" ] }, - "execution_count": 48, + "execution_count": 210, "metadata": {}, "output_type": "execute_result" } ], "source": [ - "# Note that it's is stored in Fortran format\n", + "# Note that it's is stored in Fortran format (F_CONTIGUOUS = True).\n", "X.data.flags" ] }, { "cell_type": "code", - "execution_count": 49, + "execution_count": 211, "metadata": {}, "outputs": [ { "data": { "text/plain": [ - "(2, 3, 2)" + "(2, 4, 3)" ] }, - "execution_count": 49, + "execution_count": 211, "metadata": {}, "output_type": "execute_result" } ], "source": [ "# The shape\n", - "X.shape " + "X.shape" ] }, { @@ -510,39 +638,53 @@ "metadata": {}, "source": [ "## Creating a tensor from its constituent parts\n", - "*This is an efficient way to create a tensor copy, but it illustrates the role of the parts. A more efficient way is to use `Y = X` (shallow copy) or `Y = X.copy()` (deep copy). *" + "_This is an efficient way to create a tensor copy, but it illustrates the role of the parts. A more efficient way is to use `Y = X` (shallow copy) or `Y = X.copy()` (deep copy)._" ] }, { "cell_type": "code", - "execution_count": 35, + "execution_count": 212, "metadata": {}, "outputs": [ { - "data": { - "text/plain": [ - "tensor of shape (2, 4, 3) with order F\n", - "data[:, :, 0] =\n", - "[[0.5488135 0.54488318 0.43758721 0.38344152]\n", - " [0.56804456 0.0871293 0.77815675 0.79915856]]\n", - "data[:, :, 1] =\n", - "[[0.71518937 0.4236548 0.891773 0.79172504]\n", - " [0.92559664 0.0202184 0.87001215 0.46147936]]\n", - "data[:, :, 2] =\n", - "[[0.60276338 0.64589411 0.96366276 0.52889492]\n", - " [0.07103606 0.83261985 0.97861834 0.78052918]]" - ] - }, - "execution_count": 35, - "metadata": {}, - "output_type": "execute_result" + "name": "stdout", + "output_type": "stream", + "text": [ + "X is a tensor of shape 2 x 4 x 3\n", + "\tX(:,:, 0) =\n", + "\t\t0.5488 0.5449 0.4376 0.3834\n", + "\t\t0.5680 0.0871 0.7782 0.7992\n", + "\n", + "\tX(:,:, 1) =\n", + "\t\t0.7152 0.4237 0.8918 0.7917\n", + "\t\t0.9256 0.0202 0.8700 0.4615\n", + "\n", + "\tX(:,:, 2) =\n", + "\t\t0.6028 0.6459 0.9637 0.5289\n", + "\t\t0.0710 0.8326 0.9786 0.7805\n", + "\n", + "Y is a tensor of shape 2 x 4 x 3\n", + "\tY(:,:, 0) =\n", + "\t\t0.5488 0.5449 0.4376 0.3834\n", + "\t\t0.5680 0.0871 0.7782 0.7992\n", + "\n", + "\tY(:,:, 1) =\n", + "\t\t0.7152 0.4237 0.8918 0.7917\n", + "\t\t0.9256 0.0202 0.8700 0.4615\n", + "\n", + "\tY(:,:, 2) =\n", + "\t\t0.6028 0.6459 0.9637 0.5289\n", + "\t\t0.0710 0.8326 0.9786 0.7805\n", + "\n" + ] } ], "source": [ "np.random.seed(0)\n", "X = ttb.tenrand((2, 4, 3)) # Create data.\n", + "matlab_print(X,name='X',format='7.4f') # Print tensor X in MATLAB format.\n", "Y = ttb.tensor(X.data, X.shape) # Creates a (deep) copy of X from its parts.\n", - "Y" + "matlab_print(Y,name='Y',format='7.4f')" ] }, { @@ -555,98 +697,114 @@ }, { "cell_type": "code", - "execution_count": 36, + "execution_count": 213, "metadata": {}, "outputs": [ { - "data": { - "text/plain": [ - "empty tensor of shape ()\n", - "data = []" - ] - }, - "execution_count": 36, - "metadata": {}, - "output_type": "execute_result" + "name": "stdout", + "output_type": "stream", + "text": [ + "X is a tensor of shape \n", + "\tX(:) =\n", + "\n" + ] } ], "source": [ "X = ttb.tensor() # Creates an empty tensor\n", - "X" + "matlab_print(X,name='X',format='7.4f')" ] }, { 
"cell_type": "markdown", "metadata": {}, "source": [ - "## Use `squeeze` to remove singleton dimensions from a tensor" + "## Removing singleton dimensions from a tensor\n", + "Use {meth}`pyttb.tensor.squeeze` to remove single dimensions from a tensor." ] }, { "cell_type": "code", - "execution_count": 51, + "execution_count": 214, "metadata": {}, "outputs": [ { - "data": { - "text/plain": [ - "tensor of shape (4, 3, 1) with order F\n", - "data[:, :, 0] =\n", - "[[0.5488135 0.71518937 0.60276338]\n", - " [0.54488318 0.4236548 0.64589411]\n", - " [0.43758721 0.891773 0.96366276]\n", - " [0.38344152 0.79172504 0.52889492]]" - ] - }, - "execution_count": 51, - "metadata": {}, - "output_type": "execute_result" + "name": "stdout", + "output_type": "stream", + "text": [ + "Y is a tensor of shape 4 x 3 x 1\n", + "\tY(:,:, 0) =\n", + "\t\t0.5488 0.7152 0.6028\n", + "\t\t0.5449 0.4237 0.6459\n", + "\t\t0.4376 0.8918 0.9637\n", + "\t\t0.3834 0.7917 0.5289\n", + "\n" + ] } ], "source": [ "np.random.seed(0)\n", - "Y = ttb.tenrand((4,3,1)) # Create the data.\n", - "Y" + "Y = ttb.tenrand((4, 3, 1)) # Create the data.\n", + "matlab_print(Y,name='Y',format='7.4f') # Print tensor Y in MATLAB format." ] }, { "cell_type": "code", - "execution_count": 52, + "execution_count": 215, "metadata": {}, "outputs": [ { - "data": { - "text/plain": [ - "tensor of shape (4, 3) with order F\n", - "data[:, :] =\n", - "[[0.5488135 0.71518937 0.60276338]\n", - " [0.54488318 0.4236548 0.64589411]\n", - " [0.43758721 0.891773 0.96366276]\n", - " [0.38344152 0.79172504 0.52889492]]" - ] - }, - "execution_count": 52, - "metadata": {}, - "output_type": "execute_result" + "name": "stdout", + "output_type": "stream", + "text": [ + "Z is a tensor of shape 4 x 3\n", + "\tZ(:,:) =\n", + "\t\t0.5488 0.7152 0.6028\n", + "\t\t0.5449 0.4237 0.6459\n", + "\t\t0.4376 0.8918 0.9637\n", + "\t\t0.3834 0.7917 0.5289\n" + ] } ], "source": [ "Z = Y.squeeze() # Squeeze out the singleton dimension.\n", - "Z" + "matlab_print(Z,name='Z',format='7.4f') # Print tensor Z in MATLAB format. " ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "## Use `double` to convert a tensor to a (multidimensional) array" + "## Convert a tensor to a (multidimensional) array\n", + "Use {meth}`pyttb.tensor.double` to convert a tensor to a numpy array; this is identical to extracting the `data` member." 
] }, { "cell_type": "code", - "execution_count": null, + "execution_count": 216, "metadata": {}, - "outputs": [], + "outputs": [ + { + "data": { + "text/plain": [ + "array([[[0.5488135 , 0.71518937, 0.60276338, 0.54488318],\n", + " [0.4236548 , 0.64589411, 0.43758721, 0.891773 ],\n", + " [0.96366276, 0.38344152, 0.79172504, 0.52889492],\n", + " [0.56804456, 0.92559664, 0.07103606, 0.0871293 ],\n", + " [0.0202184 , 0.83261985, 0.77815675, 0.87001215]],\n", + "\n", + " [[0.97861834, 0.79915856, 0.46147936, 0.78052918],\n", + " [0.11827443, 0.63992102, 0.14335329, 0.94466892],\n", + " [0.52184832, 0.41466194, 0.26455561, 0.77423369],\n", + " [0.45615033, 0.56843395, 0.0187898 , 0.6176355 ],\n", + " [0.61209572, 0.616934 , 0.94374808, 0.6818203 ]]])" + ] + }, + "execution_count": 216, + "metadata": {}, + "output_type": "execute_result" + } + ], "source": [ "np.random.seed(0)\n", "X = ttb.tenrand((2, 5, 4)) # Create the data.\n", @@ -655,9 +813,30 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 217, "metadata": {}, - "outputs": [], + "outputs": [ + { + "data": { + "text/plain": [ + "array([[[0.5488135 , 0.71518937, 0.60276338, 0.54488318],\n", + " [0.4236548 , 0.64589411, 0.43758721, 0.891773 ],\n", + " [0.96366276, 0.38344152, 0.79172504, 0.52889492],\n", + " [0.56804456, 0.92559664, 0.07103606, 0.0871293 ],\n", + " [0.0202184 , 0.83261985, 0.77815675, 0.87001215]],\n", + "\n", + " [[0.97861834, 0.79915856, 0.46147936, 0.78052918],\n", + " [0.11827443, 0.63992102, 0.14335329, 0.94466892],\n", + " [0.52184832, 0.41466194, 0.26455561, 0.77423369],\n", + " [0.45615033, 0.56843395, 0.0187898 , 0.6176355 ],\n", + " [0.61209572, 0.616934 , 0.94374808, 0.6818203 ]]])" + ] + }, + "execution_count": 217, + "metadata": {}, + "output_type": "execute_result" + } + ], "source": [ "X.data # Same thing." ] @@ -671,29 +850,71 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 282, "metadata": {}, "outputs": [], + "source": [ + "X = ttb.tenrand((4,3,2)) # Create a 4 x 3 x 2 tensor of random numbers." + ] + }, + { + "cell_type": "code", + "execution_count": 283, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "3" + ] + }, + "execution_count": 283, + "metadata": {}, + "output_type": "execute_result" + } + ], "source": [ "X.ndims # Number of dimensions (or ways)." ] }, { "cell_type": "code", - "execution_count": null, + "execution_count": 284, "metadata": {}, - "outputs": [], + "outputs": [ + { + "data": { + "text/plain": [ + "(4, 3, 2)" + ] + }, + "execution_count": 284, + "metadata": {}, + "output_type": "execute_result" + } + ], "source": [ - "X.shape # Row vector with the shapes of all dimensions." + "X.shape # Tuple with the sizes of all dimensions." ] }, { "cell_type": "code", - "execution_count": null, + "execution_count": 285, "metadata": {}, - "outputs": [], + "outputs": [ + { + "data": { + "text/plain": [ + "2" + ] + }, + "execution_count": 285, + "metadata": {}, + "output_type": "execute_result" + } + ], "source": [ - "X.shape[2] # shape of a single dimension." + "X.shape[2] # Size of a single dimension." 
] }, { @@ -705,12 +926,23 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 287, "metadata": {}, - "outputs": [], + "outputs": [ + { + "data": { + "text/plain": [ + "0.5488135039273248" + ] + }, + "execution_count": 287, + "metadata": {}, + "output_type": "execute_result" + } + ], "source": [ "np.random.seed(0)\n", - "X = ttb.tenrand((2, 3, 4, 1)) # Create a 3x4x2x1 random tensor.\n", + "X = ttb.tenrand((3, 4, 2, 1)) # Create a 3x4x2x1 random tensor.\n", "X[0, 0, 0, 0] # Extract a single element." ] }, @@ -723,20 +955,48 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 288, "metadata": {}, - "outputs": [], + "outputs": [ + { + "data": { + "text/plain": [ + "tensor of shape (1,) with order F\n", + "data[:] =\n", + "[0.5488135]" + ] + }, + "execution_count": 288, + "metadata": {}, + "output_type": "execute_result" + } + ], "source": [ "X[0, 0, 0, :] # Produces a tensor of order 1 and shape 1." ] }, { "cell_type": "code", - "execution_count": null, + "execution_count": 289, "metadata": {}, - "outputs": [], + "outputs": [ + { + "data": { + "text/plain": [ + "tensor of shape (3, 1) with order F\n", + "data[:, :] =\n", + "[[0.5488135 ]\n", + " [0.96366276]\n", + " [0.0202184 ]]" + ] + }, + "execution_count": 289, + "metadata": {}, + "output_type": "execute_result" + } + ], "source": [ - "X[0, :, 0, :] # Produces a tensor of shape 3x1." + "X[:, 0, 0, :] # Produces a tensor of shape 3x1." ] }, { @@ -748,11 +1008,25 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 292, "metadata": {}, - "outputs": [], + "outputs": [ + { + "data": { + "text/plain": [ + "tensor of shape (2, 2, 1) with order F\n", + "data[:, :, 0] =\n", + "[[0.60276338 0.43758721]\n", + " [0.79172504 0.07103606]]" + ] + }, + "execution_count": 292, + "metadata": {}, + "output_type": "execute_result" + } + ], "source": [ - "X[0:2, 0, [1, 3], :] # Produces a tensor of shape 2x2x1." + "X[0:2, [1, 3], 0, :] # Produces a tensor of shape 2x2x1." ] }, { @@ -764,19 +1038,41 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 293, "metadata": {}, - "outputs": [], + "outputs": [ + { + "data": { + "text/plain": [ + "array([0.5488135 , 0.78052918])" + ] + }, + "execution_count": 293, + "metadata": {}, + "output_type": "execute_result" + } + ], "source": [ - "subs = np.array([[0, 0, 0, 0], [1, 2, 3, 0]])\n", + "subs = np.array([[0, 0, 0, 0], [2, 3, 1, 0]])\n", "X[subs] # Extract 2 values by subscript." ] }, { "cell_type": "code", - "execution_count": null, + "execution_count": 226, "metadata": {}, - "outputs": [], + "outputs": [ + { + "data": { + "text/plain": [ + "array([0.5488135 , 0.78052918])" + ] + }, + "execution_count": 226, + "metadata": {}, + "output_type": "execute_result" + } + ], "source": [ "inds = np.array([0, 23])\n", "X[inds] # Same thing with linear indices." @@ -784,7 +1080,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 227, "metadata": {}, "outputs": [], "source": [ @@ -794,9 +1090,20 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 228, "metadata": {}, - "outputs": [], + "outputs": [ + { + "data": { + "text/plain": [ + "array([0.5488135 , 0.71518937, 0.60276338, 0.54488318, 0.4236548 ])" + ] + }, + "execution_count": 228, + "metadata": {}, + "output_type": "execute_result" + } + ], "source": [ "X[0:5] # Extract a subtensor." 
]
 },
 {
 "cell_type": "markdown",
 "metadata": {},
 "source": [
 "## Subscripted assignment for a tensor\n",
 "We can assign a single element, an entire subtensor, or a list of values for a tensor."
 ]
 },
 {
 "cell_type": "code",
 "execution_count": 331,
 "metadata": {},
 "outputs": [
 {
 "name": "stdout",
 "output_type": "stream",
 "text": [
 "X is a tensor of shape 3 x 4 x 2\n",
 "\tX(:,:, 0) =\n",
 "\t\t0.0000 0.6028 0.4237 0.4376\n",
 "\t\t0.9637 0.7917 0.5680 0.0710\n",
 "\t\t0.0202 0.7782 0.9786 0.4615\n",
 "\n",
 "\tX(:,:, 1) =\n",
 "\t\t0.7152 0.5449 0.6459 0.8918\n",
 "\t\t0.3834 0.5289 0.9256 0.0871\n",
 "\t\t0.8326 0.8700 0.7992 0.7805\n",
 "\n"
 ]
 }
 ],
 "source": [
 "np.random.seed(0)\n",
 "X = ttb.tenrand((3,4,2)) # Create some data.\n",
 "X[0, 0, 0] = 0 # Replaces the [0,0,0] element.\n",
 "matlab_print(X,name='X',format='7.4f') # Print tensor X in MATLAB format."
 ]
 },
 {
 "cell_type": "code",
 "execution_count": 332,
 "metadata": {},
 "outputs": [
 {
 "name": "stdout",
 "output_type": "stream",
 "text": [
 "X is a tensor of shape 3 x 4 x 2\n",
 "\tX(:,:, 0) =\n",
 "\t\t1.0000 1.0000 0.4237 0.4376\n",
 "\t\t1.0000 1.0000 0.5680 0.0710\n",
 "\t\t0.0202 0.7782 0.9786 0.4615\n",
 "\n",
 "\tX(:,:, 1) =\n",
 "\t\t0.7152 0.5449 0.6459 0.8918\n",
 "\t\t0.3834 0.5289 0.9256 0.0871\n",
 "\t\t0.8326 0.8700 0.7992 0.7805\n",
 "\n"
 ]
 }
 ],
 "source": [
 "X[0:2, 0:2, 0] = np.ones((2, 2)) # Replaces a subtensor.\n",
 "matlab_print(X,name='X',format='7.4f') # Print tensor X in MATLAB format."
 ]
 },
 {
 "cell_type": "code",
 "execution_count": 333,
 "metadata": {},
 "outputs": [
 {
 "name": "stdout",
 "output_type": "stream",
 "text": [
 "X is a tensor of shape 3 x 4 x 2\n",
 "\tX(:,:, 0) =\n",
 "\t\t5.0000 1.0000 0.4237 0.4376\n",
 "\t\t1.0000 1.0000 0.5680 0.0710\n",
 "\t\t0.0202 0.7782 0.9786 0.4615\n",
 "\n",
 "\tX(:,:, 1) =\n",
 "\t\t7.0000 0.5449 0.6459 0.8918\n",
 "\t\t0.3834 0.5289 0.9256 0.0871\n",
 "\t\t0.8326 0.8700 0.7992 0.7805\n",
 "\n"
 ]
 }
 ],
 "source": [
 "subs = np.array([[0, 0, 0], [0,0,1]])\n",
 "X[subs] = [5, 7] # Replaces the (0,0,0) and (0,0,1) elements.\n",
 "matlab_print(X,name='X',format='7.4f') # Print tensor X in MATLAB format."
 ]
 },
 {
 "cell_type": "code",
 "execution_count": 339,
 "metadata": {},
 "outputs": [
 {
 "name": "stdout",
 "output_type": "stream",
 "text": [
 "X is a tensor of shape 4 x 3 x 2\n",
 "\tX(:,:, 0) =\n",
 "\t\t 5.0000 0.6028 0.4237\n",
 "\t\t 0.4376 0.9637 0.7917\n",
 "\t\t 0.5680 0.0710 0.0202\n",
 "\t\t 0.7782 0.9786 0.4615\n",
 "\n",
 "\tX(:,:, 1) =\n",
 "\t\t 7.0000 0.5449 0.6459\n",
 "\t\t 0.8918 0.3834 0.5289\n",
 "\t\t 0.9256 0.0871 0.8326\n",
 "\t\t 0.8700 0.7992 0.7805\n",
 "\n"
 ]
 }
 ],
 "source": [
 "X[[0, 12]] = [5, 7] # Same as above using linear indices.\n",
 "matlab_print(X,name='X',format='7.4f') # Print tensor X in MATLAB format."
] }, { @@ -859,12 +1242,38 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 340, "metadata": {}, - "outputs": [], + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "X is a tensor of shape 4 x 3 x 3\n", + "\tX(:,:, 0) =\n", + "\t\t 5.0000 0.6028 0.4237\n", + "\t\t 0.4376 0.9637 0.7917\n", + "\t\t 0.5680 0.0710 0.0202\n", + "\t\t 0.7782 0.9786 0.4615\n", + "\n", + "\tX(:,:, 1) =\n", + "\t\t 7.0000 0.5449 0.6459\n", + "\t\t 0.8918 0.3834 0.5289\n", + "\t\t 0.9256 0.0871 0.8326\n", + "\t\t 0.8700 0.7992 0.7805\n", + "\n", + "\tX(:,:, 2) =\n", + "\t\t 1.0000 0.0000 0.0000\n", + "\t\t 0.0000 0.0000 0.0000\n", + "\t\t 0.0000 0.0000 0.0000\n", + "\t\t 0.0000 0.0000 0.0000\n", + "\n" + ] + } + ], "source": [ - "X[2, 1, 1] = 1 # Grows the shape of the tensor.\n", - "X" + "X[0,0,2] = 1 # Grows the shape of the tensor.\n", + "matlab_print(X,name='X',format='7.4f') # Print tensor X in MATLAB format." ] }, { @@ -876,55 +1285,95 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 341, "metadata": {}, - "outputs": [], + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "X is a tensor of shape 4 x 3 x 2\n", + "\tX(:,:, 0) =\n", + "\t\t 0.5488 0.6028 0.4237\n", + "\t\t 0.4376 0.9637 0.7917\n", + "\t\t 0.5680 0.0710 0.0202\n", + "\t\t 0.7782 0.9786 0.4615\n", + "\n", + "\tX(:,:, 1) =\n", + "\t\t 0.7152 0.5449 0.6459\n", + "\t\t 0.8918 0.3834 0.5289\n", + "\t\t 0.9256 0.0871 0.8326\n", + "\t\t 0.8700 0.7992 0.7805\n", + "\n" + ] + } + ], "source": [ "np.random.seed(0)\n", - "X = ttb.tenrand((2, 3, 4)) # Create some data.\n", - "np.prod(X.shape) - 1 # The index of the last element of the flattened tensor." + "X = ttb.tenrand((4,3,2)) # Create some data.\n", + "matlab_print(X,name='X',format='7.4f') # Print tensor X in MATLAB format." ] }, { "cell_type": "code", - "execution_count": null, + "execution_count": 344, "metadata": {}, - "outputs": [], - "source": [ - "X[2, 2, 3] = 99 # Inserting 99 into last element\n", - "X[-1] # Same as X[2,2,3]" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], + "outputs": [ + { + "data": { + "text/plain": [ + "'Last value in array is 0.7805'" + ] + }, + "execution_count": 344, + "metadata": {}, + "output_type": "execute_result" + } + ], "source": [ - "X[0:-1]" + "f\"Last value in array is {X[-1]:.4f}\" # Same as X(3,2,1)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "## Use `find` for subscripts of nonzero elements of a tensor" + "## Extracting subscripts of nonzero elements of a tensor\n", + "Use {meth}`pyttb.tensor.find` to get nonzero elements and values from a tensor." ] }, { "cell_type": "code", - "execution_count": null, + "execution_count": 385, "metadata": {}, - "outputs": [], + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "X is a tensor of shape 2 x 2 x 2\n", + "\tX(:,:, 0) =\n", + "\t\t 2 2\n", + "\t\t 1 2\n", + "\n", + "\tX(:,:, 1) =\n", + "\t\t 0 0\n", + "\t\t 1 0\n", + "\n" + ] + } + ], "source": [ - "np.random.seed(0)\n", - "X = ttb.tensor(3 * np.random.rand(2, 2, 2)) # Generate some data.\n", - "X" + "# Create a tensor that's about 33% zeros.\n", + "np.random.seed(5)\n", + "randint = lambda s: np.random.randint(0, 3, np.prod(s))\n", + "X = ttb.tensor.from_function(randint, (2, 2, 2)) # Create a tensor.\n", + "matlab_print(X,name='X',format='2.0f') # Print tensor X in MATLAB format." 
] }, { "cell_type": "code", - "execution_count": null, + "execution_count": 386, "metadata": {}, "outputs": [], "source": [ @@ -933,38 +1382,95 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 387, "metadata": {}, - "outputs": [], + "outputs": [ + { + "data": { + "text/plain": [ + "array([[0, 0, 0],\n", + " [1, 0, 0],\n", + " [0, 1, 0],\n", + " [1, 1, 0],\n", + " [1, 0, 1]])" + ] + }, + "execution_count": 387, + "metadata": {}, + "output_type": "execute_result" + } + ], "source": [ "S # Nonzero subscripts" ] }, { "cell_type": "code", - "execution_count": null, + "execution_count": 388, "metadata": {}, - "outputs": [], + "outputs": [ + { + "data": { + "text/plain": [ + "array([[2],\n", + " [1],\n", + " [2],\n", + " [2],\n", + " [1]], dtype=int32)" + ] + }, + "execution_count": 388, + "metadata": {}, + "output_type": "execute_result" + } + ], "source": [ "V # Values" ] }, { "cell_type": "code", - "execution_count": null, + "execution_count": 390, "metadata": {}, - "outputs": [], + "outputs": [ + { + "data": { + "text/plain": [ + "(array([[0, 0, 0],\n", + " [0, 1, 0],\n", + " [1, 1, 0]]),\n", + " array([[ True],\n", + " [ True],\n", + " [ True]]))" + ] + }, + "execution_count": 390, + "metadata": {}, + "output_type": "execute_result" + } + ], "source": [ - "larger_entries = X >= 2\n", + "larger_entries = X >= 2 # Find entries >= 2.\n", "larger_subs, larger_vals = larger_entries.find() # Find subscripts of values >= 2.\n", "larger_subs, larger_vals" ] }, { "cell_type": "code", - "execution_count": null, + "execution_count": 379, "metadata": {}, - "outputs": [], + "outputs": [ + { + "data": { + "text/plain": [ + "array([2., 2., 2.])" + ] + }, + "execution_count": 379, + "metadata": {}, + "output_type": "execute_result" + } + ], "source": [ "V = X[larger_subs]\n", "V" @@ -975,17 +1481,28 @@ "metadata": {}, "source": [ "## Computing the Frobenius norm of a tensor\n", - "`norm` computes the Frobenius norm of a tensor. This corresponds to the Euclidean norm of the vectorized tensor." + "The method {meth}`pyttb.tensor.norm` computes the Frobenius norm of a tensor. This corresponds to the Euclidean norm of the vectorized tensor." ] }, { "cell_type": "code", - "execution_count": null, + "execution_count": 391, "metadata": {}, - "outputs": [], + "outputs": [ + { + "data": { + "text/plain": [ + "2.631397990238147" + ] + }, + "execution_count": 391, + "metadata": {}, + "output_type": "execute_result" + } + ], "source": [ "np.random.seed(0)\n", - "X = ttb.tensor(np.ones((3, 2, 3)))\n", + "X = ttb.tenrand((2,3,3))\n", "X.norm()" ] }, @@ -993,19 +1510,57 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "## Using `reshape` to rearrange elements in a tensor\n", - "`reshape` reshapes a tensor into a given shape array. The total number of elements in the tensor cannot change." + "## Reshaping a tensor\n", + "The method {meth}`pyttb.tensor.reshape` reshapes a tensor into a given shape array. 
The total number of elements in the tensor cannot change.\n",
 "_Currently, this method creates a **copy** of the tensor, and this needs to be fixed._"
 ]
 },
 {
 "cell_type": "code",
 "execution_count": 395,
 "metadata": {},
 "outputs": [
 {
 "name": "stdout",
 "output_type": "stream",
 "text": [
 "X is a tensor of shape 3 x 2 x 3\n",
 "\tX(:,:, 0) =\n",
 "\t\t 5 3\n",
 "\t\t 0 7\n",
 "\t\t 3 9\n",
 "\n",
 "\tX(:,:, 1) =\n",
 "\t\t 3 4\n",
 "\t\t 5 7\n",
 "\t\t 2 6\n",
 "\n",
 "\tX(:,:, 2) =\n",
 "\t\t 8 6\n",
 "\t\t 8 7\n",
 "\t\t 1 7\n",
 "\n",
 "Y is a tensor of shape 3 x 3 x 2\n",
 "\tY(:,:, 0) =\n",
 "\t\t 5 3 3\n",
 "\t\t 0 7 5\n",
 "\t\t 3 9 2\n",
 "\n",
 "\tY(:,:, 1) =\n",
 "\t\t 4 8 6\n",
 "\t\t 7 8 7\n",
 "\t\t 6 1 7\n",
 "\n"
 ]
 }
 ],
 "source": [
 "np.random.seed(0)\n",
 "randint = lambda s: np.random.randint(0, 10, np.prod(s))\n",
 "X = ttb.tensor.from_function(randint, (3, 2, 3))\n",
 "matlab_print(X,name='X',format='2.0f')\n",
 "Y = X.reshape((3,3,2))\n",
 "matlab_print(Y,name='Y',format='2.0f')"
 ]
 },
 {
 "cell_type": "markdown",
 "metadata": {},
 "source": [
 "## Logical operations on a tensor\n",
 "Logical operations (and, or, xor, not) and comparisons (==, !=, >, >=, <, <=) are defined elementwise for tensors."
 ]
 },
 {
 "cell_type": "code",
 "execution_count": 245,
 "metadata": {},
 "outputs": [],
 "source": [
 "np.random.seed(0)\n",
 "A = ttb.tensor(np.floor(3 * np.random.rand(2, 2, 3))) # Generate some data.\n",
 "B = ttb.tensor(np.floor(3 * np.random.rand(2, 2, 3))) # Generate some data."
 ]
 },
 {
 "cell_type": "code",
 "execution_count": 246,
 "metadata": {},
 "outputs": [
 {
 "data": {
 "text/plain": [
 "tensor of shape (2, 2, 3) with order F\n",
 "data[:, :, 0] =\n",
 "[[1. 0.]\n",
 " [1. 1.]]\n",
 "data[:, :, 1] =\n",
 "[[1. 0.]\n",
 " [1. 1.]]\n",
 "data[:, :, 2] =\n",
 "[[0. 1.]\n",
 " [1. 1.]]"
 ]
 },
 "execution_count": 246,
 "metadata": {},
 "output_type": "execute_result"
 }
 ],
 "source": [
 "A.logical_and(B) # Calls and."
 ]
 },
 {
 "cell_type": "code",
 "execution_count": 247,
 "metadata": {},
 "outputs": [
 {
 "data": {
 "text/plain": [
 "tensor of shape (2, 2, 3) with order F\n",
 "data[:, :, 0] =\n",
 "[[1. 1.]\n",
 " [1. 1.]]\n",
 "data[:, :, 1] =\n",
 "[[1. 1.]\n",
 " [1. 1.]]\n",
 "data[:, :, 2] =\n",
 "[[1. 1.]\n",
 " [1. 1.]]"
 ]
 },
 "execution_count": 247,
 "metadata": {},
 "output_type": "execute_result"
 }
 ],
 "source": [
 "A.logical_or(B)"
 ]
 },
 {
 "cell_type": "code",
 "execution_count": 248,
 "metadata": {},
 "outputs": [
 {
 "data": {
 "text/plain": [
 "tensor of shape (2, 2, 3) with order F\n",
 "data[:, :, 0] =\n",
 "[[0. 1.]\n",
 " [0. 0.]]\n",
 "data[:, :, 1] =\n",
 "[[0. 1.]\n",
 " [0. 0.]]\n",
 "data[:, :, 2] =\n",
 "[[1. 0.]\n",
 " [0. 0.]]"
 ]
 },
 "execution_count": 248,
 "metadata": {},
 "output_type": "execute_result"
 }
 ],
 "source": [
 "A.logical_xor(B)"
 ]
 },
 {
 "cell_type": "code",
 "execution_count": 249,
 "metadata": {},
 "outputs": [
 {
 "data": {
 "text/plain": [
 "tensor of shape (2, 2, 3) with order F\n",
 "data[:, :, 0] =\n",
 "[[ True False]\n",
 " [False False]]\n",
 "data[:, :, 1] =\n",
 "[[ True False]\n",
 " [ True False]]\n",
 "data[:, :, 2] =\n",
 "[[False False]\n",
 " [ True False]]"
 ]
 },
 "execution_count": 249,
 "metadata": {},
 "output_type": "execute_result"
 }
 ],
 "source": [
 "A == B # Calls eq."
] }, { "cell_type": "code", - "execution_count": null, + "execution_count": 250, "metadata": {}, - "outputs": [], + "outputs": [ + { + "data": { + "text/plain": [ + "tensor of shape (2, 2, 3) with order F\n", + "data[:, :, 0] =\n", + "[[False True]\n", + " [ True True]]\n", + "data[:, :, 1] =\n", + "[[False True]\n", + " [False True]]\n", + "data[:, :, 2] =\n", + "[[ True True]\n", + " [False True]]" + ] + }, + "execution_count": 250, + "metadata": {}, + "output_type": "execute_result" + } + ], "source": [ "A != B # Calls neq." ] }, { "cell_type": "code", - "execution_count": null, + "execution_count": 251, "metadata": {}, - "outputs": [], + "outputs": [ + { + "data": { + "text/plain": [ + "tensor of shape (2, 2, 3) with order F\n", + "data[:, :, 0] =\n", + "[[False True]\n", + " [False False]]\n", + "data[:, :, 1] =\n", + "[[False True]\n", + " [False True]]\n", + "data[:, :, 2] =\n", + "[[ True False]\n", + " [False False]]" + ] + }, + "execution_count": 251, + "metadata": {}, + "output_type": "execute_result" + } + ], "source": [ "A > B # Calls gt." ] }, { "cell_type": "code", - "execution_count": null, + "execution_count": 252, "metadata": {}, - "outputs": [], + "outputs": [ + { + "data": { + "text/plain": [ + "tensor of shape (2, 2, 3) with order F\n", + "data[:, :, 0] =\n", + "[[ True True]\n", + " [False False]]\n", + "data[:, :, 1] =\n", + "[[ True True]\n", + " [ True True]]\n", + "data[:, :, 2] =\n", + "[[ True False]\n", + " [ True False]]" + ] + }, + "execution_count": 252, + "metadata": {}, + "output_type": "execute_result" + } + ], "source": [ "A >= B # Calls ge." ] }, { "cell_type": "code", - "execution_count": null, + "execution_count": 253, "metadata": {}, - "outputs": [], + "outputs": [ + { + "data": { + "text/plain": [ + "tensor of shape (2, 2, 3) with order F\n", + "data[:, :, 0] =\n", + "[[False False]\n", + " [ True True]]\n", + "data[:, :, 1] =\n", + "[[False False]\n", + " [False False]]\n", + "data[:, :, 2] =\n", + "[[False True]\n", + " [False True]]" + ] + }, + "execution_count": 253, + "metadata": {}, + "output_type": "execute_result" + } + ], "source": [ "A < B # Calls lt." ] }, { "cell_type": "code", - "execution_count": null, + "execution_count": 254, "metadata": {}, - "outputs": [], + "outputs": [ + { + "data": { + "text/plain": [ + "tensor of shape (2, 2, 3) with order F\n", + "data[:, :, 0] =\n", + "[[ True False]\n", + " [ True True]]\n", + "data[:, :, 1] =\n", + "[[ True False]\n", + " [ True False]]\n", + "data[:, :, 2] =\n", + "[[False True]\n", + " [ True True]]" + ] + }, + "execution_count": 254, + "metadata": {}, + "output_type": "execute_result" + } + ], "source": [ "A <= B # Calls le." ] }, { "cell_type": "code", - "execution_count": null, + "execution_count": 255, "metadata": {}, - "outputs": [], + "outputs": [ + { + "data": { + "text/plain": [ + "tensor of shape (2, 2, 3) with order F\n", + "data[:, :, 0] =\n", + "[[0. 0.]\n", + " [0. 0.]]\n", + "data[:, :, 1] =\n", + "[[0. 0.]\n", + " [0. 0.]]\n", + "data[:, :, 2] =\n", + "[[0. 0.]\n", + " [0. 0.]]" + ] + }, + "execution_count": 255, + "metadata": {}, + "output_type": "execute_result" + } + ], "source": [ "A.logical_not() # Calls not." ] }, { "cell_type": "code", - "execution_count": null, + "execution_count": 256, "metadata": {}, - "outputs": [], + "outputs": [ + { + "data": { + "text/plain": [ + "tensor of shape (2, 2, 3) with order F\n", + "data[:, :, 0] =\n", + "[[1. 1.]\n", + " [1. 1.]]\n", + "data[:, :, 1] =\n", + "[[2. 1.]\n", + " [2. 2.]]\n", + "data[:, :, 2] =\n", + "[[1. 
1.]\n", + " [2. 1.]]" + ] + }, + "execution_count": 256, + "metadata": {}, + "output_type": "execute_result" + } + ], "source": [ "+A # Calls uplus." ] }, { "cell_type": "code", - "execution_count": null, + "execution_count": 257, "metadata": {}, - "outputs": [], + "outputs": [ + { + "data": { + "text/plain": [ + "tensor of shape (2, 2, 3) with order F\n", + "data[:, :, 0] =\n", + "[[-1. -1.]\n", + " [-1. -1.]]\n", + "data[:, :, 1] =\n", + "[[-2. -1.]\n", + " [-2. -2.]]\n", + "data[:, :, 2] =\n", + "[[-1. -1.]\n", + " [-2. -1.]]" + ] + }, + "execution_count": 257, + "metadata": {}, + "output_type": "execute_result" + } + ], "source": [ "-A # Calls uminus." ] }, { "cell_type": "code", - "execution_count": null, + "execution_count": 258, "metadata": {}, - "outputs": [], + "outputs": [ + { + "data": { + "text/plain": [ + "tensor of shape (2, 2, 3) with order F\n", + "data[:, :, 0] =\n", + "[[2. 1.]\n", + " [3. 3.]]\n", + "data[:, :, 1] =\n", + "[[4. 1.]\n", + " [4. 3.]]\n", + "data[:, :, 2] =\n", + "[[1. 3.]\n", + " [4. 3.]]" + ] + }, + "execution_count": 258, + "metadata": {}, + "output_type": "execute_result" + } + ], "source": [ "A + B # Calls plus." ] }, { "cell_type": "code", - "execution_count": null, + "execution_count": 259, "metadata": {}, - "outputs": [], + "outputs": [ + { + "data": { + "text/plain": [ + "tensor of shape (2, 2, 3) with order F\n", + "data[:, :, 0] =\n", + "[[ 0. 1.]\n", + " [-1. -1.]]\n", + "data[:, :, 1] =\n", + "[[0. 1.]\n", + " [0. 1.]]\n", + "data[:, :, 2] =\n", + "[[ 1. -1.]\n", + " [ 0. -1.]]" + ] + }, + "execution_count": 259, + "metadata": {}, + "output_type": "execute_result" + } + ], "source": [ "A - B # Calls minus." ] }, { "cell_type": "code", - "execution_count": null, + "execution_count": 260, "metadata": {}, - "outputs": [], + "outputs": [ + { + "data": { + "text/plain": [ + "tensor of shape (2, 2, 3) with order F\n", + "data[:, :, 0] =\n", + "[[1. 0.]\n", + " [2. 2.]]\n", + "data[:, :, 1] =\n", + "[[4. 0.]\n", + " [4. 2.]]\n", + "data[:, :, 2] =\n", + "[[0. 2.]\n", + " [4. 2.]]" + ] + }, + "execution_count": 260, + "metadata": {}, + "output_type": "execute_result" + } + ], "source": [ "A * B # Calls times." ] }, { "cell_type": "code", - "execution_count": null, + "execution_count": 261, "metadata": {}, - "outputs": [], + "outputs": [ + { + "data": { + "text/plain": [ + "tensor of shape (2, 2, 3) with order F\n", + "data[:, :, 0] =\n", + "[[5. 5.]\n", + " [5. 5.]]\n", + "data[:, :, 1] =\n", + "[[10. 5.]\n", + " [10. 10.]]\n", + "data[:, :, 2] =\n", + "[[ 5. 5.]\n", + " [10. 5.]]" + ] + }, + "execution_count": 261, + "metadata": {}, + "output_type": "execute_result" + } + ], "source": [ "5 * A # Calls mtimes." ] }, { "cell_type": "code", - "execution_count": null, + "execution_count": 262, "metadata": {}, - "outputs": [], + "outputs": [ + { + "data": { + "text/plain": [ + "tensor of shape (2, 2, 3) with order F\n", + "data[:, :, 0] =\n", + "[[1. 1.]\n", + " [1. 1.]]\n", + "data[:, :, 1] =\n", + "[[4. 1.]\n", + " [4. 2.]]\n", + "data[:, :, 2] =\n", + "[[1. 1.]\n", + " [4. 1.]]" + ] + }, + "execution_count": 262, + "metadata": {}, + "output_type": "execute_result" + } + ], "source": [ "A**B # Calls power." ] }, { "cell_type": "code", - "execution_count": null, + "execution_count": 263, "metadata": {}, - "outputs": [], + "outputs": [ + { + "data": { + "text/plain": [ + "tensor of shape (2, 2, 3) with order F\n", + "data[:, :, 0] =\n", + "[[1. 1.]\n", + " [1. 1.]]\n", + "data[:, :, 1] =\n", + "[[4. 1.]\n", + " [4. 4.]]\n", + "data[:, :, 2] =\n", + "[[1. 
1.]\n",
+       " [4. 1.]]"
+      ]
+     },
+     "execution_count": 263,
+     "metadata": {},
+     "output_type": "execute_result"
+    }
+   ],
    "source": [
     "A**2  # Calls power."
    ]
   },
   {
    "cell_type": "code",
-   "execution_count": null,
+   "execution_count": 264,
    "metadata": {},
-   "outputs": [],
+   "outputs": [
+    {
+     "data": {
+      "text/plain": [
+       "tensor of shape (2, 2, 3) with order F\n",
+       "data[:, :, 0] =\n",
+       "[[1.  inf]\n",
+       " [0.5 0.5]]\n",
+       "data[:, :, 1] =\n",
+       "[[ 1. inf]\n",
+       " [ 1.  2.]]\n",
+       "data[:, :, 2] =\n",
+       "[[inf 0.5]\n",
+       " [1.  0.5]]"
+      ]
+     },
+     "execution_count": 264,
+     "metadata": {},
+     "output_type": "execute_result"
+    }
+   ],
    "source": [
     "A / B  # Calls rdivide (elementwise A divided by B)."
    ]
   },
   {
    "cell_type": "code",
-   "execution_count": null,
+   "execution_count": 265,
    "metadata": {},
-   "outputs": [],
+   "outputs": [
+    {
+     "data": {
+      "text/plain": [
+       "tensor of shape (2, 2, 3) with order F\n",
+       "data[:, :, 0] =\n",
+       "[[2. 2.]\n",
+       " [2. 2.]]\n",
+       "data[:, :, 1] =\n",
+       "[[1. 2.]\n",
+       " [1. 1.]]\n",
+       "data[:, :, 2] =\n",
+       "[[2. 2.]\n",
+       " [1. 2.]]"
+      ]
+     },
+     "execution_count": 265,
+     "metadata": {},
+     "output_type": "execute_result"
+    }
+   ],
    "source": [
     "2 / A  # Calls ldivide (each entry of A divides 2)."
    ]
@@ -1217,9 +2172,29 @@
   },
   {
    "cell_type": "code",
-   "execution_count": null,
+   "execution_count": 266,
    "metadata": {},
-   "outputs": [],
+   "outputs": [
+    {
+     "data": {
+      "text/plain": [
+       "tensor of shape (2, 2, 3) with order F\n",
+       "data[:, :, 0] =\n",
+       "[[2. 2.]\n",
+       " [2. 2.]]\n",
+       "data[:, :, 1] =\n",
+       "[[3. 2.]\n",
+       " [3. 3.]]\n",
+       "data[:, :, 2] =\n",
+       "[[2. 2.]\n",
+       " [3. 2.]]"
+      ]
+     },
+     "execution_count": 266,
+     "metadata": {},
+     "output_type": "execute_result"
+    }
+   ],
    "source": [
     "np.random.seed(0)\n",
     "A = ttb.tensor(np.floor(3 * np.random.rand(2, 2, 3), order=\"F\"))  # Generate some data.\n",
@@ -1228,9 +2203,29 @@
   },
   {
    "cell_type": "code",
-   "execution_count": null,
+   "execution_count": 267,
    "metadata": {},
-   "outputs": [],
+   "outputs": [
+    {
+     "data": {
+      "text/plain": [
+       "tensor of shape (2, 2, 3) with order F\n",
+       "data[:, :, 0] =\n",
+       "[[1. 1.]\n",
+       " [2. 2.]]\n",
+       "data[:, :, 1] =\n",
+       "[[2. 1.]\n",
+       " [2. 2.]]\n",
+       "data[:, :, 2] =\n",
+       "[[1. 2.]\n",
+       " [2. 2.]]"
+      ]
+     },
+     "execution_count": 267,
+     "metadata": {},
+     "output_type": "execute_result"
+    }
+   ],
    "source": [
     "# Wrap np.maximum in a function with a function signature that Python's inspect.signature can handle.\n",
     "def max_elements(a, b):\n",
@@ -1242,9 +2237,29 @@
   },
   {
    "cell_type": "code",
-   "execution_count": null,
+   "execution_count": 268,
    "metadata": {},
-   "outputs": [],
+   "outputs": [
+    {
+     "data": {
+      "text/plain": [
+       "tensor of shape (2, 2, 3) with order F\n",
+       "data[:, :, 0] =\n",
+       "[[1. 1.]\n",
+       " [1. 1.]]\n",
+       "data[:, :, 1] =\n",
+       "[[2. 1.]\n",
+       " [2. 2.]]\n",
+       "data[:, :, 2] =\n",
+       "[[1. 2.]\n",
+       " [2. 
1.]]" + ] + }, + "execution_count": 268, + "metadata": {}, + "output_type": "execute_result" + } + ], "source": [ "np.random.seed(0)\n", "C = ttb.tensor(\n", @@ -1269,9 +2284,29 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 269, "metadata": {}, - "outputs": [], + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "X is a tensor of shape (2, 3, 4) with order F\n", + "data[:, :, 0] =\n", + "[[1 3 5]\n", + " [2 4 6]]\n", + "data[:, :, 1] =\n", + "[[ 7 9 11]\n", + " [ 8 10 12]]\n", + "data[:, :, 2] =\n", + "[[13 15 17]\n", + " [14 16 18]]\n", + "data[:, :, 3] =\n", + "[[19 21 23]\n", + " [20 22 24]]\n" + ] + } + ], "source": [ "X = ttb.tensor(np.arange(1, 25), shape=(2, 3, 4))\n", "print(f\"X is a {X}\")" @@ -1279,9 +2314,30 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 270, "metadata": {}, - "outputs": [], + "outputs": [ + { + "data": { + "text/plain": [ + "tensor of shape (4, 3, 2) with order F\n", + "data[:, :, 0] =\n", + "[[ 1 3 5]\n", + " [ 7 9 11]\n", + " [13 15 17]\n", + " [19 21 23]]\n", + "data[:, :, 1] =\n", + "[[ 2 4 6]\n", + " [ 8 10 12]\n", + " [14 16 18]\n", + " [20 22 24]]" + ] + }, + "execution_count": 270, + "metadata": {}, + "output_type": "execute_result" + } + ], "source": [ "X.permute(np.array((2, 1, 0))) # Reverse the modes." ] @@ -1295,9 +2351,22 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 271, "metadata": {}, - "outputs": [], + "outputs": [ + { + "data": { + "text/plain": [ + "tensor of shape (4,) with order F\n", + "data[:] =\n", + "[1 2 3 4]" + ] + }, + "execution_count": 271, + "metadata": {}, + "output_type": "execute_result" + } + ], "source": [ "X = ttb.tensor(np.arange(1, 5), (4,))\n", "X.permute(\n", @@ -1317,7 +2386,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 272, "metadata": {}, "outputs": [], "source": [ @@ -1336,7 +2405,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 273, "metadata": {}, "outputs": [], "source": [ @@ -1354,18 +2423,40 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 274, "metadata": {}, - "outputs": [], + "outputs": [ + { + "data": { + "text/plain": [ + "True" + ] + }, + "execution_count": 274, + "metadata": {}, + "output_type": "execute_result" + } + ], "source": [ "Y.issymmetric()" ] }, { "cell_type": "code", - "execution_count": null, + "execution_count": 275, "metadata": {}, - "outputs": [], + "outputs": [ + { + "data": { + "text/plain": [ + "False" + ] + }, + "execution_count": 275, + "metadata": {}, + "output_type": "execute_result" + } + ], "source": [ "Z.issymmetric(np.array((1, 2)))" ] @@ -1379,18 +2470,53 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 276, "metadata": {}, - "outputs": [], + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "tensor of shape (3, 3, 2) with order F\n", + "data[:, :, 0] =\n", + "[[0.5488135 0.60276338 0.4236548 ]\n", + " [0.43758721 0.96366276 0.79172504]\n", + " [0.56804456 0.07103606 0.0202184 ]]\n", + "data[:, :, 1] =\n", + "[[0.71518937 0.54488318 0.64589411]\n", + " [0.891773 0.38344152 0.52889492]\n", + " [0.92559664 0.0871293 0.83261985]]\n" + ] + } + ], "source": [ "print(X)" ] }, { "cell_type": "code", - "execution_count": null, + "execution_count": 277, "metadata": {}, - "outputs": [], + "outputs": [ + { + "data": { + "text/plain": [ + "tensor of shape (3, 3, 2) with order F\n", + "data[:, :, 0] =\n", + "[[0.5488135 
0.60276338 0.4236548 ]\n",
+       " [0.43758721 0.96366276 0.79172504]\n",
+       " [0.56804456 0.07103606 0.0202184 ]]\n",
+       "data[:, :, 1] =\n",
+       "[[0.71518937 0.54488318 0.64589411]\n",
+       " [0.891773   0.38344152 0.52889492]\n",
+       " [0.92559664 0.0871293  0.83261985]]"
+      ]
+     },
+     "execution_count": 277,
+     "metadata": {},
+     "output_type": "execute_result"
+    }
+   ],
    "source": [
     "X  # In the Python interface"
    ]