# Path: blob/master/invest-robot-contest_TinkoffBotTwitch-main/venv/lib/python3.8/site-packages/numpy/linalg/linalg.py
# (scraped vendored copy of numpy.linalg.linalg; viewer metadata "7771 views" removed from code text)
"""Lite version of scipy.linalg.12Notes3-----4This module is a lite version of the linalg.py module in SciPy which5contains high-level Python interface to the LAPACK library. The lite6version only accesses the following LAPACK functions: dgesv, zgesv,7dgeev, zgeev, dgesdd, zgesdd, dgelsd, zgelsd, dsyevd, zheevd, dgetrf,8zgetrf, dpotrf, zpotrf, dgeqrf, zgeqrf, zungqr, dorgqr.9"""1011__all__ = ['matrix_power', 'solve', 'tensorsolve', 'tensorinv', 'inv',12'cholesky', 'eigvals', 'eigvalsh', 'pinv', 'slogdet', 'det',13'svd', 'eig', 'eigh', 'lstsq', 'norm', 'qr', 'cond', 'matrix_rank',14'LinAlgError', 'multi_dot']1516import functools17import operator18import warnings1920from numpy.core import (21array, asarray, zeros, empty, empty_like, intc, single, double,22csingle, cdouble, inexact, complexfloating, newaxis, all, Inf, dot,23add, multiply, sqrt, fastCopyAndTranspose, sum, isfinite,24finfo, errstate, geterrobj, moveaxis, amin, amax, product, abs,25atleast_2d, intp, asanyarray, object_, matmul,26swapaxes, divide, count_nonzero, isnan, sign, argsort, sort27)28from numpy.core.multiarray import normalize_axis_index29from numpy.core.overrides import set_module30from numpy.core import overrides31from numpy.lib.twodim_base import triu, eye32from numpy.linalg import _umath_linalg333435array_function_dispatch = functools.partial(36overrides.array_function_dispatch, module='numpy.linalg')373839fortran_int = intc404142@set_module('numpy.linalg')43class LinAlgError(Exception):44"""45Generic Python-exception-derived object raised by linalg functions.4647General purpose exception class, derived from Python's exception.Exception48class, programmatically raised in linalg functions when a Linear49Algebra-related condition would prevent further correct execution of the50function.5152Parameters53----------54None5556Examples57--------58>>> from numpy import linalg as LA59>>> LA.inv(np.zeros((2,2)))60Traceback (most recent call last):61File "<stdin>", line 1, in <module>62File 
"...linalg.py", line 350,63in inv return wrap(solve(a, identity(a.shape[0], dtype=a.dtype)))64File "...linalg.py", line 249,65in solve66raise LinAlgError('Singular matrix')67numpy.linalg.LinAlgError: Singular matrix6869"""707172def _determine_error_states():73errobj = geterrobj()74bufsize = errobj[0]7576with errstate(invalid='call', over='ignore',77divide='ignore', under='ignore'):78invalid_call_errmask = geterrobj()[1]7980return [bufsize, invalid_call_errmask, None]8182# Dealing with errors in _umath_linalg83_linalg_error_extobj = _determine_error_states()84del _determine_error_states8586def _raise_linalgerror_singular(err, flag):87raise LinAlgError("Singular matrix")8889def _raise_linalgerror_nonposdef(err, flag):90raise LinAlgError("Matrix is not positive definite")9192def _raise_linalgerror_eigenvalues_nonconvergence(err, flag):93raise LinAlgError("Eigenvalues did not converge")9495def _raise_linalgerror_svd_nonconvergence(err, flag):96raise LinAlgError("SVD did not converge")9798def _raise_linalgerror_lstsq(err, flag):99raise LinAlgError("SVD did not converge in Linear Least Squares")100101def _raise_linalgerror_qr(err, flag):102raise LinAlgError("Incorrect argument found while performing "103"QR factorization")104105def get_linalg_error_extobj(callback):106extobj = list(_linalg_error_extobj) # make a copy107extobj[2] = callback108return extobj109110def _makearray(a):111new = asarray(a)112wrap = getattr(a, "__array_prepare__", new.__array_wrap__)113return new, wrap114115def isComplexType(t):116return issubclass(t, complexfloating)117118_real_types_map = {single : single,119double : double,120csingle : single,121cdouble : double}122123_complex_types_map = {single : csingle,124double : cdouble,125csingle : csingle,126cdouble : cdouble}127128def _realType(t, default=double):129return _real_types_map.get(t, default)130131def _complexType(t, default=cdouble):132return _complex_types_map.get(t, default)133134def _commonType(*arrays):135# in lite version, use higher 
precision (always double or cdouble)136result_type = single137is_complex = False138for a in arrays:139if issubclass(a.dtype.type, inexact):140if isComplexType(a.dtype.type):141is_complex = True142rt = _realType(a.dtype.type, default=None)143if rt is None:144# unsupported inexact scalar145raise TypeError("array type %s is unsupported in linalg" %146(a.dtype.name,))147else:148rt = double149if rt is double:150result_type = double151if is_complex:152t = cdouble153result_type = _complex_types_map[result_type]154else:155t = double156return t, result_type157158159# _fastCopyAndTranpose assumes the input is 2D (as all the calls in here are).160161_fastCT = fastCopyAndTranspose162163def _to_native_byte_order(*arrays):164ret = []165for arr in arrays:166if arr.dtype.byteorder not in ('=', '|'):167ret.append(asarray(arr, dtype=arr.dtype.newbyteorder('=')))168else:169ret.append(arr)170if len(ret) == 1:171return ret[0]172else:173return ret174175def _fastCopyAndTranspose(type, *arrays):176cast_arrays = ()177for a in arrays:178if a.dtype.type is not type:179a = a.astype(type)180cast_arrays = cast_arrays + (_fastCT(a),)181if len(cast_arrays) == 1:182return cast_arrays[0]183else:184return cast_arrays185186def _assert_2d(*arrays):187for a in arrays:188if a.ndim != 2:189raise LinAlgError('%d-dimensional array given. Array must be '190'two-dimensional' % a.ndim)191192def _assert_stacked_2d(*arrays):193for a in arrays:194if a.ndim < 2:195raise LinAlgError('%d-dimensional array given. 
Array must be '196'at least two-dimensional' % a.ndim)197198def _assert_stacked_square(*arrays):199for a in arrays:200m, n = a.shape[-2:]201if m != n:202raise LinAlgError('Last 2 dimensions of the array must be square')203204def _assert_finite(*arrays):205for a in arrays:206if not isfinite(a).all():207raise LinAlgError("Array must not contain infs or NaNs")208209def _is_empty_2d(arr):210# check size first for efficiency211return arr.size == 0 and product(arr.shape[-2:]) == 0212213214def transpose(a):215"""216Transpose each matrix in a stack of matrices.217218Unlike np.transpose, this only swaps the last two axes, rather than all of219them220221Parameters222----------223a : (...,M,N) array_like224225Returns226-------227aT : (...,N,M) ndarray228"""229return swapaxes(a, -1, -2)230231# Linear equations232233def _tensorsolve_dispatcher(a, b, axes=None):234return (a, b)235236237@array_function_dispatch(_tensorsolve_dispatcher)238def tensorsolve(a, b, axes=None):239"""240Solve the tensor equation ``a x = b`` for x.241242It is assumed that all indices of `x` are summed over in the product,243together with the rightmost indices of `a`, as is done in, for example,244``tensordot(a, x, axes=b.ndim)``.245246Parameters247----------248a : array_like249Coefficient tensor, of shape ``b.shape + Q``. 
`Q`, a tuple, equals250the shape of that sub-tensor of `a` consisting of the appropriate251number of its rightmost indices, and must be such that252``prod(Q) == prod(b.shape)`` (in which sense `a` is said to be253'square').254b : array_like255Right-hand tensor, which can be of any shape.256axes : tuple of ints, optional257Axes in `a` to reorder to the right, before inversion.258If None (default), no reordering is done.259260Returns261-------262x : ndarray, shape Q263264Raises265------266LinAlgError267If `a` is singular or not 'square' (in the above sense).268269See Also270--------271numpy.tensordot, tensorinv, numpy.einsum272273Examples274--------275>>> a = np.eye(2*3*4)276>>> a.shape = (2*3, 4, 2, 3, 4)277>>> b = np.random.randn(2*3, 4)278>>> x = np.linalg.tensorsolve(a, b)279>>> x.shape280(2, 3, 4)281>>> np.allclose(np.tensordot(a, x, axes=3), b)282True283284"""285a, wrap = _makearray(a)286b = asarray(b)287an = a.ndim288289if axes is not None:290allaxes = list(range(0, an))291for k in axes:292allaxes.remove(k)293allaxes.insert(an, k)294a = a.transpose(allaxes)295296oldshape = a.shape[-(an-b.ndim):]297prod = 1298for k in oldshape:299prod *= k300301a = a.reshape(-1, prod)302b = b.ravel()303res = wrap(solve(a, b))304res.shape = oldshape305return res306307308def _solve_dispatcher(a, b):309return (a, b)310311312@array_function_dispatch(_solve_dispatcher)313def solve(a, b):314"""315Solve a linear matrix equation, or system of linear scalar equations.316317Computes the "exact" solution, `x`, of the well-determined, i.e., full318rank, linear matrix equation `ax = b`.319320Parameters321----------322a : (..., M, M) array_like323Coefficient matrix.324b : {(..., M,), (..., M, K)}, array_like325Ordinate or "dependent variable" values.326327Returns328-------329x : {(..., M,), (..., M, K)} ndarray330Solution to the system a x = b. 
Returned shape is identical to `b`.331332Raises333------334LinAlgError335If `a` is singular or not square.336337See Also338--------339scipy.linalg.solve : Similar function in SciPy.340341Notes342-----343344.. versionadded:: 1.8.0345346Broadcasting rules apply, see the `numpy.linalg` documentation for347details.348349The solutions are computed using LAPACK routine ``_gesv``.350351`a` must be square and of full-rank, i.e., all rows (or, equivalently,352columns) must be linearly independent; if either is not true, use353`lstsq` for the least-squares best "solution" of the354system/equation.355356References357----------358.. [1] G. Strang, *Linear Algebra and Its Applications*, 2nd Ed., Orlando,359FL, Academic Press, Inc., 1980, pg. 22.360361Examples362--------363Solve the system of equations ``x0 + 2 * x1 = 1`` and ``3 * x0 + 5 * x1 = 2``:364365>>> a = np.array([[1, 2], [3, 5]])366>>> b = np.array([1, 2])367>>> x = np.linalg.solve(a, b)368>>> x369array([-1., 1.])370371Check that the solution is correct:372373>>> np.allclose(np.dot(a, x), b)374True375376"""377a, _ = _makearray(a)378_assert_stacked_2d(a)379_assert_stacked_square(a)380b, wrap = _makearray(b)381t, result_t = _commonType(a, b)382383# We use the b = (..., M,) logic, only if the number of extra dimensions384# match exactly385if b.ndim == a.ndim - 1:386gufunc = _umath_linalg.solve1387else:388gufunc = _umath_linalg.solve389390signature = 'DD->D' if isComplexType(t) else 'dd->d'391extobj = get_linalg_error_extobj(_raise_linalgerror_singular)392r = gufunc(a, b, signature=signature, extobj=extobj)393394return wrap(r.astype(result_t, copy=False))395396397def _tensorinv_dispatcher(a, ind=None):398return (a,)399400401@array_function_dispatch(_tensorinv_dispatcher)402def tensorinv(a, ind=2):403"""404Compute the 'inverse' of an N-dimensional array.405406The result is an inverse for `a` relative to the tensordot operation407``tensordot(a, b, ind)``, i. 
e., up to floating-point accuracy,408``tensordot(tensorinv(a), a, ind)`` is the "identity" tensor for the409tensordot operation.410411Parameters412----------413a : array_like414Tensor to 'invert'. Its shape must be 'square', i. e.,415``prod(a.shape[:ind]) == prod(a.shape[ind:])``.416ind : int, optional417Number of first indices that are involved in the inverse sum.418Must be a positive integer, default is 2.419420Returns421-------422b : ndarray423`a`'s tensordot inverse, shape ``a.shape[ind:] + a.shape[:ind]``.424425Raises426------427LinAlgError428If `a` is singular or not 'square' (in the above sense).429430See Also431--------432numpy.tensordot, tensorsolve433434Examples435--------436>>> a = np.eye(4*6)437>>> a.shape = (4, 6, 8, 3)438>>> ainv = np.linalg.tensorinv(a, ind=2)439>>> ainv.shape440(8, 3, 4, 6)441>>> b = np.random.randn(4, 6)442>>> np.allclose(np.tensordot(ainv, b), np.linalg.tensorsolve(a, b))443True444445>>> a = np.eye(4*6)446>>> a.shape = (24, 8, 3)447>>> ainv = np.linalg.tensorinv(a, ind=1)448>>> ainv.shape449(8, 3, 24)450>>> b = np.random.randn(24)451>>> np.allclose(np.tensordot(ainv, b, 1), np.linalg.tensorsolve(a, b))452True453454"""455a = asarray(a)456oldshape = a.shape457prod = 1458if ind > 0:459invshape = oldshape[ind:] + oldshape[:ind]460for k in oldshape[ind:]:461prod *= k462else:463raise ValueError("Invalid ind argument.")464a = a.reshape(prod, -1)465ia = inv(a)466return ia.reshape(*invshape)467468469# Matrix inversion470471def _unary_dispatcher(a):472return (a,)473474475@array_function_dispatch(_unary_dispatcher)476def inv(a):477"""478Compute the (multiplicative) inverse of a matrix.479480Given a square matrix `a`, return the matrix `ainv` satisfying481``dot(a, ainv) = dot(ainv, a) = eye(a.shape[0])``.482483Parameters484----------485a : (..., M, M) array_like486Matrix to be inverted.487488Returns489-------490ainv : (..., M, M) ndarray or matrix491(Multiplicative) inverse of the matrix `a`.492493Raises494------495LinAlgError496If `a` is not 
square or inversion fails.497498See Also499--------500scipy.linalg.inv : Similar function in SciPy.501502Notes503-----504505.. versionadded:: 1.8.0506507Broadcasting rules apply, see the `numpy.linalg` documentation for508details.509510Examples511--------512>>> from numpy.linalg import inv513>>> a = np.array([[1., 2.], [3., 4.]])514>>> ainv = inv(a)515>>> np.allclose(np.dot(a, ainv), np.eye(2))516True517>>> np.allclose(np.dot(ainv, a), np.eye(2))518True519520If a is a matrix object, then the return value is a matrix as well:521522>>> ainv = inv(np.matrix(a))523>>> ainv524matrix([[-2. , 1. ],525[ 1.5, -0.5]])526527Inverses of several matrices can be computed at once:528529>>> a = np.array([[[1., 2.], [3., 4.]], [[1, 3], [3, 5]]])530>>> inv(a)531array([[[-2. , 1. ],532[ 1.5 , -0.5 ]],533[[-1.25, 0.75],534[ 0.75, -0.25]]])535536"""537a, wrap = _makearray(a)538_assert_stacked_2d(a)539_assert_stacked_square(a)540t, result_t = _commonType(a)541542signature = 'D->D' if isComplexType(t) else 'd->d'543extobj = get_linalg_error_extobj(_raise_linalgerror_singular)544ainv = _umath_linalg.inv(a, signature=signature, extobj=extobj)545return wrap(ainv.astype(result_t, copy=False))546547548def _matrix_power_dispatcher(a, n):549return (a,)550551552@array_function_dispatch(_matrix_power_dispatcher)553def matrix_power(a, n):554"""555Raise a square matrix to the (integer) power `n`.556557For positive integers `n`, the power is computed by repeated matrix558squarings and matrix multiplications. If ``n == 0``, the identity matrix559of the same shape as M is returned. If ``n < 0``, the inverse560is computed and then raised to the ``abs(n)``.561562.. 
note:: Stacks of object matrices are not currently supported.563564Parameters565----------566a : (..., M, M) array_like567Matrix to be "powered".568n : int569The exponent can be any integer or long integer, positive,570negative, or zero.571572Returns573-------574a**n : (..., M, M) ndarray or matrix object575The return value is the same shape and type as `M`;576if the exponent is positive or zero then the type of the577elements is the same as those of `M`. If the exponent is578negative the elements are floating-point.579580Raises581------582LinAlgError583For matrices that are not square or that (for negative powers) cannot584be inverted numerically.585586Examples587--------588>>> from numpy.linalg import matrix_power589>>> i = np.array([[0, 1], [-1, 0]]) # matrix equiv. of the imaginary unit590>>> matrix_power(i, 3) # should = -i591array([[ 0, -1],592[ 1, 0]])593>>> matrix_power(i, 0)594array([[1, 0],595[0, 1]])596>>> matrix_power(i, -3) # should = 1/(-i) = i, but w/ f.p. elements597array([[ 0., 1.],598[-1., 0.]])599600Somewhat more sophisticated example601602>>> q = np.zeros((4, 4))603>>> q[0:2, 0:2] = -i604>>> q[2:4, 2:4] = i605>>> q # one of the three quaternion units not equal to 1606array([[ 0., -1., 0., 0.],607[ 1., 0., 0., 0.],608[ 0., 0., 0., 1.],609[ 0., 0., -1., 0.]])610>>> matrix_power(q, 2) # = -np.eye(4)611array([[-1., 0., 0., 0.],612[ 0., -1., 0., 0.],613[ 0., 0., -1., 0.],614[ 0., 0., 0., -1.]])615616"""617a = asanyarray(a)618_assert_stacked_2d(a)619_assert_stacked_square(a)620621try:622n = operator.index(n)623except TypeError as e:624raise TypeError("exponent must be an integer") from e625626# Fall back on dot for object arrays. Object arrays are not supported by627# the current implementation of matmul using einsum628if a.dtype != object:629fmatmul = matmul630elif a.ndim == 2:631fmatmul = dot632else:633raise NotImplementedError(634"matrix_power not supported for stacks of object arrays")635636if n == 0:637a = empty_like(a)638a[...] 
= eye(a.shape[-2], dtype=a.dtype)639return a640641elif n < 0:642a = inv(a)643n = abs(n)644645# short-cuts.646if n == 1:647return a648649elif n == 2:650return fmatmul(a, a)651652elif n == 3:653return fmatmul(fmatmul(a, a), a)654655# Use binary decomposition to reduce the number of matrix multiplications.656# Here, we iterate over the bits of n, from LSB to MSB, raise `a` to657# increasing powers of 2, and multiply into the result as needed.658z = result = None659while n > 0:660z = a if z is None else fmatmul(z, z)661n, bit = divmod(n, 2)662if bit:663result = z if result is None else fmatmul(result, z)664665return result666667668# Cholesky decomposition669670671@array_function_dispatch(_unary_dispatcher)672def cholesky(a):673"""674Cholesky decomposition.675676Return the Cholesky decomposition, `L * L.H`, of the square matrix `a`,677where `L` is lower-triangular and .H is the conjugate transpose operator678(which is the ordinary transpose if `a` is real-valued). `a` must be679Hermitian (symmetric if real-valued) and positive-definite. No680checking is performed to verify whether `a` is Hermitian or not.681In addition, only the lower-triangular and diagonal elements of `a`682are used. Only `L` is actually returned.683684Parameters685----------686a : (..., M, M) array_like687Hermitian (symmetric if all elements are real), positive-definite688input matrix.689690Returns691-------692L : (..., M, M) array_like693Upper or lower-triangular Cholesky factor of `a`. Returns a694matrix object if `a` is a matrix object.695696Raises697------698LinAlgError699If the decomposition fails, for example, if `a` is not700positive-definite.701702See Also703--------704scipy.linalg.cholesky : Similar function in SciPy.705scipy.linalg.cholesky_banded : Cholesky decompose a banded Hermitian706positive-definite matrix.707scipy.linalg.cho_factor : Cholesky decomposition of a matrix, to use in708`scipy.linalg.cho_solve`.709710Notes711-----712713.. 
versionadded:: 1.8.0714715Broadcasting rules apply, see the `numpy.linalg` documentation for716details.717718The Cholesky decomposition is often used as a fast way of solving719720.. math:: A \\mathbf{x} = \\mathbf{b}721722(when `A` is both Hermitian/symmetric and positive-definite).723724First, we solve for :math:`\\mathbf{y}` in725726.. math:: L \\mathbf{y} = \\mathbf{b},727728and then for :math:`\\mathbf{x}` in729730.. math:: L.H \\mathbf{x} = \\mathbf{y}.731732Examples733--------734>>> A = np.array([[1,-2j],[2j,5]])735>>> A736array([[ 1.+0.j, -0.-2.j],737[ 0.+2.j, 5.+0.j]])738>>> L = np.linalg.cholesky(A)739>>> L740array([[1.+0.j, 0.+0.j],741[0.+2.j, 1.+0.j]])742>>> np.dot(L, L.T.conj()) # verify that L * L.H = A743array([[1.+0.j, 0.-2.j],744[0.+2.j, 5.+0.j]])745>>> A = [[1,-2j],[2j,5]] # what happens if A is only array_like?746>>> np.linalg.cholesky(A) # an ndarray object is returned747array([[1.+0.j, 0.+0.j],748[0.+2.j, 1.+0.j]])749>>> # But a matrix object is returned if A is a matrix object750>>> np.linalg.cholesky(np.matrix(A))751matrix([[ 1.+0.j, 0.+0.j],752[ 0.+2.j, 1.+0.j]])753754"""755extobj = get_linalg_error_extobj(_raise_linalgerror_nonposdef)756gufunc = _umath_linalg.cholesky_lo757a, wrap = _makearray(a)758_assert_stacked_2d(a)759_assert_stacked_square(a)760t, result_t = _commonType(a)761signature = 'D->D' if isComplexType(t) else 'd->d'762r = gufunc(a, signature=signature, extobj=extobj)763return wrap(r.astype(result_t, copy=False))764765766# QR decomposition767768def _qr_dispatcher(a, mode=None):769return (a,)770771772@array_function_dispatch(_qr_dispatcher)773def qr(a, mode='reduced'):774"""775Compute the qr factorization of a matrix.776777Factor the matrix `a` as *qr*, where `q` is orthonormal and `r` is778upper-triangular.779780Parameters781----------782a : array_like, shape (..., M, N)783An array-like object with the dimensionality of at least 2.784mode : {'reduced', 'complete', 'r', 'raw'}, optional785If K = min(M, N), then786787* 'reduced' 
: returns q, r with dimensions788(..., M, K), (..., K, N) (default)789* 'complete' : returns q, r with dimensions (..., M, M), (..., M, N)790* 'r' : returns r only with dimensions (..., K, N)791* 'raw' : returns h, tau with dimensions (..., N, M), (..., K,)792793The options 'reduced', 'complete, and 'raw' are new in numpy 1.8,794see the notes for more information. The default is 'reduced', and to795maintain backward compatibility with earlier versions of numpy both796it and the old default 'full' can be omitted. Note that array h797returned in 'raw' mode is transposed for calling Fortran. The798'economic' mode is deprecated. The modes 'full' and 'economic' may799be passed using only the first letter for backwards compatibility,800but all others must be spelled out. See the Notes for more801explanation.802803804Returns805-------806q : ndarray of float or complex, optional807A matrix with orthonormal columns. When mode = 'complete' the808result is an orthogonal/unitary matrix depending on whether or not809a is real/complex. The determinant may be either +/- 1 in that810case. In case the number of dimensions in the input array is811greater than 2 then a stack of the matrices with above properties812is returned.813r : ndarray of float or complex, optional814The upper-triangular matrix or a stack of upper-triangular815matrices if the number of dimensions in the input array is greater816than 2.817(h, tau) : ndarrays of np.double or np.cdouble, optional818The array h contains the Householder reflectors that generate q819along with r. The tau array contains scaling factors for the820reflectors. 
In the deprecated 'economic' mode only h is returned.821822Raises823------824LinAlgError825If factoring fails.826827See Also828--------829scipy.linalg.qr : Similar function in SciPy.830scipy.linalg.rq : Compute RQ decomposition of a matrix.831832Notes833-----834This is an interface to the LAPACK routines ``dgeqrf``, ``zgeqrf``,835``dorgqr``, and ``zungqr``.836837For more information on the qr factorization, see for example:838https://en.wikipedia.org/wiki/QR_factorization839840Subclasses of `ndarray` are preserved except for the 'raw' mode. So if841`a` is of type `matrix`, all the return values will be matrices too.842843New 'reduced', 'complete', and 'raw' options for mode were added in844NumPy 1.8.0 and the old option 'full' was made an alias of 'reduced'. In845addition the options 'full' and 'economic' were deprecated. Because846'full' was the previous default and 'reduced' is the new default,847backward compatibility can be maintained by letting `mode` default.848The 'raw' option was added so that LAPACK routines that can multiply849arrays by q using the Householder reflectors can be used. Note that in850this case the returned arrays are of type np.double or np.cdouble and851the h array is transposed to be FORTRAN compatible. 
No routines using852the 'raw' return are currently exposed by numpy, but some are available853in lapack_lite and just await the necessary work.854855Examples856--------857>>> a = np.random.randn(9, 6)858>>> q, r = np.linalg.qr(a)859>>> np.allclose(a, np.dot(q, r)) # a does equal qr860True861>>> r2 = np.linalg.qr(a, mode='r')862>>> np.allclose(r, r2) # mode='r' returns the same r as mode='full'863True864>>> a = np.random.normal(size=(3, 2, 2)) # Stack of 2 x 2 matrices as input865>>> q, r = np.linalg.qr(a)866>>> q.shape867(3, 2, 2)868>>> r.shape869(3, 2, 2)870>>> np.allclose(a, np.matmul(q, r))871True872873Example illustrating a common use of `qr`: solving of least squares874problems875876What are the least-squares-best `m` and `y0` in ``y = y0 + mx`` for877the following data: {(0,1), (1,0), (1,2), (2,1)}. (Graph the points878and you'll see that it should be y0 = 0, m = 1.) The answer is provided879by solving the over-determined matrix equation ``Ax = b``, where::880881A = array([[0, 1], [1, 1], [1, 1], [2, 1]])882x = array([[y0], [m]])883b = array([[1], [0], [2], [1]])884885If A = qr such that q is orthonormal (which is always possible via886Gram-Schmidt), then ``x = inv(r) * (q.T) * b``. 
(In numpy practice,887however, we simply use `lstsq`.)888889>>> A = np.array([[0, 1], [1, 1], [1, 1], [2, 1]])890>>> A891array([[0, 1],892[1, 1],893[1, 1],894[2, 1]])895>>> b = np.array([1, 0, 2, 1])896>>> q, r = np.linalg.qr(A)897>>> p = np.dot(q.T, b)898>>> np.dot(np.linalg.inv(r), p)899array([ 1.1e-16, 1.0e+00])900901"""902if mode not in ('reduced', 'complete', 'r', 'raw'):903if mode in ('f', 'full'):904# 2013-04-01, 1.8905msg = "".join((906"The 'full' option is deprecated in favor of 'reduced'.\n",907"For backward compatibility let mode default."))908warnings.warn(msg, DeprecationWarning, stacklevel=3)909mode = 'reduced'910elif mode in ('e', 'economic'):911# 2013-04-01, 1.8912msg = "The 'economic' option is deprecated."913warnings.warn(msg, DeprecationWarning, stacklevel=3)914mode = 'economic'915else:916raise ValueError(f"Unrecognized mode '{mode}'")917918a, wrap = _makearray(a)919_assert_stacked_2d(a)920m, n = a.shape[-2:]921t, result_t = _commonType(a)922a = a.astype(t, copy=True)923a = _to_native_byte_order(a)924mn = min(m, n)925926if m <= n:927gufunc = _umath_linalg.qr_r_raw_m928else:929gufunc = _umath_linalg.qr_r_raw_n930931signature = 'D->D' if isComplexType(t) else 'd->d'932extobj = get_linalg_error_extobj(_raise_linalgerror_qr)933tau = gufunc(a, signature=signature, extobj=extobj)934935# handle modes that don't return q936if mode == 'r':937r = triu(a[..., :mn, :])938r = r.astype(result_t, copy=False)939return wrap(r)940941if mode == 'raw':942q = transpose(a)943q = q.astype(result_t, copy=False)944tau = tau.astype(result_t, copy=False)945return wrap(q), tau946947if mode == 'economic':948a = a.astype(result_t, copy=False)949return wrap(a)950951# mc is the number of columns in the resulting q952# matrix. 
If the mode is complete then it is953# same as number of rows, and if the mode is reduced,954# then it is the minimum of number of rows and columns.955if mode == 'complete' and m > n:956mc = m957gufunc = _umath_linalg.qr_complete958else:959mc = mn960gufunc = _umath_linalg.qr_reduced961962signature = 'DD->D' if isComplexType(t) else 'dd->d'963extobj = get_linalg_error_extobj(_raise_linalgerror_qr)964q = gufunc(a, tau, signature=signature, extobj=extobj)965r = triu(a[..., :mc, :])966967q = q.astype(result_t, copy=False)968r = r.astype(result_t, copy=False)969970return wrap(q), wrap(r)971972# Eigenvalues973974975@array_function_dispatch(_unary_dispatcher)976def eigvals(a):977"""978Compute the eigenvalues of a general matrix.979980Main difference between `eigvals` and `eig`: the eigenvectors aren't981returned.982983Parameters984----------985a : (..., M, M) array_like986A complex- or real-valued matrix whose eigenvalues will be computed.987988Returns989-------990w : (..., M,) ndarray991The eigenvalues, each repeated according to its multiplicity.992They are not necessarily ordered, nor are they necessarily993real for real matrices.994995Raises996------997LinAlgError998If the eigenvalue computation does not converge.9991000See Also1001--------1002eig : eigenvalues and right eigenvectors of general arrays1003eigvalsh : eigenvalues of real symmetric or complex Hermitian1004(conjugate symmetric) arrays.1005eigh : eigenvalues and eigenvectors of real symmetric or complex1006Hermitian (conjugate symmetric) arrays.1007scipy.linalg.eigvals : Similar function in SciPy.10081009Notes1010-----10111012.. 
versionadded:: 1.8.010131014Broadcasting rules apply, see the `numpy.linalg` documentation for1015details.10161017This is implemented using the ``_geev`` LAPACK routines which compute1018the eigenvalues and eigenvectors of general square arrays.10191020Examples1021--------1022Illustration, using the fact that the eigenvalues of a diagonal matrix1023are its diagonal elements, that multiplying a matrix on the left1024by an orthogonal matrix, `Q`, and on the right by `Q.T` (the transpose1025of `Q`), preserves the eigenvalues of the "middle" matrix. In other words,1026if `Q` is orthogonal, then ``Q * A * Q.T`` has the same eigenvalues as1027``A``:10281029>>> from numpy import linalg as LA1030>>> x = np.random.random()1031>>> Q = np.array([[np.cos(x), -np.sin(x)], [np.sin(x), np.cos(x)]])1032>>> LA.norm(Q[0, :]), LA.norm(Q[1, :]), np.dot(Q[0, :],Q[1, :])1033(1.0, 1.0, 0.0)10341035Now multiply a diagonal matrix by ``Q`` on one side and by ``Q.T`` on the other:10361037>>> D = np.diag((-1,1))1038>>> LA.eigvals(D)1039array([-1., 1.])1040>>> A = np.dot(Q, D)1041>>> A = np.dot(A, Q.T)1042>>> LA.eigvals(A)1043array([ 1., -1.]) # random10441045"""1046a, wrap = _makearray(a)1047_assert_stacked_2d(a)1048_assert_stacked_square(a)1049_assert_finite(a)1050t, result_t = _commonType(a)10511052extobj = get_linalg_error_extobj(1053_raise_linalgerror_eigenvalues_nonconvergence)1054signature = 'D->D' if isComplexType(t) else 'd->D'1055w = _umath_linalg.eigvals(a, signature=signature, extobj=extobj)10561057if not isComplexType(t):1058if all(w.imag == 0):1059w = w.real1060result_t = _realType(result_t)1061else:1062result_t = _complexType(result_t)10631064return w.astype(result_t, copy=False)106510661067def _eigvalsh_dispatcher(a, UPLO=None):1068return (a,)106910701071@array_function_dispatch(_eigvalsh_dispatcher)1072def eigvalsh(a, UPLO='L'):1073"""1074Compute the eigenvalues of a complex Hermitian or real symmetric matrix.10751076Main difference from eigh: the eigenvectors are not 
computed.10771078Parameters1079----------1080a : (..., M, M) array_like1081A complex- or real-valued matrix whose eigenvalues are to be1082computed.1083UPLO : {'L', 'U'}, optional1084Specifies whether the calculation is done with the lower triangular1085part of `a` ('L', default) or the upper triangular part ('U').1086Irrespective of this value only the real parts of the diagonal will1087be considered in the computation to preserve the notion of a Hermitian1088matrix. It therefore follows that the imaginary part of the diagonal1089will always be treated as zero.10901091Returns1092-------1093w : (..., M,) ndarray1094The eigenvalues in ascending order, each repeated according to1095its multiplicity.10961097Raises1098------1099LinAlgError1100If the eigenvalue computation does not converge.11011102See Also1103--------1104eigh : eigenvalues and eigenvectors of real symmetric or complex Hermitian1105(conjugate symmetric) arrays.1106eigvals : eigenvalues of general real or complex arrays.1107eig : eigenvalues and right eigenvectors of general real or complex1108arrays.1109scipy.linalg.eigvalsh : Similar function in SciPy.11101111Notes1112-----11131114.. 
@array_function_dispatch(_unary_dispatcher)
def eig(a):
    """
    Compute the eigenvalues and right eigenvectors of a square array.

    Parameters
    ----------
    a : (..., M, M) array
        Matrices for which the eigenvalues and right eigenvectors will
        be computed.

    Returns
    -------
    w : (..., M) array
        The eigenvalues, each repeated according to its multiplicity.
        The eigenvalues are not necessarily ordered.  The result is
        complex unless every imaginary part is zero, in which case it is
        cast to a real type.
    v : (..., M, M) array
        The normalized (unit "length") eigenvectors, such that the column
        ``v[:,i]`` is the eigenvector corresponding to the eigenvalue
        ``w[i]``.

    Raises
    ------
    LinAlgError
        If the eigenvalue computation does not converge.

    Notes
    -----
    Implemented with the LAPACK ``_geev`` routines; broadcasting rules
    apply, see the `numpy.linalg` documentation for details.
    """
    a, wrap = _makearray(a)
    _assert_stacked_2d(a)
    _assert_stacked_square(a)
    _assert_finite(a)
    t, result_t = _commonType(a)

    extobj = get_linalg_error_extobj(
        _raise_linalgerror_eigenvalues_nonconvergence)
    if isComplexType(t):
        signature = 'D->DD'
    else:
        signature = 'd->DD'
    w, vt = _umath_linalg.eig(a, signature=signature, extobj=extobj)

    # A real input whose eigenvalues all turn out real is downcast back to
    # a real result type; otherwise the result stays complex.
    if not isComplexType(t) and all(w.imag == 0.0):
        w, vt = w.real, vt.real
        result_t = _realType(result_t)
    else:
        result_t = _complexType(result_t)

    return (w.astype(result_t, copy=False),
            wrap(vt.astype(result_t, copy=False)))
@array_function_dispatch(_eigvalsh_dispatcher)
def eigh(a, UPLO='L'):
    """
    Return the eigenvalues and eigenvectors of a complex Hermitian
    (conjugate symmetric) or a real symmetric matrix.

    Parameters
    ----------
    a : (..., M, M) array
        Hermitian or real symmetric matrices whose eigenvalues and
        eigenvectors are to be computed.
    UPLO : {'L', 'U'}, optional
        Use the lower triangular part of `a` ('L', default) or the upper
        triangular part ('U').  Either way only the real part of the
        diagonal is considered, so the imaginary part of the diagonal is
        always treated as zero.

    Returns
    -------
    w : (..., M) ndarray
        The eigenvalues in ascending order, each repeated according to
        its multiplicity.
    v : {(..., M, M) ndarray, (..., M, M) matrix}
        The column ``v[:, i]`` is the normalized eigenvector corresponding
        to the eigenvalue ``w[i]``.  Returns a matrix object if `a` is a
        matrix object.

    Raises
    ------
    LinAlgError
        If the eigenvalue computation does not converge.

    Notes
    -----
    Computed with the LAPACK ``_syevd`` / ``_heevd`` routines;
    broadcasting rules apply, see the `numpy.linalg` documentation.
    """
    uplo = UPLO.upper()
    if uplo not in ('L', 'U'):
        raise ValueError("UPLO argument must be 'L' or 'U'")

    a, wrap = _makearray(a)
    _assert_stacked_2d(a)
    _assert_stacked_square(a)
    t, result_t = _commonType(a)

    extobj = get_linalg_error_extobj(
        _raise_linalgerror_eigenvalues_nonconvergence)
    # Select the LAPACK driver matching the requested triangle.
    gufunc = _umath_linalg.eigh_lo if uplo == 'L' else _umath_linalg.eigh_up

    if isComplexType(t):
        signature = 'D->dD'
    else:
        signature = 'd->dd'
    w, vt = gufunc(a, signature=signature, extobj=extobj)
    # Eigenvalues of Hermitian matrices are real; vectors keep the input type.
    return (w.astype(_realType(result_t), copy=False),
            wrap(vt.astype(result_t, copy=False)))
@array_function_dispatch(_svd_dispatcher)
def svd(a, full_matrices=True, compute_uv=True, hermitian=False):
    """
    Singular Value Decomposition.

    When `a` is a 2D array, it is factorized as ``u @ np.diag(s) @ vh
    = (u * s) @ vh``, where `u` and `vh` are 2D unitary arrays and `s` is
    a 1D array of `a`'s singular values.  Higher-dimensional input is
    handled in stacked mode over the leading dimensions.

    Parameters
    ----------
    a : (..., M, N) array_like
        A real or complex array with ``a.ndim >= 2``.
    full_matrices : bool, optional
        If True (default), `u` and `vh` have shapes ``(..., M, M)`` and
        ``(..., N, N)``; otherwise ``(..., M, K)`` and ``(..., K, N)``
        with ``K = min(M, N)``.
    compute_uv : bool, optional
        Whether to compute `u` and `vh` in addition to `s` (default True).
    hermitian : bool, optional
        If True, `a` is assumed Hermitian (symmetric if real-valued),
        enabling a more efficient eigendecomposition-based method.

    Returns
    -------
    u : { (..., M, M), (..., M, K) } array
        Unitary array(s); only returned when `compute_uv` is True.
    s : (..., K) array
        Singular values, sorted in descending order within each vector.
    vh : { (..., N, N), (..., K, N) } array
        Unitary array(s); only returned when `compute_uv` is True.

    Raises
    ------
    LinAlgError
        If SVD computation does not converge.

    Notes
    -----
    The decomposition is performed using the LAPACK routine ``_gesdd``.
    """
    import numpy as _nx
    a, wrap = _makearray(a)

    if hermitian:
        # Hermitian fast path: build the SVD from the eigendecomposition.
        # eigh/eigvalsh return ascending eigenvalues, while singular values
        # must be unsigned and descending, so reorder and push signs into vh.
        if compute_uv:
            s, u = eigh(a)
            sgn = sign(s)
            s = abs(s)
            sidx = argsort(s)[..., ::-1]
            sgn = _nx.take_along_axis(sgn, sidx, axis=-1)
            s = _nx.take_along_axis(s, sidx, axis=-1)
            u = _nx.take_along_axis(u, sidx[..., None, :], axis=-1)
            # singular values are unsigned, move the sign into v
            vt = transpose(u * sgn[..., None, :]).conjugate()
            return wrap(u), s, wrap(vt)
        s = eigvalsh(a)
        s = abs(s[..., ::-1])
        return sort(s)[..., ::-1]

    _assert_stacked_2d(a)
    t, result_t = _commonType(a)

    extobj = get_linalg_error_extobj(_raise_linalgerror_svd_nonconvergence)

    m, n = a.shape[-2:]
    wide = m < n  # selects the m- vs n-dominant LAPACK wrapper
    if compute_uv:
        if full_matrices:
            gufunc = _umath_linalg.svd_m_f if wide else _umath_linalg.svd_n_f
        else:
            gufunc = _umath_linalg.svd_m_s if wide else _umath_linalg.svd_n_s

        signature = 'D->DdD' if isComplexType(t) else 'd->ddd'
        u, s, vh = gufunc(a, signature=signature, extobj=extobj)
        return (wrap(u.astype(result_t, copy=False)),
                s.astype(_realType(result_t), copy=False),
                wrap(vh.astype(result_t, copy=False)))

    gufunc = _umath_linalg.svd_m if wide else _umath_linalg.svd_n
    signature = 'D->d' if isComplexType(t) else 'd->d'
    s = gufunc(a, signature=signature, extobj=extobj)
    return s.astype(_realType(result_t), copy=False)
@array_function_dispatch(_cond_dispatcher)
def cond(x, p=None):
    """
    Compute the condition number of a matrix.

    The condition number is returned for one of seven different norms,
    selected by `p`.

    Parameters
    ----------
    x : (..., M, N) array_like
        The matrix whose condition number is sought.
    p : {None, 1, -1, 2, -2, inf, -inf, 'fro'}, optional
        Order of the norm.  ``None`` (default) and ``2`` give the 2-norm
        condition number computed directly from the SVD; ``-2`` uses the
        smallest singular value; the remaining orders use
        ``norm(x, p) * norm(inv(x), p)``.

    Returns
    -------
    c : {float, inf}
        The condition number of the matrix.  May be infinite.

    See Also
    --------
    numpy.linalg.norm
    """
    x = asarray(x)  # in case we have a matrix
    if _is_empty_2d(x):
        raise LinAlgError("cond is not defined on empty arrays")
    if p is None or p in (2, -2):
        # 2-norm variants come straight from the singular values.
        s = svd(x, compute_uv=False)
        with errstate(all='ignore'):
            r = s[..., -1] / s[..., 0] if p == -2 else s[..., 0] / s[..., -1]
    else:
        # General norms: ||x|| * ||inv(x)||.  Inversion errors are
        # suppressed; failed entries become nans, mapped to inf below.
        _assert_stacked_2d(x)
        _assert_stacked_square(x)
        t, result_t = _commonType(x)
        signature = 'D->D' if isComplexType(t) else 'd->d'
        with errstate(all='ignore'):
            invx = _umath_linalg.inv(x, signature=signature)
            r = norm(x, p, axis=(-2, -1)) * norm(invx, p, axis=(-2, -1))
        r = r.astype(result_t, copy=False)

    # Convert nans to infs unless the original array had nan entries
    r = asarray(r)
    nan_mask = isnan(r)
    if nan_mask.any():
        nan_mask &= ~isnan(x).any(axis=(-2, -1))
        if r.ndim > 0:
            r[nan_mask] = Inf
        elif nan_mask:
            r[()] = Inf

    # Convention is to return scalars instead of 0d arrays
    return r[()] if r.ndim == 0 else r
@array_function_dispatch(_matrix_rank_dispatcher)
def matrix_rank(A, tol=None, hermitian=False):
    """
    Return matrix rank of array using SVD method.

    The rank is the number of singular values of `A` greater than `tol`.

    Parameters
    ----------
    A : {(M,), (..., M, N)} array_like
        Input vector or stack of matrices.
    tol : (...) array_like, float, optional
        Threshold below which singular values are considered zero.  If
        None, ``tol`` is ``S.max() * max(M, N) * eps`` where ``S`` holds
        the singular values and ``eps`` is the epsilon of their dtype.
        Broadcasts against the stack of matrices.
    hermitian : bool, optional
        If True, `A` is assumed to be Hermitian (symmetric if
        real-valued), enabling a more efficient method.  Default False.

    Returns
    -------
    rank : (...) array_like
        Rank of A.
    """
    A = asarray(A)
    # A 0-D or 1-D array has rank 1 unless it is identically zero.
    if A.ndim < 2:
        return int(not all(A == 0))
    S = svd(A, compute_uv=False, hermitian=hermitian)
    if tol is None:
        eps = finfo(S.dtype).eps
        tol = S.max(axis=-1, keepdims=True) * max(A.shape[-2:]) * eps
    else:
        tol = asarray(tol)[..., newaxis]
    return count_nonzero(S > tol, axis=-1)
If `a` is a `matrix` instance, then so1934is `B`.19351936Raises1937------1938LinAlgError1939If the SVD computation does not converge.19401941See Also1942--------1943scipy.linalg.pinv : Similar function in SciPy.1944scipy.linalg.pinv2 : Similar function in SciPy (SVD-based).1945scipy.linalg.pinvh : Compute the (Moore-Penrose) pseudo-inverse of a1946Hermitian matrix.19471948Notes1949-----1950The pseudo-inverse of a matrix A, denoted :math:`A^+`, is1951defined as: "the matrix that 'solves' [the least-squares problem]1952:math:`Ax = b`," i.e., if :math:`\\bar{x}` is said solution, then1953:math:`A^+` is that matrix such that :math:`\\bar{x} = A^+b`.19541955It can be shown that if :math:`Q_1 \\Sigma Q_2^T = A` is the singular1956value decomposition of A, then1957:math:`A^+ = Q_2 \\Sigma^+ Q_1^T`, where :math:`Q_{1,2}` are1958orthogonal matrices, :math:`\\Sigma` is a diagonal matrix consisting1959of A's so-called singular values, (followed, typically, by1960zeros), and then :math:`\\Sigma^+` is simply the diagonal matrix1961consisting of the reciprocals of A's singular values1962(again, followed by zeros). [1]_19631964References1965----------1966.. [1] G. Strang, *Linear Algebra and Its Applications*, 2nd Ed., Orlando,1967FL, Academic Press, Inc., 1980, pp. 
139-142.19681969Examples1970--------1971The following example checks that ``a * a+ * a == a`` and1972``a+ * a * a+ == a+``:19731974>>> a = np.random.randn(9, 6)1975>>> B = np.linalg.pinv(a)1976>>> np.allclose(a, np.dot(a, np.dot(B, a)))1977True1978>>> np.allclose(B, np.dot(B, np.dot(a, B)))1979True19801981"""1982a, wrap = _makearray(a)1983rcond = asarray(rcond)1984if _is_empty_2d(a):1985m, n = a.shape[-2:]1986res = empty(a.shape[:-2] + (n, m), dtype=a.dtype)1987return wrap(res)1988a = a.conjugate()1989u, s, vt = svd(a, full_matrices=False, hermitian=hermitian)19901991# discard small singular values1992cutoff = rcond[..., newaxis] * amax(s, axis=-1, keepdims=True)1993large = s > cutoff1994s = divide(1, s, where=large, out=s)1995s[~large] = 019961997res = matmul(transpose(vt), multiply(s[..., newaxis], transpose(u)))1998return wrap(res)199920002001# Determinant200220032004@array_function_dispatch(_unary_dispatcher)2005def slogdet(a):2006"""2007Compute the sign and (natural) logarithm of the determinant of an array.20082009If an array has a very small or very large determinant, then a call to2010`det` may overflow or underflow. This routine is more robust against such2011issues, because it computes the logarithm of the determinant rather than2012the determinant itself.20132014Parameters2015----------2016a : (..., M, M) array_like2017Input array, has to be a square 2-D array.20182019Returns2020-------2021sign : (...) array_like2022A number representing the sign of the determinant. For a real matrix,2023this is 1, 0, or -1. For a complex matrix, this is a complex number2024with absolute value 1 (i.e., it is on the unit circle), or else 0.2025logdet : (...) array_like2026The natural log of the absolute value of the determinant.20272028If the determinant is zero, then `sign` will be 0 and `logdet` will be2029-Inf. In all cases, the determinant is equal to ``sign * np.exp(logdet)``.20302031See Also2032--------2033det20342035Notes2036-----20372038.. 
@array_function_dispatch(_unary_dispatcher)
def slogdet(a):
    """
    Compute the sign and (natural) logarithm of the determinant of an array.

    More robust than `det` for matrices with very small or very large
    determinants, since it works with the logarithm of the determinant
    instead of the determinant itself.

    Parameters
    ----------
    a : (..., M, M) array_like
        Input array, has to be a square 2-D array.

    Returns
    -------
    sign : (...) array_like
        Sign of the determinant.  For a real matrix this is 1, 0, or -1;
        for a complex matrix, a complex number of absolute value 1, or 0.
    logdet : (...) array_like
        Natural log of the absolute value of the determinant; ``-Inf``
        when the determinant is zero.  In all cases the determinant
        equals ``sign * np.exp(logdet)``.

    See Also
    --------
    det

    Notes
    -----
    The determinant is computed via LU factorization using the LAPACK
    routine ``z/dgetrf``.
    """
    a = asarray(a)
    _assert_stacked_2d(a)
    _assert_stacked_square(a)
    t, result_t = _commonType(a)
    real_t = _realType(result_t)
    signature = 'D->Dd' if isComplexType(t) else 'd->dd'
    sign, logdet = _umath_linalg.slogdet(a, signature=signature)
    return (sign.astype(result_t, copy=False),
            logdet.astype(real_t, copy=False))
@array_function_dispatch(_unary_dispatcher)
def det(a):
    """
    Compute the determinant of an array.

    Parameters
    ----------
    a : (..., M, M) array_like
        Input array to compute determinants for.

    Returns
    -------
    det : (...) array_like
        Determinant of `a`.

    See Also
    --------
    slogdet : Another way to represent the determinant, more suitable
              for large matrices where underflow/overflow may occur.
    scipy.linalg.det : Similar function in SciPy.

    Notes
    -----
    The determinant is computed via LU factorization using the LAPACK
    routine ``z/dgetrf``.  Broadcasting rules apply, see the
    `numpy.linalg` documentation for details.
    """
    a = asarray(a)
    _assert_stacked_2d(a)
    _assert_stacked_square(a)
    t, result_t = _commonType(a)
    signature = 'D->D' if isComplexType(t) else 'd->d'
    result = _umath_linalg.det(a, signature=signature)
    return result.astype(result_t, copy=False)
If `b` is two-dimensional,2176the least-squares solution is calculated for each of the `K` columns2177of `b`.2178rcond : float, optional2179Cut-off ratio for small singular values of `a`.2180For the purposes of rank determination, singular values are treated2181as zero if they are smaller than `rcond` times the largest singular2182value of `a`.21832184.. versionchanged:: 1.14.02185If not set, a FutureWarning is given. The previous default2186of ``-1`` will use the machine precision as `rcond` parameter,2187the new default will use the machine precision times `max(M, N)`.2188To silence the warning and use the new default, use ``rcond=None``,2189to keep using the old behavior, use ``rcond=-1``.21902191Returns2192-------2193x : {(N,), (N, K)} ndarray2194Least-squares solution. If `b` is two-dimensional,2195the solutions are in the `K` columns of `x`.2196residuals : {(1,), (K,), (0,)} ndarray2197Sums of squared residuals: Squared Euclidean 2-norm for each column in2198``b - a @ x``.2199If the rank of `a` is < N or M <= N, this is an empty array.2200If `b` is 1-dimensional, this is a (1,) shape array.2201Otherwise the shape is (K,).2202rank : int2203Rank of matrix `a`.2204s : (min(M, N),) ndarray2205Singular values of `a`.22062207Raises2208------2209LinAlgError2210If computation does not converge.22112212See Also2213--------2214scipy.linalg.lstsq : Similar function in SciPy.22152216Notes2217-----2218If `b` is a matrix, then all array results are returned as matrices.22192220Examples2221--------2222Fit a line, ``y = mx + c``, through some noisy data-points:22232224>>> x = np.array([0, 1, 2, 3])2225>>> y = np.array([-1, 0.2, 0.9, 2.1])22262227By examining the coefficients, we see that the line should have a2228gradient of roughly 1 and cut the y-axis at, more or less, -1.22292230We can rewrite the line equation as ``y = Ap``, where ``A = [[x 1]]``2231and ``p = [[m], [c]]``. 
Now use `lstsq` to solve for `p`:22322233>>> A = np.vstack([x, np.ones(len(x))]).T2234>>> A2235array([[ 0., 1.],2236[ 1., 1.],2237[ 2., 1.],2238[ 3., 1.]])22392240>>> m, c = np.linalg.lstsq(A, y, rcond=None)[0]2241>>> m, c2242(1.0 -0.95) # may vary22432244Plot the data along with the fitted line:22452246>>> import matplotlib.pyplot as plt2247>>> _ = plt.plot(x, y, 'o', label='Original data', markersize=10)2248>>> _ = plt.plot(x, m*x + c, 'r', label='Fitted line')2249>>> _ = plt.legend()2250>>> plt.show()22512252"""2253a, _ = _makearray(a)2254b, wrap = _makearray(b)2255is_1d = b.ndim == 12256if is_1d:2257b = b[:, newaxis]2258_assert_2d(a, b)2259m, n = a.shape[-2:]2260m2, n_rhs = b.shape[-2:]2261if m != m2:2262raise LinAlgError('Incompatible dimensions')22632264t, result_t = _commonType(a, b)2265result_real_t = _realType(result_t)22662267# Determine default rcond value2268if rcond == "warn":2269# 2017-08-19, 1.14.02270warnings.warn("`rcond` parameter will change to the default of "2271"machine precision times ``max(M, N)`` where M and N "2272"are the input matrix dimensions.\n"2273"To use the future default and silence this warning "2274"we advise to pass `rcond=None`, to keep using the old, "2275"explicitly pass `rcond=-1`.",2276FutureWarning, stacklevel=3)2277rcond = -12278if rcond is None:2279rcond = finfo(t).eps * max(n, m)22802281if m <= n:2282gufunc = _umath_linalg.lstsq_m2283else:2284gufunc = _umath_linalg.lstsq_n22852286signature = 'DDd->Ddid' if isComplexType(t) else 'ddd->ddid'2287extobj = get_linalg_error_extobj(_raise_linalgerror_lstsq)2288if n_rhs == 0:2289# lapack can't handle n_rhs = 0 - so allocate the array one larger in that axis2290b = zeros(b.shape[:-2] + (m, n_rhs + 1), dtype=b.dtype)2291x, resids, rank, s = gufunc(a, b, rcond, signature=signature, extobj=extobj)2292if m == 0:2293x[...] 
= 02294if n_rhs == 0:2295# remove the item we added2296x = x[..., :n_rhs]2297resids = resids[..., :n_rhs]22982299# remove the axis we added2300if is_1d:2301x = x.squeeze(axis=-1)2302# we probably should squeeze resids too, but we can't2303# without breaking compatibility.23042305# as documented2306if rank != n or m <= n:2307resids = array([], result_real_t)23082309# coerce output arrays2310s = s.astype(result_real_t, copy=False)2311resids = resids.astype(result_real_t, copy=False)2312x = x.astype(result_t, copy=True) # Copying lets the memory in r_parts be freed2313return wrap(x), wrap(resids), rank, s231423152316def _multi_svd_norm(x, row_axis, col_axis, op):2317"""Compute a function of the singular values of the 2-D matrices in `x`.23182319This is a private utility function used by `numpy.linalg.norm()`.23202321Parameters2322----------2323x : ndarray2324row_axis, col_axis : int2325The axes of `x` that hold the 2-D matrices.2326op : callable2327This should be either numpy.amin or `numpy.amax` or `numpy.sum`.23282329Returns2330-------2331result : float or ndarray2332If `x` is 2-D, the return values is a float.2333Otherwise, it is an array with ``x.ndim - 2`` dimensions.2334The return values are either the minimum or maximum or sum of the2335singular values of the matrices, depending on whether `op`2336is `numpy.amin` or `numpy.amax` or `numpy.sum`.23372338"""2339y = moveaxis(x, (row_axis, col_axis), (-2, -1))2340result = op(svd(y, compute_uv=False), axis=-1)2341return result234223432344def _norm_dispatcher(x, ord=None, axis=None, keepdims=None):2345return (x,)234623472348@array_function_dispatch(_norm_dispatcher)2349def norm(x, ord=None, axis=None, keepdims=False):2350"""2351Matrix or vector norm.23522353This function is able to return one of eight different matrix norms,2354or one of an infinite number of vector norms (described below), depending2355on the value of the ``ord`` parameter.23562357Parameters2358----------2359x : array_like2360Input array. 
If `axis` is None, `x` must be 1-D or 2-D, unless `ord`2361is None. If both `axis` and `ord` are None, the 2-norm of2362``x.ravel`` will be returned.2363ord : {non-zero int, inf, -inf, 'fro', 'nuc'}, optional2364Order of the norm (see table under ``Notes``). inf means numpy's2365`inf` object. The default is None.2366axis : {None, int, 2-tuple of ints}, optional.2367If `axis` is an integer, it specifies the axis of `x` along which to2368compute the vector norms. If `axis` is a 2-tuple, it specifies the2369axes that hold 2-D matrices, and the matrix norms of these matrices2370are computed. If `axis` is None then either a vector norm (when `x`2371is 1-D) or a matrix norm (when `x` is 2-D) is returned. The default2372is None.23732374.. versionadded:: 1.8.023752376keepdims : bool, optional2377If this is set to True, the axes which are normed over are left in the2378result as dimensions with size one. With this option the result will2379broadcast correctly against the original `x`.23802381.. versionadded:: 1.10.023822383Returns2384-------2385n : float or ndarray2386Norm of the matrix or vector(s).23872388See Also2389--------2390scipy.linalg.norm : Similar function in SciPy.23912392Notes2393-----2394For values of ``ord < 1``, the result is, strictly speaking, not a2395mathematical 'norm', but it may still be useful for various numerical2396purposes.23972398The following norms can be calculated:23992400===== ============================ ==========================2401ord norm for matrices norm for vectors2402===== ============================ ==========================2403None Frobenius norm 2-norm2404'fro' Frobenius norm --2405'nuc' nuclear norm --2406inf max(sum(abs(x), axis=1)) max(abs(x))2407-inf min(sum(abs(x), axis=1)) min(abs(x))24080 -- sum(x != 0)24091 max(sum(abs(x), axis=0)) as below2410-1 min(sum(abs(x), axis=0)) as below24112 2-norm (largest sing. 
value) as below2412-2 smallest singular value as below2413other -- sum(abs(x)**ord)**(1./ord)2414===== ============================ ==========================24152416The Frobenius norm is given by [1]_:24172418:math:`||A||_F = [\\sum_{i,j} abs(a_{i,j})^2]^{1/2}`24192420The nuclear norm is the sum of the singular values.24212422Both the Frobenius and nuclear norm orders are only defined for2423matrices and raise a ValueError when ``x.ndim != 2``.24242425References2426----------2427.. [1] G. H. Golub and C. F. Van Loan, *Matrix Computations*,2428Baltimore, MD, Johns Hopkins University Press, 1985, pg. 1524292430Examples2431--------2432>>> from numpy import linalg as LA2433>>> a = np.arange(9) - 42434>>> a2435array([-4, -3, -2, ..., 2, 3, 4])2436>>> b = a.reshape((3, 3))2437>>> b2438array([[-4, -3, -2],2439[-1, 0, 1],2440[ 2, 3, 4]])24412442>>> LA.norm(a)24437.7459666924148342444>>> LA.norm(b)24457.7459666924148342446>>> LA.norm(b, 'fro')24477.7459666924148342448>>> LA.norm(a, np.inf)24494.02450>>> LA.norm(b, np.inf)24519.02452>>> LA.norm(a, -np.inf)24530.02454>>> LA.norm(b, -np.inf)24552.024562457>>> LA.norm(a, 1)245820.02459>>> LA.norm(b, 1)24607.02461>>> LA.norm(a, -1)2462-4.6566128774142013e-0102463>>> LA.norm(b, -1)24646.02465>>> LA.norm(a, 2)24667.7459666924148342467>>> LA.norm(b, 2)24687.348469228349534524692470>>> LA.norm(a, -2)24710.02472>>> LA.norm(b, -2)24731.8570331885190563e-016 # may vary2474>>> LA.norm(a, 3)24755.8480354764257312 # may vary2476>>> LA.norm(a, -3)24770.024782479Using the `axis` argument to compute vector norms:24802481>>> c = np.array([[ 1, 2, 3],2482... [-1, 1, 4]])2483>>> LA.norm(c, axis=0)2484array([ 1.41421356, 2.23606798, 5. 
])2485>>> LA.norm(c, axis=1)2486array([ 3.74165739, 4.24264069])2487>>> LA.norm(c, ord=1, axis=1)2488array([ 6., 6.])24892490Using the `axis` argument to compute matrix norms:24912492>>> m = np.arange(8).reshape(2,2,2)2493>>> LA.norm(m, axis=(1,2))2494array([ 3.74165739, 11.22497216])2495>>> LA.norm(m[0, :, :]), LA.norm(m[1, :, :])2496(3.7416573867739413, 11.224972160321824)24972498"""2499x = asarray(x)25002501if not issubclass(x.dtype.type, (inexact, object_)):2502x = x.astype(float)25032504# Immediately handle some default, simple, fast, and common cases.2505if axis is None:2506ndim = x.ndim2507if ((ord is None) or2508(ord in ('f', 'fro') and ndim == 2) or2509(ord == 2 and ndim == 1)):25102511x = x.ravel(order='K')2512if isComplexType(x.dtype.type):2513sqnorm = dot(x.real, x.real) + dot(x.imag, x.imag)2514else:2515sqnorm = dot(x, x)2516ret = sqrt(sqnorm)2517if keepdims:2518ret = ret.reshape(ndim*[1])2519return ret25202521# Normalize the `axis` argument to a tuple.2522nd = x.ndim2523if axis is None:2524axis = tuple(range(nd))2525elif not isinstance(axis, tuple):2526try:2527axis = int(axis)2528except Exception as e:2529raise TypeError("'axis' must be None, an integer or a tuple of integers") from e2530axis = (axis,)25312532if len(axis) == 1:2533if ord == Inf:2534return abs(x).max(axis=axis, keepdims=keepdims)2535elif ord == -Inf:2536return abs(x).min(axis=axis, keepdims=keepdims)2537elif ord == 0:2538# Zero norm2539return (x != 0).astype(x.real.dtype).sum(axis=axis, keepdims=keepdims)2540elif ord == 1:2541# special case for speedup2542return add.reduce(abs(x), axis=axis, keepdims=keepdims)2543elif ord is None or ord == 2:2544# special case for speedup2545s = (x.conj() * x).real2546return sqrt(add.reduce(s, axis=axis, keepdims=keepdims))2547# None of the str-type keywords for ord ('fro', 'nuc')2548# are valid for vectors2549elif isinstance(ord, str):2550raise ValueError(f"Invalid norm order '{ord}' for vectors")2551else:2552absx = abs(x)2553absx **= ord2554ret = 
add.reduce(absx, axis=axis, keepdims=keepdims)2555ret **= (1 / ord)2556return ret2557elif len(axis) == 2:2558row_axis, col_axis = axis2559row_axis = normalize_axis_index(row_axis, nd)2560col_axis = normalize_axis_index(col_axis, nd)2561if row_axis == col_axis:2562raise ValueError('Duplicate axes given.')2563if ord == 2:2564ret = _multi_svd_norm(x, row_axis, col_axis, amax)2565elif ord == -2:2566ret = _multi_svd_norm(x, row_axis, col_axis, amin)2567elif ord == 1:2568if col_axis > row_axis:2569col_axis -= 12570ret = add.reduce(abs(x), axis=row_axis).max(axis=col_axis)2571elif ord == Inf:2572if row_axis > col_axis:2573row_axis -= 12574ret = add.reduce(abs(x), axis=col_axis).max(axis=row_axis)2575elif ord == -1:2576if col_axis > row_axis:2577col_axis -= 12578ret = add.reduce(abs(x), axis=row_axis).min(axis=col_axis)2579elif ord == -Inf:2580if row_axis > col_axis:2581row_axis -= 12582ret = add.reduce(abs(x), axis=col_axis).min(axis=row_axis)2583elif ord in [None, 'fro', 'f']:2584ret = sqrt(add.reduce((x.conj() * x).real, axis=axis))2585elif ord == 'nuc':2586ret = _multi_svd_norm(x, row_axis, col_axis, sum)2587else:2588raise ValueError("Invalid norm order for matrices.")2589if keepdims:2590ret_shape = list(x.shape)2591ret_shape[axis[0]] = 12592ret_shape[axis[1]] = 12593ret = ret.reshape(ret_shape)2594return ret2595else:2596raise ValueError("Improper number of dimensions to norm.")259725982599# multi_dot26002601def _multidot_dispatcher(arrays, *, out=None):2602yield from arrays2603yield out260426052606@array_function_dispatch(_multidot_dispatcher)2607def multi_dot(arrays, *, out=None):2608"""2609Compute the dot product of two or more arrays in a single function call,2610while automatically selecting the fastest evaluation order.26112612`multi_dot` chains `numpy.dot` and uses optimal parenthesization2613of the matrices [1]_ [2]_. 
Depending on the shapes of the matrices,2614this can speed up the multiplication a lot.26152616If the first argument is 1-D it is treated as a row vector.2617If the last argument is 1-D it is treated as a column vector.2618The other arguments must be 2-D.26192620Think of `multi_dot` as::26212622def multi_dot(arrays): return functools.reduce(np.dot, arrays)262326242625Parameters2626----------2627arrays : sequence of array_like2628If the first argument is 1-D it is treated as row vector.2629If the last argument is 1-D it is treated as column vector.2630The other arguments must be 2-D.2631out : ndarray, optional2632Output argument. This must have the exact kind that would be returned2633if it was not used. In particular, it must have the right type, must be2634C-contiguous, and its dtype must be the dtype that would be returned2635for `dot(a, b)`. This is a performance feature. Therefore, if these2636conditions are not met, an exception is raised, instead of attempting2637to be flexible.26382639.. versionadded:: 1.19.026402641Returns2642-------2643output : ndarray2644Returns the dot product of the supplied arrays.26452646See Also2647--------2648numpy.dot : dot multiplication with two arguments.26492650References2651----------26522653.. [1] Cormen, "Introduction to Algorithms", Chapter 15.2, p. 370-3782654.. 
[2] https://en.wikipedia.org/wiki/Matrix_chain_multiplication26552656Examples2657--------2658`multi_dot` allows you to write::26592660>>> from numpy.linalg import multi_dot2661>>> # Prepare some data2662>>> A = np.random.random((10000, 100))2663>>> B = np.random.random((100, 1000))2664>>> C = np.random.random((1000, 5))2665>>> D = np.random.random((5, 333))2666>>> # the actual dot multiplication2667>>> _ = multi_dot([A, B, C, D])26682669instead of::26702671>>> _ = np.dot(np.dot(np.dot(A, B), C), D)2672>>> # or2673>>> _ = A.dot(B).dot(C).dot(D)26742675Notes2676-----2677The cost for a matrix multiplication can be calculated with the2678following function::26792680def cost(A, B):2681return A.shape[0] * A.shape[1] * B.shape[1]26822683Assume we have three matrices2684:math:`A_{10x100}, B_{100x5}, C_{5x50}`.26852686The costs for the two different parenthesizations are as follows::26872688cost((AB)C) = 10*100*5 + 10*5*50 = 5000 + 2500 = 75002689cost(A(BC)) = 10*100*50 + 100*5*50 = 50000 + 25000 = 7500026902691"""2692n = len(arrays)2693# optimization only makes sense for len(arrays) > 22694if n < 2:2695raise ValueError("Expecting at least two arrays.")2696elif n == 2:2697return dot(arrays[0], arrays[1], out=out)26982699arrays = [asanyarray(a) for a in arrays]27002701# save original ndim to reshape the result array into the proper form later2702ndim_first, ndim_last = arrays[0].ndim, arrays[-1].ndim2703# Explicitly convert vectors to 2D arrays to keep the logic of the internal2704# _multi_dot_* functions as simple as possible.2705if arrays[0].ndim == 1:2706arrays[0] = atleast_2d(arrays[0])2707if arrays[-1].ndim == 1:2708arrays[-1] = atleast_2d(arrays[-1]).T2709_assert_2d(*arrays)27102711# _multi_dot_three is much faster than _multi_dot_matrix_chain_order2712if n == 3:2713result = _multi_dot_three(arrays[0], arrays[1], arrays[2], out=out)2714else:2715order = _multi_dot_matrix_chain_order(arrays)2716result = _multi_dot(arrays, order, 0, n - 1, out=out)27172718# return proper 
shape2719if ndim_first == 1 and ndim_last == 1:2720return result[0, 0] # scalar2721elif ndim_first == 1 or ndim_last == 1:2722return result.ravel() # 1-D2723else:2724return result272527262727def _multi_dot_three(A, B, C, out=None):2728"""2729Find the best order for three arrays and do the multiplication.27302731For three arguments `_multi_dot_three` is approximately 15 times faster2732than `_multi_dot_matrix_chain_order`27332734"""2735a0, a1b0 = A.shape2736b1c0, c1 = C.shape2737# cost1 = cost((AB)C) = a0*a1b0*b1c0 + a0*b1c0*c12738cost1 = a0 * b1c0 * (a1b0 + c1)2739# cost2 = cost(A(BC)) = a1b0*b1c0*c1 + a0*a1b0*c12740cost2 = a1b0 * c1 * (a0 + b1c0)27412742if cost1 < cost2:2743return dot(dot(A, B), C, out=out)2744else:2745return dot(A, dot(B, C), out=out)274627472748def _multi_dot_matrix_chain_order(arrays, return_costs=False):2749"""2750Return a np.array that encodes the optimal order of mutiplications.27512752The optimal order array is then used by `_multi_dot()` to do the2753multiplication.27542755Also return the cost matrix if `return_costs` is `True`27562757The implementation CLOSELY follows Cormen, "Introduction to Algorithms",2758Chapter 15.2, p. 370-378. 
Note that Cormen uses 1-based indices.27592760cost[i, j] = min([2761cost[prefix] + cost[suffix] + cost_mult(prefix, suffix)2762for k in range(i, j)])27632764"""2765n = len(arrays)2766# p stores the dimensions of the matrices2767# Example for p: A_{10x100}, B_{100x5}, C_{5x50} --> p = [10, 100, 5, 50]2768p = [a.shape[0] for a in arrays] + [arrays[-1].shape[1]]2769# m is a matrix of costs of the subproblems2770# m[i,j]: min number of scalar multiplications needed to compute A_{i..j}2771m = zeros((n, n), dtype=double)2772# s is the actual ordering2773# s[i, j] is the value of k at which we split the product A_i..A_j2774s = empty((n, n), dtype=intp)27752776for l in range(1, n):2777for i in range(n - l):2778j = i + l2779m[i, j] = Inf2780for k in range(i, j):2781q = m[i, k] + m[k+1, j] + p[i]*p[k+1]*p[j+1]2782if q < m[i, j]:2783m[i, j] = q2784s[i, j] = k # Note that Cormen uses 1-based index27852786return (s, m) if return_costs else s278727882789def _multi_dot(arrays, order, i, j, out=None):2790"""Actually do the multiplication with the given order."""2791if i == j:2792# the initial call with non-None out should never get here2793assert out is None27942795return arrays[i]2796else:2797return dot(_multi_dot(arrays, order, i, order[i, j]),2798_multi_dot(arrays, order, order[i, j] + 1, j),2799out=out)280028012802