Sparse logical #269

Merged
merged 4 commits on Sep 28, 2023
13 changes: 9 additions & 4 deletions pyttb/sptensor.py
@@ -703,7 +703,7 @@ def logical_and(self, B: Union[float, sptensor, ttb.tensor]) -> sptensor:
if B == 0:
C = sptensor(shape=self.shape)
else:
newvals = self.vals == B
newvals = np.ones_like(self.vals)
C = sptensor(self.subs, newvals, self.shape)
return C
# Case 2: Argument is a tensor of some sort
@@ -718,6 +718,7 @@ def logical_and(self, B: Union[float, sptensor, ttb.tensor]) -> sptensor:
self.shape,
lambda x: len(x) == 2,
)
C.vals = C.vals.astype(self.vals.dtype)

return C
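For reference, a minimal sketch of the behavior these two hunks target (assumed usage of pyttb's public API; this snippet is not part of the diff): logical_and should return stored values of one in the same dtype as the input's vals, rather than promoting to bool.

import numpy as np
import pyttb as ttb

subs = np.array([[0, 0], [1, 1]])
vals = np.array([[1.5], [2.5]])
S = ttb.sptensor(subs, vals, (2, 2))

C = S.logical_and(S)
print(C.vals)        # ones on the shared nonzero pattern
print(C.vals.dtype)  # matches S.vals.dtype (float64 here)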

@@ -741,7 +742,7 @@ def logical_not(self) -> sptensor:
allsubs = self.allsubs()
subsIdx = tt_setdiff_rows(allsubs, self.subs)
subs = allsubs[subsIdx]
trueVector = np.ones(shape=(subs.shape[0], 1), dtype=bool)
trueVector = np.ones(shape=(subs.shape[0], 1), dtype=self.vals.dtype)
return sptensor(subs, trueVector, self.shape)

@overload
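A rough illustration of the logical_not change above (assumed usage, not part of the diff): the complement pattern is now returned with the same dtype as self.vals instead of bool.

import numpy as np
import pyttb as ttb

S = ttb.sptensor(np.array([[0, 0]]), np.array([[3.0]]), (2, 2))
N = S.logical_not()   # ones at every position where S is zero
print(N.subs)         # the three zero positions of S
print(N.vals.dtype)   # float64, matching S.vals.dtype, rather than bool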
@@ -771,12 +772,14 @@ def logical_or(
assert False, "Logical Or requires tensors of the same size"

if isinstance(B, ttb.sptensor):
return sptensor.from_aggregator(
C = sptensor.from_aggregator(
np.vstack((self.subs, B.subs)),
np.ones((self.subs.shape[0] + B.subs.shape[0], 1)),
self.shape,
lambda x: len(x) >= 1,
)
C.vals = C.vals.astype(self.vals.dtype)
return C

assert False, "Sptensor Logical Or argument must be scalar or sptensor"
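A hedged sketch of the sparse/sparse logical_or path (assumed usage, not part of the diff): the aggregator produces booleans, and the added astype casts them back to the input's dtype.

import numpy as np
import pyttb as ttb

A = ttb.sptensor(np.array([[0, 0]]), np.array([[2.0]]), (2, 2))
B = ttb.sptensor(np.array([[1, 1]]), np.array([[4.0]]), (2, 2))

C = A.logical_or(B)
print(C.subs)        # union of the two nonzero patterns
print(C.vals.dtype)  # cast back to A.vals.dtype by the new astype call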

@@ -814,9 +817,11 @@ def logical_xor(
assert False, "Logical XOR requires tensors of the same size"

subs = np.vstack((self.subs, other.subs))
return ttb.sptensor.from_aggregator(
result = ttb.sptensor.from_aggregator(
subs, np.ones((len(subs), 1)), self.shape, lambda x: len(x) == 1
)
result.vals = result.vals.astype(self.vals.dtype)
return result

assert False, "The argument must be an sptensor, tensor or scalar"
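Similarly for logical_xor (a sketch under the same assumptions): entries nonzero in exactly one operand survive, and the result's vals are cast back to self.vals.dtype.

import numpy as np
import pyttb as ttb

A = ttb.sptensor(np.array([[0, 0], [1, 1]]), np.array([[1.0], [2.0]]), (2, 2))
B = ttb.sptensor(np.array([[1, 1]]), np.array([[5.0]]), (2, 2))

C = A.logical_xor(B)  # only (0, 0) is nonzero in exactly one operand
print(C.subs)         # [[0, 0]]
print(C.vals.dtype)   # matches A.vals.dtype after the added cast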

47 changes: 24 additions & 23 deletions pyttb/tensor.py
@@ -643,13 +643,13 @@ def logical_and(self, other: Union[float, tensor]) -> tensor:

Examples
--------
>>> T = ttb.tensor(np.ones((2,2), dtype=bool))
>>> T = ttb.tenones((2,2))
>>> T.logical_and(T).collapse() # All true
4
4.0
"""

def logical_and(x, y):
return np.logical_and(x, y)
return np.logical_and(x, y).astype(dtype=x.dtype)

return tt_tenfun(logical_and, self, other)
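Why the added .astype(...) is needed (plain NumPy behavior, not pyttb code): np.logical_and always returns a boolean array, so casting restores the operand's dtype.

import numpy as np

x = np.ones((2, 2))   # float64
y = np.zeros((2, 2))  # float64
print(np.logical_and(x, y).dtype)                  # bool
print(np.logical_and(x, y).astype(x.dtype).dtype)  # float64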

@@ -659,11 +659,12 @@ def logical_not(self) -> tensor:

Examples
--------
>>> T = ttb.tensor(np.ones((2,2), dtype=bool))
>>> T = ttb.tenones((2,2))
>>> T.logical_not().collapse() # All false
0
0.0
"""
return ttb.tensor(np.logical_not(self.data), copy=False)
# Np logical not dtype argument seems to not work here
return ttb.tensor(np.logical_not(self.data).astype(self.data.dtype), copy=False)

def logical_or(self, other: Union[float, tensor]) -> tensor:
"""
@@ -676,13 +677,13 @@ def logical_or(self, other: Union[float, tensor]) -> tensor:

Examples
--------
>>> T = ttb.tensor(np.ones((2,2), dtype=bool))
>>> T = ttb.tenones((2,2))
>>> T.logical_or(T.logical_not()).collapse() # All true
4
4.0
"""

def tensor_or(x, y):
return np.logical_or(x, y)
return np.logical_or(x, y).astype(x.dtype)

return tt_tenfun(tensor_or, self, other)

@@ -697,13 +698,13 @@ def logical_xor(self, other: Union[float, tensor]) -> tensor:

Examples
--------
>>> T = ttb.tensor(np.ones((2,2), dtype=bool))
>>> T = ttb.tenones((2,2))
>>> T.logical_xor(T.logical_not()).collapse() # All true
4
4.0
"""

def tensor_xor(x, y):
return np.logical_xor(x, y)
return np.logical_xor(x, y).astype(dtype=x.dtype)

return tt_tenfun(tensor_xor, self, other)
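Taken together, a small end-to-end check mirroring the updated doctests above (assumed usage of tenones and collapse; not new functionality):

import pyttb as ttb

T = ttb.tenones((2, 2))
print(T.logical_and(T).collapse())                # 4.0, stays float
print(T.logical_not().collapse())                 # 0.0
print(T.logical_or(T.logical_not()).collapse())   # 4.0
print(T.logical_xor(T.logical_not()).collapse())  # 4.0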

@@ -723,7 +724,7 @@ def mask(self, W: tensor) -> np.ndarray:
Examples
--------
>>> T = ttb.tensor(np.array([[1, 2], [3, 4]]))
>>> W = ttb.tensor(np.ones((2,2)))
>>> W = ttb.tenones((2,2))
>>> T.mask(W)
array([1, 3, 2, 4])
"""
@@ -758,7 +759,7 @@ def mttkrp( # noqa: PLR0912

Examples
--------
>>> T = ttb.tensor(np.ones((2,2,2)))
>>> T = ttb.tenones((2,2,2))
>>> U = [np.ones((2,2))] * 3
>>> T.mttkrp(U, 2)
array([[4., 4.],
Expand Down Expand Up @@ -841,7 +842,7 @@ def mttkrps(self, U: Union[ttb.ktensor, List[np.ndarray]]) -> List[np.ndarray]:

Examples
--------
>>> T = ttb.tensor(np.ones((2,2,2)))
>>> T = ttb.tenones((2,2,2))
>>> U = [np.ones((2,2))] * 3
>>> T.mttkrps(U)
[array([[4., 4.],
Expand Down Expand Up @@ -876,7 +877,7 @@ def ndims(self) -> int:

Examples
--------
>>> T = ttb.tensor(np.ones((2,2)))
>>> T = ttb.tenones((2,2))
>>> T.ndims
2
"""
@@ -891,7 +892,7 @@ def nnz(self) -> int:

Examples
--------
>>> T = ttb.tensor(np.ones((2,2,2)))
>>> T = ttb.tenones((2,2,2))
>>> T.nnz
8
"""
@@ -904,7 +905,7 @@ def norm(self) -> float:

Examples
--------
>>> T = ttb.tensor(np.ones((2,2,2,2)))
>>> T = ttb.tenones((2,2,2,2))
>>> T.norm()
4.0
"""
@@ -1025,7 +1026,7 @@ def reshape(self, shape: Tuple[int, ...]) -> tensor:

Examples
--------
>>> T1 = ttb.tensor(np.ones((2,2)))
>>> T1 = ttb.tenones((2,2))
>>> T1.shape
(2, 2)
>>> T2 = T1.reshape((4,1))
Expand Down Expand Up @@ -1152,7 +1153,7 @@ def symmetrize( # noqa: PLR0912,PLR0915

Examples
--------
>>> T = ttb.tensor(np.ones((2,2,2)))
>>> T = ttb.tenones((2,2,2))
>>> T.symmetrize(np.array([0,2]))
tensor of shape (2, 2, 2)
data[0, :, :] =
Expand Down Expand Up @@ -1317,7 +1318,7 @@ def ttm(

Examples
--------
>>> T = ttb.tensor(np.ones((2,2,2,2)))
>>> T = ttb.tenones((2,2,2,2))
>>> A = 2*np.ones((2,1))
>>> T.ttm([A,A], dims=[0,1], transpose=True)
tensor of shape (1, 1, 2, 2)
Expand Down Expand Up @@ -1665,7 +1666,7 @@ def __setitem__(self, key, value):

Examples
--------
>>> T = tensor(np.ones((3,4,2)))
>>> T = tenones((3,4,2))
>>> # replaces subtensor
>>> T[0:2,0:2,0] = np.ones((2,2))
>>> # replaces two elements
Expand Down Expand Up @@ -1810,7 +1811,7 @@ def __getitem__(self, item): # noqa: PLR0912

Examples
--------
>>> T = tensor(np.ones((3,4,2,1)))
>>> T = tenones((3,4,2,1))
>>> T[0,0,0,0] # produces a scalar
1.0
>>> # produces a tensor of order 1 and size 1
18 changes: 16 additions & 2 deletions tests/test_sptensor.py
@@ -174,11 +174,14 @@ def test_sptensor_and_scalar(sample_sptensor):
assert b.subs.size == 0
assert b.vals.size == 0
assert b.shape == data["shape"]
assert b.vals.dtype == sptensorInstance.vals.dtype

# Sparsity pattern check not exact value equality
b = sptensorInstance.logical_and(0.5)
assert np.array_equal(b.subs, data["subs"])
assert np.array_equal(b.vals, np.array([[True], [False], [False], [False]]))
assert np.array_equal(b.vals, np.array([[True], [True], [True], [True]]))
assert b.shape == data["shape"]
assert b.vals.dtype == sptensorInstance.vals.dtype


def test_sptensor_and_sptensor(sample_sptensor):
@@ -188,6 +191,7 @@ def test_sptensor_and_sptensor(sample_sptensor):
assert np.array_equal(b.subs, data["subs"])
assert np.array_equal(b.vals, np.array([[True], [True], [True], [True]]))
assert b.shape == data["shape"]
assert b.vals.dtype == sptensorInstance.vals.dtype

with pytest.raises(AssertionError) as excinfo:
sptensorInstance.logical_and(
@@ -207,6 +211,7 @@ def test_sptensor_and_tensor(sample_sptensor):
b = sptensorInstance.logical_and(sptensorInstance.to_tensor())
assert np.array_equal(b.subs, data["subs"])
assert np.array_equal(b.vals, np.ones(data["vals"].shape))
assert b.vals.dtype == sptensorInstance.vals.dtype


def test_sptensor_full(sample_sptensor):
@@ -685,6 +690,7 @@ def test_sptensor_logical_not(sample_sptensor):
assert all(notSptensorInstance.vals == 1)
assert np.array_equal(notSptensorInstance.subs, np.array(result))
assert notSptensorInstance.shape == data["shape"]
assert notSptensorInstance.vals.dtype == sptensorInstance.vals.dtype


def test_sptensor_logical_or(sample_sptensor):
Expand All @@ -695,20 +701,24 @@ def test_sptensor_logical_or(sample_sptensor):
assert sptensorOr.shape == data["shape"]
assert np.array_equal(sptensorOr.subs, data["subs"])
assert np.array_equal(sptensorOr.vals, np.ones((data["vals"].shape[0], 1)))
assert sptensorOr.vals.dtype == sptensorInstance.vals.dtype

# Sptensor logical or with tensor
sptensorOr = sptensorInstance.logical_or(sptensorInstance.to_tensor())
nonZeroMatrix = np.zeros(data["shape"])
nonZeroMatrix[tuple(data["subs"].transpose())] = 1
assert np.array_equal(sptensorOr.data, nonZeroMatrix)
assert sptensorOr.data.dtype == sptensorInstance.vals.dtype

# Sptensor logical or with scalar, 0
sptensorOr = sptensorInstance.logical_or(0)
assert np.array_equal(sptensorOr.data, nonZeroMatrix)
assert sptensorOr.data.dtype == sptensorInstance.vals.dtype

# Sptensor logical or with scalar, not 0
sptensorOr = sptensorInstance.logical_or(1)
assert np.array_equal(sptensorOr.data, np.ones(data["shape"]))
assert sptensorOr.data.dtype == sptensorInstance.vals.dtype

# Sptensor logical or with wrong shape sptensor
with pytest.raises(AssertionError) as excinfo:
@@ -1165,19 +1175,23 @@ def test_sptensor_logical_xor(sample_sptensor):
# Sptensor logical xor with scalar, 0
sptensorXor = sptensorInstance.logical_xor(0)
assert np.array_equal(sptensorXor.data, nonZeroMatrix)
assert sptensorXor.data.dtype == sptensorInstance.vals.dtype

# Sptensor logical xor with scalar, not 0
sptensorXor = sptensorInstance.logical_xor(1)
assert np.array_equal(sptensorXor.data, sptensorInstance.logical_not().full().data)
assert sptensorXor.data.dtype == sptensorInstance.vals.dtype

# Sptensor logical xor with another sptensor
sptensorXor = sptensorInstance.logical_xor(sptensorInstance)
assert sptensorXor.shape == data["shape"]
assert sptensorXor.vals.size == 0
assert sptensorXor.vals.dtype == sptensorInstance.vals.dtype

# Sptensor logical xor with tensor
sptensorXor = sptensorInstance.logical_xor(sptensorInstance.to_tensor())
assert np.array_equal(sptensorXor.data, np.zeros(data["shape"], dtype=bool))
assert np.array_equal(sptensorXor.data, np.zeros(data["shape"]))
assert sptensorXor.data.dtype == sptensorInstance.vals.dtype

# Sptensor logical xor with wrong shape sptensor
with pytest.raises(AssertionError) as excinfo: