Merge pull request #34 from Katterrina/issue29
[FIX] issue #29
saratheriver committed Apr 8, 2024
2 parents 70151fa + 5ccb045 commit 2591cce
Showing 9 changed files with 37 additions and 37 deletions.
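For context (not part of the original commit message): the fix replaces NumPy's deprecated scalar-type aliases with the Python builtins, since np.bool, np.int, np.float, np.object and np.str were deprecated in NumPy 1.20 and removed in 1.24. A minimal sketch of the pattern applied throughout the files below — the arrays here are illustrative only, not the toolbox's own data:

import numpy as np

# Before (DeprecationWarning from NumPy 1.20, AttributeError on NumPy >= 1.24):
#     mask = np.zeros(10, dtype=np.bool)
#     weights = np.ones((4, 4), dtype=np.float)

# After: pass the Python builtin wherever a generic boolean/float/int/object type is meant...
mask = np.zeros(10, dtype=bool)
weights = np.ones((4, 4), dtype=float)

# ...or an explicit NumPy dtype where a fixed width is intended, as done below for
# the mesh adjacency/distance helpers:
adj = np.zeros((4, 4), dtype=np.float64)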
32 changes: 16 additions & 16 deletions enigmatoolbox/datasets/base.py
@@ -38,8 +38,8 @@ def load_mask(name='midline', surface_name="fsa5", join=False):
name = ''
else:
name = '_' + name
-mask_lh = np.loadtxt(ipth.format('lh', name), dtype=np.bool)
-mask_rh = np.loadtxt(ipth.format('rh', name), dtype=np.bool)
+mask_lh = np.loadtxt(ipth.format('lh', name), dtype=bool)
+mask_rh = np.loadtxt(ipth.format('rh', name), dtype=bool)
if join:
return np.concatenate([mask_lh, mask_rh])

@@ -49,8 +49,8 @@ def load_mask(name='midline', surface_name="fsa5", join=False):
name = ''
else:
print('sorry there\'s no other option for now')
-mask_lh = np.loadtxt(ipth.format('lh', name), dtype=np.bool)
-mask_rh = np.loadtxt(ipth.format('rh', name), dtype=np.bool)
+mask_lh = np.loadtxt(ipth.format('lh', name), dtype=bool)
+mask_rh = np.loadtxt(ipth.format('rh', name), dtype=bool)
if join:
return np.concatenate([mask_lh, mask_rh])

@@ -262,10 +262,10 @@ def load_sc(parcellation='aparc'):
sctxL = 'strucLabels_sctx_' + parcellation + '.csv'
sctxL_ipth = os.path.join(root_pth, 'matrices', 'hcp_connectivity', sctxL)

-return np.loadtxt(ctx_ipth, dtype=np.float, delimiter=','), \
-np.loadtxt(ctxL_ipth, dtype='str', delimiter=','), \
-np.loadtxt(sctx_ipth, dtype=np.float, delimiter=','), \
-np.loadtxt(sctxL_ipth, dtype='str', delimiter=',')
+return np.loadtxt(ctx_ipth, dtype=float, delimiter=','), \
+np.loadtxt(ctxL_ipth, dtype=str, delimiter=','), \
+np.loadtxt(sctx_ipth, dtype=float, delimiter=','), \
+np.loadtxt(sctxL_ipth, dtype=str, delimiter=',')


def load_fc(parcellation='aparc'):
@@ -316,10 +316,10 @@ def load_fc(parcellation='aparc'):
sctxL = 'funcLabels_sctx_' + parcellation + '.csv'
sctxL_ipth = os.path.join(root_pth, 'matrices', 'hcp_connectivity', sctxL)

-return np.loadtxt(ctx_ipth, dtype=np.float, delimiter=','), \
-np.loadtxt(ctxL_ipth, dtype='str', delimiter=','), \
-np.loadtxt(sctx_ipth, dtype=np.float, delimiter=','), \
-np.loadtxt(sctxL_ipth, dtype='str', delimiter=',')
+return np.loadtxt(ctx_ipth, dtype=float, delimiter=','), \
+np.loadtxt(ctxL_ipth, dtype=str, delimiter=','), \
+np.loadtxt(sctx_ipth, dtype=float, delimiter=','), \
+np.loadtxt(sctxL_ipth, dtype=str, delimiter=',')


def load_sc_as_one(parcellation='aparc'):
@@ -353,8 +353,8 @@ def load_sc_as_one(parcellation='aparc'):
ctxL = 'strucLabels_with_sctx_' + parcellation + '.csv'
ctxL_ipth = os.path.join(root_pth, 'matrices', 'hcp_connectivity', ctxL)

-return np.loadtxt(ctx_ipth, dtype=np.float, delimiter=','), \
-np.loadtxt(ctxL_ipth, dtype='str', delimiter=','), \
+return np.loadtxt(ctx_ipth, dtype=float, delimiter=','), \
+np.loadtxt(ctxL_ipth, dtype=str, delimiter=','), \


def load_fc_as_one(parcellation='aparc'):
@@ -389,8 +389,8 @@ def load_fc_as_one(parcellation='aparc'):
ctxL = 'funcLabels_with_sctx_' + parcellation + '.csv'
ctxL_ipth = os.path.join(root_pth, 'matrices', 'hcp_connectivity', ctxL)

-return np.loadtxt(ctx_ipth, dtype=np.float, delimiter=','), \
-np.loadtxt(ctxL_ipth, dtype='str', delimiter=','), \
+return np.loadtxt(ctx_ipth, dtype=float, delimiter=','), \
+np.loadtxt(ctxL_ipth, dtype=str, delimiter=','), \


def structural_covariance(zdata):
4 changes: 2 additions & 2 deletions enigmatoolbox/histology/base.py
@@ -48,10 +48,10 @@ def economo_koskinas_spider(parcel_data=None, parcellation='aparc_fsa5', fill=0,
# Average within ve classes
if 'fsa5' in parcellation:
parc_pth = os.path.dirname(os.path.dirname(__file__)) + '/datasets/parcellations/economo_koskinas_fsa5.csv'
-ve = np.loadtxt(parc_pth, dtype=np.int)
+ve = np.loadtxt(parc_pth, dtype=int)
elif 'conte69' in parcellation:
parc_pth = os.path.dirname(os.path.dirname(__file__)) + '/datasets/parcellations/economo_koskinas_conte69.csv'
-ve = np.loadtxt(parc_pth, dtype=np.int)
+ve = np.loadtxt(parc_pth, dtype=int)

ve_class = np.zeros((5, 1))
for ii in range(5):
10 changes: 5 additions & 5 deletions enigmatoolbox/mesh/array_operations.py
@@ -430,7 +430,7 @@ def compute_point_area(surf, cell_area=None, area_as='one_third'):
#
# adj = me.get_ring_adjacency(surf, n_ring=radius, include_self=False)
# mask = labeling == dilate_label
-# am = adj[mask].max(axis=0).A[0].astype(np.bool)
+# am = adj[mask].max(axis=0).A[0].astype(bool)
# am &= labeling == background
# labeling[am] = dilate_label
#
@@ -610,9 +610,9 @@ def propagate_labeling(surf, labeling, no_label=np.nan, mask=None, alpha=0.99,
# Graph matrix
if mode == 'connectivity':
adj = me.get_ring_adjacency(surf, n_ring=n_ring, include_self=False,
-dtype=np.float)
+dtype=np.float64)
else:
-adj = me.get_ring_distance(surf, n_ring=n_ring, dtype=np.float)
+adj = me.get_ring_distance(surf, n_ring=n_ring)
adj.data[:] = np.exp(-adj.data/n_ring**2)

if mask is not None:
@@ -729,7 +729,7 @@ def smooth_array(surf, point_data, n_iter=5, mask=None, kernel='gaussian',

if kernel == 'uniform':
w = me.get_immediate_adjacency(surf, include_self=False, mask=mask,
-dtype=np.float)
+dtype=np.float64)
elif kernel == 'gaussian':
w = me.get_immediate_distance(surf, metric='sqeuclidean', mask=mask)
if sigma is None:
@@ -754,7 +754,7 @@
if np.issubdtype(pd.dtype, np.floating):
spd = pd.copy()
else:
-spd = pd.astype(np.float)
+spd = pd.astype(np.float64)

for _ in range(n_iter):
wp = w.dot(spd)
2 changes: 1 addition & 1 deletion enigmatoolbox/mesh/mesh_correspondence.py
@@ -25,7 +25,7 @@ def _find_correspondence(surf, ref_surf, eps=0, n_jobs=1, use_cell=False):
tree = cKDTree(ref_points, leafsize=20, compact_nodes=False,
copy_data=False, balanced_tree=False)
d, idx = tree.query(points, k=1, eps=0, n_jobs=n_jobs,
-distance_upper_bound=eps+np.finfo(np.float).eps)
+distance_upper_bound=eps+np.finfo(np.float64).eps)

if np.isinf(d).any():
raise ValueError('Cannot find correspondences. Try increasing '
14 changes: 7 additions & 7 deletions enigmatoolbox/mesh/mesh_elements.py
@@ -200,7 +200,7 @@ def get_cell_neighbors(surf, include_self=True, with_edge=True,
ce.eliminate_zeros()

else:
-ce = get_cell2point_connectivity(surf, dtype=np.bool)
+ce = get_cell2point_connectivity(surf, dtype=bool)
ce *= ce.T
if not include_self:
ce.setdiag(0)
@@ -242,7 +242,7 @@ def get_immediate_adjacency(surf, include_self=True, mask=None,
Immediate adjacency: set to one all entries of points that
share and edge with current point.
"""
-adj = get_point2cell_connectivity(surf, mask=mask, dtype=np.bool)
+adj = get_point2cell_connectivity(surf, mask=mask, dtype=bool)
adj *= adj.T
if not include_self:
adj.setdiag(0)
@@ -287,7 +287,7 @@ def get_ring_adjacency(surf, n_ring=1, include_self=True, mask=None,
mask=mask, dtype=dtype)

adj = get_immediate_adjacency(surf, include_self=True, mask=mask,
-dtype=np.bool)
+dtype=bool)
adj **= n_ring
if not include_self:
adj.setdiag(0)
@@ -320,7 +320,7 @@ def get_edges(surf, mask=None):
"""
adj = get_immediate_adjacency(surf, include_self=False, mask=mask,
-dtype=np.bool)
+dtype=bool)
adj.sort_indices()
adj_ud = ssp.triu(adj, k=1, format='coo')
edges = np.column_stack([adj_ud.row, adj_ud.col])
@@ -620,7 +620,7 @@ def get_boundary_cells(surf, with_edge=True):


def get_immediate_distance(surf, metric='euclidean', mask=None,
-dtype=np.float):
+dtype=float):
"""Get immediate distance matrix.
Parameters
@@ -668,7 +668,7 @@ def get_immediate_distance(surf, metric='euclidean', mask=None,


def get_ring_distance(surf, n_ring=1, metric='geodesic', mask=None,
-dtype=np.float):
+dtype=float):
"""Get distance matrix in the neighborhood of each point.
Parameters
Expand All @@ -683,7 +683,7 @@ def get_ring_distance(surf, n_ring=1, metric='geodesic', mask=None,
Binary mask. If specified, only use points within the mask.
Default is None.
dtype : dtype, optional
-Data type. Default is np.float.
+Data type. Default is float.
Returns
-------
2 changes: 1 addition & 1 deletion enigmatoolbox/mesh/mesh_operations.py
@@ -488,7 +488,7 @@ def downsample_with_parcellation(surf, labeling, name='parcel',
adj = get_immediate_adjacency(surf)
adj_neigh = adj.multiply(labeling_con).tocsr()

-adj_small = np.zeros((nlabs, nlabs), dtype=np.bool)
+adj_small = np.zeros((nlabs, nlabs), dtype=bool)
for i in range(nlabs):
arow = adj_neigh[labeling_con == i]
for j in range(i + 1, nlabs):
2 changes: 1 addition & 1 deletion enigmatoolbox/plotting/base.py
@@ -489,7 +489,7 @@ def AddRenderer(self, row, col, renderer=None, **kwargs):

def AddRenderers(self, **kwargs):
"""Some description here."""
-ren = np.empty((self.nrow, self.ncol), dtype=np.object)
+ren = np.empty((self.nrow, self.ncol), dtype=object)
for i in range(self.nrow):
for j in range(self.ncol):
ren[i, j] = super().AddRenderer(row=i, col=j, **kwargs)
6 changes: 3 additions & 3 deletions enigmatoolbox/utils/parcellation.py
@@ -210,7 +210,7 @@ def parcel_to_surface(source_val, target_lab, mask=None, fill=0, source_lab=None
if isinstance(target_lab, str):
fname = target_lab + '.csv'
parc_pth = os.path.dirname(os.path.dirname(__file__)) + '/datasets/parcellations/' + fname
-target_lab = np.loadtxt(parc_pth, dtype=np.int)
+target_lab = np.loadtxt(parc_pth, dtype=int)

if source_val.size == 68 and np.unique(target_lab).size == 71:
a_idx = list(range(1, 4)) + list(range(5, 39)) + list(range(40, 71))
@@ -267,7 +267,7 @@ def fred(x, w): return np.min(x, axis=axis)


def surface_to_parcel(values, labels, weights=None, target_labels=None,
-red_op='mean', axis=0, dtype=np.float):
+red_op='mean', axis=0, dtype=float):
"""Summarize data in `values` according to `labels` (author: @OualidBenkarim)
Parameters
@@ -304,7 +304,7 @@ def surface_to_parcel(values, labels, weights=None, target_labels=None,
if isinstance(labels, str):
fname = labels + '.csv'
parc_pth = os.path.dirname(os.path.dirname(__file__)) + '/datasets/parcellations/' + fname
-labels = np.loadtxt(parc_pth, dtype=np.int)
+labels = np.loadtxt(parc_pth, dtype=int)

if axis == 1 and values.ndim == 1:
axis = 0
@@ -46,7 +46,7 @@ def _fread3_many(fobj, n):
out : 1D array
An array of 3 byte int
"""
-b1, b2, b3 = np.fromfile(fobj, ">u1", 3 * n).reshape(-1, 3).astype(np.int).T
+b1, b2, b3 = np.fromfile(fobj, ">u1", 3 * n).reshape(-1, 3).astype(int).T
return (b1 << 16) + (b2 << 8) + b3


