Merge pull request #909 from f0k/int_types
Properly handle integral types other than plain int
f0k committed Apr 26, 2018
2 parents 37ca134 + 97d67d1 commit 7992faa
Showing 10 changed files with 79 additions and 32 deletions.
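For context, the problem this commit fixes: on Python 3, NumPy integer scalars (as returned by np.prod, numpy indexing, and similar) are not instances of plain int, so isinstance(x, int) checks rejected perfectly valid sizes. A minimal sketch of the failure and of the tuple the commit introduces, assuming only NumPy is installed:

    import numbers
    import numpy as np

    # np.prod() returns a numpy scalar (np.int64 on most platforms), not an int:
    size = np.prod(np.empty((3, 3)).shape)
    print(isinstance(size, int))        # False on Python 3

    # the tuple this commit adds as lasagne.utils.int_types:
    int_types = (numbers.Integral, np.integer)
    print(isinstance(size, int_types))  # True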
2 changes: 2 additions & 0 deletions docs/modules/utils.rst
@@ -3,6 +3,8 @@
 
 .. automodule:: lasagne.utils
 
+.. autodata:: int_types
+    :annotation: = (numbers.Integral, np.integer)
 .. autofunction:: floatX
 .. autofunction:: shared_empty
 .. autofunction:: as_theano_expression
18 changes: 9 additions & 9 deletions lasagne/layers/conv.py
@@ -2,7 +2,7 @@
 
 from .. import init
 from .. import nonlinearities
-from ..utils import as_tuple, inspect_kwargs
+from ..utils import as_tuple, int_types, inspect_kwargs
 from ..theano_extensions import conv
 
 from .base import Layer
@@ -75,7 +75,7 @@ def conv_output_length(input_length, filter_size, stride, pad=0):
         output_length = input_length + filter_size - 1
     elif pad == 'same':
         output_length = input_length
-    elif isinstance(pad, int):
+    elif isinstance(pad, int_types):
         output_length = input_length + 2 * pad - filter_size + 1
     else:
         raise ValueError('Invalid pad: {0}'.format(pad))
@@ -151,7 +151,7 @@ def conv_input_length(output_length, filter_size, stride, pad=0):
         pad = filter_size - 1
     elif pad == 'same':
         pad = filter_size // 2
-    if not isinstance(pad, int):
+    if not isinstance(pad, int_types):
         raise ValueError('Invalid pad: {0}'.format(pad))
     return (output_length - 1) * stride - 2 * pad + filter_size
 
@@ -288,9 +288,9 @@ def __init__(self, incoming, num_filters, filter_size, stride=1, pad=0,
                              (n, self.input_shape, n+2, n))
         self.n = n
         self.num_filters = num_filters
-        self.filter_size = as_tuple(filter_size, n, int)
+        self.filter_size = as_tuple(filter_size, n, int_types)
         self.flip_filters = flip_filters
-        self.stride = as_tuple(stride, n, int)
+        self.stride = as_tuple(stride, n, int_types)
         self.untie_biases = untie_biases
 
         if pad == 'same':
@@ -302,7 +302,7 @@ def __init__(self, incoming, num_filters, filter_size, stride=1, pad=0,
         elif pad in ('full', 'same'):
             self.pad = pad
         else:
-            self.pad = as_tuple(pad, n, int)
+            self.pad = as_tuple(pad, n, int_types)
 
         if (num_groups <= 0 or
                 self.num_filters % num_groups != 0 or
@@ -934,7 +934,7 @@ def __init__(self, incoming, num_filters, filter_size, stride=(1, 1),
         # output_size must be set before calling the super constructor
         if (not isinstance(output_size, T.Variable) and
                 output_size is not None):
-            output_size = as_tuple(output_size, 2, int)
+            output_size = as_tuple(output_size, 2, int_types)
         self.output_size = output_size
         super(TransposedConv2DLayer, self).__init__(
             incoming, num_filters, filter_size, stride, crop, untie_biases,
@@ -1127,7 +1127,7 @@ def __init__(self, incoming, num_filters, filter_size,
         # output_size must be set before calling the super constructor
         if (not isinstance(output_size, T.Variable) and
                 output_size is not None):
-            output_size = as_tuple(output_size, 3, int)
+            output_size = as_tuple(output_size, 3, int_types)
         self.output_size = output_size
         BaseConvLayer.__init__(self, incoming, num_filters, filter_size,
                                stride, crop, untie_biases, W, b,
@@ -1286,7 +1286,7 @@ def __init__(self, incoming, num_filters, filter_size, dilation=(1, 1),
                  W=init.GlorotUniform(), b=init.Constant(0.),
                  nonlinearity=nonlinearities.rectify, flip_filters=False,
                  **kwargs):
-        self.dilation = as_tuple(dilation, 2, int)
+        self.dilation = as_tuple(dilation, 2, int_types)
         super(DilatedConv2DLayer, self).__init__(
             incoming, num_filters, filter_size, 1, pad,
             untie_biases, W, b, nonlinearity, flip_filters, n=2, **kwargs)
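With the conv.py changes above, size arguments computed with NumPy pass the layers' validation. A usage sketch, assuming Theano and a Lasagne checkout that includes this patch (InputLayer and Conv2DLayer are the existing Lasagne API):

    import numpy as np
    from lasagne.layers import InputLayer, Conv2DLayer

    l_in = InputLayer((None, 3, 32, 32))
    # num_filters, filter_size and stride may now be numpy integers or
    # shape tuples taken straight from numpy arrays:
    l_conv = Conv2DLayer(l_in, num_filters=np.int64(16),
                         filter_size=np.empty((5, 5)).shape,
                         stride=np.int64(1))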
3 changes: 2 additions & 1 deletion lasagne/layers/normalization.py
@@ -41,6 +41,7 @@
 
 from .. import init
 from .. import nonlinearities
+from ..utils import int_types
 
 from .base import Layer
 
@@ -235,7 +236,7 @@ def __init__(self, incoming, axes='auto', epsilon=1e-4, alpha=0.1,
         if axes == 'auto':
             # default: normalize over all but the second axis
             axes = (0,) + tuple(range(2, len(self.input_shape)))
-        elif isinstance(axes, int):
+        elif isinstance(axes, int_types):
             axes = (axes,)
         self.axes = axes
 
13 changes: 7 additions & 6 deletions lasagne/layers/shape.py
@@ -2,6 +2,7 @@
 import theano.tensor as T
 
 from ..theano_extensions import padding
+from ..utils import int_types
 
 from .base import Layer
 
@@ -105,11 +106,11 @@ def __init__(self, incoming, shape, **kwargs):
         super(ReshapeLayer, self).__init__(incoming, **kwargs)
         shape = tuple(shape)
         for s in shape:
-            if isinstance(s, int):
+            if isinstance(s, int_types):
                 if s == 0 or s < - 1:
                     raise ValueError("`shape` integers must be positive or -1")
             elif isinstance(s, list):
-                if len(s) != 1 or not isinstance(s[0], int) or s[0] < 0:
+                if len(s) != 1 or not isinstance(s[0], int_types) or s[0] < 0:
                     raise ValueError("`shape` input references must be "
                                      "single-element lists of int >= 0")
             elif isinstance(s, T.TensorVariable):
@@ -232,7 +233,7 @@ def __init__(self, incoming, pattern, **kwargs):
         # Sanity check the pattern
         used_dims = set()
         for p in pattern:
-            if isinstance(p, int):
+            if isinstance(p, int_types):
                 # Dimension p
                 if p in used_dims:
                     raise ValueError("pattern contains dimension {0} more "
@@ -256,7 +257,7 @@ def get_output_shape_for(self, input_shape):
         output_shape = []
         dims_used = [False] * len(input_shape)
         for p in self.pattern:
-            if isinstance(p, int):
+            if isinstance(p, int_types):
                 if p < 0 or p >= len(input_shape):
                     raise ValueError("pattern contains {0}, but input shape "
                                      "has {1} dimensions "
@@ -321,7 +322,7 @@ def __init__(self, incoming, width, val=0, batch_ndim=2, **kwargs):
     def get_output_shape_for(self, input_shape):
         output_shape = list(input_shape)
 
-        if isinstance(self.width, int):
+        if isinstance(self.width, int_types):
             widths = [self.width] * (len(input_shape) - self.batch_ndim)
         else:
             widths = self.width
@@ -381,7 +382,7 @@ def __init__(self, incoming, indices, axis=-1, **kwargs):
 
     def get_output_shape_for(self, input_shape):
         output_shape = list(input_shape)
-        if isinstance(self.slice, int):
+        if isinstance(self.slice, int_types):
             del output_shape[self.axis]
         elif input_shape[self.axis] is not None:
             output_shape[self.axis] = len(
10 changes: 5 additions & 5 deletions lasagne/layers/special.py
@@ -4,7 +4,7 @@
 
 from .. import init
 from .. import nonlinearities
-from ..utils import as_tuple, floatX
+from ..utils import as_tuple, floatX, int_types
 from ..random import get_rng
 from .base import Layer, MergeLayer
 from theano.sandbox.rng_mrg import MRG_RandomStreams as RandomStreams
@@ -94,7 +94,7 @@ def __init__(self, incoming, b=init.Constant(0), shared_axes='auto',
         if shared_axes == 'auto':
             # default: share biases over all but the second axis
             shared_axes = (0,) + tuple(range(2, len(self.input_shape)))
-        elif isinstance(shared_axes, int):
+        elif isinstance(shared_axes, int_types):
             shared_axes = (shared_axes,)
         self.shared_axes = shared_axes
 
@@ -161,7 +161,7 @@ def __init__(self, incoming, scales=init.Constant(1), shared_axes='auto',
         if shared_axes == 'auto':
             # default: share scales over all but the second axis
             shared_axes = (0,) + tuple(range(2, len(self.input_shape)))
-        elif isinstance(shared_axes, int):
+        elif isinstance(shared_axes, int_types):
             shared_axes = (shared_axes,)
         self.shared_axes = shared_axes
 
@@ -1007,7 +1007,7 @@ def __init__(self, incoming, alpha=init.Constant(0.25), shared_axes='auto',
             self.shared_axes = (0,) + tuple(range(2, len(self.input_shape)))
         elif shared_axes == 'all':
             self.shared_axes = tuple(range(len(self.input_shape)))
-        elif isinstance(shared_axes, int):
+        elif isinstance(shared_axes, int_types):
             self.shared_axes = (shared_axes,)
         else:
             self.shared_axes = shared_axes
@@ -1120,7 +1120,7 @@ def __init__(self, incoming, lower=0.3, upper=0.8, shared_axes='auto',
             self.shared_axes = (0,) + tuple(range(2, len(self.input_shape)))
         elif shared_axes == 'all':
             self.shared_axes = tuple(range(len(self.input_shape)))
-        elif isinstance(shared_axes, int):
+        elif isinstance(shared_axes, int_types):
             self.shared_axes = (shared_axes,)
         else:
             self.shared_axes = shared_axes
5 changes: 5 additions & 0 deletions lasagne/tests/layers/test_conv.py
@@ -399,6 +399,11 @@ def test_fail_on_mismatching_groups(self):
             BaseConvLayer((2, 3, 4), 1, 3, num_groups=-3)
         assert "must be positive" in exc.value.args[0]
 
+    def test_integer_types(self):
+        from lasagne.layers.conv import BaseConvLayer
+        BaseConvLayer((2, 3, 4), np.int64(1), np.int64(3))
+        BaseConvLayer((2, 3, 4, 5), 1, np.empty((3, 3)).shape)
+
 
 class TestConv1DLayer:
 
29 changes: 26 additions & 3 deletions lasagne/tests/test_utils.py
@@ -5,6 +5,21 @@
 import theano.tensor as T
 
 
+def test_int_types():
+    from lasagne.utils import int_types
+    assert isinstance(42, int_types)
+    assert isinstance(np.int8(42), int_types)
+    assert isinstance(np.int16(42), int_types)
+    assert isinstance(np.int32(42), int_types)
+    assert isinstance(np.int64(42), int_types)
+    assert isinstance(np.empty(42).shape[0], int_types)
+    assert isinstance(np.prod(np.empty(42).shape), int_types)
+    try:
+        assert isinstance(long(42), int_types)
+    except NameError:
+        pass
+
+
 def test_shared_empty():
     from lasagne.utils import shared_empty
 
@@ -45,11 +60,19 @@ def test_one_hot():
 
 
 def test_as_tuple_fails():
-    from lasagne.utils import as_tuple
-    with pytest.raises(ValueError):
+    from lasagne.utils import as_tuple, int_types
+    with pytest.raises(ValueError) as exc:
         as_tuple([1, 2, 3], 4)
-    with pytest.raises(TypeError):
+    assert "length 4" in exc.value.args[0]
+    with pytest.raises(TypeError) as exc:
         as_tuple('asdf', 4, int)
+    assert "of int," in exc.value.args[0]
+    with pytest.raises(TypeError) as exc:
+        as_tuple('asdf', 4, (int, float))
+    assert "of int or float," in exc.value.args[0]
+    with pytest.raises(TypeError) as exc:
+        as_tuple('asdf', 4, int_types)
+    assert "of int," in exc.value.args[0]
 
 
 def test_inspect_kwargs():
7 changes: 4 additions & 3 deletions lasagne/theano_extensions/conv.py
@@ -3,9 +3,10 @@
 """
 
 import numpy as np
-
 import theano.tensor as T
 
+from ..utils import int_types
+
 
 # 1D convolutions
 
@@ -60,7 +61,7 @@ def conv1d_mc0(input, filters, image_shape=None, filter_shape=None,
 
     if isinstance(border_mode, tuple):
         (border_mode,) = border_mode
-    if isinstance(border_mode, int):
+    if isinstance(border_mode, int_types):
         border_mode = (0, border_mode)
 
     input_mc0 = input.dimshuffle(0, 1, 'x', 2)
@@ -94,7 +95,7 @@ def conv1d_mc1(input, filters, image_shape=None, filter_shape=None,
 
     if isinstance(border_mode, tuple):
         (border_mode,) = border_mode
-    if isinstance(border_mode, int):
+    if isinstance(border_mode, int_types):
         border_mode = (border_mode, 0)
 
     input_mc1 = input.dimshuffle(0, 1, 2, 'x')
4 changes: 3 additions & 1 deletion lasagne/theano_extensions/padding.py
@@ -4,6 +4,8 @@
 
 import theano.tensor as T
 
+from ..utils import int_types
+
 
 def pad(x, width, val=0, batch_ndim=1):
     """
@@ -33,7 +35,7 @@ def pad(x, width, val=0, batch_ndim=1):
     output_shape = list(input_shape)
     indices = [slice(None) for _ in output_shape]
 
-    if isinstance(width, int):
+    if isinstance(width, int_types):
         widths = [width] * (input_ndim - batch_ndim)
     else:
         widths = width
20 changes: 16 additions & 4 deletions lasagne/utils.py
@@ -1,9 +1,15 @@
-import numpy as np
+import numbers
+
+import numpy as np
 import theano
 import theano.tensor as T
 
 
+#: Tuple of ``int``-like types for ``isinstance`` checks.
+#: Specifically includes long integers and numpy integers.
+int_types = (numbers.Integral, np.integer)
+
+
 def floatX(arr):
     """Converts data to a numpy array of dtype ``theano.config.floatX``.
@@ -171,8 +177,8 @@ def as_tuple(x, N, t=None):
     x : value or iterable
     N : integer
         length of the desired tuple
-    t : type, optional
-        required type for all elements
+    t : type or tuple of type, optional
+        required type or types for all elements
 
     Returns
     -------
@@ -192,8 +198,14 @@ def as_tuple(x, N, t=None):
         X = (x,) * N
 
     if (t is not None) and not all(isinstance(v, t) for v in X):
+        if t == int_types:
+            expected_type = "int"  # easier to understand
+        elif isinstance(t, tuple):
+            expected_type = " or ".join(tt.__name__ for tt in t)
+        else:
+            expected_type = t.__name__
         raise TypeError("expected a single value or an iterable "
-                        "of {0}, got {1} instead".format(t.__name__, x))
+                        "of {0}, got {1} instead".format(expected_type, x))
 
     if len(X) != N:
         raise ValueError("expected a single value or an iterable "
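The net effect on the helper, as a quick sketch (the error text follows the expected_type branch above; assumes a patched Lasagne):

    import numpy as np
    from lasagne.utils import as_tuple, int_types

    as_tuple(np.int64(3), 2, int_types)  # -> (3, 3): numpy scalars accepted
    as_tuple('asdf', 4, int_types)
    # TypeError: expected a single value or an iterable of int, got asdf instead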
