Fixed ICUDAUnit @implementer and initialize(snapshot)
Change-Id: I7a639f14cf713390b6bb13ea79e2ef9c28db60af
vmarkovtsev committed Mar 19, 2015
1 parent 5ae6475 commit 5728a8c
Showing 15 changed files with 32 additions and 42 deletions.
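
The commit makes two mechanical fixes across the tree: units with both an OpenCL and a CUDA code path now declare ICUDAUnit alongside IOpenCLUnit in their @implementer decorators, and workflow initialize() overrides now forward **kwargs so the snapshot keyword reaches the base class. A minimal sketch of the decorator pattern, assuming only the imports visible in the diffs below (the unit class and its method bodies are illustrative, not repository code):

```python
from zope.interface import implementer

from veles.accelerated_units import AcceleratedUnit, IOpenCLUnit, ICUDAUnit


# Before this commit such a unit declared only IOpenCLUnit; declaring
# both interfaces lets backend dispatch recognize the CUDA path too.
@implementer(IOpenCLUnit, ICUDAUnit)
class IllustrativeUnit(AcceleratedUnit):
    def ocl_run(self):
        pass  # OpenCL kernel launch would go here

    def cuda_run(self):
        pass  # CUDA kernel launch would go here
```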
22 changes: 3 additions & 19 deletions activation.py
@@ -10,7 +10,7 @@
 import numpy
 from zope.interface import implementer

-from veles.accelerated_units import AcceleratedUnit, IOpenCLUnit
+from veles.accelerated_units import AcceleratedUnit, IOpenCLUnit, ICUDAUnit
 import veles.error as error
 from veles.memory import eq_addr, ravel
 from veles.znicz.nn_units import Forward, GradientDescentBase
@@ -22,7 +22,7 @@ def init_unpickled(self):
         self.sources_["activation"] = {}


-@implementer(IOpenCLUnit)
+@implementer(IOpenCLUnit, ICUDAUnit)
 class ActivationForward(Forward, Activation):
     MAPPING = set()

@@ -87,7 +87,7 @@ def cuda_run(self):
         self._gpu_run()


-@implementer(IOpenCLUnit)
+@implementer(IOpenCLUnit, ICUDAUnit)
 class ActivationBackward(GradientDescentBase, Activation):
     """Backward activation pass: err_input = err_output * F'(output).
@@ -178,7 +178,6 @@ def cuda_run(self):
         self._gpu_run()


-@implementer(IOpenCLUnit)
 class ForwardTanh(ActivationForward):
     """Forward pass for y = 1.7159 * tanh(0.6666 * x).
     """
@@ -192,7 +191,6 @@ def cpu_run(self):
         out *= 1.7159


-@implementer(IOpenCLUnit)
 class BackwardTanh(ActivationBackward):
     """Backward pass for :class:`ForwardTanh`.
     """
@@ -208,7 +206,6 @@ def cpu_run(self):
                        err_input)


-@implementer(IOpenCLUnit)
 class ForwardSigmoid(ActivationForward):
     """Forward pass for y = 1.0 / (1.0 + exp(-x)).
     """
@@ -221,7 +218,6 @@ def cpu_run(self):
         numpy.reciprocal(1.0 + numpy.exp(-out), out)


-@implementer(IOpenCLUnit)
 class BackwardSigmoid(ActivationBackward):
     """Backward pass for :class:`ForwardSigmoid`.
     """
@@ -235,7 +231,6 @@ def cpu_run(self):
         numpy.multiply(err_output, output * (1.0 - output), err_input)


-@implementer(IOpenCLUnit)
 class ForwardMul(ActivationForward):
     """Forward pass for :math:`y = k x`.
     """
@@ -302,7 +297,6 @@ def cpu_run(self):
         out *= self.factor


-@implementer(IOpenCLUnit)
 class BackwardMul(ActivationBackward):
     """Backward pass for :class:`ForwardMul`.
     """
@@ -342,7 +336,6 @@ def cpu_run(self):
         err_input[:] = err_output[:] * self.factor


-@implementer(IOpenCLUnit)
 class ForwardRELU(ActivationForward):
     """
     This activation is taken from article
@@ -361,7 +354,6 @@ def cpu_run(self):
         out[:] = numpy.where(inp > 15, inp, numpy.log(numpy.exp(inp) + 1.0))


-@implementer(IOpenCLUnit)
 class BackwardRELU(ActivationBackward):
     """Backward pass for :class:`ForwardRELU`
     """
@@ -375,7 +367,6 @@ def cpu_run(self):
         numpy.multiply(err_output, 1.0 - numpy.exp(-output), err_input)


-@implementer(IOpenCLUnit)
 class ForwardStrictRELU(ActivationForward):
     """
     Forward pass for :math:`y = \\max(0, x)`.
@@ -405,7 +396,6 @@ def drop_slave(self, slave):
         pass


-@implementer(IOpenCLUnit)
 class BackwardStrictRELU(ActivationBackward):
     """
     Backward pass for :class:`ForwardStrictRELU`.
@@ -438,7 +428,6 @@ def drop_slave(self, slave):
         pass


-@implementer(IOpenCLUnit)
 class ForwardLog(ActivationForward):
     """Forward pass for :math:`y = \\log(x + \\sqrt{x^2 + 1})`.
     """
@@ -461,7 +450,6 @@ def cpu_run(self):
         numpy.log(inp + numpy.sqrt(numpy.square(inp) + 1), out)


-@implementer(IOpenCLUnit)
 class BackwardLog(ActivationBackward):
     """Backward pass for :class:`ForwardLog`.
     """
@@ -488,7 +476,6 @@ def cpu_run(self):
                        err_input)


-@implementer(IOpenCLUnit)
 class ForwardTanhLog(ActivationForward):
     """Forward pass for hybrid tanh-log function.
     """
@@ -517,7 +504,6 @@ def cpu_run(self):
                 out[i] = y


-@implementer(IOpenCLUnit)
 class BackwardTanhLog(ActivationBackward):
     """Backward pass for hybrid tanh-log function.
     """
@@ -554,7 +540,6 @@ def _set_activation_args(self):
         self.set_args(self.input, self.output, self.err_output, self.err_input)


-@implementer(IOpenCLUnit)
 class ForwardSinCos(ActivationForward):
     """Forward pass for y = sin(x) if idx(x) is odd else cos(x).
     """
@@ -575,7 +560,6 @@ def cpu_run(self):
         out[0::2] = numpy.cos(inp[0::2])


-@implementer(IOpenCLUnit)
 class BackwardSinCos(ActivationBackward):
     """Backward pass for :class:`ForwardSinCos`.
     """
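
The per-subclass @implementer(IOpenCLUnit) decorators deleted above were redundant rather than wrong: zope.interface declarations are inherited, so every subclass of ActivationForward or ActivationBackward already provides whatever its base class declares. A standalone sketch of that behavior (IBackend, Base, and Child are illustrative names, not veles code):

```python
from zope.interface import Interface, implementer


class IBackend(Interface):
    """Illustrative stand-in for IOpenCLUnit/ICUDAUnit."""


@implementer(IBackend)
class Base(object):
    pass


class Child(Base):
    pass  # no decorator needed


print(IBackend.implementedBy(Base))   # True
print(IBackend.implementedBy(Child))  # True: declarations are inherited
```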
4 changes: 2 additions & 2 deletions all2all.py
@@ -14,13 +14,13 @@
 import numpy
 from zope.interface import implementer

-from veles.accelerated_units import IOpenCLUnit
+from veles.accelerated_units import IOpenCLUnit, ICUDAUnit
 import veles.error as error
 from veles.memory import reshape, roundup, Vector
 import veles.znicz.nn_units as nn_units


-@implementer(IOpenCLUnit)
+@implementer(IOpenCLUnit, ICUDAUnit)
 class All2All(nn_units.NNLayerBase):
     """All2All with linear activation f(x) = x.
4 changes: 2 additions & 2 deletions evaluator.py
@@ -15,7 +15,7 @@
 from veles.distributable import TriviallyDistributable
 import veles.error as error
 from veles.memory import assert_addr, ravel, Vector
-from veles.accelerated_units import AcceleratedUnit, IOpenCLUnit
+from veles.accelerated_units import AcceleratedUnit, IOpenCLUnit, ICUDAUnit
 from veles.opencl_types import numpy_dtype_to_opencl


@@ -44,7 +44,7 @@ def initialize(self, device, **kwargs):
             vec.initialize(self.device)


-@implementer(IOpenCLUnit)
+@implementer(IOpenCLUnit, ICUDAUnit)
 class EvaluatorSoftmax(EvaluatorBase, TriviallyDistributable):
     """Evaluator for nn softmax output from the batch labels.
4 changes: 2 additions & 2 deletions gd.py
@@ -23,7 +23,7 @@
 from zope.interface import implementer

 from veles.memory import reshape, roundup, Vector
-from veles.accelerated_units import IOpenCLUnit
+from veles.accelerated_units import IOpenCLUnit, ICUDAUnit
 import veles.znicz.nn_units as nn_units
 from collections import namedtuple

@@ -41,7 +41,7 @@
                                        "bias"))


-@implementer(IOpenCLUnit)
+@implementer(IOpenCLUnit, ICUDAUnit)
 class GradientDescent(nn_units.GradientDescentBase):
     """Gradient Descent unit for :class:`veles.znicz.all2all.All2All`.
2 changes: 1 addition & 1 deletion samples/MnistSimple/mnist.py
@@ -194,7 +194,7 @@ def __init__(self, workflow, **kwargs):
     def initialize(self, learning_rate, weights_decay, device, **kwargs):
         return super(MnistWorkflow, self).initialize(
             learning_rate=learning_rate, weights_decay=weights_decay,
-            device=device)
+            device=device, **kwargs)


 def run(load, main):
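
This and the following workflow files all repair the same leak: each initialize() override accepted **kwargs but never passed it on, so keywords such as snapshot were silently dropped before reaching the base implementation. A self-contained toy reproduction of the bug and the fix (Base, Broken, and Fixed are hypothetical classes, not veles code):

```python
class Base(object):
    def initialize(self, device, snapshot=False, **kwargs):
        print("snapshot = %s" % snapshot)


class Broken(Base):
    def initialize(self, device, **kwargs):
        # Bug: **kwargs is accepted but silently dropped here.
        return super(Broken, self).initialize(device=device)


class Fixed(Base):
    def initialize(self, device, **kwargs):
        # Fix: forward **kwargs so snapshot reaches Base.initialize().
        return super(Fixed, self).initialize(device=device, **kwargs)


Broken().initialize(device=None, snapshot=True)  # prints: snapshot = False
Fixed().initialize(device=None, snapshot=True)   # prints: snapshot = True
```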
6 changes: 4 additions & 2 deletions tests/functional/test_approximator.py
@@ -66,7 +66,8 @@ def test_approximator(self):
             device=self.device,
             learning_rate=root.approximator.learning_rate,
             weights_decay=root.approximator.weights_decay,
-            minibatch_size=root.approximator.loader.minibatch_size)
+            minibatch_size=root.approximator.loader.minibatch_size,
+            snapshot=False)
         self.w.run()
         file_name = self.w.snapshotter.file_name

@@ -83,7 +84,8 @@ def test_approximator(self):
             device=self.device,
             learning_rate=root.approximator.learning_rate,
             weights_decay=root.approximator.weights_decay,
-            minibatch_size=root.approximator.loader.minibatch_size)
+            minibatch_size=root.approximator.loader.minibatch_size,
+            snapshot=True)
         self.wf.run()

         avg_mse = self.wf.decision.epoch_metrics[2][0]
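
The two initialize() calls in this test differ deliberately: the first run starts from scratch (snapshot=False) and lets the snapshotter write a file, while the second workflow, restored from that file, passes snapshot=True, presumably so initialization preserves the restored state rather than resetting it. The channels and CIFAR tests below follow the same two-phase pattern.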
6 changes: 4 additions & 2 deletions tests/functional/test_channels.py
@@ -88,7 +88,8 @@ def test_channels_all2all(self):
         self.assertEqual(self.w.evaluator.labels,
                          self.w.loader.minibatch_labels)
         self.w.initialize(device=self.device,
-                          minibatch_size=root.channels.loader.minibatch_size)
+                          minibatch_size=root.channels.loader.minibatch_size,
+                          snapshot=False)
         self.assertEqual(self.w.evaluator.labels,
                          self.w.loader.minibatch_labels)
         self.w.run()
@@ -107,7 +108,8 @@ def test_channels_all2all(self):
         self.assertEqual(self.wf.evaluator.labels,
                          self.wf.loader.minibatch_labels)
         self.wf.initialize(device=self.device,
-                           minibatch_size=root.channels.loader.minibatch_size)
+                           minibatch_size=root.channels.loader.minibatch_size,
+                           snapshot=True)
         self.assertEqual(self.wf.evaluator.labels,
                          self.wf.loader.minibatch_labels)
         self.wf.run()
6 changes: 4 additions & 2 deletions tests/functional/test_cifar_all2all.py
@@ -84,7 +84,8 @@ def test_cifar_all2all(self):
         self.assertEqual(self.w.evaluator.labels,
                          self.w.loader.minibatch_labels)
         self.w.initialize(device=self.device,
-                          minibatch_size=root.cifar.loader.minibatch_size)
+                          minibatch_size=root.cifar.loader.minibatch_size,
+                          snapshot=False)
         self.assertEqual(self.w.evaluator.labels,
                          self.w.loader.minibatch_labels)
         self.w.run()
@@ -102,7 +103,8 @@ def test_cifar_all2all(self):
         self.assertEqual(self.wf.evaluator.labels,
                          self.wf.loader.minibatch_labels)
         self.wf.initialize(device=self.device,
-                           minibatch_size=root.cifar.loader.minibatch_size)
+                           minibatch_size=root.cifar.loader.minibatch_size,
+                           snapshot=True)
         self.assertEqual(self.wf.evaluator.labels,
                          self.wf.loader.minibatch_labels)
         self.wf.run()
2 changes: 1 addition & 1 deletion tests/research/Approximator/approximator.py
@@ -304,7 +304,7 @@ def initialize(self, learning_rate, weights_decay, minibatch_size,
                    device, **kwargs):
         super(ApproximatorWorkflow, self).initialize(
             learning_rate=learning_rate, weights_decay=weights_decay,
-            minibatch_size=minibatch_size, device=device)
+            minibatch_size=minibatch_size, device=device, **kwargs)


 def run(load, main):
2 changes: 1 addition & 1 deletion tests/research/VideoAE/video_ae.py
@@ -219,7 +219,7 @@ def initialize(self, learning_rate, weights_decay, device, **kwargs):
             forward.device = device
         return super(VideoAEWorkflow, self).initialize(
             learning_rate=learning_rate, weights_decay=weights_decay,
-            device=device)
+            device=device, **kwargs)


 def run(load, main):
2 changes: 1 addition & 1 deletion tests/research/WineRelu/wine_relu.py
@@ -132,7 +132,7 @@ def __init__(self, workflow, **kwargs):
     def initialize(self, learning_rate, weights_decay, device, **kwargs):
         super(WineReluWorkflow, self).initialize(
             learning_rate=learning_rate, weights_decay=weights_decay,
-            device=device)
+            device=device, **kwargs)


 def run(load, main):
2 changes: 1 addition & 1 deletion tests/unit/test_caffe_complex.py
@@ -471,7 +471,7 @@ def test_all(self):
             self._create_gd_units(cur_iter)
             self.workflow.end_point.link_from(self.workflow["gd_conv1"])

-            self.workflow.initialize(device=self.device)
+            self.workflow.initialize(device=self.device, snapshot=False)
             self.info("self.workflow.initialize() completed")

             self._load_labels_and_data(cur_iter)
4 changes: 2 additions & 2 deletions tests/unit/test_matrix_multiplication.py
@@ -21,7 +21,7 @@
 import veles.prng as prng
 from veles import opencl_types
 from veles.dummy import DummyWorkflow
-from veles.accelerated_units import TrivialOpenCLUnit
+from veles.accelerated_units import TrivialAcceleratedUnit
 import veles.znicz as znicz
 znicz.nothing()

@@ -92,7 +92,7 @@ def _cleanup_after_tsts(self):
     def _do_tst(self, device, BLOCK_SIZE):
         """Do test for specific context
         """
-        obj = TrivialOpenCLUnit(DummyWorkflow())
+        obj = TrivialAcceleratedUnit(DummyWorkflow())
         obj.initialize(device=device)

         self.a.initialize(device)
4 changes: 2 additions & 2 deletions tests/unit/test_matrix_reduce.py
@@ -19,7 +19,7 @@
 import veles.opencl_types as opencl_types
 import veles.prng as prng
 from veles.dummy import DummyWorkflow
-from veles.accelerated_units import TrivialOpenCLUnit
+from veles.accelerated_units import TrivialAcceleratedUnit


 class TestMatrixReduce(unittest.TestCase):
@@ -42,7 +42,7 @@ def _build_program(self, a, b, A_WIDTH, A_HEIGHT, A_COL, REDUCE_SIZE):
         if A_COL:
             defines["A_COL"] = 1

-        tmp = TrivialOpenCLUnit(DummyWorkflow())
+        tmp = TrivialAcceleratedUnit(DummyWorkflow())
         tmp.initialize(device=self.device)
         tmp.sources_["test_matrix_reduce"] = {}

4 changes: 2 additions & 2 deletions tests/unit/test_matrix_transpose.py
@@ -16,7 +16,7 @@
 from veles.backends import Device
 import veles.prng as prng
 from veles.dummy import DummyWorkflow
-from veles.accelerated_units import TrivialOpenCLUnit
+from veles.accelerated_units import TrivialAcceleratedUnit


 class TestMatrixTranspose(unittest.TestCase):
@@ -33,7 +33,7 @@ def test_transpose(self):
         prng.get().fill(a.mem)
         b = Vector(numpy.zeros([WIDTH * 2, HEIGHT], dtype=dtype))

-        obj = TrivialOpenCLUnit(DummyWorkflow())
+        obj = TrivialAcceleratedUnit(DummyWorkflow())
         obj.initialize(device=device)

         a.initialize(device)
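
Judging by the changed imports, the three test files above track a rename in veles.accelerated_units: the trivial helper unit is no longer OpenCL-specific (TrivialOpenCLUnit) but backend-agnostic (TrivialAcceleratedUnit), consistent with the CUDA support this commit wires in; the call sites are otherwise unchanged.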
