
Use OpenVINO 2022.1
dkurt committed May 2, 2022
1 parent 8980f4f commit 975821a
Showing 3 changed files with 24 additions and 29 deletions.
42 changes: 21 additions & 21 deletions cellpose/contrib/openvino_utils.py
@@ -2,9 +2,9 @@
 
 import numpy as np
 import torch
-from openvino.inference_engine import IECore
+from openvino.runtime import Core
 
-ie = IECore()
+ie = Core()
 
 def to_openvino(model):
     if isinstance(model.net, OpenVINOModel):
@@ -19,13 +19,12 @@ class OpenVINOModel(object):
     def __init__(self, model):
         self._base_model = model
         self._nets = {}
-        self._exec_nets = {}
         self._model_id = "default"
 
 
     def _init_model(self, inp):
         if self._model_id in self._nets:
-            return self._nets[self._model_id], self._exec_nets[self._model_id]
+            return self._nets[self._model_id]
 
         # Load a new instance of the model with updated weights
         if self._model_id != "default":
@@ -34,33 +33,34 @@ def _init_model(self, inp):
         buf = io.BytesIO()
         dummy_input = torch.zeros([1] + list(inp.shape[1:]))  # To avoid extra network reloading we process batch in the loop
         torch.onnx.export(self._base_model, dummy_input, buf, input_names=["input"], output_names=["output", "style"])
-        net = ie.read_network(buf.getvalue(), b"", init_from_buffer=True)
-        exec_net = ie.load_network(net, "CPU")
+        net = ie.read_model(buf.getvalue(), b"")
+        exec_net = ie.compile_model(net, "CPU").create_infer_request()
 
-        self._nets[self._model_id] = net
-        self._exec_nets[self._model_id] = exec_net
+        self._nets[self._model_id] = exec_net
 
-        return net, exec_net
+        return exec_net
 
 
     def __call__(self, inp):
-        net, exec_net = self._init_model(inp)
+        exec_net = self._init_model(inp)
 
         batch_size = inp.shape[0]
         if batch_size > 1:
-            out_shape = net.outputs["output"].shape
-            style_shape = net.outputs["style"].shape
-            output = np.zeros([batch_size] + out_shape[1:], np.float32)
-            style = np.zeros([batch_size] + style_shape[1:], np.float32)
+            outputs = []
+            styles = []
             for i in range(batch_size):
-                out = exec_net.infer({"input": inp[i : i + 1]})
-                output[i] = out["output"]
-                style[i] = out["style"]
-
-            return torch.tensor(output), torch.tensor(style)
+                outs = exec_net.infer({"input": inp[i : i + 1]})
+                outs = {out.get_any_name(): value for out, value in outs.items()}
+                outputs.append(outs["output"])
+                styles.append(outs["style"])
+
+            outputs = np.concatenate(outputs)
+            styles = np.concatenate(styles)
+            return torch.tensor(outputs), torch.tensor(styles)
         else:
-            out = exec_net.infer({"input": inp})
-            return torch.tensor(out["output"]), torch.tensor(out["style"])
+            outs = exec_net.infer({"input": inp})
+            outs = {out.get_any_name(): value for out, value in outs.items()}
+            return torch.tensor(outs["output"]), torch.tensor(outs["style"])
 
 
     def load_model(self, path, cpu):
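This file is the heart of the change: the legacy Inference Engine API (IECore, read_network, load_network), dropped after the 2021.x releases, is replaced by the openvino.runtime API introduced in OpenVINO 2022.1, and inference results are now keyed by output ports rather than by name. Below is a minimal standalone sketch of the new pattern as used in the diff above; the model path, input name, and input shape are placeholders, not values from this commit:

    import numpy as np
    from openvino.runtime import Core

    core = Core()

    # read_model() replaces read_network(); it accepts a file path or an
    # in-memory buffer, as in the torch.onnx.export flow above.
    model = core.read_model("model.onnx")  # placeholder path

    # compile_model() replaces load_network(); inference now runs through
    # an InferRequest instead of an ExecutableNetwork.
    request = core.compile_model(model, "CPU").create_infer_request()

    # infer() returns results keyed by output ports, hence the
    # get_any_name() remapping used in the diff above.
    results = request.infer({"input": np.zeros((1, 2, 224, 224), np.float32)})
    results = {port.get_any_name(): value for port, value in results.items()}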
4 changes: 2 additions & 2 deletions docs/openvino.rst
@@ -3,11 +3,11 @@ OpenVINO
 
 `OpenVINO <https://github.com/openvinotoolkit/openvino>`_ is an optional backend for Cellpose which optimizes deep learning inference for Intel Architectures.
 
-It can be installed with a primary package by adding extra suffix:
+It should be installed into the same environment as Cellpose with the following command:
 
 ::
 
-    pip install cellpose[openvino]
+    pip install --no-deps openvino
 
 Using ``openvino_utils.to_openvino``, convert the PyTorch model to an OpenVINO one:

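With OpenVINO installed this way, the conversion happens at run time rather than install time. One plausible usage sketch, assuming a Cellpose model object that exposes a .net attribute as to_openvino expects (the model class and model_type value are placeholders, not taken from this commit):

    from cellpose import models
    from cellpose.contrib import openvino_utils

    # to_openvino() wraps the model's PyTorch net in an OpenVINOModel
    # (see cellpose/contrib/openvino_utils.py above); model_type here
    # is only an example value.
    model = models.CellposeModel(gpu=False, model_type="cyto")
    model = openvino_utils.to_openvino(model)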
7 changes: 1 addition & 6 deletions setup.py
@@ -27,10 +27,6 @@
     'scikit-learn',
 ]
 
-openvino_deps = [
-    'openvino==2021.3',
-]
-
 try:
     import torch
     a = torch.ones(2, 3)
@@ -67,8 +63,7 @@
         'docs': docs_deps,
         'gui': gui_deps,
         'distributed': distributed_deps,
-        'openvino': openvino_deps,
-        'all': gui_deps + distributed_deps + openvino_deps,
+        'all': gui_deps + distributed_deps,
     },
     include_package_data=True,
     classifiers=(
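Since the pinned openvino==2021.3 extra is gone from setup.py, the installed runtime version is whatever the user fetched by hand, so a quick sanity check can be useful. A sketch, using the version helper that ships with the openvino.runtime package:

    # Confirm the environment picked up a 2022.1-era runtime.
    from openvino.runtime import get_version
    print(get_version())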
