diff --git a/proto/inference.proto b/proto/inference.proto index 39845dc5..59ae4d26 100644 --- a/proto/inference.proto +++ b/proto/inference.proto @@ -2,6 +2,7 @@ syntax = "proto3"; service Inference { rpc CreateModelSession(CreateModelSessionRequest) returns (ModelSession) {} + rpc CloseModelSession(ModelSession) returns (Empty) {} rpc CreateDatasetDescription(CreateDatasetDescriptionRequest) returns (DatasetDescription) {} @@ -56,34 +57,6 @@ message NamedFloats { } -/* InputShape will always be expected to have `shape` set. - * For `ShapeType` PARAMETRIZED, also a `stepShape` has to be given. - * ref: https://github.com/bioimage-io/spec-bioimage-io/blob/gh-pages/model_spec_latest.md */ -message InputShape { - enum ShapeType { - EXPLICIT = 0; - PARAMETRIZED = 1; - } - - ShapeType shapeType = 1; - // shape is min, when PARAMETRIZED - NamedInts shape = 2; - NamedInts stepShape = 4; -} - -message OutputShape { - enum ShapeType { - EXPLICIT = 0; - IMPLICIT = 1; - } - ShapeType shapeType = 1; - NamedInts shape = 2; - NamedInts halo = 3; - string referenceTensor = 4; - NamedFloats scale = 5; - NamedFloats offset = 6; -} - message ModelSession { string id = 1; } @@ -136,16 +109,11 @@ message PredictResponse { message Empty {} - service FlightControl { rpc Ping(Empty) returns (Empty) {} rpc Shutdown(Empty) returns (Empty) {} } -message ModelInfo { - repeated string deviceIds = 1; -} - message CreateModelSessionChunkedRequest { oneof data { ModelInfo info = 1; diff --git a/tests/data/__init__.py b/tests/data/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/tests/data/dummy/Dummy.model.yaml b/tests/data/dummy/Dummy.model.yaml deleted file mode 100644 index 1aa8fe90..00000000 --- a/tests/data/dummy/Dummy.model.yaml +++ /dev/null @@ -1,56 +0,0 @@ -format_version: 0.3.3 -language: python -framework: pytorch - -name: UNet2DNucleiBroad -description: A 2d U-Net pretrained on broad nucleus dataset. -cite: - - text: "Ronneberger, Olaf et al. U-net: Convolutional networks for biomedical image segmentation. MICCAI 2015." 
- doi: https://doi.org/10.1007/978-3-319-24574-4_28 -authors: - - name: "ilastik-team" - affiliation: "EMBL Heidelberg" - -documentation: dummy.md -tags: [pytorch, nucleus-segmentation] -license: MIT -git_repo: https://github.com/ilastik/tiktorch -covers: [] - -source: dummy.py::Dummy -sha256: 00ffb1647cf7ec524892206dce6258d9da498fe040c62838f31b501a09bfd573 -timestamp: 2019-12-11T12:22:32Z # ISO 8601 - -test_inputs: [dummy_in.npy] -test_outputs: [dummy_out.npy] - -weights: - pytorch_state_dict: - source: ./weights - sha256: 518cb80bad2eb3ec3dfbe6bab74920951391ce8fb24e15cf59b9b9f052a575a6 - authors: - - name: "ilastik-team" - affiliation: "EMBL Heidelberg" - - -# TODO double check inputs/outputs -inputs: - - name: input - axes: bcyx - data_type: float32 - data_range: [-inf, inf] - shape: [1, 1, 128, 128] - - -outputs: - - name: output - axes: bcyx - data_type: float32 - data_range: [0, 1] - shape: - reference_tensor: input - scale: [1, 1, 1, 1] - offset: [0, 0, 0, 0] - halo: [0, 0, 32, 32] - -type: model diff --git a/tests/data/dummy/dummy.md b/tests/data/dummy/dummy.md deleted file mode 100644 index e69de29b..00000000 diff --git a/tests/data/dummy/dummy.py b/tests/data/dummy/dummy.py deleted file mode 100644 index 195e98e3..00000000 --- a/tests/data/dummy/dummy.py +++ /dev/null @@ -1,7 +0,0 @@ -from torch import nn - - -class Dummy(nn.Module): - def forward(self, input): - x = input - return x + 1 diff --git a/tests/data/dummy/dummy_in.npy b/tests/data/dummy/dummy_in.npy deleted file mode 100644 index 96a78a7b..00000000 Binary files a/tests/data/dummy/dummy_in.npy and /dev/null differ diff --git a/tests/data/dummy/dummy_out.npy b/tests/data/dummy/dummy_out.npy deleted file mode 100644 index 56f76ca7..00000000 Binary files a/tests/data/dummy/dummy_out.npy and /dev/null differ diff --git a/tests/data/dummy/environment.yaml b/tests/data/dummy/environment.yaml deleted file mode 100644 index e69de29b..00000000 diff --git a/tests/data/dummy/weights b/tests/data/dummy/weights deleted file mode 100644 index da14f342..00000000 Binary files a/tests/data/dummy/weights and /dev/null differ diff --git a/tests/data/dummy_param/Dummy.model_param.yaml b/tests/data/dummy_param/Dummy.model_param.yaml deleted file mode 100644 index 87ddd885..00000000 --- a/tests/data/dummy_param/Dummy.model_param.yaml +++ /dev/null @@ -1,57 +0,0 @@ -format_version: 0.3.3 -language: python -framework: pytorch - -name: UNet2DNucleiBroad -description: A 2d U-Net pretrained on broad nucleus dataset. -cite: - - text: "Ronneberger, Olaf et al. U-net: Convolutional networks for biomedical image segmentation. MICCAI 2015." 
- doi: https://doi.org/10.1007/978-3-319-24574-4_28 -authors: - - name: "ilastik-team" - affiliation: "EMBL Heidelberg" - -documentation: dummy.md -tags: [pytorch, nucleus-segmentation] -license: MIT -git_repo: https://github.com/ilastik/tiktorch -covers: [] - -source: dummy.py::Dummy -sha256: 00ffb1647cf7ec524892206dce6258d9da498fe040c62838f31b501a09bfd573 -timestamp: 2019-12-11T12:22:32Z # ISO 8601 - -test_inputs: [dummy_in.npy] -test_outputs: [dummy_out.npy] - -weights: - pytorch_state_dict: - source: ./weights - sha256: 518cb80bad2eb3ec3dfbe6bab74920951391ce8fb24e15cf59b9b9f052a575a6 - authors: - - name: "ilastik-team" - affiliation: "EMBL Heidelberg" - - -# TODO double check inputs/outputs -inputs: - - name: param - axes: bcyx - data_type: float32 - data_range: [-inf, inf] - shape: - min: [1, 1, 64, 64] - step: [0, 0, 2, 1] - -outputs: - - name: output - axes: bcyx - data_type: float32 - data_range: [0, 1] - shape: - reference_tensor: param - scale: [1, 1, 1, 1] - offset: [0, 0, 0, 0] - halo: [0, 0, 8, 8] - -type: model diff --git a/tests/data/dummy_param/dummy.md b/tests/data/dummy_param/dummy.md deleted file mode 100644 index e69de29b..00000000 diff --git a/tests/data/dummy_param/dummy.py b/tests/data/dummy_param/dummy.py deleted file mode 100644 index 195e98e3..00000000 --- a/tests/data/dummy_param/dummy.py +++ /dev/null @@ -1,7 +0,0 @@ -from torch import nn - - -class Dummy(nn.Module): - def forward(self, input): - x = input - return x + 1 diff --git a/tests/data/dummy_param/dummy_in.npy b/tests/data/dummy_param/dummy_in.npy deleted file mode 100644 index 96a78a7b..00000000 Binary files a/tests/data/dummy_param/dummy_in.npy and /dev/null differ diff --git a/tests/data/dummy_param/dummy_out.npy b/tests/data/dummy_param/dummy_out.npy deleted file mode 100644 index 56f76ca7..00000000 Binary files a/tests/data/dummy_param/dummy_out.npy and /dev/null differ diff --git a/tests/data/dummy_param/environment.yaml b/tests/data/dummy_param/environment.yaml deleted file mode 100644 index e69de29b..00000000 diff --git a/tests/data/dummy_param/weights b/tests/data/dummy_param/weights deleted file mode 100644 index da14f342..00000000 Binary files a/tests/data/dummy_param/weights and /dev/null differ diff --git a/tests/data/dummy_tensorflow/cover.png b/tests/data/dummy_tensorflow/cover.png deleted file mode 100644 index b22b5124..00000000 Binary files a/tests/data/dummy_tensorflow/cover.png and /dev/null differ diff --git a/tests/data/dummy_tensorflow/dummy-model-weights.zip b/tests/data/dummy_tensorflow/dummy-model-weights.zip deleted file mode 100644 index 96df58e3..00000000 Binary files a/tests/data/dummy_tensorflow/dummy-model-weights.zip and /dev/null differ diff --git a/tests/data/dummy_tensorflow/rdf.yaml b/tests/data/dummy_tensorflow/rdf.yaml deleted file mode 100644 index e5a8597f..00000000 --- a/tests/data/dummy_tensorflow/rdf.yaml +++ /dev/null @@ -1,34 +0,0 @@ -authors: -- {name: ilastik-team} -cite: -- {text: text, url: pass} -covers: [cover.png] -description: simple model that increments input by 1 -documentation: tensorflow-dummy.md -format_version: 0.4.3 -inputs: -- axes: bcyx - data_range: [0.0, 255.0] - data_type: uint8 - name: input0 - shape: [1, 1, 32, 32] -license: CC-BY-4.0 -name: dummy-keras -outputs: -- axes: bcyx - data_range: [-.inf, .inf] - data_type: float32 - halo: [0, 0, 0, 0] - name: output0 - shape: - offset: [0.0, 0.0, 0.0, 0.0] - reference_tensor: input0 - scale: [1.0, 1.0, 1.0, 1.0] -tags: [testing] -test_inputs: [test_input.npy] -test_outputs: [test_output.npy] 
-timestamp: '2022-02-02T11:19:07.303147' -type: model -weights: - tensorflow_saved_model_bundle: {sha256: a3c86e3b2ec3766d29b635ae4422e0c12912be5af571a88ad3e44ba6c93f737c, - source: dummy-model-weights.zip, tensorflow_version: '1.14'} diff --git a/tests/data/dummy_tensorflow/tensorflow-dummy.md b/tests/data/dummy_tensorflow/tensorflow-dummy.md deleted file mode 100644 index 8b76d7aa..00000000 --- a/tests/data/dummy_tensorflow/tensorflow-dummy.md +++ /dev/null @@ -1,74 +0,0 @@ -# Simple tensorflow test model - -## Code to generate the model - -```python -# Simple tensorflow test model -# inverts input. - -## Code to generate the model - -```python -import tensorflow as tf -from tensorflow.keras.models import Sequential -from tensorflow.keras.layers import Input, Lambda -import numpy -import xarray -import shutil - -shutil.rmtree("./dummy-model") - -model = Sequential() -model.add(Input(shape=(1, 32, 32), name="input0")) -model.add(Lambda(lambda x: x * -1, name="output0")) -tf.keras.experimental.export_saved_model(model, "./dummy-model") - -input_data = numpy.random.randint(0, 255, (1, 1, 32, 32), dtype="uint8") -input_ = xarray.DataArray(input_data, dims=("b", "c", "y", "x")) - -output_ = model.predict(input_) - -numpy.save("test_input.npy", input_) -numpy.save("test_output.npy", output_) -``` - - -```Python -# generate a model zoo model out of it -from bioimageio.core.build_spec import build_model -import shutil - -weight_file = "./dummy-model-weights.zip" -name = "dummy-keras" -input_axes = ["bcyx"] -output_axes = ["bcyx"] -zip_path = "./dummy-model-tensorflow.zip" - - -shutil.make_archive("dummy-model-weights", "zip", root_dir="dummy-model") - - -new_mod_raw = build_model( - weight_uri=weight_file, - test_inputs=["./test_input.npy"], - test_outputs=["./test_output.npy"], - input_axes=input_axes, - output_axes=output_axes, - output_path=zip_path, - name=name, - input_names=["input0"], - description="simple model that increments input by 1", - authors=[{"name": "ilastik-team"}], - documentation="./tensorflow-dummy.md", - tags=["testing"], - cite={"text": "pass"}, - tensorflow_version="1.14", - weight_type="tensorflow_saved_model_bundle", - halo=[[0, 0, 0, 0]], - output_offset=[[0, 0, 0, 0]], - output_reference=["input0"], - output_scale=[[1.0, 1.0, 1.0, 1.0]], - output_names=["output0"], -) - -``` diff --git a/tests/data/dummy_tensorflow/test_input.npy b/tests/data/dummy_tensorflow/test_input.npy deleted file mode 100644 index 048f67e5..00000000 Binary files a/tests/data/dummy_tensorflow/test_input.npy and /dev/null differ diff --git a/tests/data/dummy_tensorflow/test_output.npy b/tests/data/dummy_tensorflow/test_output.npy deleted file mode 100644 index 093872dc..00000000 Binary files a/tests/data/dummy_tensorflow/test_output.npy and /dev/null differ diff --git a/tests/data/tiny_models.py b/tests/data/tiny_models.py deleted file mode 100644 index 72ac02ec..00000000 --- a/tests/data/tiny_models.py +++ /dev/null @@ -1,102 +0,0 @@ -import logging - -import torch - -logger = logging.getLogger(__name__) - - -class TestModel0(torch.nn.Module): - def __init__(self, N=7, D_in=3, H=4, D_out=2): - super().__init__() - self.linear1 = torch.nn.Linear(D_in, H) - self.linear2 = torch.nn.Linear(H, D_out) - - torch.manual_seed(0) - self.x = torch.randn(N, D_in) - self.y = self.forward(self.x).detach() - - def forward(self, x): - h_relu = self.linear1(x).clamp(min=0) - y_pred = self.linear2(h_relu) - return y_pred - - -class TinyConvNet2d(torch.nn.Module): - def __init__(self, in_channels=1, 
out_channels=1): - super().__init__() - self.conv1 = torch.nn.Conv2d(in_channels, 16, 1) - self.nlin1 = torch.nn.ReLU() - self.conv2 = torch.nn.Conv2d(16, 64, 1) - self.nlin2 = torch.nn.ReLU() - self.conv3 = torch.nn.Conv2d(64, out_channels, 1) - self.nlin3 = torch.nn.Sigmoid() - - def forward(self, x): - return torch.nn.Sequential(self.conv1, self.nlin1, self.conv2, self.nlin2, self.conv3, self.nlin3)(x) - - -class TinyConvNet3d(torch.nn.Module): - def __init__(self, in_channels=1, out_channels=1): - super().__init__() - self.conv1 = torch.nn.Conv3d(in_channels, 16, 1) - self.nlin1 = torch.nn.ReLU() - self.conv2 = torch.nn.Conv3d(16, 64, 1) - self.nlin2 = torch.nn.ReLU() - self.conv3 = torch.nn.Conv3d(64, out_channels, 1) - self.nlin3 = torch.nn.Sigmoid() - - def forward(self, x): - return torch.nn.Sequential(self.conv1, self.nlin1, self.conv2, self.nlin2, self.conv3, self.nlin3)(x) - - -def train(model, x, y): - loss_fn = torch.nn.MSELoss(reduction="sum") - optimizer = torch.optim.Adam(model.parameters(), lr=1e-3) - - for t in range(10): - y_pred = model(x) - - loss = loss_fn(y_pred, y) - logger.debug(t, loss.item()) - - optimizer.zero_grad() - loss.backward() - optimizer.step() - - -if __name__ == "__main__": - test1 = TestModel0() - print("train", test1.training) - # test.to(dtype=torch.float) - y = test1(test1.x) - assert y.allclose(test1.y) - - test2 = test1.__class__() - test2.eval() - test2.load_state_dict(test1.state_dict()) - print("here train2", test2.training) - print(id(test1.state_dict())) - print(id(test2.state_dict())) - - sd = test2.state_dict() - for key, value in sd.items(): - sd[key] = torch.zeros_like(value) - - print("train2", test2.training) - - print(id(test1) == id(test2)) - print(id(test1.state_dict()["linear1.weight"]) == id(test2.state_dict()["linear1.weight"])) - print(id(test1.state_dict()), test1.state_dict()["linear1.weight"]) - print(id(test2.state_dict()), test2.state_dict()["linear1.weight"]) - test2.load_state_dict(sd) - print(id(test1) == id(test2)) - magic1 = test1.state_dict() - magic2 = test2.state_dict() - print(id(test1.state_dict()) == id(test2.state_dict())) - print(id(magic1) == id(magic2)) - # print(id(magic1['linear1.weight']) == id(magic2['linear1.weight'])) - # print(id(test1.state_dict()['linear1.weight']) == id(test2.state_dict()['linear1.weight'])) - print(id(test1.state_dict()), test1.state_dict()["linear1.weight"]) - print(id(test2.state_dict()), test2.state_dict()["linear1.weight"]) - - print("train2", test2.training) diff --git a/tests/data/unet2d/UNet2DNucleiBroad.md b/tests/data/unet2d/UNet2DNucleiBroad.md deleted file mode 100644 index e69de29b..00000000 diff --git a/tests/data/unet2d/UNet2DNucleiBroad.model.yaml b/tests/data/unet2d/UNet2DNucleiBroad.model.yaml deleted file mode 100644 index 2da3d352..00000000 --- a/tests/data/unet2d/UNet2DNucleiBroad.model.yaml +++ /dev/null @@ -1,76 +0,0 @@ -# TODO physical scale of the data -format_version: 0.3.3 -type: model - -name: UNet 2D Nuclei Broad -description: A 2d U-Net trained on the nuclei broad dataset. -authors: - - name: "Constantin Pape;@bioimage-io" - affiliation: "EMBL Heidelberg" - orcid: "0000-0001-6562-7187" - - name: "Fynn Beuttenmueller" - affiliation: "EMBL Heidelberg" - orcid: "0000-0002-8567-6389" - -# we allow for multiple citations. Each citation contains TEXT, DOI and URL. One of DOI or URL needs to be given. -cite: - - text: "Ronneberger, Olaf et al. U-net: Convolutional networks for biomedical image segmentation. MICCAI 2015." 
- doi: https://doi.org/10.1007/978-3-319-24574-4_28 - - text: "2018 Data Science Bowl" - url: https://www.kaggle.com/c/data-science-bowl-2018 - -git_repo: https://github.com/bioimage-io/spec-bioimage-io/tree/main/example_specs/models/unet2d_nuclei_broad -tags: [unet2d, pytorch, nucleus, segmentation, dsb2018] -license: MIT - -documentation: UNet2DNucleiBroad.md -covers: [] -attachments: {} -timestamp: 2019-12-11T12:22:32Z # ISO 8601 - -inputs: - - name: raw - description: raw input - axes: bcyx # letters of axes in btczyx - data_type: float32 - data_range: [-.inf, .inf] - shape: [1, 1, 512, 512] - preprocessing: # list of preprocessing steps - - name: zero_mean_unit_variance # name of preprocessing step - kwargs: - mode: per_sample # mode in [fixed, per_dataset, per_sample] - axes: yx # subset of axes to normalize jointly, batch ('b') is not a valid axis key here! - -outputs: - - name: probability - description: probability in [0,1] - axes: bcyx - data_type: float32 - data_range: [-.inf, .inf] - halo: [0, 0, 32, 32] - shape: - reference_tensor: raw - scale: [1.0, 1.0, 1.0, 1.0] - offset: [0, 0, 0, 0] - -language: python -framework: pytorch -source: unet2d.py:UNet2d -sha256: cf42a6d86adeb4eb6e8e37b539a20e5413866b183bed88f4e2e26ad1639761ed -kwargs: {input_channels: 1, output_channels: 1} -dependencies: conda:./environment.yaml - -test_inputs: [test_input.npy] -test_outputs: [test_output.npy] - -sample_inputs: [test_input.npy] -sample_outputs: [test_output.npy] - -weights: - pytorch_state_dict: - authors: - - name: "Constantin Pape;@bioimage-io" - affiliation: "EMBL Heidelberg" - orcid: "0000-0001-6562-7187" - sha256: e4d3885bccbe41cbf6c1d825f3cd2b707c7021ead5593156007e407a16b27cf2 - source: https://zenodo.org/record/3446812/files/unet2d_weights.torch \ No newline at end of file diff --git a/tests/data/unet2d/environment.yaml b/tests/data/unet2d/environment.yaml deleted file mode 100644 index 4297c6ca..00000000 --- a/tests/data/unet2d/environment.yaml +++ /dev/null @@ -1,8 +0,0 @@ -name: - unet2d_nuclei_broad -channels: - - conda-forge - - defaults -dependencies: - - pytorch - - numpy diff --git a/tests/data/unet2d/test_input.npy b/tests/data/unet2d/test_input.npy deleted file mode 100644 index 228057f8..00000000 Binary files a/tests/data/unet2d/test_input.npy and /dev/null differ diff --git a/tests/data/unet2d/test_output.npy b/tests/data/unet2d/test_output.npy deleted file mode 100644 index 4aea8257..00000000 Binary files a/tests/data/unet2d/test_output.npy and /dev/null differ diff --git a/tests/data/unet2d/unet2d.py b/tests/data/unet2d/unet2d.py deleted file mode 100644 index fedc9be1..00000000 --- a/tests/data/unet2d/unet2d.py +++ /dev/null @@ -1,73 +0,0 @@ -import torch -import torch.nn as nn - - -class Upsample(nn.Module): - def __init__(self, scale_factor, mode="bilinear"): - super().__init__() - self.scale_factor = scale_factor - self.mode = mode - - def forward(self, input): - return nn.functional.interpolate(input, scale_factor=self.scale_factor, mode=self.mode, align_corners=False) - - -class UNet2d(nn.Module): - def __init__(self, input_channels, output_channels, training=False): - super().__init__() - self.input_channels = input_channels - self.output_channels = output_channels - self.n_levels = 3 - - self.encoders = nn.ModuleList( - [self.conv_layer(self.input_channels, 16), self.conv_layer(16, 32), self.conv_layer(32, 64)] - ) - self.downsamplers = nn.ModuleList([self.downsampler()] * self.n_levels) - - self.base = self.conv_layer(64, 128) - - self.decoders = 
nn.ModuleList([self.conv_layer(128, 64), self.conv_layer(64, 32), self.conv_layer(32, 16)]) - self.upsamplers = nn.ModuleList([self.upsampler(128, 64), self.upsampler(64, 32), self.upsampler(32, 16)]) - - self.output = nn.Conv2d(16, self.output_channels, 1) - self.training = training - - def conv_layer(self, in_channels, out_channels): - kernel_size = 3 - padding = 1 - return nn.Sequential( - nn.Conv2d(in_channels, out_channels, kernel_size, padding=padding), - nn.Conv2d(out_channels, out_channels, kernel_size, padding=padding), - nn.ReLU(inplace=True), - ) - - def downsampler(self): - return nn.MaxPool2d(2) - - def upsampler(self, in_channels, out_channels): - return nn.Sequential(Upsample(2), nn.Conv2d(in_channels, out_channels, 1)) - - def forward(self, input): - x = input - - from_encoder = [] - for encoder, sampler in zip(self.encoders, self.downsamplers): - x = encoder(x) - from_encoder.append(x) - x = sampler(x) - - x = self.base(x) - - for decoder, sampler, enc in zip(self.decoders, self.upsamplers, from_encoder[::-1]): - x = sampler(x) - x = torch.cat([enc, x], dim=1) - x = decoder(x) - - x = self.output(x) - - # apply a sigmoid directly if we are in inference mode - if not self.training: - # postprocessing - x = torch.sigmoid(x) - - return x diff --git a/tests/data/unet2d_onnx/UNet2DNucleiBroad.md b/tests/data/unet2d_onnx/UNet2DNucleiBroad.md deleted file mode 100644 index e69de29b..00000000 diff --git a/tests/data/unet2d_onnx/UNet2DNucleiBroad.model.yaml b/tests/data/unet2d_onnx/UNet2DNucleiBroad.model.yaml deleted file mode 100644 index 7225c75d..00000000 --- a/tests/data/unet2d_onnx/UNet2DNucleiBroad.model.yaml +++ /dev/null @@ -1,73 +0,0 @@ -# TODO physical scale of the data -format_version: 0.3.0 - -name: UNet 2D Nuclei Broad -description: A 2d U-Net trained on the nuclei broad dataset. -authors: - - Constantin Pape;@bioimage-io - - Fynn Beuttenmüller - -# we allow for multiple citations. Each citation contains TEXT, DOI and URL. One of DOI or URL needs to be given. -cite: - - text: "Ronneberger, Olaf et al. U-net: Convolutional networks for biomedical image segmentation. MICCAI 2015." - doi: https://doi.org/10.1007/978-3-319-24574-4_28 - - text: "2018 Data Science Bowl" - url: https://www.kaggle.com/c/data-science-bowl-2018 - -git_repo: https://github.com/bioimage-io/pytorch-bioimage-io/tree/master/specs/models/unet2d -tags: [unet2d, pytorch, nucleus, segmentation, dsb2018] -license: MIT - -documentation: UNet2DNucleiBroad.md -covers: [] # todo unet 2d arch image -attachments: {} -timestamp: 2019-12-11T12:22:32Z # ISO 8601 - -inputs: - - name: raw - description: raw input - axes: bcyx # letters of axes in btczyx - data_type: float32 - data_range: [-inf, inf] - shape: [1, 1, 512, 512] - preprocessing: # list of preprocessing steps - - name: zero_mean_unit_variance # name of preprocessing step - kwargs: - mode: per_sample # mode in [fixed, per_dataset, per_sample] - axes: yx # subset of axes to normalize jointly, batch ('b') is not a valid axis key here! -# # example for zero_mean_unit_variance with mode 'fixed' -# - name: zero_mean_unit_variance -# kwargs: # example kwargs for zero_mean_unit_variance -# mode: fixed -# axes: xy # subset of axes to reduce -# mean: [262.3] # mean if mode == fixed. An array as nested lists. Its shape equals the input shape without the reduced axes. 
-# std: [220.2] # standard deviation if mode == fixed analogously to mean - -outputs: - - name: probability - description: probability in [0,1] - axes: bcyx - data_type: float32 - data_range: [-inf, inf] - halo: [0, 0, 32, 32] - shape: - reference_input: raw - scale: [1, 1, 1, 1] - offset: [0, 0, 0, 0] - -language: python -framework: pytorch -source: bioimageio.torch.models.unet2d.UNet2d -kwargs: {input_channels: 1, output_channels: 1} -dependencies: conda:../environment.yaml - -test_inputs: [test_input.npy] -test_outputs: [test_output.npy] - -sample_inputs: [sample_input.npy] -sample_outputs: [sample_output.npy] - -weights: - onnx: - sha256: 6d180e39527789d8a17b39a6eb78405bdeb58c3e6c2cf8d5a2c6dcc3ac5dcc7f - source: ./weights.onnx diff --git a/tests/data/unet2d_onnx/cover0.png b/tests/data/unet2d_onnx/cover0.png deleted file mode 100644 index cac369cb..00000000 Binary files a/tests/data/unet2d_onnx/cover0.png and /dev/null differ diff --git a/tests/data/unet2d_onnx/sample_input.tif b/tests/data/unet2d_onnx/sample_input.tif deleted file mode 100644 index 640f1a75..00000000 Binary files a/tests/data/unet2d_onnx/sample_input.tif and /dev/null differ diff --git a/tests/data/unet2d_onnx/sample_output.tif b/tests/data/unet2d_onnx/sample_output.tif deleted file mode 100644 index be0dfc2e..00000000 Binary files a/tests/data/unet2d_onnx/sample_output.tif and /dev/null differ diff --git a/tests/data/unet2d_onnx/test_input.npy b/tests/data/unet2d_onnx/test_input.npy deleted file mode 100644 index 228057f8..00000000 Binary files a/tests/data/unet2d_onnx/test_input.npy and /dev/null differ diff --git a/tests/data/unet2d_onnx/test_output.npy b/tests/data/unet2d_onnx/test_output.npy deleted file mode 100644 index 4aea8257..00000000 Binary files a/tests/data/unet2d_onnx/test_output.npy and /dev/null differ diff --git a/tests/data/unet2d_onnx/weights.onnx b/tests/data/unet2d_onnx/weights.onnx deleted file mode 100644 index 7ac57a89..00000000 Binary files a/tests/data/unet2d_onnx/weights.onnx and /dev/null differ diff --git a/tests/data/unet2d_torchscript/UNet2DNucleiBroad.md b/tests/data/unet2d_torchscript/UNet2DNucleiBroad.md deleted file mode 100644 index e69de29b..00000000 diff --git a/tests/data/unet2d_torchscript/UNet2DNucleiBroad.model.yaml b/tests/data/unet2d_torchscript/UNet2DNucleiBroad.model.yaml deleted file mode 100644 index eeca5837..00000000 --- a/tests/data/unet2d_torchscript/UNet2DNucleiBroad.model.yaml +++ /dev/null @@ -1,66 +0,0 @@ -# TODO physical scale of the data -format_version: 0.3.0 - -name: UNet 2D Nuclei Broad -description: A 2d U-Net trained on the nuclei broad dataset. -authors: - - Constantin Pape;@bioimage-io - - Fynn Beuttenmüller - -# we allow for multiple citations. Each citation contains TEXT, DOI and URL. One of DOI or URL needs to be given. -cite: - - text: "Ronneberger, Olaf et al. U-net: Convolutional networks for biomedical image segmentation. MICCAI 2015." 
- doi: https://doi.org/10.1007/978-3-319-24574-4_28 - - text: "2018 Data Science Bowl" - url: https://www.kaggle.com/c/data-science-bowl-2018 - -git_repo: https://github.com/bioimage-io/pytorch-bioimage-io/tree/master/specs/models/unet2d -tags: [unet2d, pytorch, nucleus, segmentation, dsb2018] -license: MIT - -documentation: UNet2DNucleiBroad.md -covers: [cover0.png] -attachments: {} -timestamp: 2019-12-11T12:22:32Z # ISO 8601 - -inputs: - - name: raw - description: raw input - axes: bcyx # letters of axes in btczyx - data_type: float32 - data_range: [-inf, inf] - shape: [1, 1, 512, 512] - preprocessing: # list of preprocessing steps - - name: zero_mean_unit_variance # name of preprocessing step - kwargs: - mode: per_sample # mode in [fixed, per_dataset, per_sample] - axes: yx # subset of axes to normalize jointly, batch ('b') is not a valid axis key here! - -outputs: - - name: probability - description: probability in [0,1] - axes: bcyx - data_type: float32 - data_range: [-inf, inf] - halo: [0, 0, 32, 32] - shape: - reference_input: raw - scale: [1, 1, 1, 1] - offset: [0, 0, 0, 0] - -language: python -framework: pytorch -source: bioimageio.torch.models.unet2d.UNet2d -kwargs: {input_channels: 1, output_channels: 1} -dependencies: conda:../environment.yaml - -test_inputs: [test_input.npy] -test_outputs: [test_output.npy] - -sample_inputs: [sample_input.npy] -sample_outputs: [sample_output.npy] - -weights: - pytorch_script: - sha256: b7f9dcf1da55a6d4cb29a0186d5558a86e4969916368479754517d00fa365848 - source: ./weights.pt diff --git a/tests/data/unet2d_torchscript/cover0.png b/tests/data/unet2d_torchscript/cover0.png deleted file mode 100644 index cac369cb..00000000 Binary files a/tests/data/unet2d_torchscript/cover0.png and /dev/null differ diff --git a/tests/data/unet2d_torchscript/test_input.npy b/tests/data/unet2d_torchscript/test_input.npy deleted file mode 100644 index 228057f8..00000000 Binary files a/tests/data/unet2d_torchscript/test_input.npy and /dev/null differ diff --git a/tests/data/unet2d_torchscript/test_output.npy b/tests/data/unet2d_torchscript/test_output.npy deleted file mode 100644 index de18244e..00000000 Binary files a/tests/data/unet2d_torchscript/test_output.npy and /dev/null differ diff --git a/tests/data/unet2d_torchscript/weights.pt b/tests/data/unet2d_torchscript/weights.pt deleted file mode 100644 index abe68864..00000000 Binary files a/tests/data/unet2d_torchscript/weights.pt and /dev/null differ diff --git a/tests/test_converters.py b/tests/test_converters.py index be268e42..e112ede9 100644 --- a/tests/test_converters.py +++ b/tests/test_converters.py @@ -3,18 +3,7 @@ import xarray as xr from numpy.testing import assert_array_equal -from tiktorch.converters import ( - NamedExplicitOutputShape, - NamedImplicitOutputShape, - NamedParametrizedShape, - Sample, - input_shape_to_pb_input_shape, - numpy_to_pb_tensor, - output_shape_to_pb_output_shape, - pb_tensor_to_numpy, - pb_tensor_to_xarray, - xarray_to_pb_tensor, -) +from tiktorch.converters import Sample, numpy_to_pb_tensor, pb_tensor_to_numpy, pb_tensor_to_xarray, xarray_to_pb_tensor from tiktorch.proto import inference_pb2 @@ -177,99 +166,6 @@ def test_should_same_data(self, shape): assert_array_equal(arr, result_arr) -class TestShapeConversions: - def to_named_explicit_shape(self, shape, axes, halo): - return NamedExplicitOutputShape( - halo=[(name, dim) for name, dim in zip(axes, halo)], shape=[(name, dim) for name, dim in zip(axes, shape)] - ) - - def to_named_implicit_shape(self, axes, halo, offset, scales, 
reference_tensor): - return NamedImplicitOutputShape( - halo=[(name, dim) for name, dim in zip(axes, halo)], - offset=[(name, dim) for name, dim in zip(axes, offset)], - scale=[(name, scale) for name, scale in zip(axes, scales)], - reference_tensor=reference_tensor, - ) - - def to_named_paramtrized_shape(self, min_shape, axes, step): - return NamedParametrizedShape( - min_shape=[(name, dim) for name, dim in zip(axes, min_shape)], - step_shape=[(name, dim) for name, dim in zip(axes, step)], - ) - - @pytest.mark.parametrize( - "shape,axes,halo", - [((42,), "x", (0,)), ((42, 128, 5), "abc", (1, 1, 1)), ((5, 4, 3, 2, 1, 42), "btzyxc", (1, 2, 3, 4, 5, 24))], - ) - def test_explicit_output_shape(self, shape, axes, halo): - named_shape = self.to_named_explicit_shape(shape, axes, halo) - pb_shape = output_shape_to_pb_output_shape(named_shape) - - assert pb_shape.shapeType == 0 - assert pb_shape.referenceTensor == "" - assert len(pb_shape.scale.namedFloats) == 0 - assert len(pb_shape.offset.namedFloats) == 0 - - assert [(d.name, d.size) for d in pb_shape.halo.namedInts] == [(name, size) for name, size in zip(axes, halo)] - assert [(d.name, d.size) for d in pb_shape.shape.namedInts] == [(name, size) for name, size in zip(axes, shape)] - - @pytest.mark.parametrize( - "axes,halo,offset,scales,reference_tensor", - [("x", (0,), (10,), (1.0,), "forty-two"), ("abc", (1, 1, 1), (1, 2, 3), (1.0, 2.0, 3.0), "helloworld")], - ) - def test_implicit_output_shape(self, axes, halo, offset, scales, reference_tensor): - named_shape = self.to_named_implicit_shape(axes, halo, offset, scales, reference_tensor) - pb_shape = output_shape_to_pb_output_shape(named_shape) - - assert pb_shape.shapeType == 1 - assert pb_shape.referenceTensor == reference_tensor - assert [(d.name, d.size) for d in pb_shape.scale.namedFloats] == [ - (name, size) for name, size in zip(axes, scales) - ] - assert [(d.name, d.size) for d in pb_shape.offset.namedFloats] == [ - (name, size) for name, size in zip(axes, offset) - ] - - assert [(d.name, d.size) for d in pb_shape.halo.namedInts] == [(name, size) for name, size in zip(axes, halo)] - assert len(pb_shape.shape.namedInts) == 0 - - def test_output_shape_raises(self): - shape = [("a", 1)] - with pytest.raises(TypeError): - _ = output_shape_to_pb_output_shape(shape) - - @pytest.mark.parametrize( - "shape,axes", - [((42,), "x"), ((42, 128, 5), "abc"), ((5, 4, 3, 2, 1, 42), "btzyxc")], - ) - def test_explicit_input_shape(self, shape, axes): - named_shape = [(name, dim) for name, dim in zip(axes, shape)] - pb_shape = input_shape_to_pb_input_shape(named_shape) - - assert pb_shape.shapeType == 0 - assert [(d.name, d.size) for d in pb_shape.shape.namedInts] == [(name, size) for name, size in zip(axes, shape)] - - @pytest.mark.parametrize( - "min_shape,axes,step", - [ - ((42,), "x", (5,)), - ((42, 128, 5), "abc", (1, 2, 3)), - ((5, 4, 3, 2, 1, 42), "btzyxc", (15, 24, 33, 42, 51, 642)), - ], - ) - def test_parametrized_input_shape(self, min_shape, axes, step): - named_shape = self.to_named_paramtrized_shape(min_shape, axes, step) - pb_shape = input_shape_to_pb_input_shape(named_shape) - - assert pb_shape.shapeType == 1 - assert [(d.name, d.size) for d in pb_shape.shape.namedInts] == [ - (name, size) for name, size in zip(axes, min_shape) - ] - assert [(d.name, d.size) for d in pb_shape.stepShape.namedInts] == [ - (name, size) for name, size in zip(axes, step) - ] - - class TestSample: def test_create_sample_from_pb_tensors(self): arr_1 = np.arange(32 * 32, dtype=np.int64).reshape(32, 32) diff --git 
a/tiktorch/converters.py b/tiktorch/converters.py index b7fee3fe..5ce890a7 100644 --- a/tiktorch/converters.py +++ b/tiktorch/converters.py @@ -1,39 +1,13 @@ from __future__ import annotations import dataclasses -from typing import Dict, List, Tuple, Union +from typing import Dict, List import numpy as np import xarray as xr from tiktorch.proto import inference_pb2 -# pairs of axis-shape for a single tensor -NamedInt = Tuple[str, int] -NamedFloat = Tuple[str, float] -NamedShape = List[NamedInt] -NamedVec = List[NamedFloat] - - -@dataclasses.dataclass -class NamedParametrizedShape: - min_shape: NamedShape - step_shape: NamedShape - - -@dataclasses.dataclass -class NamedExplicitOutputShape: - shape: NamedShape - halo: NamedShape - - -@dataclasses.dataclass -class NamedImplicitOutputShape: - reference_tensor: str - offset: NamedShape - scale: NamedVec - halo: NamedShape - @dataclasses.dataclass(frozen=True) class Sample: @@ -77,41 +51,6 @@ def name_float_tuples_to_pb_NamedFloats(name_float_tuples) -> inference_pb2.Name ) -def input_shape_to_pb_input_shape(input_shape: Union[NamedShape, NamedParametrizedShape]) -> inference_pb2.InputShape: - if isinstance(input_shape, NamedParametrizedShape): - return inference_pb2.InputShape( - shapeType=1, - shape=name_int_tuples_to_pb_NamedInts(input_shape.min_shape), - stepShape=name_int_tuples_to_pb_NamedInts(input_shape.step_shape), - ) - else: - return inference_pb2.InputShape( - shapeType=0, - shape=name_int_tuples_to_pb_NamedInts(input_shape), - ) - - -def output_shape_to_pb_output_shape( - output_shape: Union[NamedExplicitOutputShape, NamedImplicitOutputShape] -) -> inference_pb2.InputShape: - if isinstance(output_shape, NamedImplicitOutputShape): - return inference_pb2.OutputShape( - shapeType=1, - halo=name_int_tuples_to_pb_NamedInts(output_shape.halo), - referenceTensor=output_shape.reference_tensor, - scale=name_float_tuples_to_pb_NamedFloats(output_shape.scale), - offset=name_float_tuples_to_pb_NamedFloats(output_shape.offset), - ) - elif isinstance(output_shape, NamedExplicitOutputShape): - return inference_pb2.OutputShape( - shapeType=0, - shape=name_int_tuples_to_pb_NamedInts(output_shape.shape), - halo=name_int_tuples_to_pb_NamedInts(output_shape.halo), - ) - else: - raise TypeError(f"Conversion not supported for type {type(output_shape)}") - - def pb_tensor_to_xarray(tensor: inference_pb2.Tensor) -> inference_pb2.Tensor: if not tensor.dtype: raise ValueError("Tensor dtype is not specified")
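
Taken together, this change set adds an explicit `CloseModelSession` RPC to the `Inference` service, drops the `InputShape`/`OutputShape`/`ModelInfo` messages (together with the matching converters and tests), and removes the bundled dummy and UNet 2D test models. A minimal sketch of the resulting session lifecycle from a Python gRPC client follows; the generated stub module (`inference_pb2_grpc`), the server address, and the `CreateModelSessionRequest` fields are assumptions, since only the service definition appears in this diff.

```python
# Sketch of the Inference session lifecycle after this change (assumptions noted above).
import grpc

from tiktorch.proto import inference_pb2, inference_pb2_grpc  # generated-stub names assumed

with grpc.insecure_channel("localhost:5567") as channel:  # address is an assumption
    stub = inference_pb2_grpc.InferenceStub(channel)

    # CreateModelSessionRequest fields are not shown in this diff; fill them in per the full .proto.
    session = stub.CreateModelSession(inference_pb2.CreateModelSessionRequest())
    try:
        ...  # issue Predict(...) calls against session.id here
    finally:
        # New in this change set: sessions are released explicitly rather than left to linger server-side.
        stub.CloseModelSession(session)
```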
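
On the Python side, `tiktorch.converters` keeps only the tensor round-trip helpers; the `Named*Shape` dataclasses and the `*_shape_to_pb_*` functions go away with the proto messages. A short usage sketch of the surviving API, mirroring the remaining tests (the exact signatures may accept additional optional arguments not visible in this diff):

```python
import numpy as np
import xarray as xr

from tiktorch.converters import numpy_to_pb_tensor, pb_tensor_to_numpy, pb_tensor_to_xarray, xarray_to_pb_tensor

arr = np.arange(32 * 32, dtype=np.int64).reshape(32, 32)

# numpy -> protobuf Tensor -> numpy
pb_tensor = numpy_to_pb_tensor(arr)
np.testing.assert_array_equal(arr, pb_tensor_to_numpy(pb_tensor))

# xarray with named axes -> protobuf Tensor -> xarray
xarr = xr.DataArray(arr, dims=("y", "x"))
restored = pb_tensor_to_xarray(xarray_to_pb_tensor(xarr))
```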
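
The removed `PARAMETRIZED` input shape (and the deleted `Dummy.model_param.yaml` that exercised it) follows the bioimage.io spec convention that valid shapes are `min + n * step` for non-negative integers `n`. For reference, a tiny sketch of that expansion using the values from the deleted file:

```python
# Values from the deleted Dummy.model_param.yaml: shape.min and shape.step for axes bcyx.
min_shape = [1, 1, 64, 64]
step = [0, 0, 2, 1]


def nth_valid_shape(n: int) -> list:
    """Return the n-th valid input shape, following shape = min + n * step."""
    return [m + n * s for m, s in zip(min_shape, step)]


print(nth_valid_shape(0))  # [1, 1, 64, 64]
print(nth_valid_shape(3))  # [1, 1, 70, 67]
```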