
Commit

Convert CRLF to LF in repo. Enforce correct line endings
m-novikov committed Mar 28, 2019
1 parent a3336e2 commit feea07c
Showing 5 changed files with 490 additions and 473 deletions.
16 changes: 16 additions & 0 deletions .editorconfig
@@ -0,0 +1,16 @@
root = true

[*]
indent_style = space
indent_size = 4
tab_width = 8
end_of_line = lf
charset = utf-8
trim_trailing_whitespace = true
insert_final_newline = true

[*.{yml,yaml}]
indent_size = 2

[Makefile]
indent_style = tab
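
These defaults give every file 4-space indentation, LF line endings, UTF-8 encoding, trimmed trailing whitespace, and a final newline, with 2-space indentation for YAML and hard tabs for Makefiles (make requires tab-indented recipes). As an illustration only, not part of this commit, a project that wanted to preserve Markdown's trailing-space hard line breaks could add a further override:

[*.md]
trim_trailing_whitespace = false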
1 change: 1 addition & 0 deletions .gitattributes
@@ -0,0 +1 @@
* text=auto eol=lf
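
This single attribute tells git to treat every file it detects as text as subject to end-of-line normalization and to check it out with LF endings, so the CRLF-to-LF conversion below cannot be reintroduced by checkouts on other platforms. As a minimal sketch of the usual follow-up step (assuming git 2.16 or newer; these commands are not part of this diff), an existing working tree is renormalized with:

git add --renormalize .
git commit -m "Normalize line endings to LF"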
70 changes: 35 additions & 35 deletions tests/handler/test_inference.py
@@ -1,37 +1,37 @@
import torch

from torch import multiprocessing as mp

from tiktorch.handler.inference import IInference, InferenceProcess, run
from tiktorch.tiktypes import TikTensor
from tiktorch.rpc.mp import MPClient, Shutdown

from tests.data.tiny_models import TinyConvNet2d


def test_inference(tiny_model_2d):
config = tiny_model_2d["config"]
in_channels = config["input_channels"]
model = TinyConvNet2d(in_channels=in_channels)
inference = InferenceProcess(config=config, model=model)
data = TikTensor(torch.zeros(in_channels, 15, 15), (0,))
pred = inference.forward(data)
assert isinstance(pred.result(), TikTensor)
try:
inference.shutdown()
except Shutdown:
pass


def test_inference_in_proc(tiny_model_2d, log_queue):
config = tiny_model_2d["config"]
in_channels = config["input_channels"]
model = TinyConvNet2d(in_channels=in_channels)
handler_conn, inference_conn = mp.Pipe()
p = mp.Process(target=run, kwargs={"conn": inference_conn, "model": model, "config": config, "log_queue": log_queue})
p.start()
client = MPClient(IInference(), handler_conn)
data = TikTensor(torch.zeros(in_channels, 15, 15), (0,))
f = client.forward(data)
f.result()
client.shutdown()
(The remaining two changed files were not loaded and are not shown here.)
