diff --git a/README.md b/README.md
index a1aa031..c0f59fe 100644
--- a/README.md
+++ b/README.md
@@ -9,7 +9,8 @@ There are a few "sights" you can metaphorically visit in this repository:
 - Build C++ and/or CUDA extensions by going into the `cpp/` or `cuda/` folder and executing `python setup.py install`,
 - JIT-compile C++ and/or CUDA extensions by going into the `cpp/` or `cuda/` folder and calling `python jit.py`, which will JIT-compile the extension and load it,
 - Benchmark Python vs. C++ vs. CUDA by running `python benchmark.py {py, cpp, cuda} [--cuda]`,
-- Run gradient-checks on the code by running `python grad_check.py {py, cpp, cuda}`.
+- Run gradient checks on the code by running `python grad_check.py {py, cpp, cuda} [--cuda]`,
+- Run output checks on the code by running `python check.py {forward, backward} [--cuda]`.
 
 ## Authors
 
diff --git a/check.py b/check.py
index 2f40e7b..4238e3a 100644
--- a/check.py
+++ b/check.py
@@ -24,6 +24,11 @@ def check_equal(first, second, verbose):
         np.testing.assert_allclose(x, y, err_msg="Index: {}".format(i))
 
 
+def zero_grad(variables):
+    for variable in variables:
+        variable.grad.zero_()
+
+
 def check_forward(variables, with_cuda, verbose):
     baseline_values = python.lltm_baseline.LLTMFunction.apply(*variables)
     cpp_values = cpp.lltm.LLTMFunction.apply(*variables)
@@ -44,6 +49,8 @@ def check_backward(variables, with_cuda, verbose):
     (baseline_values[0] + baseline_values[1]).sum().backward()
     grad_baseline = [var.grad for var in variables]
 
+    zero_grad(variables)
+
     cpp_values = cpp.lltm.LLTMFunction.apply(*variables)
     (cpp_values[0] + cpp_values[1]).sum().backward()
     grad_cpp = [var.grad for var in variables]
@@ -53,6 +60,7 @@ def check_backward(variables, with_cuda, verbose):
     print('Ok')
 
     if with_cuda:
+        zero_grad(variables)
         cuda_values = cuda.lltm.LLTMFunction.apply(*variables)
         (cuda_values[0] + cuda_values[1]).sum().backward()
         grad_cuda = [var.grad for var in variables]
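
Note on why `zero_grad` is needed (illustration only, not part of the patch): PyTorch accumulates gradients in `.grad` across successive `backward()` calls. Since `check_backward` runs backward through the baseline, C++, and CUDA implementations on the *same* input variables, skipping the reset would compare `grad_cpp` and `grad_cuda` against accumulated sums rather than fresh gradients. A minimal standalone sketch, using an assumed toy tensor rather than the LLTM inputs from `check.py`:

```python
import torch

# Toy tensor standing in for one of the shared input variables.
x = torch.ones(3, requires_grad=True)

(x * 2).sum().backward()
print(x.grad)  # tensor([2., 2., 2.])

# A second backward() ADDS to the existing .grad instead of replacing it:
(x * 2).sum().backward()
print(x.grad)  # tensor([4., 4., 4.]) -- accumulated, would corrupt the comparison

# Resetting first, as the new zero_grad(variables) helper does, restores a fresh gradient:
x.grad.zero_()
(x * 2).sum().backward()
print(x.grad)  # tensor([2., 2., 2.])
```

This is why the patch inserts `zero_grad(variables)` between the baseline and C++ passes and again before the CUDA pass.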