diff --git a/.readthedocs.yml b/.readthedocs.yml
new file mode 100644
index 00000000..d2e3e018
--- /dev/null
+++ b/.readthedocs.yml
@@ -0,0 +1,24 @@
+# .readthedocs.yml
+# Read the Docs configuration file
+# See https://docs.readthedocs.io/en/stable/config-file/v2.html for details
+
+# Required
+version: 2
+
+# Build documentation in the docs/ directory with Sphinx
+sphinx:
+ configuration: docs/source/conf.py
+
+# Build documentation with MkDocs
+#mkdocs:
+# configuration: mkdocs.yml
+
+# Optionally build your docs in additional formats such as PDF and ePub
+formats: all
+
+# Optionally set the version of Python and requirements required to build your docs
+python:
+ version: 3.7
+ install:
+ - requirements: docs/requirements.txt
+ #- requirements: requirements.txt
\ No newline at end of file
diff --git a/README.md b/README.md
index 04f6fb57..1518fd99 100644
--- a/README.md
+++ b/README.md
@@ -1,4 +1,4 @@
-![logo](docs/_static/rising_logo.svg "rising")
+![logo](docs/source/images/logo/rising_logo.svg "rising")
![Project Status](https://img.shields.io/badge/status-alpha-red)
![PyPI](https://img.shields.io/pypi/v/rising)
diff --git a/docs/Makefile b/docs/Makefile
new file mode 100644
index 00000000..69fe55ec
--- /dev/null
+++ b/docs/Makefile
@@ -0,0 +1,19 @@
+# Minimal makefile for Sphinx documentation
+#
+
+# You can set these variables from the command line.
+SPHINXOPTS =
+SPHINXBUILD = sphinx-build
+SOURCEDIR = source
+BUILDDIR = build
+
+# Put it first so that "make" without argument is like "make help".
+help:
+ @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
+
+.PHONY: help Makefile
+
+# Catch-all target: route all unknown targets to Sphinx using the new
+# "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS).
+%: Makefile
+ @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
\ No newline at end of file
diff --git a/docs/_static/rising_Logo.png b/docs/_static/rising_Logo.png
deleted file mode 100644
index 1f0b9702..00000000
Binary files a/docs/_static/rising_Logo.png and /dev/null differ
diff --git a/docs/_static/rising_logo.svg b/docs/_static/rising_logo.svg
deleted file mode 100644
index 4d6fab25..00000000
--- a/docs/_static/rising_logo.svg
+++ /dev/null
@@ -1,36 +0,0 @@
-
-
diff --git a/docs/build_docs.sh b/docs/build_docs.sh
new file mode 100644
index 00000000..9474626f
--- /dev/null
+++ b/docs/build_docs.sh
@@ -0,0 +1,2 @@
+#!/usr/bin/env bash
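+# clean, then build twice with warnings treated as errors (-W): presumably
+# the first pass populates caches and cross-references, and the second pass
+# is then expected to build cleanly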
+make clean ; make html --debug --jobs 2 SPHINXOPTS="-W"; make html --debug --jobs 2 SPHINXOPTS="-W"
\ No newline at end of file
diff --git a/docs/make.bat b/docs/make.bat
new file mode 100644
index 00000000..8710393c
--- /dev/null
+++ b/docs/make.bat
@@ -0,0 +1,35 @@
+@ECHO OFF
+
+pushd %~dp0
+
+REM Command file for Sphinx documentation
+
+if "%SPHINXBUILD%" == "" (
+ set SPHINXBUILD=sphinx-build
+)
+set SOURCEDIR=source
+set BUILDDIR=build
+
+if "%1" == "" goto help
+
+%SPHINXBUILD% >NUL 2>NUL
+if errorlevel 9009 (
+ echo.
+ echo.The 'sphinx-build' command was not found. Make sure you have Sphinx
+ echo.installed, then set the SPHINXBUILD environment variable to point
+ echo.to the full path of the 'sphinx-build' executable. Alternatively you
+ echo.may add the Sphinx directory to PATH.
+ echo.
+ echo.If you don't have Sphinx installed, grab it from
+ echo.http://sphinx-doc.org/
+ exit /b 1
+)
+
+%SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS%
+goto end
+
+:help
+%SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS%
+
+:end
+popd
\ No newline at end of file
diff --git a/docs/requirements.txt b/docs/requirements.txt
new file mode 100644
index 00000000..6059b679
--- /dev/null
+++ b/docs/requirements.txt
@@ -0,0 +1,12 @@
+sphinx>=2.0, <3.0
+recommonmark # fails with badges
+nbsphinx
+pandoc
+docutils
+sphinxcontrib-fulltoc
+sphinxcontrib-mockautodoc
+git+https://github.com/PhoenixDL/rising_sphinx_theme.git
+sphinx-autodoc-typehints
+sphinx-paramlinks<0.4.0
+sphinxcontrib.katex
+javasphinx
\ No newline at end of file
diff --git a/docs/source/_templates/layout.html b/docs/source/_templates/layout.html
new file mode 100644
index 00000000..0686497a
--- /dev/null
+++ b/docs/source/_templates/layout.html
@@ -0,0 +1,39 @@
+{% extends "!layout.html" %}
+
+
+{% block menu %}
+
+{{ super() }}
+{% endblock %}
+
+{% block footer %}
+{{ super() }}
+
+
+
+
+
+
+
+{% endblock %}
\ No newline at end of file
diff --git a/docs/source/_templates/theme_variables.jinja b/docs/source/_templates/theme_variables.jinja
new file mode 100644
index 00000000..888097fa
--- /dev/null
+++ b/docs/source/_templates/theme_variables.jinja
@@ -0,0 +1,13 @@
+{%- set external_urls = {
+ 'github': 'https://github.com/PhoenixDL/rising',
+ 'github_issues': 'https://github.com/PhoenixDL/rising/issues',
+ 'contributing': 'https://github.com/PhoenixDL/rising/blob/master/CONTRIBUTING.md',
+ 'docs': 'https://rising.rtfd.io/en/latest',
+ 'discuss': 'https://phoenixdl.slack.com',
+ 'previous_pytorch_versions': 'https://rising.rtfd.io/en/latest/',
+ 'home': 'https://rising.rtfd.io/en/latest/',
+ 'get_started': 'https://rising.readthedocs.io/en/latest/get_started.html',
+ 'features': 'https://rising.rtfd.io/en/latest/',
+ 'support': 'https://github.com/PhoenixDL/rising/issues',
+}
+-%}
\ No newline at end of file
diff --git a/docs/source/_templates_stable/layout.html b/docs/source/_templates_stable/layout.html
new file mode 100644
index 00000000..d971396d
--- /dev/null
+++ b/docs/source/_templates_stable/layout.html
@@ -0,0 +1,35 @@
+
+{% extends "!layout.html" %}
+
+
+{% block menu %}
+
+{{ super() }}
+{% endblock %}
+
+{% block footer %}
+{{ super() }}
+
+
+
+
+
+
+
+{% endblock %}
\ No newline at end of file
diff --git a/docs/source/conf.py b/docs/source/conf.py
new file mode 100644
index 00000000..8f3f4ac1
--- /dev/null
+++ b/docs/source/conf.py
@@ -0,0 +1,453 @@
+# -*- coding: utf-8 -*-
+#
+# Configuration file for the Sphinx documentation builder.
+#
+# This file does only contain a selection of the most common options. For a
+# full list see the documentation:
+# http://www.sphinx-doc.org/en/master/config
+
+# -- Path setup --------------------------------------------------------------
+
+# If extensions (or modules to document with autodoc) are in another directory,
+# add these directories to sys.path here. If the directory is relative to the
+# documentation root, use os.path.abspath to make it absolute, like shown here.
+
+import inspect
+import os
+import shutil
+import sys
+
+# import m2r
+import rising_sphinx_theme
+
+PATH_HERE = os.path.abspath(os.path.dirname(__file__))
+PATH_ROOT = os.path.dirname(os.path.dirname(PATH_HERE))
+sys.path.insert(0, os.path.abspath(PATH_ROOT))
+
+import rising # noqa: E402
+
+# -- Project documents -------------------------------------------------------
+
+# # export the documentation
+# with open('intro.rst', 'w') as fp:
+# intro = pytorch_lightning.__doc__.replace(os.linesep + ' ', '')
+# fp.write(m2r.convert(intro))
+# # fp.write(pytorch_lightning.__doc__)
+
+# # export the READme
+# with open(os.path.join(PATH_ROOT, 'README.md'), 'r') as fp:
+# readme = fp.read()
+# # replace all paths to relative
+# for ndir in (os.path.basename(p) for p in glob.glob(os.path.join(PATH_ROOT, '*'))
+# if os.path.isdir(p)):
+# readme = readme.replace('](%s/' % ndir, '](%s/%s/' % (PATH_ROOT, ndir))
+# with open('readme.md', 'w') as fp:
+# fp.write(readme)
+
+for md in ['CONTRIBUTING.md']:
+ shutil.copy(os.path.join(PATH_ROOT, md), os.path.join(PATH_HERE, md.lower()))
+
+# -- Project information -----------------------------------------------------
+
+project = 'rising'
+copyright = rising.__copyright__
+author = rising.__author__
+
+# The short X.Y version
+version = rising.__version__
+# The full version, including alpha/beta/rc tags
+release = rising.__version__
+
+
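+# a release is a plain X.Y.Z version: no local part ('+'), no 'dirty' flag
+# from versioneer and at most three version components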
+IS_RELEASE = not ('+' in version or 'dirty' in version or len(version.split('.')) > 3)
+# -- General configuration ---------------------------------------------------
+
+# If your documentation needs a minimal Sphinx version, state it here.
+
+needs_sphinx = '2.0'
+
+# Add any Sphinx extension module names here, as strings. They can be
+# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
+# ones.
+
+extensions = [
+ 'sphinx.ext.autodoc',
+ 'sphinx.ext.autosummary',
+ 'sphinx.ext.doctest',
+ 'sphinx.ext.intersphinx',
+ 'sphinx.ext.todo',
+ 'sphinx.ext.coverage',
+ 'sphinx.ext.linkcode',
+ 'sphinx.ext.autosummary',
+ 'sphinx.ext.napoleon',
+ 'sphinx.ext.viewcode',
+ 'sphinxcontrib.katex',
+ 'recommonmark',
+ 'sphinx.ext.autosectionlabel',
+ 'nbsphinx',
+ 'sphinx_autodoc_typehints',
+ 'sphinx_paramlinks',
+ 'javasphinx'
+]
+
+katex_prerender = True
+
+napoleon_use_ivar = True
+# Add any paths that contain templates here, relative to this directory.
+templates_path = ['_templates']
+
+if IS_RELEASE:
+ templates_path = ['_templates_stable']
+
+# The suffix(es) of source filenames.
+# You can specify multiple suffix as a list of string:
+#
+# source_suffix = ['.rst', '.md']
+# source_suffix = ['.rst', '.md', '.ipynb']
+source_suffix = {
+ '.rst': 'restructuredtext',
+ '.txt': 'markdown',
+ '.md': 'markdown',
+ '.ipynb': 'nbsphinx',
+}
+
+# The master toctree document.
+master_doc = 'index'
+
+# The language for content autogenerated by Sphinx. Refer to documentation
+# for a list of supported languages.
+#
+# This is also used if you do content translation via gettext catalogs.
+# Usually you set "language" from the command line for these cases.
+language = None
+
+# List of patterns, relative to source directory, that match files and
+# directories to ignore when looking for source files.
+# This pattern also affects html_static_path and html_extra_path.
+exclude_patterns = [
+]
+
+# The name of the Pygments (syntax highlighting) style to use.
+pygments_style = 'sphinx'
+
+# -- Options for HTML output -------------------------------------------------
+
+# The theme to use for HTML and HTML Help pages. See the documentation for
+# a list of builtin themes.
+# http://www.sphinx-doc.org/en/master/usage/theming.html#builtin-themes
+# html_theme = 'bizstyle'
+# https://sphinx-themes.org
+# html_theme = 'pytorch_sphinx_theme'
+# html_theme_path = [pytorch_sphinx_theme.get_html_theme_path()]
+html_theme = 'rising_sphinx_theme'
+html_theme_path = [rising_sphinx_theme.get_html_theme_path()]
+
+# Theme options are theme-specific and customize the look and feel of a theme
+# further. For a list of options available for each theme, see the
+# documentation.
+
+html_theme_options = {
+ 'pytorch_project': rising.__homepage__,
+ 'canonical_url': rising.__homepage__,
+ 'collapse_navigation': False,
+ 'display_version': True,
+ 'logo_only': True,
+}
+
+html_logo = 'images/logo/rising_logo.svg'
+
+# Add any paths that contain custom static files (such as style sheets) here,
+# relative to this directory. They are copied after the builtin static files,
+# so a file named "default.css" will overwrite the builtin "default.css".
+html_static_path = ['images']
+
+# Custom sidebar templates, must be a dictionary that maps document names
+# to template names.
+#
+# The default sidebars (for documents that don't match any pattern) are
+# defined by theme itself. Builtin themes are using these templates by
+# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
+# 'searchbox.html']``.
+#
+# html_sidebars = {}
+
+
+# -- Options for HTMLHelp output ---------------------------------------------
+
+# Output file base name for HTML help builder.
+htmlhelp_basename = project + '-doc'
+
+# -- Options for LaTeX output ------------------------------------------------
+
+latex_elements = {
+ # The paper size ('letterpaper' or 'a4paper').
+ # 'papersize': 'letterpaper',
+
+ # The font size ('10pt', '11pt' or '12pt').
+ # 'pointsize': '10pt',
+
+ # Additional stuff for the LaTeX preamble.
+ # 'preamble': '',
+
+ # Latex figure (float) alignment
+ 'figure_align': 'htbp',
+}
+
+# Grouping the document tree into LaTeX files. List of tuples
+# (source start file, target name, title,
+# author, documentclass [howto, manual, or own class]).
+latex_documents = [
+ (master_doc, project + '.tex', project + ' Documentation', author, 'manual'),
+]
+
+# -- Options for manual page output ------------------------------------------
+
+# One entry per manual page. List of tuples
+# (source start file, name, description, authors, manual section).
+man_pages = [
+ (master_doc, project, project + ' Documentation', [author], 1)
+]
+
+# -- Options for Texinfo output ----------------------------------------------
+
+# Grouping the document tree into Texinfo files. List of tuples
+# (source start file, target name, title, author,
+# dir menu entry, description, category)
+texinfo_documents = [
+ (master_doc, project, project + ' Documentation', author, project,
+ rising.__docs__, 'Miscellaneous'),
+]
+
+# -- Options for Epub output -------------------------------------------------
+
+# Bibliographic Dublin Core info.
+epub_title = project
+
+# The unique identifier of the text. This can be a ISBN number
+# or the project homepage.
+#
+# epub_identifier = ''
+
+# A unique identification for the text.
+#
+# epub_uid = ''
+
+# A list of files that should not be packed into the epub file.
+epub_exclude_files = ['search.html']
+
+# -- Extension configuration -------------------------------------------------
+
+# -- Options for intersphinx extension ---------------------------------------
+
+intersphinx_mapping = {
+ 'python': ('https://docs.python.org/3', None),
+ 'torch': ('https://pytorch.org/docs/stable/', None),
+ 'numpy': ('https://docs.scipy.org/doc/numpy/', None),
+ 'PIL': ('https://pillow.readthedocs.io/en/stable/', None),
+ 'dill': ('https://dill.rtfd.io/en/stable', None),
+}
+
+# -- Options for todo extension ----------------------------------------------
+
+# If true, `todo` and `todoList` produce output, else they produce nothing.
+todo_include_todos = True
+
+# Inherit docstrings from parent classes where they are missing
+autodoc_inherit_docstrings = True
+
+# https://github.com/rtfd/readthedocs.org/issues/1139
+# I use sphinx-apidoc to auto-generate API documentation for my project.
+# Right now I have to commit these auto-generated files to my repository
+# so that RTD can build them into HTML docs. It'd be cool if RTD could run
+# sphinx-apidoc for me, since it's easy to forget to regen API docs
+# and commit them to my repo after making changes to my code.
+
+PACKAGES = [
+ rising.__name__,
+]
+
+# -- A patch that prevents Sphinx from cross-referencing ivar tags -------
+# See http://stackoverflow.com/a/41184353/3343043
+
+from docutils import nodes
+from sphinx.util.docfields import TypedField
+from sphinx import addnodes
+import sphinx.ext.doctest
+
+# Without this, doctest adds any example with a `>>>` as a test
+doctest_test_doctest_blocks = ''
+doctest_default_flags = sphinx.ext.doctest.doctest.ELLIPSIS
+
+# def run_apidoc(_):
+# for pkg in PACKAGES:
+# argv = ['-e', '-o', PATH_HERE, os.path.join(PATH_HERE, PATH_ROOT, pkg),
+# '**/test_*', '--force', '--private', '--module-first']
+# try:
+# # Sphinx 1.7+
+# from sphinx.ext import apidoc
+# apidoc.main(argv)
+# except ImportError:
+# # Sphinx 1.6 (and earlier)
+# from sphinx import apidoc
+# argv.insert(0, apidoc.__file__)
+# apidoc.main(argv)
+#
+#
+# def setup(app):
+# app.connect('builder-inited', run_apidoc)
+
+
+# copy all notebooks to local folder
+# path_nbs = os.path.join(PATH_HERE, 'notebooks')
+# if not os.path.isdir(path_nbs):
+# os.mkdir(path_nbs)
+# for path_ipynb in glob.glob(os.path.join(PATH_ROOT, 'notebooks', '*.ipynb')):
+# path_ipynb2 = os.path.join(path_nbs, os.path.basename(path_ipynb))
+# shutil.copy(path_ipynb, path_ipynb2)
+
+# Ignoring Third-party packages
+# https://stackoverflow.com/questions/15889621/sphinx-how-to-exclude-imports-in-automodule
+
+MOCK_REQUIRE_PACKAGES = []
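+# derive bare package names from the requirement files: keep everything
+# before the first occurrence of ',=<>#' (version specifiers and comments)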
+with open(os.path.join(PATH_ROOT, 'requirements', 'install.txt'), 'r') as fp:
+ for ln in fp.readlines():
+ found = [ln.index(ch) for ch in list(',=<>#') if ch in ln]
+ pkg = ln[:min(found)] if found else ln
+ if pkg.rstrip():
+ MOCK_REQUIRE_PACKAGES.append(pkg.rstrip())
+
+with open(os.path.join(PATH_ROOT, 'requirements', 'install_async.txt'), 'r') as fp:
+ for ln in fp.readlines():
+ found = [ln.index(ch) for ch in list(',=<>#') if ch in ln]
+ pkg = ln[:min(found)] if found else ln
+ if pkg.rstrip():
+ MOCK_REQUIRE_PACKAGES.append(pkg.rstrip())
+
+# TODO: better parse from package since the import name and package name may differ
+MOCK_MANUAL_PACKAGES = [
+ 'torch',
+ 'torchvision',
+ 'numpy',
+ 'dill'
+]
+autodoc_mock_imports = MOCK_REQUIRE_PACKAGES + MOCK_MANUAL_PACKAGES
+# for mod_name in MOCK_REQUIRE_PACKAGES:
+# sys.modules[mod_name] = mock.Mock()
+
+
+# Options for the linkcode extension
+# ----------------------------------
+github_user = 'PhoenixDL'
+github_repo = project
+
+
+# Resolve function
+# This function is used to populate the (source) links in the API
+def linkcode_resolve(domain, info):
+ def find_source():
+ # try to find the file and line number, based on code from numpy:
+ # https://github.com/numpy/numpy/blob/master/doc/source/conf.py#L286
+ obj = sys.modules[info['module']]
+ for part in info['fullname'].split('.'):
+ obj = getattr(obj, part)
+ fname = inspect.getsourcefile(obj)
+ # https://github.com/rtfd/readthedocs.org/issues/5735
+ if any([s in fname for s in ('readthedocs', 'rtfd', 'checkouts')]):
+ # /home/docs/checkouts/readthedocs.org/user_builds/pytorch_lightning/checkouts/
+ # devel/pytorch_lightning/utilities/cls_experiment.py#L26-L176
+ path_top = os.path.abspath(os.path.join('..', '..', '..'))
+ fname = os.path.relpath(fname, start=path_top)
+ else:
+ # Local build, imitate master
+ fname = 'master/' + os.path.relpath(fname, start=os.path.abspath('..'))
+ source, lineno = inspect.getsourcelines(obj)
+ return fname, lineno, lineno + len(source) - 1
+
+ if domain != 'py' or not info['module']:
+ return None
+ try:
+ filename = '%s#L%d-L%d' % find_source()
+ except Exception:
+ filename = info['module'].replace('.', '/') + '.py'
+ # import subprocess
+ # tag = subprocess.Popen(['git', 'rev-parse', 'HEAD'], stdout=subprocess.PIPE,
+ # universal_newlines=True).communicate()[0][:-1]
+ branch = filename.split('/')[0]
+ # do mapping from latest tags to master
+ branch = {'latest': 'master', 'stable': 'master'}.get(branch, branch)
+ filename = '/'.join([branch] + filename.split('/')[1:])
+ return "https://github.com/%s/%s/blob/%s" \
+ % (github_user, github_repo, filename)
+
+
+autodoc_member_order = 'groupwise'
+autoclass_content = 'both'
+# handling of these options was fixed upstream and will be part of an
+# upcoming Sphinx release, see https://github.com/sphinx-doc/sphinx/issues/5459
+autodoc_default_options = {
+ 'members': None,
+ 'methods': None,
+ # 'attributes': None,
+ 'special-members': '__call__',
+ 'exclude-members': '_abc_impl',
+ 'show-inheritance': True,
+ 'private-members': True,
+ 'noindex': True,
+}
+
+# Sphinx will add “permalinks” for each heading and description environment as paragraph signs that
+# become visible when the mouse hovers over them.
+# This value determines the text for the permalink; it defaults to "¶". Set it to None or the empty
+# string to disable permalinks.
+# https://www.sphinx-doc.org/en/master/usage/configuration.html#confval-html_add_permalinks
+html_add_permalinks = "¶"
+
+# True to prefix each section label with the name of the document it is in, followed by a colon.
+# For example, index:Introduction for a section called Introduction that appears in document index.rst.
+# Useful for avoiding ambiguity when the same section heading appears in different documents.
+# http://www.sphinx-doc.org/en/master/usage/extensions/autosectionlabel.html
+autosectionlabel_prefix_document = True
+
+def patched_make_field(self, types, domain, items, **kw):
+ # `kw` catches `env=None` needed for newer sphinx while maintaining
+ # backwards compatibility when passed along further down!
+
+ # type: (List, unicode, Tuple) -> nodes.field
+ def handle_item(fieldarg, content):
+ par = nodes.paragraph()
+ par += addnodes.literal_strong('', fieldarg) # Patch: this line added
+ # par.extend(self.make_xrefs(self.rolename, domain, fieldarg,
+ # addnodes.literal_strong))
+ if fieldarg in types:
+ par += nodes.Text(' (')
+ # NOTE: using .pop() here to prevent a single type node to be
+ # inserted twice into the doctree, which leads to
+ # inconsistencies later when references are resolved
+ fieldtype = types.pop(fieldarg)
+ if len(fieldtype) == 1 and isinstance(fieldtype[0], nodes.Text):
+ typename = u''.join(n.astext() for n in fieldtype)
+ typename = typename.replace('int', 'python:int')
+ typename = typename.replace('long', 'python:long')
+ typename = typename.replace('float', 'python:float')
+ typename = typename.replace('type', 'python:type')
+ par.extend(self.make_xrefs(self.typerolename, domain, typename,
+ addnodes.literal_emphasis, **kw))
+ else:
+ par += fieldtype
+ par += nodes.Text(')')
+ par += nodes.Text(' -- ')
+ par += content
+ return par
+
+ fieldname = nodes.field_name('', self.label)
+ if len(items) == 1 and self.can_collapse:
+ fieldarg, content = items[0]
+ bodynode = handle_item(fieldarg, content)
+ else:
+ bodynode = self.list_type()
+ for fieldarg, content in items:
+ bodynode += nodes.list_item('', handle_item(fieldarg, content))
+ fieldbody = nodes.field_body('', bodynode)
+ return nodes.field('', fieldname, fieldbody)
+
+TypedField.make_field = patched_make_field
diff --git a/docs/source/getting_started.rst b/docs/source/getting_started.rst
new file mode 100644
index 00000000..3ce1d78a
--- /dev/null
+++ b/docs/source/getting_started.rst
@@ -0,0 +1,2 @@
+Getting Started
+===============
diff --git a/docs/source/images/logo/rising_icon.svg b/docs/source/images/logo/rising_icon.svg
new file mode 100644
index 00000000..3f5ff070
--- /dev/null
+++ b/docs/source/images/logo/rising_icon.svg
@@ -0,0 +1,26 @@
+
+
diff --git a/docs/source/images/logo/rising_logo.png b/docs/source/images/logo/rising_logo.png
new file mode 100644
index 00000000..490496e5
Binary files /dev/null and b/docs/source/images/logo/rising_logo.png differ
diff --git a/docs/source/images/logo/rising_logo.svg b/docs/source/images/logo/rising_logo.svg
new file mode 100644
index 00000000..a0015893
--- /dev/null
+++ b/docs/source/images/logo/rising_logo.svg
@@ -0,0 +1,30 @@
+
+
diff --git a/docs/source/index.rst b/docs/source/index.rst
new file mode 100644
index 00000000..e92a215c
--- /dev/null
+++ b/docs/source/index.rst
@@ -0,0 +1,47 @@
+:github_url: https://github.com/PhoenixDL/rising
+
+rising Documentation
+====================
+
+``rising`` is a highly performant, ``PyTorch``-only framework for efficient
+data augmentation with native support for volumetric data.
+
+.. toctree::
+ :maxdepth: 2
+ :name: introduction
+ :caption: Getting Started
+
+ getting_started
+
+.. toctree::
+ :maxdepth: 3
+ :name: docs
+ :caption: Python API
+
+ loading
+ ops
+ transforms
+ transforms.functional
+ utils
+
+.. toctree::
+ :maxdepth: 1
+ :name: Tutorials & Examples
+ :caption: Tutorials
+
+ Using external transformations
+ An Overview of rising transformations
+
+.. toctree::
+ :maxdepth: 1
+ :name: community
+ :caption: Community
+
+ contributing.md
+
+Indices and tables
+------------------
+
+* :ref:`genindex`
+* :ref:`modindex`
+* :ref:`search`
diff --git a/docs/source/loading.rst b/docs/source/loading.rst
new file mode 100644
index 00000000..1c5b7188
--- /dev/null
+++ b/docs/source/loading.rst
@@ -0,0 +1,98 @@
+.. role:: hidden
+ :class: hidden-section
+
+rising.loading
+===============
+
+.. automodule:: rising.loading
+
+.. currentmodule:: rising.loading
+
+DataLoader
+-----------
+
+.. automodule:: rising.loading.loader
+
+:hidden:`DataLoader`
+~~~~~~~~~~~~~~~~~~~~
+
+.. currentmodule:: rising.loading.loader
+
+.. autoclass:: DataLoader
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
+:hidden:`default_transform_call`
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. autofunction:: default_transform_call
+
+:hidden:`BatchTransformer`
+~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. autoclass:: BatchTransformer
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
+:hidden:`patch_worker_init_fn`
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. autofunction:: patch_worker_init_fn
+
+:hidden:`patch_collate_fn`
+~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. autofunction:: patch_collate_fn
+
+Dataset
+-----------
+
+.. automodule:: rising.loading.dataset
+
+:hidden:`Dataset`
+~~~~~~~~~~~~~~~~~
+
+.. currentmodule:: rising.loading.dataset
+
+.. autoclass:: Dataset
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
+:hidden:`AsyncDataset`
+~~~~~~~~~~~~~~~~~~~~~~
+
+.. autoclass:: AsyncDataset
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
+:hidden:`dill_helper`
+~~~~~~~~~~~~~~~~~~~~~
+
+.. autofunction:: dill_helper
+
+:hidden:`load_async`
+~~~~~~~~~~~~~~~~~~~~
+
+.. autofunction:: load_async
+
+Collation
+-----------
+
+.. automodule:: rising.loading.collate
+
+:hidden:`numpy_collate`
+~~~~~~~~~~~~~~~~~~~~~~~
+
+.. currentmodule:: rising.loading.collate
+
+.. autofunction:: numpy_collate
+
+:hidden:`do_nothing_collate`
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. autofunction:: do_nothing_collate
+
diff --git a/docs/source/ops.rst b/docs/source/ops.rst
new file mode 100644
index 00000000..c534a378
--- /dev/null
+++ b/docs/source/ops.rst
@@ -0,0 +1,24 @@
+.. role:: hidden
+ :class: hidden-section
+
+rising.ops
+=========================
+
+.. automodule:: rising.ops
+
+.. currentmodule:: rising.ops
+
+On Tensors
+------------------------
+
+:hidden:`torch_one_hot`
+~~~~~~~~~~~~~~~~~~~~~~~
+
+.. currentmodule:: rising.ops.tensor
+
+.. autofunction:: torch_one_hot
+
+:hidden:`np_one_hot`
+~~~~~~~~~~~~~~~~~~~~~~~
+
+.. autofunction:: np_one_hot
diff --git a/docs/source/transforms.functional.rst b/docs/source/transforms.functional.rst
new file mode 100644
index 00000000..097ae660
--- /dev/null
+++ b/docs/source/transforms.functional.rst
@@ -0,0 +1,234 @@
+.. role:: hidden
+ :class: hidden-section
+
+rising.transforms.functional
+-----------------------------------------------------------
+
+.. automodule:: rising.transforms.functional
+
+Affine Transforms
+**********************************************************
+
+.. automodule:: rising.transforms.functional.affine
+
+.. currentmodule:: rising.transforms.functional.affine
+
+:hidden:`affine_image_transform`
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. autofunction:: affine_image_transform
+
+:hidden:`affine_point_transform`
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. autofunction:: affine_point_transform
+
+:hidden:`parametrize_matrix`
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. autofunction:: parametrize_matrix
+
+:hidden:`create_rotation`
+~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. autofunction:: create_rotation
+
+:hidden:`create_rotation_2d`
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. autofunction:: create_rotation_2d
+
+:hidden:`create_rotation_3d`
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. autofunction:: create_rotation_3d
+
+:hidden:`create_rotation_3d_0`
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. autofunction:: create_rotation_3d_0
+
+:hidden:`create_rotation_3d_1`
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. autofunction:: create_rotation_3d_1
+
+:hidden:`create_rotation_3d_2`
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. autofunction:: create_rotation_3d_2
+
+
+:hidden:`create_scale`
+~~~~~~~~~~~~~~~~~~~~~~~
+
+.. autofunction:: create_scale
+
+:hidden:`create_translation`
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. autofunction:: create_translation
+
+:hidden:`expand_scalar_param`
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. autofunction:: expand_scalar_param
+
+Channel Transforms
+**********************************************************
+
+.. automodule:: rising.transforms.functional.channel
+
+.. currentmodule:: rising.transforms.functional.channel
+
+:hidden:`one_hot_batch`
+~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. autofunction:: one_hot_batch
+
+Cropping Transforms
+********************
+
+.. automodule:: rising.transforms.functional.crop
+
+.. currentmodule:: rising.transforms.functional.crop
+
+:hidden:`crop`
+~~~~~~~~~~~~~~~
+
+.. autofunction:: crop
+
+:hidden:`center_crop`
+~~~~~~~~~~~~~~~~~~~~~~
+
+.. autofunction:: center_crop
+
+:hidden:`random_crop`
+~~~~~~~~~~~~~~~~~~~~~~
+
+.. autofunction:: random_crop
+
+Device Transforms
+**********************************************************
+
+.. automodule:: rising.transforms.functional.device
+
+.. currentmodule:: rising.transforms.functional.device
+
+:hidden:`to_device`
+~~~~~~~~~~~~~~~~~~~~
+
+.. autofunction:: to_device
+
+Intensity Transforms
+**********************************************************
+
+.. automodule:: rising.transforms.functional.intensity
+
+.. currentmodule:: rising.transforms.functional.intensity
+
+:hidden:`norm_range`
+~~~~~~~~~~~~~~~~~~~~~
+
+.. autofunction:: norm_range
+
+:hidden:`norm_min_max`
+~~~~~~~~~~~~~~~~~~~~~~~
+
+.. autofunction:: norm_min_max
+
+:hidden:`norm_zero_mean_unit_std`
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. autofunction:: norm_zero_mean_unit_std
+
+:hidden:`norm_mean_std`
+~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. autofunction:: norm_mean_std
+
+:hidden:`add_noise`
+~~~~~~~~~~~~~~~~~~~~
+
+.. autofunction:: add_noise
+
+:hidden:`add_value`
+~~~~~~~~~~~~~~~~~~~~
+
+.. autofunction:: add_value
+
+:hidden:`gamma_correction`
+~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. autofunction:: gamma_correction
+
+:hidden:`scale_by_value`
+~~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. autofunction:: scale_by_value
+
+Spatial Transforms
+**********************************************************
+
+.. automodule:: rising.transforms.functional.spatial
+
+.. currentmodule:: rising.transforms.functional.spatial
+
+:hidden:`mirror`
+~~~~~~~~~~~~~~~~~
+
+.. autofunction:: mirror
+
+:hidden:`rot90`
+~~~~~~~~~~~~~~~~
+
+.. autofunction:: rot90
+
+:hidden:`resize_native`
+~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. autofunction:: resize_native
+
+Tensor Transforms
+**********************************************************
+
+.. automodule:: rising.transforms.functional.tensor
+
+.. currentmodule:: rising.transforms.functional.tensor
+
+:hidden:`tensor_op`
+~~~~~~~~~~~~~~~~~~~~
+
+.. autofunction:: tensor_op
+
+Utility Transforms
+**********************************************************
+
+.. automodule:: rising.transforms.functional.utility
+
+.. currentmodule:: rising.transforms.functional.utility
+
+:hidden:`box_to_seg`
+~~~~~~~~~~~~~~~~~~~~~
+
+.. autofunction:: box_to_seg
+
+:hidden:`seg_to_box`
+~~~~~~~~~~~~~~~~~~~~~
+
+.. autofunction:: seg_to_box
+
+:hidden:`instance_to_semantic`
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. autofunction:: instance_to_semantic
+
+:hidden:`pop_keys`
+~~~~~~~~~~~~~~~~~~~
+
+.. autofunction:: pop_keys
+
+:hidden:`filter_keys`
+~~~~~~~~~~~~~~~~~~~~~
+
+.. autofunction:: filter_keys
diff --git a/docs/source/transforms.rst b/docs/source/transforms.rst
new file mode 100644
index 00000000..0ef478ca
--- /dev/null
+++ b/docs/source/transforms.rst
@@ -0,0 +1,508 @@
+.. role:: hidden
+ :class: hidden-section
+
+rising.transforms
+========================================
+
+.. automodule:: rising.transforms
+
+.. currentmodule:: rising.transforms
+
+Transformation Base Classes
+***************************
+
+.. automodule:: rising.transforms.abstract
+
+:hidden:`AbstractTransform`
+~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. currentmodule:: rising.transforms.abstract
+
+.. autoclass:: AbstractTransform
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
+:hidden:`BaseTransform`
+~~~~~~~~~~~~~~~~~~~~~~~
+
+.. autoclass:: BaseTransform
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
+:hidden:`PerSampleTransform`
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. autoclass:: PerSampleTransform
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
+:hidden:`PerChannelTransform`
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. autoclass:: PerChannelTransform
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
+:hidden:`RandomDimsTransform`
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. autoclass:: RandomDimsTransform
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
+:hidden:`RandomProcess`
+~~~~~~~~~~~~~~~~~~~~~~~
+
+.. autoclass:: RandomProcess
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
+Compose Transforms
+******************
+
+.. automodule:: rising.transforms.compose
+
+.. currentmodule:: rising.transforms.compose
+
+:hidden:`Compose`
+~~~~~~~~~~~~~~~~~
+
+.. autoclass:: Compose
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
+:hidden:`DropoutCompose`
+~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. autoclass:: DropoutCompose
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
+:hidden:`dict_call`
+~~~~~~~~~~~~~~~~~~~
+
+.. autofunction:: dict_call
+
+Affine Transforms
+*****************
+
+.. automodule:: rising.transforms.affine
+
+.. currentmodule:: rising.transforms.affine
+
+:hidden:`Affine`
+~~~~~~~~~~~~~~~~
+
+.. autoclass:: Affine
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
+:hidden:`StackedAffine`
+~~~~~~~~~~~~~~~~~~~~~~~
+
+.. autoclass:: StackedAffine
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
+:hidden:`BaseAffine`
+~~~~~~~~~~~~~~~~~~~~
+
+.. autoclass:: BaseAffine
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
+:hidden:`Rotate`
+~~~~~~~~~~~~~~~~~
+
+.. autoclass:: Rotate
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
+:hidden:`Translate`
+~~~~~~~~~~~~~~~~~~~~
+
+.. autoclass:: Translate
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
+:hidden:`Scale`
+~~~~~~~~~~~~~~~~
+
+.. autoclass:: Scale
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
+:hidden:`Resize`
+~~~~~~~~~~~~~~~~~
+
+.. autoclass:: Resize
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
+Channel Transforms
+*******************
+
+.. automodule:: rising.transforms.channel
+
+.. currentmodule:: rising.transforms.channel
+
+:hidden:`OneHot`
+~~~~~~~~~~~~~~~~~
+
+.. autoclass:: OneHot
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
+Cropping Transforms
+********************
+
+.. automodule:: rising.transforms.crop
+
+.. currentmodule:: rising.transforms.crop
+
+:hidden:`CenterCrop`
+~~~~~~~~~~~~~~~~~~~~~
+
+.. autoclass:: CenterCrop
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
+:hidden:`RandomCrop`
+~~~~~~~~~~~~~~~~~~~~~
+
+.. autoclass:: RandomCrop
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
+:hidden:`CenterCropRandomSize`
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. autoclass:: CenterCropRandomSize
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
+:hidden:`RandomCropRandomSize`
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. autoclass:: RandomCropRandomSize
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
+Format Transforms
+******************
+
+.. automodule:: rising.transforms.format
+
+.. currentmodule:: rising.transforms.format
+
+:hidden:`MapToSeq`
+~~~~~~~~~~~~~~~~~~~
+
+.. autoclass:: MapToSeq
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
+:hidden:`SeqToMap`
+~~~~~~~~~~~~~~~~~~~
+
+.. autoclass:: SeqToMap
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
+Intensity Transforms
+*********************
+
+.. automodule:: rising.transforms.intensity
+
+.. currentmodule:: rising.transforms.intensity
+
+:hidden:`Clamp`
+~~~~~~~~~~~~~~~~
+
+.. autoclass:: Clamp
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
+:hidden:`NormRange`
+~~~~~~~~~~~~~~~~~~~~
+
+.. autoclass:: NormRange
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
+:hidden:`NormMinMax`
+~~~~~~~~~~~~~~~~~~~~~
+
+.. autoclass:: NormMinMax
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
+:hidden:`NormZeroMeanUnitStd`
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. autoclass:: NormZeroMeanUnitStd
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
+:hidden:`NormMeanStd`
+~~~~~~~~~~~~~~~~~~~~~~
+
+.. autoclass:: NormMeanStd
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
+:hidden:`Noise`
+~~~~~~~~~~~~~~~~
+
+.. autoclass:: Noise
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
+:hidden:`GaussianNoise`
+~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. autoclass:: GaussianNoise
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
+:hidden:`ExponentialNoise`
+~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. autoclass:: ExponentialNoise
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
+:hidden:`GammaCorrection`
+~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. autoclass:: GammaCorrection
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
+:hidden:`RandomValuePerChannel`
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. autoclass:: RandomValuePerChannel
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
+:hidden:`RandomAddValue`
+~~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. autoclass:: RandomAddValue
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
+:hidden:`RandomScaleValue`
+~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. autoclass:: RandomScaleValue
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
+Kernel Transforms
+******************
+
+.. automodule:: rising.transforms.kernel
+
+.. currentmodule:: rising.transforms.kernel
+
+:hidden:`KernelTransform`
+~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. autoclass:: KernelTransform
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
+:hidden:`GaussianSmoothing`
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. autoclass:: GaussianSmoothing
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
+Spatial Transforms
+*******************
+
+.. automodule:: rising.transforms.spatial
+
+.. currentmodule:: rising.transforms.spatial
+
+:hidden:`Mirror`
+~~~~~~~~~~~~~~~~~
+
+.. autoclass:: Mirror
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
+:hidden:`Rot90`
+~~~~~~~~~~~~~~~~
+
+.. autoclass:: Rot90
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
+:hidden:`ResizeNative`
+~~~~~~~~~~~~~~~~~~~~~~~
+
+.. autoclass:: ResizeNative
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
+:hidden:`Zoom`
+~~~~~~~~~~~~~~~
+
+.. autoclass:: Zoom
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
+:hidden:`ProgressiveResize`
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. autoclass:: ProgressiveResize
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
+:hidden:`SizeStepScheduler`
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. autoclass:: SizeStepScheduler
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
+Tensor Transforms
+******************
+
+.. automodule:: rising.transforms.tensor
+
+.. currentmodule:: rising.transforms.tensor
+
+:hidden:`ToTensor`
+~~~~~~~~~~~~~~~~~~~
+
+.. autoclass:: ToTensor
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
+:hidden:`ToDevice`
+~~~~~~~~~~~~~~~~~~~
+
+.. autoclass:: ToDevice
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
+:hidden:`TensorOp`
+~~~~~~~~~~~~~~~~~~~
+
+.. autoclass:: TensorOp
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
+:hidden:`Permute`
+~~~~~~~~~~~~~~~~~~
+
+.. autoclass:: Permute
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
+Utility Transforms
+*******************
+
+.. automodule:: rising.transforms.utility
+
+.. currentmodule:: rising.transforms.utility
+
+:hidden:`DoNothing`
+~~~~~~~~~~~~~~~~~~~~
+
+.. autoclass:: DoNothing
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
+:hidden:`SegToBox`
+~~~~~~~~~~~~~~~~~~~
+
+.. autoclass:: SegToBox
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
+:hidden:`BoxToSeg`
+~~~~~~~~~~~~~~~~~~~
+
+.. autoclass:: BoxToSeg
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
+:hidden:`InstanceToSemantic`
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. autoclass:: InstanceToSemantic
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
+:hidden:`PopKeys`
+~~~~~~~~~~~~~~~~~~
+
+.. autoclass:: PopKeys
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
+:hidden:`FilterKeys`
+~~~~~~~~~~~~~~~~~~~~~
+
+.. autoclass:: FilterKeys
+ :members:
+ :undoc-members:
+ :show-inheritance:
+
diff --git a/docs/source/utils.rst b/docs/source/utils.rst
new file mode 100644
index 00000000..8db85a3c
--- /dev/null
+++ b/docs/source/utils.rst
@@ -0,0 +1,66 @@
+.. role:: hidden
+ :class: hidden-section
+
+rising.utils
+==================================================
+
+.. automodule:: rising.utils
+
+Affines
+-------------------------------------------------
+
+.. automodule:: rising.utils.affine
+
+.. currentmodule:: rising.utils.affine
+
+:hidden:`points_to_homogeneous`
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. autofunction:: points_to_homogeneous
+
+:hidden:`matrix_to_homogeneous`
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. autofunction:: matrix_to_homogeneous
+
+:hidden:`matrix_to_cartesian`
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. autofunction:: matrix_to_cartesian
+
+:hidden:`points_to_cartesian`
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. autofunction:: points_to_cartesian
+
+:hidden:`matrix_revert_coordinate_order`
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. autofunction:: matrix_revert_coordinate_order
+
+:hidden:`get_batched_eye`
+~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. autofunction:: get_batched_eye
+
+:hidden:`deg_to_rad`
+~~~~~~~~~~~~~~~~~~~~~
+
+.. autofunction:: deg_to_rad
+
+:hidden:`unit_box`
+~~~~~~~~~~~~~~~~~~~
+
+.. autofunction:: unit_box
+
+Type Checks
+------------------------------------------
+
+.. automodule:: rising.utils.checktype
+
+.. currentmodule:: rising.utils.checktype
+
+:hidden:`check_scalar`
+~~~~~~~~~~~~~~~~~~~~~~~
+
+.. autofunction:: check_scalar
\ No newline at end of file
diff --git a/rising/__init__.py b/rising/__init__.py
index 70264e5d..a88b6d5a 100644
--- a/rising/__init__.py
+++ b/rising/__init__.py
@@ -1,5 +1,29 @@
from ._version import get_versions
+
__version__ = get_versions()['version']
del get_versions
-from rising.interface import AbstractMixin
+__author__ = 'Justus Schock, Michael Baumgartner'
+__author_email__ = 'justus.schock@rwth-aachen.de'
+__license__ = 'MIT'
+__copyright__ = 'Copyright (c) 2019-2020, %s.' % __author__
+__homepage__ = 'https://github.com/PhoenixDL/rising'
+# this has to be a simple string, see: https://github.com/pypa/twine/issues/522
+__docs__ = "rising is a highly performant, PyTorch-only framework for " \
+ "efficient data augmentation with support for volumetric data"
+__long_docs__ = ""
+
+try:
+ # This variable is injected into __builtins__ by the build
+ # process. It is used to enable importing subpackages of rising when
+ # the binaries are not yet built
+ __RISING_SETUP__
+except NameError:
+ __RISING_SETUP__ = False
+
+if __RISING_SETUP__:
+ import sys # pragma: no-cover
+ sys.stdout.write(f'Partial import of `{__name__}` during the build process.\n') # pragma: no-cover
+ # We are not importing the rest of rising during the build process, as it may not be compiled yet
+else:
+ from rising.interface import AbstractMixin
diff --git a/rising/loading/__init__.py b/rising/loading/__init__.py
index 7eec160e..0abb4389 100644
--- a/rising/loading/__init__.py
+++ b/rising/loading/__init__.py
@@ -1,3 +1,28 @@
+"""
+``rising.loading`` provides an alternative :class:`DataLoader` that extends
+:class:`torch.utils.data.DataLoader` with the following features:
+
+* Seeding of numpy in each worker process: the seed is generated by numpy in
+ the main process before starting the workers, so seeding numpy in the main
+ process makes the per-worker augmentations reproducible.
+
+* Per-Sample Transforms outside the dataset (optionally with a pseudo batch
+ dimension if the transforms require it).
+ Will be executed within the spawned worker processes before batching.
+
+* Batched Transforms for better performance.
+ Will be executed within the worker processes after batching.
+
+* Batched GPU-Transforms. Will be executed after syncing results back to the
+ main process (i.e. as the last transforms) to avoid multiple CUDA
+ initializations.
+
+Furthermore, it provides a :class:`Dataset` (based on
+:class:`torch.utils.data.Dataset`) that can create subsets of itself from
+given indices, an :class:`AsyncDataset`, as well as different options for
+collation.
+
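+Example (a minimal sketch: ``my_dataset`` stands for any map-style dataset
+and the transform defaults are those documented in :mod:`rising.transforms`)::
+
+    from rising.loading import DataLoader
+    from rising.transforms import Compose, NormZeroMeanUnitStd
+
+    # batch_transforms run inside the worker processes after collation
+    loader = DataLoader(my_dataset, batch_size=4,
+                        batch_transforms=Compose(NormZeroMeanUnitStd()))
+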
+"""
+
from rising.loading.collate import numpy_collate
from rising.loading.dataset import Dataset, AsyncDataset
from rising.loading.loader import DataLoader, default_transform_call
diff --git a/rising/loading/collate.py b/rising/loading/collate.py
index 094be43c..b40a0dd5 100644
--- a/rising/loading/collate.py
+++ b/rising/loading/collate.py
@@ -1,12 +1,11 @@
-import numpy as np
-import torch
import collections.abc
from typing import Any
+import numpy as np
+import torch
__all__ = ["numpy_collate", "do_nothing_collate"]
-
default_collate_err_msg_format = (
"default_collate: batch must contain tensors, numpy arrays, numbers, "
"dicts or lists; found {}")
@@ -17,6 +16,7 @@ def numpy_collate(batch: Any) -> Any:
function to collate the samples to a whole batch of numpy arrays.
PyTorch Tensors, scalar values and sequences will be casted to arrays
automatically.
+
Args:
batch: a batch of samples. In most cases either sequence,
mapping or mixture of them
diff --git a/rising/loading/dataset.py b/rising/loading/dataset.py
index e13bbdc5..ebdf0a3e 100644
--- a/rising/loading/dataset.py
+++ b/rising/loading/dataset.py
@@ -2,12 +2,12 @@
import os
import pathlib
from functools import partial
-from multiprocessing import cpu_count, Pool as MPPool
-from typing import Any, Sequence, Callable, Union, List, Hashable, Dict, Iterator, Generator, Optional
-from warnings import warn
+from multiprocessing import cpu_count
+from typing import Any, Sequence, Callable, Union, List, Iterator, Generator, Optional
try:
import dill
+
DILL_AVAILABLE = True
except ImportError:
DILL_AVAILABLE = False
@@ -114,7 +114,7 @@ def __init__(self,
data_path: the path(s) containing the actual data samples
load_fn: function to load the actual data
mode: whether to append the sample to a list or to extend the list
- by it. Supported modes are: :param:`append` and :param:`extend`.
+ by it. Supported modes are: ``append`` and ``extend``.
Default: ``append``
num_workers: the number of workers to use for preloading.
``0`` means, all the data will be loaded in the main process,
@@ -122,12 +122,12 @@ def __init__(self,
the number of logical cores.
verbose: whether to show the loading progress.
**load_kwargs: additional keyword arguments.
- Passed directly to :param:`load_fn`
+ Passed directly to :attr:`load_fn`
Warnings:
if using multiprocessing to load data, there are some restrictions
to which :func:`load_fn` are supported, please refer to the
- :module:`dill` or :module:`pickle` documentation
+ :mod:`dill` or :mod:`pickle` documentation
"""
super().__init__()
@@ -224,14 +224,13 @@ def update(*a):
def _add_item(data: list, item: Any, mode: str) -> None:
"""
Adds items to the given data list. The actual way of adding these
- items depends on :param:`mode`
+ items depends on :attr:`mode`
Args:
data: the list containing the already loaded data
item: the current item which will be added to the list
mode: the string specifying the mode of how the item should be
- added.
-
+ added.
Raises:
TypeError: No known mode detected
"""
diff --git a/rising/loading/loader.py b/rising/loading/loader.py
index e25a8a3d..b25d952a 100644
--- a/rising/loading/loader.py
+++ b/rising/loading/loader.py
@@ -65,8 +65,7 @@ class DataLoader(_DataLoader):
used, whose :meth:`__len__` is not implemented, because the actual
length depends on both the iterable as well as multi-process
loading configurations. So one should not query this method unless
- they work with a map-style dataset. See `Dataset Types`_ for more
- details on these two types of datasets.
+ they work with a map-style dataset.
Warnings:
If the ``spawn`` start method is used, :attr:`worker_init_fn`
@@ -111,12 +110,12 @@ def __init__(self, dataset: Union[Sequence, Dataset],
batch. Usually this accepts either mappings or sequences and
returns the same type containing transformed elements
gpu_transforms: transforms which can be applied to a whole batch
- (on the GPU). Unlike :param:`batch_transforms` this is not
+ (on the GPU). Unlike :attr:`batch_transforms` this is not
done in multiple processes, but in the main process on the
GPU, because GPUs are capable of non-blocking and asynchronous
working. Before executing these transforms all data will be
- moved to :param:`device`. This copy is done in a non-blocking
- way if :param:`pin_memory` is set to True.
+ moved to :attr:`device`. This copy is done in a non-blocking
+ way if :attr:`pin_memory` is set to True.
sample_transforms: transforms applied to each sample (on CPU).
These are the first transforms applied to the data, since they
are applied on sample retrieval from dataset before batching
@@ -511,8 +510,9 @@ def __next__(self) -> Any:
class _SingleProcessDataLoaderIter(__SingleProcessDataLoaderIter):
"""Iterator over Dataloader.
- This iterator adds functionality for per-sample transforms
+ This iterator adds functionality for per-sample transforms
outside the dataset and per-batch transforms on both, CPU and GPU.
+
"""
def __init__(self, loader: DataLoader):
diff --git a/rising/ops/__init__.py b/rising/ops/__init__.py
index 73916e74..29e550ed 100644
--- a/rising/ops/__init__.py
+++ b/rising/ops/__init__.py
@@ -1 +1,5 @@
+"""
+Provides Operators working on single tensors.
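+
+Example (a short sketch of the one-hot op defined in
+:mod:`rising.ops.tensor`)::
+
+    import torch
+    from rising.ops import torch_one_hot
+
+    onehot = torch_one_hot(torch.tensor([0, 2, 1]), num_classes=3)
+    # -> one-hot encoded tensor of shape [3, 3]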
+"""
+
from .tensor import torch_one_hot, np_one_hot
diff --git a/rising/ops/tensor.py b/rising/ops/tensor.py
index 20af81fd..54dbb0a1 100644
--- a/rising/ops/tensor.py
+++ b/rising/ops/tensor.py
@@ -10,7 +10,7 @@ def torch_one_hot(target: torch.Tensor, num_classes: Optional[int] = None) -> to
Args:
target: tensor to be converted
- num_classes: number of classes. If :param:`num_classes` is None,
+ num_classes: number of classes. If :attr:`num_classes` is None,
the maximum of target is used
Returns:
diff --git a/rising/transforms/__init__.py b/rising/transforms/__init__.py
index 026fe35f..f84674f2 100644
--- a/rising/transforms/__init__.py
+++ b/rising/transforms/__init__.py
@@ -1,3 +1,23 @@
+"""
+Provides the Augmentations and Transforms used by the
+:class:`rising.loading.DataLoader`.
+
+Implementations include:
+
+* Transformation Base Classes
+* Composed Transforms
+* Affine Transforms
+* Channel Transforms
+* Cropping Transforms
+* Device Transforms
+* Format Transforms
+* Intensity Transforms
+* Kernel Transforms
+* Spatial Transforms
+* Tensor Transforms
+* Utility Transforms
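+
+Example (a minimal sketch: transforms operate on dicts of batched tensors,
+are called with keyword arguments and return a dict again; it assumes the
+intensity transforms are re-exported at package level)::
+
+    import torch
+    from rising.transforms import Compose, NormZeroMeanUnitStd
+
+    trafo = Compose(NormZeroMeanUnitStd())
+    out = trafo(data=torch.rand(4, 1, 32, 32))  # {'data': normalized batch}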
+"""
+
from rising.transforms.abstract import *
from rising.transforms.channel import *
from rising.transforms.compose import *
diff --git a/rising/transforms/abstract.py b/rising/transforms/abstract.py
index 011e0368..cfe561bf 100644
--- a/rising/transforms/abstract.py
+++ b/rising/transforms/abstract.py
@@ -97,7 +97,7 @@ def forward(self, **data) -> dict:
class PerSampleTransform(BaseTransform):
"""
Apply transformation to each sample in batch individually
- :param:`augment_fn` must be callable with option :param:`out`
+ :attr:`augment_fn` must be callable with option :attr:`out`
where results are saved in
"""
@@ -203,7 +203,7 @@ def forward(self, **data) -> dict:
class RandomProcess(AbstractMixin):
"""
Saves specified function to generate random values to current class.
- Function is saved inside :param:`random_fn`.
+ Function is saved inside :attr:`random_fn`.
"""
def __init__(self, *args, random_mode: str,
@@ -222,7 +222,7 @@ def __init__(self, *args, random_mode: str,
random_module: module from where function random function
should be imported
rand_seq: if enabled, multiple random values are generated
- if :param:`random_args` is of type Sequence[Sequence]
+ if :attr:`random_args` is of type Sequence[Sequence]
"""
super().__init__(*args, **kwargs)
self.random_module = random_module
diff --git a/rising/transforms/affine.py b/rising/transforms/affine.py
index 5d841f22..f44cf634 100644
--- a/rising/transforms/affine.py
+++ b/rising/transforms/affine.py
@@ -39,8 +39,8 @@ def __init__(self,
**kwargs):
"""
Args:
- matrix: if given, overwrites the parameters for :param:`scale`,
- :param:rotation` and :param:`translation`.
+ matrix: if given, overwrites the parameters for :attr:`scale`,
+ :attr:`rotation` and :attr:`translation`.
Should be a matrix of shape [(BATCHSIZE,) NDIM, NDIM(+1)]
This matrix represents the whole transformation matrix
keys: keys which should be augmented
@@ -318,32 +318,32 @@ def __init__(self,
Args:
scale: the scale factor(s). Supported are:
* a single parameter (as float or int), which will be
- replicated for all dimensions and batch samples
+ replicated for all dimensions and batch samples
* a parameter per sample, which will be
- replicated for all dimensions
+ replicated for all dimensions
* a parameter per dimension, which will be replicated for all
- batch samples
+ batch samples
* a parameter per sampler per dimension
* None will be treated as a scaling factor of 1
rotation: the rotation factor(s). The rotation is performed in
consecutive order axis0 -> axis1 (-> axis 2). Supported are:
* a single parameter (as float or int), which will be
- replicated for all dimensions and batch samples
+ replicated for all dimensions and batch samples
* a parameter per sample, which will be
- replicated for all dimensions
+ replicated for all dimensions
* a parameter per dimension, which will be replicated for all
- batch samples
+ batch samples
* a parameter per sampler per dimension
- * None will be treated as a rotation factor of 1
+ * None will be treated as a rotation angle of 0
translation : torch.Tensor, int, float
the translation offset(s) relative to image (should be in the
range [0, 1]). Supported are:
* a single parameter (as float or int), which will be
- replicated for all dimensions and batch samples
+ replicated for all dimensions and batch samples
* a parameter per sample, which will be
- replicated for all dimensions
+ replicated for all dimensions
* a parameter per dimension, which will be replicated for all
- batch samples
+ batch samples
* a parameter per sampler per dimension
* None will be treated as a translation offset of 0
keys: keys which should be augmented
@@ -438,13 +438,13 @@ def __init__(self,
rotation: the rotation factor(s). The rotation is performed in
consecutive order axis0 -> axis1 (-> axis 2). Supported are:
* a single parameter (as float or int), which will be
- replicated for all dimensions and batch samples
+ replicated for all dimensions and batch samples
* a parameter per sample, which will be
- replicated for all dimensions
+ replicated for all dimensions
* a parameter per dimension, which will be replicated for all
- batch samples
+ batch samples
* a parameter per sampler per dimension
- * None will be treated as a rotation factor of 1
+ * ``None`` will be treated as a rotation angle of 0
keys: keys which should be augmented
grad: enable gradient computation inside transformation
degree: whether the given rotation(s) are in degrees.
@@ -515,11 +515,11 @@ def __init__(self,
the translation offset(s) relative to image (should be in the
range [0, 1]). Supported are:
* a single parameter (as float or int), which will be
- replicated for all dimensions and batch samples
+ replicated for all dimensions and batch samples
* a parameter per sample, which will be
- replicated for all dimensions
+ replicated for all dimensions
* a parameter per dimension, which will be replicated for all
- batch samples
+ batch samples
* a parameter per sampler per dimension
* None will be treated as a translation offset of 0
keys: keys which should be augmented
@@ -583,6 +583,12 @@ def assemble_matrix(self, **data) -> torch.Tensor:
class Scale(BaseAffine):
+ """Class Performing a Scale-Only Affine Transformation on a given
+ sample dict.
+ The transformation will be applied to all the dict-entries specified
+ in :attr:`keys`.
+ """
+
def __init__(self,
scale: AffineParamType,
keys: Sequence = ('data',),
@@ -595,56 +601,50 @@ def __init__(self,
reverse_order: bool = False,
**kwargs):
"""
- Class Performing a Scale-Only Affine Transformation on a given
- sample dict.
- The transformation will be applied to all the dict-entries specified
- in :attr:`keys`.
-
- Parameters
- ----------
- scale : torch.Tensor, int, float, optional
- the scale factor(s). Supported are:
- * a single parameter (as float or int), which will be replicated
- for all dimensions and batch samples
+ Args:
+ scale : torch.Tensor, int, float, optional
+ the scale factor(s). Supported are:
+ * a single parameter (as float or int), which will be
+ replicated for all dimensions and batch samples
* a parameter per sample, which will be
- replicated for all dimensions
- * a parameter per dimension, which will be replicated for all
- batch samples
+ replicated for all dimensions
+ * a parameter per dimension, which will be replicated for
+ all batch samples
* a parameter per sampler per dimension
- None will be treated as a scaling factor of 1
- keys: Sequence
- keys which should be augmented
- grad: bool
- enable gradient computation inside transformation
- degree : bool
- whether the given rotation(s) are in degrees.
- Only valid for rotation parameters, which aren't passed as full
- transformation matrix.
- output_size : Iterable
- if given, this will be the resulting image size.
- Defaults to ``None``
- adjust_size : bool
- if True, the resulting image size will be calculated dynamically
- to ensure that the whole image fits.
- interpolation_mode : str
- interpolation mode to calculate output values
- 'bilinear' | 'nearest'. Default: 'bilinear'
- padding_mode :
- padding mode for outside grid values
- 'zeros' | 'border' | 'reflection'. Default: 'zeros'
- align_corners : bool
- Geometrically, we consider the pixels of the input as
- squares rather than points. If set to True, the extrema (-1 and 1)
- are considered as referring to the center points of the input’s
- corner pixels. If set to False, they are instead considered as
- referring to the corner points of the input’s corner pixels,
- making the sampling more resolution agnostic.
- reverse_order: bool
- reverses the coordinate order of the transformation to conform
- to the pytorch convention: transformation params order [W,H(,D)] and
- batch order [(D,)H,W]
- **kwargs :
- additional keyword arguments passed to the affine transform
+ * None will be treated as a scaling factor of 1
+ keys: Sequence
+ keys which should be augmented
+ grad: bool
+ enable gradient computation inside transformation
+ degree: bool
+ whether the given rotation(s) are in degrees.
+ Only valid for rotation parameters, which aren't passed as a full
+ transformation matrix.
+ output_size: Iterable
+ if given, this will be the resulting image size.
+ Defaults to ``None``
+ adjust_size: bool
+ if True, the resulting image size will be calculated
+ dynamically to ensure that the whole image fits.
+ interpolation_mode: str
+ interpolation mode to calculate output values
+ 'bilinear' | 'nearest'. Default: 'bilinear'
+ padding_mode: str
+ padding mode for outside grid values
+ 'zeros' | 'border' | 'reflection'. Default: 'zeros'
+ align_corners: bool
+ Geometrically, we consider the pixels of the input as
+ squares rather than points. If set to True, the extrema
+ (-1 and 1) are considered as referring to the center points of
+ the input’s corner pixels. If set to False, they are instead
+ considered as referring to the corner points of the input’s
+ corner pixels, making the sampling more resolution agnostic.
+ reverse_order: bool
+ reverses the coordinate order of the transformation to conform
+ to the PyTorch convention: transformation params order
+ [W,H(,D)] and batch order [(D,)H,W]
+ **kwargs:
+ additional keyword arguments passed to the affine transform
"""
super().__init__(scale=scale,
rotation=None,
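For illustration, a minimal usage sketch of the `Scale` transform documented above (the batch shape is hypothetical; broadcasting of the `scale` argument follows the docstring):

```python
import torch
from rising.transforms import Scale

batch = {'data': torch.rand(4, 1, 64, 64)}  # [N, C, H, W]

# a single factor, replicated over all dimensions and batch samples
double = Scale(scale=2.0)
# one factor per dimension, replicated over all batch samples
anisotropic = Scale(scale=[2.0, 0.5])

out = double(**batch)  # applied to every dict entry listed in `keys`
```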
diff --git a/rising/transforms/channel.py b/rising/transforms/channel.py
index b51ed123..f3138503 100644
--- a/rising/transforms/channel.py
+++ b/rising/transforms/channel.py
@@ -19,7 +19,7 @@ def __init__(self, num_classes: int, keys: Sequence = ('seg',),
"""
Args:
- num_classes: number of classes. If :param:`num_classes` is None,
+ num_classes: number of classes. If :attr:`num_classes` is None,
the number of classes is automatically determined from the
current batch (by using the max of the current batch and
assuming a consecutive order from zero)
diff --git a/rising/transforms/compose.py b/rising/transforms/compose.py
index e6925cdd..cd678a3d 100644
--- a/rising/transforms/compose.py
+++ b/rising/transforms/compose.py
@@ -56,13 +56,14 @@ class Compose(AbstractTransform):
def __init__(self, *transforms, shuffle: bool = False,
transform_call: Callable[[Any, Callable], Any] = dict_call):
"""
- Args
+ Args:
transforms: one or multiple transformations which are applied
in consecutive order
shuffle: apply transforms in random order
transform_call: function which determines how transforms are
called. By default Mappings and Sequences are unpacked
during the transform.
+
"""
super().__init__(grad=True)
if isinstance(transforms[0], Sequence):
@@ -168,7 +169,7 @@ def __init__(self, *transforms, dropout: Union[float, Sequence[float]] = 0.5,
consecutive order
dropout: if provided as float, each transform is skipped with the
given probability
- if :param:`dropout` is a sequence, it needs to specify the
+ if :attr:`dropout` is a sequence, it needs to specify the
dropout probability for each given transform
random_mode: specifies distribution which should be used to sample
additive value
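As a usage sketch for the composition classes touched above (the wrapped transforms and their arguments are illustrative):

```python
import torch
from rising.transforms import Compose, DropoutCompose, Mirror, Rot90

# transforms run in consecutive order; shuffle=True randomizes the order per call
pipeline = Compose(Mirror(dims=(0,)), Rot90(dims=(0, 1)), shuffle=True)

# a sequence for `dropout` gives one skip probability per transform
sparse = DropoutCompose(Mirror(dims=(0,)), Rot90(dims=(0, 1)), dropout=[0.2, 0.8])

out = pipeline(**{'data': torch.rand(2, 1, 8, 8)})
```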
diff --git a/rising/transforms/functional/__init__.py b/rising/transforms/functional/__init__.py
index b1a5aabe..b57be95a 100644
--- a/rising/transforms/functional/__init__.py
+++ b/rising/transforms/functional/__init__.py
@@ -1,3 +1,19 @@
+"""
+Provides a functional interface for transforms
+(usually working on single tensors rather than collections thereof).
+All transformations are implemented to work on batched tensors.
+Implementations include:
+
+* Affine Transforms
+* Channel Transforms
+* Cropping Transforms
+* Device Transforms
+* Intensity Transforms
+* Spatial Transforms
+* Tensor Transforms
+* Utility Transforms
+"""
+
from rising.transforms.functional.crop import *
from rising.transforms.functional.device import *
from rising.transforms.functional.intensity import *
diff --git a/rising/transforms/functional/affine.py b/rising/transforms/functional/affine.py
index 4dac4050..65baba91 100644
--- a/rising/transforms/functional/affine.py
+++ b/rising/transforms/functional/affine.py
@@ -15,6 +15,7 @@
"create_rotation",
"create_scale",
"create_translation",
+ "parametrize_matrix"
]
AffineParamType = Union[int, float, Sequence, torch.Tensor]
@@ -63,24 +64,24 @@ def create_scale(scale: AffineParamType,
Formats the given scale parameters to a homogeneous transformation matrix
Args:
- scale : the scale factor(s). Supported are:
- * a single parameter (as float or int), which will be replicated
+ scale: the scale factor(s). Supported are:
+ * a single parameter (as float or int), which will be replicated
for all dimensions and batch samples
- * a parameter per sample, which will be
+ * a parameter per sample, which will be
replicated for all dimensions
- * a parameter per dimension, which will be replicated for all
+ * a parameter per dimension, which will be replicated for all
batch samples
- * a parameter per sampler per dimension
- * None will be treated as a scaling factor of 1
- batchsize: the number of samples per batch
- ndim: the dimensionality of the transform
- device: the device to put the resulting tensor to.
- Defaults to the torch default device
- dtype: the dtype of the resulting trensor.
- Defaults to the torch default dtype
- image_transform: inverts the scale matrix to match expected behavior
- when applied to an image, e.g. scale>1 increases the size of an
- image but decrease the size of an grid
+ * a parameter per sample per dimension
+ * None will be treated as a scaling factor of 1
+ batchsize: the number of samples per batch
+ ndim: the dimensionality of the transform
+ device: the device to put the resulting tensor to.
+ Defaults to the torch default device
+ dtype: the dtype of the resulting tensor.
+ Defaults to the torch default dtype
+ image_transform: inverts the scale matrix to match the expected behavior
+ when applied to an image, e.g. scale > 1 increases the size of an
+ image but decreases the size of a grid
Returns:
the homogeneous transformation matrix [N, NDIM + 1, NDIM + 1], N is
@@ -111,11 +112,11 @@ def create_translation(offset: AffineParamType,
Args:
offset: the translation offset(s). Supported are:
* a single parameter (as float or int), which will be replicated
- for all dimensions and batch samples
+ for all dimensions and batch samples
* a parameter per sample, which will be
- replicated for all dimensions
+ replicated for all dimensions
* a parameter per dimension, which will be replicated for all
- batch samples
+ batch samples
* a parameter per sample per dimension
* None will be treated as a translation offset of 0
batchsize: the number of samples per batch
@@ -157,13 +158,13 @@ def create_rotation(rotation: AffineParamType,
Args:
rotation: the rotation factor(s). Supported are:
* a single parameter (as float or int), which will be replicated
- for all dimensions and batch samples
+ for all dimensions and batch samples
* a parameter per sample, which will be
- replicated for all dimensions
+ replicated for all dimensions
* a parameter per dimension, which will be replicated for all
- batch samples
+ batch samples
* a parameter per sample per dimension
- * None will be treated as a rotation factor of 0
+ * None will be treated as a rotation angle of 0
batchsize: the number of samples per batch
ndim : the dimensionality of the transform
degree: whether the given rotation(s) are in degrees.
@@ -294,29 +295,29 @@ def parametrize_matrix(scale: AffineParamType,
Args:
scale: the scale factor(s). Supported are:
* a single parameter (as float or int), which will be replicated
- for all dimensions and batch samples
+ for all dimensions and batch samples
* a parameter per sample, which will be
- replicated for all dimensions
+ replicated for all dimensions
* a parameter per dimension, which will be replicated for all
- batch samples
+ batch samples
* a parameter per sample per dimension
* None will be treated as a scaling factor of 1
rotation: the rotation factor(s). Supported are:
* a single parameter (as float or int), which will be replicated
- for all dimensions and batch samples
+ for all dimensions and batch samples
* a parameter per sample, which will be
- replicated for all dimensions
+ replicated for all dimensions
* a parameter per dimension, which will be replicated for all
- batch samples
+ batch samples
* a parameter per sample per dimension
* None will be treated as a rotation angle of 0
translation: the translation offset(s). Supported are:
* a single parameter (as float or int), which will be replicated
- for all dimensions and batch samples
+ for all dimensions and batch samples
* a parameter per sample, which will be
- replicated for all dimensions
+ replicated for all dimensions
* a parameter per dimension, which will be replicated for all
- batch samples
+ batch samples
* a parameter per sample per dimension
* None will be treated as a translation offset of 0
batchsize: the number of samples per batch
@@ -360,7 +361,7 @@ def affine_point_transform(point_batch: torch.Tensor,
``NDIM`` is the number of spatial dimensions
matrix_batch : torch.Tensor
a batch of affine matrices with shape [N, NDIM, NDIM + 1],
- N is the batch size and NDIM is the number of spatial dimensions
+ N is the batch size and NDIM is the number of spatial dimensions
Returns:
the batch of transformed points in cartesian coordinates
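A short sketch of the point interface described above (values are illustrative; `parametrize_matrix` is the helper added to `__all__` in this file):

```python
import torch
from rising.transforms.functional.affine import (affine_point_transform,
                                                 parametrize_matrix)

# one 2D sample with three points in cartesian coordinates: [N, NP, NDIM]
points = torch.tensor([[[0., 0.], [1., 0.], [0., 1.]]])

# batched affine matrix of shape [N, NDIM, NDIM + 1]
matrix = parametrize_matrix(scale=2., rotation=0., translation=0.,
                            batchsize=1, ndim=2, degree=True)

moved = affine_point_transform(points, matrix)  # same shape as `points`
```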
@@ -414,7 +415,7 @@ def affine_image_transform(image_batch: torch.Tensor,
resolutions (that is, after being upsampled or downsampled).
Notes:
- :param:`output_size` and :param:`adjust_size` are mutually exclusive.
+ :attr:`output_size` and :attr:`adjust_size` are mutually exclusive.
If None of them is set, the resulting image will have the same size
as the input image.
"""
diff --git a/rising/transforms/functional/channel.py b/rising/transforms/functional/channel.py
index eaa532b5..40cab2e9 100644
--- a/rising/transforms/functional/channel.py
+++ b/rising/transforms/functional/channel.py
@@ -14,7 +14,7 @@ def one_hot_batch(target: torch.Tensor, num_classes: Optional[int] = None) -> to
Args:
target: tensor to be converted
num_classes: number of classes.
- If :param:`num_classes` is None, the maximum of target is used
+ If :attr:`num_classes` is None, the maximum of target is used
Returns:
one hot encoded tensor
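A small sketch of the inference behavior (shapes assumed from the batched-tensor convention used throughout rising):

```python
import torch
from rising.transforms.functional.channel import one_hot_batch

# hypothetical segmentation batch [N, 1, H, W] with consecutive class ids from 0
target = torch.tensor([[[[0, 1], [2, 1]]]])

onehot = one_hot_batch(target)  # num_classes inferred from the batch maximum
# expected result: shape [1, 3, 2, 2], one channel per class
```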
diff --git a/rising/transforms/functional/spatial.py b/rising/transforms/functional/spatial.py
index 4468ee61..94ca7e2c 100644
--- a/rising/transforms/functional/spatial.py
+++ b/rising/transforms/functional/spatial.py
@@ -3,7 +3,7 @@
from rising.utils import check_scalar
-__all__ = ["mirror", "rot90", "resize"]
+__all__ = ["mirror", "rot90", "resize_native"]
def mirror(data: torch.Tensor, dims: Union[int, Sequence[int]]) -> torch.Tensor:
@@ -40,14 +40,14 @@ def rot90(data: torch.Tensor, k: int, dims: Union[int, Sequence[int]]):
return torch.rot90(data, k, dims)
-def resize(data: torch.Tensor,
- size: Optional[Union[int, Sequence[int]]] = None,
- scale_factor: Optional[Union[float, Sequence[float]]] = None,
- mode: str = 'nearest', align_corners: Optional[bool] = None,
- preserve_range: bool = False):
+def resize_native(data: torch.Tensor,
+ size: Optional[Union[int, Sequence[int]]] = None,
+ scale_factor: Optional[Union[float, Sequence[float]]] = None,
+ mode: str = 'nearest', align_corners: Optional[bool] = None,
+ preserve_range: bool = False):
"""
- Down/up-sample sample to either the given :param:`size` or the given
- :param:`scale_factor`
+ Down/up-sample sample to either the given :attr:`size` or the given
+ :attr:`scale_factor`
The modes available for resizing are: nearest, linear (3D-only), bilinear,
bicubic (4D-only), trilinear (5D-only), area
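A usage sketch of the renamed function (values are illustrative):

```python
import torch
from rising.transforms.functional.spatial import resize_native

batch = torch.arange(9, dtype=torch.float32).reshape(1, 1, 3, 3)

# pass either an absolute size or a relative scale factor, not both
small = resize_native(batch, size=(2, 2), preserve_range=True)
big = resize_native(batch, scale_factor=2., mode='nearest')
```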
diff --git a/rising/transforms/functional/tensor.py b/rising/transforms/functional/tensor.py
index 33ac0e6e..8110e8e6 100644
--- a/rising/transforms/functional/tensor.py
+++ b/rising/transforms/functional/tensor.py
@@ -13,11 +13,11 @@ def tensor_op(data: data_type, fn: str, *args, **kwargs) -> data_type:
Invokes a function from a tensor
Args:
- data: data which should be pushed to device. Sequence and mapping items
- are mapping individually to gpu
- fn: tensor function
- *args: positional arguments passed to tensor function
- **kwargs: keyword arguments passed to tensor function
+ data: data which should be pushed to device. Sequence and mapping items
+ are mapped individually to the GPU
+ fn: tensor function
+ *args: positional arguments passed to tensor function
+ **kwargs: keyword arguments passed to tensor function
Returns:
data which was pushed to device
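A sketch of how `tensor_op` can be used; `fn` names a method of `torch.Tensor`, and mappings are traversed item by item (behavior assumed from the docstring):

```python
import torch
from rising.transforms.functional.tensor import tensor_op

batch = {'data': torch.rand(2, 3), 'seg': torch.rand(2, 3)}

floats = tensor_op(batch, 'float')     # calls .float() on every tensor
moved = tensor_op(batch, 'to', 'cpu')  # extra args go to the invoked method
```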
diff --git a/rising/transforms/functional/utility.py b/rising/transforms/functional/utility.py
index 7e69cf2a..b6026644 100644
--- a/rising/transforms/functional/utility.py
+++ b/rising/transforms/functional/utility.py
@@ -18,11 +18,11 @@ def box_to_seg(boxes: Sequence[Sequence[int]],
(dim0_min, dim1_min, dim0_max, dim1_max, [dim2_min, dim2_max]).
Supported bounding boxes for 2D (4 entries per box)
and 3d (6 entries per box)
- shape: if :param:`out` is not provided, shape of output tensor must
+ shape: if :attr:`out` is not provided, shape of output tensor must
be specified
- dtype: if :param:`out` is not provided,
+ dtype: if :attr:`out` is not provided,
dtype of output tensor must be specified
- device: if :param:`out` is not provided,
+ device: if :attr:`out` is not provided,
device of output tensor must be specified
out: if not None, the segmentation will be saved inside this tensor
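A hedged usage sketch of `box_to_seg` (the labeling of the output is an assumption, not documented above):

```python
import torch
from rising.transforms.functional.utility import box_to_seg

# two 2D boxes: (dim0_min, dim1_min, dim0_max, dim1_max)
boxes = [[0, 0, 3, 3], [5, 5, 8, 8]]
seg = box_to_seg(boxes, shape=(10, 10), dtype=torch.long, device='cpu')
# assumption: box i is rasterized with label i + 1, background stays 0
```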
@@ -82,7 +82,7 @@ def instance_to_semantic(instance: torch.Tensor,
semantic segmentation
Warnings:
- :param:`instance` needs to encode objects starting from 1 and the
+ :attr:`instance` needs to encode objects starting from 1 and the
indices need to be continuous (0 is interpreted as background)
"""
seg = torch.zeros_like(instance)
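The warning exists because the conversion indexes instances by their id; a minimal self-contained sketch of the assumed core logic:

```python
import torch

def instance_to_semantic_sketch(instance: torch.Tensor, cls) -> torch.Tensor:
    """Assumed logic: instance ids start at 1 and are consecutive,
    0 is background, and cls[i] is the semantic class of instance i + 1."""
    seg = torch.zeros_like(instance)
    for idx, c in enumerate(cls, start=1):
        seg[instance == idx] = c
    return seg
```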
diff --git a/rising/transforms/intensity.py b/rising/transforms/intensity.py
index fb62b6f4..d522ca98 100644
--- a/rising/transforms/intensity.py
+++ b/rising/transforms/intensity.py
@@ -22,11 +22,11 @@ def __init__(self, min: float, max: float, keys: Sequence = ('data',),
grad: bool = False, **kwargs):
"""
Args:
- min: minimal value
- max: maximal value
- keys: the keys corresponding to the values to clamp
- grad: enable gradient computation inside transformation
- **kwargs: keyword arguments passed to augment_fn
+ min: minimal value
+ max: maximal value
+ keys: the keys corresponding to the values to clamp
+ grad: enable gradient computation inside transformation
+ **kwargs: keyword arguments passed to augment_fn
"""
super().__init__(augment_fn=torch.clamp, keys=keys, grad=grad,
min=min, max=max, **kwargs)
@@ -165,11 +165,11 @@ def __init__(self, gamma: Union[float, Sequence] = (0.5, 2),
gamma: if gamma is float it is always applied.
if gamma is a sequence it is interpreted as the minimal and
maximal value. If the maximal value is greater than one,
- the transform chooses gamma <1 in 50% of the cases and
- gamma >1 in the other cases.
- keys: keys to normalize
- grad: enable gradient computation inside transformation
- **kwargs: keyword arguments passed to superclass
+ the transform chooses gamma < 1 in 50% of the cases and
+ gamma > 1 in the other cases.
+ keys: keys to normalize
+ grad: enable gradient computation inside transformation
+ **kwargs: keyword arguments passed to superclass
"""
super().__init__(augment_fn=gamma_correction, keys=keys, grad=grad)
self.kwargs = kwargs
@@ -207,7 +207,7 @@ def forward(self, **data) -> dict:
class RandomValuePerChannel(RandomProcess, PerChannelTransform):
"""
Apply augmentations which take random values as input by keyword
- :param:`value`
+ :attr:`value`
"""
def __init__(self, augment_fn: callable, random_mode: str, random_args: Sequence = (),
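A self-contained sketch of the gamma sampling rule described above (assumed logic, not the verbatim implementation):

```python
import random

def sample_gamma(gamma=(0.5, 2.0)) -> float:
    """Assumed sampling rule for the gamma transform documented above."""
    if isinstance(gamma, (int, float)):
        return float(gamma)              # a scalar gamma is always applied
    low, high = gamma
    if high > 1 and random.random() < 0.5:
        return random.uniform(low, 1.0)  # darken: gamma < 1
    start = 1.0 if high > 1 else low
    return random.uniform(start, high)   # brighten: gamma > 1
```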
diff --git a/rising/transforms/spatial.py b/rising/transforms/spatial.py
index e78c52c1..6bc9847a 100644
--- a/rising/transforms/spatial.py
+++ b/rising/transforms/spatial.py
@@ -10,7 +10,7 @@
from .abstract import RandomDimsTransform, AbstractTransform, BaseTransform, RandomProcess
from .functional.spatial import *
-__all__ = ["Mirror", "Rot90", "Resize",
+__all__ = ["Mirror", "Rot90", "ResizeNative",
"Zoom", "ProgressiveResize", "SizeStepScheduler"]
scheduler_type = Callable[[int], Union[int, Sequence[int]]]
@@ -92,7 +92,7 @@ def dims(self, dims: Sequence):
self._permutations = tuple(permutations(dims, 2))
-class Resize(BaseTransform):
+class ResizeNative(BaseTransform):
"""Resize data to given size"""
def __init__(self, size: Union[int, Sequence[int]], mode: str = 'nearest',
@@ -113,7 +113,7 @@ def __init__(self, size: Union[int, Sequence[int]], mode: str = 'nearest',
grad: enable gradient computation inside transformation
**kwargs: keyword arguments passed to augment_fn
"""
- super().__init__(augment_fn=resize, size=size, mode=mode,
+ super().__init__(augment_fn=resize_native, size=size, mode=mode,
align_corners=align_corners, preserve_range=preserve_range,
keys=keys, grad=grad, **kwargs)
@@ -121,7 +121,7 @@ def __init__(self, size: Union[int, Sequence[int]], mode: str = 'nearest',
class Zoom(RandomProcess, BaseTransform):
"""Apply augment_fn to keys. By default the scaling factor is sampled
from a uniform distribution with the range specified by
- :param:`random_args`
+ :attr:`random_args`
"""
def __init__(self, random_args: Union[Sequence, Sequence[Sequence]] = (0.75, 1.25),
@@ -151,7 +151,7 @@ def __init__(self, random_args: Union[Sequence, Sequence[Sequence]] = (0.75, 1.2
See Also:
:func:`random.uniform`, :func:`torch.nn.functional.interpolate`
"""
- super().__init__(augment_fn=resize, random_args=random_args,
+ super().__init__(augment_fn=resize_native, random_args=random_args,
random_mode=random_mode, mode=mode,
align_corners=align_corners, preserve_range=preserve_range,
keys=keys, grad=grad, **kwargs)
@@ -170,7 +170,7 @@ def forward(self, **data) -> dict:
return super().forward(**data)
-class ProgressiveResize(Resize):
+class ProgressiveResize(ResizeNative):
"""Resize data to sizes specified by scheduler"""
def __init__(self, scheduler: scheduler_type, mode: str = 'nearest',
@@ -181,16 +181,16 @@ def __init__(self, scheduler: scheduler_type, mode: str = 'nearest',
scheduler: scheduler which determines the current size.
The scheduler is called with the current iteration of the
transform
- mode: one of ``nearest``, ``linear``, ``bilinear``, ``bicubic``,
- ``trilinear``, ``area`` (for more inforamtion see
- :func:`torch.nn.functional.interpolate`)
- align_corners: input and output tensors are aligned by the center
- points of their corners pixels, preserving the values at the
- corner pixels.
- preserve_range: output tensor has same range as input tensor
- keys: keys which should be augmented
- grad: enable gradient computation inside transformation
- **kwargs: keyword arguments passed to augment_fn
+ mode: one of ``nearest``, ``linear``, ``bilinear``, ``bicubic``,
+ ``trilinear``, ``area`` (for more information see
+ :func:`torch.nn.functional.interpolate`)
+ align_corners: input and output tensors are aligned by the center
+ points of their corner pixels, preserving the values at the
+ corner pixels.
+ preserve_range: output tensor has the same range as the input tensor
+ keys: keys which should be augmented
+ grad: enable gradient computation inside transformation
+ **kwargs: keyword arguments passed to augment_fn
Warnings:
When this transformations is used in combination with
@@ -205,7 +205,7 @@ def __init__(self, scheduler: scheduler_type, mode: str = 'nearest',
self.scheduler = scheduler
self._step = Value('i', 0)
- def reset_step(self) -> Resize:
+ def reset_step(self) -> ResizeNative:
"""
Reset step to 0
@@ -216,7 +216,7 @@ def reset_step(self) -> Resize:
self._step.value = 0
return self
- def increment(self) -> Resize:
+ def increment(self) -> ResizeNative:
"""
Increment step by 1
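A usage sketch of the renamed transforms (the scheduler below is a plain callable matching `scheduler_type`; `SizeStepScheduler` from `__all__` above is a ready-made alternative):

```python
import torch
from rising.transforms.spatial import ResizeNative, ProgressiveResize

# fixed-size resize of every batch
resize = ResizeNative(size=(32, 32), mode='nearest')

# the scheduler maps the transform's current step to a target size
progressive = ProgressiveResize(lambda step: 16 if step < 10 else 32)

out = resize(**{'data': torch.rand(2, 1, 48, 48)})
```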
diff --git a/rising/transforms/tensor.py b/rising/transforms/tensor.py
index dbf67361..16d78298 100644
--- a/rising/transforms/tensor.py
+++ b/rising/transforms/tensor.py
@@ -16,8 +16,8 @@ def __init__(self, keys: Sequence = ('data',), grad: bool = False, **kwargs):
"""
Args:
keys: keys which should be transformed
- grad: enable gradient computation inside transformation
- **kwargs: keyword arguments passed to augment_fn
+ grad: enable gradient computation inside transformation
+ **kwargs: keyword arguments passed to augment_fn
"""
super().__init__(augment_fn=default_convert, keys=keys, grad=grad, **kwargs)
diff --git a/rising/transforms/utility.py b/rising/transforms/utility.py
index d0cadaa9..a6e6c8d8 100644
--- a/rising/transforms/utility.py
+++ b/rising/transforms/utility.py
@@ -67,14 +67,15 @@ def __init__(self, keys: Mapping[Hashable, Hashable], shape: Sequence[int],
dtype: torch.dtype, device: Union[torch.device, str],
grad: bool = False, **kwargs):
"""
- keys: the key specifies which item to use as the bounding boxes and
- the item specifies where the save the bounding boxes
- shape: spatial shape of output tensor (batchsize is derived from
- bounding boxes and has one channel)
- dtype: dtype of segmentation
- device: device of segmentation
- grad: enable gradient computation inside transformation
- **kwargs: Additional keyword arguments forwarded to the Base Class
+ Args:
+ keys: the key specifies which item to use as the bounding boxes and
+ the item specifies where to save the bounding boxes
+ shape: spatial shape of the output tensor (the batch size is derived
+ from the bounding boxes and the output has one channel)
+ dtype: dtype of segmentation
+ device: device of segmentation
+ grad: enable gradient computation inside transformation
+ **kwargs: Additional keyword arguments forwarded to the Base Class
"""
super().__init__(grad=grad, **kwargs)
self.keys = keys
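A hedged usage sketch for the keys mapping documented above (the class name `BoxToSeg` and the batch layout are assumptions based on this hunk):

```python
import torch
from rising.transforms.utility import BoxToSeg  # class name assumed

trafo = BoxToSeg(keys={'boxes': 'seg'},  # read from 'boxes', write mask to 'seg'
                 shape=(32, 32), dtype=torch.long, device='cpu')

batch = {'boxes': [torch.tensor([[2, 2, 10, 10]])]}  # one sample, one 2D box
out = trafo(**batch)
```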
diff --git a/rising/utils/affine.py b/rising/utils/affine.py
index 125b08fb..7a5fa51d 100644
--- a/rising/utils/affine.py
+++ b/rising/utils/affine.py
@@ -54,11 +54,11 @@ def matrix_to_cartesian(batch: torch.Tensor, keep_square: bool = False
coordinates.
Args:
- batch: the batch oif matrices to convert back
- keep_square: if False: returns a NDIM x NDIM+1 matrix to keep the
- translation part
- if True: returns a NDIM x NDIM matrix but looses the translation part
- defaults to False.
+ batch: the batch of matrices to convert back
+ keep_square: if False: returns an NDIM x NDIM+1 matrix to keep the
+ translation part
+ if True: returns an NDIM x NDIM matrix but loses the translation
+ part. Defaults to False.
Returns:
the given matrix in cartesian coordinates
@@ -122,9 +122,7 @@ def get_batched_eye(batchsize: int, ndim: int,
dtype : torch.dtype, str, optional
the dtype of the resulting tensor. Defaults to the default dtype
- Returns
- -------
- torch.Tensor
+ Returns:
batched eye matrix
"""
@@ -152,8 +150,8 @@ def unit_box(n: int, scale: Optional[torch.Tensor] = None) -> torch.Tensor:
Create a scaled version of a unit box
Args:
- n: number of dimensions
- scale: scaling of each dimension
+ n: number of dimensions
+ scale: scaling of each dimension
Returns:
scaled unit box
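The shape contract above in a short sketch:

```python
import torch
from rising.utils.affine import matrix_to_cartesian

homog = torch.eye(3).unsqueeze(0)  # one homogeneous 2D matrix: [N, 3, 3]

cart = matrix_to_cartesian(homog)                      # [1, 2, 3], keeps translation
square = matrix_to_cartesian(homog, keep_square=True)  # [1, 2, 2], drops translation
```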
diff --git a/rising/utils/checktype.py b/rising/utils/checktype.py
index e1ff5183..44a65f80 100644
--- a/rising/utils/checktype.py
+++ b/rising/utils/checktype.py
@@ -9,7 +9,4 @@ def check_scalar(x):
Returns:
True if input is scalar
"""
- if isinstance(x, (int, float)):
- return True
- else:
- return False
+ return isinstance(x, (int, float))
diff --git a/setup.py b/setup.py
index 84be1b7e..f9f613e0 100644
--- a/setup.py
+++ b/setup.py
@@ -4,6 +4,13 @@
import versioneer
+try:
+ import builtins
+except ImportError:
+ import __builtin__ as builtins
+
+builtins.__RISING_SETUP__ = True
+
def resolve_requirements(file):
requirements = []
@@ -29,14 +36,17 @@ def read_file(file):
requirements_async = resolve_requirements(
os.path.join(os.path.dirname(__file__), "requirements", 'install_async.txt'))
-readme = read_file(os.path.join(os.path.dirname(__file__), "README.md"))
+readme = read_file(os.path.join(os.path.dirname(__file__), "README.md")
+ ).replace('.svg', '.png')
+
+import rising
setup(
name='rising',
version=versioneer.get_version(),
cmdclass=versioneer.get_cmdclass(),
packages=find_packages(),
- url='https://github.com/phoenixdl/rising',
+ url=rising.__homepage__,
test_suite="unittest",
long_description=readme,
long_description_content_type='text/markdown',
@@ -44,8 +54,19 @@ def read_file(file):
extras_require={'async': requirements_async},
tests_require=["coverage"],
python_requires=">=3.6",
- author="PhoenixDL",
- maintainer='Michael Baumgartner, Justus Schock',
- maintainer_email='justus.schock@rwth-aachen.de',
- license='MIT',
+ author=rising.__author__,
+ author_email=rising.__author_email__,
+ include_package_data=True,
+ zip_safe=False,
+ keywords=['deep learning', 'augmentation', 'transforms', 'pytorch', 'medical'],
+ license=rising.__license__,
+ project_urls={
+ "Bug Tracker": "https://github.com/PhoenixDL/rising/issues",
+ "Documentation": "https://rising.rtfd.io/en/latest/",
+ "Source Code": "https://github.com/PhoenixDL/rising",
+ },
+ # TODO: Populate classifiers
+ classifiers=[
+
+ ]
)
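The `builtins.__RISING_SETUP__` flag set above follows the pattern numpy and other packages use: it lets `setup.py` run `import rising` for the metadata (`__homepage__`, `__author__`, ...) without triggering the package's heavy imports. A sketch of the matching guard (the real `rising/__init__.py` may differ):

```python
# rising/__init__.py (sketch)
import builtins

__author__ = 'PhoenixDL'  # metadata consumed by setup.py

if not getattr(builtins, '__RISING_SETUP__', False):
    # regular import: pull in the subpackages; skipped during setup
    from rising import transforms, loading  # noqa: F401
```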
diff --git a/tests/transforms/functional/test_spatial.py b/tests/transforms/functional/test_spatial.py
index 9feee2bb..7a80cce4 100644
--- a/tests/transforms/functional/test_spatial.py
+++ b/tests/transforms/functional/test_spatial.py
@@ -26,7 +26,7 @@ def test_rot90(self):
self.assertTrue((outp == expected).all())
def test_resize(self):
- out = resize(self.batch_2d.float(), (2, 2), preserve_range=True)
+ out = resize_native(self.batch_2d.float(), (2, 2), preserve_range=True)
expected = torch.tensor([[1, 2], [4, 5]])
self.assertTrue((out == expected).all())
diff --git a/tests/transforms/test_spatial_transforms.py b/tests/transforms/test_spatial_transforms.py
index 5ed212f9..b5e28ce7 100644
--- a/tests/transforms/test_spatial_transforms.py
+++ b/tests/transforms/test_spatial_transforms.py
@@ -4,7 +4,7 @@
from tests.transforms import chech_data_preservation
from rising.transforms.spatial import *
-from rising.transforms.functional.spatial import resize
+from rising.transforms.functional.spatial import resize_native
from rising.loading import DataLoader
@@ -44,7 +44,7 @@ def test_rot90_transform(self):
self.assertTrue((outp["data"] == data_orig).all())
def test_resize_transform(self):
- trafo = Resize((2, 2))
+ trafo = ResizeNative((2, 2))
out = trafo(**self.batch_dict)
expected = torch.tensor([[1, 2], [4, 5]])
self.assertTrue((out["data"] == expected).all())
@@ -58,7 +58,7 @@ def test_zoom_transform(self):
random.seed(0)
out = trafo(**self.batch_dict)
- expected = resize(self.batch_dict["data"], mode="nearest", scale_factor=scale_factor)
+ expected = resize_native(self.batch_dict["data"], mode="nearest", scale_factor=scale_factor)
self.assertTrue((out["data"] == expected).all())
def test_progressive_resize(self):