diff --git a/.github/workflows/unit-tests.yml b/.github/workflows/unit-tests.yml
index 97eecdb84..42af21e53 100644
--- a/.github/workflows/unit-tests.yml
+++ b/.github/workflows/unit-tests.yml
@@ -8,7 +8,7 @@ jobs:
runs-on: ubuntu-latest
strategy:
matrix:
- python-version: ['3.6', '3.8', '3.10']
+ python-version: ['3.7', '3.8', '3.10']
steps:
- name: Checkout 🛎️
diff --git a/README.md b/README.md
index 10bdc959d..795c81964 100644
--- a/README.md
+++ b/README.md
@@ -50,9 +50,10 @@ See the [detailed installation instructions](https://tum-pbs.github.io/PhiFlow/I
To get started, check out our YouTube tutorial series and the following Jupyter notebooks:
-* [ Fluids](https://colab.research.google.com/github/tum-pbs/PhiFlow/blob/develop/docs/Fluids_Tutorial.ipynb): Introduction to core classes and fluid-related functions.
-* [ Solar System](https://colab.research.google.com/github/tum-pbs/PhiFlow/blob/develop/docs/Planets_Tutorial.ipynb): Visualize a many-body system with Newtonian gravity.
-* [ Learning to Throw](https://colab.research.google.com/github/tum-pbs/PhiFlow/blob/develop/docs/Learn_to_Throw_Tutorial.ipynb): Train a neural network to hit a target, comparing supervised and differentiable physics losses.
+* [](https://colab.research.google.com/github/tum-pbs/PhiFlow/blob/develop/docs/Math_Introduction.ipynb) [Tensors](https://tum-pbs.github.io/PhiFlow/Math_Introduction.html): Introduction to tensors.
+* [](https://colab.research.google.com/github/tum-pbs/PhiFlow/blob/develop/docs/Fluids_Tutorial.ipynb) [Fluids](https://tum-pbs.github.io/PhiFlow/Fluids_Tutorial.html): Introduction to core classes and fluid-related functions.
+* [](https://colab.research.google.com/github/tum-pbs/PhiFlow/blob/develop/docs/Planets_Tutorial.ipynb) [Solar System](https://tum-pbs.github.io/PhiFlow/Planets_Tutorial.html): Visualize a many-body system with Newtonian gravity.
+* [](https://colab.research.google.com/github/tum-pbs/PhiFlow/blob/develop/docs/Learn_to_Throw_Tutorial.ipynb) [Learn to Throw](https://tum-pbs.github.io/PhiFlow/Learn_to_Throw_Tutorial.html): Train a neural network to hit a target, comparing supervised and differentiable physics losses.
If you prefer to work with an IDE, such as PyCharm or VS Code, the following demos will also be helpful:
@@ -61,13 +62,20 @@ If you like to work with an IDE, like PyCharm or VS Code, the following demos wi
## Publications
-We have recently submitted a whitepaper.
+We will upload a whitepaper soon.
In the meantime, please cite the ICLR 2020 paper.
* [Learning to Control PDEs with Differentiable Physics](https://ge.in.tum.de/publications/2020-iclr-holl/), *Philipp Holl, Vladlen Koltun, Nils Thuerey*, ICLR 2020.
* [Solver-in-the-Loop: Learning from Differentiable Physics to Interact with Iterative PDE-Solvers](https://arxiv.org/abs/2007.00016), *Kiwon Um, Raymond Fei, Philipp Holl, Robert Brand, Nils Thuerey*, NeurIPS 2020.
* [ΦFlow: A Differentiable PDE Solving Framework for Deep Learning via Physical Simulations](https://montrealrobotics.ca/diffcvgp/), *Nils Thuerey, Kiwon Um, Philipp Holl*, DiffCVGP workshop at NeurIPS 2020.
+* [Physics-based Deep Learning](https://physicsbaseddeeplearning.org/intro.html) (book), *Nils Thuerey, Philipp Holl, Maximilian Mueller, Patrick Schnell, Felix Trost, Kiwon Um*.
* [Half-Inverse Gradients for Physical Deep Learning](https://arxiv.org/abs/2203.10131), *Patrick Schnell, Philipp Holl, Nils Thuerey*, ICLR 2022.
+* [Scale-invariant Learning by Physics Inversion](https://arxiv.org/abs/2109.15048), *Philipp Holl, Vladlen Koltun, Nils Thuerey*, NeurIPS 2022.
+
+ΦFlow has been used in the following data sets:
+
+* [PDEBench](https://github.com/pdebench/PDEBench)
+* [PDEArena](https://microsoft.github.io/pdearena/)
## Version History
diff --git a/demos/pipe.py b/demos/pipe.py
index 0d25b435d..17bcec56f 100644
--- a/demos/pipe.py
+++ b/demos/pipe.py
@@ -4,8 +4,7 @@
from phi.flow import *
DT = 1.
-INFLOW_BC = extrapolation.combine_by_direction(normal=1, tangential=0)
-velocity = StaggeredGrid(0, extrapolation.combine_sides(x=(INFLOW_BC, extrapolation.BOUNDARY), y=0), x=50, y=32)
+velocity = StaggeredGrid(0, extrapolation.combine_sides(x=(vec(x=1, y=0), extrapolation.BOUNDARY), y=0), x=50, y=32)
pressure = None
for _ in view('velocity, pressure', namespace=globals()).range():
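Note (not part of the patch): a minimal sketch of the simplified boundary setup above. A constant vector `vec(x=1, y=0)` now serves directly as a velocity boundary value, playing the role of the removed `combine_by_direction(normal=1, tangential=0)` for an x-facing boundary.

```python
from phi.flow import *  # exports StaggeredGrid, extrapolation, vec

# Left x-face: constant inflow velocity (1, 0); right x-face: open boundary;
# y-faces: no-slip walls (velocity 0).
velocity = StaggeredGrid(
    0,
    extrapolation.combine_sides(x=(vec(x=1, y=0), extrapolation.BOUNDARY), y=0),
    x=50, y=32,
)
```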
diff --git a/phi/VERSION b/phi/VERSION
index ecf00d901..1506473e2 100644
--- a/phi/VERSION
+++ b/phi/VERSION
@@ -1 +1 @@
-2.2.5
\ No newline at end of file
+2.2.6
\ No newline at end of file
diff --git a/phi/field/_grid.py b/phi/field/_grid.py
index 5b90af34e..8e7286c4a 100644
--- a/phi/field/_grid.py
+++ b/phi/field/_grid.py
@@ -1,4 +1,4 @@
-from typing import TypeVar, Any
+from typing import TypeVar, Any, Tuple
from phi import math, geom
from phi.geom import Box, Geometry, GridCell
@@ -70,6 +70,19 @@ def __value_attrs__(self):
def __variable_attrs__(self):
return '_values',
+ def __expand__(self, dims: Shape, **kwargs) -> 'Grid':
+ return self.with_values(math.expand(self.values, dims, **kwargs))
+
+ def __replace_dims__(self, dims: Tuple[str, ...], new_dims: Shape, **kwargs) -> 'Grid':
+ for dim in dims:
+ if dim in self._resolution:
+ return NotImplemented
+ values = math.rename_dims(self.values, dims, new_dims)
+ extrapolation = math.rename_dims(self.extrapolation, dims, new_dims, **kwargs)
+ bounds = math.rename_dims(self.bounds, dims, new_dims, **kwargs)
+ return type(self)(values, extrapolation=extrapolation, bounds=bounds, resolution=self._resolution)
+
+
def __eq__(self, other):
if not type(self) == type(other):
return False
@@ -487,10 +500,10 @@ def expand_staggered(values: Tensor, resolution: Shape, extrapolation: Extrapola
def resolution_from_staggered_tensor(values: Tensor, extrapolation: Extrapolation):
any_dim = values.shape.spatial.names[0]
- x = values.vector[any_dim]
+ x_shape = values.shape.after_gather({'vector': any_dim})
ext_lower, ext_upper = extrapolation.valid_outer_faces(any_dim)
delta = int(ext_lower) + int(ext_upper) - 1
- resolution = x.shape.spatial._replace_single_size(any_dim, x.shape.get_size(any_dim) - delta)
+ resolution = x_shape.spatial._replace_single_size(any_dim, x_shape.get_size(any_dim) - delta)
return resolution
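Note (not part of the patch): a sketch of what the new `__expand__`/`__replace_dims__` hooks enable on grids, mirroring the tests added in `tests/commit/field/test__grid.py` below.

```python
from phi.flow import *  # CenteredGrid, math, batch, spatial

grid = CenteredGrid(1, x=10, y=10)
grid = math.expand(grid, batch(b=100))     # dispatches to Grid.__expand__
grid = math.rename_dims(grid, 'b', 'bat')  # dispatches to Grid.__replace_dims__
assert batch(bat=100) & spatial(x=10, y=10) == grid.shape
```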
diff --git a/phi/field/_point_cloud.py b/phi/field/_point_cloud.py
index d16346a23..e20655dad 100644
--- a/phi/field/_point_cloud.py
+++ b/phi/field/_point_cloud.py
@@ -1,5 +1,5 @@
import warnings
-from typing import Any
+from typing import Any, Tuple
from phi import math
from phi.geom import Geometry, GridCell, Box, Point
@@ -82,6 +82,15 @@ def __value_attrs__(self):
def __variable_attrs__(self):
return '_values', '_elements'
+ def __expand__(self, dims: Shape, **kwargs) -> 'PointCloud':
+ return self.with_values(math.expand(self.values, dims, **kwargs))
+
+ def __replace_dims__(self, dims: Tuple[str, ...], new_dims: Shape, **kwargs) -> 'PointCloud':
+ elements = math.rename_dims(self.elements, dims, new_dims)
+ values = math.rename_dims(self.values, dims, new_dims)
+ extrapolation = math.rename_dims(self.extrapolation, dims, new_dims, **kwargs)
+ return PointCloud(elements, values, extrapolation, self._add_overlapping, self._bounds, self._color)
+
def __eq__(self, other):
if not type(self) == type(other):
return False
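Note (not part of the patch): the same magic-op hooks for point clouds, mirroring the new test file `tests/commit/field/test__point_cloud.py` below.

```python
from phi.field import PointCloud
from phi.geom import Sphere
from phi.math import batch, instance, stack, vec, expand, rename_dims, shape

c = PointCloud(Sphere(stack([vec(x=0, y=1)] * 50, instance('points')), radius=.1))
c = expand(c, batch(b=2))                  # PointCloud.__expand__
c = rename_dims(c, 'points', 'particles')  # PointCloud.__replace_dims__
assert batch(b=2) & instance(particles=50) == shape(c)
```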
diff --git a/phi/field/_scene.py b/phi/field/_scene.py
index cc92999bb..242a80ffe 100644
--- a/phi/field/_scene.py
+++ b/phi/field/_scene.py
@@ -199,7 +199,7 @@ def at(directory: str or tuple or list or math.Tensor or 'Scene', id: int or mat
id = math.wrap(id)
paths = math.map(lambda d, i: join(d, f"sim_{i:06d}"), directory, id)
# test all exist
- for path in math.flatten(paths):
+ for path in math.flatten(paths, flatten_batch=True):
if not isdir(path):
raise IOError(f"There is no scene at '{path}'")
return Scene(paths)
@@ -266,7 +266,7 @@ def exist_properties(self):
if self._properties is not None:
return True # must have been written or read
else:
- json_file = join(next(iter(math.flatten(self._paths))), "description.json")
+ json_file = join(next(iter(math.flatten(self._paths, flatten_batch=True))), "description.json")
return isfile(json_file)
def exists_config(self):
@@ -454,7 +454,7 @@ def copy_calling_script(self, full_trace=False, include_context_information=True
text = "\n\n".join(blocks)
self.copy_src_text('ipython.py', text)
if include_context_information:
- for path in math.flatten(self._paths):
+ for path in math.flatten(self._paths, flatten_batch=True):
with open(join(path, 'src', 'context.json'), 'w') as context_file:
json.dump({
'phi_version': phi_version,
@@ -462,23 +462,23 @@ def copy_calling_script(self, full_trace=False, include_context_information=True
}, context_file)
def copy_src(self, script_path, only_external=True):
- for path in math.flatten(self._paths):
+ for path in math.flatten(self._paths, flatten_batch=True):
if not only_external or not _is_phi_file(script_path):
shutil.copy(script_path, join(path, 'src', basename(script_path)))
def copy_src_text(self, filename, text):
- for path in math.flatten(self._paths):
+ for path in math.flatten(self._paths, flatten_batch=True):
target = join(path, 'src', filename)
with open(target, "w") as file:
file.writelines(text)
def mkdir(self):
- for path in math.flatten(self._paths):
+ for path in math.flatten(self._paths, flatten_batch=True):
isdir(path) or os.mkdir(path)
def remove(self):
""" Deletes the scene directory and all contained files. """
- for p in math.flatten(self._paths):
+ for p in math.flatten(self._paths, flatten_batch=True):
p = abspath(p)
if isdir(p):
shutil.rmtree(p)
diff --git a/phi/geom/_geom.py b/phi/geom/_geom.py
index bbb373bc7..e65f53692 100644
--- a/phi/geom/_geom.py
+++ b/phi/geom/_geom.py
@@ -1,4 +1,5 @@
from numbers import Number
+from typing import Tuple
import numpy as np
@@ -515,6 +516,30 @@ def __stack__(self, values: tuple, dim: Shape, **kwargs) -> 'Geometry':
else:
return Geometry.__stack__(self, values, dim, **kwargs)
+ def __concat__(self, values: tuple, dim: str, **kwargs) -> 'Point':
+ if all(isinstance(v, Point) for v in values):
+ return Point(math.concat([v.center for v in values], dim, **kwargs))
+ else:
+ return NotImplemented
+
+ def __replace_dims__(self, dims: Tuple[str, ...], new_dims: Shape, **kwargs) -> 'Point':
+ return Point(math.rename_dims(self._location, dims, new_dims, **kwargs))
+
+ def __expand__(self, dims: Shape, **kwargs) -> 'Point':
+ return Point(math.expand(self._location, dims, **kwargs))
+
+ def __pack_dims__(self, dims: Tuple[str, ...], packed_dim: Shape, pos: int or None, **kwargs) -> 'Point':
+ return Point(math.pack_dims(self._location, dims, packed_dim, pos, **kwargs))
+
+ def __unpack_dim__(self, dim: str, unpacked_dims: Shape, **kwargs) -> 'Point':
+ return Point(math.unpack_dim(self._location, dim, unpacked_dims, **kwargs))
+
+ def __flatten__(self, flat_dim: Shape, flatten_batch: bool, **kwargs) -> 'Point':
+ dims = self.shape.without('vector')
+ if not flatten_batch:
+ dims = dims.non_batch
+ return Point(math.pack_dims(self._location, dims, flat_dim, **kwargs))
+
def assert_same_rank(rank1, rank2, error_message):
""" Tests that two objects have the same spatial rank. Objects can be of types: `int`, `None` (no check), `Geometry`, `Shape`, `Tensor` """
diff --git a/phi/geom/_sphere.py b/phi/geom/_sphere.py
index b7d8d2a73..7ee000959 100644
--- a/phi/geom/_sphere.py
+++ b/phi/geom/_sphere.py
@@ -1,11 +1,8 @@
-import warnings
-from typing import Dict
+from typing import Tuple
from phi import math
-
from ._geom import Geometry, _keep_vector
from ..math import wrap, Tensor, Shape
-from ..math.backend import PHI_LOGGER
from ..math.magic import slicing_dict
@@ -120,6 +117,30 @@ def __stack__(self, values: tuple, dim: Shape, **kwargs) -> 'Geometry':
else:
return Geometry.__stack__(self, values, dim, **kwargs)
+ def __concat__(self, values: tuple, dim: str, **kwargs) -> 'Sphere':
+ if all(isinstance(v, Sphere) for v in values):
+ return Sphere(math.concat([v.center for v in values], dim, **kwargs), radius=math.concat([v.radius for v in values], dim, **kwargs))
+ else:
+ return NotImplemented
+
+ def __replace_dims__(self, dims: Tuple[str, ...], new_dims: Shape, **kwargs) -> 'Sphere':
+ return Sphere(math.rename_dims(self._center, dims, new_dims, **kwargs), radius=math.rename_dims(self._radius, dims, new_dims, **kwargs))
+
+ def __expand__(self, dims: Shape, **kwargs) -> 'Sphere':
+ return Sphere(math.expand(self._center, dims, **kwargs), radius=self._radius)
+
+ def __pack_dims__(self, dims: Tuple[str, ...], packed_dim: Shape, pos: int or None, **kwargs) -> 'Sphere':
+ return Sphere(math.pack_dims(self._center, dims, packed_dim, pos, **kwargs), radius=math.pack_dims(self._radius, dims, packed_dim, pos, **kwargs))
+
+ def __unpack_dim__(self, dim: str, unpacked_dims: Shape, **kwargs) -> 'Sphere':
+ return Sphere(math.unpack_dim(self._center, dim, unpacked_dims, **kwargs), radius=math.unpack_dim(self._radius, dim, unpacked_dims, **kwargs))
+
+ def __flatten__(self, flat_dim: Shape, flatten_batch: bool, **kwargs) -> 'Shapable':
+ dims = self.shape.without('vector')
+ if not flatten_batch:
+ dims = dims.non_batch
+ return Sphere(math.pack_dims(self._center, dims, flat_dim, **kwargs), radius=math.pack_dims(self._radius, dims, flat_dim, **kwargs))
+
def push(self, positions: Tensor, outward: bool = True, shift_amount: float = 0) -> Tensor:
raise NotImplementedError()
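Note (not part of the patch): analogous reshaping for `Sphere`, including `unpack_dim` and `pack_dims`; mirrors the updated `tests/commit/geom/test__sphere.py` below. The radius is reshaped alongside the centers when it carries the affected dims.

```python
from phi.geom import Sphere
from phi.math import stack, vec, instance, batch, spatial, channel, expand, unpack_dim, pack_dims

s = stack([Sphere(vec(x=0, y=0), radius=1)] * 50, instance('points'))
s = expand(s, batch(b=100))                      # Sphere.__expand__
s = unpack_dim(s, 'points', spatial(x=10, y=5))  # Sphere.__unpack_dim__
assert batch(b=100) & spatial(x=10, y=5) & channel(vector='x,y') == s.shape
s = pack_dims(s, 'x,y', instance('particles'))   # Sphere.__pack_dims__
assert batch(b=100) & instance(particles=50) & channel(vector='x,y') == s.shape
```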
diff --git a/phi/math/_functional.py b/phi/math/_functional.py
index 638832d29..a66daf6ae 100644
--- a/phi/math/_functional.py
+++ b/phi/math/_functional.py
@@ -1522,9 +1522,12 @@ def native_function(x_flat):
else:
y = f(x)
_, y_tensors = disassemble_tree(y)
- assert not non_batch(
- y_tensors[0]), f"Failed to minimize '{f.__name__}' because it returned a non-scalar output {shape(y_tensors[0])}. Reduce all non-batch dimensions, e.g. using math.l2_loss()"
- return y_tensors[0].sum, (reshaped_native(y_tensors[0], [batch_dims]),)
+ assert not non_batch(y_tensors[0]), f"Failed to minimize '{f.__name__}' because it returned a non-scalar output {shape(y_tensors[0])}. Reduce all non-batch dimensions, e.g. using math.l2_loss()"
+ try:
+ loss_native = reshaped_native(y_tensors[0], [batch_dims])
+ except AssertionError:
+ raise AssertionError(f"Failed to minimize '{f.__name__}' because its output loss {shape(y_tensors[0])} has more batch dimensions than the initial guess {batch_dims}.")
+ return y_tensors[0].sum, (loss_native,)
atol = backend.to_float(reshaped_native(solve.absolute_tolerance, [batch_dims], force_expand=True))
maxi = backend.to_int32(reshaped_native(solve.max_iterations, [batch_dims], force_expand=True))
@@ -1766,13 +1769,16 @@ def f(x):
`identity(value)` which when differentiated, prints the gradient vector.
"""
- def print_grad(_x, _y, dx):
- if all_available(_x, dx):
+ def print_grad(params: dict, _y, dx):
+ param_name, x = next(iter(params.items()))
+ if all_available(x, dx):
if detailed:
print_(dx, name=name)
else:
print(f"{name}: \t{dx}")
- return dx,
+ else:
+ print(f"Cannot print gradient for {param_name}, data not available.")
+ return {param_name: dx}
identity = custom_gradient(lambda x: x, print_grad)
return identity(value)
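Note (not part of the patch): a hedged usage sketch of `print_gradient` after this change. It assumes a differentiable backend (here `phi.torch`) and `math.functional_gradient` as in phi.math 2.2; the hook now also reports when gradient data is unavailable instead of silently skipping.

```python
from phi.torch.flow import *  # assumption: PyTorch backend for autodiff

def loss(x):
    x = math.print_gradient(x, name='dL/dx')  # identity; prints dx during backprop
    return math.l2_loss(x)

dloss = math.functional_gradient(loss)
dloss(math.tensor([1., 2., 3.], spatial('x')))  # prints "dL/dx: ..." or the data-unavailable notice
```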
diff --git a/phi/math/_magic_ops.py b/phi/math/_magic_ops.py
index c9157c72e..f2e4053b4 100644
--- a/phi/math/_magic_ops.py
+++ b/phi/math/_magic_ops.py
@@ -31,7 +31,7 @@ def unstack(value, dim: DimFilter):
# Out: (0.0, 0.0, 0.0, 0.0, 0.0)
```
"""
- assert isinstance(value, Sliceable) and isinstance(value, Shaped)
+ assert isinstance(value, Sliceable) and isinstance(value, Shaped), f"Cannot unstack {type(value).__name__}. Must be Sliceable and Shaped, see https://tum-pbs.github.io/PhiFlow/phi/math/magic.html"
dims = shape(value).only(dim)
assert dims.rank > 0, "unstack() requires at least one dimension"
if dims.rank == 1:
@@ -45,15 +45,15 @@ def unstack(value, dim: DimFilter):
else: # multiple dimensions
if hasattr(value, '__pack_dims__'):
packed_dim = batch('_unstack')
- value = value.__pack_dims__(dims.names, packed_dim, pos=None)
- if value is not NotImplemented:
- return unstack(value, packed_dim)
+ value_packed = value.__pack_dims__(dims.names, packed_dim, pos=None)
+ if value_packed is not NotImplemented:
+ return unstack(value_packed, packed_dim)
first_unstacked = unstack(value, dims[0])
inner_unstacked = [unstack(v, dims.without(dims[0])) for v in first_unstacked]
return sum(inner_unstacked, ())
-def stack(values: tuple or list or dict, dim: Shape, **kwargs):
+def stack(values: tuple or list or dict, dim: Shape, expand_values=False, **kwargs):
"""
Stacks `values` along the new dimension `dim`.
All values must have the same spatial, instance and channel dimensions. If the dimension sizes vary, the resulting tensor will be non-uniform.
@@ -68,6 +68,9 @@ def stack(values: tuple or list or dict, dim: Shape, **kwargs):
dim: `Shape` with at least one dimension. None of these dimensions can be present in any of the `values`.
If `dim` is a single-dimension shape, its size is determined from `len(values)` and can be left undefined (`None`).
If `dim` is a multi-dimension shape, its volume must be equal to `len(values)`.
+ expand_values: If `True`, will first add missing dimensions to all values, not just batch dimensions.
+ This allows tensors with different dimensions to be stacked.
+ The resulting tensor will have all dimensions that are present in `values`.
**kwargs: Additional keyword arguments required by specific implementations.
Adding spatial dimensions to fields requires the `bounds: Box` argument specifying the physical extent of the new dimensions.
Adding batch dimensions must always work without keyword arguments.
@@ -91,14 +94,22 @@ def stack(values: tuple or list or dict, dim: Shape, **kwargs):
assert len(values) > 0, f"stack() got empty sequence {values}"
assert isinstance(dim, Shape)
values_ = tuple(values.values()) if isinstance(values, dict) else values
- for v in values_[1:]:
- assert set(non_batch(v).names) == set(non_batch(values_[0]).names), f"Stacked values must have the same non-batch dimensions but got {non_batch(values_[0])} and {non_batch(v)}"
- # --- Add missing batch dimensions ---
- all_batch_dims = merge_shapes(*[batch(v) for v in values_])
- if isinstance(values, dict):
- values = {k: expand(v, all_batch_dims) for k, v in values.items()}
+ if not expand_values:
+ for v in values_[1:]:
+ assert set(non_batch(v).names) == set(non_batch(values_[0]).names), f"Stacked values must have the same non-batch dimensions but got {non_batch(values_[0])} and {non_batch(v)}"
+ # --- Add missing dimensions ---
+ if expand_values:
+ all_dims = merge_shapes(*values_)
+ if isinstance(values, dict):
+ values = {k: expand(v, all_dims.without(shape(v).non_batch)) for k, v in values.items()}
+ else:
+ values = [expand(v, all_dims.without(shape(v).non_batch)) for v in values]
else:
- values = [expand(v, all_batch_dims) for v in values]
+ all_batch_dims = merge_shapes(*[batch(v) for v in values_])
+ if isinstance(values, dict):
+ values = {k: expand(v, all_batch_dims) for k, v in values.items()}
+ else:
+ values = [expand(v, all_batch_dims) for v in values]
if dim.rank == 1:
assert dim.size == len(values) or dim.size is None, f"stack dim size must match len(values) or be undefined but got {dim} for {len(values)} values"
if dim.size is None:
@@ -248,7 +259,7 @@ def expand(value, dims: Shape, **kwargs):
# Fallback: stack
if hasattr(value, '__stack__'):
if dims.volume > 8:
- warnings.warn(f"expand() default implementation is slow on large shapes {dims}. Please implement __expand__()", RuntimeWarning, stacklevel=2)
+ warnings.warn(f"expand() default implementation is slow on large shapes {dims}. Please implement __expand__() for {type(value).__name__} as defined in phi.math.magic", RuntimeWarning, stacklevel=2)
for dim in reversed(dims):
value = stack((value,) * dim.size, dim, **kwargs)
assert value is not NotImplemented, "Value must implement either __expand__ or __stack__"
@@ -286,7 +297,7 @@ def rename_dims(value,
"""
if isinstance(value, Shape):
return value._replace_names_and_types(dims, names)
- assert isinstance(value, Shapable) and isinstance(value, Shaped), "value must be a Shape or Shapable."
+ assert isinstance(value, Shapable) and isinstance(value, Shaped), f"value must be a Shape or Shapable but got {type(value).__name__}"
dims = shape(value).only(dims)
names = dims._replace_names_and_types(dims, names)
if hasattr(value, '__replace_dims__'):
@@ -295,7 +306,7 @@ def rename_dims(value,
return result
# Fallback: unstack and stack
if shape(value).only(dims).volume > 8:
- warnings.warn(f"rename_dims() default implementation is slow on large dimensions ({shape(value).only(dims)}). Please implement __replace_dims__()", RuntimeWarning, stacklevel=2)
+ warnings.warn(f"rename_dims() default implementation is slow on large dimensions ({shape(value).only(dims)}). Please implement __replace_dims__() for {type(value).__name__} as defined in phi.math.magic", RuntimeWarning, stacklevel=2)
for old_name, new_dim in zip(dims.names, names):
value = stack(unstack(value, old_name), new_dim, **kwargs)
return value
@@ -342,17 +353,13 @@ def pack_dims(value, dims: DimFilter, packed_dim: Shape, pos: int or None = None
return value if packed_dim.size is None else expand(value, packed_dim, **kwargs) # Inserting size=1 can cause shape errors
elif len(dims) == 1:
return rename_dims(value, dims, packed_dim, **kwargs)
- if dims.rank == shape(value).rank and hasattr(value, '__flatten__'):
- result = value.__flatten__(packed_dim, **kwargs)
- if result is not NotImplemented:
- return result
if hasattr(value, '__pack_dims__'):
result = value.__pack_dims__(dims.names, packed_dim, pos, **kwargs)
if result is not NotImplemented:
return result
# Fallback: unstack and stack
if shape(value).only(dims).volume > 8:
- warnings.warn(f"pack_dims() default implementation is slow on large dimensions ({shape(value).only(dims)}). Please implement __pack_dims__()", RuntimeWarning, stacklevel=2)
+ warnings.warn(f"pack_dims() default implementation is slow on large dimensions ({shape(value).only(dims)}). Please implement __pack_dims__() for {type(value).__name__} as defined in phi.math.magic", RuntimeWarning, stacklevel=2)
return stack(unstack(value, dims), packed_dim, **kwargs)
@@ -387,7 +394,9 @@ def unpack_dim(value, dim: str or Shape, unpacked_dims: Shape, **kwargs):
assert isinstance(value, Shapable) and isinstance(value, Sliceable) and isinstance(value, Shaped), f"value must be Shapable but got {type(value)}"
if isinstance(dim, Shape):
dim = dim.name
- assert isinstance(dim, str), f"dim must be a str but got {type(dim)}"
+ assert isinstance(dim, str), f"dim must be a str or Shape but got {type(dim)}"
+ if dim not in shape(value):
+ return value # Nothing to do, maybe expand?
if unpacked_dims.rank == 0:
return value[{dim: 0}] # remove dim
elif unpacked_dims.rank == 1:
@@ -398,15 +407,14 @@ def unpack_dim(value, dim: str or Shape, unpacked_dims: Shape, **kwargs):
return result
# Fallback: unstack and stack
if shape(value).only(dim).volume > 8:
- warnings.warn(f"pack_dims() default implementation is slow on large dimensions ({shape(value).only(dim)}). Please implement __unpack_dim__()", RuntimeWarning, stacklevel=2)
+            warnings.warn(f"unpack_dim() default implementation is slow on large dimensions ({shape(value).only(dim)}). Please implement __unpack_dim__() for {type(value).__name__} as defined in phi.math.magic", RuntimeWarning, stacklevel=2)
unstacked = unstack(value, dim)
for dim in reversed(unpacked_dims):
unstacked = [stack(unstacked[i:i+dim.size], dim, **kwargs) for i in range(0, len(unstacked), dim.size)]
return unstacked[0]
-
-def flatten(value, flat_dim: Shape = instance('flat'), **kwargs):
+def flatten(value, flat_dim: Shape = instance('flat'), flatten_batch=False, **kwargs):
"""
Returns a `Tensor` with the same values as `value` but only a single dimension `flat_dim`.
The order of the values in memory is not changed.
@@ -414,6 +422,8 @@ def flatten(value, flat_dim: Shape = instance('flat'), **kwargs):
Args:
value: `phi.math.magic.Shapable`, such as `Tensor`.
flat_dim: Dimension name and type as `Shape` object. The size is ignored.
+ flatten_batch: Whether to flatten batch dimensions as well.
+            If `False`, batch dimensions are kept; only non-batch dimensions are flattened.
**kwargs: Additional keyword arguments required by specific implementations.
Adding spatial dimensions to fields requires the `bounds: Box` argument specifying the physical extent of the new dimensions.
Adding batch dimensions must always work without keyword arguments.
@@ -430,11 +440,11 @@ def flatten(value, flat_dim: Shape = instance('flat'), **kwargs):
assert isinstance(flat_dim, Shape) and flat_dim.rank == 1, flat_dim
assert isinstance(value, Shapable) and isinstance(value, Shaped), f"value must be Shapable but got {type(value)}"
if hasattr(value, '__flatten__'):
- result = value.__flatten__(flat_dim, **kwargs)
+ result = value.__flatten__(flat_dim, flatten_batch, **kwargs)
if result is not NotImplemented:
return result
# Fallback: pack_dims
- return pack_dims(value, shape(value), flat_dim, **kwargs)
+ return pack_dims(value, shape(value) if flatten_batch else non_batch(value), flat_dim, **kwargs)
# PhiTreeNode
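Note (not part of the patch): the two user-facing behavior changes in this file at a glance, `stack(..., expand_values=True)` and the new `flatten_batch` flag. Both are covered by the updated `test__magic_ops.py` below.

```python
from phi.math import stack, flatten, linspace, random_normal, batch, spatial, instance, channel

# stack with expand_values=True first expands all values to the union of their dims
v = stack([0, linspace(0, 1, instance(points=10))], channel(vector='x,y'), expand_values=True)
assert instance(points=10) & channel(vector='x,y') == v.shape

# flatten now keeps batch dimensions unless flatten_batch=True
a = random_normal(spatial(x=5) & batch(b=2))
assert batch(b=2) & instance(points=5) == flatten(a, instance('points')).shape
assert instance(points=10) == flatten(a, instance('points'), flatten_batch=True).shape
```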
diff --git a/phi/math/_nd.py b/phi/math/_nd.py
index f758095f5..f83d19651 100644
--- a/phi/math/_nd.py
+++ b/phi/math/_nd.py
@@ -5,7 +5,7 @@
from . import _ops as math
from . import extrapolation as extrapolation
from ._magic_ops import stack, rename_dims, concat, variable_values
-from ._shape import Shape, channel, batch, spatial, DimFilter, parse_dim_order, shape
+from ._shape import Shape, channel, batch, spatial, DimFilter, parse_dim_order, shape, merge_shapes
from ._tensors import Tensor, wrap
from .magic import PhiTreeNode
from .extrapolation import Extrapolation
@@ -21,8 +21,20 @@ def vec(name='vector', **components) -> Tensor:
Returns:
`Tensor`
+
+ Examples:
+ ```python
+ vec(x=1, y=0, z=-1)
+ # Out: (x=1, y=0, z=-1)
+
+ vec(x=1., z=0)
+ # Out: (x=1.000, z=0.000)
+
+ vec(x=tensor([1, 2, 3], instance('particles')), y=0)
+ # Out: (x=1, y=0); (x=2, y=0); (x=3, y=0) (particlesⁱ=3, vectorᶜ=x,y)
+ ```
"""
- return stack(components, channel(name))
+ return stack(components, channel(name), expand_values=True)
def const_vec(value: float or Tensor, dim: Shape or tuple or list or str):
diff --git a/phi/math/_ops.py b/phi/math/_ops.py
index 90013439d..3238daa9c 100644
--- a/phi/math/_ops.py
+++ b/phi/math/_ops.py
@@ -380,7 +380,7 @@ def map_(function, *values, **kwargs) -> Tensor or None:
values = [wrap(v) for v in values]
shape = merge_shapes(*[v.shape for v in values])
values_reshaped = [expand(v, shape) for v in values]
- flat = [flatten(v) for v in values_reshaped]
+ flat = [flatten(v, flatten_batch=True) for v in values_reshaped]
result = []
for items in zip(*flat):
result.append(function(*items, **kwargs))
@@ -630,6 +630,15 @@ def linspace(start: int or Tensor, stop, dim: Shape) -> Tensor:
Returns:
`Tensor`
+
+ Examples:
+ ```python
+ math.linspace(0, 1, spatial(x=5))
+ # Out: (0.000, 0.250, 0.500, 0.750, 1.000) along xˢ
+
+ math.linspace(0, (-1, 1), spatial(x=3))
+ # Out: (0.000, 0.000); (-0.500, 0.500); (-1.000, 1.000) (xˢ=3, vectorᶜ=2)
+ ```
"""
assert isinstance(dim, Shape) and dim.rank == 1, f"dim must be a single-dimension Shape but got {dim}"
if is_scalar(start) and is_scalar(stop):
@@ -738,15 +747,26 @@ def pad(value: Tensor, widths: dict, mode: 'e_.Extrapolation' or Tensor or Numbe
Returns:
Padded `Tensor`
+
+ Examples:
+ ```python
+ math.pad(math.ones(spatial(x=10, y=10)), {'x': (1, 1), 'y': (2, 1)}, 0)
+ # Out: (xˢ=12, yˢ=13) 0.641 ± 0.480 (0e+00...1e+00)
+
+ math.pad(math.ones(spatial(x=10, y=10)), {'x': (1, -1)}, 0)
+ # Out: (xˢ=10, yˢ=10) 0.900 ± 0.300 (0e+00...1e+00)
+ ```
"""
mode = mode if isinstance(mode, e_.Extrapolation) else e_.ConstantExtrapolation(mode)
- has_negative_widths = any(w[0] < 0 or w[1] < 0 for w in widths.values())
+ has_negative_widths = any(w0 < 0 or w1 < 0 for w0, w1 in widths.values())
+ has_positive_widths = any(w0 > 0 or w1 > 0 for w0, w1 in widths.values())
slices = None
if has_negative_widths:
slices = {dim: slice(max(0, -w[0]), min(0, w[1]) or None) for dim, w in widths.items()}
widths = {dim: (max(0, w[0]), max(0, w[1])) for dim, w in widths.items()}
- result = mode.pad(value, widths, **kwargs)
- return result[slices] if has_negative_widths else result
+ result_padded = mode.pad(value, widths, **kwargs) if has_positive_widths else value
+ result_sliced = result_padded[slices] if has_negative_widths else result_padded
+ return result_sliced
def closest_grid_values(grid: Tensor,
@@ -1407,10 +1427,11 @@ def dot(x: Tensor,
result_native = backend.tensordot(x_native, x.shape.indices(x_dims), y_native, y.shape.indices(y_dims))
result_shape = concat_shapes(remaining_shape_x, remaining_shape_y)
else: # shared batch dimensions -> einsum
+ result_shape = merge_shapes(x.shape.without(x_dims), y.shape.without(y_dims))
REDUCE_LETTERS = list('ijklmn')
KEEP_LETTERS = list('abcdefgh')
x_letters = [(REDUCE_LETTERS if dim in x_dims else KEEP_LETTERS).pop(0) for dim in x.shape.names]
- x_letter_map = {dim: letter for dim, letter in zip(x.shape.names, x_letters)}
+ letter_map = {dim: letter for dim, letter in zip(x.shape.names, x_letters)}
REDUCE_LETTERS = list('ijklmn')
y_letters = []
for dim in y.shape.names:
@@ -1418,13 +1439,14 @@ def dot(x: Tensor,
y_letters.append(REDUCE_LETTERS.pop(0))
else:
if dim in x.shape and dim not in x_dims:
- y_letters.append(x_letter_map[dim])
+ y_letters.append(letter_map[dim])
else:
- y_letters.append(KEEP_LETTERS.pop(0))
- keep_letters = list('abcdefgh')[:-len(KEEP_LETTERS)]
+ next_letter = KEEP_LETTERS.pop(0)
+ letter_map[dim] = next_letter
+ y_letters.append(next_letter)
+ keep_letters = [letter_map[dim] for dim in result_shape.names]
subscripts = f'{"".join(x_letters)},{"".join(y_letters)}->{"".join(keep_letters)}'
result_native = backend.einsum(subscripts, x_native, y_native)
- result_shape = merge_shapes(x.shape.without(x_dims), y.shape.without(y_dims)) # don't check group match ToDo the order might be incorrect here
return NativeTensor(result_native, result_shape)
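Note (not part of the patch): the einsum-path fix above matters when `dot` reduces over dims while sharing batch dimensions; the output letters now follow the merged result shape instead of the previous, possibly mis-ordered letter list. A sketch:

```python
from phi import math
from phi.math import batch, spatial, channel

x = math.random_normal(batch(b=2) & spatial(x=3) & channel(c=4))
y = math.random_normal(batch(b=2) & channel(c=4) & spatial(y=5))
# shared batch dim 'b' routes through einsum; the result shape is
# merge_shapes(x.shape.without('c'), y.shape.without('c')) = (b, x, y)
result = math.dot(x, 'c', y, 'c')
assert batch(b=2) & spatial(x=3, y=5) == result.shape
```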
diff --git a/phi/math/_shape.py b/phi/math/_shape.py
index 3d8a93923..52d162909 100644
--- a/phi/math/_shape.py
+++ b/phi/math/_shape.py
@@ -52,6 +52,9 @@ def __init__(self, sizes: tuple, names: tuple, types: tuple, item_names: tuple):
# assert isinstance(self.item_names, tuple)
# assert all([items is None or isinstance(items, tuple) for items in self.item_names])
# assert all([items is None or all([isinstance(n, str) for n in items]) for items in self.item_names])
+ # for size in sizes:
+ # if size is not None and not isinstance(size, int):
+ # assert size.rank > 0
def _to_dict(self, include_sizes=True):
result = dict(names=self.names, types=self.types, item_names=self.item_names)
@@ -758,7 +761,7 @@ def _size_and_item_names_from_obj(obj, prev_size, prev_item_names, keep_item_nam
if isinstance(obj, (tuple, list)):
return len(obj), tuple(obj)
elif isinstance(obj, Number):
- return obj, prev_item_names if keep_item_names and obj == prev_size else None
+ return obj, prev_item_names if keep_item_names and (prev_size is None or _size_equal(obj, prev_size)) else None
elif isinstance(obj, math.Tensor) or obj is None:
return obj, None
else:
@@ -955,6 +958,7 @@ def after_gather(self, selection: dict) -> 'Shape':
else:
from phi.math import Tensor
gathered_sizes = [(s[{sel_dim: selection}] if isinstance(s, Tensor) else s) for s in result.sizes]
+ gathered_sizes = [(int(s) if isinstance(s, Tensor) and s.rank == 0 else s) for s in gathered_sizes]
result = result.with_sizes(gathered_sizes, keep_item_names=True).without(sel_dim)
elif isinstance(selection, slice):
step = selection.step or 1
diff --git a/phi/math/_tensors.py b/phi/math/_tensors.py
index a77811280..23061e280 100644
--- a/phi/math/_tensors.py
+++ b/phi/math/_tensors.py
@@ -9,7 +9,7 @@
import numpy
import numpy as np
-from ._magic_ops import PhiTreeNodeType, variable_attributes, copy_with
+from ._magic_ops import PhiTreeNodeType, variable_attributes, copy_with, stack
from ._shape import (Shape,
CHANNEL_DIM, BATCH_DIM, SPATIAL_DIM, EMPTY_SHAPE,
parse_dim_order, shape_stack, merge_shapes, channel, concat_shapes,
@@ -85,7 +85,8 @@ def numpy(self, order: str or tuple or list or Shape = None) -> np.ndarray:
return choose_backend(native).numpy(native)
def __array__(self, dtype=None): # NumPy conversion
- warnings.warn("Automatic conversion of Φ-Flow tensors to NumPy can cause problems because the dimension order is not guaranteed.", SyntaxWarning, stacklevel=3)
+ if self.rank > 1:
+ warnings.warn("Automatic conversion of Φ-Flow tensors to NumPy can cause problems because the dimension order is not guaranteed.", SyntaxWarning, stacklevel=3)
return self.numpy(self._shape)
def __array_ufunc__(self, ufunc, method, *inputs, **kwargs): # NumPy interface
@@ -663,13 +664,26 @@ def __iter__(self):
elif self.rank == 0:
return iter([self.native()])
else:
- from ._magic_ops import flatten
- return iter(flatten(self))
+ from ._ops import reshaped_native
+ native = reshaped_native(self, [self.shape])
+ return iter(native)
def _tensor(self, other):
if isinstance(other, Tensor):
return other
- return compatible_tensor(other, compat_shape=self.shape, compat_natives=self._natives(), convert=False)
+ elif isinstance(other, (tuple, list)) and any(isinstance(v, Tensor) for v in other):
+ if 'vector' in self.shape:
+ outer_dim = self.shape['vector']
+ elif self.shape.channel_rank == 1:
+ outer_dim = self.shape.channel
+ else:
+ raise ValueError(f"Cannot combine tensor of shape {self.shape} with tuple {tuple([type(v).__name__ for v in other])}")
+ remaining_shape = self.shape.without(outer_dim)
+ other_items = [v if isinstance(v, Tensor) else compatible_tensor(v, compat_shape=remaining_shape, compat_natives=self._natives(), convert=False) for v in other]
+ other_stacked = stack(other_items, outer_dim, expand_values=True)
+ return other_stacked
+ else:
+ return compatible_tensor(other, compat_shape=self.shape, compat_natives=self._natives(), convert=False)
def _op1(self, native_function):
"""
@@ -921,7 +935,9 @@ def __stack__(self, values: tuple, dim: Shape, **kwargs) -> 'Shapable':
def __concat__(self, values: tuple, dim: str, **kwargs) -> 'Shapable':
return NotImplemented
- def __flatten__(self, flat_dim: Shape):
+ def __flatten__(self, flat_dim: Shape, flatten_batch: bool):
+ if not flatten_batch and self._shape.batch:
+ raise NotImplementedError
return layout(self._as_list(), flat_dim)
def __expand__(self, dims: Shape, **kwargs) -> 'Tensor':
@@ -1014,7 +1030,7 @@ def _tensor_reduce(self,
result = native_function(choose_backend(self._as_list()), self._obj, 0)
return wrap(result)
if not self._shape.without(dims):
- return self.__flatten__(batch('_flat'))._tensor_reduce(('_flat',), dtype, native_function, collapsed_function, unaffected_function)
+ return self.__flatten__(batch('_flat'), flatten_batch=True)._tensor_reduce(('_flat',), dtype, native_function, collapsed_function, unaffected_function)
else:
raise NotImplementedError(f"Partial Layout reduction not yet supported. Shape={self._shape}, reduce={dims}")
# # --- inner reduce ---
@@ -1148,11 +1164,11 @@ def _op1(self, native_function):
def _op2(self, other, operator, native_function, op_name: str = 'unknown', op_symbol: str = '?'):
try:
- other = self._tensor(other)
+ other_tensor = self._tensor(other)
except NoBackendFound:
return NotImplemented
- if isinstance(other, NativeTensor):
- return op2_native(self, other, native_function)
+ if isinstance(other_tensor, NativeTensor) or (isinstance(other_tensor, Tensor) and not isinstance(other, Tensor)):
+ return op2_native(self, other_tensor, native_function)
else:
return NotImplemented
@@ -1320,6 +1336,8 @@ def _op2(self, other, operator, native_function, op_name: str = 'unknown', op_sy
else:
combined_shape = (self._shape & other_t._shape).with_sizes(inner.shape)
return CollapsedTensor(inner, combined_shape)
+ elif not isinstance(other, Tensor): # was converted to Tensor, probably TensorStack
+ return operator(self, other_t)
else:
return NotImplemented
@@ -1595,17 +1613,35 @@ def tensor(data: Tensor or Shape or tuple or list or numbers.Number,
`phi.math.wrap()` which uses `convert=False`, `layout()`.
Args:
- data: native tensor, scalar, sequence, Shape or Tensor
- shape: Ordered dimensions and types. If sizes are defined, they will be checked against `data`.`
- convert: If True, converts the data to the native format of the current default backend.
- If False, wraps the data in a `Tensor` but keeps the given data reference if possible.
+ data: native tensor, scalar, sequence, Shape or Tensor
+        shape: Ordered dimensions and types. If sizes are defined, they will be checked against `data`.
+ convert: If True, converts the data to the native format of the current default backend.
+ If False, wraps the data in a `Tensor` but keeps the given data reference if possible.
Raises:
- AssertionError: if dimension names are not provided and cannot automatically be inferred
- ValueError: if `data` is not tensor-like
+ AssertionError: if dimension names are not provided and cannot automatically be inferred
+ ValueError: if `data` is not tensor-like
Returns:
- Tensor containing same values as data
+ Tensor containing same values as data
+
+ Examples:
+ ```python
+ tensor([1, 2, 3], channel(vector='x,y,z'))
+ # Out: (x=1, y=2, z=3)
+
+ tensor([1., 2, 3], channel(vector='x,y,z'))
+ # Out: (x=1.000, y=2.000, z=3.000) float64
+
+ tensor(numpy.zeros([10, 8, 6, 2]), batch('batch'), spatial('x,y'), channel(vector='x,y'))
+ # Out: (batchᵇ=10, xˢ=8, yˢ=6, vectorᶜ=x,y) float64 const 0.0
+
+ tensor([(0, 1), (0, 2), (1, 3)], instance('particles'), channel(vector='x,y'))
+ # Out: (x=0, y=1); (x=0, y=2); (x=1, y=3) (particlesⁱ=3, vectorᶜ=x,y)
+
+ tensor(numpy.random.randn(10))
+ # Out: (vectorᶜ=10) float64 -0.128 ± 1.197 (-2e+00...2e+00)
+ ```
"""
assert all(isinstance(s, Shape) for s in shape), f"Cannot create tensor because shape needs to be one or multiple Shape instances but got {shape}"
shape = None if len(shape) == 0 else concat_shapes(*shape)
@@ -1776,7 +1812,7 @@ def compatible_tensor(data, compat_shape: Shape = None, compat_natives=(), conve
warnings.warn(f"Combining a phi.math.Tensor with a {data_type} of same shape is not invariant under shape permutations. Please convert the {data_type} to a phi.math.Tensor first. Shapes: {shape} and {compat_shape}", SyntaxWarning, stacklevel=5)
return NativeTensor(data, compat_shape.with_sizes(shape))
else:
- raise ValueError(f"Cannot combine native tensor of shape {shape} with tensor of shape {compat_shape}")
+ raise ValueError(f"Cannot combine tensor of shape {shape} with tensor of shape {compat_shape}")
def broadcastable_native_tensors(*tensors):
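Note (not part of the patch): the `_tensor` change above enables broadcasting a tuple that mixes numbers and Tensors against a vector-valued tensor; the tuple entries are stacked along the existing channel dim with `expand_values=True`. Mirrors `test__tensors.py` below.

```python
from phi import math
from phi.math import instance, channel, vec

t = vec(x=0, y=1)
i = math.linspace(0, 1, instance(particles=10))
result = t + (0, i)  # tuple entries are stacked along the existing 'vector' dim
assert instance(particles=10) & channel(vector='x,y') == result.shape
```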
diff --git a/phi/math/backend/_backend.py b/phi/math/backend/_backend.py
index 7546a3098..823c498a4 100644
--- a/phi/math/backend/_backend.py
+++ b/phi/math/backend/_backend.py
@@ -134,7 +134,7 @@ def complex_type(self) -> DType:
def combine_types(self, *dtypes: DType) -> DType:
return combine_types(*dtypes, fp_precision=self.precision)
- def auto_cast(self, *tensors, bool_to_int=False) -> list:
+ def auto_cast(self, *tensors, bool_to_int=False, int_to_float=False) -> list:
"""
Determines the appropriate value type resulting from operations involving the tensors as input.
@@ -152,6 +152,8 @@ def auto_cast(self, *tensors, bool_to_int=False) -> list:
result_type = self.combine_types(*dtypes)
if result_type.kind == bool and bool_to_int:
result_type = DType(int, 32)
+ if result_type.kind == int and int_to_float:
+ result_type = DType(float, self.precision)
if result_type.kind in (int, float, complex, bool): # do not cast everything to string!
tensors = [self.cast(t, result_type) for t in tensors]
return tensors
diff --git a/phi/math/extrapolation.py b/phi/math/extrapolation.py
index 652afeb18..09bdcddd1 100644
--- a/phi/math/extrapolation.py
+++ b/phi/math/extrapolation.py
@@ -10,7 +10,7 @@
from phi.math.backend._backend import get_spatial_derivative_order
from .backend import choose_backend
-from ._shape import Shape, channel, spatial
+from ._shape import Shape, channel, spatial, EMPTY_SHAPE, merge_shapes
from ._magic_ops import concat, stack
from ._tensors import Tensor, NativeTensor, CollapsedTensor, TensorStack, wrap
from . import _ops as math # TODO this executes _ops.py, can we avoid this?
@@ -196,6 +196,10 @@ def __init__(self, value: Tensor or float):
self.value = wrap(value)
""" Extrapolation value """
+ @property
+ def shape(self):
+ return self.value.shape
+
def __repr__(self):
return repr(self.value)
@@ -244,7 +248,11 @@ def pad(self, value: Tensor, widths: dict, **kwargs):
native = value._native
ordered_pad_widths = order_by_shape(value.shape, widths, default=(0, 0))
backend = choose_backend(native, pad_value.native())
- result_tensor = backend.pad(native, ordered_pad_widths, 'constant', pad_value.native())
+ for dim in pad_value.shape.non_batch.names:
+ assert dim in value.shape, f"Cannot pad tensor {value.shape} with extrapolation {pad_value.shape} because non-batch dimension '{dim}' is missing."
+ result_tensor = NotImplemented
+ if pad_value.rank == 0:
+ result_tensor = backend.pad(native, ordered_pad_widths, 'constant', pad_value.native())
if result_tensor is NotImplemented:
return Extrapolation.pad(self, value, widths, **kwargs)
return NativeTensor(result_tensor, value.shape.after_pad(widths))
@@ -263,8 +271,8 @@ def pad(self, value: Tensor, widths: dict, **kwargs):
elif isinstance(value, TensorStack):
if not value.requires_broadcast:
return self.pad(value._cache(), widths)
- inner_widths = {dim: w for dim, w in widths.items() if dim != value.stack_dim_name}
- tensors = [self.pad(t, inner_widths) for t in value.dimension(value.stack_dim.name)]
+ inner_widths = {dim: w for dim, w in widths.items() if dim != value.stack_dim.name}
+ tensors = [self[{value.stack_dim.name: i}].pad(t, inner_widths) for i, t in enumerate(value.dimension(value.stack_dim.name))]
return TensorStack(tensors, value.stack_dim)
else:
return Extrapolation.pad(self, value, widths, **kwargs)
@@ -364,6 +372,10 @@ def __neg__(self):
class _CopyExtrapolation(Extrapolation):
+ @property
+ def shape(self):
+ return EMPTY_SHAPE
+
def is_copy_pad(self, dim: str, upper_edge: bool):
return True
@@ -625,6 +637,11 @@ def transform_coordinates(self, coordinates: Tensor, shape: Shape, **kwargs) ->
class _NoExtrapolation(Extrapolation): # singleton
+
+ @property
+ def shape(self):
+ return EMPTY_SHAPE
+
def to_dict(self) -> dict:
return {'type': 'none'}
@@ -691,6 +708,10 @@ def __init__(self, derived_from: Extrapolation):
super().__init__(-1)
self.derived_from = derived_from
+ @property
+ def shape(self):
+ return EMPTY_SHAPE
+
def to_dict(self) -> dict:
return {'type': 'undefined', 'derived_from': self.derived_from.to_dict()}
@@ -806,6 +827,10 @@ def __init__(self, extrapolations: Dict[str, Tuple[Extrapolation, Extrapolation]
super().__init__(pad_rank=None)
self.ext = extrapolations
+ @property
+ def shape(self):
+ return merge_shapes(*sum(self.ext.values(), ()))
+
def to_dict(self) -> dict:
return {
'type': 'mixed',
@@ -945,6 +970,10 @@ def __init__(self, normal: Extrapolation, tangential: Extrapolation):
self.normal = normal
self.tangential = tangential
+ @property
+ def shape(self):
+ return merge_shapes(self.normal, self.tangential)
+
def to_dict(self) -> dict:
return {
'type': 'normal-tangential',
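Note (not part of the patch): the new `shape` property makes extrapolations participate in `phi.math.shape()`. Built-in extrapolations stay dimensionless, while constant and combined extrapolations expose the dims of their values. Mirrors the new test in `test_extrapolation.py` below.

```python
from phi import math
from phi.math import shape, EMPTY_SHAPE
from phi.math.extrapolation import PERIODIC, ZERO, combine_sides

v = math.vec(x=1, y=0)
assert EMPTY_SHAPE == shape(PERIODIC)             # built-ins carry no dimensions
assert v.shape == shape(ZERO + v)                 # constant extrapolation with a vector value
assert v.shape == shape(combine_sides(x=v, y=0))  # mixed extrapolation merges component shapes
```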
diff --git a/phi/math/magic.py b/phi/math/magic.py
index e2ec74637..a9fb23736 100644
--- a/phi/math/magic.py
+++ b/phi/math/magic.py
@@ -272,13 +272,15 @@ def __unpack_dim__(self, dim: str, unpacked_dims: Shape, **kwargs) -> 'Shapable'
"""
raise NotImplementedError
- def __flatten__(self, flat_dim: Shape, **kwargs):
+ def __flatten__(self, flat_dim: Shape, flatten_batch: bool, **kwargs) -> 'Shapable':
"""
Lays out all elements along a single dimension.
This is equivalent to packing all dimensions.
Args:
flat_dim: Single dimension as `Shape`.
+ flatten_batch: Whether to flatten batch dimensions as well.
+                If `False`, batch dimensions are kept; only non-batch dimensions are flattened.
**kwargs: Additional keyword arguments required by specific implementations.
Adding spatial dimensions to fields requires the `bounds: Box` argument specifying the physical extent of the new dimensions.
Adding batch dimensions must always work without keyword arguments.
@@ -289,7 +291,6 @@ def __flatten__(self, flat_dim: Shape, **kwargs):
raise NotImplementedError
-
class _PhiTreeNodeType(type):
def __instancecheck__(self, instance):
diff --git a/phi/physics/diffuse.py b/phi/physics/diffuse.py
index 00fbb49b9..a7ddd04d7 100644
--- a/phi/physics/diffuse.py
+++ b/phi/physics/diffuse.py
@@ -15,9 +15,6 @@ def explicit(field: FieldType,
"""
Simulate a finite-time diffusion process of the form dF/dt = α · ΔF on a given `Field` of type `FieldType` with diffusion coefficient α.
- If `field` is periodic (set via `extrapolation='periodic'`), diffusion may be simulated in Fourier space.
- Otherwise, finite differencing is used to approximate the
-
Args:
field: CenteredGrid, StaggeredGrid or ConstantField
diffusivity: Diffusion per time. `diffusion_amount = diffusivity * dt`
diff --git a/phi/torch/_torch_backend.py b/phi/torch/_torch_backend.py
index d5f6e2dd9..de9148cd0 100644
--- a/phi/torch/_torch_backend.py
+++ b/phi/torch/_torch_backend.py
@@ -147,7 +147,7 @@ def multi_slice(self, tensor, slices: tuple):
seed = staticmethod(torch.manual_seed)
def einsum(self, equation, *tensors):
- tensors = self.auto_cast(*tensors)
+ tensors = self.auto_cast(*tensors, bool_to_int=True, int_to_float=True)
return torch.einsum(equation, *tensors)
def jit_compile(self, f: Callable) -> Callable:
diff --git a/phi/vis/_matplotlib/_matplotlib_plots.py b/phi/vis/_matplotlib/_matplotlib_plots.py
index ef3262550..9724be9da 100644
--- a/phi/vis/_matplotlib/_matplotlib_plots.py
+++ b/phi/vis/_matplotlib/_matplotlib_plots.py
@@ -248,7 +248,7 @@ def _plot(axis, data: SampledField, space: Box, show_color_bar, vmin, vmax, **pl
if data.points.shape.non_channel.rank > 1:
data_list = field.unstack(data, data.points.shape.non_channel[0].name)
for d in data_list:
- _plot(axis, d, show_color_bar, vmin, vmax, **plt_args)
+ _plot(axis, d, space, show_color_bar, vmin, vmax, **plt_args)
else:
x, y, z = math.reshaped_numpy(data.points.vector[dims], [vector, data.shape.non_channel])
color = [d.native() for d in data.color.points.unstack(len(x))]
diff --git a/tests/commit/field/test__grid.py b/tests/commit/field/test__grid.py
index 074e562ab..6865a8b79 100644
--- a/tests/commit/field/test__grid.py
+++ b/tests/commit/field/test__grid.py
@@ -138,3 +138,13 @@ def test_is_phi_tree_node(self):
self.assertTrue(issubclass(CenteredGrid, PhiTreeNode))
grid = CenteredGrid(0, x=4)
self.assertTrue(isinstance(grid, PhiTreeNode))
+
+ def test_reshape_centered_grid(self):
+ grid = math.expand(CenteredGrid(1, x=10, y=10), batch(b=100))
+ grid = math.rename_dims(grid, 'b', 'bat')
+ self.assertEqual(batch(bat=100) & spatial(x=10, y=10), grid.shape)
+
+ def test_reshape_staggered_grid(self):
+ grid = math.expand(StaggeredGrid(1, x=10, y=10), batch(b=100))
+ grid = math.rename_dims(grid, 'b', 'bat')
+ self.assertEqual(batch(bat=100) & spatial(x=10, y=10) & channel(vector='x,y'), grid.shape)
diff --git a/tests/commit/field/test__point_cloud.py b/tests/commit/field/test__point_cloud.py
new file mode 100644
index 000000000..f9fb889dc
--- /dev/null
+++ b/tests/commit/field/test__point_cloud.py
@@ -0,0 +1,14 @@
+from unittest import TestCase
+
+from phi.field import PointCloud
+from phi.geom import Sphere
+from phi.math import batch, stack, instance, expand, rename_dims, shape, vec
+
+
+class PointCloudTest(TestCase):
+
+ def test_reshape(self):
+ c = PointCloud(Sphere(stack([vec(x=0, y=1)] * 50, instance('points')), radius=.1))
+ c = expand(c, batch(b=2))
+ c = rename_dims(c, 'points', 'particles')
+ assert batch(b=2) & instance(particles=50) == shape(c)
diff --git a/tests/commit/geom/test__sphere.py b/tests/commit/geom/test__sphere.py
index b6d6cfd4f..02339a340 100644
--- a/tests/commit/geom/test__sphere.py
+++ b/tests/commit/geom/test__sphere.py
@@ -1,9 +1,8 @@
from unittest import TestCase
-
from phi import math
-from phi.geom import Box, union, Cuboid, embed, Sphere
-from phi.math import batch, channel
+from phi.geom import union, Sphere
+from phi.math import stack, vec, instance, expand, rename_dims, unpack_dim, pack_dims, spatial, flatten, batch, channel
from phi.math.magic import Shaped, Sliceable, Shapable
@@ -43,62 +42,26 @@ def test_project(self):
self.assertEqual(sphere, sphere.vector['x,y'])
self.assertEqual(Sphere(x=4, radius=1), sphere['x'])
- # def test_box_constructor(self):
- # try:
- # Box(0, (1, 1))
- # raise RuntimeError
- # except AssertionError:
- # pass
- # math.assert_close(Box(x=1, y=1).size, 1)
- #
- # def test_box_batched(self):
- # lower = math.tensor([(0, 0), (1, 1)], batch('boxes'), channel(vector='x,y'))
- # upper = math.wrap((1, 1), channel(vector='x,y'))
- # box = Box(lower, upper)
- # self.assertEqual(batch(boxes=2) & channel(vector='x,y'), box.shape)
- #
- # def test_slice(self):
- # b1, b2 = Box(x=4, y=3), Box(x=2, y=1)
- # u = union(b1, b2)
- # self.assertEqual(b1, u.union[0])
- # self.assertEqual(b2, u.union[1])
- #
- # def test_without(self):
- # box = Box(x=4, y=3)
- # self.assertEqual(Box(x=4), box.without(('y',)))
- # self.assertEqual(Box(), box.without(('x', 'y')))
- # self.assertEqual(box, box.without(()))
- #
- # def test_embed(self):
- # self.assertEqual(Box(x=4, y=3, z=None), embed(Box(x=4, y=3), 'z'))
- # self.assertEqual(Box(x=4, y=3, z=None), embed(Box(x=4, y=3), 'x,z'))
- #
- # def test_box_product(self):
- # a = Box(x=4)
- # b = Box(y=3).shifted(math.wrap(1))
- # ab = a * b
- # self.assertEqual(2, ab.spatial_rank)
- # math.assert_close(ab.size, (4, 3))
- # math.assert_close(ab.lower, (0, 1))
- #
- # def test_union_same(self):
- # u = union(Box(x=1, y=1), Box(x=(2, 3), y=1))
- # self.assertIsInstance(u, Box)
- # math.assert_close(u.approximate_signed_distance((0, 0)), u.approximate_signed_distance((3, 1)), 0)
- # math.assert_close(u.approximate_signed_distance((1.5, 0)), 0.5)
- #
- # def test_stack_volume(self):
- # u = math.stack([Box(x=1, y=1), Box(x=2, y=2)], batch('batch'))
- # math.assert_close(u.volume, [1, 4])
- #
- # def test_shape_type(self):
- # box = Box(x=1, y=2)
- # self.assertEqual(box.rotated(0.1).shape_type, 'rotB')
- #
- # def test_box_eq(self):
- # self.assertNotEqual(Box(x=1, y=1), Box(x=1))
- # self.assertEqual(Box(x=1, y=1), Box(x=1, y=1))
- #
- # def test_cuboid_constructor_kwargs(self):
- # c = Cuboid(x=2., y=1.)
- # math.assert_close(c.lower, -c.upper, (-1, -.5))
+ def test_reshaping(self):
+ s = stack([Sphere(vec(x=0, y=0), radius=1)] * 50, instance('points'))
+ s = expand(s, batch(b=100))
+ s = rename_dims(s, 'b', 'bat')
+ s = unpack_dim(s, 'points', spatial(x=10, y=5))
+ assert batch(bat=100) & spatial(x=10, y=5) & channel(vector='x,y') == s.shape
+ s = pack_dims(s, 'x,y', instance('particles'))
+ assert batch(bat=100) & instance(particles=50) & channel(vector='x,y') == s.shape
+ s = flatten(s)
+ assert batch(bat=100) & instance(flat=50) & channel(vector='x,y') == s.shape
+
+ def test_reshaping_const_radius(self):
+ s = Sphere(stack([vec(x=0, y=0)] * 50, instance('points')), radius=1)
+ s = expand(s, batch(b=100))
+ s = rename_dims(s, 'b', 'bat')
+ s = unpack_dim(s, 'points', spatial(x=10, y=5))
+ assert not s.radius.shape
+ assert batch(bat=100) & spatial(x=10, y=5) & channel(vector='x,y') == s.shape
+ s = pack_dims(s, 'x,y', instance('particles'))
+ assert not s.radius.shape
+ assert batch(bat=100) & instance(particles=50) & channel(vector='x,y') == s.shape
+ s = flatten(s)
+ assert batch(bat=100) & instance(flat=50) & channel(vector='x,y') == s.shape
diff --git a/tests/commit/geom/test_geom.py b/tests/commit/geom/test_geom.py
index c3f4695c1..97558dcd1 100644
--- a/tests/commit/geom/test_geom.py
+++ b/tests/commit/geom/test_geom.py
@@ -1,6 +1,7 @@
from unittest import TestCase
from phi import math, geom
+from phi.math import stack, vec, instance, expand, rename_dims, unpack_dim, pack_dims, spatial, flatten, batch, channel
from phi.geom import Box, Sphere
@@ -24,3 +25,14 @@ def test_infinite_cylinder(self):
corner_distance = math.sqrt(2) / 2 - .5
distance = math.wrap([corner_distance, 0, corner_distance, corner_distance, 0], math.instance('points'))
math.assert_close(cylinder.approximate_signed_distance(loc), distance)
+
+ def test_point_reshaping(self):
+ s = stack([geom.Point(vec(x=0, y=0))] * 50, instance('points'))
+ s = expand(s, batch(b=100))
+ s = rename_dims(s, 'b', 'bat')
+ s = unpack_dim(s, 'points', spatial(x=10, y=5))
+ assert batch(bat=100) & spatial(x=10, y=5) & channel(vector='x,y') == s.shape
+ s = pack_dims(s, 'x,y', instance('particles'))
+ assert batch(bat=100) & instance(particles=50) & channel(vector='x,y') == s.shape
+ s = flatten(s)
+ assert batch(bat=100) & instance(flat=50) & channel(vector='x,y') == s.shape
\ No newline at end of file
diff --git a/tests/commit/math/test__functional.py b/tests/commit/math/test__functional.py
index 193b90b68..9e68170ef 100644
--- a/tests/commit/math/test__functional.py
+++ b/tests/commit/math/test__functional.py
@@ -121,7 +121,7 @@ def f(x):
return x.x[:2]
def grad(_inputs, _y, df):
- return {'x': math.flatten(math.expand(df * 0, batch(tmp=2)))}
+ return {'x': math.flatten(math.expand(df * 0, batch(tmp=2)), flatten_batch=True)}
def loss(x):
fg = math.custom_gradient(f, grad)
diff --git a/tests/commit/math/test__magic_ops.py b/tests/commit/math/test__magic_ops.py
index c01863ff5..1e1e93a4c 100644
--- a/tests/commit/math/test__magic_ops.py
+++ b/tests/commit/math/test__magic_ops.py
@@ -2,7 +2,7 @@
from unittest import TestCase
from phi.math import batch, unstack, Shape, merge_shapes, stack, concat, expand, spatial, shape, instance, rename_dims, \
- pack_dims, random_normal, flatten, unpack_dim, EMPTY_SHAPE, Tensor, Dict, channel
+ pack_dims, random_normal, flatten, unpack_dim, EMPTY_SHAPE, Tensor, Dict, channel, linspace
from phi.math.magic import BoundDim, Shaped, Sliceable, Shapable, PhiTreeNode, slicing_dict
@@ -111,11 +111,14 @@ def test_unstack(self):
def test_stack(self):
for test_class in TEST_CLASSES:
- test_class = TEST_CLASSES[1]
a = test_class(spatial(x=5))
- # self.assertEqual(spatial(x=5) & batch(b=2), stack([a, a], batch('b')).shape)
+ self.assertEqual(spatial(x=5) & batch(b=2), stack([a, a], batch('b')).shape)
self.assertEqual(spatial(x=5) & batch(b='a1,a2'), stack({'a1': a, 'a2': a}, batch('b')).shape)
+ def test_stack_expand(self):
+ v = stack([0, linspace(0, 1, instance(points=10))], channel(vector='x,y'), expand_values=True)
+ self.assertEqual(instance(points=10) & channel(vector='x,y'), v.shape)
+
def test_multi_dim_stack(self):
for test_class in TEST_CLASSES:
a = test_class(spatial(x=5))
@@ -154,7 +157,8 @@ def test_unpack_dim(self):
def test_flatten(self):
for test_class in TEST_CLASSES:
a = test_class(spatial(x=5) & batch(b=2))
- self.assertEqual(instance(points=10), flatten(a, instance('points')).shape)
+ self.assertEqual(instance(points=10), flatten(a, instance('points'), flatten_batch=True).shape)
+ self.assertEqual(batch(b=2) & instance(points=5), flatten(a, instance('points'), flatten_batch=False).shape)
def test_bound_dim(self):
for test_class in TEST_CLASSES:
diff --git a/tests/commit/math/test__nd.py b/tests/commit/math/test__nd.py
index 2c80e2a97..21065551e 100644
--- a/tests/commit/math/test__nd.py
+++ b/tests/commit/math/test__nd.py
@@ -255,3 +255,7 @@ def test_vector_length(self):
def test_dim_mask(self):
math.assert_close((1, 0, 0), math.dim_mask(spatial('x,y,z'), 'x'))
math.assert_close((1, 0, 1), math.dim_mask(spatial('x,y,z'), 'x,z'))
+
+ def test_vec_expand(self):
+ v = math.vec(x=0, y=math.linspace(0, 1, instance(points=10)))
+ self.assertEqual(instance(points=10) & channel(vector='x,y'), v.shape)
diff --git a/tests/commit/math/test__tensors.py b/tests/commit/math/test__tensors.py
index 0533a375e..d4f73a7d5 100644
--- a/tests/commit/math/test__tensors.py
+++ b/tests/commit/math/test__tensors.py
@@ -605,3 +605,15 @@ def test_numpy_function_interface(self):
self.assertFalse(np.equal(x, y))
self.assertTrue(x != y)
self.assertTrue(np.not_equal(x, y))
+
+ def test_broadcast_vector_tuple(self):
+ t = vec('something', x=0, y=1)
+ i = math.linspace(0, 1, instance(particles=10))
+ result = t + (0, i)
+ self.assertEqual(instance(particles=10) & channel(something='x,y'), result.shape)
+
+ def test_broadcast_matrix_tuple(self):
+ i = math.linspace(0, 1, instance(particles=10))
+ t = math.expand(vec(x=0, y=1), channel(c=2))
+ result = t + (0, i)
+ self.assertEqual(instance(particles=10) & channel(vector='x,y', c=2), result.shape)
diff --git a/tests/commit/math/test_extrapolation.py b/tests/commit/math/test_extrapolation.py
index 5852f3e29..a8f4e27d9 100644
--- a/tests/commit/math/test_extrapolation.py
+++ b/tests/commit/math/test_extrapolation.py
@@ -1,7 +1,7 @@
from unittest import TestCase
import phi
-from phi.math import NUMPY, spatial, batch, extrapolation
+from phi.math import NUMPY, spatial, batch, extrapolation, shape
from phi.math.extrapolation import *
from phi import math
@@ -207,3 +207,14 @@ def test_slice_normal_tangential(self):
ext = combine_sides(x=(INFLOW_LEFT, BOUNDARY), y=0)
self.assertEqual(combine_sides(x=(1, BOUNDARY), y=0), ext[{'vector': 'x'}])
self.assertEqual(combine_sides(x=(0, BOUNDARY), y=0), ext[{'vector': 'y'}])
+
+ def test_shapes(self):
+ self.assertEqual(EMPTY_SHAPE, ONE.shape)
+ self.assertEqual(EMPTY_SHAPE, PERIODIC.shape)
+ self.assertEqual(EMPTY_SHAPE, BOUNDARY.shape)
+ self.assertEqual(EMPTY_SHAPE, SYMMETRIC.shape)
+ self.assertEqual(EMPTY_SHAPE, REFLECT.shape)
+ v = math.vec(x=1, y=0)
+ self.assertEqual(v.shape, shape(ZERO + v))
+ self.assertEqual(v.shape, shape(combine_sides(x=v, y=0)))
+ self.assertEqual(v.shape, shape(combine_by_direction(normal=v, tangential=0)))