Skip to content

Commit

Permalink
general cleanup
Browse files Browse the repository at this point in the history
  • Loading branch information
milanofthe committed Oct 25, 2024
1 parent e110a39 commit a3a4639
Show file tree
Hide file tree
Showing 4 changed files with 63 additions and 55 deletions.
2 changes: 1 addition & 1 deletion examples/example_diff.py
Original file line number Diff line number Diff line change
Expand Up @@ -66,7 +66,7 @@

time, [_, res] = Sco.read()

fig, ax = plt.subplots(nrows=1, tight_layout=True, dpi=120)
fig, ax = plt.subplots(nrows=1, figsize=(8, 4), tight_layout=True, dpi=120)

ax.plot(time, list(map(lambda x:x.d(a), res)), label="$dx/da$")
ax.plot(time, list(map(lambda x:x.d(b), res)), label="$dx/db$")
Expand Down
5 changes: 3 additions & 2 deletions examples/example_duffing.py
Original file line number Diff line number Diff line change
Expand Up @@ -29,17 +29,18 @@
# DUFFING OSCILLATOR ====================================================================

#simulation timestep
dt = 0.1
dt = 0.05

#initial position and velocity
x0, v0 = 0.0, 0.0

#driving angular frequency and amplitude
a, omega = 100.0, 2.0
a, omega = 10.0, 2.0

#parameters (mass, damping, linear stiffness, nonlinear stiffness)
m, c, k, d = 1.0, 0.5, 1.0, 1.4


#blocks that define the system
I1 = Integrator(v0) # integrator for velocity
I2 = Integrator(x0) # integrator for position
Expand Down
10 changes: 5 additions & 5 deletions examples/example_pendulum.py
Original file line number Diff line number Diff line change
Expand Up @@ -31,14 +31,14 @@
phi0, omega0 = 0.99*np.pi, 0

#parameters (gravity, length)
g, l = 9.81, 1
g, l = 9.81, 1

#blocks that define the system
In1 = Integrator(omega0)
In2 = Integrator(phi0)
Amp = Amplifier(-g/l)
Fnc = Function(np.sin)
Sco = Scope(labels=["angle", "angular velocity"])
Sco = Scope(labels=["angular velocity", "angle"])

blocks = [In1, In2, Amp, Fnc, Sco]

Expand All @@ -51,12 +51,12 @@
]

# Create a simulation instance from the blocks and connections
Sim = Simulation(blocks, connections, dt=dt, log=True, Solver=RKCK54)
Sim = Simulation(blocks, connections, dt=dt, log=True, Solver=RKCK54, tolerance_lte_rel=1e-6)

# Run the simulation for 25 seconds
Sim.run(duration=20)
Sim.run(duration=25)

# Plot the results directly from the scope
Sco.plot()

plt.show()
plt.show()
101 changes: 54 additions & 47 deletions pathsim/diff/value.py
Original file line number Diff line number Diff line change
Expand Up @@ -19,41 +19,41 @@
# Trigonometric functions
np.sin : lambda x: np.cos(x),
np.cos : lambda x: -np.sin(x),
np.tan : lambda x: 1 / np.cos(x)**2,
np.arcsin : lambda x: 1 / np.sqrt(1 - x**2),
np.arccos : lambda x: -1 / np.sqrt(1 - x**2),
np.arctan : lambda x: 1 / (1 + x**2),
np.tan : lambda x: 1/np.cos(x)**2,
np.arcsin : lambda x: 1/np.sqrt(1 - x**2),
np.arccos : lambda x: -1/np.sqrt(1 - x**2),
np.arctan : lambda x: 1/(1 + x**2),
np.sinh : lambda x: np.cosh(x),
np.cosh : lambda x: np.sinh(x),
np.tanh : lambda x: 1 - np.tanh(x)**2,
np.arcsinh : lambda x: 1 / np.sqrt(x**2 + 1),
np.arccosh : lambda x: 1 / (np.sqrt(x - 1) * np.sqrt(x + 1)),
np.arctanh : lambda x: 1 / (1 - x**2),
np.arcsinh : lambda x: 1/np.sqrt(x**2 + 1),
np.arccosh : lambda x: 1/(np.sqrt(x - 1)*np.sqrt(x + 1)),
np.arctanh : lambda x: 1/(1 - x**2),

# Exponential and logarithmic functions
np.exp : lambda x: np.exp(x),
np.exp2 : lambda x: np.exp2(x) * np.log(2),
np.exp2 : lambda x: np.exp2(x)*np.log(2),
np.log : lambda x: 1/x,
np.log2 : lambda x: 1 / (x * np.log(2)),
np.log10 : lambda x: 1 / (x * np.log(10)),
np.log1p : lambda x: 1 / (1 + x),
np.log2 : lambda x: 1/(x*np.log(2)),
np.log10 : lambda x: 1/(x*np.log(10)),
np.log1p : lambda x: 1/(1 + x),
np.expm1 : lambda x: np.exp(x),

# Power functions
np.sqrt : lambda x: 0.5 / np.sqrt(x),
np.sqrt : lambda x: 0.5/np.sqrt(x),
np.square : lambda x: 2*x,
np.power : lambda x, p: p * np.power(x, p-1),
np.cbrt : lambda x: 1 / (3 * np.cbrt(x)**2),
np.power : lambda x, p: p*np.power(x, p-1),
np.cbrt : lambda x: 1/(3*np.cbrt(x)**2),

# Complex functions
np.real : lambda x: np.real(x),
np.imag : lambda x: np.imag(x),
np.conj : lambda x: np.conj(x),
np.abs : lambda x: x / np.abs(x),
np.angle : lambda x: -1j / x,
np.abs : lambda x: x/np.abs(x),
np.angle : lambda x: -1j/x,

# Statistical functions
np.sign : lambda x: 0,
np.sign : lambda x: 0.0,
}


Expand Down Expand Up @@ -92,14 +92,11 @@ class Value:
the partial derivatives with respect to the instance itself and other instances
of the value class.
This is realized by a dictionary that handles the reference tracking via the unique
identifiers of the 'Value' instances.
Includes a slim interface to numpy functions using '__array_ufunc__' and fallback
using numerical gradients (central differences).
This is realized by a dictionary that handles the reference tracking via the id
of the 'Value' instances.
INPUTS :
val : (float, int, complex) The numerical value.
val : (float, int, complex) The numerical value.
grad : (dict, optional) The gradient dictionary. If None, initializes with self derivative.
"""

Expand Down Expand Up @@ -210,28 +207,6 @@ def __abs__(self):

# arithmetic operators ----------------------------------------------------------------------

def __truediv__(self, other):
    """Divide this 'Value' by 'other' (another 'Value', a numpy array, or a scalar).

    Gradients follow the quotient rule; derivative entries are forced to 0.0
    when the denominator value is exactly 0.0.
    """
    if isinstance(other, Value):
        #quotient rule d(u/v) = (u'v - u v') / v**2 over the union of tracked ids
        keys = set(self.grad) | set(other.grad)
        new_grad = {}
        for key in keys:
            if other.val != 0.0:
                du = self.grad.get(key, 0)
                dv = other.grad.get(key, 0)
                new_grad[key] = (du * other.val - self.val * dv) / (other.val ** 2)
            else:
                #denominator value is zero -> derivative falls back to 0.0
                new_grad[key] = 0.0
        return Value(val=self.val / other.val, grad=new_grad)
    if isinstance(other, np.ndarray):
        #broadcast elementwise over numpy arrays
        return np.array([self / elem for elem in other])
    #plain scalar denominator -> value and all derivatives scale by 1/other
    return Value(val=self.val / other,
                 grad={key: dval / other for key, dval in self.grad.items()})


def __rtruediv__(self, other):
    """Reflected division 'other / self' for non-'Value' numerators.

    Uses d(c/x) = -c/x**2; derivative entries fall back to 0.0 when
    'self.val' is exactly 0.0.
    """
    if isinstance(other, Value):
        #both operands are 'Value' -> delegate to the forward operator
        return other / self
    if self.val != 0.0:
        new_grad = {key: -other * dval / (self.val ** 2)
                    for key, dval in self.grad.items()}
    else:
        #zero denominator value -> derivative falls back to 0.0
        new_grad = {key: 0.0 for key in self.grad}
    return Value(val=other / self.val, grad=new_grad)


def __add__(self, other):
if isinstance(other, Value):
new_grad = {k: self.grad.get(k, 0) + other.grad.get(k, 0)
Expand Down Expand Up @@ -267,6 +242,10 @@ def __rmul__(self, other):
return self * other


def __imul__(self, other):
    """In-place multiplication ('*='); delegates to '__mul__' so a new
    'Value' is returned rather than mutating this instance."""
    return self.__mul__(other)


def __sub__(self, other):
if isinstance(other, Value):
new_grad = {k: self.grad.get(k, 0) - other.grad.get(k, 0)
Expand All @@ -286,6 +265,28 @@ def __isub__(self, other):
return self - other


def __truediv__(self, other):
    """Divide this 'Value' by 'other' (another 'Value', a numpy array, or a scalar).

    Gradients follow the quotient rule d(u/v) = (u'v - u v')/v**2; derivative
    entries are forced to 0.0 when the denominator value is exactly 0.0
    (NOTE(review): the value division itself may still raise in that case).
    """
    if isinstance(other, Value):
        #quotient rule over the union of tracked gradient ids
        new_grad = {k: (self.grad.get(k, 0) * other.val - self.val * other.grad.get(k, 0))
                    / (other.val ** 2)
                    if other.val != 0.0 else 0.0 for k in set(self.grad) | set(other.grad)}
        return Value(val=self.val / other.val, grad=new_grad)
    if isinstance(other, np.ndarray):
        #broadcast elementwise over numpy arrays
        return np.array([self / x for x in other])
    else:
        #plain scalar denominator -> value and all derivatives scale by 1/other
        return Value(val=self.val / other,
                     grad={k: v / other for k, v in self.grad.items()})


def __rtruediv__(self, other):
    """Reflected division 'other / self' for non-'Value' numerators.

    Uses d(c/x) = -c/x**2; derivative entries fall back to 0.0 when
    'self.val' is exactly 0.0 (NOTE(review): the value division itself
    may still raise in that case).
    """
    if isinstance(other, Value):
        #both operands are 'Value' -> delegate to the forward operator
        return other / self
    else:
        return Value(val=other / self.val,
                     grad={k: -other * v / (self.val ** 2) if self.val != 0.0 else 0.0
                           for k, v in self.grad.items()})


def __pow__(self, power):
if isinstance(power, Value):
new_val = self.val ** power.val
Expand Down Expand Up @@ -326,7 +327,7 @@ def shuffle(self):

x, y = Value(0.6), Value(3)

z = np.arctan(y*x) / np.exp(1/x)
z = np.arctan(y*x) + np.exp(1/x)
print(z.d(x), z.d(y))

A = np.array([x, Value(3), Value(0.5)])
Expand All @@ -343,4 +344,10 @@ def shuffle(self):
B = y * A**2
print(B)

# print(B[0].d(x))
b = np.linalg.norm(B)
print(b.d(x), b.d(y))

d = np.sign(np.sin(2*np.pi*x))

print(np.sign(d))

0 comments on commit a3a4639

Please sign in to comment.