Skip to content

Commit

Permalink
Merge branch 'f/rewrite_subsystem' into 'main'
Browse files Browse the repository at this point in the history
Target Hardware Description Proposal

See merge request es/ai/hannah/hannah!365
  • Loading branch information
cgerum committed Nov 19, 2024
2 parents f56a4a3 + 9e0b060 commit ad3d534
Show file tree
Hide file tree
Showing 113 changed files with 2,464 additions and 4,454 deletions.
1 change: 1 addition & 0 deletions .gitlab-ci.yml
Original file line number Diff line number Diff line change
Expand Up @@ -27,6 +27,7 @@ variables:
PIP_CACHE_DIR: "$CI_PROJECT_DIR/.cache/pip"
POETRY_HOME: "$CI_PROJECT_DIR/.poetry"
POETRY_CACHE_DIR: "$CI_PROJECT_DIR/.cache/pypoetry"
POETRY_VIRTUALENVS_CREATE: false
GIT_SUBMODULE_STRATEGY: recursive
DEBIAN_FRONTEND: "noninteractive"

Expand Down
4 changes: 2 additions & 2 deletions experiments/rhode_island/model/localization_net.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -26,10 +26,10 @@ qconfig:
_target_: hannah.models.factory.qconfig.get_trax_qat_qconfig
config:
bw_b: 8
bw_w: 6
bw_w: 4
bw_f: 8
power_of_2: false # Use power of two quantization for weights
noise_prob: 0.7 # Probability of quantizing a value during training
noise_prob: 0.9 # Probability of quantizing a value during training
conv:
- target: forward
stride: 1
Expand Down
12 changes: 10 additions & 2 deletions hannah/__init__.py
Original file line number Diff line number Diff line change
@@ -1,8 +1,8 @@
#
# Copyright (c) 2022 University of Tübingen.
# Copyright (c) 2024 Hannah contributors.
#
# This file is part of hannah.
# See https://atreus.informatik.uni-tuebingen.de/ties/ai/hannah/hannah for further info.
# See https://github.com/ekut-es/hannah for further info.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
Expand All @@ -16,3 +16,11 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#


try:
from beartype.claw import beartype_this_package

beartype_this_package()
except ImportError:
pass # beartype is not installed in production environment
23 changes: 15 additions & 8 deletions hannah/callbacks/optimization.py
Original file line number Diff line number Diff line change
@@ -1,8 +1,8 @@
#
# Copyright (c) 2022 University of Tübingen.
# Copyright (c) 2024 Hannah contributors.
#
# This file is part of hannah.
# See https://atreus.informatik.uni-tuebingen.de/ties/ai/hannah/hannah for further info.
# See https://github.com/ekut-es/hannah for further info.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
Expand All @@ -20,6 +20,7 @@
from collections import defaultdict
from typing import Any, Iterable, List, Mapping, Union

import pytorch_lightning as pl
from pytorch_lightning.callbacks import Callback
from torch import Tensor

Expand Down Expand Up @@ -94,8 +95,15 @@ def _add_monitor_mapping(self, monitor):
else:
self.directions.append(-1.0)

def on_train_batch_end(self, trainer: "pl.Trainer", pl_module: "pl.LightningModule", outputs: 'STEP_OUTPUT', batch: Any, batch_idx: int) -> None: # noqa: F821
callback_metrics = trainer.callback_metrics
def on_train_batch_end(
self,
trainer: "pl.Trainer",
pl_module: "pl.LightningModule",
outputs: Any,
batch: Any,
batch_idx: int,
) -> None:
callback_metrics = trainer.callback_metrics

for k, v in callback_metrics.items():
if k.startswith("train"):
Expand All @@ -112,7 +120,7 @@ def on_train_batch_end(self, trainer: "pl.Trainer", pl_module: "pl.LightningModu
monitor_val = callback_metrics[monitor] * direction
if monitor.startswith("train"):
self._curves[monitor][trainer.global_step] = monitor_val

self.values[monitor] = monitor_val

def on_test_end(self, trainer, pl_module):
Expand Down Expand Up @@ -156,7 +164,7 @@ def on_validation_end(self, trainer, pl_module):
# Skip evaluation of validation metrics during sanity check
if trainer.sanity_checking:
return

callback_metrics = trainer.callback_metrics

for k, v in callback_metrics.items():
Expand All @@ -168,7 +176,7 @@ def on_validation_end(self, trainer, pl_module):
try:
monitor_val = float(callback_metrics[monitor])
directed_monitor_val = monitor_val * direction

self.values[monitor] = directed_monitor_val
self._curves[monitor][trainer.global_step] = directed_monitor_val
except Exception:
Expand Down Expand Up @@ -230,4 +238,3 @@ def curves(self, dict=False):
return list(return_values.values())[0]

return return_values

40 changes: 0 additions & 40 deletions hannah/conf/config_dd_direct_angle.yaml

This file was deleted.

40 changes: 0 additions & 40 deletions hannah/conf/config_dd_direct_angle_phase.yaml

This file was deleted.

24 changes: 24 additions & 0 deletions hannah/conf/dataset/sensor/naneye-raw.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,24 @@
##
## Copyright (c) 2022 University of Tübingen.
##
## This file is part of hannah.
## See https://atreus.informatik.uni-tuebingen.de/ties/ai/hannah/hannah for further info.
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
##

# Very approximate simulation of a raw sensor image naneye-m with some noise model

name: naneye-m
resolution: [320,320]
pattern: GRBG
1 change: 1 addition & 0 deletions hannah/conf/dataset/sensor/naneye.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -18,3 +18,4 @@
##
name: naneye-m
resolution: [320,320]
pattern: [1,1]
24 changes: 12 additions & 12 deletions hannah/conf/model/conv-net-2d.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -26,17 +26,17 @@ qconfig:
_target_: hannah.models.factory.qconfig.get_trax_qat_qconfig
config:
bw_b: 8
bw_w: 6
bw_w: 8
bw_f: 8
power_of_2: false # Use power of two quantization for weights
noise_prob: 0.7 # Probability of quantizing a value during training
noise_prob: 0.5 # Probability of quantizing a value during training
conv:
- target: forward
stride: 1
stride: 2
blocks:
- target: conv2d
kernel_size: 3
act: false
act: true
norm: true
out_channels: 16
- target: residual
Expand All @@ -46,43 +46,43 @@ conv:
kernel_size: 3
act: true
norm: true
out_channels: 24
out_channels: 32
- target: conv2d
kernel_size: 1
parallel: true
out_channels: 24
out_channels: 32
- target: conv2d
kernel_size: 3
act: true
norm: true
out_channels: 24
out_channels: 32
- target: residual
stride: 2
blocks:
- target: conv2d
kernel_size: 3
act: true
norm: true
out_channels: 32
out_channels: 64
- target: conv2d
kernel_size: 3
act: true
norm: true
out_channels: 32
out_channels: 64
- target: residual
stride: 2
blocks:
- target: conv2d
kernel_size: 3
act: true
norm: true
out_channels: 48
out_channels: 128
- target: conv2d
kernel_size: 1
parallel: true
out_channels: 48
out_channels: 128
- target: conv2d
kernel_size: 3
act: true
norm: true
out_channels: 48
out_channels: 128
10 changes: 5 additions & 5 deletions hannah/conf/nas_new.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -26,8 +26,8 @@ defaults:
- override normalizer: fixedpoint
- override module: stream_classifier
- override checkpoint: default
- override backend: trax_ut
- override nas: aging_evolution_nas
#- override backend: trax_ut
- override nas: aging_evolution_nas_legacy
- _self_

experiment_id: test
Expand All @@ -39,9 +39,9 @@ trainer:

nas:
parametrization:
backend:
cols: [2,4,6,8,16]
rows: null
#backend:
# cols: [2,4,6,8,16]
# rows: null
model:
qconfig:
config:
Expand Down
1 change: 1 addition & 0 deletions hannah/conf/trainer/default.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -34,3 +34,4 @@ plugins: null
strategy: auto
reload_dataloaders_every_n_epochs: 0
precision: 32
enable_model_summary: False
2 changes: 0 additions & 2 deletions hannah/datasets/speech.py
Original file line number Diff line number Diff line change
Expand Up @@ -51,7 +51,6 @@ def snr_factor(snr, psig, pnoise):

def _load_audio(file_name, sr=16000, backend="torchaudio"):
if backend == "torchaudio":
torchaudio.set_audio_backend("sox_io")
try:
data, samplingrate = torchaudio.load(file_name)
except Exception as e:
Expand All @@ -60,7 +59,6 @@ def _load_audio(file_name, sr=16000, backend="torchaudio"):
msglogger.warning(
"Could not load %s with default backend trying sndfile", str(file_name)
)
torchaudio.set_audio_backend("soundfile")
data, samplingrate = torchaudio.load(file_name)
if samplingrate != sr:
data = torchaudio.transforms.Resample(samplingrate, sr).forward(data)
Expand Down
4 changes: 2 additions & 2 deletions hannah/models/ai8x/models.py
Original file line number Diff line number Diff line change
Expand Up @@ -115,8 +115,8 @@ def block(
weight1_quantized = quantize_weight(weight1)
bias1 = Tensor(
"b1",
(Int(channels)),
axis=["O", "I", "kH", "kW"],
(Int(channels),),
axis=["C"],
grad=True,
)
bias1_quantized = quantize_weight(bias1)
Expand Down
2 changes: 1 addition & 1 deletion hannah/models/ai8x/models_simplified.py
Original file line number Diff line number Diff line change
Expand Up @@ -101,7 +101,7 @@ def block(input, channels: int, kernel_size: int):
weight_quantized = quantize_weight(weight)
bias = Tensor(
"b1",
(Int(channels)),
(Int(channels),),
axis=["O", "I", "kH", "kW"],
grad=True,
)
Expand Down
Loading

0 comments on commit ad3d534

Please sign in to comment.