
Commit

DOCS: Enabled all pydocstyle (numpy) ruff rules with a list of specific exceptions. (#5636)

* Enabled all ruff rules with a list of specific exceptions.

* Removed trailing line

* changed text

* added another ignore

* [pre-commit.ci] auto fixes from pre-commit.com hooks

for more information, see https://pre-commit.ci

* fix for doctest ci

* amended sentence structure

* fixed note issue.

* fix doctests

---------

Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
Co-authored-by: Bill Little <[email protected]>
3 people authored Dec 15, 2023
1 parent 90b3c7d commit d0ee9c2
Showing 362 changed files with 1,586 additions and 3,268 deletions.
30 changes: 26 additions & 4 deletions .ruff.toml
@@ -23,14 +23,37 @@ lint.ignore = [

# pydocstyle (D)
# https://docs.astral.sh/ruff/rules/#pydocstyle-d
"D",
# Permanent
# (D-1) Permanent
"D105", # Missing docstring in magic method
# Temporary, to be removed when we are more compliant
# (D-2) Temporary, to be removed when we are more compliant. Rare cases move to (1).
"D417", # Missing argument descriptions in the docstring
"D101", # Missing docstring in public class
"D102", # Missing docstring in public method
"D104", # Missing docstring in public package
"D106", # Missing docstring in public nested class
# (D-3) Temporary, before an initial review, either fix occurrences or move to (2).
"D100", # Missing docstring in public module
"D103", # Missing docstring in public function
"D200", # One-line docstring should fit on one line
"D202", # No blank lines allowed after function docstring
"D205", # 1 blank line required between summary line and description
"D208", # Docstring is over-indented
"D209", # Multi-line docstring closing quotes should be on a separate line
"D211", # No blank lines allowed before class docstring
"D214", # Section is over-indented
"D300", # triple double quotes `""" / Use triple single quotes `'''`
"D301", # Use `r"""` if any backslashes in a docstring
"D400", # First line should end with a period
"D401", # First line of docstring should be in imperative mood: ...
"D403", # First word of the first line should be capitalized
"D404", # First word of the docstring should not be "This"
"D405", # Section name should be properly capitalized
"D406", # Section name should end with a newline
"D407", # Missing dashed underline after section
"D409", # Section underline should match the length of its name
"D410", # Missing blank line after section
"D411", # Missing blank line before section
"D412", # No blank lines allowed between a section header and its content

# pyupgrade (UP)
# https://docs.astral.sh/ruff/rules/#pyupgrade-up
@@ -240,4 +263,3 @@ lint.ignore = [
# https://docs.astral.sh/ruff/rules/#ruff-specific-rules-ruf
"RUF",
]
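For illustration, here is a minimal numpy-style docstring (an invented example, not taken from the Iris codebase) of the kind the newly enabled pydocstyle rules work towards once the temporary (D-2) and (D-3) exceptions are removed: a one-line summary in the imperative mood ending with a period, a blank line before any further description, and dash-underlined sections of matching length.

    def scale(values, factor=2.0):
        """Scale an array of values by a constant factor.

        Parameters
        ----------
        values : numpy.ndarray
            The values to scale.
        factor : float, optional
            Multiplier applied to every element.

        Returns
        -------
        numpy.ndarray
            The scaled values.
        """
        return values * factor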

9 changes: 3 additions & 6 deletions benchmarks/asv_delegated_conda.py
@@ -2,8 +2,7 @@
#
# This file is part of Iris and is released under the BSD license.
# See LICENSE in the root of the repository for full licensing details.
"""
ASV plug-in providing an alternative :class:`asv.plugins.conda.Conda`
"""ASV plug-in providing an alternative :class:`asv.plugins.conda.Conda`
subclass that manages the Conda environment via custom user scripts.
"""
@@ -22,8 +21,7 @@


class CondaDelegated(Conda):
"""
Manage a Conda environment using custom user scripts, run at each commit.
"""Manage a Conda environment using custom user scripts, run at each commit.
Ignores user input variations - ``matrix`` / ``pythons`` /
``conda_environment_file``, since environment is being managed outside ASV.
@@ -44,8 +42,7 @@ def __init__(
requirements: dict,
tagged_env_vars: dict,
) -> None:
"""
Parameters
"""Parameters
----------
conf : Config instance
12 changes: 4 additions & 8 deletions benchmarks/benchmarks/__init__.py
@@ -10,8 +10,7 @@


def disable_repeat_between_setup(benchmark_object):
"""
Decorator for benchmarks where object persistence would be inappropriate.
"""Decorator for benchmarks where object persistence would be inappropriate.
E.g:
* Benchmarking data realisation
@@ -37,8 +36,7 @@ def disable_repeat_between_setup(benchmark_object):


class TrackAddedMemoryAllocation:
"""
Context manager which measures by how much process resident memory grew,
"""Context manager which measures by how much process resident memory grew,
during execution of its enclosed code block.
Obviously limited as to what it actually measures : Relies on the current
@@ -86,8 +84,7 @@ def addedmem_mb(self):

@staticmethod
def decorator(decorated_func):
"""
Decorates this benchmark to track growth in resident memory during execution.
"""Decorates this benchmark to track growth in resident memory during execution.
Intended for use on ASV ``track_`` benchmarks. Applies the
:class:`TrackAddedMemoryAllocation` context manager to the benchmark
@@ -108,8 +105,7 @@ def _wrapper(*args, **kwargs):


def on_demand_benchmark(benchmark_object):
"""
Decorator. Disables these benchmark(s) unless ON_DEMAND_BENCHARKS env var is set.
"""Decorator. Disables these benchmark(s) unless ON_DEMAND_BENCHARKS env var is set.
For benchmarks that, for whatever reason, should not be run by default.
E.g:
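The decorators changed above are easiest to read together. A rough sketch of how they might combine on an ASV benchmark class; the class name, method name and array size are invented, and the relative import assumes the module lives inside the Iris benchmarks package:

    import numpy as np

    from . import (
        TrackAddedMemoryAllocation,
        disable_repeat_between_setup,
        on_demand_benchmark,
    )


    @on_demand_benchmark            # skipped unless the ON_DEMAND_BENCHARKS env var is set
    @disable_repeat_between_setup   # setup() re-runs before every measurement
    class RealiseArray:
        """Benchmark realising a large array while tracking memory growth."""

        def setup(self):
            self.array = np.zeros((4096, 4096))

        @TrackAddedMemoryAllocation.decorator
        def track_addedmem_realise(self):
            # The decorator reports how much resident memory grew while
            # this body executed.
            self.array += 1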
6 changes: 2 additions & 4 deletions benchmarks/benchmarks/aux_factory.py
@@ -2,8 +2,7 @@
#
# This file is part of Iris and is released under the BSD license.
# See LICENSE in the root of the repository for full licensing details.
"""
AuxFactory benchmark tests.
"""AuxFactory benchmark tests.
"""

@@ -20,8 +19,7 @@ class FactoryCommon:
# * remove NotImplementedError
# * combine setup_common into setup

"""
A base class running a generalised suite of benchmarks for any factory.
"""A base class running a generalised suite of benchmarks for any factory.
Factory to be specified in a subclass.
ASV will run the benchmarks within this class for any subclasses.
7 changes: 2 additions & 5 deletions benchmarks/benchmarks/coords.py
@@ -2,8 +2,7 @@
#
# This file is part of Iris and is released under the BSD license.
# See LICENSE in the root of the repository for full licensing details.
"""
Coord benchmark tests.
"""Coord benchmark tests.
"""

@@ -26,9 +25,7 @@ class CoordCommon:
# * make class an ABC
# * remove NotImplementedError
# * combine setup_common into setup
"""
A base class running a generalised suite of benchmarks for any coord.
"""A base class running a generalised suite of benchmarks for any coord.
Coord to be specified in a subclass.
ASV will run the benchmarks within this class for any subclasses.
3 changes: 1 addition & 2 deletions benchmarks/benchmarks/cperf/__init__.py
@@ -2,8 +2,7 @@
#
# This file is part of Iris and is released under the BSD license.
# See LICENSE in the root of the repository for full licensing details.
"""
Benchmarks for the CPerf scheme of the UK Met Office's NG-VAT project.
"""Benchmarks for the CPerf scheme of the UK Met Office's NG-VAT project.
CPerf = comparing performance working with data in UM versus LFRic formats.
11 changes: 4 additions & 7 deletions benchmarks/benchmarks/cperf/equality.py
@@ -2,16 +2,14 @@
#
# This file is part of Iris and is released under the BSD license.
# See LICENSE in the root of the repository for full licensing details.
"""
Equality benchmarks for the CPerf scheme of the UK Met Office's NG-VAT project.
"""Equality benchmarks for the CPerf scheme of the UK Met Office's NG-VAT project.
"""
from .. import on_demand_benchmark
from . import SingleDiagnosticMixin


class EqualityMixin(SingleDiagnosticMixin):
"""
Uses :class:`SingleDiagnosticMixin` as the realistic case will be comparing
"""Uses :class:`SingleDiagnosticMixin` as the realistic case will be comparing
:class:`~iris.cube.Cube`\\ s that have been loaded from file.
"""

@@ -26,9 +24,8 @@ def setup(self, file_type, three_d=False, three_times=False):

@on_demand_benchmark
class CubeEquality(EqualityMixin):
"""
Benchmark time and memory costs of comparing LFRic and UM
:class:`~iris.cube.Cube`\\ s.
"""Benchmark time and memory costs of comparing LFRic and UM
:class:`~iris.cube.Cube`\\ s.
"""

def _comparison(self):
10 changes: 4 additions & 6 deletions benchmarks/benchmarks/cperf/load.py
@@ -2,8 +2,7 @@
#
# This file is part of Iris and is released under the BSD license.
# See LICENSE in the root of the repository for full licensing details.
"""
File loading benchmarks for the CPerf scheme of the UK Met Office's NG-VAT project.
"""File loading benchmarks for the CPerf scheme of the UK Met Office's NG-VAT project.
"""
from .. import on_demand_benchmark
from . import SingleDiagnosticMixin
@@ -12,10 +11,9 @@
@on_demand_benchmark
class SingleDiagnosticLoad(SingleDiagnosticMixin):
def time_load(self, _, __, ___):
"""
The 'real world comparison'
* UM coords are always realised (DimCoords).
* LFRic coords are not realised by default (MeshCoords).
"""The 'real world comparison'
* UM coords are always realised (DimCoords).
* LFRic coords are not realised by default (MeshCoords).
"""
cube = self.load()
6 changes: 2 additions & 4 deletions benchmarks/benchmarks/cperf/save.py
@@ -2,8 +2,7 @@
#
# This file is part of Iris and is released under the BSD license.
# See LICENSE in the root of the repository for full licensing details.
"""
File saving benchmarks for the CPerf scheme of the UK Met Office's NG-VAT project.
"""File saving benchmarks for the CPerf scheme of the UK Met Office's NG-VAT project.
"""

from iris import save
@@ -15,8 +14,7 @@

@on_demand_benchmark
class NetcdfSave:
"""
Benchmark time and memory costs of saving ~large-ish data cubes to netcdf.
"""Benchmark time and memory costs of saving ~large-ish data cubes to netcdf.
Parametrised by file type.
"""
6 changes: 2 additions & 4 deletions benchmarks/benchmarks/cube.py
@@ -2,8 +2,7 @@
#
# This file is part of Iris and is released under the BSD license.
# See LICENSE in the root of the repository for full licensing details.
"""
Cube benchmark tests.
"""Cube benchmark tests.
"""

@@ -32,8 +31,7 @@ class ComponentCommon:
# * remove NotImplementedError
# * combine setup_common into setup

"""
A base class running a generalised suite of benchmarks for cubes that
"""A base class running a generalised suite of benchmarks for cubes that
include a specified component (e.g. Coord, CellMeasure etc.). Component to
be specified in a subclass.
3 changes: 1 addition & 2 deletions benchmarks/benchmarks/experimental/__init__.py
@@ -2,7 +2,6 @@
#
# This file is part of Iris and is released under the BSD license.
# See LICENSE in the root of the repository for full licensing details.
"""
Benchmark tests for the experimental module.
"""Benchmark tests for the experimental module.
"""
6 changes: 2 additions & 4 deletions benchmarks/benchmarks/experimental/ugrid/__init__.py
@@ -2,8 +2,7 @@
#
# This file is part of Iris and is released under the BSD license.
# See LICENSE in the root of the repository for full licensing details.
"""
Benchmark tests for the experimental.ugrid module.
"""Benchmark tests for the experimental.ugrid module.
"""

@@ -18,8 +17,7 @@


class UGridCommon:
"""
A base class running a generalised suite of benchmarks for any ugrid object.
"""A base class running a generalised suite of benchmarks for any ugrid object.
Object to be specified in a subclass.
ASV will run the benchmarks within this class for any subclasses.
22 changes: 7 additions & 15 deletions benchmarks/benchmarks/experimental/ugrid/regions_combine.py
@@ -2,8 +2,7 @@
#
# This file is part of Iris and is released under the BSD license.
# See LICENSE in the root of the repository for full licensing details.
"""
Benchmarks stages of operation of the function
"""Benchmarks stages of operation of the function
:func:`iris.experimental.ugrid.utils.recombine_submeshes`.
Where possible benchmarks should be parameterised for two sizes of input data:
@@ -90,8 +89,7 @@ def setup_cache(self):
)

def setup(self, n_cubesphere, imaginary_data=True, create_result_cube=True):
"""
The combine-tests "standard" setup operation.
"""The combine-tests "standard" setup operation.
Load the source cubes (full-mesh + region) from disk.
These are specific to the cubesize parameter.
@@ -139,8 +137,7 @@ def setup(self, n_cubesphere, imaginary_data=True, create_result_cube=True):
self.fix_dask_settings()

def fix_dask_settings(self):
"""
Fix "standard" dask behaviour for time+space testing.
"""Fix "standard" dask behaviour for time+space testing.
Currently this is single-threaded mode, with known chunksize,
which is optimised for space saving so we can test largest data.
@@ -166,8 +163,7 @@ def recombine(self):


class CombineRegionsCreateCube(MixinCombineRegions):
"""
Time+memory costs of creating a combined-regions cube.
"""Time+memory costs of creating a combined-regions cube.
The result is lazy, and we don't do the actual calculation.
@@ -187,9 +183,7 @@ def track_addedmem_create_combined_cube(self, n_cubesphere):


class CombineRegionsComputeRealData(MixinCombineRegions):
"""
Time+memory costs of computing combined-regions data.
"""
"""Time+memory costs of computing combined-regions data."""

def time_compute_data(self, n_cubesphere):
_ = self.recombined_cube.data
@@ -202,8 +196,7 @@ def track_addedmem_compute_data(self, n_cubesphere):


class CombineRegionsSaveData(MixinCombineRegions):
"""
Test saving *only*, having replaced the input cube data with 'imaginary'
"""Test saving *only*, having replaced the input cube data with 'imaginary'
array data, so that input data is not loaded from disk during the save
operation.
@@ -228,8 +221,7 @@ def track_filesize_saved(self, n_cubesphere):


class CombineRegionsFileStreamedCalc(MixinCombineRegions):
"""
Test the whole cost of file-to-file streaming.
"""Test the whole cost of file-to-file streaming.
Uses the combined cube which is based on lazy data loading from the region
cubes on disk.
"""
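The ``fix_dask_settings`` docstring above mentions single-threaded mode with a known chunksize. A hedged sketch of what such a Dask configuration can look like; the scheduler name and chunk-size value here are illustrative, and the actual values used by the benchmark are not shown in this diff:

    import dask
    import dask.array as da

    # Single-threaded scheduler: avoids thread/process noise in timings and
    # keeps memory behaviour predictable for the largest test data.
    dask.config.set(scheduler="synchronous")
    # A fixed, known chunk size so runs are comparable with each other.
    dask.config.set({"array.chunk-size": "128 MiB"})

    lazy = da.zeros((20_000, 20_000), chunks="auto")  # chunking follows the setting above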
9 changes: 3 additions & 6 deletions benchmarks/benchmarks/generate_data/__init__.py
@@ -2,8 +2,7 @@
#
# This file is part of Iris and is released under the BSD license.
# See LICENSE in the root of the repository for full licensing details.
"""
Scripts for generating supporting data for benchmarking.
"""Scripts for generating supporting data for benchmarking.
Data generated using Iris should use :func:`run_function_elsewhere`, which
means that data is generated using a fixed version of Iris and a fixed
@@ -60,8 +59,7 @@


def run_function_elsewhere(func_to_run, *args, **kwargs):
"""
Run a given function using the :const:`DATA_GEN_PYTHON` executable.
"""Run a given function using the :const:`DATA_GEN_PYTHON` executable.
This structure allows the function to be written natively.
@@ -101,8 +99,7 @@ def run_function_elsewhere(func_to_run, *args, **kwargs):

@contextmanager
def load_realised():
"""
Force NetCDF loading with realised arrays.
"""Force NetCDF loading with realised arrays.
Since passing between data generation and benchmarking environments is via
file loading, but some benchmarks are only meaningful if starting with real
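Taken together, the two helpers above suggest a usage pattern along these lines. This is a hypothetical sketch based only on the docstrings shown: the file path and the ``_make_sample_file`` function are invented, and the relative import assumes the snippet lives inside the benchmarks ``generate_data`` package.

    import iris

    from . import load_realised, run_function_elsewhere


    def _make_sample_file(n, save_path):
        # The function body runs under the fixed DATA_GEN_PYTHON interpreter,
        # so it performs its own imports rather than using this module's.
        import iris
        import numpy as np
        from iris.cube import Cube

        iris.save(Cube(np.zeros((n, n), dtype=np.float32)), save_path)


    # Generate the test file with the pinned data-generation environment ...
    run_function_elsewhere(_make_sample_file, 100, save_path="/tmp/sample.nc")

    # ... then load it for benchmarking with realised (non-lazy) arrays.
    with load_realised():
        cube = iris.load_cube("/tmp/sample.nc")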