diff --git a/.github/actions/run-tests/action.yml b/.github/actions/run-tests/action.yml index bb48e9e4c..9ade249ac 100644 --- a/.github/actions/run-tests/action.yml +++ b/.github/actions/run-tests/action.yml @@ -1,6 +1,6 @@ # This code is part of a Qiskit project. # -# (C) Copyright IBM 2021, 2023. +# (C) Copyright IBM 2021, 2024. # # This code is licensed under the Apache License, Version 2.0. You may # obtain a copy of this license in the LICENSE.txt file in the root directory @@ -36,7 +36,7 @@ runs: if [ "${{ inputs.event-name }}" == "schedule" ] || [ "${{ inputs.run-slow }}" == "true" ]; then export QISKIT_TESTS="run_slow" fi - if [ "${{ inputs.os }}" == "ubuntu-latest" ] && [ "${{ inputs.python-version }}" == "3.8" ]; then + if [ "${{ inputs.os }}" == "ubuntu-latest" ] && [ "${{ inputs.python-version }}" == "3.9" ]; then export PYTHON="coverage3 run --source qiskit_machine_learning --parallel-mode" fi stestr --test-path test run 2> >(tee /dev/stderr out.txt > /dev/null) diff --git a/.github/workflows/deploy-code.yml b/.github/workflows/deploy-code.yml index ad80409ba..163e75b87 100644 --- a/.github/workflows/deploy-code.yml +++ b/.github/workflows/deploy-code.yml @@ -1,6 +1,6 @@ # This code is part of a Qiskit project. # -# (C) Copyright IBM 2021, 2023. +# (C) Copyright IBM 2021, 2024. # # This code is licensed under the Apache License, Version 2.0. You may # obtain a copy of this license in the LICENSE.txt file in the root directory @@ -25,7 +25,7 @@ jobs: id-token: write strategy: matrix: - python-version: [3.8] + python-version: [3.9] steps: - uses: actions/checkout@v4 - uses: actions/setup-python@v5 diff --git a/.github/workflows/deploy-docs.yml b/.github/workflows/deploy-docs.yml index 9e01421ee..b26b9f8da 100644 --- a/.github/workflows/deploy-docs.yml +++ b/.github/workflows/deploy-docs.yml @@ -1,6 +1,6 @@ # This code is part of a Qiskit project. # -# (C) Copyright IBM 2021, 2023. +# (C) Copyright IBM 2021, 2024. # # This code is licensed under the Apache License, Version 2.0. 
You may # obtain a copy of this license in the LICENSE.txt file in the root directory @@ -25,7 +25,7 @@ jobs: runs-on: ubuntu-latest strategy: matrix: - python-version: [3.8] + python-version: [3.9] steps: - uses: actions/checkout@v4 with: diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml index 828f48b76..2bdd419ad 100644 --- a/.github/workflows/main.yml +++ b/.github/workflows/main.yml @@ -35,7 +35,7 @@ jobs: strategy: matrix: os: [ubuntu-latest] - python-version: [3.8] + python-version: [3.9] steps: - name: Print Concurrency Group env: @@ -112,14 +112,14 @@ jobs: fail-fast: false matrix: os: [ubuntu-latest] - python-version: [3.8, 3.9, '3.10', 3.11, 3.12] + python-version: [3.9, '3.10', 3.11, 3.12] include: - os: macos-latest - python-version: 3.8 + python-version: 3.9 - os: macos-latest python-version: 3.12 - os: windows-latest - python-version: 3.8 + python-version: 3.9 - os: windows-latest python-version: 3.12 # macos-14 is an Arm64 image @@ -165,7 +165,7 @@ jobs: run: | coverage3 combine mv .coverage ./ci-artifact-data/ml.dat - if: ${{ matrix.os == 'ubuntu-latest' && matrix.python-version == 3.8 }} + if: ${{ matrix.os == 'ubuntu-latest' && matrix.python-version == 3.9 }} shell: bash - uses: actions/upload-artifact@v4 with: @@ -188,7 +188,7 @@ jobs: fail-fast: false matrix: os: [ubuntu-latest] - python-version: [3.8, 3.12] + python-version: [3.9, 3.12] steps: - uses: actions/checkout@v4 with: @@ -251,29 +251,25 @@ jobs: # cd docs/_build/html # mkdir artifacts # tar -zcvf artifacts/tutorials.tar.gz --exclude=./artifacts . -# if: ${{ matrix.python-version == 3.8 && !startsWith(github.ref, 'refs/heads/stable') && !startsWith(github.base_ref, 'stable/') }} +# if: ${{ matrix.python-version == 3.9 && !startsWith(github.ref, 'refs/heads/stable') && !startsWith(github.base_ref, 'stable/') }} # shell: bash # - name: Run upload stable tutorials # uses: actions/upload-artifact@v4 # with: # name: tutorials-stable${{ matrix.python-version }} # path: docs/_build/html/artifacts/tutorials.tar.gz -# if: ${{ matrix.python-version == 3.8 && !startsWith(github.ref, 'refs/heads/stable') && !startsWith(github.base_ref, 'stable/') }} +# if: ${{ matrix.python-version == 3.9 && !startsWith(github.ref, 'refs/heads/stable') && !startsWith(github.base_ref, 'stable/') }} Deprecation_Messages_and_Coverage: needs: [Checks, MachineLearning, Tutorials] runs-on: ubuntu-latest strategy: matrix: - python-version: [3.8] + python-version: [3.9] steps: - uses: actions/checkout@v4 - uses: actions/setup-python@v5 with: python-version: ${{ matrix.python-version }} - - uses: actions/download-artifact@v4 - with: - name: ubuntu-latest-3.8 - path: /tmp/u38 - uses: actions/download-artifact@v4 with: name: ubuntu-latest-3.9 @@ -292,16 +288,16 @@ jobs: path: /tmp/u312 - uses: actions/download-artifact@v4 with: - name: macos-latest-3.8 - path: /tmp/m38 + name: macos-latest-3.9 + path: /tmp/m39 - uses: actions/download-artifact@v4 with: name: macos-latest-3.12 path: /tmp/m312 - uses: actions/download-artifact@v4 with: - name: windows-latest-3.8 - path: /tmp/w38 + name: windows-latest-3.9 + path: /tmp/w39 - uses: actions/download-artifact@v4 with: name: windows-latest-3.12 @@ -319,10 +315,10 @@ jobs: shell: bash - name: Combined Deprecation Messages run: | - sort -f -u /tmp/u38/ml.dep /tmp/u39/ml.dep /tmp/u310/ml.dep /tmp/u311/ml.dep /tmp/u312/ml.dep /tmp/m38/ml.dep /tmp/m312/ml.dep /tmp/w38/ml.dep /tmp/w312/ml.dep /tmp/a310/ml.dep /tmp/a312/ml.dep || true + sort -f -u /tmp/u39/ml.dep /tmp/u310/ml.dep 
/tmp/u311/ml.dep /tmp/u312/ml.dep /tmp/m39/ml.dep /tmp/m312/ml.dep /tmp/w39/ml.dep /tmp/w312/ml.dep /tmp/a310/ml.dep /tmp/a312/ml.dep || true shell: bash - name: Coverage combine - run: coverage3 combine /tmp/u38/ml.dat + run: coverage3 combine /tmp/u39/ml.dat shell: bash - name: Upload to Coveralls env: diff --git a/.mergify.yml b/.mergify.yml index d9a318886..ca27e5964 100644 --- a/.mergify.yml +++ b/.mergify.yml @@ -1,12 +1,12 @@ queue_rules: - name: automerge conditions: - - check-success=Deprecation_Messages_and_Coverage (3.8) + - check-success=Deprecation_Messages_and_Coverage (3.9) pull_request_rules: - name: automatic merge on CI success and review conditions: - - check-success=Deprecation_Messages_and_Coverage (3.8) + - check-success=Deprecation_Messages_and_Coverage (3.9) - "#approved-reviews-by>=1" - label=automerge - label!=on hold diff --git a/.pylintdict b/.pylintdict index 375963119..93892d47d 100644 --- a/.pylintdict +++ b/.pylintdict @@ -23,6 +23,7 @@ armijo arxiv asmatrix aspuru +assertraises async autoencoder autoencoders @@ -136,6 +137,7 @@ elif endian entangler enum +eol eps estimatorqnn et @@ -158,6 +160,7 @@ farhi farrokh fi fidelities +fidelity fidelityquantumkernel filippo fletcher @@ -201,6 +204,7 @@ hadfield hamiltonian hamiltonians hao +hartree hashable hatano havlíček @@ -208,6 +212,7 @@ heidelberg hessians hilbert hoc +homebrew hopkins hoyer html @@ -259,6 +264,7 @@ kwargs labelled lagrange langle +linux larrañaga lcu len @@ -347,6 +353,7 @@ o'brien objval observables oct +october olson onboarding onodera @@ -452,10 +459,12 @@ rhs rightarrow robert romero +rosenbrock rosen runarsson runtime runtimes +RuntimeError rx ry rz @@ -495,6 +504,9 @@ sqrt statefn statevector statevectors +stdlib +stdout +stfc stddev stdlib stdout @@ -594,7 +606,3 @@ zz θ ψ ω -assertRaises -RuntimeError -Rosenbrock -fidelities diff --git a/.pylintrc b/.pylintrc deleted file mode 100644 index 43b37e7c4..000000000 --- a/.pylintrc +++ /dev/null @@ -1,381 +0,0 @@ -[MASTER] - -# Specify a configuration file. -#rcfile= - -# Python code to execute, usually for sys.path manipulation such as -# pygtk.require(). -# Changed to fix recursion crash since pandas 1.1.5 -init-hook='import sys; sys.setrecursionlimit(8 * sys.getrecursionlimit())' - -# Add files or directories to the blacklist. They should be base names, not -# paths. -ignore=CVS - -# Add files or directories matching the regex patterns to the blacklist. The -# regex matches against base names, not paths. -ignore-patterns= - -# Pickle collected data for later comparisons. -persistent=yes - -# List of plugins (as comma separated values of python modules names) to load, -# usually to register additional checkers. -load-plugins=pylint.extensions.docparams, # enable checking of docstring args - pylint.extensions.docstyle, # basic docstring style checks - -# Use multiple processes to speed up Pylint. -jobs=1 - -# Allow loading of arbitrary C extensions. Extensions are imported into the -# active Python interpreter and may run arbitrary code. -unsafe-load-any-extension=no - -# A comma-separated list of package or module names from where C extensions may -# be loaded. Extensions are loading into the active Python interpreter and may -# run arbitrary code -extension-pkg-whitelist= - - -[MESSAGES CONTROL] - -# Only show warnings with the listed confidence levels. Leave empty to show -# all. Valid levels: HIGH, INFERENCE, INFERENCE_FAILURE, UNDEFINED -confidence= - -# Enable the message, report, category or checker with the given id(s). 
You can -# either give multiple identifier separated by comma (,) or put this option -# multiple time (only on the command line, not in the configuration file where -# it should appear only once). See also the "--disable" option for examples. -#enable= - -# Disable the message, report, category or checker with the given id(s). You -# can either give multiple identifiers separated by comma (,) or put this -# option multiple times (only on the command line, not in the configuration -# file where it should appear only once).You can also use "--disable=all" to -# disable everything first and then reenable specific checks. For example, if -# you want to run only the similarities checker, you can use "--disable=all -# --enable=similarities". If you want to run only the classes checker, but have -# no Warning level messages displayed, use"--disable=all --enable=classes -# --disable=W" -disable=fixme, # disabled as TODOs would show up as warnings - protected-access, # disabled as we don't follow the public vs private - # convention strictly - duplicate-code, # disabled as it is too verbose - redundant-returns-doc, # for @abstractmethod, it cannot interpret "pass" - # disable the "too-many/few-..." refactoring hints - too-many-lines, too-many-branches, too-many-locals, too-many-nested-blocks, - too-many-statements, too-many-instance-attributes, too-many-arguments, - too-many-public-methods, too-few-public-methods, too-many-ancestors, - unnecessary-pass, # allow for methods with just "pass", for clarity - no-else-return, # relax "elif" after a clause with a return - docstring-first-line-empty, # relax docstring style - import-outside-toplevel, - - -[REPORTS] - -# Set the output format. Available formats are text, parseable, colorized, msvs -# (visual studio) and html. You can also give a reporter class, eg -# mypackage.mymodule.MyReporterClass. -output-format=text - -# Tells whether to display a full report or only the messages -reports=yes - -# Python expression which should return a note less than 10 (10 is the highest -# note). You have access to the variables errors warning, statement which -# respectively contain the number of errors / warnings messages and the total -# number of statements analyzed. This is used by the global evaluation report -# (RP0004). -evaluation=10.0 - ((float(5 * error + warning + refactor + convention) / statement) * 10) - -# Template used to display messages. This is a python new-style format string -# used to format the message information. See doc for all details -#msg-template= - - -[BASIC] - -# Good variable names which should always be accepted, separated by a comma -# i,j,k = typical indices -# n,m = typical numbers -# ex = for exceptions and errors -# v,w = typical vectors -# x,y,z = typical axes -# _ = placeholder name -# q,r,qr,cr,qc = quantum and classical registers, and quantum circuit -# pi = the PI constant -# op = operation iterator -# b = basis iterator -good-names=i,j,k,n,m,ex,v,w,x,y,z,Run,_,logger,q,c,r,qr,cr,qc,nd,pi,op,b,ar,br,a,mu, - __unittest,iSwapGate - -# Bad variable names which should always be refused, separated by a comma -bad-names=foo,bar,toto,tutu,tata - -# Colon-delimited sets of names that determine each other's naming style when -# the name regexes allow several styles. -name-group= - -# Include a hint for the correct naming format with invalid-name -include-naming-hint=no - -# List of decorators that produce properties, such as abc.abstractproperty. Add -# to this list to register other decorators that produce valid properties. 
-property-classes=abc.abstractproperty - -# Regular expression matching correct module names -module-rgx=(([a-z_][a-z0-9_]*)|([A-Z][a-zA-Z0-9]+))$ - -# Regular expression matching correct constant names -const-rgx=(([A-Z_][A-Z0-9_]*)|(__.*__))$ - -# Regular expression matching correct class names -class-rgx=[A-Z_][a-zA-Z0-9]+$ - -# Regular expression matching correct function names -function-rgx=[a-z_][a-z0-9_]{2,30}$ - -# Regular expression matching correct method names -method-rgx=(([a-z_][a-z0-9_]{2,49})|(assert[A-Z][a-zA-Z0-9]{2,43})|(test_[_a-zA-Z0-9]{2,}))$ - -# Regular expression matching correct attribute names -attr-rgx=[a-z_][a-z0-9_]{2,30}$ - -# Regular expression matching correct argument names -argument-rgx=[a-z_][a-z0-9_]{2,30}|ax|dt$ - -# Regular expression matching correct variable names -variable-rgx=[a-z_][a-z0-9_]{2,30}$ - -# Regular expression matching correct class attribute names -class-attribute-rgx=([A-Za-z_][A-Za-z0-9_]{2,30}|(__.*__))$ - -# Regular expression matching correct inline iteration names -inlinevar-rgx=[A-Za-z_][A-Za-z0-9_]*$ - -# Regular expression which should only match function or class names that do -# not require a docstring. -no-docstring-rgx=^_ - -# Minimum line length for functions/classes that require docstrings, shorter -# ones are exempt. -docstring-min-length=-1 - - -[ELIF] - -# Maximum number of nested blocks for function / method body -max-nested-blocks=5 - - -[FORMAT] - -# Maximum number of characters on a single line. -max-line-length=105 - -# Regexp for a line that is allowed to be longer than the limit. -ignore-long-lines=^\s*(# )??$ - -# Allow the body of an if to be on the same line as the test if there is no -# else. -single-line-if-stmt=no - -# Maximum number of lines in a module -max-module-lines=1000 - -# String used as indentation unit. This is usually " " (4 spaces) or "\t" (1 -# tab). -indent-string=' ' - -# Number of spaces of indent required inside a hanging or continued line. -indent-after-paren=4 - -# Expected format of line ending, e.g. empty (any line ending), LF or CRLF. -expected-line-ending-format= - - -[LOGGING] - -# Logging modules to check that the string format arguments are in logging -# function parameter format -logging-modules=logging - - -[MISCELLANEOUS] - -# List of note tags to take in consideration, separated by a comma. -notes=FIXME,XXX,TODO - - -[SIMILARITIES] - -# Minimum lines number of a similarity. -min-similarity-lines=4 - -# Ignore comments when computing similarities. -ignore-comments=yes - -# Ignore docstrings when computing similarities. -ignore-docstrings=yes - -# Ignore imports when computing similarities. -ignore-imports=no - - -[SPELLING] - -# Spelling dictionary name. Available dictionaries: none. To make it working -# install python-enchant package. -spelling-dict= - -# List of comma separated words that should not be checked. -spelling-ignore-words= - -# A path to a file that contains private dictionary; one word per line. -spelling-private-dict-file= - -# Tells whether to store unknown words to indicated private dictionary in -# --spelling-private-dict-file option instead of raising a message. -spelling-store-unknown-words=no - - -[TYPECHECK] - -# Tells whether missing members accessed in mixin class should be ignored. A -# mixin class is detected if its name ends with "mixin" (case insensitive). 
-ignore-mixin-members=yes - -# List of module names for which member attributes should not be checked -# (useful for modules/projects where namespaces are manipulated during runtime -# and thus existing member attributes cannot be deduced by static analysis. It -# supports qualified module names, as well as Unix pattern matching. -ignored-modules=matplotlib.cm,numpy.random,retworkx,torch - -# List of class names for which member attributes should not be checked (useful -# for classes with dynamically set attributes). This supports the use of -# qualified names. -ignored-classes=optparse.Values,thread._local,_thread._local,QuantumCircuit,torch - -# List of members which are set dynamically and missed by pylint inference -# system, and so shouldn't trigger E1101 when accessed. Python regular -# expressions are accepted. -generated-members= - -# List of decorators that produce context managers, such as -# contextlib.contextmanager. Add to this list to register other decorators that -# produce valid context managers. -contextmanager-decorators=contextlib.contextmanager - - -[VARIABLES] - -# Tells whether we should check for unused import in __init__ files. -init-import=no - -# A regular expression matching the name of dummy variables (i.e. expectedly -# not used). -dummy-variables-rgx=(_+[a-zA-Z0-9]*?$)|dummy - -# List of additional names supposed to be defined in builtins. Remember that -# you should avoid to define new builtins when possible. -additional-builtins= - -# List of strings which can identify a callback function by name. A callback -# name must start or end with one of those strings. -callbacks=cb_,_cb - -# List of qualified module names which can have objects that can redefine -# builtins. -redefining-builtins-modules=six.moves,future.builtins - - -[CLASSES] - -# List of method names used to declare (i.e. assign) instance attributes. -defining-attr-methods=__init__,__new__,setUp - -# List of valid names for the first argument in a class method. -valid-classmethod-first-arg=cls - -# List of valid names for the first argument in a metaclass class method. -valid-metaclass-classmethod-first-arg=mcs - -# List of member names, which should be excluded from the protected access -# warning. -exclude-protected=_asdict,_fields,_replace,_source,_make - - -[DESIGN] - -# Maximum number of arguments for function / method -max-args=8 - -# Argument names that match this expression will be ignored. Default to name -# with leading underscore -ignored-argument-names=_.* - -# Maximum number of locals for function / method body -max-locals=15 - -# Maximum number of return / yield for function / method body -max-returns=6 - -# Maximum number of branch for function / method body -max-branches=12 - -# Maximum number of statements in function / method body -max-statements=50 - -# Maximum number of parents for a class (see R0901). -max-parents=7 - -# Maximum number of attributes for a class (see R0902). -max-attributes=10 - -# Minimum number of public methods for a class (see R0903). -min-public-methods=2 - -# Maximum number of public methods for a class (see R0904). -max-public-methods=35 - -# Maximum number of boolean expressions in a if statement -max-bool-expr=5 - - -[IMPORTS] - -# Deprecated modules which should not be used, separated by a comma -deprecated-modules=optparse - -# Create a graph of every (i.e. 
internal and external) dependencies in the -# given file (report RP0402 must not be disabled) -import-graph= - -# Create a graph of external dependencies in the given file (report RP0402 must -# not be disabled) -ext-import-graph= - -# Create a graph of internal dependencies in the given file (report RP0402 must -# not be disabled) -int-import-graph= - -# Force import order to recognize a module as part of the standard -# compatibility libraries. -known-standard-library= - -# Force import order to recognize a module as part of a third party library. -known-third-party=enchant - -# Analyse import fallback blocks. This can be used to support both Python 2 and -# 3 compatible code, which means that the block might have code that exists -# only in one or another interpreter, leading to false positives when analysed. -analyse-fallback-blocks=no - - -[EXCEPTIONS] - -# Exceptions that will emit a warning when being caught. Defaults to -# "Exception" -overgeneral-exceptions=builtins.Exception diff --git a/README.md b/README.md index 3973737ad..35f3b6260 100644 --- a/README.md +++ b/README.md @@ -12,103 +12,105 @@ ## What is Qiskit Machine Learning? -Qiskit Machine Learning introduces fundamental computational building blocks, such as Quantum Kernels -and Quantum Neural Networks, used in different applications, including classification and regression. -On the one hand, this design is very easy to use and allows users to rapidly prototype a first model -without deep quantum computing knowledge. On the other hand, Qiskit Machine Learning is very flexible, -and users can easily extend it to support cutting-edge quantum machine learning research. +Qiskit Machine Learning introduces fundamental computational building blocks, such as Quantum +Kernels and Quantum Neural Networks, used in various applications including classification +and regression. + +This library is part of the Qiskit Community ecosystem, a collection of high-level codes that are based +on the Qiskit software development kit. As of version `0.7.0`, Qiskit Machine Learning is co-maintained +by IBM and the Hartree Center, part of the UK Science and Technologies Facilities Council (STFC). + +The Qiskit Machine Learning framework aims to be: + +* **User-friendly**, allowing users to quickly and easily prototype quantum machine learning models without + the need of extensive quantum computing knowledge. +* **Flexible**, providing tools and functionalities to conduct proof-of-concepts and innovative research + in quantum machine learning for both beginners and experts. +* **Extensible**, facilitating the integration of new cutting-edge features leveraging Qiskit's + architectures, patterns and related services. + ## What are the main features of Qiskit Machine Learning? 
-Qiskit Machine Learning provides the -[FidelityQuantumKernel](https://qiskit-community.github.io/qiskit-machine-learning/stubs/qiskit_machine_learning.kernels.QuantumKernel.html#qiskit_machine_learning.kernels.FidelityQuantumKernel) -class that makes use of the [Fidelity](https://qiskit-community.github.io/qiskit-algorithms/stubs/qiskit_algorithms.state_fidelities.BaseStateFidelity.html) algorithm introduced in Qiskit Algorithms and can be easily used -to directly compute kernel matrices for given datasets or can be passed to a Quantum Support Vector Classifier -[QSVC](https://qiskit-community.github.io/qiskit-machine-learning/stubs/qiskit_machine_learning.algorithms.QSVC.html#qiskit_machine_learning.algorithms.QSVC) or -Quantum Support Vector Regressor -[QSVR](https://qiskit-community.github.io/qiskit-machine-learning/stubs/qiskit_machine_learning.algorithms.QSVR.html#qiskit_machine_learning.algorithms.QSVR) -to quickly start solving classification or regression problems. -It also can be used with many other existing kernel-based machine learning algorithms from established -classical frameworks. - -Qiskit Machine Learning defines a generic interface for neural networks that is implemented by different -quantum neural networks. Two core implementations are readily provided, such as the -[EstimatorQNN](https://qiskit-community.github.io/qiskit-machine-learning/stubs/qiskit_machine_learning.neural_networks.EstimatorQNN.html), -and the [SamplerQNN](https://qiskit-community.github.io/qiskit-machine-learning/stubs/qiskit_machine_learning.neural_networks.SamplerQNN.html). -The [EstimatorQNN](https://qiskit-community.github.io/qiskit-machine-learning/stubs/qiskit_machine_learning.neural_networks.EstimatorQNN.html) -leverages the [Estimator](https://docs.quantum.ibm.com/api/qiskit/qiskit.primitives.BaseEstimator) primitive from Qiskit and -allows users to combine parametrized quantum circuits with quantum mechanical observables. The circuits can be constructed using, for example, building blocks -from Qiskit’s circuit library, and the QNN’s output is given by the expected value of the observable. -The [SamplerQNN](https://qiskit-community.github.io/qiskit-machine-learning/stubs/qiskit_machine_learning.neural_networks.SamplerQNN.html) -leverages another primitive introduced in Qiskit, the [Sampler](https://docs.quantum.ibm.com/api/qiskit/qiskit.primitives.BaseSampler) primitive. -This neural network translates quasi-probabilities of bitstrings estimated by the primitive into a desired output. This -translation step can be used to interpret a given bitstring in a particular context, e.g. translating it into a set of classes. - -The neural networks include the functionality to evaluate them for a given input as well as to compute the -corresponding gradients, which is important for efficient training. To train and use neural networks, -Qiskit Machine Learning provides a variety of learning algorithms such as the -[NeuralNetworkClassifier](https://qiskit-community.github.io/qiskit-machine-learning/stubs/qiskit_machine_learning.algorithms.NeuralNetworkClassifier.html#qiskit_machine_learning.algorithms.NeuralNetworkClassifier) -and -[NeuralNetworkRegressor](https://qiskit-community.github.io/qiskit-machine-learning/stubs/qiskit_machine_learning.algorithms.NeuralNetworkRegressor.html#qiskit_machine_learning.algorithms.NeuralNetworkRegressor). -Both take a QNN as input and then use it in a classification or regression context. 
-To allow an easy start, two convenience implementations are provided - the Variational Quantum Classifier -[VQC](https://qiskit-community.github.io/qiskit-machine-learning/stubs/qiskit_machine_learning.algorithms.VQC.html#qiskit_machine_learning.algorithms.VQC) -as well as the Variational Quantum Regressor -[VQR](https://qiskit-community.github.io/qiskit-machine-learning/stubs/qiskit_machine_learning.algorithms.VQR.html#qiskit_machine_learning.algorithms.VQR). -Both take just a feature map and an ansatz and construct the underlying QNN automatically. - -In addition to the models provided directly in Qiskit Machine Learning, it has the -[TorchConnector](https://qiskit-community.github.io/qiskit-machine-learning/stubs/qiskit_machine_learning.connectors.TorchConnector.html#qiskit_machine_learning.connectors.TorchConnector), -which allows users to integrate all of our quantum neural networks directly into the -[PyTorch](https://pytorch.org) -open source machine learning library. Thanks to Qiskit’s gradient algorithms, this includes automatic -differentiation - the overall gradients computed by [PyTorch](https://pytorch.org) -during the backpropagation take into -account quantum neural networks, too. The flexible design also allows the building of connectors -to other packages in the future. - -## Installation - -We encourage installing Qiskit Machine Learning via the pip tool (a python package manager). +### Kernel-based methods + +The [`FidelityQuantumKernel`](https://qiskit-community.github.io/qiskit-machine-learning/stubs/qiskit_machine_learning.kernels.QuantumKernel.html#qiskit_machine_learning.kernels.FidelityQuantumKernel) +class uses the [`Fidelity`](https://qiskit-community.github.io/qiskit-machine-learning/stubs/qiskit_machine_learning.state_fidelities.BaseStateFidelity.html) +algorithm. It computes kernel matrices for datasets and can be combined with a Quantum Support Vector Classifier ([`QSVC`](https://qiskit-community.github.io/qiskit-machine-learning/stubs/qiskit_machine_learning.algorithms.QSVC.html#qiskit_machine_learning.algorithms.QSVC)) +or a Quantum Support Vector Regressor ([`QSVR`](https://qiskit-community.github.io/qiskit-machine-learning/stubs/qiskit_machine_learning.algorithms.QSVR.html#qiskit_machine_learning.algorithms.QSVR)) +to solve classification or regression problems, respectively. It is also compatible with classical kernel-based machine learning algorithms. + + +### Quantum Neural Networks (QNNs) + +Qiskit Machine Learning defines a generic interface for neural networks, implemented by two core (derived) primitives: + +- **[`EstimatorQNN`](https://qiskit-community.github.io/qiskit-machine-learning/stubs/qiskit_machine_learning.neural_networks.EstimatorQNN.html):** Leverages the [`Estimator`](https://docs.quantum.ibm.com/api/qiskit/qiskit.primitives.BaseEstimator) primitive, combining parametrized quantum circuits with quantum mechanical observables. The output is the expected value of the observable. + +- **[`SamplerQNN`](https://qiskit-community.github.io/qiskit-machine-learning/stubs/qiskit_machine_learning.neural_networks.SamplerQNN.html):** Leverages the [`Sampler`](https://docs.quantum.ibm.com/api/qiskit/qiskit.primitives.BaseSampler) primitive, translating bit-string counts into the desired outputs.
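For illustration (this sketch is not part of the patch itself), constructing the two QNN flavours described above typically looks as follows; the one-qubit circuit, parameter names, and input values are invented for the example, and the reference primitives are assumed as defaults:

```python
from qiskit.circuit import Parameter, QuantumCircuit
from qiskit.quantum_info import SparsePauliOp
from qiskit_machine_learning.neural_networks import EstimatorQNN, SamplerQNN

# Toy one-qubit circuit: one input parameter (x) and one trainable weight (w).
x, w = Parameter("x"), Parameter("w")
qc = QuantumCircuit(1)
qc.ry(x, 0)
qc.rx(w, 0)

# EstimatorQNN: the output is the expectation value of the observable.
estimator_qnn = EstimatorQNN(
    circuit=qc,
    observables=SparsePauliOp("Z"),
    input_params=[x],
    weight_params=[w],
)
print(estimator_qnn.forward([[0.4]], [[0.1]]))  # shape (1, 1)

# SamplerQNN: the output is derived from the sampled (quasi-)probabilities.
qc_meas = qc.copy()
qc_meas.measure_all()  # measurements added explicitly for the reference Sampler
sampler_qnn = SamplerQNN(circuit=qc_meas, input_params=[x], weight_params=[w])
print(sampler_qnn.forward([[0.4]], [[0.1]]))  # shape (1, 2): probabilities of outcomes 0 and 1
```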
+ +To train and use neural networks, Qiskit Machine Learning provides learning algorithms such as the [`NeuralNetworkClassifier`](https://qiskit-community.github.io/qiskit-machine-learning/stubs/qiskit_machine_learning.algorithms.NeuralNetworkClassifier.html#qiskit_machine_learning.algorithms.NeuralNetworkClassifier) +and [`NeuralNetworkRegressor`](https://qiskit-community.github.io/qiskit-machine-learning/stubs/qiskit_machine_learning.algorithms.NeuralNetworkRegressor.html#qiskit_machine_learning.algorithms.NeuralNetworkRegressor). +Finally, built on these, the Variational Quantum Classifier ([`VQC`](https://qiskit-community.github.io/qiskit-machine-learning/stubs/qiskit_machine_learning.algorithms.VQC.html#qiskit_machine_learning.algorithms.VQC)) +and the Variational Quantum Regressor ([`VQR`](https://qiskit-community.github.io/qiskit-machine-learning/stubs/qiskit_machine_learning.algorithms.VQR.html#qiskit_machine_learning.algorithms.VQR)) +take a _feature map_ and an _ansatz_ to construct the underlying QNN automatically using high-level syntax. + +### Integration with PyTorch + +The [`TorchConnector`](https://qiskit-community.github.io/qiskit-machine-learning/stubs/qiskit_machine_learning.connectors.TorchConnector.html#qiskit_machine_learning.connectors.TorchConnector) +integrates QNNs with [PyTorch](https://pytorch.org). +Thanks to the gradient algorithms in Qiskit Machine Learning, this includes automatic differentiation. +The overall gradients computed by PyTorch during the backpropagation take into account quantum neural +networks, too. The flexible design also allows the building of connectors to other packages in the future. + +## Installation and documentation + +We encourage installing Qiskit Machine Learning via the `pip` tool, a `Python` package manager. ```bash pip install qiskit-machine-learning ``` -**pip** will handle all dependencies automatically and you will always install the latest -(and well-tested) version. +`pip` will install all dependencies automatically, so that you will always have the most recent +stable version. -If you want to work on the very latest work-in-progress versions, either to try features ahead of -their official release or if you want to contribute to Machine Learning, then you can install from source. -To do this follow the instructions in the +If you want to work instead on the very latest _work-in-progress_ versions of Qiskit Machine Learning, +either to try features ahead of +their official release or if you want to contribute to the library, then you can install from source. +For more details on how to do so and much more, follow the instructions in the [documentation](https://qiskit-community.github.io/qiskit-machine-learning/getting_started.html#installation). ### Optional Installs -* **PyTorch**, may be installed either using command `pip install 'qiskit-machine-learning[torch]'` to install the +* **PyTorch** may be installed either using command `pip install 'qiskit-machine-learning[torch]'` to install the package or refer to PyTorch [getting started](https://pytorch.org/get-started/locally/). When PyTorch is installed, the `TorchConnector` facilitates its use of quantum computed networks. -* **Sparse**, may be installed using command `pip install 'qiskit-machine-learning[sparse]'` to install the - package. Sparse being installed will enable the usage of sparse arrays/tensors. +* **Sparse** may be installed using command `pip install 'qiskit-machine-learning[sparse]'` to install the + package. 
Sparse being installed will enable the usage of sparse arrays and tensors. + +* **NLopt** is required for the global optimizers. [`NLopt`](https://nlopt.readthedocs.io/en/latest/) + can be installed manually with `pip install nlopt` on Windows and Linux platforms, or with `brew + install nlopt` on MacOS using the Homebrew package manager. For more information, + refer to the [installation guide](https://nlopt.readthedocs.io/en/latest/NLopt_Installation/). ## Migration to Qiskit 1.x > [!NOTE] -> Qiskit Machine Learning learning depends on Qiskit, which will be automatically installed as a -> dependency when you install Qiskit Machine Learning. If you have a pre-`1.0` version of Qiskit -> installed in your environment (however it was installed), and wish to upgrade to `1.0`, you -> should take note of the +> Qiskit Machine Learning depends on Qiskit, which will be automatically installed as a +> dependency when you install Qiskit Machine Learning. From version `0.8.0` of Qiskit Machine +> Learning, Qiskit `1.0` or above will be required. If you have a pre-`1.0` version of Qiskit +> installed in your environment (however it was installed), you should upgrade to `1.x` to +> continue using the latest features. You may refer to the > official [Qiskit 1.0 Migration Guide](https://docs.quantum.ibm.com/api/migration-guides/qiskit-1.0) -> for detailed instructions and examples on how to upgrade. +> for detailed instructions and examples on how to upgrade Qiskit. ---------------------------------------------------------------------------------------------------- ### Creating Your First Machine Learning Programming Experiment in Qiskit -Now that Qiskit Machine Learning is installed, it's time to begin working with the Machine Learning module. -Let's try an experiment using VQC (Variational Quantum Classifier) algorithm to -train and test samples from a data set to see how accurately the test set can -be classified. +Now that Qiskit Machine Learning is installed, it's time to begin working with the Machine +Learning module. Let's try an experiment using VQC (Variational Quantum Classifier) algorithm to +train and test samples from a data set to see how accurately the test set can be classified. ```python from qiskit.circuit.library import TwoLocal, ZZFeatureMap @@ -177,9 +179,11 @@ For questions that are more suited for a forum, we use the **Qiskit** tag in [St ## Humans behind Qiskit Machine Learning -Qiskit Machine Learning was inspired, authored and brought about by the collective work of a team of researchers -and software engineers. This library continues to grow with the help and work of -[many people](https://github.com/qiskit-community/qiskit-machine-learning/graphs/contributors), who contribute to the project at different levels. +Qiskit Machine Learning was inspired, authored and brought about by the collective work of a +team of researchers and software engineers. This library continues to grow with the help and +work of +[many people](https://github.com/qiskit-community/qiskit-machine-learning/graphs/contributors), +who contribute to the project at different levels. ## How can I cite Qiskit Machine Learning? 
If you use Qiskit, please cite as per the provided diff --git a/constraints.txt b/constraints.txt index cfe92e0b9..4cf890b52 100644 --- a/constraints.txt +++ b/constraints.txt @@ -1,4 +1,3 @@ numpy>=1.20,<2.0 -ipython<8.13;python_version<'3.9' nbconvert<7.14 # workaround https://github.com/jupyter/nbconvert/issues/2092 diff --git a/docs/getting_started.rst b/docs/getting_started.rst index b64772460..5ef8a3c99 100644 --- a/docs/getting_started.rst +++ b/docs/getting_started.rst @@ -14,7 +14,7 @@ that first. Then the information here can be followed which focuses on the addit specific to Qiskit Machine Learning. Qiskit Machine Learning has some functions that have been made optional where the dependent code and/or -support program(s) are not (or cannot be) installed by default. Those are PyTorch and Sparse. +support program(s) are not (or cannot be) installed by default. Those are PyTorch, Sparse and NLopt. See :ref:`optional_installs` for more information. .. tab-set:: @@ -97,6 +97,27 @@ Optional installs * **Sparse**, may be installed using command ``pip install 'qiskit-machine-learning[sparse]'`` to install the package. Sparse being installed will enable the usage of sparse arrays/tensors. +* **NLopt** is required for the global optimizers. `NLOpt `__ + can be installed manually with ``pip install nlopt`` on Windows and Linux platforms, or with + ``brew install nlopt`` on MacOS using the Homebrew package manager. For more information, refer + to the `installation guide `__. + +.. _migration-to-qiskit-1x: + +Migration to Qiskit 1.x +======================== + +.. note:: + + Qiskit Machine Learning depends on Qiskit, which will be automatically installed as a + dependency when you install Qiskit Machine Learning. From version ``0.8.0`` of Qiskit Machine + Learning, Qiskit ``1.0`` or above will be required. If you have a pre-``1.0`` version of Qiskit + installed in your environment (however it was installed), you should upgrade to ``1.x`` to + continue using the latest features. You may refer to the + official `Qiskit 1.0 Migration Guide `_ + for detailed instructions and examples on how to upgrade Qiskit. + + ---- Ready to get going?... diff --git a/docs/index.rst b/docs/index.rst index ee045ff58..767c75bce 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -5,58 +5,63 @@ Qiskit Machine Learning overview Overview ============== -Qiskit Machine Learning introduces fundamental computational building blocks - such as Quantum Kernels -and Quantum Neural Networks - used in different applications, including classification and regression. -On the one hand, this design is very easy to use and allows users to rapidly prototype a first model -without deep quantum computing knowledge. On the other hand, Qiskit Machine Learning is very flexible, -and users can easily extend it to support cutting-edge quantum machine learning research. - -Qiskit Machine Learning provides the :class:`~qiskit_machine_learning.kernels.FidelityQuantumKernel` -class class that makes use of the :class:`~qiskit_algorithms.state_fidelities.BaseStateFidelity` algorithm -introduced in Qiskit and can be easily used to directly compute kernel matrices for given datasets -or can be passed to a Quantum Support Vector Classifier -(:class:`~qiskit_machine_learning.algorithms.QSVC`) or -Quantum Support Vector Regressor (:class:`~qiskit_machine_learning.algorithms.QSVR`) -to quickly start solving classification or regression problems. 
-It also can be used with many other existing kernel-based machine learning algorithms from established -classical frameworks. - -Qiskit Machine Learning defines a generic interface for neural networks that is implemented by different -quantum neural networks. Two core implementations are readily provided, such as the -:class:`~qiskit_machine_learning.neural_networks.EstimatorQNN` -and the :class:`~qiskit_machine_learning.neural_networks.SamplerQNN`. -The :class:`~qiskit_machine_learning.neural_networks.EstimatorQNN` leverages -the :class:`~qiskit.primitives.BaseEstimator` primitive from Qiskit and allows users to combine -parametrized quantum circuits with quantum mechanical observables. The circuits can be constructed -using, for example, building blocks from Qiskit's circuit library, and the QNN's output is given -by the expected value of the observable. -The :class:`~qiskit_machine_learning.neural_networks.SamplerQNN` leverages another primitive -introduced in Qiskit, the :class:`~qiskit.primitives.BaseSampler` primitive. This neural network -translates quasi-probabilities of bitstrings estimated by the primitive into a desired output. This -translation step can be used to interpret a given bitstring in a particular context, e.g. -translating it into a set of classes. - -The neural networks include the functionality to evaluate them for a given input as well as to compute the -corresponding gradients, which is important for efficient training. To train and use neural networks, -Qiskit Machine Learning provides a variety of learning algorithms such as the -:class:`~qiskit_machine_learning.algorithms.NeuralNetworkClassifier` and -:class:`~qiskit_machine_learning.algorithms.NeuralNetworkRegressor`. -Both take a QNN as input and then use it in a classification or regression context. -To allow an easy start, two convenience implementations are provided - the Variational Quantum Classifier -(:class:`~qiskit_machine_learning.algorithms.VQC`) -as well as the Variational Quantum Regressor (:class:`~qiskit_machine_learning.algorithms.VQR`). -Both take just a feature map and an ansatz and construct the underlying QNN automatically. - -In addition to the models provided directly in Qiskit Machine Learning, it has the -:class:`~qiskit_machine_learning.connectors.TorchConnector`, -which allows users to integrate all of our quantum neural networks directly into the -`PyTorch `__ -open source machine learning library. Thanks to Qiskit Algorithm's gradient algorithms, -this includes automatic -differentiation - the overall gradients computed by `PyTorch `__ -during the backpropagation take into -account quantum neural networks, too. The flexible design also allows the building of connectors -to other packages in the future. +Qiskit Machine Learning introduces fundamental computational building blocks, such as Quantum +Kernels and Quantum Neural Networks, used in various applications including classification +and regression. + +This library is part of the Qiskit Community ecosystem, a collection of high-level codes that are based +on the Qiskit software development kit. As of version ``0.7.0``, Qiskit Machine Learning is co-maintained +by IBM and the Hartree Center, part of the UK Science and Technologies Facilities Council (STFC). + +The Qiskit Machine Learning framework aims to be: + +* **User-friendly**, allowing users to quickly and easily prototype quantum machine learning models without + the need of extensive quantum computing knowledge. 
+* **Flexible**, providing tools and functionalities to conduct proof-of-concepts and innovative research + in quantum machine learning for both beginners and experts. +* **Extensible**, facilitating the integration of new cutting-edge features leveraging Qiskit's + architectures, patterns and related services. + +What are the main features of Qiskit Machine Learning? +====================================================== + +Kernel-based methods +--------------------- + +The :class:`~qiskit_machine_learning.kernels.FidelityQuantumKernel` +class uses the :class:`~qiskit_algorithms.state_fidelities.BaseStateFidelity` +algorithm. It computes kernel matrices for datasets and can be combined with a Quantum Support Vector Classifier (:class:`~qiskit_machine_learning.algorithms.QSVC`) +or a Quantum Support Vector Regressor (:class:`~qiskit_machine_learning.algorithms.QSVR`) +to solve classification or regression problems respectively. It is also compatible with classical kernel-based machine learning algorithms. + +Quantum Neural Networks (QNNs) +------------------------------ + +Qiskit Machine Learning defines a generic interface for neural networks, implemented by two core (derived) primitives: + +- :class:`~qiskit_machine_learning.neural_networks.EstimatorQNN` leverages the Qiskit + `Estimator `__ primitive, combining parametrized quantum circuits + with quantum mechanical observables. The output is the expected value of the observable. + +- :class:`~qiskit_machine_learning.neural_networks.SamplerQNN` leverages the Qiskit + `Sampler `__ primitive, + translating bit-string counts into the desired outputs. + +To train and use neural networks, Qiskit Machine Learning provides learning algorithms such as the :class:`~qiskit_machine_learning.algorithms.NeuralNetworkClassifier` +and :class:`~qiskit_machine_learning.algorithms.NeuralNetworkRegressor`. +Finally, built on these, the Variational Quantum Classifier (:class:`~qiskit_machine_learning.algorithms.VQC`) +and the Variational Quantum Regressor (:class:`~qiskit_machine_learning.algorithms.VQR`) +take a *feature map* and an *ansatz* to construct the underlying QNN automatically using high-level syntax. + +Integration with PyTorch +------------------------ + +The :class:`~qiskit_machine_learning.connectors.TorchConnector` +integrates QNNs with `PyTorch `_. +Thanks to the gradient algorithms in Qiskit Machine Learning, this includes automatic differentiation. +The overall gradients computed by PyTorch during the backpropagation take into account quantum neural +networks, too. The flexible design also allows the building of connectors to other packages in the future. 
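As a minimal sketch of the `TorchConnector` workflow described above (not part of this patch, and assuming PyTorch is installed), the toy circuit and input value below are invented for the example:

```python
import torch
from qiskit.circuit import Parameter, QuantumCircuit
from qiskit.quantum_info import SparsePauliOp
from qiskit_machine_learning.connectors import TorchConnector
from qiskit_machine_learning.neural_networks import EstimatorQNN

# Small QNN with one input parameter and one trainable weight.
x, w = Parameter("x"), Parameter("w")
qc = QuantumCircuit(1)
qc.ry(x, 0)
qc.rx(w, 0)
qnn = EstimatorQNN(
    circuit=qc,
    observables=SparsePauliOp("Z"),
    input_params=[x],
    weight_params=[w],
)

# Wrap the QNN so that it behaves like a torch.nn.Module.
model = TorchConnector(qnn)
loss = model(torch.tensor([[0.3]])).sum()
loss.backward()  # PyTorch backpropagates through the quantum circuit

for p in model.parameters():
    print(p.grad)  # gradient with respect to the trainable circuit weight
```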
+ diff --git a/pyproject.toml b/pyproject.toml index f9728a2be..dd917cc01 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -4,4 +4,76 @@ build-backend = "setuptools.build_meta" [tool.black] line-length = 100 -target-version = ['py38', 'py39', 'py310', 'py311'] + +target-version = ['py39', 'py310', 'py311', 'py312'] + +[tool.pylint.main] +extension-pkg-allow-list = [ + "numpy", + "rustworkx", +] +load-plugins = ["pylint.extensions.docparams", "pylint.extensions.docstyle"] +py-version = "3.9" # update it when bumping minimum supported python version + +[tool.pylint.basic] +good-names = ["a", "b", "i", "j", "k", "d", "n", "m", "ex", "v", "w", "x", "y", "z", "Run", "_", "logger", "q", "c", "r", "qr", "cr", "qc", "nd", "pi", "op", "b", "ar", "br", "p", "cp", "ax", "dt", "__unittest", "iSwapGate", "mu"] +method-rgx = "(([a-z_][a-z0-9_]{2,49})|(assert[A-Z][a-zA-Z0-9]{2,43})|(test_[_a-zA-Z0-9]{2,}))$" +variable-rgx = "[a-z_][a-z0-9_]{1,30}$" + +[tool.pylint.format] +max-line-length = 105 # default 100 + +[tool.pylint."messages control"] +disable = [ +# intentionally disabled: + "spelling", # too noisy + "fixme", # disabled as TODOs would show up as warnings + "protected-access", # disabled as we don't follow the public vs private convention strictly + "duplicate-code", # disabled as it is too verbose + "redundant-returns-doc", # for @abstractmethod, it cannot interpret "pass" + "too-many-lines", "too-many-branches", "too-many-locals", "too-many-nested-blocks", "too-many-statements", + "too-many-instance-attributes", "too-many-arguments", "too-many-public-methods", "too-few-public-methods", "too-many-ancestors", + "unnecessary-pass", # allow for methods with just "pass", for clarity + "no-else-return", # relax "elif" after a clause with a return + "docstring-first-line-empty", # relax docstring style + "import-outside-toplevel", "import-error", # overzealous with our optionals/dynamic packages +# TODO(#9614): these were added in modern Pylint. Decide if we want to enable them. If so, +# remove from here and fix the issues. 
Else, move it above this section and add a comment +# with the rationale + "arguments-renamed", + "broad-exception-raised", + "consider-iterating-dictionary", + "consider-using-dict-items", + "consider-using-enumerate", + "consider-using-f-string", + "modified-iterating-list", + "nested-min-max", + "no-member", + "no-name-in-module", + "no-value-for-parameter", + "non-ascii-name", + "not-context-manager", + "superfluous-parens", + "unknown-option-value", + "unexpected-keyword-arg", + "unnecessary-dict-index-lookup", + "unnecessary-direct-lambda-call", + "unnecessary-dunder-call", + "unnecessary-ellipsis", + "unnecessary-lambda-assignment", + "unnecessary-list-index-lookup", + "unspecified-encoding", + "unsupported-assignment-operation", + "use-dict-literal", + "use-list-literal", + "use-implicit-booleaness-not-comparison", + "use-maxsplit-arg", +] + +enable = [ + "use-symbolic-message-instead" +] + +[tool.pylint.spelling] +spelling-private-dict-file = ".pylintdict" +spelling-store-unknown-words = "n" diff --git a/qiskit_machine_learning/algorithms/classifiers/neural_network_classifier.py b/qiskit_machine_learning/algorithms/classifiers/neural_network_classifier.py index 37595caa1..76f25ad2a 100644 --- a/qiskit_machine_learning/algorithms/classifiers/neural_network_classifier.py +++ b/qiskit_machine_learning/algorithms/classifiers/neural_network_classifier.py @@ -42,6 +42,7 @@ class NeuralNetworkClassifier(TrainableModel, ClassifierMixin): See `Scikit-Learn `__ for more details. """ + # pylint: disable=too-many-positional-arguments def __init__( self, neural_network: NeuralNetwork, diff --git a/qiskit_machine_learning/algorithms/classifiers/pegasos_qsvc.py b/qiskit_machine_learning/algorithms/classifiers/pegasos_qsvc.py index cae4c0e74..1b007078f 100644 --- a/qiskit_machine_learning/algorithms/classifiers/pegasos_qsvc.py +++ b/qiskit_machine_learning/algorithms/classifiers/pegasos_qsvc.py @@ -56,6 +56,7 @@ class PegasosQSVC(ClassifierMixin, SerializableModelMixin): FITTED = 0 UNFITTED = 1 + # pylint: disable=too-many-positional-arguments # pylint: disable=invalid-name def __init__( self, diff --git a/qiskit_machine_learning/algorithms/classifiers/vqc.py b/qiskit_machine_learning/algorithms/classifiers/vqc.py index 1f3c4238b..7b1926a70 100644 --- a/qiskit_machine_learning/algorithms/classifiers/vqc.py +++ b/qiskit_machine_learning/algorithms/classifiers/vqc.py @@ -44,6 +44,7 @@ class VQC(NeuralNetworkClassifier): Multi-label classification is not supported. E.g., :math:`[[1, 1, 0], [0, 1, 1], [1, 0, 1]]`. 
""" + # pylint: disable=too-many-positional-arguments def __init__( self, num_qubits: int | None = None, diff --git a/qiskit_machine_learning/algorithms/inference/qbayesian.py b/qiskit_machine_learning/algorithms/inference/qbayesian.py index 9621ba5e4..164cbe1ac 100644 --- a/qiskit_machine_learning/algorithms/inference/qbayesian.py +++ b/qiskit_machine_learning/algorithms/inference/qbayesian.py @@ -15,11 +15,15 @@ import copy from typing import Tuple, Dict, Set, List + from qiskit import QuantumCircuit, ClassicalRegister from qiskit.quantum_info import Statevector -from qiskit.circuit.library import GroverOperator -from qiskit.primitives import BaseSampler, Sampler from qiskit.circuit import Qubit +from qiskit.circuit.library import GroverOperator +from qiskit.primitives import BaseSampler, Sampler, BaseSamplerV2 +from qiskit.transpiler.passmanager import BasePassManager +from qiskit.transpiler.preset_passmanagers import generate_preset_pass_manager +from qiskit.providers.fake_provider import GenericBackendV2 class QBayesian: @@ -62,7 +66,8 @@ def __init__( *, limit: int = 10, threshold: float = 0.9, - sampler: BaseSampler | None = None, + sampler: BaseSampler | BaseSamplerV2 | None = None, + pass_manager: BasePassManager | None = None, ): """ Args: @@ -83,7 +88,8 @@ def __init__( # Test valid input for qrg in circuit.qregs: if qrg.size > 1: - raise ValueError("Every register needs to be mapped to exactly one unique qubit") + raise ValueError("Every register needs to be mapped to exactly one unique qubit.") + # Initialize parameter self._circ = circuit self._limit = limit @@ -92,6 +98,11 @@ def __init__( sampler = Sampler() self._sampler = sampler + if pass_manager is None: + _backend = GenericBackendV2(num_qubits=max(circuit.num_qubits, 2)) + pass_manager = generate_preset_pass_manager(optimization_level=1, backend=_backend) + self._pass_manager = pass_manager + # Label of register mapped to its qubit self._label2qubit = {qrg.name: qrg[0] for qrg in self._circ.qregs} # Label of register mapped to its qubit index bottom up in significance @@ -139,11 +150,34 @@ def _get_grover_op(self, evidence: Dict[str, int]) -> GroverOperator: def _run_circuit(self, circuit: QuantumCircuit) -> Dict[str, float]: """Run the quantum circuit with the sampler.""" - # Sample from circuit - job = self._sampler.run(circuit) - result = job.result() - # Get the counts of quantum state results - counts = result.quasi_dists[0].nearest_probability_distribution().binary_probabilities() + counts = {} + + if isinstance(self._sampler, BaseSampler): + # Sample from circuit + job = self._sampler.run(circuit) + result = job.result() + + # Get the counts of quantum state results + counts = result.quasi_dists[0].nearest_probability_distribution().binary_probabilities() + + elif isinstance(self._sampler, BaseSamplerV2): + + # Sample from circuit + circuit_isa = self._pass_manager.run(circuit) + job = self._sampler.run([circuit_isa]) + result = job.result() + + bit_array = list(result[0].data.values())[0] + bitstring_counts = bit_array.get_counts() + + # Normalize the counts to probabilities + total_shots = result[0].metadata["shots"] + counts = {k: v / total_shots for k, v in bitstring_counts.items()} + + # Convert to quasi-probabilities + # counts = QuasiDistribution(probabilities) + # counts = {k: v for k, v in counts.items()} + return counts def __power_grover( @@ -360,12 +394,12 @@ def limit(self, limit: int): self._limit = limit @property - def sampler(self) -> BaseSampler: + def sampler(self) -> BaseSampler | 
BaseSamplerV2: """Returns the sampler primitive used to compute the samples.""" return self._sampler @sampler.setter - def sampler(self, sampler: BaseSampler): + def sampler(self, sampler: BaseSampler | BaseSamplerV2): """Set the sampler primitive used to compute the samples.""" self._sampler = sampler diff --git a/qiskit_machine_learning/algorithms/regressors/vqr.py b/qiskit_machine_learning/algorithms/regressors/vqr.py index a26499c87..3ece2ca2f 100644 --- a/qiskit_machine_learning/algorithms/regressors/vqr.py +++ b/qiskit_machine_learning/algorithms/regressors/vqr.py @@ -29,6 +29,7 @@ class VQR(NeuralNetworkRegressor): """A convenient Variational Quantum Regressor implementation.""" + # pylint: disable=too-many-positional-arguments def __init__( self, num_qubits: int | None = None, diff --git a/qiskit_machine_learning/algorithms/trainable_model.py b/qiskit_machine_learning/algorithms/trainable_model.py index efd7b1796..31af78056 100644 --- a/qiskit_machine_learning/algorithms/trainable_model.py +++ b/qiskit_machine_learning/algorithms/trainable_model.py @@ -34,6 +34,7 @@ class TrainableModel(SerializableModelMixin): """Base class for ML model that defines a scikit-learn like interface for Estimators.""" + # pylint: disable=too-many-positional-arguments def __init__( self, neural_network: NeuralNetwork, diff --git a/qiskit_machine_learning/datasets/ad_hoc.py b/qiskit_machine_learning/datasets/ad_hoc.py index f553f74f4..ee66570d4 100644 --- a/qiskit_machine_learning/datasets/ad_hoc.py +++ b/qiskit_machine_learning/datasets/ad_hoc.py @@ -26,6 +26,7 @@ from ..utils import algorithm_globals +# pylint: disable=too-many-positional-arguments def ad_hoc_data( training_size: int, test_size: int, diff --git a/qiskit_machine_learning/gradients/base/base_sampler_gradient.py b/qiskit_machine_learning/gradients/base/base_sampler_gradient.py index 9e29b47ab..eaee27945 100644 --- a/qiskit_machine_learning/gradients/base/base_sampler_gradient.py +++ b/qiskit_machine_learning/gradients/base/base_sampler_gradient.py @@ -26,6 +26,7 @@ from qiskit.primitives.utils import _circuit_key from qiskit.providers import Options from qiskit.transpiler.passes import TranslateParameterizedGates +from qiskit.transpiler.passmanager import BasePassManager from .sampler_gradient_result import SamplerGradientResult from ..utils import ( @@ -41,7 +42,13 @@ class BaseSamplerGradient(ABC): """Base class for a ``SamplerGradient`` to compute the gradients of the sampling probability.""" - def __init__(self, sampler: BaseSampler, options: Options | None = None): + def __init__( + self, + sampler: BaseSampler, + len_quasi_dist: int | None = None, + options: Options | None = None, + pass_manager: BasePassManager | None = None, + ): """ Args: sampler: The sampler used to compute the gradients. 
@@ -52,6 +59,8 @@ def __init__(self, sampler: BaseSampler, options: Options | None = None): """ self._sampler: BaseSampler = sampler self._default_options = Options() + self._pass_manager = pass_manager + self._len_quasi_dist = len_quasi_dist if options is not None: self._default_options.update_options(**options) self._gradient_circuit_cache: dict[tuple, GradientCircuit] = {} diff --git a/qiskit_machine_learning/gradients/param_shift/param_shift_sampler_gradient.py b/qiskit_machine_learning/gradients/param_shift/param_shift_sampler_gradient.py index 0d7f384a8..e376755fa 100644 --- a/qiskit_machine_learning/gradients/param_shift/param_shift_sampler_gradient.py +++ b/qiskit_machine_learning/gradients/param_shift/param_shift_sampler_gradient.py @@ -20,11 +20,15 @@ from qiskit.circuit import Parameter, QuantumCircuit +from qiskit.primitives import BaseSamplerV1 +from qiskit.primitives.base import BaseSamplerV2 +from qiskit.result import QuasiDistribution + from ..base.base_sampler_gradient import BaseSamplerGradient from ..base.sampler_gradient_result import SamplerGradientResult from ..utils import _make_param_shift_parameter_values -from ...exceptions import AlgorithmError +from ...exceptions import AlgorithmError, QiskitMachineLearningError class ParamShiftSamplerGradient(BaseSamplerGradient): @@ -91,18 +95,52 @@ def _run_unique( all_n.append(n) # Run the single job with all circuits. - job = self._sampler.run(job_circuits, job_param_values, **options) + if isinstance(self._sampler, BaseSamplerV1): + job = self._sampler.run(job_circuits, job_param_values, **options) + elif isinstance(self._sampler, BaseSamplerV2): + if self._pass_manager is None: + raise QiskitMachineLearningError( + "To use ParamShiftSamplerGradient with SamplerV2 you " + + "must pass a pass manager" + ) + isa_g_circs = self._pass_manager.run(job_circuits) + circ_params = [ + (isa_g_circs[i], job_param_values[i]) for i in range(len(job_param_values)) + ] + job = self._sampler.run(circ_params) + else: + raise AlgorithmError( + "The accepted samplers are BaseSamplerV1 (deprecated) and BaseSamplerV2; got " + + f"{type(self._sampler)} instead." + ) + try: results = job.result() except Exception as exc: - raise AlgorithmError("Estimator job failed.") from exc + raise AlgorithmError("Sampler job failed.") from exc # Compute the gradients.
        gradients = []
         partial_sum_n = 0
+        opt = None  # Required by PyLint: possibly-used-before-assignment
         for n in all_n:
             gradient = []
-            result = results.quasi_dists[partial_sum_n : partial_sum_n + n]
+
+            if isinstance(self._sampler, BaseSamplerV1):
+                result = results.quasi_dists[partial_sum_n : partial_sum_n + n]
+                opt = self._get_local_options(options)
+            elif isinstance(self._sampler, BaseSamplerV2):
+                result = []
+                for i in range(partial_sum_n, partial_sum_n + n):
+                    bitstring_counts = results[i].data.meas.get_counts()
+                    # Normalize the counts to probabilities
+                    total_shots = sum(bitstring_counts.values())
+                    probabilities = {k: v / total_shots for k, v in bitstring_counts.items()}
+                    # Convert to quasi-probabilities
+                    counts = QuasiDistribution(probabilities)
+                    result.append(
+                        {k: v for k, v in counts.items() if int(k) < self._len_quasi_dist}
+                    )
+                opt = options
+
             for dist_plus, dist_minus in zip(result[: n // 2], result[n // 2 :]):
                 grad_dist: dict[int, float] = defaultdict(float)
                 for key, val in dist_plus.items():
@@ -113,5 +151,4 @@ def _run_unique(
             gradients.append(gradient)
             partial_sum_n += n

-        opt = self._get_local_options(options)
         return SamplerGradientResult(gradients=gradients, metadata=metadata, options=opt)
diff --git a/qiskit_machine_learning/gradients/spsa/spsa_estimator_gradient.py b/qiskit_machine_learning/gradients/spsa/spsa_estimator_gradient.py
index 1f9bfa0b2..c0387a201 100644
--- a/qiskit_machine_learning/gradients/spsa/spsa_estimator_gradient.py
+++ b/qiskit_machine_learning/gradients/spsa/spsa_estimator_gradient.py
@@ -40,6 +40,7 @@ class SPSAEstimatorGradient(BaseEstimatorGradient):
     `doi: 10.1109/TAC.2000.880982 `_
     """

+    # pylint: disable=too-many-positional-arguments
     def __init__(
         self,
         estimator: BaseEstimator,
diff --git a/qiskit_machine_learning/gradients/spsa/spsa_sampler_gradient.py b/qiskit_machine_learning/gradients/spsa/spsa_sampler_gradient.py
index c3de7c4da..1c25b8aaa 100644
--- a/qiskit_machine_learning/gradients/spsa/spsa_sampler_gradient.py
+++ b/qiskit_machine_learning/gradients/spsa/spsa_sampler_gradient.py
@@ -40,6 +40,7 @@ class SPSASamplerGradient(BaseSamplerGradient):
     `doi: 10.1109/TAC.2000.880982 `_.
     """

+    # pylint: disable=too-many-positional-arguments
     def __init__(
         self,
         sampler: BaseSampler,
diff --git a/qiskit_machine_learning/kernels/fidelity_quantum_kernel.py b/qiskit_machine_learning/kernels/fidelity_quantum_kernel.py
index e9769addc..212c32acd 100644
--- a/qiskit_machine_learning/kernels/fidelity_quantum_kernel.py
+++ b/qiskit_machine_learning/kernels/fidelity_quantum_kernel.py
@@ -256,6 +256,7 @@ def _get_kernel_entries(
             kernel_entries.extend(job.result().fidelities)
         return kernel_entries

+    # pylint: disable=too-many-positional-arguments
     def _is_trivial(
         self, i: int, j: int, x_i: np.ndarray, y_j: np.ndarray, symmetric: bool
     ) -> bool:
diff --git a/qiskit_machine_learning/neural_networks/neural_network.py b/qiskit_machine_learning/neural_networks/neural_network.py
index 651abf4ae..e75858d38 100644
--- a/qiskit_machine_learning/neural_networks/neural_network.py
+++ b/qiskit_machine_learning/neural_networks/neural_network.py
@@ -42,6 +42,7 @@ class NeuralNetwork(ABC):
     batched inputs. This is to be implemented by other (quantum) neural networks.
""" + # pylint: disable=too-many-positional-arguments def __init__( self, num_inputs: int, diff --git a/qiskit_machine_learning/neural_networks/sampler_qnn.py b/qiskit_machine_learning/neural_networks/sampler_qnn.py index 6982d2e87..28fee16d8 100644 --- a/qiskit_machine_learning/neural_networks/sampler_qnn.py +++ b/qiskit_machine_learning/neural_networks/sampler_qnn.py @@ -14,17 +14,19 @@ from __future__ import annotations import logging - from numbers import Integral from typing import Callable, cast, Iterable, Sequence - import numpy as np +from qiskit.primitives import BaseSamplerV1 +from qiskit.primitives.base import BaseSamplerV2 + from qiskit.circuit import Parameter, QuantumCircuit from qiskit.primitives import BaseSampler, SamplerResult, Sampler +from qiskit.result import QuasiDistribution import qiskit_machine_learning.optionals as _optionals -from .neural_network import NeuralNetwork + from ..gradients import ( BaseSamplerGradient, ParamShiftSamplerGradient, @@ -33,6 +35,7 @@ from ..circuit.library import QNNCircuit from ..exceptions import QiskitMachineLearningError +from .neural_network import NeuralNetwork if _optionals.HAS_SPARSE: # pylint: disable=import-error @@ -128,6 +131,7 @@ def __init__( self, *, circuit: QuantumCircuit, + num_virtual_qubits: int | None = None, sampler: BaseSampler | None = None, input_params: Sequence[Parameter] | None = None, weight_params: Sequence[Parameter] | None = None, @@ -138,50 +142,45 @@ def __init__( input_gradients: bool = False, ): """ - Args: - sampler: The sampler primitive used to compute the neural network's results. - If ``None`` is given, a default instance of the reference sampler defined - by :class:`~qiskit.primitives.Sampler` will be used. - circuit: The parametrized quantum circuit that generates the samples of this network. - If a :class:`~qiskit_machine_learning.circuit.library.QNNCircuit` is passed, the - `input_params` and `weight_params` do not have to be provided, because these two - properties are taken from the - :class:`~qiskit_machine_learning.circuit.library.QNNCircuit`. - input_params: The parameters of the circuit corresponding to the input. If a - :class:`~qiskit_machine_learning.circuit.library.QNNCircuit` is provided the - `input_params` value here is ignored. Instead the value is taken from the - :class:`~qiskit_machine_learning.circuit.library.QNNCircuit` input_parameters. - weight_params: The parameters of the circuit corresponding to the trainable weights. If - a :class:`~qiskit_machine_learning.circuit.library.QNNCircuit` is provided the - `weight_params` value here is ignored. Instead the value is taken from the - :class:`~qiskit_machine_learning.circuit.library.QNNCircuit` weight_parameters. - sparse: Returns whether the output is sparse or not. - interpret: A callable that maps the measured integer to another unsigned integer or - tuple of unsigned integers. These are used as new indices for the (potentially - sparse) output array. If no interpret function is - passed, then an identity function will be used by this neural network. - output_shape: The output shape of the custom interpretation. It is ignored if no custom - interpret method is provided where the shape is taken to be - ``2^circuit.num_qubits``. - gradient: An optional sampler gradient to be used for the backward pass. - If ``None`` is given, a default instance of - :class:`~qiskit_machine_learning.gradients.ParamShiftSamplerGradient` will be used. - input_gradients: Determines whether to compute gradients with respect to input data. 
-                Note that this parameter is ``False`` by default, and must be explicitly set to
-                ``True`` for a proper gradient computation when using
-                :class:`~qiskit_machine_learning.connectors.TorchConnector`.
-        Raises:
-            QiskitMachineLearningError: Invalid parameter values.
+        Args:
+            sampler: The sampler primitive used to compute the neural network's results. If
+                ``None`` is given, a default instance of the reference sampler defined by
+                :class:`~qiskit.primitives.Sampler` will be used.
+            circuit: The parametrized quantum circuit that generates the samples of this network.
+                If a :class:`~qiskit_machine_learning.circuit.library.QNNCircuit` is passed, the
+                `input_params` and `weight_params` do not have to be provided, because these two
+                properties are taken from the
+                :class:`~qiskit_machine_learning.circuit.library.QNNCircuit`.
+            num_virtual_qubits: Number of virtual qubits used by the neural network. If ``None``
+                is given, it defaults to the number of qubits of ``circuit``.
+            input_params: The parameters of the circuit corresponding to the input. If a
+                :class:`~qiskit_machine_learning.circuit.library.QNNCircuit` is provided the
+                `input_params` value here is ignored. Instead, the value is taken from the
+                :class:`~qiskit_machine_learning.circuit.library.QNNCircuit` input_parameters.
+            weight_params: The parameters of the circuit corresponding to the trainable weights.
+                If a :class:`~qiskit_machine_learning.circuit.library.QNNCircuit` is provided the
+                `weight_params` value here is ignored. Instead, the value is taken from the
+                :class:`~qiskit_machine_learning.circuit.library.QNNCircuit` weight_parameters.
+            sparse: Returns whether the output is sparse or not.
+            interpret: A callable that maps the measured integer to another unsigned integer or
+                tuple of unsigned integers. These are used as new indices for the (potentially
+                sparse) output array. If no interpret function is passed, then an identity
+                function will be used by this neural network.
+            output_shape: The output shape of the custom interpretation. For SamplerV1, it is
+                ignored if no custom interpret method is provided, in which case the shape is
+                taken to be ``2^circuit.num_qubits``.
+            gradient: An optional sampler gradient to be used for the backward pass. If ``None``
+                is given, a default instance of
+                :class:`~qiskit_machine_learning.gradients.ParamShiftSamplerGradient` will be used.
+            input_gradients: Determines whether to compute gradients with respect to input data.
+                Note that this parameter is ``False`` by default, and must be explicitly set to
+                ``True`` for a proper gradient computation when using
+                :class:`~qiskit_machine_learning.connectors.TorchConnector`.
+        Raises:
+            QiskitMachineLearningError: Invalid parameter values.
         """
         # set primitive, provide default
         if sampler is None:
             sampler = Sampler()
         self.sampler = sampler

-        # set gradient
-        if gradient is None:
-            gradient = ParamShiftSamplerGradient(self.sampler)
-        self.gradient = gradient
+        if num_virtual_qubits is None:
+            # Infer the virtual qubit count from the circuit if it is not given explicitly.
+            num_virtual_qubits = circuit.num_qubits
+        self.num_virtual_qubits = num_virtual_qubits

         self._org_circuit = circuit
@@ -196,6 +195,12 @@ def __init__(
             _optionals.HAS_SPARSE.require_now("DOK")

         self.set_interpret(interpret, output_shape)
+
+        # set gradient
+        if gradient is None:
+            gradient = ParamShiftSamplerGradient(sampler=self.sampler)
+        self.gradient = gradient
+
         self._input_gradients = input_gradients

         super().__init__(
@@ -276,10 +281,9 @@ def _compute_output_shape(
             # Warn user that output_shape parameter will be ignored
             logger.warning(
                 "No interpret function given, output_shape will be automatically "
-                "determined as 2^num_qubits."
+                "determined as 2^num_virtual_qubits."
            )
-        output_shape_ = (2**self.circuit.num_qubits,)
-
+        output_shape_ = (2**self.num_virtual_qubits,)
         return output_shape_

     def _postprocess(self, num_samples: int, result: SamplerResult) -> np.ndarray | SparseArray:
@@ -296,8 +300,24 @@ def _postprocess(self, num_samples: int, result: SamplerResult) -> np.ndarray |
             prob = np.zeros((num_samples, *self._output_shape))

         for i in range(num_samples):
-            counts = result.quasi_dists[i]
+            if isinstance(self.sampler, BaseSamplerV1):
+                counts = result.quasi_dists[i]
+            elif isinstance(self.sampler, BaseSamplerV2):
+                bitstring_counts = result[i].data.meas.get_counts()
+
+                # Normalize the counts to probabilities
+                total_shots = sum(bitstring_counts.values())
+                probabilities = {k: v / total_shots for k, v in bitstring_counts.items()}
+
+                # Convert to quasi-probabilities
+                counts = QuasiDistribution(probabilities)
+                counts = {k: v for k, v in counts.items() if int(k) < 2**self.num_virtual_qubits}
+            else:
+                raise QiskitMachineLearningError(
+                    "The accepted samplers are BaseSamplerV1 (deprecated) and BaseSamplerV2; "
+                    + f"got {type(self.sampler)} instead."
+                )

             # evaluate probabilities
             for b, v in counts.items():
                 key = self._interpret(b)
@@ -329,6 +349,7 @@ def _postprocess_gradient(
             )
             weights_grad = DOK((num_samples, *self._output_shape, self._num_weights))
         else:
+
             input_grad = (
                 np.zeros((num_samples, *self._output_shape, self._num_inputs))
                 if self._input_gradients
@@ -387,14 +408,22 @@ def _forward(
         """
         parameter_values, num_samples = self._preprocess_forward(input_data, weights)

-        # sampler allows batching
-        job = self.sampler.run([self._circuit] * num_samples, parameter_values)
+        if isinstance(self.sampler, BaseSamplerV1):
+            job = self.sampler.run([self._circuit] * num_samples, parameter_values)
+        elif isinstance(self.sampler, BaseSamplerV2):
+            job = self.sampler.run(
+                [(self._circuit, parameter_values[i]) for i in range(num_samples)]
+            )
+        else:
+            raise QiskitMachineLearningError(
+                "The accepted samplers are BaseSamplerV1 (deprecated) and BaseSamplerV2; "
+                + f"got {type(self.sampler)} instead."
+ ) try: results = job.result() except Exception as exc: - raise QiskitMachineLearningError("Sampler job failed.") from exc + raise QiskitMachineLearningError(f"Sampler job failed: {exc}") from exc result = self._postprocess(num_samples, results) - return result def _backward( @@ -410,21 +439,18 @@ def _backward( if np.prod(parameter_values.shape) > 0: circuits = [self._circuit] * num_samples - job = None if self._input_gradients: - job = self.gradient.run(circuits, parameter_values) # type: ignore[arg-type] + job = self.gradient.run(circuits, parameter_values) elif len(parameter_values[0]) > self._num_inputs: params = [self._circuit.parameters[self._num_inputs :]] * num_samples - job = self.gradient.run( - circuits, parameter_values, parameters=params # type: ignore[arg-type] - ) + job = self.gradient.run(circuits, parameter_values, parameters=params) if job is not None: try: results = job.result() except Exception as exc: - raise QiskitMachineLearningError("Sampler job failed.") from exc + raise QiskitMachineLearningError(f"Sampler job failed: {exc}") from exc input_grad, weights_grad = self._postprocess_gradient(num_samples, results) diff --git a/qiskit_machine_learning/optimizers/adam_amsgrad.py b/qiskit_machine_learning/optimizers/adam_amsgrad.py index 74e332c0b..fe0aeb910 100644 --- a/qiskit_machine_learning/optimizers/adam_amsgrad.py +++ b/qiskit_machine_learning/optimizers/adam_amsgrad.py @@ -57,6 +57,7 @@ class ADAM(Optimizer): "snapshot_dir", ] + # pylint: disable=too-many-positional-arguments def __init__( self, maxiter: int = 10000, diff --git a/qiskit_machine_learning/optimizers/aqgd.py b/qiskit_machine_learning/optimizers/aqgd.py index ef5d0d703..4de3fdfd7 100644 --- a/qiskit_machine_learning/optimizers/aqgd.py +++ b/qiskit_machine_learning/optimizers/aqgd.py @@ -49,6 +49,7 @@ class AQGD(Optimizer): _OPTIONS = ["maxiter", "eta", "tol", "disp", "momentum", "param_tol", "averaging"] + # pylint: disable=too-many-positional-arguments def __init__( self, maxiter: int | list[int] = 1000, @@ -179,6 +180,7 @@ def _compute_objective_fn_and_gradient( gradient = 0.5 * (values[1 : num_params + 1] - values[1 + num_params :]) return obj_value, gradient + # pylint: disable=too-many-positional-arguments def _update( self, params: np.ndarray, diff --git a/qiskit_machine_learning/optimizers/cg.py b/qiskit_machine_learning/optimizers/cg.py index bb060389a..f7005f36f 100644 --- a/qiskit_machine_learning/optimizers/cg.py +++ b/qiskit_machine_learning/optimizers/cg.py @@ -33,6 +33,7 @@ class CG(SciPyOptimizer): _OPTIONS = ["maxiter", "disp", "gtol", "eps"] + # pylint: disable=too-many-positional-arguments # pylint: disable=unused-argument def __init__( self, diff --git a/qiskit_machine_learning/optimizers/cobyla.py b/qiskit_machine_learning/optimizers/cobyla.py index d7710b1e3..eef13ab55 100644 --- a/qiskit_machine_learning/optimizers/cobyla.py +++ b/qiskit_machine_learning/optimizers/cobyla.py @@ -31,6 +31,7 @@ class COBYLA(SciPyOptimizer): _OPTIONS = ["maxiter", "disp", "rhobeg"] + # pylint: disable=too-many-positional-arguments # pylint: disable=unused-argument def __init__( self, diff --git a/qiskit_machine_learning/optimizers/gradient_descent.py b/qiskit_machine_learning/optimizers/gradient_descent.py index e33aacec0..832e44b47 100644 --- a/qiskit_machine_learning/optimizers/gradient_descent.py +++ b/qiskit_machine_learning/optimizers/gradient_descent.py @@ -174,6 +174,7 @@ def grad(x): """ + # pylint: disable=too-many-positional-arguments def __init__( self, maxiter: int = 100, diff --git 
a/qiskit_machine_learning/optimizers/gsls.py b/qiskit_machine_learning/optimizers/gsls.py index 6f2a36e30..7f7ab2966 100644 --- a/qiskit_machine_learning/optimizers/gsls.py +++ b/qiskit_machine_learning/optimizers/gsls.py @@ -51,6 +51,7 @@ class GSLS(Optimizer): "max_failed_rejection_sampling", ] + # pylint: disable=too-many-positional-arguments # pylint: disable=unused-argument def __init__( self, @@ -131,6 +132,7 @@ def minimize( return result + # pylint: disable=too-many-positional-arguments def ls_optimize( self, n: int, @@ -269,6 +271,7 @@ def sample_points( return points, directions + # pylint: disable=too-many-positional-arguments def sample_set( self, n: int, x: np.ndarray, var_lb: np.ndarray, var_ub: np.ndarray, num_points: int ) -> tuple[np.ndarray, np.ndarray]: diff --git a/qiskit_machine_learning/optimizers/l_bfgs_b.py b/qiskit_machine_learning/optimizers/l_bfgs_b.py index 0560e454d..4e355a6c7 100644 --- a/qiskit_machine_learning/optimizers/l_bfgs_b.py +++ b/qiskit_machine_learning/optimizers/l_bfgs_b.py @@ -46,6 +46,7 @@ class L_BFGS_B(SciPyOptimizer): # pylint: disable=invalid-name _OPTIONS = ["maxfun", "maxiter", "ftol", "iprint", "eps"] + # pylint: disable=too-many-positional-arguments # pylint: disable=unused-argument def __init__( self, diff --git a/qiskit_machine_learning/optimizers/nelder_mead.py b/qiskit_machine_learning/optimizers/nelder_mead.py index 8109b3f48..8fabce5eb 100644 --- a/qiskit_machine_learning/optimizers/nelder_mead.py +++ b/qiskit_machine_learning/optimizers/nelder_mead.py @@ -40,6 +40,7 @@ class NELDER_MEAD(SciPyOptimizer): # pylint: disable=invalid-name _OPTIONS = ["maxiter", "maxfev", "disp", "xatol", "adaptive"] + # pylint: disable=too-many-positional-arguments # pylint: disable=unused-argument def __init__( self, diff --git a/qiskit_machine_learning/optimizers/nft.py b/qiskit_machine_learning/optimizers/nft.py index b76bfc983..5dffc47c5 100644 --- a/qiskit_machine_learning/optimizers/nft.py +++ b/qiskit_machine_learning/optimizers/nft.py @@ -29,6 +29,7 @@ class NFT(SciPyOptimizer): _OPTIONS = ["maxiter", "maxfev", "disp", "reset_interval"] + # pylint: disable=too-many-positional-arguments # pylint: disable=unused-argument def __init__( self, @@ -69,6 +70,7 @@ def __init__( super().__init__(method=nakanishi_fujii_todo, options=options, **kwargs) +# pylint: disable=too-many-positional-arguments # pylint: disable=invalid-name def nakanishi_fujii_todo( fun, x0, args=(), maxiter=None, maxfev=1024, reset_interval=32, eps=1e-32, callback=None, **_ diff --git a/qiskit_machine_learning/optimizers/p_bfgs.py b/qiskit_machine_learning/optimizers/p_bfgs.py index c70d7697d..32287c4df 100644 --- a/qiskit_machine_learning/optimizers/p_bfgs.py +++ b/qiskit_machine_learning/optimizers/p_bfgs.py @@ -52,6 +52,7 @@ class P_BFGS(SciPyOptimizer): # pylint: disable=invalid-name _OPTIONS = ["maxfun", "ftol", "iprint"] + # pylint: disable=too-many-positional-arguments # pylint: disable=unused-argument def __init__( self, diff --git a/qiskit_machine_learning/optimizers/powell.py b/qiskit_machine_learning/optimizers/powell.py index 96842db36..e2e875a6e 100644 --- a/qiskit_machine_learning/optimizers/powell.py +++ b/qiskit_machine_learning/optimizers/powell.py @@ -33,6 +33,7 @@ class POWELL(SciPyOptimizer): _OPTIONS = ["maxiter", "maxfev", "disp", "xtol"] + # pylint: disable=too-many-positional-arguments # pylint: disable=unused-argument def __init__( self, diff --git a/qiskit_machine_learning/optimizers/qnspsa.py b/qiskit_machine_learning/optimizers/qnspsa.py index 
408bcd8a4..3d2b91381 100644 --- a/qiskit_machine_learning/optimizers/qnspsa.py +++ b/qiskit_machine_learning/optimizers/qnspsa.py @@ -93,6 +93,7 @@ def loss(x): """ + # pylint: disable=too-many-positional-arguments def __init__( self, fidelity: FIDELITY, @@ -184,6 +185,7 @@ def __init__( self.fidelity = fidelity + # pylint: disable=too-many-positional-arguments def _point_sample(self, loss, x, eps, delta1, delta2): loss_points = [x + eps * delta1, x - eps * delta1] fidelity_points = [ diff --git a/qiskit_machine_learning/optimizers/slsqp.py b/qiskit_machine_learning/optimizers/slsqp.py index facbfdbce..2a32c7c08 100644 --- a/qiskit_machine_learning/optimizers/slsqp.py +++ b/qiskit_machine_learning/optimizers/slsqp.py @@ -36,6 +36,7 @@ class SLSQP(SciPyOptimizer): _OPTIONS = ["maxiter", "disp", "ftol", "eps"] + # pylint: disable=too-many-positional-arguments # pylint: disable=unused-argument def __init__( self, diff --git a/qiskit_machine_learning/optimizers/spsa.py b/qiskit_machine_learning/optimizers/spsa.py index c6579811e..03ed45017 100644 --- a/qiskit_machine_learning/optimizers/spsa.py +++ b/qiskit_machine_learning/optimizers/spsa.py @@ -161,6 +161,7 @@ def __call__(self, nfev, parameters, value, stepsize, accepted) -> bool: """ + # pylint: disable=too-many-positional-arguments def __init__( self, maxiter: int = 100, @@ -280,6 +281,7 @@ def __init__( self._nfev: int | None = None # the number of function evaluations self._smoothed_hessian: np.ndarray | None = None # smoothed average of the Hessians + # pylint: disable=too-many-positional-arguments @staticmethod def calibrate( loss: Callable[[np.ndarray], float], @@ -413,6 +415,7 @@ def settings(self) -> dict[str, Any]: "termination_checker": self.termination_checker, } + # pylint: disable=too-many-positional-arguments def _point_sample(self, loss, x, eps, delta1, delta2): """A single sample of the gradient at position ``x`` in direction ``delta``.""" # points to evaluate @@ -478,6 +481,7 @@ def _point_estimate(self, loss, x, eps, num_samples): hessian_estimate / num_samples, ) + # pylint: disable=too-many-positional-arguments def _compute_update(self, loss, x, k, eps, lse_solver): # compute the perturbations if isinstance(self.resamplings, dict): diff --git a/qiskit_machine_learning/optimizers/tnc.py b/qiskit_machine_learning/optimizers/tnc.py index 13d50d29b..b16af35a3 100644 --- a/qiskit_machine_learning/optimizers/tnc.py +++ b/qiskit_machine_learning/optimizers/tnc.py @@ -33,6 +33,7 @@ class TNC(SciPyOptimizer): _OPTIONS = ["maxiter", "disp", "accuracy", "ftol", "xtol", "gtol", "eps"] + # pylint: disable=too-many-positional-arguments # pylint: disable=unused-argument def __init__( self, diff --git a/qiskit_machine_learning/state_fidelities/compute_uncompute.py b/qiskit_machine_learning/state_fidelities/compute_uncompute.py index 3453b2081..a1f745f67 100644 --- a/qiskit_machine_learning/state_fidelities/compute_uncompute.py +++ b/qiskit_machine_learning/state_fidelities/compute_uncompute.py @@ -18,11 +18,17 @@ from copy import copy from qiskit import QuantumCircuit -from qiskit.primitives import BaseSampler + +from qiskit.primitives import BaseSampler, BaseSamplerV1, SamplerResult, StatevectorSampler +from qiskit.primitives.base import BaseSamplerV2 + +from qiskit.transpiler.passmanager import PassManager +from qiskit.result import QuasiDistribution + from qiskit.primitives.primitive_job import PrimitiveJob from qiskit.providers import Options -from ..exceptions import AlgorithmError +from ..exceptions import AlgorithmError, 
QiskitMachineLearningError
 from .base_state_fidelity import BaseStateFidelity
 from .state_fidelity_result import StateFidelityResult
 from ..algorithm_job import AlgorithmJob
@@ -53,7 +59,10 @@ class ComputeUncompute(BaseStateFidelity):

     def __init__(
         self,
-        sampler: BaseSampler,
+        sampler: BaseSampler | BaseSamplerV2,
+        *,
+        num_virtual_qubits: int | None = None,
+        pass_manager: PassManager | None = None,
         options: Options | None = None,
         local: bool = False,
     ) -> None:
@@ -79,11 +88,24 @@ def __init__(
         Raises:
             ValueError: If the sampler is not an instance of ``BaseSampler``.
         """
-        if not isinstance(sampler, BaseSampler):
+        if (not isinstance(sampler, BaseSampler)) and (not isinstance(sampler, BaseSamplerV2)):
+            raise ValueError(
+                f"The sampler should be an instance of BaseSampler or BaseSamplerV2, "
+                f"but got {type(sampler)}"
+            )
+        if (
+            isinstance(sampler, BaseSamplerV2)
+            and (pass_manager is None)
+            and not isinstance(sampler, StatevectorSampler)
+        ):
+            raise ValueError(f"A pass_manager should be provided for {type(sampler)}.")
+        if (pass_manager is not None) and (num_virtual_qubits is None):
             raise ValueError(
-                f"The sampler should be an instance of BaseSampler, " f"but got {type(sampler)}"
+                f"Number of virtual qubits should be provided for {type(pass_manager)}."
             )
         self._sampler: BaseSampler = sampler
+        self.num_virtual_qubits = num_virtual_qubits
+        self.pass_manager = pass_manager
         self._local = local
         self._default_options = Options()
         if options is not None:
@@ -111,6 +133,8 @@ def create_fidelity_circuit(

         circuit = circuit_1.compose(circuit_2.inverse())
         circuit.measure_all()
+        if self.pass_manager is not None:
+            circuit = self.pass_manager.run(circuit)
         return circuit

     def _run(
@@ -156,29 +180,60 @@ def _run(
         # primitive's default options.
         opts = copy(self._default_options)
         opts.update_options(**options)
-
-        sampler_job = self._sampler.run(circuits=circuits, parameter_values=values, **opts.__dict__)
-
-        local_opts = self._get_local_options(opts.__dict__)
-        return AlgorithmJob(ComputeUncompute._call, sampler_job, circuits, self._local, local_opts)
+        if isinstance(self._sampler, BaseSamplerV1):
+            sampler_job = self._sampler.run(
+                circuits=circuits, parameter_values=values, **opts.__dict__
+            )
+            local_opts = self._get_local_options(opts.__dict__)
+        elif isinstance(self._sampler, BaseSamplerV2):
+            sampler_job = self._sampler.run(
+                [(circuits[i], values[i]) for i in range(len(circuits))], **opts.__dict__
+            )
+            local_opts = opts.__dict__
+        else:
+            raise QiskitMachineLearningError(
+                "The accepted samplers are BaseSamplerV1 (deprecated) and BaseSamplerV2; got"
+                + f" {type(self._sampler)} instead."
+            )
+        return AlgorithmJob(
+            ComputeUncompute._call,
+            sampler_job,
+            circuits,
+            self._local,
+            local_opts,
+            self._sampler,
+            self._post_process_v2,
+            self.num_virtual_qubits,
+        )

     @staticmethod
     def _call(
-        job: PrimitiveJob, circuits: Sequence[QuantumCircuit], local: bool, local_opts: Options
+        job: PrimitiveJob,
+        circuits: Sequence[QuantumCircuit],
+        local: bool,
+        local_opts: Options | None = None,
+        _sampler=None,
+        _post_process_v2=None,
+        num_virtual_qubits=None,
     ) -> StateFidelityResult:
         try:
             result = job.result()
         except Exception as exc:
             raise AlgorithmError("Sampler job failed!") from exc

+        if isinstance(_sampler, BaseSamplerV1):
+            quasi_dists = result.quasi_dists
+        elif isinstance(_sampler, BaseSamplerV2):
+            quasi_dists = _post_process_v2(result)
+
         if local:
             raw_fidelities = [
-                ComputeUncompute._get_local_fidelity(prob_dist, circuit.num_qubits)
-                for prob_dist, circuit in zip(result.quasi_dists, circuits)
+                ComputeUncompute._get_local_fidelity(prob_dist, num_virtual_qubits)
+                for prob_dist, circuit in zip(quasi_dists, circuits)
             ]
         else:
             raw_fidelities = [
-                ComputeUncompute._get_global_fidelity(prob_dist) for prob_dist in result.quasi_dists
+                ComputeUncompute._get_global_fidelity(prob_dist) for prob_dist in quasi_dists
             ]
         fidelities = ComputeUncompute._truncate_fidelities(raw_fidelities)

@@ -225,6 +280,21 @@ def _get_local_options(self, options: Options) -> Options:
         opts.update_options(**options)
         return opts

+    def _post_process_v2(self, result: SamplerResult):
+        quasis = []
+        for i in range(len(result)):
+            bitstring_counts = result[i].data.meas.get_counts()
+
+            # Normalize the counts to probabilities
+            total_shots = sum(bitstring_counts.values())
+            probabilities = {k: v / total_shots for k, v in bitstring_counts.items()}
+
+            # Convert to quasi-probabilities
+            counts = QuasiDistribution(probabilities)
+            quasi_probs = {k: v for k, v in counts.items() if int(k) < 2**self.num_virtual_qubits}
+            quasis.append(quasi_probs)
+        return quasis
+
     @staticmethod
     def _get_global_fidelity(probability_distribution: dict[int, float]) -> float:
         """Process the probability distribution of a measurement to determine the
diff --git a/releasenotes/notes/py38_end_of_support-fa1fdea6ea02b502.yaml b/releasenotes/notes/py38_end_of_support-fa1fdea6ea02b502.yaml
new file mode 100644
index 000000000..f039fa74e
--- /dev/null
+++ b/releasenotes/notes/py38_end_of_support-fa1fdea6ea02b502.yaml
@@ -0,0 +1,6 @@
+---
+upgrade:
+  - |
+    Removed support for using Qiskit Machine Learning with Python 3.8 to reflect
+    the EOL of Python 3.8 in October 2024 (PEP 569). To continue using Qiskit
+    Machine Learning, you must upgrade to Python 3.9 or above.
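For reference, the SamplerV2 path introduced above in SamplerQNN and ParamShiftSamplerGradient is wired together roughly as follows. This is only an illustrative sketch mirroring the updated test_sampler_qnn.py setup, not part of the patch; the backend, session, and qubit counts are placeholders.

# Illustrative sketch (assumes a GenericBackendV2 stand-in backend); mirrors the updated tests.
from qiskit import QuantumCircuit
from qiskit.circuit.library import ZZFeatureMap, RealAmplitudes
from qiskit.providers.fake_provider import GenericBackendV2
from qiskit.transpiler.preset_passmanagers import generate_preset_pass_manager
from qiskit_ibm_runtime import Session, SamplerV2

from qiskit_machine_learning.gradients import ParamShiftSamplerGradient
from qiskit_machine_learning.neural_networks import SamplerQNN

num_qubits = 2
feature_map = ZZFeatureMap(feature_dimension=num_qubits)
ansatz = RealAmplitudes(num_qubits, reps=1)

qc = QuantumCircuit(num_qubits)
qc.compose(feature_map, inplace=True)
qc.compose(ansatz, inplace=True)
qc.measure_all()  # the V2 post-processing reads counts from the "meas" register

backend = GenericBackendV2(num_qubits=8)
sampler = SamplerV2(mode=Session(backend=backend))

# Transpile once to an ISA circuit and reuse the same pass manager for the gradient.
pm = generate_preset_pass_manager(optimization_level=1, backend=backend)
isa_qc = pm.run(qc)

gradient = ParamShiftSamplerGradient(
    sampler=sampler,
    len_quasi_dist=2**num_qubits,
    pass_manager=pm,
)
qnn = SamplerQNN(
    sampler=sampler,
    circuit=isa_qc,
    num_virtual_qubits=num_qubits,  # virtual (pre-transpilation) qubit count
    input_params=list(feature_map.parameters),
    weight_params=list(ansatz.parameters),
    gradient=gradient,
    input_gradients=True,
)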
diff --git a/setup.py b/setup.py index 0d654e889..d7caaa4be 100644 --- a/setup.py +++ b/setup.py @@ -57,7 +57,6 @@ "Operating System :: MacOS", "Operating System :: POSIX :: Linux", "Programming Language :: Python :: 3 :: Only", - "Programming Language :: Python :: 3.8", "Programming Language :: Python :: 3.9", "Programming Language :: Python :: 3.10", "Programming Language :: Python :: 3.11", @@ -68,7 +67,7 @@ packages=setuptools.find_packages(include=['qiskit_machine_learning','qiskit_machine_learning.*']), install_requires=REQUIREMENTS, include_package_data=True, - python_requires=">=3.8", + python_requires=">=3.9", extras_require={ 'torch': ["torch"], 'sparse': ["sparse"], diff --git a/test/algorithms/classifiers/test_neural_network_classifier.py b/test/algorithms/classifiers/test_neural_network_classifier.py index 30930a9fe..52fcc8271 100644 --- a/test/algorithms/classifiers/test_neural_network_classifier.py +++ b/test/algorithms/classifiers/test_neural_network_classifier.py @@ -167,6 +167,7 @@ def _generate_data(self, num_inputs: int) -> tuple[np.ndarray, np.ndarray]: return features, labels + # pylint: disable=too-many-positional-arguments def _create_classifier( self, qnn: NeuralNetwork, diff --git a/test/algorithms/classifiers/test_vqc.py b/test/algorithms/classifiers/test_vqc.py index 9d252e8c8..15beeb049 100644 --- a/test/algorithms/classifiers/test_vqc.py +++ b/test/algorithms/classifiers/test_vqc.py @@ -84,6 +84,7 @@ def setUp(self): "no_one_hot": _create_dataset(6, 2, one_hot=False), } + # pylint: disable=too-many-positional-arguments @idata(itertools.product(NUM_QUBITS_LIST, FEATURE_MAPS, ANSATZES, OPTIMIZERS, DATASETS)) @unpack def test_VQC(self, num_qubits, f_m, ans, opt, d_s): diff --git a/test/algorithms/inference/test_qbayesian.py b/test/algorithms/inference/test_qbayesian.py index d0b114b8d..5c68e17ab 100644 --- a/test/algorithms/inference/test_qbayesian.py +++ b/test/algorithms/inference/test_qbayesian.py @@ -15,11 +15,14 @@ import unittest from test import QiskitMachineLearningTestCase +import copy import numpy as np from qiskit import QuantumCircuit from qiskit.circuit import QuantumRegister from qiskit.primitives import Sampler +from qiskit.providers.fake_provider import GenericBackendV2 +from qiskit_ibm_runtime import Session, SamplerV2 from qiskit_machine_learning.utils import algorithm_globals from qiskit_machine_learning.algorithms import QBayesian @@ -208,5 +211,193 @@ def test_trivial_circuit(self): ) +class TestQBayesianInferenceV2(QiskitMachineLearningTestCase): + """Test QBayesianInference Algorithm V2""" + + backend = GenericBackendV2(num_qubits=3) + session = Session(backend=backend) + _sampler = SamplerV2(mode=session) + _sampler.options.default_shots = 2**7 + + def setUp(self): + super().setUp() + algorithm_globals.random_seed = 10598 + # Quantum Bayesian inference + qc = self._create_bayes_net() + self.qbayesian = QBayesian(qc, sampler=self._sampler) + + def _create_bayes_net(self): + # Probabilities + theta_a = 2 * np.arcsin(np.sqrt(0.25)) + theta_b_na = 2 * np.arcsin(np.sqrt(0.6)) + theta_b_a = 2 * np.arcsin(np.sqrt(0.7)) + theta_c_nbna = 2 * np.arcsin(np.sqrt(0.1)) + theta_c_nba = 2 * np.arcsin(np.sqrt(0.55)) + theta_c_bna = 2 * np.arcsin(np.sqrt(0.7)) + theta_c_ba = 2 * np.arcsin(np.sqrt(0.9)) + # Random variables + qr_a = QuantumRegister(1, name="A") + qr_b = QuantumRegister(1, name="B") + qr_c = QuantumRegister(1, name="C") + # Define a 3-qubit quantum circuit + qc = QuantumCircuit(qr_a, qr_b, qr_c, name="Bayes net") + # P(A) + qc.ry(theta_a, 0) 
+ # P(B|-A) + qc.x(0) + qc.cry(theta_b_na, qr_a, qr_b) + qc.x(0) + # P(B|A) + qc.cry(theta_b_a, qr_a, qr_b) + # P(C|-B,-A) + qc.x(0) + qc.x(1) + qc.mcry(theta_c_nbna, [qr_a[0], qr_b[0]], qr_c[0]) + qc.x(0) + qc.x(1) + # P(C|-B,A) + qc.x(1) + qc.mcry(theta_c_nba, [qr_a[0], qr_b[0]], qr_c[0]) + qc.x(1) + # P(C|B,-A) + qc.x(0) + qc.mcry(theta_c_bna, [qr_a[0], qr_b[0]], qr_c[0]) + qc.x(0) + # P(C|B,A) + qc.mcry(theta_c_ba, [qr_a[0], qr_b[0]], qr_c[0]) + return qc + + def test_rejection_sampling(self): + """Test rejection sampling with different amount of evidence""" + test_cases = [{"A": 0, "B": 0}, {"A": 0}, {}] + true_res = [ + {"000": 0.9, "100": 0.1}, + {"000": 0.36, "100": 0.04, "010": 0.18, "110": 0.42}, + { + "000": 0.27, + "001": 0.03375, + "010": 0.135, + "011": 0.0175, + "100": 0.03, + "101": 0.04125, + "110": 0.315, + "111": 0.1575, + }, + ] + for evd, res in zip(test_cases, true_res): + samples = self.qbayesian.rejection_sampling(evidence=evd) + self.assertTrue( + np.all( + [ + np.isclose(res[sample_key], sample_val, atol=0.08) + for sample_key, sample_val in samples.items() + ] + ) + ) + + def test_rejection_sampling_format_res(self): + """Test rejection sampling with different result format""" + test_cases = [{"A": 0, "C": 1}, {"C": 1}, {}] + true_res = [ + {"P(B=0|A=0,C=1)", "P(B=1|A=0,C=1)"}, + {"P(A=0,B=0|C=1)", "P(A=1,B=0|C=1)", "P(A=0,B=1|C=1)", "P(A=1,B=1|C=1)"}, + { + "P(A=0,B=0,C=0)", + "P(A=1,B=0,C=0)", + "P(A=0,B=1,C=0)", + "P(A=1,B=1,C=0)", + "P(A=0,B=0,C=1)", + "P(A=1,B=0,C=1)", + "P(A=0,B=1,C=1)", + "P(A=1,B=1,C=1)", + }, + ] + for evd, res in zip(test_cases, true_res): + self.assertTrue( + res == set(self.qbayesian.rejection_sampling(evidence=evd, format_res=True).keys()) + ) + + def test_inference(self): + """Test inference with different amount of evidence""" + test_q_1, test_e_1 = ({"B": 1}, {"A": 1, "C": 1}) + test_q_2 = {"B": 0} + test_q_3 = {} + test_q_4, test_e_4 = ({"B": 1}, {"A": 0}) + true_res = [0.79, 0.21, 1, 0.6] + res = [] + samples = [] + # 1. Query basic inference + res.append(self.qbayesian.inference(query=test_q_1, evidence=test_e_1)) + samples.append(self.qbayesian.samples) + # 2. Query basic inference + res.append(self.qbayesian.inference(query=test_q_2)) + samples.append(self.qbayesian.samples) + # 3. Query marginalized inference + res.append(self.qbayesian.inference(query=test_q_3)) + samples.append(self.qbayesian.samples) + # 4. 
Query marginalized inference + res.append(self.qbayesian.inference(query=test_q_4, evidence=test_e_4)) + # Correct inference + np.testing.assert_allclose(true_res, res, atol=0.04) + # No change in samples + self.assertTrue(samples[0] == samples[1]) + + def test_parameter(self): + """Tests parameter of methods""" + # Test set threshold + self.qbayesian.threshold = 0.9 + self.qbayesian.rejection_sampling(evidence={"A": 1}) + self.assertTrue(self.qbayesian.threshold == 0.9) + # Test set limit + # Not converged + self.qbayesian.limit = 0 + self.qbayesian.rejection_sampling(evidence={"B": 1}) + self.assertFalse(self.qbayesian.converged) + self.assertTrue(self.qbayesian.limit == 0) + # Converged + self.qbayesian.limit = 1 + self.qbayesian.rejection_sampling(evidence={"B": 1}) + self.assertTrue(self.qbayesian.converged) + self.assertTrue(self.qbayesian.limit == 1) + # Test sampler + sampler = copy.deepcopy(self._sampler) + self.qbayesian.sampler = sampler + self.qbayesian.inference(query={"B": 1}, evidence={"A": 0, "C": 0}) + self.assertTrue(self.qbayesian.sampler == sampler) + # Create a quantum circuit with a register that has more than one qubit + with self.assertRaises(ValueError, msg="No ValueError in constructor with invalid input."): + QBayesian(QuantumCircuit(QuantumRegister(2, "qr"))) + # Test invalid inference without evidence or generated samples + with self.assertRaises(ValueError, msg="No ValueError in inference with invalid input."): + QBayesian(QuantumCircuit(QuantumRegister(1, "qr"))).inference({"A": 0}) + + def test_trivial_circuit(self): + """Tests trivial quantum circuit""" + # Define rotation angles + theta_a = 2 * np.arcsin(np.sqrt(0.2)) + theta_b_a = 2 * np.arcsin(np.sqrt(0.9)) + theta_b_na = 2 * np.arcsin(np.sqrt(0.3)) + # Define quantum registers + qr_a = QuantumRegister(1, name="A") + qr_b = QuantumRegister(1, name="B") + # Define a 2-qubit quantum circuit + qc = QuantumCircuit(qr_a, qr_b, name="Bayes net small") + qc.ry(theta_a, 0) + qc.cry(theta_b_a, control_qubit=qr_a, target_qubit=qr_b) + qc.x(0) + qc.cry(theta_b_na, control_qubit=qr_a, target_qubit=qr_b) + qc.x(0) + # Inference + self.assertTrue( + np.all( + np.isclose( + 0.1, + QBayesian(circuit=qc).inference(query={"B": 0}, evidence={"A": 1}), + atol=0.04, + ) + ) + ) + + if __name__ == "__main__": unittest.main() diff --git a/test/connectors/test_torch.py b/test/connectors/test_torch.py index a9d4dde22..3979ed5d1 100644 --- a/test/connectors/test_torch.py +++ b/test/connectors/test_torch.py @@ -42,6 +42,7 @@ def subTest(self, msg, **kwargs): """Sub test.""" raise builtins.Exception("Abstract method") + # pylint: disable=too-many-positional-arguments @abstractmethod def assertAlmostEqual(self, first, second, places=None, msg=None, delta=None): """Assert almost equal.""" diff --git a/test/connectors/test_torch_connector.py b/test/connectors/test_torch_connector.py index 385af7803..68cb77c49 100644 --- a/test/connectors/test_torch_connector.py +++ b/test/connectors/test_torch_connector.py @@ -263,6 +263,7 @@ class ConvolutionalLayer(torch.nn.Module): stride (int, optional): Stride of the convolution. Defaults to 1. 
""" + # pylint: disable=too-many-positional-arguments def __init__( self, input_channel: int, diff --git a/test/kernels/test_fidelity_qkernel.py b/test/kernels/test_fidelity_qkernel.py index 51b9a5b45..d21923510 100644 --- a/test/kernels/test_fidelity_qkernel.py +++ b/test/kernels/test_fidelity_qkernel.py @@ -129,6 +129,7 @@ def test_exceptions(self): with self.assertRaises(ValueError, msg="Unsupported value of 'max_circuits_per_job'."): _ = FidelityQuantumKernel(max_circuits_per_job=-1) + # pylint: disable=too-many-positional-arguments @idata( # params, fidelity, feature map, enforce_psd, duplicate itertools.product( @@ -152,6 +153,7 @@ def test_evaluate_symmetric(self, params, fidelity, feature_map, enforce_psd, du np.testing.assert_allclose(kernel_matrix, solution, rtol=1e-4, atol=1e-10) + # pylint: disable=too-many-positional-arguments @idata( itertools.product( ["samples_1", "samples_4"], diff --git a/test/neural_networks/test_sampler_qnn.py b/test/neural_networks/test_sampler_qnn.py index 8084b5109..09c0982c0 100644 --- a/test/neural_networks/test_sampler_qnn.py +++ b/test/neural_networks/test_sampler_qnn.py @@ -23,11 +23,18 @@ from qiskit.circuit import Parameter, QuantumCircuit from qiskit.primitives import Sampler +from qiskit.providers.fake_provider import GenericBackendV2 +from qiskit.transpiler.preset_passmanagers import generate_preset_pass_manager from qiskit.circuit.library import RealAmplitudes, ZZFeatureMap -from qiskit_machine_learning.utils import algorithm_globals +from qiskit_ibm_runtime import Session, SamplerV2 + +from qiskit_machine_learning.utils import algorithm_globals from qiskit_machine_learning.circuit.library import QNNCircuit from qiskit_machine_learning.neural_networks.sampler_qnn import SamplerQNN +from qiskit_machine_learning.gradients.param_shift.param_shift_sampler_gradient import ( + ParamShiftSamplerGradient, +) import qiskit_machine_learning.optionals as _optionals if _optionals.HAS_SPARSE: @@ -45,8 +52,9 @@ class SparseArray: # type: ignore DEFAULT = "default" SHOTS = "shots" +V2 = "v2" SPARSE = [True, False] -SAMPLERS = [DEFAULT, SHOTS] +SAMPLERS = [DEFAULT, SHOTS, V2] INTERPRET_TYPES = [0, 1, 2] BATCH_SIZES = [2] INPUT_GRADS = [True, False] @@ -69,6 +77,8 @@ def setUp(self): self.qc = QuantumCircuit(num_qubits) self.qc.append(feature_map, range(2)) self.qc.append(var_form, range(2)) + self.qc.measure_all() + self.num_virtual_qubits = num_qubits # store params self.input_params = list(feature_map.parameters) @@ -93,19 +103,44 @@ def interpret_2d(x): # define sampler primitives self.sampler = Sampler() self.sampler_shots = Sampler(options={"shots": 100, "seed": 42}) + self.backend = GenericBackendV2(num_qubits=8) + self.session = Session(backend=self.backend) + self.sampler_v2 = SamplerV2(mode=self.session) self.array_type = {True: SparseArray, False: np.ndarray} + # pylint: disable=too-many-positional-arguments def _get_qnn( self, sparse, sampler_type, interpret_id, input_params, weight_params, input_grads ): """Construct QNN from configuration.""" + # get interpret setting + interpret = None + output_shape = None + if interpret_id == 1: + interpret = self.interpret_1d + output_shape = self.output_shape_1d + elif interpret_id == 2: + interpret = self.interpret_2d + output_shape = self.output_shape_2d # get quantum instance + gradient = None if sampler_type == SHOTS: sampler = self.sampler_shots elif sampler_type == DEFAULT: sampler = self.sampler + elif sampler_type == V2: + sampler = self.sampler_v2 + + if self.qc.layout is None: + self.pm = 
generate_preset_pass_manager(optimization_level=1, backend=self.backend) + self.qc = self.pm.run(self.qc) + gradient = ParamShiftSamplerGradient( + sampler=self.sampler, + len_quasi_dist=2**self.num_virtual_qubits, + pass_manager=self.pm, + ) else: sampler = None @@ -123,11 +158,13 @@ def _get_qnn( qnn = SamplerQNN( sampler=sampler, circuit=self.qc, + num_virtual_qubits=self.num_virtual_qubits, input_params=input_params, weight_params=weight_params, sparse=sparse, interpret=interpret, output_shape=output_shape, + gradient=gradient, input_gradients=input_grads, ) return qnn @@ -344,7 +381,7 @@ def test_no_parameters(self): sampler_qnn.input_gradients = True self._verify_qnn(sampler_qnn, 1, input_data=None, weights=None) - def test_qnn_qc_circui_construction(self): + def test_qnn_qc_circuit_construction(self): """Test Sampler QNN properties and forward/backward pass for QNNCircuit construction""" num_qubits = 2 feature_map = ZZFeatureMap(feature_dimension=num_qubits) diff --git a/test/optimizers/test_spsa.py b/test/optimizers/test_spsa.py index 80afce43e..ec3af13ee 100644 --- a/test/optimizers/test_spsa.py +++ b/test/optimizers/test_spsa.py @@ -146,6 +146,7 @@ class TerminationChecker: def __init__(self): self.values = [] + # pylint: disable=too-many-positional-arguments def __call__(self, nfev, point, fvalue, stepsize, accepted) -> bool: self.values.append(fvalue) diff --git a/test/state_fidelities/test_compute_uncompute_v2.py b/test/state_fidelities/test_compute_uncompute_v2.py new file mode 100644 index 000000000..819b206fc --- /dev/null +++ b/test/state_fidelities/test_compute_uncompute_v2.py @@ -0,0 +1,343 @@ +# This code is part of a Qiskit project. +# +# (C) Copyright IBM 2022, 2024. +# +# This code is licensed under the Apache License, Version 2.0. You may +# obtain a copy of this license in the LICENSE.txt file in the root directory +# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0. +# +# Any modifications or derivative works of this code must retain this +# copyright notice, and modified files need to carry a notice indicating +# that they have been altered from the originals. 
+ +"""Tests for Fidelity.""" + +import unittest +from test import QiskitMachineLearningTestCase + +import numpy as np + +from qiskit.circuit import QuantumCircuit, ParameterVector +from qiskit.circuit.library import RealAmplitudes +from qiskit.primitives import Sampler +from qiskit.providers.fake_provider import GenericBackendV2 +from qiskit.transpiler.preset_passmanagers import generate_preset_pass_manager + +from qiskit_ibm_runtime import Session, SamplerV2 + +from qiskit_machine_learning.state_fidelities import ComputeUncompute + + +class TestComputeUncompute(QiskitMachineLearningTestCase): + """Test Compute-Uncompute Fidelity class""" + + def setUp(self): + super().setUp() + parameters = ParameterVector("x", 2) + + rx_rotations = QuantumCircuit(2) + rx_rotations.rx(parameters[0], 0) + rx_rotations.rx(parameters[1], 1) + + ry_rotations = QuantumCircuit(2) + ry_rotations.ry(parameters[0], 0) + ry_rotations.ry(parameters[1], 1) + + plus = QuantumCircuit(2) + plus.h([0, 1]) + + zero = QuantumCircuit(2) + + rx_rotation = QuantumCircuit(2) + rx_rotation.rx(parameters[0], 0) + rx_rotation.h(1) + + self._circuit = [rx_rotations, ry_rotations, plus, zero, rx_rotation] + + self.backend = GenericBackendV2( + num_qubits=4, + calibrate_instructions=None, + pulse_channels=False, + noise_info=False, + seed=123, + ) + self.session = Session(backend=self.backend) + self._sampler = SamplerV2(mode=self.session) + self.pm = generate_preset_pass_manager(optimization_level=0, backend=self.backend) + + self._left_params = np.array([[0, 0], [np.pi / 2, 0], [0, np.pi / 2], [np.pi, np.pi]]) + self._right_params = np.array([[0, 0], [0, 0], [np.pi / 2, 0], [0, 0]]) + + def test_1param_pair(self): + """test for fidelity with one pair of parameters""" + fidelity = ComputeUncompute( + self._sampler, pass_manager=self.pm, num_virtual_qubits=self._circuit[0].num_qubits + ) + job = fidelity.run( + self._circuit[0], self._circuit[1], self._left_params[0], self._right_params[0] + ) + result = job.result() + np.testing.assert_allclose(result.fidelities, np.array([1.0])) + + def test_1param_pair_local(self): + """test for fidelity with one pair of parameters""" + fidelity = ComputeUncompute( + self._sampler, + local=True, + pass_manager=self.pm, + num_virtual_qubits=self._circuit[0].num_qubits, + ) + job = fidelity.run( + self._circuit[0], self._circuit[1], self._left_params[0], self._right_params[0] + ) + result = job.result() + np.testing.assert_allclose(result.fidelities, np.array([1.0])) + + def test_local(self): + """test difference between local and global fidelity""" + fidelity_global = ComputeUncompute( + self._sampler, + local=False, + pass_manager=self.pm, + num_virtual_qubits=self._circuit[2].num_qubits, + ) + fidelity_local = ComputeUncompute( + self._sampler, + local=True, + pass_manager=self.pm, + num_virtual_qubits=self._circuit[2].num_qubits, + ) + fidelities = [] + for fidelity in [fidelity_global, fidelity_local]: + job = fidelity.run(self._circuit[2], self._circuit[3]) + result = job.result() + fidelities.append(result.fidelities[0]) + np.testing.assert_allclose(fidelities, np.array([0.25, 0.5]), atol=1e-1, rtol=1e-1) + + def test_4param_pairs(self): + """test for fidelity with four pairs of parameters""" + fidelity = ComputeUncompute( + self._sampler, pass_manager=self.pm, num_virtual_qubits=self._circuit[0].num_qubits + ) + n = len(self._left_params) + job = fidelity.run( + [self._circuit[0]] * n, [self._circuit[1]] * n, self._left_params, self._right_params + ) + results = job.result() + 
np.testing.assert_allclose( + results.fidelities, np.array([1.0, 0.5, 0.25, 0.0]), atol=1e-1, rtol=1e-1 + ) + + def test_symmetry(self): + """test for fidelity with the same circuit""" + fidelity = ComputeUncompute( + self._sampler, pass_manager=self.pm, num_virtual_qubits=self._circuit[0].num_qubits + ) + n = len(self._left_params) + job_1 = fidelity.run( + [self._circuit[0]] * n, [self._circuit[0]] * n, self._left_params, self._right_params + ) + job_2 = fidelity.run( + [self._circuit[0]] * n, [self._circuit[0]] * n, self._right_params, self._left_params + ) + print(job_1) + results_1 = job_1.result() + results_2 = job_2.result() + np.testing.assert_allclose(results_1.fidelities, results_2.fidelities, atol=1e-1, rtol=1e-1) + + def test_no_params(self): + """test for fidelity without parameters""" + fidelity = ComputeUncompute( + self._sampler, pass_manager=self.pm, num_virtual_qubits=self._circuit[2].num_qubits + ) + job = fidelity.run([self._circuit[2]], [self._circuit[3]]) + results = job.result() + np.testing.assert_allclose(results.fidelities, np.array([0.25]), atol=1e-1, rtol=1e-1) + + job = fidelity.run([self._circuit[2]], [self._circuit[3]], [], []) + results = job.result() + np.testing.assert_allclose(results.fidelities, np.array([0.25]), atol=1e-1, rtol=1e-1) + + def test_left_param(self): + """test for fidelity with only left parameters""" + fidelity = ComputeUncompute( + self._sampler, pass_manager=self.pm, num_virtual_qubits=self._circuit[1].num_qubits + ) + n = len(self._left_params) + job = fidelity.run( + [self._circuit[1]] * n, [self._circuit[3]] * n, values_1=self._left_params + ) + results = job.result() + np.testing.assert_allclose( + results.fidelities, np.array([1.0, 0.5, 0.5, 0.0]), atol=1e-1, rtol=1e-1 + ) + + def test_right_param(self): + """test for fidelity with only right parameters""" + fidelity = ComputeUncompute( + self._sampler, pass_manager=self.pm, num_virtual_qubits=self._circuit[1].num_qubits + ) + n = len(self._left_params) + job = fidelity.run( + [self._circuit[3]] * n, [self._circuit[1]] * n, values_2=self._left_params + ) + results = job.result() + np.testing.assert_allclose( + results.fidelities, np.array([1.0, 0.5, 0.5, 0.0]), atol=1e-1, rtol=1e-1 + ) + + def test_not_set_circuits(self): + """test for fidelity with no circuits.""" + fidelity = ComputeUncompute( + self._sampler, pass_manager=self.pm, num_virtual_qubits=self._circuit[0].num_qubits + ) + with self.assertRaises(TypeError): + job = fidelity.run( + circuits_1=None, + circuits_2=None, + values_1=self._left_params, + values_2=self._right_params, + ) + job.result() + + def test_circuit_mismatch(self): + """test for fidelity with different number of left/right circuits.""" + fidelity = ComputeUncompute( + self._sampler, pass_manager=self.pm, num_virtual_qubits=self._circuit[0].num_qubits + ) + n = len(self._left_params) + with self.assertRaises(ValueError): + job = fidelity.run( + [self._circuit[0]] * n, + [self._circuit[1]] * (n + 1), + self._left_params, + self._right_params, + ) + job.result() + + def test_asymmetric_params(self): + """test for fidelity when the 2 circuits have different number of + left/right parameters.""" + + fidelity = ComputeUncompute( + self._sampler, pass_manager=self.pm, num_virtual_qubits=self._circuit[0].num_qubits + ) + n = len(self._left_params) + right_params = [[p] for p in self._right_params[:, 0]] + job = fidelity.run( + [self._circuit[0]] * n, [self._circuit[4]] * n, self._left_params, right_params + ) + result = job.result() + 
np.testing.assert_allclose( + result.fidelities, np.array([0.5, 0.25, 0.25, 0.0]), atol=1e-1, rtol=1e-1 + ) + + def test_input_format(self): + """test for different input format variations""" + + circuit = RealAmplitudes(2) + fidelity = ComputeUncompute( + self._sampler, pass_manager=self.pm, num_virtual_qubits=circuit.num_qubits + ) + values = np.random.random(circuit.num_parameters) + shift = np.ones_like(values) * 0.01 + + # lists of circuits, lists of numpy arrays + job = fidelity.run([circuit], [circuit], [values], [values + shift]) + result_1 = job.result() + + # lists of circuits, lists of lists + shift_val = values + shift + job = fidelity.run([circuit], [circuit], [values.tolist()], [shift_val.tolist()]) + result_2 = job.result() + + # circuits, lists + shift_val = values + shift + job = fidelity.run(circuit, circuit, values.tolist(), shift_val.tolist()) + result_3 = job.result() + + # circuits, np.arrays + job = fidelity.run(circuit, circuit, values, values + shift) + result_4 = job.result() + + np.testing.assert_allclose(result_1.fidelities, result_2.fidelities, atol=1e-1, rtol=1e-1) + np.testing.assert_allclose(result_1.fidelities, result_3.fidelities, atol=1e-1, rtol=1e-1) + np.testing.assert_allclose(result_1.fidelities, result_4.fidelities, atol=1e-1, rtol=1e-1) + + def test_input_measurements(self): + """test for fidelity with measurements on input circuits""" + fidelity = ComputeUncompute( + self._sampler, pass_manager=self.pm, num_virtual_qubits=self._circuit[0].num_qubits + ) + circuit_1 = self._circuit[0] + circuit_1.measure_all() + circuit_2 = self._circuit[1] + circuit_2.measure_all() + + job = fidelity.run(circuit_1, circuit_2, self._left_params[0], self._right_params[0]) + result = job.result() + np.testing.assert_allclose(result.fidelities, np.array([1.0])) + + def test_options(self): + """Test fidelity's run options""" + sampler_shots = Sampler(options={"shots": 1024}) + + with self.subTest("sampler"): + # Only options in sampler + fidelity = ComputeUncompute( + sampler_shots, pass_manager=self.pm, num_virtual_qubits=self._circuit[2].num_qubits + ) + options = fidelity.options + job = fidelity.run(self._circuit[2], self._circuit[3]) + result = job.result() + self.assertEqual(options.__dict__, {"shots": 1024}) + self.assertEqual(result.options.__dict__, {"shots": 1024}) + + with self.subTest("fidelity init"): + # Fidelity default options override sampler + # options and add new fields + fidelity = ComputeUncompute( + sampler_shots, + options={"shots": 2048, "dummy": 100}, + pass_manager=self.pm, + num_virtual_qubits=self._circuit[2].num_qubits, + ) + options = fidelity.options + job = fidelity.run(self._circuit[2], self._circuit[3]) + result = job.result() + self.assertEqual(options.__dict__, {"shots": 2048, "dummy": 100}) + self.assertEqual(result.options.__dict__, {"shots": 2048, "dummy": 100}) + + with self.subTest("fidelity update"): + # Update fidelity options + fidelity = ComputeUncompute( + sampler_shots, + options={"shots": 2048, "dummy": 100}, + pass_manager=self.pm, + num_virtual_qubits=self._circuit[2].num_qubits, + ) + fidelity.update_default_options(shots=100) + options = fidelity.options + job = fidelity.run(self._circuit[2], self._circuit[3]) + result = job.result() + self.assertEqual(options.__dict__, {"shots": 100, "dummy": 100}) + self.assertEqual(result.options.__dict__, {"shots": 100, "dummy": 100}) + + with self.subTest("fidelity run"): + # Run options override fidelity options + fidelity = ComputeUncompute( + sampler_shots, + options={"shots": 
2048, "dummy": 100}, + pass_manager=self.pm, + num_virtual_qubits=self._circuit[2].num_qubits, + ) + job = fidelity.run(self._circuit[2], self._circuit[3], shots=50, dummy=None) + options = fidelity.options + result = job.result() + # Only default + sampler options. Not run. + self.assertEqual(options.__dict__, {"shots": 2048, "dummy": 100}) + self.assertEqual(result.options.__dict__, {"shots": 50, "dummy": None}) + + +if __name__ == "__main__": + unittest.main() diff --git a/tox.ini b/tox.ini index 0f06e2637..f8f98c294 100644 --- a/tox.ini +++ b/tox.ini @@ -1,7 +1,7 @@ [tox] # Sets this min.version because of differences with env_tmp_dir env. minversion = 4.0.2 -envlist = py38, py39, py310, py311, py312, lint, gpu, gpu-amd +envlist = py39, py310, py311, py312, lint, gpu, gpu-amd skipsdist = True [testenv]