diff --git a/.github/ISSUE_TEMPLATE/bug_report.md b/.github/ISSUE_TEMPLATE/bug_report.md new file mode 100644 index 0000000..fb5c9f9 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/bug_report.md @@ -0,0 +1,25 @@ +--- +name: Bug report +about: Create a report to help us improve +title: '' +labels: '' +assignees: '' + +--- + +**Describe the bug** +A clear and concise description of what the bug is. + +**Expected behaviour** +A clear and concise description of what you expected to happen. + +**Environment (please complete the following information):** + - aiokafka version (`python -c "import aiokafka; print(aiokafka.__version__)"`): + - kafka-python version (`python -c "import kafka; print(kafka.__version__)"`): + - Kafka Broker version (`kafka-topics.sh --version`): + - Other information (Confluent Cloud version, etc.): + +**Reproducible example** +```python +# Add a short Python script or Docker configuration that can reproduce the issue. +``` diff --git a/.github/ISSUE_TEMPLATE/feature_request.md b/.github/ISSUE_TEMPLATE/feature_request.md new file mode 100644 index 0000000..5cd9fdd --- /dev/null +++ b/.github/ISSUE_TEMPLATE/feature_request.md @@ -0,0 +1,14 @@ +--- +name: Feature request +about: Suggest an idea for this project +title: '' +labels: enhancement +assignees: '' + +--- + +**Describe the solution you'd like** +A clear and concise description of what you want to happen. + +**Additional context** +Add any other context about the feature request here (Other library implementation examples, Kafka Improvement Proposal (KIP), Kafka jira ticket, etc.). diff --git a/.github/ISSUE_TEMPLATE/question.md b/.github/ISSUE_TEMPLATE/question.md new file mode 100644 index 0000000..3e27018 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/question.md @@ -0,0 +1,10 @@ +--- +name: Question +about: Feel free to ask questions if you don't understand something ^^ +title: "[QUESTION] " +labels: question +assignees: '' + +--- + + diff --git a/.github/pull_request_template.md b/.github/pull_request_template.md new file mode 100644 index 0000000..f2f6a5f --- /dev/null +++ b/.github/pull_request_template.md @@ -0,0 +1,24 @@ + +### Changes + +Fixes # + + + +### Checklist + +- [ ] I think the code is well written +- [ ] Unit tests for the changes exist +- [ ] Documentation reflects the changes +- [ ] Add a new news fragment into the `CHANGES` folder + * name it `<issue_id>.<type>` (e.g. `588.bugfix`) + * if you don't have an `issue_id` change it to the pr id after creating the PR + * ensure type is one of the following: + * `.feature`: Signifying a new feature. + * `.bugfix`: Signifying a bug fix. + * `.doc`: Signifying a documentation improvement. + * `.removal`: Signifying a deprecation or removal of public API. + * `.misc`: A ticket has been closed, but it is not of interest to users.
+ * Make sure to use full sentences with correct case and punctuation, for example: `Fix issue with non-ascii contents in doctest text files.` diff --git a/.github/workflows/publish.yml b/.github/workflows/publish.yml new file mode 100644 index 0000000..a5b31ab --- /dev/null +++ b/.github/workflows/publish.yml @@ -0,0 +1,199 @@ +# This workflow will upload a Python Package using Twine when a release is created +# For more information see: https://help.github.com/en/actions/language-and-framework-guides/using-python-with-github-actions#publishing-to-package-registries + +name: Publish + +on: + release: + types: [created] + +jobs: + package-source: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v2 + - uses: actions/setup-python@v1 + with: + python-version: 3.7 + - name: Prepare C files to include + run: | + python -m pip install --upgrade pip setuptools + python -m pip install -r requirements-cython.txt + # Make sure we install to have all C files shipped with the bundle + python -m pip install -vv -Ue . # We set -vv to see compiler exceptions/warnings + - name: Build source package + run: python setup.py sdist + - name: Upload source package + uses: actions/upload-artifact@v2 + with: + name: dist + path: dist/ + + package-wheel: + runs-on: ${{ matrix.os }} + strategy: + fail-fast: false + matrix: + os: [ubuntu-latest, macos-latest, windows-latest] + steps: + - uses: actions/checkout@v2 + - uses: actions/setup-python@v1 + with: + python-version: 3.7 + - name: Build wheels + env: + CIBW_BEFORE_BUILD_LINUX: pip install -r requirements-cython.txt && yum install -y zlib-devel + # On Windows and macOS the zlib library should be preinstalled + CIBW_BEFORE_BUILD: pip install -r requirements-cython.txt + CIBW_BUILD_VERBOSITY: 2 + CIBW_SKIP: cp27-* cp35-* pp27-* + run: | + python -m pip install --upgrade pip setuptools + pip install cibuildwheel + cibuildwheel --output-dir dist + shell: bash + - name: Upload wheels + uses: actions/upload-artifact@v2 + with: + name: dist + path: dist/ + + test-wheels-windows: + needs: [package-source, package-wheel] + runs-on: windows-latest + + strategy: + matrix: + python: [3.6, 3.7] + include: + - python: 3.6 + snappy_whl: tools/python_snappy-0.5.4-cp36-cp36m-win_amd64.whl + aiokafka_whl: dist/aiokafka-*-cp36-cp36m-win_amd64.whl + - python: 3.7 + snappy_whl: tools/python_snappy-0.5.4-cp37-cp37m-win_amd64.whl + aiokafka_whl: dist/aiokafka-*-cp37-cp37m-win_amd64.whl + + steps: + - uses: actions/checkout@v2 + - name: Download distributions + uses: actions/download-artifact@v2 + with: + name: dist + path: dist/ + - name: Set up Python + uses: actions/setup-python@v2 + with: + python-version: ${{ matrix.python }} + + - name: Install python dependencies + run: | + pip install --upgrade pip setuptools wheel + pip install ${{ matrix.snappy_whl }} + pip install -r requirements-win-test.txt + pip install ${{ matrix.aiokafka_whl }} + shell: bash + + - name: Run Unit Tests + run: | + # Remove source code to be sure we use wheel code + rm -rf aiokafka + make ci-test-unit + shell: bash + + test-wheels-mac: + needs: [package-source, package-wheel] + runs-on: macos-latest + + strategy: + matrix: + python: [3.6, 3.7] + include: + - python: 3.6 + aiokafka_whl: dist/aiokafka-*-cp36-cp36m-macosx_10_9_x86_64.whl + - python: 3.7 + aiokafka_whl: dist/aiokafka-*-cp37-cp37m-macosx_10_9_x86_64.whl + + steps: + - uses: actions/checkout@v2 + - name: Download distributions + uses: actions/download-artifact@v2 + with: + name: dist + path: dist/ + - name: Set up Python + uses:
actions/setup-python@v2 + with: + python-version: ${{ matrix.python }} + + - name: Install system dependencies + run: | + brew install snappy + - name: Install python dependencies + run: | + pip install --upgrade pip setuptools wheel + pip install -r requirements-ci.txt + pip install ${{ matrix.aiokafka_whl }} + + - name: Run Unit Tests + run: | + # Remove source code to be sure we use wheel code + rm -rf aiokafka + make ci-test-unit + + test-wheels-linux: + needs: [package-source, package-wheel] + runs-on: ubuntu-latest + + strategy: + matrix: + python: [3.6, 3.7] + include: + - python: 3.6 + aiokafka_whl: dist/aiokafka-*-cp36-cp36m-manylinux1_x86_64.whl + - python: 3.7 + aiokafka_whl: dist/aiokafka-*-cp37-cp37m-manylinux1_x86_64.whl + + steps: + - uses: actions/checkout@v2 + - name: Download distributions + uses: actions/download-artifact@v2 + with: + name: dist + path: dist/ + - name: Set up Python + uses: actions/setup-python@v2 + with: + python-version: ${{ matrix.python }} + + - name: Install system dependencies + run: | + sudo apt-get install -y libsnappy-dev libkrb5-dev + - name: Install python dependencies + run: | + pip install --upgrade pip setuptools wheel + pip install -r requirements-ci.txt + pip install ${{ matrix.aiokafka_whl }} + + - name: Run Unit Tests + run: | + # Remove source code to be sure we use wheel code + rm -rf aiokafka + make ci-test-unit + + deploy: + + runs-on: ubuntu-latest + needs: [test-wheels-linux, test-wheels-mac, test-wheels-windows] + + steps: + - uses: actions/checkout@v2 + - name: Download distributions + uses: actions/download-artifact@v2 + with: + name: dist + path: dist/ + - name: Publish to PyPI + uses: pypa/gh-action-pypi-publish@master + with: + user: ${{ secrets.PYPI_USERNAME }} + password: ${{ secrets.PYPI_PASSWORD }} diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml new file mode 100644 index 0000000..106cc6d --- /dev/null +++ b/.github/workflows/tests.yml @@ -0,0 +1,318 @@ +# This workflow will install Python dependencies, run tests and lint with a variety of Python versions +# For more information see: https://help.github.com/actions/language-and-framework-guides/using-python-with-github-actions + +name: Tests + +on: + push: + branches: [ master ] + tags: + - "v0.[0-9]+.[0-9]+" + - "v0.[0-9]+.[0-9]+.dev*" + pull_request: + branches: [ master ] + +jobs: + test-sanity: + + runs-on: ubuntu-latest + + steps: + - uses: actions/checkout@v2 + - name: Set up Python + uses: actions/setup-python@v2 + with: + python-version: 3.7 + + - name: Install system dependencies + run: | + sudo apt-get install -y libsnappy-dev libkrb5-dev + + - name: Get pip cache dir + id: pip-cache + run: | + python -m pip install -U "pip>=20.1" + echo "::set-output name=dir::$(pip cache dir)" + + - name: Cache packages + uses: actions/cache@v1 + with: + path: ${{ steps.pip-cache.outputs.dir }} + key: ${{ runner.os }}-py-3.7-${{ hashFiles('requirements-ci.txt') }}-${{ hashFiles('setup.py') }} + # If miss on key takes any other cache with different hashes, will download correct ones on next step anyway + restore-keys: | + ${{ runner.os }}-py-3.7- + + - name: Install python dependencies + run: | + pip install --upgrade pip setuptools wheel + pip install -r requirements-ci.txt + pip install -vv -Ue . 
# We set -vv to see compiler exceptions/warnings + + - name: Run Unit Tests + run: | + make ci-test-unit + + - name: Lint code + run: | + make flake + + - name: Check readme for package + run: | + make check-readme + + test-windows: + needs: test-sanity + runs-on: windows-latest + + strategy: + matrix: + python: [3.6, 3.7] + include: + - python: 3.6 + snappy_whl: tools/python_snappy-0.5.4-cp36-cp36m-win_amd64.whl + - python: 3.7 + snappy_whl: tools/python_snappy-0.5.4-cp37-cp37m-win_amd64.whl + + steps: + - uses: actions/checkout@v2 + with: + ref: ${{ github.event.pull_request.head.sha }} + - name: Set up Python + uses: actions/setup-python@v2 + with: + python-version: ${{ matrix.python }} + + - name: Get pip cache dir + id: pip-cache + run: | + python -m pip install -U "pip>=20.1" + echo "::set-output name=dir::$(pip cache dir)" + + - name: Cache packages + uses: actions/cache@v1 + with: + path: ${{ steps.pip-cache.outputs.dir }} + key: ${{ runner.os }}-py-${{ matrix.python }}-${{ hashFiles('requirements-win-test.txt') }}-${{ hashFiles('setup.py') }} + # If miss on key takes any other cache with different hashes, will download correct ones on next step anyway + restore-keys: | + ${{ runner.os }}-py-${{ matrix.python }}- + + - name: Install python dependencies + run: | + pip install --upgrade pip setuptools wheel + pip install ${{ matrix.snappy_whl }} + pip install -r requirements-win-test.txt + pip install -vv -Ue . # We set -vv to see compiler exceptions/warnings + + - name: Run Unit Tests + run: | + make ci-test-unit + mv coverage.xml coverage-ext.xml + env: + PYTHONASYNCIODEBUG: "1" + + - name: Run Unit Tests without extensions + run: | + make ci-test-unit + mv coverage.xml coverage-py.xml + env: + AIOKAFKA_NO_EXTENSIONS: "1" + + - name: Upload coverage to Codecov + uses: codecov/codecov-action@v1 + with: + file: ./coverage-ext.xml + flags: unit,cext + name: test-windows-${{ matrix.python }}-ext + if: ${{ always() }} + + - name: Upload coverage to Codecov + uses: codecov/codecov-action@v1 + with: + file: ./coverage-py.xml + flags: unit,purepy + name: test-windows-${{ matrix.python }}-py + if: ${{ always() }} + + test-mac: + needs: test-sanity + runs-on: macos-latest + + strategy: + matrix: + python: [3.6, 3.7] + + steps: + - uses: actions/checkout@v2 + with: + ref: ${{ github.event.pull_request.head.sha }} + - name: Set up Python + uses: actions/setup-python@v2 + with: + python-version: ${{ matrix.python }} + + - name: Get pip cache dir + id: pip-cache + run: | + python -m pip install -U "pip>=20.1" + echo "::set-output name=dir::$(pip cache dir)" + + - name: Cache packages + uses: actions/cache@v1 + with: + path: ${{ steps.pip-cache.outputs.dir }} + key: ${{ runner.os }}-py-${{ matrix.python }}-${{ hashFiles('requirements-ci.txt') }}-${{ hashFiles('setup.py') }} + # If miss on key takes any other cache with different hashes, will download correct ones on next step anyway + restore-keys: | + ${{ runner.os }}-py-${{ matrix.python }}- + + - name: Install system dependencies + run: | + brew install snappy + + - name: Install python dependencies + run: | + pip install --upgrade pip setuptools wheel + pip install -r requirements-ci.txt + pip install -vv -Ue . 
# We set -vv to see compiler exceptions/warnings + + - name: Run All Tests + run: | + make ci-test-unit + mv coverage.xml coverage-ext.xml + env: + PYTHONASYNCIODEBUG: "1" + + - name: Run All Tests without extensions + run: | + make ci-test-unit + mv coverage.xml coverage-py.xml + env: + AIOKAFKA_NO_EXTENSIONS: "1" + + - name: Upload coverage to Codecov + uses: codecov/codecov-action@v1 + with: + file: ./coverage-ext.xml + flags: unit,cext + name: test-mac-${{ matrix.python }}-ext + if: ${{ always() }} + + - name: Upload coverage to Codecov + uses: codecov/codecov-action@v1 + with: + file: ./coverage-py.xml + flags: unit,purepy + name: test-mac-${{ matrix.python }}-py + if: ${{ always() }} + + test-linux: + needs: test-sanity + runs-on: ubuntu-latest + + strategy: + matrix: + include: + - python: 3.7 + kafka: "2.4.0" + scala: "2.12" + + # Older python versions against latest broker + - python: 3.5 + kafka: "2.4.0" + scala: "2.12" + - python: 3.6 + kafka: "2.4.0" + scala: "2.12" + + # Older brokers against latest python version + - python: 3.7 + kafka: "0.9.0.1" + scala: "2.11" + - python: 3.7 + kafka: "0.10.2.1" + scala: "2.11" + - python: 3.7 + kafka: "0.11.0.3" + scala: "2.12" + - python: 3.7 + kafka: "1.1.1" + scala: "2.12" + - python: 3.7 + kafka: "2.1.1" + scala: "2.12" + - python: 3.7 + kafka: "2.2.2" + scala: "2.12" + - python: 3.7 + kafka: "2.3.1" + scala: "2.12" + fail-fast: false + + steps: + - uses: actions/checkout@v2 + with: + ref: ${{ github.event.pull_request.head.sha }} + - name: Set up Python + uses: actions/setup-python@v2 + with: + python-version: ${{ matrix.python }} + + - name: Install system dependencies + run: | + sudo apt-get install -y libsnappy-dev libkrb5-dev krb5-user + + - name: Get pip cache dir + id: pip-cache + run: | + python -m pip install -U "pip>=20.1" + echo "::set-output name=dir::$(pip cache dir)" + + - name: Cache packages + uses: actions/cache@v1 + with: + path: ${{ steps.pip-cache.outputs.dir }} + key: ${{ runner.os }}-py-${{ matrix.python }}-${{ hashFiles('requirements-ci.txt') }}-${{ hashFiles('setup.py') }} + # If miss on key takes any other cache with different hashes, will download correct ones on next step anyway + restore-keys: | + ${{ runner.os }}-py-${{ matrix.python }}- + + - name: Install python dependencies + run: | + pip install --upgrade pip setuptools wheel + pip install -r requirements-ci.txt + pip install -vv -Ue . 
# We set -vv to see compiler exceptions/warnings + + - name: Run All Tests + run: | + make ci-test-all + mv coverage.xml coverage-ext.xml + env: + SCALA_VERSION: ${{ matrix.scala }} + KAFKA_VERSION: ${{ matrix.kafka }} + + - name: Run All Tests without extensions + run: | + make ci-test-all + mv coverage.xml coverage-py.xml + env: + AIOKAFKA_NO_EXTENSIONS: "1" + SCALA_VERSION: ${{ matrix.scala }} + KAFKA_VERSION: ${{ matrix.kafka }} + + - name: Upload coverage to Codecov + uses: codecov/codecov-action@v1 + with: + file: ./coverage-ext.xml + flags: integration,cext + name: test-linux-${{ matrix.python }}-${{ matrix.kafka }}-${{ matrix.scala }}-ext + if: ${{ always() }} + + - name: Upload coverage without cext to Codecov + uses: codecov/codecov-action@v1 + with: + file: ./coverage-py.xml + flags: integration,purepy + name: test-linux-${{ matrix.python }}-${{ matrix.kafka }}-${{ matrix.scala }}-py + if: ${{ always() }} diff --git a/.travis.yml b/.travis.yml_bak similarity index 75% rename from .travis.yml rename to .travis.yml_bak index 03b605a..1bdab13 100644 --- a/.travis.yml +++ b/.travis.yml_bak @@ -40,28 +40,28 @@ matrix: python: 3.5 services: - docker - env: KAFKA_VERSION=2.1.0 SCALA_VERSION=2.12 PYTHONASYNCIODEBUG=1 + env: KAFKA_VERSION=2.4.0 SCALA_VERSION=2.12 PYTHONASYNCIODEBUG=1 script: - make ci-test-all - sudo: true python: 3.5 services: - docker - env: KAFKA_VERSION=2.1.0 SCALA_VERSION=2.12 PYTHONASYNCIODEBUG=1 AIOKAFKA_NO_EXTENSIONS=1 + env: KAFKA_VERSION=2.4.0 SCALA_VERSION=2.12 PYTHONASYNCIODEBUG=1 AIOKAFKA_NO_EXTENSIONS=1 script: - make ci-test-all - sudo: true python: 3.6 services: - docker - env: KAFKA_VERSION=2.1.0 SCALA_VERSION=2.12 PYTHONASYNCIODEBUG=1 + env: KAFKA_VERSION=2.4.0 SCALA_VERSION=2.12 PYTHONASYNCIODEBUG=1 script: - make ci-test-all - sudo: true python: 3.6 services: - docker - env: KAFKA_VERSION=2.1.0 SCALA_VERSION=2.12 PYTHONASYNCIODEBUG=1 AIOKAFKA_NO_EXTENSIONS=1 + env: KAFKA_VERSION=2.4.0 SCALA_VERSION=2.12 PYTHONASYNCIODEBUG=1 AIOKAFKA_NO_EXTENSIONS=1 script: - make ci-test-all - sudo: true @@ -69,7 +69,7 @@ matrix: dist: xenial services: - docker - env: KAFKA_VERSION=2.1.0 SCALA_VERSION=2.12 PYTHONASYNCIODEBUG=1 + env: KAFKA_VERSION=2.4.0 SCALA_VERSION=2.12 PYTHONASYNCIODEBUG=1 script: - make ci-test-all - sudo: true @@ -77,7 +77,7 @@ matrix: dist: xenial services: - docker - env: KAFKA_VERSION=2.1.0 SCALA_VERSION=2.12 PYTHONASYNCIODEBUG=1 AIOKAFKA_NO_EXTENSIONS=1 + env: KAFKA_VERSION=2.4.0 SCALA_VERSION=2.12 PYTHONASYNCIODEBUG=1 AIOKAFKA_NO_EXTENSIONS=1 script: - make ci-test-all @@ -103,6 +103,34 @@ matrix: env: KAFKA_VERSION=0.11.0.3 SCALA_VERSION=2.12 PYTHONASYNCIODEBUG=1 script: - make ci-test-all + - sudo: true + python: *mainstream_python + services: + - docker + env: KAFKA_VERSION=1.1.1 SCALA_VERSION=2.12 PYTHONASYNCIODEBUG=1 + script: + - make ci-test-all + - sudo: true + python: *mainstream_python + services: + - docker + env: KAFKA_VERSION=2.1.1 SCALA_VERSION=2.12 PYTHONASYNCIODEBUG=1 + script: + - make ci-test-all + - sudo: true + python: *mainstream_python + services: + - docker + env: KAFKA_VERSION=2.2.2 SCALA_VERSION=2.12 PYTHONASYNCIODEBUG=1 + script: + - make ci-test-all + - sudo: true + python: *mainstream_python + services: + - docker + env: KAFKA_VERSION=2.3.1 SCALA_VERSION=2.12 PYTHONASYNCIODEBUG=1 + script: + - make ci-test-all - stage: *stage_deploy python: *mainstream_python diff --git a/CHANGES.rst b/CHANGES.rst index 3edee3e..14d0f4f 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -1,14 +1,63 @@ -CHANGES -------- +========= 
+Changelog +========= + +628.bugfix +Fix a memory leak in the Kafka consumer when the consumer is idle and not consuming any messages. + +618.feature +Added `OAUTHBEARER` as a new `sasl_mechanism`. 523.feature ^^^^^^^^^^^ Add `consumer.last_poll_timestamp(partition)` which gives the ms timestamp of the last update of `highwater` and `lso`. +0.6.0 (2020-05-15) +================== + +New features: + +* Add async context manager support for both Producer and Consumer. (pr #613 and #494 by @nimish) +* Upgrade to kafka-python version 2.0.0 and set it as non-strict + parameter. (issue #590 by @yumendy and #558 by @originalgremlin) +* Make loop argument optional (issue #544) +* SCRAM-SHA-256 and SCRAM-SHA-512 support for SASL authentication (issue #571 and pr #588 by @SukiCZ) +* Added headers param to AIOKafkaProducer.send_and_wait (pr #553 by @megabotan) +* Add `consumer.last_poll_timestamp(partition)` which gives the ms timestamp of the last + update of `highwater` and `lso`. (issue #523 and pr #526 by @aure-olli) +* Change all code base to async-await (pr #522) +* Minor: added PR and ISSUE templates to GitHub + + +Bugfixes: + +* Ignore debug package generation on bdist_rpm command. (issue #599 by @gabriel-tincu) +* UnknownMemberId was raised to the user instead of retrying on auto commit. (issue #611) +* Fix issue with messages not being read after subscriptions change with group_id=None. (issue #536) +* Handle `RequestTimedOutError` in `coordinator._do_commit_offsets()` method to explicitly mark + coordinator as dead. (issue #584 and pr #585 by @FedirAlifirenko) +* Added handling `asyncio.TimeoutError` on metadata request to broker and metadata update. + (issue #576 and pr #577 by @MichalMazurek) +* Too many reqs on kafka not available (issue #496 by @lud4ik) +* Consumer.seek_to_committed now returns mapping of committed offsets (pr #531 by @ask) +* Message Accumulator: add_message being recursive eventually overflows (pr #530 by @ask) + + +Improved Documentation: + +* Clarify auto_offset_reset usage. (pr #601 by @dargor) +* Fix spelling errors in comments and documentation using codespell (pr #567 by @mauritsvdvijgh) +* Delete old benchmark file (issue #546 by @jeffwidman) +* Fix a few typos in docs (pr #573 and pr #563 by @ultrabug) +* Fix typos, spelling, grammar, etc (pr #545 and pr #547 by @jeffwidman) +* Fix typo in docs (pr #541 by @pablogamboa) +* Fix documentation for benchmark (pr #537 by @abhishekray07) +* Better logging for bad CRC (pr #529 by @ask) + 0.5.2 (2019-03-10) -^^^^^^^^^^^^^^^^^^ +================== Bugfixes: @@ -18,7 +67,7 @@ Bugfixes: 0.5.1 (2019-03-10) -^^^^^^^^^^^^^^^^^^ +================== New features: @@ -39,14 +88,14 @@ Bugfixes: 0.5.0 (2018-12-28) -^^^^^^^^^^^^^^^^^^ +================== New features: * Add full support for V2 format messages with a Cython extension. Those are used for Kafka >= 0.11.0.0 * Added support for transactional producing (issue #182) -* Added support for indempotent producing with `enable_idempotence` parameter +* Added support for idempotent producing with `enable_idempotence` parameter * Added support for `fetch_max_bytes` in AIOKafkaConsumer.
This can help limit the amount of data transferred in a single roundtrip to broker, which is essential for consumers with large amount of partitions @@ -63,7 +112,7 @@ Bugfixes: 0.4.3 (2018-11-01) -^^^^^^^^^^^^^^^^^^ +================== Bugfix: @@ -72,7 +121,7 @@ Bugfix: 0.4.2 (2018-09-12) -^^^^^^^^^^^^^^^^^^ +================== Bugfix: @@ -89,7 +138,7 @@ Infrastructure: * Refactored travis CI build pipeline 0.4.1 (2018-05-13) -^^^^^^^^^^^^^^^^^^ +================== * Fix issue when offset commit error reports wrong partition in log (issue #353) * Add ResourceWarning when Producer, Consumer or Connections are not closed @@ -98,7 +147,7 @@ Infrastructure: 0.4.0 (2018-01-30) -^^^^^^^^^^^^^^^^^^ +================== Major changes: @@ -108,7 +157,7 @@ Major changes: versions later * Added C extension for Records parsing protocol, boosting the speed of produce/consume routines significantly -* Added an experimental batch producer API for unique cases, where user want's +* Added an experimental batch producer API for unique cases, where user wants to control batching himself (by @shargan) @@ -144,7 +193,7 @@ Big thanks to: 0.3.1 (2017-09-19) -^^^^^^^^^^^^^^^^^^ +================== * Added `AIOKafkaProducer.flush()` method. (PR #209 by @vineet-rh) * Fixed a bug with uvloop involving `float("inf")` for timeout. (PR #210 by @@ -153,7 +202,7 @@ Big thanks to: 0.3.0 (2017-08-17) -^^^^^^^^^^^^^^^^^^ +================== * Moved all public structures and errors to `aiokafka` namespace. You will no longer need to import from `kafka` namespace. @@ -169,7 +218,7 @@ Big thanks to: Producer interface. 0.2.3 (2017-07-23) -^^^^^^^^^^^^^^^^^^ +================== * Fixed retry problem in Producer, when buffer is not reset to 0 offset. Thanks to @ngavrysh for the fix in Tubular/aiokafka fork. (issue #184) @@ -180,7 +229,7 @@ Big thanks to: 0.2.2 (2017-04-17) -^^^^^^^^^^^^^^^^^^ +================== * Reconnect after KafkaTimeoutException. (PR #149 by @Artimi) * Fixed compacted topic handling. It could skip messages if those were @@ -197,7 +246,7 @@ Big thanks to @Artimi for pointing out several of those issues. 0.2.1 (2017-02-19) -^^^^^^^^^^^^^^^^^^ +================== * Add a check to wait topic autocreation in Consumer, instead of raising UnknownTopicOrPartitionError (PR #92 by fabregas) @@ -211,7 +260,7 @@ Big thanks to @Artimi for pointing out several of those issues. 0.2.0 (2016-12-18) -^^^^^^^^^^^^^^^^^^ +================== * Added SSL support. (PR #81 by Drizzt1991) * Fixed UnknownTopicOrPartitionError error on first message for autocreated topic (PR #96 by fabregas) @@ -224,7 +273,7 @@ Big thanks to @Artimi for pointing out several of those issues. 0.1.4 (2016-11-07) -^^^^^^^^^^^^^^^^^^ +================== * Bumped kafka-python version to 1.3.1 and Kafka to 0.10.1.0. * Fixed auto version detection, to correctly handle 0.10.0.0 version @@ -237,7 +286,7 @@ Big thanks to @fabregas for the hard work on this release (PR #60) 0.1.3 (2016-10-18) -^^^^^^^^^^^^^^^^^^ +================== * Fixed bug with infinite loop on heartbeats with autocommit=True. #44 * Bumped kafka-python to version 1.1.1 @@ -246,7 +295,7 @@ Big thanks to @fabregas for the hard work on this release (PR #60) 0.1.2 (2016-04-30) -^^^^^^^^^^^^^^^^^^ +================== * Added Python3.5 usage example to docs * Don't raise retriable exceptions in 3.5's async for iterator @@ -254,12 +303,12 @@ Big thanks to @fabregas for the hard work on this release (PR #60) 0.1.1 (2016-04-15) -^^^^^^^^^^^^^^^^^^ +================== -* Fix packaging issues. 
Removed unneded files from package. +* Fix packaging issues. Removed unneeded files from package. 0.1.0 (2016-04-15) -^^^^^^^^^^^^^^^^^^ +================== Initial release diff --git a/Makefile b/Makefile index b785b71..1cc1536 100644 --- a/Makefile +++ b/Makefile @@ -1,34 +1,46 @@ # Some simple testing tasks (sorry, UNIX only). -FLAGS= +FLAGS?=--maxfail=3 SCALA_VERSION?=2.12 -KAFKA_VERSION?=2.1.0 +KAFKA_VERSION?=2.2.2 DOCKER_IMAGE=aiolibs/kafka:$(SCALA_VERSION)_$(KAFKA_VERSION) DIFF_BRANCH=origin/master +FORMATTED_AREAS=aiokafka/util.py aiokafka/structs.py setup: pip install -r requirements-dev.txt pip install -Ue . -flake: - extra=$$(python -c "import sys;sys.stdout.write('--exclude tests/test_pep492.py') if sys.version_info[:3] < (3, 5, 0) else sys.stdout.write('')"); \ - flake8 aiokafka tests $$extra +format: + isort -rc $(FORMATTED_AREAS) setup.py + black $(FORMATTED_AREAS) setup.py -test: - py.test -s --no-print-logs --docker-image $(DOCKER_IMAGE) $(FLAGS) tests +flake: lint +lint: + black --check $(FORMATTED_AREAS) setup.py + @if ! isort -c -rc $(FORMATTED_AREAS) setup.py; then \ + echo "Import sort errors, run 'make format' to fix them!!!"; \ + isort --diff -rc $(FORMATTED_AREAS) setup.py; \ + false; \ + fi + flake8 aiokafka tests setup.py + mypy $(FORMATTED_AREAS) -vtest: - py.test -s -v --docker-image $(DOCKER_IMAGE) $(FLAGS) tests +test: flake + py.test -s --show-capture=no --docker-image $(DOCKER_IMAGE) $(FLAGS) tests + +vtest: flake + py.test -s -v --log-level INFO --docker-image $(DOCKER_IMAGE) $(FLAGS) tests cov cover coverage: flake py.test -s --cov aiokafka --cov-report html --docker-image $(DOCKER_IMAGE) $(FLAGS) tests @echo "open file://`pwd`/htmlcov/index.html" ci-test-unit: - py.test -s --cov aiokafka --cov-report html $(FLAGS) tests + py.test -s --log-level DEBUG --cov aiokafka --cov-report xml --color=yes $(FLAGS) tests ci-test-all: - py.test -s --cov aiokafka --cov-report html --docker-image $(DOCKER_IMAGE) $(FLAGS) -k sasl tests + py.test -s -v --log-level DEBUG --cov aiokafka --cov-report xml --color=yes --docker-image $(DOCKER_IMAGE) $(FLAGS) tests coverage.xml: .coverage coverage xml diff --git a/aiokafka/__init__.py b/aiokafka/__init__.py index f3c2cb2..9a537e3 100644 --- a/aiokafka/__init__.py +++ b/aiokafka/__init__.py @@ -1,4 +1,4 @@ -__version__ = '1.1.6' # noqa +__version__ = '1.1.6ec46014' # noqa from .abc import ConsumerRebalanceListener from .client import AIOKafkaClient @@ -16,6 +16,8 @@ __all__ = [ # Clients API + 'BaseProducer', + 'MultiTXNProducer', "AIOKafkaProducer", "AIOKafkaConsumer", # ABC's diff --git a/aiokafka/abc.py b/aiokafka/abc.py index 088cdc6..bcb9deb 100644 --- a/aiokafka/abc.py +++ b/aiokafka/abc.py @@ -87,6 +87,57 @@ def on_partitions_assigned(self, assigned): pass +# This statement is compatible with both Python 2.7 & 3+ +ABC = abc.ABCMeta('ABC', (object,), {'__slots__': ()}) + + +class AbstractTokenProvider(ABC): + """ + A Token Provider must be used for the SASL OAuthBearer protocol. + The implementation should ensure token reuse so that multiple + calls at connect time do not create multiple tokens. The implementation + should also periodically refresh the token in order to guarantee + that each call returns an unexpired token. A timeout error should + be returned after a short period of inactivity so that the + broker can log debugging info and retry. 
+ Token Providers MUST implement the token() method. + """ + + def __init__(self, **config): + pass + + @abc.abstractmethod + async def token(self): + """ + An async callback returning a (str) ID/Access Token to be sent to + the Kafka client. In cases where a synchronous callback is needed, + implementations like the following can be used: + .. highlight:: python + .. code-block:: python + from aiokafka.abc import AbstractTokenProvider + + class CustomTokenProvider(AbstractTokenProvider): + async def token(self): + return asyncio.get_running_loop().run_in_executor( + None, self._token) + + def _token(self): + # The actual synchronous token callback. + """ + pass + + def extensions(self): + """ + This is an OPTIONAL method that may be implemented. + Returns a map of key-value pairs that can + be sent with the SASL/OAUTHBEARER initial client request. If + not implemented, the values are ignored. This feature is only available + in Kafka >= 2.1.0. + """ + return {} + + __all__ = [ - "ConsumerRebalanceListener" + "ConsumerRebalanceListener", + "AbstractTokenProvider" ] diff --git a/aiokafka/client.py b/aiokafka/client.py index 3ec5b99..6a0f715 100644 --- a/aiokafka/client.py +++ b/aiokafka/client.py @@ -15,13 +15,15 @@ from aiokafka.protocol.produce import ProduceRequest from aiokafka.errors import ( KafkaError, - ConnectionError, + KafkaConnectionError, NodeNotReadyError, RequestTimedOutError, UnknownTopicOrPartitionError, UnrecognizedBrokerVersion, StaleMetadata) -from aiokafka.util import ensure_future, create_future, parse_kafka_version +from aiokafka.util import ( + ensure_future, create_future, get_running_loop, parse_kafka_version +) __all__ = ['AIOKafkaClient'] @@ -76,12 +78,12 @@ class AIOKafkaClient: Default: None. connections_max_idle_ms (int): Close idle connections after the number of milliseconds specified by this config. Specifying `None` will - disable idle checks. Default: 540000 (9hours). + disable idle checks. Default: 540000 (9 minutes).
""" _closed = False - def __init__(self, *, loop, bootstrap_servers='localhost', + def __init__(self, *, loop=None, bootstrap_servers='localhost', client_id='aiokafka-' + __version__, metadata_max_age_ms=300000, request_timeout_ms=40000, @@ -94,7 +96,12 @@ def __init__(self, *, loop, bootstrap_servers='localhost', sasl_plain_username=None, sasl_plain_password=None, sasl_kerberos_service_name='kafka', - sasl_kerberos_domain_name=None): + sasl_kerberos_domain_name=None, + sasl_oauth_token_provider=None + ): + if loop is None: + loop = get_running_loop() + if security_protocol not in ( 'SSL', 'PLAINTEXT', 'SASL_PLAINTEXT', 'SASL_SSL'): raise ValueError("`security_protocol` should be SSL or PLAINTEXT") @@ -102,10 +109,14 @@ def __init__(self, *, loop, bootstrap_servers='localhost', raise ValueError( "`ssl_context` is mandatory if security_protocol=='SSL'") if security_protocol in ["SASL_SSL", "SASL_PLAINTEXT"]: - if sasl_mechanism not in ("PLAIN", "GSSAPI"): + if sasl_mechanism not in ( + "PLAIN", "GSSAPI", "SCRAM-SHA-256", "SCRAM-SHA-512", + "OAUTHBEARER"): raise ValueError( - "only `PLAIN` and `GSSAPI` sasl_mechanism " - "are supported at the moment") + "only `PLAIN`, `GSSAPI`, `SCRAM-SHA-256`, " + "`SCRAM-SHA-512` and `OAUTHBEARER`" + "sasl_mechanism are supported " + "at the moment") if sasl_mechanism == "PLAIN" and \ (sasl_plain_username is None or sasl_plain_password is None): raise ValueError( @@ -128,6 +139,7 @@ def __init__(self, *, loop, bootstrap_servers='localhost', self._sasl_plain_password = sasl_plain_password self._sasl_kerberos_service_name = sasl_kerberos_service_name self._sasl_kerberos_domain_name = sasl_kerberos_domain_name + self._sasl_oauth_token_provider = sasl_oauth_token_provider self.cluster = ClusterMetadata(metadata_max_age_ms=metadata_max_age_ms) @@ -200,6 +212,7 @@ async def bootstrap(self): sasl_plain_password=self._sasl_plain_password, sasl_kerberos_service_name=self._sasl_kerberos_service_name, # noqa: ignore=E501 sasl_kerberos_domain_name=self._sasl_kerberos_domain_name, + sasl_oauth_token_provider=self._sasl_oauth_token_provider, version_hint=version_hint) except (OSError, asyncio.TimeoutError) as err: log.error('Unable connect to "%s:%s": %s', host, port, err) @@ -207,7 +220,7 @@ async def bootstrap(self): try: metadata = await bootstrap_conn.send(metadata_request) - except KafkaError as err: + except (KafkaError, asyncio.TimeoutError) as err: log.warning('Unable to request metadata from "%s:%s": %s', host, port, err) bootstrap_conn.close() @@ -227,7 +240,7 @@ async def bootstrap(self): log.debug('Received cluster metadata: %s', self.cluster) break else: - raise ConnectionError( + raise KafkaConnectionError( 'Unable to bootstrap from {}'.format(self.hosts)) # detect api version if need @@ -294,9 +307,9 @@ async def _metadata_update(self, cluster_metadata, topics): try: metadata = await conn.send(metadata_request) - except KafkaError as err: + except (KafkaError, asyncio.TimeoutError) as err: log.error( - 'Unable to request metadata from node with id %s: %s', + 'Unable to request metadata from node with id %s: %r', node_id, err) continue @@ -411,10 +424,10 @@ async def _get_conn( # possible to get a leader that is for some reason not in # metadata. 
# I think requerying metadata should solve this problem - if broker is None : + if broker is None: raise StaleMetadata( 'Broker id %s not in current metadata' % node_id) - + log.debug("Initiating connection to node %s at %s:%s", node_id, broker.host, broker.port) @@ -439,9 +452,10 @@ async def _get_conn( sasl_plain_password=self._sasl_plain_password, sasl_kerberos_service_name=self._sasl_kerberos_service_name, # noqa: ignore=E501 sasl_kerberos_domain_name=self._sasl_kerberos_domain_name, + sasl_oauth_token_provider=self._sasl_oauth_token_provider, version_hint=version_hint ) - except (OSError, asyncio.TimeoutError) as err: + except (OSError, asyncio.TimeoutError, KafkaError) as err: log.error('Unable connect to node with id %s: %s', node_id, err) if group == ConnectionGroup.DEFAULT: # Connection failures imply that our metadata is stale, so @@ -465,10 +479,10 @@ async def send(self, node_id, request, *, group=ConnectionGroup.DEFAULT): request (Struct): request object (not-encoded) Raises: - kafka.common.RequestTimedOutError - kafka.common.NodeNotReadyError - kafka.common.ConnectionError - kafka.common.CorrelationIdError + kafka.errors.RequestTimedOutError + kafka.errors.NodeNotReadyError + kafka.errors.KafkaConnectionError + kafka.errors.CorrelationIdError Returns: Future: resolves to Response struct @@ -522,14 +536,14 @@ async def check_version(self, node_id=None): ((0, 8, 0), MetadataRequest_v0([])), ] - # kafka kills the connection when it doesnt recognize an API request + # kafka kills the connection when it does not recognize an API request # so we can send a test request and then follow immediately with a # vanilla MetadataRequest. If the server did not recognize the first # request, both will be failed with a ConnectionError that wraps # socket.error (32, 54, or 104) conn = await self._get_conn(node_id, no_hint=True) if conn is None: - raise ConnectionError( + raise KafkaConnectionError( "No connection to node with id {}".format(node_id)) for version, request in test_cases: try: @@ -565,7 +579,8 @@ def _check_api_version_response(self, response): # The logic here is to check the list of supported request versions # in descending order. 
As soon as we find one that works, return it test_cases = [ - # format (, ) + # format (, ) + ((2, 3, 0), FetchRequest[0].API_KEY, 11), ((2, 1, 0), MetadataRequest[0].API_KEY, 7), ((1, 1, 0), FetchRequest[0].API_KEY, 7), ((1, 0, 0), MetadataRequest[0].API_KEY, 5), diff --git a/aiokafka/cluster.py b/aiokafka/cluster.py index 89393fd..e83a0b0 100644 --- a/aiokafka/cluster.py +++ b/aiokafka/cluster.py @@ -29,7 +29,6 @@ def update(self, brokers) -> None: (0, random.randint(1, 100), broker.nodeId)) - class ClusterMetadata(BaseClusterMetadata): def __init__(self, *args, **kw): @@ -126,7 +125,6 @@ def update_metadata(self, metadata): log.error("Error fetching metadata for topic %s: %s", topic, error_type) - with self._lock: self._brokers = _new_brokers self.controller = _new_controller diff --git a/aiokafka/conn.py b/aiokafka/conn.py index c172f26..858de47 100644 --- a/aiokafka/conn.py +++ b/aiokafka/conn.py @@ -1,10 +1,14 @@ import asyncio import collections +import base64 import functools +import hashlib +import hmac import logging import struct import sys import traceback +import uuid import warnings import weakref @@ -18,6 +22,8 @@ import aiokafka.errors as Errors from aiokafka.util import ensure_future, create_future, PY_36 +from aiokafka.abc import AbstractTokenProvider + try: import gssapi except ImportError: @@ -71,6 +77,7 @@ async def create_conn( sasl_plain_password=None, sasl_kerberos_service_name='kafka', sasl_kerberos_domain_name=None, + sasl_oauth_token_provider=None, version_hint=None ): if loop is None: @@ -86,6 +93,7 @@ async def create_conn( sasl_plain_password=sasl_plain_password, sasl_kerberos_service_name=sasl_kerberos_service_name, sasl_kerberos_domain_name=sasl_kerberos_domain_name, + sasl_oauth_token_provider=sasl_oauth_token_provider, version_hint=version_hint) await conn.connect() return conn @@ -118,10 +126,23 @@ def __init__(self, host, port, *, loop, client_id='aiokafka', sasl_plain_password=None, sasl_plain_username=None, sasl_kerberos_service_name='kafka', sasl_kerberos_domain_name=None, + sasl_oauth_token_provider=None, version_hint=None): if sasl_mechanism == "GSSAPI": assert gssapi is not None, "gssapi library required" + if sasl_mechanism == "OAUTHBEARER": + if sasl_oauth_token_provider is None or \ + not isinstance( + sasl_oauth_token_provider, AbstractTokenProvider): + raise ValueError("sasl_oauth_token_provider needs to be \ + provided implementing aiokafka.abc.AbstractTokenProvider") + assert callable( + getattr(sasl_oauth_token_provider, "token", None) + ), ( + 'sasl_oauth_token_provider must implement method #token()' + ) + self._loop = loop self._host = host self._port = port @@ -135,6 +156,7 @@ def __init__(self, host, port, *, loop, client_id='aiokafka', self._sasl_plain_password = sasl_plain_password self._sasl_kerberos_service_name = sasl_kerberos_service_name self._sasl_kerberos_domain_name = sasl_kerberos_domain_name + self._sasl_oauth_token_provider = sasl_oauth_token_provider # Version hint is the version determined by initial client bootstrap self._version_hint = version_hint @@ -157,7 +179,7 @@ def __init__(self, host, port, *, loop, client_id='aiokafka', if loop.get_debug(): self._source_traceback = traceback.extract_stack(sys._getframe(1)) - # Warn and try to close. We can close synchroniously, so will attempt + # Warn and try to close. 
We can close synchronously, so will attempt # that def __del__(self, _warnings=warnings): if self.connected(): @@ -257,7 +279,9 @@ async def _do_sasl_handshake(self): self.close(reason=CloseReason.AUTH_FAILURE, exc=exc) raise exc - assert self._sasl_mechanism in ('PLAIN', 'GSSAPI') + assert self._sasl_mechanism in ( + 'PLAIN', 'GSSAPI', 'SCRAM-SHA-256', 'SCRAM-SHA-512', 'OAUTHBEARER' + ) if self._security_protocol == 'SASL_PLAINTEXT' and \ self._sasl_mechanism == 'PLAIN': self.log.warning( @@ -265,6 +289,10 @@ async def _do_sasl_handshake(self): if self._sasl_mechanism == 'GSSAPI': authenticator = self.authenticator_gssapi() + elif self._sasl_mechanism.startswith('SCRAM-SHA-'): + authenticator = self.authenticator_scram() + elif self._sasl_mechanism == 'OAUTHBEARER': + authenticator = self.authenticator_oauth() else: authenticator = self.authenticator_plain() @@ -304,6 +332,10 @@ async def _do_sasl_handshake(self): self.log.info( 'Authenticated as %s via GSSAPI', self.sasl_principal) + elif self._sasl_mechanism == 'OAUTHBEARER': + self.log.info( + 'Authenticated via OAUTHBEARER' + ) else: self.log.info('Authenticated as %s via PLAIN', self._sasl_plain_username) @@ -319,6 +351,17 @@ def authenticator_gssapi(self): loop=self._loop, principal=self.sasl_principal) + def authenticator_scram(self): + return ScramAuthenticator( + loop=self._loop, + sasl_plain_password=self._sasl_plain_password, + sasl_plain_username=self._sasl_plain_username, + sasl_mechanism=self._sasl_mechanism) + + def authenticator_oauth(self): + return OAuthAuthenticator( + sasl_oauth_token_provider=self._sasl_oauth_token_provider) + @property def sasl_principal(self): service = self._sasl_kerberos_service_name @@ -373,7 +416,7 @@ def port(self): def send(self, request, expect_response=True): if self._writer is None: - raise Errors.ConnectionError( + raise Errors.KafkaConnectionError( "No connection to broker at {0}:{1}" .format(self._host, self._port)) @@ -387,7 +430,7 @@ def send(self, request, expect_response=True): self._writer.write(size + message) except OSError as err: self.close(reason=CloseReason.CONNECTION_BROKEN) - raise Errors.ConnectionError( + raise Errors.KafkaConnectionError( "Connection at {0}:{1} broken: {2}".format( self._host, self._port, err)) @@ -402,7 +445,7 @@ def send(self, request, expect_response=True): def _send_sasl_token(self, payload, expect_response=True): if self._writer is None: - raise Errors.ConnectionError( + raise Errors.KafkaConnectionError( "No connection to broker at {0}:{1}" .format(self._host, self._port)) @@ -411,7 +454,7 @@ def _send_sasl_token(self, payload, expect_response=True): self._writer.write(size + payload) except OSError as err: self.close(reason=CloseReason.CONNECTION_BROKEN) - raise Errors.ConnectionError( + raise Errors.KafkaConnectionError( "Connection at {0}:{1} broken: {2}".format( self._host, self._port, err)) @@ -435,7 +478,7 @@ def close(self, reason=None, exc=None): self._read_task = None for _, _, fut in self._requests: if not fut.done(): - error = Errors.ConnectionError( + error = Errors.KafkaConnectionError( "Connection at {0}:{1} closed".format( self._host, self._port)) if exc is not None: @@ -593,3 +636,125 @@ def authenticator_gssapi(self): msg = client_ctx.wrap(msg + self._principal.encode(), False).message yield (msg, False) + + +class ScramAuthenticator(BaseSaslAuthenticator): + MECHANISMS = { + 'SCRAM-SHA-256': hashlib.sha256, + 'SCRAM-SHA-512': hashlib.sha512 + } + + def __init__(self, *, loop, sasl_plain_password, + sasl_plain_username, 
sasl_mechanism): + self._loop = loop + self._nonce = str(uuid.uuid4()).replace('-', '') + self._auth_message = '' + self._salted_password = None + self._sasl_plain_username = sasl_plain_username + self._sasl_plain_password = sasl_plain_password.encode('utf-8') + self._hashfunc = self.MECHANISMS[sasl_mechanism] + self._hashname = ''.join(sasl_mechanism.lower().split('-')[1:3]) + self._stored_key = None + self._client_key = None + self._client_signature = None + self._client_proof = None + self._server_key = None + self._server_signature = None + self._authenticator = self.authenticator_scram() + + def first_message(self): + client_first_bare = 'n={},r={}'.format( + self._sasl_plain_username, self._nonce) + self._auth_message += client_first_bare + return 'n,,' + client_first_bare + + def process_server_first_message(self, server_first): + self._auth_message += ',' + server_first + params = dict(pair.split('=', 1) for pair in server_first.split(',')) + server_nonce = params['r'] + if not server_nonce.startswith(self._nonce): + raise ValueError("Server nonce, did not start with client nonce!") + self._nonce = server_nonce + self._auth_message += ',c=biws,r=' + self._nonce + + salt = base64.b64decode(params['s'].encode('utf-8')) + iterations = int(params['i']) + self.create_salted_password(salt, iterations) + + self._client_key = self.hmac(self._salted_password, b'Client Key') + self._stored_key = self._hashfunc(self._client_key).digest() + self._client_signature = self.hmac( + self._stored_key, self._auth_message.encode('utf-8')) + self._client_proof = ScramAuthenticator._xor_bytes( + self._client_key, self._client_signature) + self._server_key = self.hmac(self._salted_password, b'Server Key') + self._server_signature = self.hmac( + self._server_key, self._auth_message.encode('utf-8')) + + def final_message(self): + return 'c=biws,r={},p={}'.format( + self._nonce, base64.b64encode(self._client_proof).decode('utf-8')) + + def process_server_final_message(self, server_final): + params = dict(pair.split('=', 1) for pair in server_final.split(',')) + if self._server_signature != base64.b64decode( + params['v'].encode('utf-8')): + raise ValueError("Server sent wrong signature!") + + def authenticator_scram(self): + client_first = self.first_message().encode('utf-8') + server_first = yield client_first, True + self.process_server_first_message(server_first.decode('utf-8')) + client_final = self.final_message().encode('utf-8') + server_final = yield client_final, True + self.process_server_final_message(server_final.decode('utf-8')) + + def hmac(self, key, msg): + return hmac.new(key, msg, digestmod=self._hashfunc).digest() + + def create_salted_password(self, salt, iterations): + self._salted_password = hashlib.pbkdf2_hmac( + self._hashname, self._sasl_plain_password, salt, iterations + ) + + @staticmethod + def _xor_bytes(left, right): + return bytes(lb ^ rb for lb, rb in zip(left, right)) + + +class OAuthAuthenticator(BaseSaslAuthenticator): + def __init__(self, *, sasl_oauth_token_provider): + self._sasl_oauth_token_provider = sasl_oauth_token_provider + self._token_sent = False + + async def step(self, payload): + if self._token_sent: + return + token = await self._sasl_oauth_token_provider.token() + token_extensions = self._token_extensions() + self._token_sent = True + return self._build_oauth_client_request(token, token_extensions)\ + .encode("utf-8"), True + + def _build_oauth_client_request(self, token, token_extensions): + return "n,,\x01auth=Bearer {}{}\x01\x01".format( + token, 
token_extensions + ) + + def _token_extensions(self): + """ + Return a string representation of the OPTIONAL key-value pairs + that can be sent with an OAUTHBEARER initial request. + """ + # Only run if the #extensions() method is implemented + # by the clients Token Provider class + # Builds up a string separated by \x01 via a dict of key value pairs + if callable( + getattr(self._sasl_oauth_token_provider, "extensions", None)): + extensions = self._sasl_oauth_token_provider.extensions() + if len(extensions) > 0: + msg = "\x01".join( + ["{}={}".format(k, v) for k, v in extensions.items()]) + return "\x01" + msg + + return "" diff --git a/aiokafka/consumer/consumer.py b/aiokafka/consumer/consumer.py index 1165bb2..571060f 100644 --- a/aiokafka/consumer/consumer.py +++ b/aiokafka/consumer/consumer.py @@ -18,7 +18,7 @@ ) from aiokafka.structs import TopicPartition from aiokafka.util import ( - PY_36, commit_structure_validate + PY_36, commit_structure_validate, get_running_loop ) from aiokafka import __version__ @@ -99,8 +99,9 @@ class AIOKafkaConsumer(object): errors. Default: 100. auto_offset_reset (str): A policy for resetting offsets on OffsetOutOfRange errors: 'earliest' will move to the oldest - available message, 'latest' will move to the most recent. Any - ofther value will raise the exception. Default: 'latest'. + available message, 'latest' will move to the most recent, and + 'none' will raise an exception so you can handle this case. + Default: 'latest'. enable_auto_commit (bool): If true the consumer's offset will be periodically committed in the background. Default: True. auto_commit_interval_ms (int): milliseconds between automatic @@ -200,11 +201,15 @@ class AIOKafkaConsumer(object): sasl_mechanism (str): Authentication mechanism when security_protocol is configured for SASL_PLAINTEXT or SASL_SSL. Valid values are: - PLAIN, GSSAPI. Default: PLAIN + PLAIN, GSSAPI, SCRAM-SHA-256, SCRAM-SHA-512, OAUTHBEARER. + Default: PLAIN sasl_plain_username (str): username for sasl PLAIN authentication. Default: None sasl_plain_password (str): password for sasl PLAIN authentication. Default: None + sasl_oauth_token_provider (kafka.oauth.abstract.AbstractTokenProvider): + OAuthBearer token provider instance. (See kafka.oauth.abstract). 
+ Default: None Note: Many configuration parameters are taken from Java Client: @@ -215,7 +220,7 @@ class AIOKafkaConsumer(object): _closed = None # Serves as an uninitialized flag for __del__ _source_traceback = None - def __init__(self, *topics, loop, + def __init__(self, *topics, loop=None, bootstrap_servers='localhost', client_id='aiokafka-' + __version__, group_id=None, @@ -253,7 +258,11 @@ def __init__(self, *topics, loop, start_rebalancing_span=None, start_coordinator_span=None, on_generation_id_known=None, - flush_spans=None): + flush_spans=None, + sasl_oauth_token_provider=None): + if loop is None: + loop = get_running_loop() + if max_poll_records is not None and ( not isinstance(max_poll_records, int) or max_poll_records < 1): raise ValueError("`max_poll_records` should be positive Integer") @@ -274,7 +283,8 @@ def __init__(self, *topics, loop, sasl_plain_username=sasl_plain_username, sasl_plain_password=sasl_plain_password, sasl_kerberos_service_name=sasl_kerberos_service_name, - sasl_kerberos_domain_name=sasl_kerberos_domain_name) + sasl_kerberos_domain_name=sasl_kerberos_domain_name, + sasl_oauth_token_provider=sasl_oauth_token_provider) self._group_id = group_id self._heartbeat_interval_ms = heartbeat_interval_ms @@ -335,13 +345,6 @@ def __del__(self, _warnings=warnings): context['source_traceback'] = self._source_traceback self._loop.call_exception_handler(context) - async def __aenter__(self): - await self.start() - return self - - async def __aexit__(self, type, value, traceback): - await self.stop() - async def start(self): """ Connect to Kafka cluster. This will: @@ -398,7 +401,7 @@ async def start(self): start_rebalancing_span=self._start_rebalancing_span, start_coordinator_span=self._start_coordinator_span, on_generation_id_known=self._on_generation_id_known, - flush_spans=self._flush_spans, + flush_spans=self._flush_spans ) if self._subscription.subscription is not None: if self._subscription.partitions_auto_assigned(): @@ -719,7 +722,7 @@ def last_stable_offset(self, partition): def last_poll_timestamp(self, partition): """ Returns the timestamp of the last poll of this partition (in ms). It is the last time `highwater` and `last_stable_offset` were - udpated. However it does not mean that new messages were received. + updated. However it does not mean that new messages were received. As with ``highwater()`` will not be available until some messages are consumed. @@ -858,6 +861,10 @@ async def seek_to_committed(self, *partitions): *partitions: Optionally provide specific TopicPartitions, otherwise default to all assigned partitions. + Returns: + dict: ``{TopicPartition: offset}`` mapping + of the currently committed offsets. + Raises: IllegalStateError: If any partition is not currently assigned IllegalOperation: If used with ``group_id == None`` @@ -1075,6 +1082,15 @@ def subscribe(self, topics=(), pattern=None, listener=None): self._subscription.subscribe( topics=topics, listener=listener) self._client.set_topics(self._subscription.subscription.topics) + if self._group_id is None: + # We have reset the assignment, but client.set_topics will + # not always do a metadata update. We force it to do it even + # if metadata did not change. This will trigger a reassignment + # on NoGroupCoordinator, but only if snapshot did not change, + # thus we reset it too. 
+ self._client.force_metadata_update() + if self._coordinator is not None: + self._coordinator._metadata_snapshot = {} log.info("Subscribed to topic(s): %s", topics) def subscription(self): @@ -1265,3 +1281,10 @@ def records_last_request(self): @property def records_last_response(self): return self._fetcher.records_last_response + + async def __aenter__(self): + await self.start() + return self + + async def __aexit__(self, exc_type, exc, tb): + await self.stop() diff --git a/aiokafka/consumer/fetcher.py b/aiokafka/consumer/fetcher.py index 943e62a..08b9e9a 100644 --- a/aiokafka/consumer/fetcher.py +++ b/aiokafka/consumer/fetcher.py @@ -230,7 +230,8 @@ def _unpack_records(self): log.debug( "Skipping aborted record batch from partition %s with" " producer_id %s and offsets %s to %s", - tp, next_batch.producer_id + tp, next_batch.producer_id, + next_batch.base_offset, next_batch.next_offset - 1 ) self.next_fetch_offset = next_batch.next_offset continue @@ -686,6 +687,11 @@ async def _proc_fetch_request(self, assignment, node_id, request): " fetch") return False + fetch_offsets = {} + for topic, partitions in request.topics: + for partition, offset, _ in partitions: + fetch_offsets[TopicPartition(topic, partition)] = offset + now_ms = int(1000 * time.time()) for topic, partitions in response.topics: for partition, error_code, highwater, *part_data in partitions: @@ -706,7 +712,7 @@ async def _proc_fetch_request(self, assignment, node_id, request): tp_state.lso = lso tp_state.timestamp = now_ms if not tp_state.has_valid_position or \ - tp_state.position != fetch_offset : + tp_state.position != fetch_offset: log.debug( "Discarding fetch response for partition %s " "since its offset %s does not match the current " @@ -866,7 +872,8 @@ async def _retrieve_offsets(self, timestamps, timeout_ms=float("inf")): Arguments: timestamps: {TopicPartition: int} dict with timestamps to fetch offsets by. -1 for the latest available, -2 for the earliest - available. Otherwise timestamp is treated as epoch miliseconds. + available. Otherwise timestamp is treated as epoch + milliseconds. Returns: {TopicPartition: (int, int)}: Mapping of partition to @@ -1109,10 +1116,13 @@ async def fetched_records(self, partitions, timeout=0, max_records=None): return drained waiter = self._create_fetch_waiter() - done, _ = await asyncio.wait( + done, pending = await asyncio.wait( [waiter], timeout=timeout, loop=self._loop) if not done or self._closed: + if pending: + fut = pending.pop() + fut.cancel() return {} if waiter.done(): diff --git a/aiokafka/consumer/group_coordinator.py b/aiokafka/consumer/group_coordinator.py index b03ea89..60a12de 100644 --- a/aiokafka/consumer/group_coordinator.py +++ b/aiokafka/consumer/group_coordinator.py @@ -70,8 +70,8 @@ def _handle_metadata_update(self, cluster): self._group_subscription is not None: metadata_snapshot = self._get_metadata_snapshot() if self._metadata_snapshot != metadata_snapshot: - log.debug("Metadata for topic has changed from %s to %s. ", - self._metadata_snapshot, metadata_snapshot) + log.info("Metadata for topic has changed from %s to %s. 
", + self._metadata_snapshot, metadata_snapshot) self._metadata_snapshot = metadata_snapshot self._on_metadata_change() @@ -116,8 +116,14 @@ def assign_all_partitions(self, check_unknown=False): partitions = [] for topic in self._subscription.subscription.topics: p_ids = self._cluster.partitions_for_topic(topic) - if not p_ids and check_unknown: - raise Errors.UnknownTopicOrPartitionError() + if not p_ids: + if check_unknown: + raise Errors.UnknownTopicOrPartitionError() + else: + # We probably just changed subscription during metadata + # update. No problem, lets wait for the next metadata + # update + continue for p_id in p_ids: partitions.append(TopicPartition(topic, p_id)) @@ -159,6 +165,10 @@ async def _reset_committed_routine(self): return_when=asyncio.FIRST_COMPLETED, loop=self._loop) + if not event_waiter.done(): + event_waiter.cancel() + event_waiter = None + except asyncio.CancelledError: pass @@ -347,7 +357,7 @@ async def _send_req(self, request): def check_errors(self): """ Check if coordinator is well and no authorization or unrecoverable - errors occured + errors occurred """ if self._coordination_task.done(): self._coordination_task.result() @@ -512,6 +522,8 @@ async def _on_join_complete( # update partition assignment self._subscription.assign_from_subscribed(assignment.partitions()) + # The await bellow can change subscription, remember the ongoing one. + subscription = self._subscription.subscription # give the assignor a chance to update internal state # based on the received assignment @@ -521,8 +533,7 @@ async def _on_join_complete( # Callback can rely on something like ``Consumer.position()`` that # requires committed point to be refreshed. await T(self._stop_commit_offsets_refresh_task)() - self.start_commit_offsets_refresh_task( - self._subscription.subscription.assignment) + self.start_commit_offsets_refresh_task(subscription.assignment) assigned = set(self._subscription.assigned_partitions()) log.info("Setting newly assigned partitions %s for group %s", @@ -588,8 +599,8 @@ async def ensure_coordinator_known(self, trace_span=None): if trace_span is not None: T = self.traced_from_parent_span(lazy=True) else: - T = lambda f: f - + def T(f): + return f try: await T(self._coordinator_lookup_lock.acquire)() retry_backoff = self._retry_backoff_ms / 1000 @@ -636,7 +647,7 @@ async def ensure_coordinator_known(self, trace_span=None): self.coordinator_id = coordinator_id self._coordinator_dead_fut = create_future(loop=self._loop) log.info("Discovered coordinator %s for group %s", - self.coordinator_id, self.group_id) + self.coordinator_id, self.group_id) finally: set_tag("coord_lookup_retry_count", i) T(self._coordinator_lookup_lock.release)() @@ -675,7 +686,7 @@ async def __coordination_routine(self): if subscription is None: await asyncio.wait( [self._subscription.wait_for_subscription(), - self._closing], + self._closing], return_when=asyncio.FIRST_COMPLETED, loop=self._loop) if self._closing.done(): break @@ -808,6 +819,7 @@ async def REPLACE_WITH_MEMBER_ID(self, subscription, prev_assignment): idle_time = self._subscription.fetcher_idle_time if idle_time >= self._max_poll_interval: await T(asyncio.sleep)(self._retry_backoff_ms / 1000) + return None # We will only try to perform the rejoin once. 
If it fails, # we will spin this loop another time, checking for coordinator @@ -822,7 +834,7 @@ async def REPLACE_WITH_MEMBER_ID(self, subscription, prev_assignment): self._start_heartbeat_task() return subscription.assignment return None - + def _start_heartbeat_task(self): if self._heartbeat_task is None: self._heartbeat_task = ensure_future( @@ -872,6 +884,16 @@ async def _heartbeat_routine(self): "Heartbeat session expired - marking coordinator dead") self.coordinator_dead() + # If consumer is idle (no records consumed) for too long we need + # to leave the group + idle_time = self._subscription.fetcher_idle_time + if idle_time < self._max_poll_interval: + sleep_time = min( + sleep_time, + self._max_poll_interval - idle_time) + else: + await self._maybe_leave_group() + log.debug("Stopping heartbeat task") async def _do_heartbeat(self): @@ -940,7 +962,7 @@ def start_commit_offsets_refresh_task(self, assignment): self._commit_refresh_routine(assignment), loop=self._loop) async def _stop_commit_offsets_refresh_task(self): - # The previous task should end after assinment changed + # The previous task should end after assignment changed if self._commit_refresh_task is not None: if not self._commit_refresh_task.done(): self._commit_refresh_task.cancel() @@ -1030,7 +1052,7 @@ async def _maybe_do_autocommit(self, assignment): assignment, assignment.all_consumed_offsets()) except Errors.KafkaError as error: log.warning("Auto offset commit failed: %s", error) - if error.retriable: + if self._is_commit_retriable(error): # Retry after backoff. self._next_autocommit_deadline = \ self._loop.time() + backoff @@ -1043,6 +1065,16 @@ async def _maybe_do_autocommit(self, assignment): return max(0, self._next_autocommit_deadline - self._loop.time()) + def _is_commit_retriable(self, error): + # Java client raises CommitFailedError which is retriable and thus + # masks those 3. We raise error that we got explicitly, so treat them + # as retriable. + return error.retriable or isinstance(error, ( + Errors.UnknownMemberIdError, + Errors.IllegalGenerationError, + Errors.RebalanceInProgressError + )) + async def _maybe_do_last_autocommit(self, assignment): if not self._enable_auto_commit: return @@ -1139,7 +1171,8 @@ async def _do_commit_offsets(self, assignment, offsets): error_type.__name__) errored[tp] = error_type() elif error_type in (Errors.GroupCoordinatorNotAvailableError, - Errors.NotCoordinatorForGroupError): + Errors.NotCoordinatorForGroupError, + Errors.RequestTimedOutError): log.info( "OffsetCommit failed for group %s due to a" " coordinator error (%s), will find new coordinator" diff --git a/aiokafka/consumer/subscription_state.py b/aiokafka/consumer/subscription_state.py index 708ba39..ac69cef 100644 --- a/aiokafka/consumer/subscription_state.py +++ b/aiokafka/consumer/subscription_state.py @@ -25,7 +25,7 @@ class SubscriptionType(Enum): class SubscriptionState: - """ Intermidiate bridge to coordinate work between Consumer, Coordinator + """ Intermediate bridge to coordinate work between Consumer, Coordinator and Fetcher primitives. 
The class is different from kafka-python's implementation to provide @@ -260,7 +260,7 @@ def register_fetch_waiters(self, waiters): self._fetch_waiters = waiters def abort_waiters(self, exc): - """ Critical error occured, we will abort any pending waiter + """ Critical error occurred, we will abort any pending waiter """ for waiter in self._assignment_waiters: if not waiter.done(): @@ -523,7 +523,7 @@ def fetch_committed(self): return fut def update_committed(self, offset_meta: OffsetAndMetadata): - """ Called by Coordinator on successfull commit to update commit cache. + """ Called by Coordinator on successful commit to update commit cache. """ for fut in self._committed_futs: if not fut.done(): diff --git a/aiokafka/errors.py b/aiokafka/errors.py index 0d4b6e3..d9be929 100644 --- a/aiokafka/errors.py +++ b/aiokafka/errors.py @@ -68,7 +68,7 @@ KafkaUnavailableError, KafkaTimeoutError, - ConnectionError, + KafkaConnectionError, ) __all__ = [ @@ -143,7 +143,7 @@ "KafkaUnavailableError", "KafkaTimeoutError", - "ConnectionError", + "KafkaConnectionError", ] @@ -191,16 +191,17 @@ class ProducerClosed(KafkaError): class ProducerFenced(KafkaError): - """ Another producer with the same tranactional ID went online. - NOTE: As it seems this will be raised b y Broker if transaction - timeout occurred also. + """ Another producer with the same transactional ID went online. + NOTE: As it seems this will be raised by Broker if transaction timeout + occurred also. """ def __init__( - self, - msg="There is a newer producer using the same transactional_id " - "or transaction timeout occurred (check that processing " - "time is below transaction_timeout_ms)"): + self, + msg="There is a newer producer using the same transactional_id or" + "transaction timeout occurred (check that processing time is " + "below transaction_timeout_ms)" + ): super().__init__(msg) diff --git a/aiokafka/helpers.py b/aiokafka/helpers.py index 9d5dbc0..74ed745 100644 --- a/aiokafka/helpers.py +++ b/aiokafka/helpers.py @@ -1,4 +1,5 @@ import logging + from ssl import create_default_context, Purpose log = logging.getLogger(__name__) diff --git a/aiokafka/producer/__init__.py b/aiokafka/producer/__init__.py index 79bff15..5e5c6b8 100644 --- a/aiokafka/producer/__init__.py +++ b/aiokafka/producer/__init__.py @@ -1,3 +1,3 @@ from .producer import AIOKafkaProducer, BaseProducer, MultiTXNProducer -__all__ = ["AIOKafkaProducer"] +__all__ = ["AIOKafkaProducer", 'BaseProducer', 'MultiTXNProducer'] diff --git a/aiokafka/producer/message_accumulator.py b/aiokafka/producer/message_accumulator.py index dc9989c..4875795 100644 --- a/aiokafka/producer/message_accumulator.py +++ b/aiokafka/producer/message_accumulator.py @@ -331,16 +331,15 @@ async def add_message( batch = pending_batches[-1] future = batch.append(key, value, timestamp_ms, headers=headers) - if future is None: - # Batch is full, can't append data atm, - # waiting until batch per topic-partition is drained - start = self._loop.time() - await batch.wait_drain(timeout) - timeout -= self._loop.time() - start - if timeout <= 0: - raise KafkaTimeoutError() - else: + if future is not None: return future + # Batch is full, can't append data atm, + # waiting until batch per topic-partition is drained + start = self._loop.time() + await batch.wait_drain(timeout) + timeout -= self._loop.time() - start + if timeout <= 0: + raise KafkaTimeoutError() def data_waiter(self): """ Return waiter future that will be resolved when accumulator contain @@ -379,7 +378,7 @@ def reenqueue(self, batch): 
batch.reset_drain() def drain_by_nodes(self, ignore_nodes, muted_partitions=set()): - """ Group batches by leader to partiton nodes. """ + """ Group batches by leader to partition nodes. """ nodes = collections.defaultdict(dict) unknown_leaders_exist = False for tp in list(self._batches.keys()): diff --git a/aiokafka/producer/producer.py b/aiokafka/producer/producer.py index 9210bec..a893bf2 100644 --- a/aiokafka/producer/producer.py +++ b/aiokafka/producer/producer.py @@ -15,7 +15,7 @@ from aiokafka.record.legacy_records import LegacyRecordBatchBuilder from aiokafka.structs import TopicPartition from aiokafka.util import ( - INTEGER_MAX_VALUE, PY_36, commit_structure_validate, + INTEGER_MAX_VALUE, PY_36, commit_structure_validate, get_running_loop ) from .message_accumulator import MessageAccumulator @@ -40,7 +40,7 @@ class BaseProducer(abc.ABC): _closed = None # Serves as an uninitialized flag for __del__ _source_traceback = None - def __init__(self, *, loop, bootstrap_servers='localhost', + def __init__(self, *, loop=None, bootstrap_servers='localhost', client_id=None, metadata_max_age_ms=300000, request_timeout_ms=40000, api_version='auto', acks=_missing, @@ -55,7 +55,11 @@ def __init__(self, *, loop, bootstrap_servers='localhost', transaction_timeout_ms=60000, sasl_mechanism="PLAIN", sasl_plain_password=None, sasl_plain_username=None, sasl_kerberos_service_name='kafka', - sasl_kerberos_domain_name=None): + sasl_kerberos_domain_name=None, + sasl_oauth_token_provider=None): + if loop is None: + loop = get_running_loop() + if acks not in (0, 1, -1, 'all', _missing): raise ValueError("Invalid ACKS parameter") if compression_type not in ('gzip', 'snappy', 'lz4', None): @@ -117,7 +121,8 @@ def __init__(self, *, loop, bootstrap_servers='localhost', sasl_plain_username=sasl_plain_username, sasl_plain_password=sasl_plain_password, sasl_kerberos_service_name=sasl_kerberos_service_name, - sasl_kerberos_domain_name=sasl_kerberos_domain_name) + sasl_kerberos_domain_name=sasl_kerberos_domain_name, + sasl_oauth_token_provider=sasl_oauth_token_provider) self._metadata = self.client.cluster self._loop = loop if loop.get_debug(): @@ -321,11 +326,13 @@ async def send( timestamp_ms=timestamp_ms, headers=headers) return fut - async def send_and_wait(self, topic, value=None, key=None, partition=None, - timestamp_ms=None): + async def send_and_wait( + self, topic, value=None, key=None, partition=None, + timestamp_ms=None, headers=None + ): """Publish a message to a topic and wait the result""" future = await self.send( - topic, value, key, partition, timestamp_ms) + topic, value, key, partition, timestamp_ms, headers) return (await future) @@ -458,6 +465,9 @@ class AIOKafkaProducer(BaseProducer): Default: None sasl_plain_password (str): password for sasl PLAIN authentication. Default: None + sasl_oauth_token_provider (kafka.oauth.abstract.AbstractTokenProvider): + OAuthBearer token provider instance. (See kafka.oauth.abstract). 
+ Default: None Note: Many configuration parameters are taken from the Java client: diff --git a/aiokafka/producer/sender.py b/aiokafka/producer/sender.py index 157a276..7e364ae 100644 --- a/aiokafka/producer/sender.py +++ b/aiokafka/producer/sender.py @@ -52,7 +52,7 @@ def __init__( self._on_irrecoverable_error = on_irrecoverable_error async def start(self): - # If producer is indempotent we need to assure we have PID found + # If producer is idempotent we need to assure we have PID found await self._maybe_wait_for_pid() self._sender_task = ensure_future( self._sender_routine(), loop=self._loop) @@ -91,9 +91,7 @@ async def _sender_routine(self): while True: # If indempotence or transactions are turned on we need to # have a valid PID to send any request below - log.debug('+maybe wait for pid') await self._maybe_wait_for_pid() - log.debug('-maybe wait for pid') waiters = set() # As transaction coordination is done via a single, separate @@ -148,15 +146,10 @@ async def _sender_routine(self): # * At least one of produce task is finished # * Data for new partition arrived # * Metadata update if partition leader unknown - log.debug('+SENDER WAIT FOR %r' % (waiters,)) - if waiters: - done, _ = await asyncio.wait( - waiters, - return_when=asyncio.FIRST_COMPLETED, - loop=self._loop) - log.debug('-SENDER WAIT FOR') - else: - await asyncio.sleep(0.5) + done, _ = await asyncio.wait( + waiters, + return_when=asyncio.FIRST_COMPLETED, + loop=self._loop) # done tasks should never produce errors, if they are it's a # bug diff --git a/aiokafka/record/_crecords/consts.pxi b/aiokafka/record/_crecords/consts.pxi index a110f55..7fd44e8 100644 --- a/aiokafka/record/_crecords/consts.pxi +++ b/aiokafka/record/_crecords/consts.pxi @@ -1,3 +1,4 @@ +#cython: language_level=3 # Attribute parsing flags DEF _ATTR_CODEC_MASK = 0x07 DEF _ATTR_CODEC_NONE = 0x00 diff --git a/aiokafka/record/_crecords/cutil.pxd b/aiokafka/record/_crecords/cutil.pxd index 2fdd292..3ba19d5 100644 --- a/aiokafka/record/_crecords/cutil.pxd +++ b/aiokafka/record/_crecords/cutil.pxd @@ -1,3 +1,4 @@ +#cython: language_level=3 from libc.stdint cimport int64_t, uint64_t, uint32_t from libc.limits cimport UINT_MAX from cpython cimport ( diff --git a/aiokafka/record/_crecords/cutil.pyx b/aiokafka/record/_crecords/cutil.pyx index 345af7b..33198be 100644 --- a/aiokafka/record/_crecords/cutil.pyx +++ b/aiokafka/record/_crecords/cutil.pyx @@ -1,3 +1,4 @@ +#cython: language_level=3 from aiokafka.errors import CorruptRecordException # VarInt implementation @@ -102,7 +103,7 @@ def encode_varint_cython(int64_t value, write): Arguments: value (int): Value to encode - write (function): Called per byte that needs to be writen + write (function): Called per byte that needs to be written Returns: int: Number of bytes written diff --git a/aiokafka/record/_crecords/default_records.pxd b/aiokafka/record/_crecords/default_records.pxd index 0f429bd..7da6fc9 100644 --- a/aiokafka/record/_crecords/default_records.pxd +++ b/aiokafka/record/_crecords/default_records.pxd @@ -1,3 +1,4 @@ +#cython: language_level=3 from libc.stdint cimport int64_t, uint32_t, int32_t, int16_t diff --git a/aiokafka/record/_crecords/default_records.pyx b/aiokafka/record/_crecords/default_records.pyx index 673f3b3..5473e93 100644 --- a/aiokafka/record/_crecords/default_records.pyx +++ b/aiokafka/record/_crecords/default_records.pyx @@ -1,3 +1,4 @@ +#cython: language_level=3 # See: # https://github.com/apache/kafka/blob/trunk/clients/src/main/java/org/\ # 
apache/kafka/common/record/DefaultRecordBatch.java diff --git a/aiokafka/record/_crecords/hton.pxd b/aiokafka/record/_crecords/hton.pxd index fc79ad3..5454626 100644 --- a/aiokafka/record/_crecords/hton.pxd +++ b/aiokafka/record/_crecords/hton.pxd @@ -1,3 +1,4 @@ +#cython: language_level=3 # Module taken as is from https://github.com/MagicStack/asyncpg project. # Copyright (C) 2016-present the asyncpg authors and contributors diff --git a/aiokafka/record/_crecords/legacy_records.pxd b/aiokafka/record/_crecords/legacy_records.pxd index 934eb2f..4315445 100644 --- a/aiokafka/record/_crecords/legacy_records.pxd +++ b/aiokafka/record/_crecords/legacy_records.pxd @@ -1,3 +1,4 @@ +#cython: language_level=3 from libc.stdint cimport int64_t, uint32_t diff --git a/aiokafka/record/_crecords/memory_records.pyx b/aiokafka/record/_crecords/memory_records.pyx index 076ce63..66f0b3b 100644 --- a/aiokafka/record/_crecords/memory_records.pyx +++ b/aiokafka/record/_crecords/memory_records.pyx @@ -1,3 +1,4 @@ +#cython: language_level=3 # This class takes advantage of the fact that all formats v0, v1 and v2 of # messages storage has the same byte offsets for Length and Magic fields. # Lets look closely at what leading bytes all versions have: diff --git a/aiokafka/record/util.py b/aiokafka/record/util.py index 37c9069..deb4367 100644 --- a/aiokafka/record/util.py +++ b/aiokafka/record/util.py @@ -9,7 +9,7 @@ def encode_varint_py(value, write): Arguments: value (int): Value to encode - write (function): Called per byte that needs to be writen + write (function): Called per byte that needs to be written Returns: int: Number of bytes written diff --git a/aiokafka/structs.py b/aiokafka/structs.py index 640860b..ea651c7 100644 --- a/aiokafka/structs.py +++ b/aiokafka/structs.py @@ -1,22 +1,52 @@ -import collections -from kafka.common import ( - OffsetAndMetadata, TopicPartition, BrokerMetadata, PartitionMetadata +from dataclasses import dataclass +from typing import Generic, NamedTuple, Optional, Sequence, Tuple, TypeVar + +from kafka.structs import ( + BrokerMetadata, + OffsetAndMetadata, + PartitionMetadata, + TopicPartition, ) + __all__ = [ - "OffsetAndMetadata", "TopicPartition", "RecordMetadata", "ConsumerRecord", - "BrokerMetadata", "PartitionMetadata" + "OffsetAndMetadata", + "TopicPartition", + "RecordMetadata", + "ConsumerRecord", + "BrokerMetadata", + "PartitionMetadata", ] -RecordMetadata = collections.namedtuple( - 'RecordMetadata', ['topic', 'partition', 'topic_partition', 'offset', - 'timestamp', 'timestamp_type']) -ConsumerRecord = collections.namedtuple( - "ConsumerRecord", ["topic", "partition", "offset", "timestamp", - "timestamp_type", "key", "value", "checksum", - "serialized_key_size", "serialized_value_size", - "headers"]) +class RecordMetadata(NamedTuple): + topic: str + partition: int + topic_partition: TopicPartition + offset: int + timestamp: Optional[int] # Timestamp in millis, None for older Brokers + timestamp_type: int + + +KT = TypeVar("KT") +VT = TypeVar("VT") + + +@dataclass +class ConsumerRecord(Generic[KT, VT]): + topic: str + partition: int + offset: int + timestamp: int + timestamp_type: int + key: Optional[KT] + value: Optional[VT] + checksum: int + serialized_key_size: int + serialized_value_size: int + headers: Sequence[Tuple[str, bytes]] + -OffsetAndTimestamp = collections.namedtuple( - "OffsetAndTimestamp", ["offset", "timestamp"]) +class OffsetAndTimestamp(NamedTuple): + offset: int + timestamp: Optional[int] # Only None if used with old broker version diff --git 
a/aiokafka/util.py b/aiokafka/util.py index 5df138f..99ab56f 100644 --- a/aiokafka/util.py +++ b/aiokafka/util.py @@ -1,9 +1,12 @@ +import asyncio import os import sys -import asyncio +from asyncio import AbstractEventLoop from distutils.version import StrictVersion +from typing import Dict, Tuple, TypeVar, Union + +from .structs import OffsetAndMetadata, TopicPartition -from .structs import TopicPartition, OffsetAndMetadata __all__ = ["ensure_future", "create_future", "PY_35"] @@ -13,22 +16,26 @@ except ImportError: exec("from asyncio import async as ensure_future") +T = TypeVar("T") -def create_future(loop): + +def create_future(loop: AbstractEventLoop) -> "asyncio.Future[T]": try: return loop.create_future() except AttributeError: return asyncio.Future(loop=loop) -def parse_kafka_version(api_version): +def parse_kafka_version(api_version: str) -> Tuple[int, int, int]: version = StrictVersion(api_version).version if not (0, 9) <= version < (3, 0): raise ValueError(api_version) return version -def commit_structure_validate(offsets): +def commit_structure_validate( + offsets: Dict[TopicPartition, Union[int, Tuple[int, str], OffsetAndMetadata]] +) -> Dict[TopicPartition, OffsetAndMetadata]: # validate `offsets` structure if not offsets or not isinstance(offsets, dict): raise ValueError(offsets) @@ -53,10 +60,20 @@ def commit_structure_validate(offsets): return formatted_offsets +def get_running_loop() -> asyncio.AbstractEventLoop: + loop = asyncio.get_event_loop() + if not loop.is_running(): + raise RuntimeError( + "The object should be created within an async function or " + "provide loop directly." + ) + return loop + + PY_35 = sys.version_info >= (3, 5) PY_352 = sys.version_info >= (3, 5, 2) PY_36 = sys.version_info >= (3, 6) -NO_EXTENSIONS = bool(os.environ.get('AIOKAFKA_NO_EXTENSIONS')) +NO_EXTENSIONS = bool(os.environ.get("AIOKAFKA_NO_EXTENSIONS")) INTEGER_MAX_VALUE = 2 ** 31 - 1 -INTEGER_MIN_VALUE = - 2 ** 31 +INTEGER_MIN_VALUE = -(2 ** 31) diff --git a/benchmark_results_v0.4.0_vs_v0.3.1.rst b/benchmark_results_v0.4.0_vs_v0.3.1.rst deleted file mode 100644 index 4ac50dc..0000000 --- a/benchmark_results_v0.4.0_vs_v0.3.1.rst +++ /dev/null @@ -1,45 +0,0 @@ -Benchmarks measured using `benchmark/simple_consume_bench.py` and -`simple_produce_bench.py` scripts. - -Kafka environment: - -* Broker 0.10.2.1 -* Scala 2.11 - -Machine: - -* OS Mac OSX 10.13 (17A405) -* 2.7 GHz Intel Core i7 -* 8 GB 1600 MHz DDR3 - - -0.3.1 (vanila asyncio): -Total consumed 1001600 messages in 24.43 second(s). Avg 40998.0 m/s. -Total produced 500000 messages in 24.38 second(s). Avg 20505.0 m/s - -0.3.1 (with uvloop): -Total consumed 1001600 messages in 24.46 second(s). Avg 40953.0 m/s. -Total produced 500000 messages in 23.36 second(s). Avg 21400.0 m/s - - -master Pure Python (vanila asyncio): -Total consumed 1001600 messages in 18.93 second(s). Avg 52918.0 m/s. -Total produced 500000 messages in 19.75 second(s). Avg 25311.0 m/s - -master Pure Python (with uvloop): -Total consumed 1001600 messages in 18.19 second(s). Avg 55057.0 m/s. -Total produced 500000 messages in 15.79 second(s). Avg 31675.0 m/s - - -master C ext (vanila asyncio): -Total consumed 1001600 messages in 4.99 second(s). Avg 200829.0 m/s. -Total produced 500000 messages in 13.48 second(s). Avg 37103.0 m/s - -master C ext (with uvloop): -Total consumed 1001600 messages in 4.72 second(s). Avg 212248.0 m/s. -Total produced 500000 messages in 10.24 second(s). Avg 48828.0 m/s - - -We see an overal boost in speed. 
With C extension it's ~4.9X speedup on read -and ~1.8X on write. Without C extension we still have a good 29% boost on read -and 23% on write. diff --git a/docker/Dockerfile b/docker/Dockerfile index 63cac84..8558f40 100644 --- a/docker/Dockerfile +++ b/docker/Dockerfile @@ -1,8 +1,15 @@ # Kafka and Zookeeper (borrowed from spotify/kafka) -FROM java:openjdk-8-jre +FROM openjdk:8-buster ENV DEBIAN_FRONTEND noninteractive + +# Zookeeper and other needed things +RUN apt-get update && \ + apt-get install -y zookeeper wget supervisor dnsutils krb5-admin-server krb5-kdc && \ + rm -rf /var/lib/apt/lists/* && \ + apt-get clean + ARG SCALA_VERSION=2.11 ARG KAFKA_VERSION=0.9.0.1 ENV SCALA_VERSION $SCALA_VERSION @@ -21,6 +28,7 @@ RUN apt-get update && \ ADD scripts/start-kafka.sh /usr/bin/start-kafka.sh ADD scripts/kafka_server_jaas.conf /etc/kafka/kafka_server_jaas.conf ADD scripts/kafka_server_gssapi_jaas.conf /etc/kafka/kafka_server_gssapi_jaas.conf +ADD scripts/kafka_server_jaas_no_scram.conf /etc/kafka/kafka_server_jaas_no_scram.conf ADD scripts/krb5.conf /etc/krb5.conf # Supervisor config diff --git a/docker/Makefile b/docker/Makefile index 433233a..4163f79 100644 --- a/docker/Makefile +++ b/docker/Makefile @@ -1,5 +1,5 @@ -SCALA_VERSION?=2.11 -KAFKA_VERSION?=0.9.0.1 +SCALA_VERSION?=2.12 +KAFKA_VERSION?=1.1.1 IMAGE_NAME?=aiolibs/kafka IMAGE_TAG=$(IMAGE_NAME):$(SCALA_VERSION)_$(KAFKA_VERSION) diff --git a/docker/config.yml b/docker/config.yml index a22b982..4d742f6 100644 --- a/docker/config.yml +++ b/docker/config.yml @@ -13,5 +13,14 @@ versions: kafka: "1.1.1" scala: "2.12" - - kafka: "2.1.0" + kafka: "2.1.1" + scala: "2.12" + - + kafka: "2.2.2" + scala: "2.12" + - + kafka: "2.3.1" + scala: "2.12" + - + kafka: "2.4.0" scala: "2.12" diff --git a/docker/scripts/kafka_server_jaas.conf b/docker/scripts/kafka_server_jaas.conf index d754356..efc6720 100644 --- a/docker/scripts/kafka_server_jaas.conf +++ b/docker/scripts/kafka_server_jaas.conf @@ -11,4 +11,8 @@ KafkaServer { storeKey=true keyTab="/server.keytab" principal="kafka/localhost@AIOLIBS"; + + org.apache.kafka.common.security.scram.ScramLoginModule required + username="test" + password="test"; }; diff --git a/docker/scripts/kafka_server_jaas_no_scram.conf b/docker/scripts/kafka_server_jaas_no_scram.conf new file mode 100644 index 0000000..d754356 --- /dev/null +++ b/docker/scripts/kafka_server_jaas_no_scram.conf @@ -0,0 +1,14 @@ +KafkaServer { + org.apache.kafka.common.security.plain.PlainLoginModule required + username="admin" + password="admin" + user_admin="admin" + user_test="test" + user_test2="test2"; + + com.sun.security.auth.module.Krb5LoginModule required + useKeyTab=true + storeKey=true + keyTab="/server.keytab" + principal="kafka/localhost@AIOLIBS"; +}; diff --git a/docs/api.rst b/docs/api.rst index 8f64baf..5f90aed 100644 --- a/docs/api.rst +++ b/docs/api.rst @@ -77,7 +77,7 @@ GSSAPI sasl methods. Be sure to install ``gssapi`` python module to use GSSAPI. Please consult the `official documentation `_ for setup instructions on Broker side. Client configuration is pretty much the same as JAVA's, consult the ``sasl_*`` options in Consumer and Producer API -Referense for more details. +Reference for more details. Error handling @@ -96,7 +96,7 @@ Exception handling example: try: send_future = await producer.send('foobar', b'test data') response = await send_future # wait until message is produced - except KafkaTimeourError: + except KafkaTimeoutError: print("produce timeout... 
maybe we want to resend data again?") except KafkaError as err: print("some kafka error on produce: {}".format(err)) diff --git a/docs/consumer.rst b/docs/consumer.rst index 2bd2306..6cc1bdb 100644 --- a/docs/consumer.rst +++ b/docs/consumer.rst @@ -94,7 +94,7 @@ For most simple use cases auto committing is probably the best choice:: enable_auto_commit=True, # Is True by default anyway auto_commit_interval_ms=1000, # Autocommit every second auto_offset_reset="earliest", # If committed offset not found, start - # from beginnig + # from beginning ) await consumer.start() @@ -112,7 +112,7 @@ batch operations you should use *manual commit*:: group_id="my_group", # Consumer must be in a group to commit enable_auto_commit=False, # Will disable autocommit auto_offset_reset="earliest", # If committed offset not found, start - # from beginnig + # from beginning ) await consumer.start() @@ -382,7 +382,7 @@ new topic matching a *subscribed regex* is created. For example:: consumer = AIOKafkaConsumer( loop=loop, bootstrap_servers='localhost:9092', - metadata_max_age_ms=30000, # This controlls the polling interval + metadata_max_age_ms=30000, # This controls the polling interval ) await consumer.start() consumer.subscribe(pattern="^MyGreatTopic-.*$") @@ -500,7 +500,7 @@ A `read_committed` consumer will only read up to the LSO and filter out any transactional messages which have been aborted. The LSO also affects the behavior of ``seek_to_end(*partitions)`` and ``end_offsets(partitions)`` for ``read_committed`` consumers, details of which are in each method's -documentation. Finally, ``last_stable_offset()`` API was added similary to +documentation. Finally, ``last_stable_offset()`` API was added similarly to ``highwater()`` API to query the lSO on a currently assigned transaction:: async for msg in consumer: # Only read committed tranasctions diff --git a/docs/examples/custom_partitioner.rst b/docs/examples/custom_partitioner.rst index 803a8be..2157721 100644 --- a/docs/examples/custom_partitioner.rst +++ b/docs/examples/custom_partitioner.rst @@ -3,7 +3,7 @@ Custom partitioner ================== If you consider using partitions as a logical entity, rather then purely for -load-balancing, you may need to have more controll over routing messages to +load-balancing, you may need to have more control over routing messages to partitions. By default hashing algorithms are used. diff --git a/docs/examples/local_state_consumer.rst b/docs/examples/local_state_consumer.rst index 4ef4976..6852493 100644 --- a/docs/examples/local_state_consumer.rst +++ b/docs/examples/local_state_consumer.rst @@ -169,7 +169,7 @@ There are several points of interest in this example: * We implement ``RebalanceListener`` to dump all counts and offsets before rebalances. After rebalances we load them from the same files. It's a kind of cache to avoid re-reading all messages. - * We controll offset reset policy manualy by setting + * We control offset reset policy manually by setting ``auto_offset_reset="none"``. We need it to catch OffsetOutOfRangeError so we can clear cache if files were old and such offsets don't exist anymore in Kafka. diff --git a/docs/examples/manual_commit.rst b/docs/examples/manual_commit.rst index 116552a..68f89b9 100644 --- a/docs/examples/manual_commit.rst +++ b/docs/examples/manual_commit.rst @@ -13,7 +13,7 @@ More on message delivery: https://kafka.apache.org/documentation.html#semantics .. 
note:: After Kafka Broker version 0.11 and after `aiokafka==0.5.0` it is possible - to use Transactional Producer to achive *exactly once* delivery semantics. + to use Transactional Producer to achieve *exactly once* delivery semantics. See :ref:`Tranactional Producer ` section. diff --git a/docs/examples/python35_examples.rst b/docs/examples/python35_examples.rst index 299f516..37fa396 100644 --- a/docs/examples/python35_examples.rst +++ b/docs/examples/python35_examples.rst @@ -2,7 +2,7 @@ Python 3.5 async usage ====================== -``aiokafka`` supports Python3.5 ``async def`` syntax and adds some sugger using +``aiokafka`` supports Python3.5 ``async def`` syntax and adds some sugar using this syntax. diff --git a/docs/examples/transaction_example.rst b/docs/examples/transaction_example.rst index c0a247e..36b2729 100644 --- a/docs/examples/transaction_example.rst +++ b/docs/examples/transaction_example.rst @@ -1,7 +1,7 @@ .. _transaction-example: -Tranactional Consume-Process-Produce ------------------------------------- +Transactional Consume-Process-Produce +------------------------------------- If you have a pattern where you want to consume from one topic, process data and produce to a different one, you would really like to do it with using diff --git a/docs/kafka-python_difference.rst b/docs/kafka-python_difference.rst index 2bfa3b2..2dd39a7 100644 --- a/docs/kafka-python_difference.rst +++ b/docs/kafka-python_difference.rst @@ -27,7 +27,7 @@ While ``kafka-python`` has a lot of great features it is made to be used in a very powerful with the ability to use multiple cores. The API itself just can't be adopted to be used in an asynchronous way (even -thou the library does asyncronous IO using `selectors`). It has too much +though the library does asynchronous IO using `selectors`). It has too much blocking behavior including `blocking` socket usage, threading synchronization, etc. Examples would be: @@ -37,7 +37,7 @@ etc. Examples would be: All those can't be changed to use `Future` API seamlessly. So to get a normal, non-blocking interface based on Future's and coroutines a new library needed to -be done. +be written. API differences and rationale @@ -59,7 +59,7 @@ not only message fetching, but also: * Does autocommit This will never be a case where you own the IO loop, at least not with socket -polling. To avoid misunderstandings as to why does those methods behave in a +polling. To avoid misunderstandings as to why do those methods behave in a different way :ref:`aiokafka-consumer` exposes this interface under the name ``getmany()`` with some other differences described below. @@ -78,9 +78,9 @@ heartbeats to Coordinator as long as the *event loop* is running. This behaviour is very similar to Java client, with the exception of no heartbeats on long CPU bound methods. -But ``aiokafka`` also performs rebalance in the same background Task. This +But ``aiokafka`` also performs group rebalancing in the same background Task. This means, that the processing time between ``getmany`` calls actually does not -effect rebalancing. ``KIP-62`` proposed to provide ``max.poll.interval.ms`` as +affect rebalancing. ``KIP-62`` proposed to provide ``max.poll.interval.ms`` as the configuration for both *rebalance timeout* and *consumer processing timeout*. 
In ``aiokafka`` it does not make much sense, as those 2 are not related, so we added both configurations (``rebalance_timeout_ms`` and @@ -89,7 +89,7 @@ It is quite critical to provide :ref:`ConsumerRebalanceListener ` if you need to control rebalance start and end moments. In that case set the ``rebalance_timeout_ms`` to the maximum time your application can spend -waiting in the callback. If your callback waits for last ``getmany`` result to +waiting in the callback. If your callback waits for the last ``getmany`` result to be processed, it is safe to set this value to ``max_poll_interval_ms``, same as in Java client. @@ -125,8 +125,8 @@ the time of processing of data in all topics. Which is why ``aiokafka`` tries to do prefetches **per partition**. For example, if we processed all data pending for a partition in *iterator* interface, ``aiokafka`` will *try* to prefetch new data right away. The same -interface can be somehow build on top of ``kafka-python``'s *pause* API, but -you will require `a lot of code`_. +interface could be built on top of ``kafka-python``'s *pause* API, but +would require `a lot of code`_. .. note:: diff --git a/docs/producer.rst b/docs/producer.rst index 7acfa55..009e10e 100644 --- a/docs/producer.rst +++ b/docs/producer.rst @@ -93,10 +93,10 @@ The least safe is ``ack=0`` when there will be no acknowledgement from Broker, meaning client will never retry, as it will never see any errors. -Indempotent produce +Idempotent produce ------------------- -As of Kafka 0.11 the Brokers support indempotent producing, that will prevent +As of Kafka 0.11 the Brokers support idempotent producing, that will prevent the Producer from creating duplicates on retries. *aiokafka* supports this mode by passing the parameter ``enable_idempotence=True`` to ``AIOKafkaProducer``:: @@ -197,7 +197,7 @@ containing fields: ``send()`` 0 will be returned (CreateTime). If Broker set it's own timestamp 1 will be returned (LogAppendTime). -.. note:: In a very rare case, when Indempotent or Transactional producer is +.. note:: In a very rare case, when Idempotent or Transactional producer is used and there was a long wait between batch initial send and a retry, producer may return ``offset == -1`` and ``timestamp == -1`` as Broker already expired the metadata for this produce sequence and only knows that @@ -218,7 +218,7 @@ Users who need precise control over batch flow may use the lower-level metadata = batch.append(value=b"msg %d" % i, key=None, timestamp=None) assert metadata is not None - # Optionaly close the batch to further submission. If left open, the batch + # Optionally close the batch to further submission. If left open, the batch # may be appended to by producer.send(). batch.close() diff --git a/examples/ssl_consume_produce.py b/examples/ssl_consume_produce.py index d92dd79..ae71dbd 100644 --- a/examples/ssl_consume_produce.py +++ b/examples/ssl_consume_produce.py @@ -1,7 +1,7 @@ import asyncio from aiokafka import AIOKafkaProducer, AIOKafkaConsumer from aiokafka.helpers import create_ssl_context -from kafka.common import TopicPartition +from kafka.structs import TopicPartition context = create_ssl_context( cafile="./ca-cert", # CA used to sign certificate. 
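The ssl_consume_produce.py hunk above only swaps the ``kafka.common`` import for ``kafka.structs``, and the diff context stops mid-call to ``create_ssl_context``. For reference, a minimal sketch of the updated example is shown below; the certificate paths, broker address and topic name are illustrative placeholders, not values taken from this diff:

```python
import asyncio

from aiokafka import AIOKafkaConsumer
from aiokafka.helpers import create_ssl_context
from kafka.structs import TopicPartition  # moved here from kafka.common by this change

# Placeholder certificate paths: cafile is the CA that signed the broker
# certificate, certfile/keyfile identify this client.
context = create_ssl_context(
    cafile="./ca-cert",
    certfile="./cert-signed",
    keyfile="./cert-key",
)


async def consume(loop):
    consumer = AIOKafkaConsumer(
        "my_topic",
        loop=loop,
        bootstrap_servers="localhost:9093",
        security_protocol="SSL",
        ssl_context=context,
    )
    await consumer.start()
    try:
        msg = await consumer.getone()
        tp = TopicPartition(msg.topic, msg.partition)
        print("Got message from", tp, ":", msg.value)
    finally:
        await consumer.stop()


loop = asyncio.get_event_loop()
loop.run_until_complete(consume(loop))
```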
diff --git a/pytest.ini b/pytest.ini index c1bd68c..6171b33 100644 --- a/pytest.ini +++ b/pytest.ini @@ -1,4 +1,6 @@ [pytest] filterwarnings = - default - ignore:.*docker.sock.*:ResourceWarning \ No newline at end of file + error + ignore:.*docker.sock.*:ResourceWarning +markers = + ssl: Tests that require SSL certificates to run diff --git a/requirements-ci.txt b/requirements-ci.txt index 1efb8df..a640218 100644 --- a/requirements-ci.txt +++ b/requirements-ci.txt @@ -1,11 +1,17 @@ -flake8==3.7.7 -pytest==5.0.0 -pytest-cov==2.7.1 -docker==4.0.2 -lz4==2.1.10 -xxhash==1.3.0 +-r requirements-cython.txt +flake8==3.8.3 +black==19.10b0 +mypy==0.782 +isort==4.3.21 +pytest==5.4.3 +pytest-cov==2.10.0 +pytest-asyncio==0.12.0 +docker==4.2.1 +lz4==3.1.0 +xxhash==1.4.3 python-snappy==0.5.4 -Cython==0.29.11 -docutils==0.14 -Pygments==2.4.2 -gssapi==1.5.1 +docutils==0.16 +Pygments==2.6.1 +gssapi==1.6.2 # pyup: <= 1.6.2 # For some reason 1.6.5 does not install with py35 +dataclasses==0.5; python_version<"3.7" +async_generator==1.10; python_version<"3.7" diff --git a/requirements-cython.txt b/requirements-cython.txt new file mode 100644 index 0000000..07a74f9 --- /dev/null +++ b/requirements-cython.txt @@ -0,0 +1 @@ +Cython==0.29.20 diff --git a/requirements-dev.txt b/requirements-dev.txt index ba757cd..9827690 100644 --- a/requirements-dev.txt +++ b/requirements-dev.txt @@ -1,5 +1,5 @@ -r requirements-ci.txt -r requirements-docs.txt -diff-cover==2.3.0 +diff-cover==3.0.1 setuptools>=34.4.0 \ No newline at end of file diff --git a/requirements-docs.txt b/requirements-docs.txt index e0a555c..5635a0d 100644 --- a/requirements-docs.txt +++ b/requirements-docs.txt @@ -1,5 +1,13 @@ +<<<<<<< HEAD Sphinx==2.1.2 sphinxcontrib-asyncio==0.2.0 sphinxcontrib-spelling==4.3.0 alabaster==0.7.12 Cython==0.29.11 +======= +-r requirements-cython.txt +Sphinx==3.1.1 +sphinxcontrib-asyncio==0.2.0 +sphinxcontrib-spelling==5.1.1 +alabaster==0.7.12 +>>>>>>> aio-libs/master diff --git a/requirements-win-test.txt b/requirements-win-test.txt index d81e0ae..ff8d92d 100644 --- a/requirements-win-test.txt +++ b/requirements-win-test.txt @@ -1,10 +1,14 @@ -flake8==3.7.7 -pytest==5.0.0 -pytest-cov==2.7.1 -pytest-catchlog==1.2.2 -docker-py==1.10.6 -lz4==2.1.10 -xxhash==1.3.0 +-r requirements-cython.txt +flake8==3.8.3 +black==19.10b0 +mypy==0.782 +isort==4.3.21 +pytest==5.4.3 +pytest-cov==2.10.0 +pytest-asyncio==0.12.0 +docker==4.2.1 +lz4==3.1.0 +xxhash==1.4.3 python-snappy==0.5.4 -# pypiwin32==219 -cython==0.29.11 +dataclasses==0.5; python_version<"3.7" +async_generator==1.10; python_version<"3.7" diff --git a/setup.cfg b/setup.cfg index 94956cb..d57520a 100644 --- a/setup.cfg +++ b/setup.cfg @@ -1,2 +1,51 @@ [wheel] universal = 0 + +[flake8] +max-line-length = 88 +exclude = + .git + venv + __pycache__ + .tox + +[isort] +line_length=88 +include_trailing_comma=True +multi_line_output=3 +force_grid_wrap=0 +combine_as_imports=True +lines_after_imports=2 +known_standard_library=dataclasses +known_first_party=aiokafka,kafka +known_third_party=pytest + +[mypy] +check_untyped_defs = True +disallow_any_generics = True +disallow_untyped_defs = True +follow_imports = silent +strict_optional = True +warn_redundant_casts = True +warn_unused_ignores = True +warn_unused_configs = True +mypy_path=stubs + +[mypy-pytest] +ignore_missing_imports = True + +[mypy-Cython] +ignore_missing_imports = True + +[mypy-kafka.*] +warn_no_return = False +disallow_untyped_defs = False + +[mypy-kafka.vendor.*] +ignore_missing_imports = True + +[mypy-aiokafka.*] 
+ignore_missing_imports = True + +[mypy-aiokafka.util] +ignore_missing_imports = False diff --git a/setup.py b/setup.py index e250d2a..18bcad8 100644 --- a/setup.py +++ b/setup.py @@ -1,68 +1,68 @@ import os -import re import platform +import re import sys +from distutils.command.bdist_rpm import bdist_rpm as _bdist_rpm from distutils.command.build_ext import build_ext -from distutils.errors import (CCompilerError, DistutilsExecError, - DistutilsPlatformError) +from distutils.errors import CCompilerError, DistutilsExecError, DistutilsPlatformError + +from setuptools import Extension, setup -from setuptools import setup, Extension # Those are needed to build _hton for windows -CFLAGS = ['-O2'] +CFLAGS = ["-O2"] LDFLAGS = [] LIBRARIES = [] -if platform.uname().system == 'Windows': - LDFLAGS.append('ws2_32.lib') +if platform.uname().system == "Windows": + LDFLAGS.append("ws2_32.lib") else: - CFLAGS.extend(['-Wall', '-Wsign-compare', '-Wconversion']) - LIBRARIES.append('z') + CFLAGS.extend(["-Wall", "-Wsign-compare", "-Wconversion"]) + LIBRARIES.append("z") # The extension part is copied from aiohttp's setup.py -if os.environ.get('NO_CYTHON'): +try: + from Cython.Build import cythonize + + USE_CYTHON = True +except ImportError: USE_CYTHON = False -else: - try: - from Cython.Build import cythonize - USE_CYTHON = True - except ImportError: - USE_CYTHON = False -ext = '.pyx' if USE_CYTHON else '.c' +ext = ".pyx" if USE_CYTHON else ".c" extensions = [ Extension( - 'aiokafka.record._crecords.legacy_records', - ['aiokafka/record/_crecords/legacy_records' + ext], + "aiokafka.record._crecords.legacy_records", + ["aiokafka/record/_crecords/legacy_records" + ext], libraries=LIBRARIES, extra_compile_args=CFLAGS, - extra_link_args=LDFLAGS + extra_link_args=LDFLAGS, ), Extension( - 'aiokafka.record._crecords.default_records', - ['aiokafka/record/_crecords/crc32c.c', - 'aiokafka/record/_crecords/default_records' + ext], + "aiokafka.record._crecords.default_records", + [ + "aiokafka/record/_crecords/crc32c.c", + "aiokafka/record/_crecords/default_records" + ext, + ], libraries=LIBRARIES, extra_compile_args=CFLAGS, - extra_link_args=LDFLAGS + extra_link_args=LDFLAGS, ), Extension( - 'aiokafka.record._crecords.memory_records', - ['aiokafka/record/_crecords/memory_records' + ext], + "aiokafka.record._crecords.memory_records", + ["aiokafka/record/_crecords/memory_records" + ext], libraries=LIBRARIES, extra_compile_args=CFLAGS, - extra_link_args=LDFLAGS + extra_link_args=LDFLAGS, ), Extension( - 'aiokafka.record._crecords.cutil', - ['aiokafka/record/_crecords/crc32c.c', - 'aiokafka/record/_crecords/cutil' + ext], + "aiokafka.record._crecords.cutil", + ["aiokafka/record/_crecords/crc32c.c", "aiokafka/record/_crecords/cutil" + ext], libraries=LIBRARIES, extra_compile_args=CFLAGS, - extra_link_args=LDFLAGS + extra_link_args=LDFLAGS, ), ] @@ -71,6 +71,13 @@ extensions = cythonize(extensions) +class bdist_rpm(_bdist_rpm): + def _make_spec_file(self): + orig = super()._make_spec_file() + orig.insert(0, "%define debug_package %{nil}") + return orig + + class BuildFailed(Exception): pass @@ -87,75 +94,78 @@ def run(self): def build_extension(self, ext): try: build_ext.build_extension(self, ext) - except (CCompilerError, DistutilsExecError, - DistutilsPlatformError, ValueError): + except (CCompilerError, DistutilsExecError, DistutilsPlatformError, ValueError): raise BuildFailed() -install_requires = ['kafka-python>=1.4.6,<1.5'] +install_requires = ["kafka-python>=2.0.0"] PY_VER = sys.version_info if PY_VER >= (3, 5): pass 
elif PY_VER >= (3, 4): - install_requires.append('typing') + install_requires.append("typing") else: - raise RuntimeError("aiokafka doesn't suppport Python earlier than 3.4") + raise RuntimeError("aiokafka doesn't support Python earlier than 3.4") def read(f): return open(os.path.join(os.path.dirname(__file__), f)).read().strip() -extras_require = {'snappy': ['python-snappy>=0.5'], } + +extras_require = { + "snappy": ["python-snappy>=0.5"], +} def read_version(): regexp = re.compile(r"^__version__\W*=\W*'([\d.abrcdev]+)'") - init_py = os.path.join(os.path.dirname(__file__), - 'aiokafka', '__init__.py') + init_py = os.path.join(os.path.dirname(__file__), "aiokafka", "__init__.py") with open(init_py) as f: for line in f: match = regexp.match(line) if match is not None: return match.group(1) else: - raise RuntimeError('Cannot find version in aiokafka/__init__.py') + raise RuntimeError("Cannot find version in aiokafka/__init__.py") + classifiers = [ - 'License :: OSI Approved :: Apache Software License', - 'Intended Audience :: Developers', - 'Programming Language :: Python :: 3', - 'Programming Language :: Python :: 3.5', - 'Programming Language :: Python :: 3.6', - 'Programming Language :: Python :: 3.7', - 'Operating System :: OS Independent', - 'Topic :: System :: Networking', - 'Topic :: System :: Distributed Computing', - 'Framework :: AsyncIO', - 'Development Status :: 4 - Beta', + "License :: OSI Approved :: Apache Software License", + "Intended Audience :: Developers", + "Programming Language :: Python :: 3", + "Programming Language :: Python :: 3.5", + "Programming Language :: Python :: 3.6", + "Programming Language :: Python :: 3.7", + "Operating System :: OS Independent", + "Topic :: System :: Networking", + "Topic :: System :: Distributed Computing", + "Framework :: AsyncIO", + "Development Status :: 4 - Beta", ] args = dict( - name='robinhood-aiokafka', + name="robinhood-aiokafka", version=read_version(), - description=('Kafka integration with asyncio.'), - long_description=('This is a temporary fork of aiokafka ' - 'maintained by Robinhood'), + description=("Kafka integration with asyncio."), + long_description=( + "This is a temporary fork of aiokafka " "maintained by Robinhood" + ), classifiers=classifiers, - platforms=['POSIX'], - author='Andrew Svetlov', - author_email='andrew.svetlov@gmail.com', - url='http://aiokafka.readthedocs.org', - download_url='https://pypi.python.org/pypi/aiokafka', - license='Apache 2', - packages=['aiokafka'], + platforms=["POSIX"], + author="Andrew Svetlov", + author_email="andrew.svetlov@gmail.com", + url="http://aiokafka.readthedocs.org", + download_url="https://pypi.python.org/pypi/aiokafka", + license="Apache 2", + packages=["aiokafka"], install_requires=install_requires, extras_require=extras_require, include_package_data=True, ext_modules=extensions, - cmdclass=dict(build_ext=ve_build_ext) + cmdclass=dict(build_ext=ve_build_ext, bdist_rpm=bdist_rpm), ) try: @@ -164,6 +174,6 @@ def read_version(): print("************************************************************") print("Cannot compile C accelerator module, use pure python version") print("************************************************************") - del args['ext_modules'] - del args['cmdclass'] + del args["ext_modules"] + del args["cmdclass"] setup(**args) diff --git a/stubs/kafka/__init__.pyi b/stubs/kafka/__init__.pyi new file mode 100644 index 0000000..24e0b2d --- /dev/null +++ b/stubs/kafka/__init__.pyi @@ -0,0 +1,11 @@ +import logging +from kafka.admin import KafkaAdminClient as 
KafkaAdminClient +from kafka.client_async import KafkaClient as KafkaClient +from kafka.conn import BrokerConnection as BrokerConnection +from kafka.consumer import KafkaConsumer as KafkaConsumer +from kafka.consumer.subscription_state import ConsumerRebalanceListener as ConsumerRebalanceListener +from kafka.producer import KafkaProducer as KafkaProducer +from typing import Any + +class NullHandler(logging.Handler): + def emit(self, record: Any) -> None: ... diff --git a/stubs/kafka/admin/__init__.pyi b/stubs/kafka/admin/__init__.pyi new file mode 100644 index 0000000..8c1ee0d --- /dev/null +++ b/stubs/kafka/admin/__init__.pyi @@ -0,0 +1,5 @@ +from kafka.admin.acl_resource import ACL as ACL, ACLFilter as ACLFilter, ACLOperation as ACLOperation, ACLPermissionType as ACLPermissionType, ACLResourcePatternType as ACLResourcePatternType, ResourcePattern as ResourcePattern, ResourcePatternFilter as ResourcePatternFilter, ResourceType as ResourceType +from kafka.admin.client import KafkaAdminClient as KafkaAdminClient +from kafka.admin.config_resource import ConfigResource as ConfigResource, ConfigResourceType as ConfigResourceType +from kafka.admin.new_partitions import NewPartitions as NewPartitions +from kafka.admin.new_topic import NewTopic as NewTopic diff --git a/stubs/kafka/admin/acl_resource.pyi b/stubs/kafka/admin/acl_resource.pyi new file mode 100644 index 0000000..c5f7827 --- /dev/null +++ b/stubs/kafka/admin/acl_resource.pyi @@ -0,0 +1,65 @@ +from kafka.errors import IllegalArgumentError as IllegalArgumentError +from kafka.vendor.enum34 import IntEnum as IntEnum +from typing import Any + +class ResourceType(IntEnum): + UNKNOWN: Any = ... + ANY: Any = ... + CLUSTER: Any = ... + DELEGATION_TOKEN: Any = ... + GROUP: Any = ... + TOPIC: Any = ... + TRANSACTIONAL_ID: int = ... + +class ACLOperation(IntEnum): + ANY: Any = ... + ALL: Any = ... + READ: Any = ... + WRITE: Any = ... + CREATE: Any = ... + DELETE: Any = ... + ALTER: Any = ... + DESCRIBE: Any = ... + CLUSTER_ACTION: Any = ... + DESCRIBE_CONFIGS: Any = ... + ALTER_CONFIGS: Any = ... + IDEMPOTENT_WRITE: int = ... + +class ACLPermissionType(IntEnum): + ANY: Any = ... + DENY: Any = ... + ALLOW: int = ... + +class ACLResourcePatternType(IntEnum): + ANY: Any = ... + MATCH: Any = ... + LITERAL: Any = ... + PREFIXED: int = ... + +class ACLFilter: + principal: Any = ... + host: Any = ... + operation: Any = ... + permission_type: Any = ... + resource_pattern: Any = ... + def __init__(self, principal: Any, host: Any, operation: Any, permission_type: Any, resource_pattern: Any) -> None: ... + def validate(self) -> None: ... + def __eq__(self, other: Any) -> Any: ... + def __hash__(self) -> Any: ... + +class ACL(ACLFilter): + def __init__(self, principal: Any, host: Any, operation: Any, permission_type: Any, resource_pattern: Any) -> None: ... + def validate(self) -> None: ... + +class ResourcePatternFilter: + resource_type: Any = ... + resource_name: Any = ... + pattern_type: Any = ... + def __init__(self, resource_type: Any, resource_name: Any, pattern_type: Any) -> None: ... + def validate(self) -> None: ... + def __eq__(self, other: Any) -> Any: ... + def __hash__(self) -> Any: ... + +class ResourcePattern(ResourcePatternFilter): + def __init__(self, resource_type: Any, resource_name: Any, pattern_type: Any = ...) -> None: ... + def validate(self) -> None: ... 
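The acl_resource stub above spells out the constructor signatures that mypy checks against (together with the ``mypy_path = stubs`` setting added to setup.cfg earlier in this diff). A minimal sketch of code these stubs would cover; the broker address, principal and topic name are illustrative, not taken from the diff:

```python
from kafka.admin import (
    ACL,
    ACLOperation,
    ACLPermissionType,
    KafkaAdminClient,
    ResourcePattern,
    ResourceType,
)

# Build an ACL granting one principal read access to a single topic; the
# argument order follows the ACL/ACLFilter signature declared in the stub.
acl = ACL(
    principal="User:alice",
    host="*",
    operation=ACLOperation.READ,
    permission_type=ACLPermissionType.ALLOW,
    resource_pattern=ResourcePattern(ResourceType.TOPIC, "my_topic"),
)

admin = KafkaAdminClient(bootstrap_servers="localhost:9092")
admin.create_acls([acl])
admin.close()
```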
diff --git a/stubs/kafka/admin/client.pyi b/stubs/kafka/admin/client.pyi new file mode 100644 index 0000000..75a4641 --- /dev/null +++ b/stubs/kafka/admin/client.pyi @@ -0,0 +1,32 @@ +from . import ConfigResourceType as ConfigResourceType +from kafka.admin.acl_resource import ACL as ACL, ACLFilter as ACLFilter, ACLOperation as ACLOperation, ACLPermissionType as ACLPermissionType, ACLResourcePatternType as ACLResourcePatternType, ResourcePattern as ResourcePattern, ResourceType as ResourceType +from kafka.client_async import KafkaClient as KafkaClient +from kafka.errors import IllegalArgumentError as IllegalArgumentError, IncompatibleBrokerVersion as IncompatibleBrokerVersion, KafkaConfigurationError as KafkaConfigurationError, NotControllerError as NotControllerError, UnrecognizedBrokerVersion as UnrecognizedBrokerVersion +from kafka.metrics import MetricConfig as MetricConfig, Metrics as Metrics +from kafka.protocol.admin import AlterConfigsRequest as AlterConfigsRequest, CreateAclsRequest as CreateAclsRequest, CreatePartitionsRequest as CreatePartitionsRequest, CreateTopicsRequest as CreateTopicsRequest, DeleteAclsRequest as DeleteAclsRequest, DeleteTopicsRequest as DeleteTopicsRequest, DescribeAclsRequest as DescribeAclsRequest, DescribeConfigsRequest as DescribeConfigsRequest, DescribeGroupsRequest as DescribeGroupsRequest, ListGroupsRequest as ListGroupsRequest +from kafka.protocol.commit import GroupCoordinatorRequest as GroupCoordinatorRequest, OffsetFetchRequest as OffsetFetchRequest +from kafka.protocol.metadata import MetadataRequest as MetadataRequest +from kafka.structs import OffsetAndMetadata as OffsetAndMetadata, TopicPartition as TopicPartition +from typing import Any, Optional + +log: Any + +class KafkaAdminClient: + DEFAULT_CONFIG: Any = ... + config: Any = ... + def __init__(self, **configs: Any) -> None: ... + def close(self) -> None: ... + def create_topics(self, new_topics: Any, timeout_ms: Optional[Any] = ..., validate_only: bool = ...): ... + def delete_topics(self, topics: Any, timeout_ms: Optional[Any] = ...): ... + def list_topics(self): ... + def describe_topics(self, topics: Optional[Any] = ...): ... + def describe_cluster(self): ... + def describe_acls(self, acl_filter: Any): ... + def create_acls(self, acls: Any): ... + def delete_acls(self, acl_filters: Any): ... + def describe_configs(self, config_resources: Any, include_synonyms: bool = ...): ... + def alter_configs(self, config_resources: Any): ... + def create_partitions(self, topic_partitions: Any, timeout_ms: Optional[Any] = ..., validate_only: bool = ...): ... + def describe_consumer_groups(self, group_ids: Any, group_coordinator_id: Optional[Any] = ..., include_authorized_operations: bool = ...): ... + def list_consumer_groups(self, broker_ids: Optional[Any] = ...): ... + def list_consumer_group_offsets(self, group_id: Any, group_coordinator_id: Optional[Any] = ..., partitions: Optional[Any] = ...): ... diff --git a/stubs/kafka/admin/config_resource.pyi b/stubs/kafka/admin/config_resource.pyi new file mode 100644 index 0000000..509f83c --- /dev/null +++ b/stubs/kafka/admin/config_resource.pyi @@ -0,0 +1,12 @@ +from kafka.vendor.enum34 import IntEnum as IntEnum +from typing import Any, Optional + +class ConfigResourceType(IntEnum): + BROKER: Any = ... + TOPIC: int = ... + +class ConfigResource: + resource_type: Any = ... + name: Any = ... + configs: Any = ... + def __init__(self, resource_type: Any, name: Any, configs: Optional[Any] = ...) -> None: ... 
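Similarly, the client.pyi and config_resource.pyi stubs above describe the admin-client surface used for configuration lookups. A short sketch under the same assumptions (placeholder broker address and topic name):

```python
from kafka.admin import ConfigResource, ConfigResourceType, KafkaAdminClient

admin = KafkaAdminClient(bootstrap_servers="localhost:9092")

# describe_configs() takes a list of ConfigResource objects, matching the
# stubbed signature describe_configs(config_resources, include_synonyms=False).
topic_resource = ConfigResource(ConfigResourceType.TOPIC, "my_topic")
configs = admin.describe_configs([topic_resource])
print(configs)

admin.close()
```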
diff --git a/stubs/kafka/admin/new_partitions.pyi b/stubs/kafka/admin/new_partitions.pyi new file mode 100644 index 0000000..aeb31ad --- /dev/null +++ b/stubs/kafka/admin/new_partitions.pyi @@ -0,0 +1,6 @@ +from typing import Any, Optional + +class NewPartitions: + total_count: Any = ... + new_assignments: Any = ... + def __init__(self, total_count: Any, new_assignments: Optional[Any] = ...) -> None: ... diff --git a/stubs/kafka/admin/new_topic.pyi b/stubs/kafka/admin/new_topic.pyi new file mode 100644 index 0000000..e9458c0 --- /dev/null +++ b/stubs/kafka/admin/new_topic.pyi @@ -0,0 +1,10 @@ +from kafka.errors import IllegalArgumentError as IllegalArgumentError +from typing import Any, Optional + +class NewTopic: + name: Any = ... + num_partitions: Any = ... + replication_factor: Any = ... + replica_assignments: Any = ... + topic_configs: Any = ... + def __init__(self, name: Any, num_partitions: Any, replication_factor: Any, replica_assignments: Optional[Any] = ..., topic_configs: Optional[Any] = ...) -> None: ... diff --git a/stubs/kafka/client_async.pyi b/stubs/kafka/client_async.pyi new file mode 100644 index 0000000..3875c93 --- /dev/null +++ b/stubs/kafka/client_async.pyi @@ -0,0 +1,56 @@ +from kafka.cluster import ClusterMetadata as ClusterMetadata +from kafka.conn import BrokerConnection as BrokerConnection, ConnectionStates as ConnectionStates, collect_hosts as collect_hosts, get_ip_port_afi as get_ip_port_afi +from kafka.future import Future as Future +from kafka.metrics import AnonMeasurable as AnonMeasurable +from kafka.metrics.stats import Avg as Avg, Count as Count, Rate as Rate +from kafka.metrics.stats.rate import TimeUnit as TimeUnit +from kafka.protocol.metadata import MetadataRequest as MetadataRequest +from kafka.vendor import socketpair as socketpair +from typing import Any, Optional + +log: Any + +class KafkaClient: + DEFAULT_CONFIG: Any = ... + config: Any = ... + cluster: Any = ... + def __init__(self, **configs: Any) -> None: ... + def maybe_connect(self, node_id: Any, wakeup: bool = ...): ... + def ready(self, node_id: Any, metadata_priority: bool = ...): ... + def connected(self, node_id: Any): ... + def close(self, node_id: Optional[Any] = ...) -> None: ... + def __del__(self) -> None: ... + def is_disconnected(self, node_id: Any): ... + def connection_delay(self, node_id: Any): ... + def is_ready(self, node_id: Any, metadata_priority: bool = ...): ... + def send(self, node_id: Any, request: Any, wakeup: bool = ...): ... + def poll(self, timeout_ms: Optional[Any] = ..., future: Optional[Any] = ...): ... + def in_flight_request_count(self, node_id: Optional[Any] = ...): ... + def least_loaded_node(self): ... + def set_topics(self, topics: Any): ... + def add_topic(self, topic: Any): ... + def get_api_versions(self): ... + def check_version(self, node_id: Optional[Any] = ..., timeout: int = ..., strict: bool = ...): ... + def wakeup(self) -> None: ... + def bootstrap_connected(self): ... + +class IdleConnectionManager: + connections_max_idle: Any = ... + next_idle_close_check_time: Any = ... + lru_connections: Any = ... + def __init__(self, connections_max_idle_ms: Any) -> None: ... + def update(self, conn_id: Any) -> None: ... + def remove(self, conn_id: Any) -> None: ... + def is_expired(self, conn_id: Any): ... + def next_check_ms(self): ... + def update_next_idle_close_check_time(self, ts: Any) -> None: ... + def poll_expired_connection(self): ... + +class KafkaClientMetrics: + metrics: Any = ... + metric_group_name: Any = ... + connection_closed: Any = ... 
+ connection_created: Any = ... + select_time: Any = ... + io_time: Any = ... + def __init__(self, metrics: Any, metric_group_prefix: Any, conns: Any): ... diff --git a/stubs/kafka/cluster.pyi b/stubs/kafka/cluster.pyi new file mode 100644 index 0000000..5db9a19 --- /dev/null +++ b/stubs/kafka/cluster.pyi @@ -0,0 +1,33 @@ +from kafka.conn import collect_hosts as collect_hosts +from kafka.future import Future as Future +from kafka.structs import BrokerMetadata as BrokerMetadata, PartitionMetadata as PartitionMetadata, TopicPartition as TopicPartition +from typing import Any + +log: Any + +class ClusterMetadata: + DEFAULT_CONFIG: Any = ... + need_all_topic_metadata: bool = ... + unauthorized_topics: Any = ... + internal_topics: Any = ... + controller: Any = ... + config: Any = ... + def __init__(self, **configs: Any) -> None: ... + def is_bootstrap(self, node_id: Any): ... + def brokers(self): ... + def broker_metadata(self, broker_id: Any): ... + def partitions_for_topic(self, topic: Any): ... + def available_partitions_for_topic(self, topic: Any): ... + def leader_for_partition(self, partition: Any): ... + def partitions_for_broker(self, broker_id: Any): ... + def coordinator_for_group(self, group: Any): ... + def ttl(self): ... + def refresh_backoff(self): ... + def request_update(self): ... + def topics(self, exclude_internal_topics: bool = ...): ... + def failed_update(self, exception: Any) -> None: ... + def update_metadata(self, metadata: Any): ... + def add_listener(self, listener: Any) -> None: ... + def remove_listener(self, listener: Any) -> None: ... + def add_group_coordinator(self, group: Any, response: Any): ... + def with_partitions(self, partitions_to_add: Any): ... diff --git a/stubs/kafka/codec.pyi b/stubs/kafka/codec.pyi new file mode 100644 index 0000000..8129ae1 --- /dev/null +++ b/stubs/kafka/codec.pyi @@ -0,0 +1,21 @@ +from typing import Any, Optional + +PYPY: Any + +def has_gzip(): ... +def has_snappy(): ... +def has_lz4(): ... +def gzip_encode(payload: Any, compresslevel: Optional[Any] = ...): ... +def gzip_decode(payload: Any): ... +def snappy_encode(payload: Any, xerial_compatible: bool = ..., xerial_blocksize: Any = ...): ... +def snappy_decode(payload: Any): ... + +lz4_encode: Any + +def lz4f_decode(payload: Any): ... + +lz4_decode: Any +lz4_decode = lz4f_decode + +def lz4_encode_old_kafka(payload: Any): ... +def lz4_decode_old_kafka(payload: Any): ... diff --git a/stubs/kafka/conn.pyi b/stubs/kafka/conn.pyi new file mode 100644 index 0000000..f0c867f --- /dev/null +++ b/stubs/kafka/conn.pyi @@ -0,0 +1,74 @@ +import ssl +from kafka.future import Future as Future +from kafka.metrics.stats import Avg as Avg, Count as Count, Max as Max, Rate as Rate +from kafka.oauth.abstract import AbstractTokenProvider as AbstractTokenProvider +from kafka.protocol.admin import SaslHandShakeRequest as SaslHandShakeRequest +from kafka.protocol.commit import OffsetFetchRequest as OffsetFetchRequest +from kafka.protocol.metadata import MetadataRequest as MetadataRequest +from kafka.protocol.parser import KafkaProtocol as KafkaProtocol +from kafka.protocol.types import Int32 as Int32, Int8 as Int8 +from kafka.scram import ScramClient as ScramClient +from typing import Any, Optional + +log: Any +DEFAULT_KAFKA_PORT: int +SASL_QOP_AUTH: int +SASL_QOP_AUTH_INT: int +SASL_QOP_AUTH_CONF: int +ssl_available: bool + +class SSLWantReadError(Exception): ... +class SSLWantWriteError(Exception): ... + +AFI_NAMES: Any + +class ConnectionStates: + DISCONNECTING: str = ... + DISCONNECTED: str = ... 
+ CONNECTING: str = ... + HANDSHAKE: str = ... + CONNECTED: str = ... + AUTHENTICATING: str = ... + +class BrokerConnection: + DEFAULT_CONFIG: Any = ... + SECURITY_PROTOCOLS: Any = ... + SASL_MECHANISMS: Any = ... + host: Any = ... + port: Any = ... + afi: Any = ... + config: Any = ... + node_id: Any = ... + in_flight_requests: Any = ... + state: Any = ... + last_attempt: int = ... + def __init__(self, host: Any, port: Any, afi: Any, **configs: Any) -> None: ... + def connect_blocking(self, timeout: Any = ...): ... + def connect(self): ... + def blacked_out(self): ... + def connection_delay(self): ... + def connected(self): ... + def connecting(self): ... + def disconnected(self): ... + def __del__(self) -> None: ... + def close(self, error: Optional[Any] = ...) -> None: ... + def send(self, request: Any, blocking: bool = ...): ... + def send_pending_requests(self): ... + def send_pending_requests_v2(self): ... + def can_send_more(self): ... + def recv(self): ... + def requests_timed_out(self): ... + def get_api_versions(self): ... + def check_version(self, timeout: int = ..., strict: bool = ..., topics: Any = ...): ... + +class BrokerConnectionMetrics: + metrics: Any = ... + bytes_sent: Any = ... + bytes_received: Any = ... + request_time: Any = ... + def __init__(self, metrics: Any, metric_group_prefix: Any, node_id: Any) -> None: ... + +def get_ip_port_afi(host_and_port_str: Any): ... +def collect_hosts(hosts: Any, randomize: bool = ...): ... +def is_inet_4_or_6(gai: Any): ... +def dns_lookup(host: Any, port: Any, afi: Any = ...): ... diff --git a/stubs/kafka/consumer/__init__.pyi b/stubs/kafka/consumer/__init__.pyi new file mode 100644 index 0000000..ff36d4d --- /dev/null +++ b/stubs/kafka/consumer/__init__.pyi @@ -0,0 +1 @@ +from kafka.consumer.group import KafkaConsumer as KafkaConsumer diff --git a/stubs/kafka/consumer/fetcher.pyi b/stubs/kafka/consumer/fetcher.pyi new file mode 100644 index 0000000..63d638b --- /dev/null +++ b/stubs/kafka/consumer/fetcher.pyi @@ -0,0 +1,66 @@ +import kafka.errors as Errors +import six +from collections import namedtuple +from kafka.future import Future as Future +from kafka.metrics.stats import Avg as Avg, Count as Count, Max as Max, Rate as Rate +from kafka.protocol.fetch import FetchRequest as FetchRequest +from kafka.protocol.offset import OffsetRequest as OffsetRequest, OffsetResetStrategy as OffsetResetStrategy, UNKNOWN_OFFSET as UNKNOWN_OFFSET +from kafka.record import MemoryRecords as MemoryRecords +from kafka.serializer import Deserializer as Deserializer +from kafka.structs import OffsetAndTimestamp as OffsetAndTimestamp, TopicPartition as TopicPartition +from typing import Any, Optional + +log: Any +READ_UNCOMMITTED: int +READ_COMMITTED: int + +ConsumerRecord = namedtuple('ConsumerRecord', ['topic', 'partition', 'offset', 'timestamp', 'timestamp_type', 'key', 'value', 'headers', 'checksum', 'serialized_key_size', 'serialized_value_size', 'serialized_header_size']) + +CompletedFetch = namedtuple('CompletedFetch', ['topic_partition', 'fetched_offset', 'response_version', 'partition_data', 'metric_aggregator']) + +class NoOffsetForPartitionError(Errors.KafkaError): ... +class RecordTooLargeError(Errors.KafkaError): ... + +class Fetcher(six.Iterator): + DEFAULT_CONFIG: Any = ... + config: Any = ... + def __init__(self, client: Any, subscriptions: Any, metrics: Any, **configs: Any) -> None: ... + def send_fetches(self): ... + def reset_offsets_if_needed(self, partitions: Any) -> None: ... + def in_flight_fetches(self): ... 
+ def update_fetch_positions(self, partitions: Any) -> None: ... + def get_offsets_by_times(self, timestamps: Any, timeout_ms: Any): ... + def beginning_offsets(self, partitions: Any, timeout_ms: Any): ... + def end_offsets(self, partitions: Any, timeout_ms: Any): ... + def beginning_or_end_offset(self, partitions: Any, timestamp: Any, timeout_ms: Any): ... + def fetched_records(self, max_records: Optional[Any] = ..., update_offsets: bool = ...): ... + def __iter__(self) -> Any: ... + def __next__(self): ... + class PartitionRecords: + fetch_offset: Any = ... + topic_partition: Any = ... + messages: Any = ... + message_idx: Any = ... + def __init__(self, fetch_offset: Any, tp: Any, messages: Any) -> None: ... + def __len__(self): ... + def discard(self) -> None: ... + def take(self, n: Optional[Any] = ...): ... + +class FetchResponseMetricAggregator: + sensors: Any = ... + unrecorded_partitions: Any = ... + total_bytes: int = ... + total_records: int = ... + def __init__(self, sensors: Any, partitions: Any) -> None: ... + def record(self, partition: Any, num_bytes: Any, num_records: Any) -> None: ... + +class FetchManagerMetrics: + metrics: Any = ... + group_name: Any = ... + bytes_fetched: Any = ... + records_fetched: Any = ... + fetch_latency: Any = ... + records_fetch_lag: Any = ... + fetch_throttle_time_sensor: Any = ... + def __init__(self, metrics: Any, prefix: Any) -> None: ... + def record_topic_fetch_metrics(self, topic: Any, num_bytes: Any, num_records: Any) -> None: ... diff --git a/stubs/kafka/consumer/group.pyi b/stubs/kafka/consumer/group.pyi new file mode 100644 index 0000000..d70e873 --- /dev/null +++ b/stubs/kafka/consumer/group.pyi @@ -0,0 +1,49 @@ +import six +from kafka.client_async import KafkaClient as KafkaClient +from kafka.consumer.fetcher import Fetcher as Fetcher +from kafka.consumer.subscription_state import SubscriptionState as SubscriptionState +from kafka.coordinator.assignors.range import RangePartitionAssignor as RangePartitionAssignor +from kafka.coordinator.assignors.roundrobin import RoundRobinPartitionAssignor as RoundRobinPartitionAssignor +from kafka.coordinator.consumer import ConsumerCoordinator as ConsumerCoordinator +from kafka.errors import KafkaConfigurationError as KafkaConfigurationError, UnsupportedVersionError as UnsupportedVersionError +from kafka.metrics import MetricConfig as MetricConfig, Metrics as Metrics +from kafka.protocol.offset import OffsetResetStrategy as OffsetResetStrategy +from kafka.structs import TopicPartition as TopicPartition +from typing import Any, Optional + +log: Any + +class KafkaConsumer(six.Iterator): + DEFAULT_CONFIG: Any = ... + DEFAULT_SESSION_TIMEOUT_MS_0_9: int = ... + config: Any = ... + def __init__(self, *topics: Any, **configs: Any) -> None: ... + def bootstrap_connected(self): ... + def assign(self, partitions: Any) -> None: ... + def assignment(self): ... + def close(self, autocommit: bool = ...) -> None: ... + def commit_async(self, offsets: Optional[Any] = ..., callback: Optional[Any] = ...): ... + def commit(self, offsets: Optional[Any] = ...) -> None: ... + def committed(self, partition: Any, metadata: bool = ...): ... + def topics(self): ... + def partitions_for_topic(self, topic: Any): ... + def poll(self, timeout_ms: int = ..., max_records: Optional[Any] = ..., update_offsets: bool = ...): ... + def position(self, partition: Any): ... + def highwater(self, partition: Any): ... + def pause(self, *partitions: Any) -> None: ... + def paused(self): ... 
+ def resume(self, *partitions: Any) -> None: ... + def seek(self, partition: Any, offset: Any) -> None: ... + def seek_to_beginning(self, *partitions: Any) -> None: ... + def seek_to_end(self, *partitions: Any) -> None: ... + def subscribe(self, topics: Any = ..., pattern: Optional[Any] = ..., listener: Optional[Any] = ...) -> None: ... + def subscription(self): ... + def unsubscribe(self) -> None: ... + def metrics(self, raw: bool = ...): ... + def offsets_for_times(self, timestamps: Any): ... + def beginning_offsets(self, partitions: Any): ... + def end_offsets(self, partitions: Any): ... + def __iter__(self) -> Any: ... + def __next__(self): ... + def next_v2(self): ... + def next_v1(self): ... diff --git a/stubs/kafka/consumer/subscription_state.pyi b/stubs/kafka/consumer/subscription_state.pyi new file mode 100644 index 0000000..189fa23 --- /dev/null +++ b/stubs/kafka/consumer/subscription_state.pyi @@ -0,0 +1,63 @@ +import abc +from kafka.errors import IllegalStateError as IllegalStateError +from kafka.protocol.offset import OffsetResetStrategy as OffsetResetStrategy +from kafka.structs import OffsetAndMetadata as OffsetAndMetadata +from typing import Any, Optional + +log: Any + +class SubscriptionState: + subscription: Any = ... + subscribed_pattern: Any = ... + assignment: Any = ... + listener: Any = ... + needs_fetch_committed_offsets: bool = ... + def __init__(self, offset_reset_strategy: str = ...) -> None: ... + def subscribe(self, topics: Any = ..., pattern: Optional[Any] = ..., listener: Optional[Any] = ...) -> None: ... + def change_subscription(self, topics: Any) -> None: ... + def group_subscribe(self, topics: Any) -> None: ... + def reset_group_subscription(self) -> None: ... + def assign_from_user(self, partitions: Any) -> None: ... + def assign_from_subscribed(self, assignments: Any) -> None: ... + def unsubscribe(self) -> None: ... + def group_subscription(self): ... + def seek(self, partition: Any, offset: Any) -> None: ... + def assigned_partitions(self): ... + def paused_partitions(self): ... + def fetchable_partitions(self): ... + def partitions_auto_assigned(self): ... + def all_consumed_offsets(self): ... + def need_offset_reset(self, partition: Any, offset_reset_strategy: Optional[Any] = ...) -> None: ... + def has_default_offset_reset_policy(self): ... + def is_offset_reset_needed(self, partition: Any): ... + def has_all_fetch_positions(self): ... + def missing_fetch_positions(self): ... + def is_assigned(self, partition: Any): ... + def is_paused(self, partition: Any): ... + def is_fetchable(self, partition: Any): ... + def pause(self, partition: Any) -> None: ... + def resume(self, partition: Any) -> None: ... + +class TopicPartitionState: + committed: Any = ... + has_valid_position: bool = ... + paused: bool = ... + awaiting_reset: bool = ... + reset_strategy: Any = ... + highwater: Any = ... + drop_pending_message_set: bool = ... + last_offset_from_message_batch: Any = ... + def __init__(self) -> None: ... + position: Any = ... + def await_reset(self, strategy: Any) -> None: ... + def seek(self, offset: Any) -> None: ... + def pause(self) -> None: ... + def resume(self) -> None: ... + def is_fetchable(self): ... + +class ConsumerRebalanceListener(metaclass=abc.ABCMeta): + __metaclass__: Any = ... + @abc.abstractmethod + def on_partitions_revoked(self, revoked: Any) -> Any: ... + @abc.abstractmethod + def on_partitions_assigned(self, assigned: Any) -> Any: ... 
diff --git a/stubs/kafka/coordinator/__init__.pyi b/stubs/kafka/coordinator/__init__.pyi new file mode 100644 index 0000000..e69de29 diff --git a/stubs/kafka/coordinator/assignors/__init__.pyi b/stubs/kafka/coordinator/assignors/__init__.pyi new file mode 100644 index 0000000..e69de29 diff --git a/stubs/kafka/coordinator/assignors/abstract.pyi b/stubs/kafka/coordinator/assignors/abstract.pyi new file mode 100644 index 0000000..00a5b37 --- /dev/null +++ b/stubs/kafka/coordinator/assignors/abstract.pyi @@ -0,0 +1,15 @@ +import abc +from typing import Any + +log: Any + +class AbstractPartitionAssignor(metaclass=abc.ABCMeta): + @property + @abc.abstractmethod + def name(self) -> Any: ... + @abc.abstractmethod + def assign(self, cluster: Any, members: Any) -> Any: ... + @abc.abstractmethod + def metadata(self, topics: Any) -> Any: ... + @abc.abstractmethod + def on_assignment(self, assignment: Any) -> Any: ... diff --git a/stubs/kafka/coordinator/assignors/range.pyi b/stubs/kafka/coordinator/assignors/range.pyi new file mode 100644 index 0000000..57e35ca --- /dev/null +++ b/stubs/kafka/coordinator/assignors/range.pyi @@ -0,0 +1,15 @@ +from kafka.coordinator.assignors.abstract import AbstractPartitionAssignor as AbstractPartitionAssignor +from kafka.coordinator.protocol import ConsumerProtocolMemberAssignment as ConsumerProtocolMemberAssignment, ConsumerProtocolMemberMetadata as ConsumerProtocolMemberMetadata +from typing import Any + +log: Any + +class RangePartitionAssignor(AbstractPartitionAssignor): + name: str = ... + version: int = ... + @classmethod + def assign(cls, cluster: Any, member_metadata: Any): ... + @classmethod + def metadata(cls, topics: Any): ... + @classmethod + def on_assignment(cls, assignment: Any) -> None: ... diff --git a/stubs/kafka/coordinator/assignors/roundrobin.pyi b/stubs/kafka/coordinator/assignors/roundrobin.pyi new file mode 100644 index 0000000..fdca1cf --- /dev/null +++ b/stubs/kafka/coordinator/assignors/roundrobin.pyi @@ -0,0 +1,16 @@ +from kafka.coordinator.assignors.abstract import AbstractPartitionAssignor as AbstractPartitionAssignor +from kafka.coordinator.protocol import ConsumerProtocolMemberAssignment as ConsumerProtocolMemberAssignment, ConsumerProtocolMemberMetadata as ConsumerProtocolMemberMetadata +from kafka.structs import TopicPartition as TopicPartition +from typing import Any + +log: Any + +class RoundRobinPartitionAssignor(AbstractPartitionAssignor): + name: str = ... + version: int = ... + @classmethod + def assign(cls, cluster: Any, member_metadata: Any): ... + @classmethod + def metadata(cls, topics: Any): ... + @classmethod + def on_assignment(cls, assignment: Any) -> None: ... 
diff --git a/stubs/kafka/coordinator/base.pyi b/stubs/kafka/coordinator/base.pyi new file mode 100644 index 0000000..5eda235 --- /dev/null +++ b/stubs/kafka/coordinator/base.pyi @@ -0,0 +1,79 @@ +import abc +import threading +from kafka import errors as Errors +from kafka.coordinator.heartbeat import Heartbeat as Heartbeat +from kafka.future import Future as Future +from kafka.metrics import AnonMeasurable as AnonMeasurable +from kafka.metrics.stats import Avg as Avg, Count as Count, Max as Max, Rate as Rate +from kafka.protocol.commit import GroupCoordinatorRequest as GroupCoordinatorRequest, OffsetCommitRequest as OffsetCommitRequest +from kafka.protocol.group import HeartbeatRequest as HeartbeatRequest, JoinGroupRequest as JoinGroupRequest, LeaveGroupRequest as LeaveGroupRequest, SyncGroupRequest as SyncGroupRequest +from typing import Any, Optional + +log: Any + +class MemberState: + UNJOINED: str = ... + REBALANCING: str = ... + STABLE: str = ... + +class Generation: + generation_id: Any = ... + member_id: Any = ... + protocol: Any = ... + def __init__(self, generation_id: Any, member_id: Any, protocol: Any) -> None: ... + +class UnjoinedGroupException(Errors.KafkaError): + retriable: bool = ... + +class BaseCoordinator(metaclass=abc.ABCMeta): + DEFAULT_CONFIG: Any = ... + config: Any = ... + group_id: Any = ... + heartbeat: Any = ... + rejoin_needed: bool = ... + rejoining: bool = ... + state: Any = ... + join_future: Any = ... + coordinator_id: Any = ... + sensors: Any = ... + def __init__(self, client: Any, metrics: Any, **configs: Any) -> None: ... + @abc.abstractmethod + def protocol_type(self) -> Any: ... + @abc.abstractmethod + def group_protocols(self) -> Any: ... + def coordinator_unknown(self): ... + def coordinator(self): ... + def ensure_coordinator_ready(self) -> None: ... + def lookup_coordinator(self): ... + def need_rejoin(self): ... + def poll_heartbeat(self) -> None: ... + def time_to_next_heartbeat(self): ... + def ensure_active_group(self) -> None: ... + def coordinator_dead(self, error: Any) -> None: ... + def generation(self): ... + def reset_generation(self) -> None: ... + def request_rejoin(self) -> None: ... + def __del__(self) -> None: ... + def close(self) -> None: ... + def maybe_leave_group(self) -> None: ... + +class GroupCoordinatorMetrics: + heartbeat: Any = ... + metrics: Any = ... + metric_group_name: Any = ... + heartbeat_latency: Any = ... + join_latency: Any = ... + sync_latency: Any = ... + def __init__(self, heartbeat: Any, metrics: Any, prefix: Any, tags: Optional[Any] = ...): ... + +class HeartbeatThread(threading.Thread): + name: Any = ... + coordinator: Any = ... + enabled: bool = ... + closed: bool = ... + failed: Any = ... + def __init__(self, coordinator: Any) -> None: ... + def enable(self) -> None: ... + def disable(self) -> None: ... + def close(self) -> None: ... + def run(self) -> None: ... 
diff --git a/stubs/kafka/coordinator/consumer.pyi b/stubs/kafka/coordinator/consumer.pyi new file mode 100644 index 0000000..a3b08f7 --- /dev/null +++ b/stubs/kafka/coordinator/consumer.pyi @@ -0,0 +1,38 @@ +from kafka.coordinator.assignors.range import RangePartitionAssignor as RangePartitionAssignor +from kafka.coordinator.assignors.roundrobin import RoundRobinPartitionAssignor as RoundRobinPartitionAssignor +from kafka.coordinator.base import BaseCoordinator as BaseCoordinator, Generation as Generation +from kafka.coordinator.protocol import ConsumerProtocol as ConsumerProtocol +from kafka.future import Future as Future +from kafka.metrics import AnonMeasurable as AnonMeasurable +from kafka.metrics.stats import Avg as Avg, Count as Count, Max as Max, Rate as Rate +from kafka.protocol.commit import OffsetCommitRequest as OffsetCommitRequest, OffsetFetchRequest as OffsetFetchRequest +from kafka.structs import OffsetAndMetadata as OffsetAndMetadata, TopicPartition as TopicPartition +from typing import Any, Optional + +log: Any + +class ConsumerCoordinator(BaseCoordinator): + DEFAULT_CONFIG: Any = ... + config: Any = ... + auto_commit_interval: Any = ... + next_auto_commit_deadline: Any = ... + completed_offset_commits: Any = ... + consumer_sensors: Any = ... + def __init__(self, client: Any, subscription: Any, metrics: Any, **configs: Any) -> None: ... + def __del__(self) -> None: ... + def protocol_type(self): ... + def group_protocols(self): ... + def poll(self) -> None: ... + def time_to_next_poll(self): ... + def need_rejoin(self): ... + def refresh_committed_offsets_if_needed(self) -> None: ... + def fetch_committed_offsets(self, partitions: Any): ... + def close(self, autocommit: bool = ...) -> None: ... + def commit_offsets_async(self, offsets: Any, callback: Optional[Any] = ...): ... + def commit_offsets_sync(self, offsets: Any): ... + +class ConsumerCoordinatorMetrics: + metrics: Any = ... + metric_group_name: Any = ... + commit_latency: Any = ... + def __init__(self, metrics: Any, metric_group_prefix: Any, subscription: Any): ... diff --git a/stubs/kafka/coordinator/heartbeat.pyi b/stubs/kafka/coordinator/heartbeat.pyi new file mode 100644 index 0000000..b27c741 --- /dev/null +++ b/stubs/kafka/coordinator/heartbeat.pyi @@ -0,0 +1,20 @@ +from typing import Any + +class Heartbeat: + DEFAULT_CONFIG: Any = ... + config: Any = ... + last_send: Any = ... + last_receive: Any = ... + last_poll: Any = ... + last_reset: Any = ... + heartbeat_failed: Any = ... + def __init__(self, **configs: Any) -> None: ... + def poll(self) -> None: ... + def sent_heartbeat(self) -> None: ... + def fail_heartbeat(self) -> None: ... + def received_heartbeat(self) -> None: ... + def time_to_next_heartbeat(self): ... + def should_heartbeat(self): ... + def session_timeout_expired(self): ... + def reset_timeouts(self) -> None: ... + def poll_timeout_expired(self): ... diff --git a/stubs/kafka/coordinator/protocol.pyi b/stubs/kafka/coordinator/protocol.pyi new file mode 100644 index 0000000..50f7c89 --- /dev/null +++ b/stubs/kafka/coordinator/protocol.pyi @@ -0,0 +1,17 @@ +from kafka.protocol.struct import Struct as Struct +from kafka.protocol.types import Array as Array, Bytes as Bytes, Int16 as Int16, Int32 as Int32, Schema as Schema, String as String +from kafka.structs import TopicPartition as TopicPartition +from typing import Any + +class ConsumerProtocolMemberMetadata(Struct): + SCHEMA: Any = ... + +class ConsumerProtocolMemberAssignment(Struct): + SCHEMA: Any = ... + def partitions(self): ... 
+ +class ConsumerProtocol: + PROTOCOL_TYPE: str = ... + ASSIGNMENT_STRATEGIES: Any = ... + METADATA: Any = ... + ASSIGNMENT: Any = ... diff --git a/stubs/kafka/errors.pyi b/stubs/kafka/errors.pyi new file mode 100644 index 0000000..611dd6c --- /dev/null +++ b/stubs/kafka/errors.pyi @@ -0,0 +1,327 @@ +from typing import Any + +class KafkaError(RuntimeError): + retriable: bool = ... + invalid_metadata: bool = ... + +class IllegalStateError(KafkaError): ... +class IllegalArgumentError(KafkaError): ... + +class NoBrokersAvailable(KafkaError): + retriable: bool = ... + invalid_metadata: bool = ... + +class NodeNotReadyError(KafkaError): + retriable: bool = ... + +class KafkaProtocolError(KafkaError): + retriable: bool = ... + +class CorrelationIdError(KafkaProtocolError): + retriable: bool = ... + +class Cancelled(KafkaError): + retriable: bool = ... + +class TooManyInFlightRequests(KafkaError): + retriable: bool = ... + +class StaleMetadata(KafkaError): + retriable: bool = ... + invalid_metadata: bool = ... + +class MetadataEmptyBrokerList(KafkaError): + retriable: bool = ... + +class UnrecognizedBrokerVersion(KafkaError): ... +class IncompatibleBrokerVersion(KafkaError): ... + +class CommitFailedError(KafkaError): + def __init__(self, *args: Any, **kwargs: Any) -> None: ... + +class AuthenticationMethodNotSupported(KafkaError): ... + +class AuthenticationFailedError(KafkaError): + retriable: bool = ... + +class BrokerResponseError(KafkaError): + errno: Any = ... + message: Any = ... + description: Any = ... + +class NoError(BrokerResponseError): + errno: int = ... + message: str = ... + description: str = ... + +class UnknownError(BrokerResponseError): + errno: int = ... + message: str = ... + description: str = ... + +class OffsetOutOfRangeError(BrokerResponseError): + errno: int = ... + message: str = ... + description: str = ... + +class CorruptRecordException(BrokerResponseError): + errno: int = ... + message: str = ... + description: str = ... +InvalidMessageError = CorruptRecordException + +class UnknownTopicOrPartitionError(BrokerResponseError): + errno: int = ... + message: str = ... + description: str = ... + retriable: bool = ... + invalid_metadata: bool = ... + +class InvalidFetchRequestError(BrokerResponseError): + errno: int = ... + message: str = ... + description: str = ... + +class LeaderNotAvailableError(BrokerResponseError): + errno: int = ... + message: str = ... + description: str = ... + retriable: bool = ... + invalid_metadata: bool = ... + +class NotLeaderForPartitionError(BrokerResponseError): + errno: int = ... + message: str = ... + description: str = ... + retriable: bool = ... + invalid_metadata: bool = ... + +class RequestTimedOutError(BrokerResponseError): + errno: int = ... + message: str = ... + description: str = ... + retriable: bool = ... + +class BrokerNotAvailableError(BrokerResponseError): + errno: int = ... + message: str = ... + description: str = ... + +class ReplicaNotAvailableError(BrokerResponseError): + errno: int = ... + message: str = ... + description: str = ... + +class MessageSizeTooLargeError(BrokerResponseError): + errno: int = ... + message: str = ... + description: str = ... + +class StaleControllerEpochError(BrokerResponseError): + errno: int = ... + message: str = ... + description: str = ... + +class OffsetMetadataTooLargeError(BrokerResponseError): + errno: int = ... + message: str = ... + description: str = ... + +class StaleLeaderEpochCodeError(BrokerResponseError): + errno: int = ... + message: str = ... 
+ +class GroupLoadInProgressError(BrokerResponseError): + errno: int = ... + message: str = ... + description: str = ... + retriable: bool = ... + +class GroupCoordinatorNotAvailableError(BrokerResponseError): + errno: int = ... + message: str = ... + description: str = ... + retriable: bool = ... + +class NotCoordinatorForGroupError(BrokerResponseError): + errno: int = ... + message: str = ... + description: str = ... + retriable: bool = ... + +class InvalidTopicError(BrokerResponseError): + errno: int = ... + message: str = ... + description: str = ... + +class RecordListTooLargeError(BrokerResponseError): + errno: int = ... + message: str = ... + description: str = ... + +class NotEnoughReplicasError(BrokerResponseError): + errno: int = ... + message: str = ... + description: str = ... + retriable: bool = ... + +class NotEnoughReplicasAfterAppendError(BrokerResponseError): + errno: int = ... + message: str = ... + description: str = ... + retriable: bool = ... + +class InvalidRequiredAcksError(BrokerResponseError): + errno: int = ... + message: str = ... + description: str = ... + +class IllegalGenerationError(BrokerResponseError): + errno: int = ... + message: str = ... + description: str = ... + +class InconsistentGroupProtocolError(BrokerResponseError): + errno: int = ... + message: str = ... + description: str = ... + +class InvalidGroupIdError(BrokerResponseError): + errno: int = ... + message: str = ... + description: str = ... + +class UnknownMemberIdError(BrokerResponseError): + errno: int = ... + message: str = ... + description: str = ... + +class InvalidSessionTimeoutError(BrokerResponseError): + errno: int = ... + message: str = ... + description: str = ... + +class RebalanceInProgressError(BrokerResponseError): + errno: int = ... + message: str = ... + description: str = ... + +class InvalidCommitOffsetSizeError(BrokerResponseError): + errno: int = ... + message: str = ... + description: str = ... + +class TopicAuthorizationFailedError(BrokerResponseError): + errno: int = ... + message: str = ... + description: str = ... + +class GroupAuthorizationFailedError(BrokerResponseError): + errno: int = ... + message: str = ... + description: str = ... + +class ClusterAuthorizationFailedError(BrokerResponseError): + errno: int = ... + message: str = ... + description: str = ... + +class InvalidTimestampError(BrokerResponseError): + errno: int = ... + message: str = ... + description: str = ... + +class UnsupportedSaslMechanismError(BrokerResponseError): + errno: int = ... + message: str = ... + description: str = ... + +class IllegalSaslStateError(BrokerResponseError): + errno: int = ... + message: str = ... + description: str = ... + +class UnsupportedVersionError(BrokerResponseError): + errno: int = ... + message: str = ... + description: str = ... + +class TopicAlreadyExistsError(BrokerResponseError): + errno: int = ... + message: str = ... + description: str = ... + +class InvalidPartitionsError(BrokerResponseError): + errno: int = ... + message: str = ... + description: str = ... + +class InvalidReplicationFactorError(BrokerResponseError): + errno: int = ... + message: str = ... + description: str = ... + +class InvalidReplicationAssignmentError(BrokerResponseError): + errno: int = ... + message: str = ... + description: str = ... + +class InvalidConfigurationError(BrokerResponseError): + errno: int = ... + message: str = ... + description: str = ... + +class NotControllerError(BrokerResponseError): + errno: int = ... + message: str = ... + description: str = ... 
+ retriable: bool = ... + +class InvalidRequestError(BrokerResponseError): + errno: int = ... + message: str = ... + description: str = ... + +class UnsupportedForMessageFormatError(BrokerResponseError): + errno: int = ... + message: str = ... + description: str = ... + +class PolicyViolationError(BrokerResponseError): + errno: int = ... + message: str = ... + description: str = ... + +class SecurityDisabledError(BrokerResponseError): + errno: int = ... + message: str = ... + description: str = ... + +class KafkaUnavailableError(KafkaError): ... +class KafkaTimeoutError(KafkaError): ... + +class FailedPayloadsError(KafkaError): + payload: Any = ... + def __init__(self, payload: Any, *args: Any) -> None: ... + +class KafkaConnectionError(KafkaError): + retriable: bool = ... + invalid_metadata: bool = ... + +class ProtocolError(KafkaError): ... +class UnsupportedCodecError(KafkaError): ... +class KafkaConfigurationError(KafkaError): ... +class QuotaViolationError(KafkaError): ... + +class AsyncProducerQueueFull(KafkaError): + failed_msgs: Any = ... + def __init__(self, failed_msgs: Any, *args: Any) -> None: ... + +kafka_errors: Any + +def for_code(error_code: Any): ... +def check_error(response: Any) -> None: ... + +RETRY_BACKOFF_ERROR_TYPES: Any +RETRY_REFRESH_ERROR_TYPES: Any +RETRY_ERROR_TYPES: Any diff --git a/stubs/kafka/future.pyi b/stubs/kafka/future.pyi new file mode 100644 index 0000000..09b99d5 --- /dev/null +++ b/stubs/kafka/future.pyi @@ -0,0 +1,19 @@ +from typing import Any + +log: Any + +class Future: + error_on_callbacks: bool = ... + is_done: bool = ... + value: Any = ... + exception: Any = ... + def __init__(self) -> None: ... + def succeeded(self): ... + def failed(self): ... + def retriable(self): ... + def success(self, value: Any): ... + def failure(self, e: Any): ... + def add_callback(self, f: Any, *args: Any, **kwargs: Any): ... + def add_errback(self, f: Any, *args: Any, **kwargs: Any): ... + def add_both(self, f: Any, *args: Any, **kwargs: Any): ... + def chain(self, future: Any): ... diff --git a/stubs/kafka/metrics/__init__.pyi b/stubs/kafka/metrics/__init__.pyi new file mode 100644 index 0000000..68c2f58 --- /dev/null +++ b/stubs/kafka/metrics/__init__.pyi @@ -0,0 +1,8 @@ +from kafka.metrics.compound_stat import NamedMeasurable as NamedMeasurable +from kafka.metrics.dict_reporter import DictReporter as DictReporter +from kafka.metrics.kafka_metric import KafkaMetric as KafkaMetric +from kafka.metrics.measurable import AnonMeasurable as AnonMeasurable +from kafka.metrics.metric_config import MetricConfig as MetricConfig +from kafka.metrics.metric_name import MetricName as MetricName +from kafka.metrics.metrics import Metrics as Metrics +from kafka.metrics.quota import Quota as Quota diff --git a/stubs/kafka/metrics/compound_stat.pyi b/stubs/kafka/metrics/compound_stat.pyi new file mode 100644 index 0000000..521ab41 --- /dev/null +++ b/stubs/kafka/metrics/compound_stat.pyi @@ -0,0 +1,14 @@ +import abc +from kafka.metrics.stat import AbstractStat as AbstractStat +from typing import Any + +class AbstractCompoundStat(AbstractStat, metaclass=abc.ABCMeta): + __metaclass__: Any = ... + def stats(self) -> None: ... + +class NamedMeasurable: + def __init__(self, metric_name: Any, measurable_stat: Any) -> None: ... + @property + def name(self): ... + @property + def stat(self): ... 
diff --git a/stubs/kafka/metrics/dict_reporter.pyi b/stubs/kafka/metrics/dict_reporter.pyi new file mode 100644 index 0000000..7d33d0b --- /dev/null +++ b/stubs/kafka/metrics/dict_reporter.pyi @@ -0,0 +1,14 @@ +from kafka.metrics.metrics_reporter import AbstractMetricsReporter as AbstractMetricsReporter +from typing import Any + +logger: Any + +class DictReporter(AbstractMetricsReporter): + def __init__(self, prefix: str = ...) -> None: ... + def snapshot(self): ... + def init(self, metrics: Any) -> None: ... + def metric_change(self, metric: Any) -> None: ... + def metric_removal(self, metric: Any): ... + def get_category(self, metric: Any): ... + def configure(self, configs: Any) -> None: ... + def close(self) -> None: ... diff --git a/stubs/kafka/metrics/kafka_metric.pyi b/stubs/kafka/metrics/kafka_metric.pyi new file mode 100644 index 0000000..63345c0 --- /dev/null +++ b/stubs/kafka/metrics/kafka_metric.pyi @@ -0,0 +1,13 @@ +from typing import Any, Optional + +class KafkaMetric: + def __init__(self, metric_name: Any, measurable: Any, config: Any) -> None: ... + @property + def metric_name(self): ... + @property + def measurable(self): ... + @property + def config(self): ... + @config.setter + def config(self, config: Any) -> None: ... + def value(self, time_ms: Optional[Any] = ...): ... diff --git a/stubs/kafka/metrics/measurable.pyi b/stubs/kafka/metrics/measurable.pyi new file mode 100644 index 0000000..c029a17 --- /dev/null +++ b/stubs/kafka/metrics/measurable.pyi @@ -0,0 +1,10 @@ +import abc +from typing import Any + +class AbstractMeasurable(metaclass=abc.ABCMeta): + @abc.abstractmethod + def measure(self, config: Any, now: Any) -> Any: ... + +class AnonMeasurable(AbstractMeasurable): + def __init__(self, measure_fn: Any) -> None: ... + def measure(self, config: Any, now: Any): ... diff --git a/stubs/kafka/metrics/measurable_stat.pyi b/stubs/kafka/metrics/measurable_stat.pyi new file mode 100644 index 0000000..e88729f --- /dev/null +++ b/stubs/kafka/metrics/measurable_stat.pyi @@ -0,0 +1,7 @@ +import abc +from kafka.metrics.measurable import AbstractMeasurable as AbstractMeasurable +from kafka.metrics.stat import AbstractStat as AbstractStat +from typing import Any + +class AbstractMeasurableStat(AbstractStat, AbstractMeasurable, metaclass=abc.ABCMeta): + __metaclass__: Any = ... diff --git a/stubs/kafka/metrics/metric_config.pyi b/stubs/kafka/metrics/metric_config.pyi new file mode 100644 index 0000000..a996e03 --- /dev/null +++ b/stubs/kafka/metrics/metric_config.pyi @@ -0,0 +1,12 @@ +from typing import Any, Optional + +class MetricConfig: + quota: Any = ... + event_window: Any = ... + time_window_ms: Any = ... + tags: Any = ... + def __init__(self, quota: Optional[Any] = ..., samples: int = ..., event_window: Any = ..., time_window_ms: Any = ..., tags: Optional[Any] = ...) -> None: ... + @property + def samples(self): ... + @samples.setter + def samples(self, value: Any) -> None: ... diff --git a/stubs/kafka/metrics/metric_name.pyi b/stubs/kafka/metrics/metric_name.pyi new file mode 100644 index 0000000..224c742 --- /dev/null +++ b/stubs/kafka/metrics/metric_name.pyi @@ -0,0 +1,15 @@ +from typing import Any, Optional + +class MetricName: + def __init__(self, name: Any, group: Any, description: Optional[Any] = ..., tags: Optional[Any] = ...) -> None: ... + @property + def name(self): ... + @property + def group(self): ... + @property + def description(self): ... + @property + def tags(self): ... + def __hash__(self) -> Any: ... + def __eq__(self, other: Any) -> Any: ... 
+ def __ne__(self, other: Any) -> Any: ... diff --git a/stubs/kafka/metrics/metrics.pyi b/stubs/kafka/metrics/metrics.pyi new file mode 100644 index 0000000..2e1f38b --- /dev/null +++ b/stubs/kafka/metrics/metrics.pyi @@ -0,0 +1,24 @@ +from kafka.metrics import AnonMeasurable as AnonMeasurable, KafkaMetric as KafkaMetric, MetricConfig as MetricConfig, MetricName as MetricName +from kafka.metrics.stats import Sensor as Sensor +from typing import Any, Optional + +logger: Any + +class Metrics: + def __init__(self, default_config: Optional[Any] = ..., reporters: Optional[Any] = ..., enable_expiration: bool = ...): ... + @property + def config(self): ... + @property + def metrics(self): ... + def metric_name(self, name: Any, group: Any, description: str = ..., tags: Optional[Any] = ...): ... + def get_sensor(self, name: Any): ... + def sensor(self, name: Any, config: Optional[Any] = ..., inactive_sensor_expiration_time_seconds: Any = ..., parents: Optional[Any] = ...): ... + def remove_sensor(self, name: Any) -> None: ... + def add_metric(self, metric_name: Any, measurable: Any, config: Optional[Any] = ...) -> None: ... + def remove_metric(self, metric_name: Any): ... + def add_reporter(self, reporter: Any) -> None: ... + def register_metric(self, metric: Any) -> None: ... + class ExpireSensorTask: + @staticmethod + def run(metrics: Any) -> None: ... + def close(self) -> None: ... diff --git a/stubs/kafka/metrics/metrics_reporter.pyi b/stubs/kafka/metrics/metrics_reporter.pyi new file mode 100644 index 0000000..804d7fd --- /dev/null +++ b/stubs/kafka/metrics/metrics_reporter.pyi @@ -0,0 +1,15 @@ +import abc +from typing import Any + +class AbstractMetricsReporter(metaclass=abc.ABCMeta): + __metaclass__: Any = ... + @abc.abstractmethod + def init(self, metrics: Any) -> Any: ... + @abc.abstractmethod + def metric_change(self, metric: Any) -> Any: ... + @abc.abstractmethod + def metric_removal(self, metric: Any) -> Any: ... + @abc.abstractmethod + def configure(self, configs: Any) -> Any: ... + @abc.abstractmethod + def close(self) -> Any: ... diff --git a/stubs/kafka/metrics/quota.pyi b/stubs/kafka/metrics/quota.pyi new file mode 100644 index 0000000..7c45af7 --- /dev/null +++ b/stubs/kafka/metrics/quota.pyi @@ -0,0 +1,15 @@ +from typing import Any + +class Quota: + def __init__(self, bound: Any, is_upper: Any) -> None: ... + @staticmethod + def upper_bound(upper_bound: Any): ... + @staticmethod + def lower_bound(lower_bound: Any): ... + def is_upper_bound(self): ... + @property + def bound(self): ... + def is_acceptable(self, value: Any): ... + def __hash__(self) -> Any: ... + def __eq__(self, other: Any) -> Any: ... + def __ne__(self, other: Any) -> Any: ... diff --git a/stubs/kafka/metrics/stat.pyi b/stubs/kafka/metrics/stat.pyi new file mode 100644 index 0000000..215e6fb --- /dev/null +++ b/stubs/kafka/metrics/stat.pyi @@ -0,0 +1,7 @@ +import abc +from typing import Any + +class AbstractStat(metaclass=abc.ABCMeta): + __metaclass__: Any = ... + @abc.abstractmethod + def record(self, config: Any, value: Any, time_ms: Any) -> Any: ... 
diff --git a/stubs/kafka/metrics/stats/__init__.pyi b/stubs/kafka/metrics/stats/__init__.pyi new file mode 100644 index 0000000..c81d71a --- /dev/null +++ b/stubs/kafka/metrics/stats/__init__.pyi @@ -0,0 +1,10 @@ +from kafka.metrics.stats.avg import Avg as Avg +from kafka.metrics.stats.count import Count as Count +from kafka.metrics.stats.histogram import Histogram as Histogram +from kafka.metrics.stats.max_stat import Max as Max +from kafka.metrics.stats.min_stat import Min as Min +from kafka.metrics.stats.percentile import Percentile as Percentile +from kafka.metrics.stats.percentiles import Percentiles as Percentiles +from kafka.metrics.stats.rate import Rate as Rate +from kafka.metrics.stats.sensor import Sensor as Sensor +from kafka.metrics.stats.total import Total as Total diff --git a/stubs/kafka/metrics/stats/avg.pyi b/stubs/kafka/metrics/stats/avg.pyi new file mode 100644 index 0000000..f4472b2 --- /dev/null +++ b/stubs/kafka/metrics/stats/avg.pyi @@ -0,0 +1,7 @@ +from kafka.metrics.stats.sampled_stat import AbstractSampledStat as AbstractSampledStat +from typing import Any + +class Avg(AbstractSampledStat): + def __init__(self) -> None: ... + def update(self, sample: Any, config: Any, value: Any, now: Any) -> None: ... + def combine(self, samples: Any, config: Any, now: Any): ... diff --git a/stubs/kafka/metrics/stats/count.pyi b/stubs/kafka/metrics/stats/count.pyi new file mode 100644 index 0000000..6c473c0 --- /dev/null +++ b/stubs/kafka/metrics/stats/count.pyi @@ -0,0 +1,7 @@ +from kafka.metrics.stats.sampled_stat import AbstractSampledStat as AbstractSampledStat +from typing import Any + +class Count(AbstractSampledStat): + def __init__(self) -> None: ... + def update(self, sample: Any, config: Any, value: Any, now: Any) -> None: ... + def combine(self, samples: Any, config: Any, now: Any): ... diff --git a/stubs/kafka/metrics/stats/histogram.pyi b/stubs/kafka/metrics/stats/histogram.pyi new file mode 100644 index 0000000..610640d --- /dev/null +++ b/stubs/kafka/metrics/stats/histogram.pyi @@ -0,0 +1,21 @@ +from typing import Any + +class Histogram: + def __init__(self, bin_scheme: Any) -> None: ... + def record(self, value: Any) -> None: ... + def value(self, quantile: Any): ... + @property + def counts(self): ... + def clear(self) -> None: ... + class ConstantBinScheme: + def __init__(self, bins: Any, min_val: Any, max_val: Any) -> None: ... + @property + def bins(self): ... + def from_bin(self, b: Any): ... + def to_bin(self, x: Any): ... + class LinearBinScheme: + def __init__(self, num_bins: Any, max_val: Any) -> None: ... + @property + def bins(self): ... + def from_bin(self, b: Any): ... + def to_bin(self, x: Any): ... diff --git a/stubs/kafka/metrics/stats/max_stat.pyi b/stubs/kafka/metrics/stats/max_stat.pyi new file mode 100644 index 0000000..62ec9c7 --- /dev/null +++ b/stubs/kafka/metrics/stats/max_stat.pyi @@ -0,0 +1,7 @@ +from kafka.metrics.stats.sampled_stat import AbstractSampledStat as AbstractSampledStat +from typing import Any + +class Max(AbstractSampledStat): + def __init__(self) -> None: ... + def update(self, sample: Any, config: Any, value: Any, now: Any) -> None: ... + def combine(self, samples: Any, config: Any, now: Any): ... 
diff --git a/stubs/kafka/metrics/stats/min_stat.pyi b/stubs/kafka/metrics/stats/min_stat.pyi new file mode 100644 index 0000000..a286eeb --- /dev/null +++ b/stubs/kafka/metrics/stats/min_stat.pyi @@ -0,0 +1,7 @@ +from kafka.metrics.stats.sampled_stat import AbstractSampledStat as AbstractSampledStat +from typing import Any + +class Min(AbstractSampledStat): + def __init__(self) -> None: ... + def update(self, sample: Any, config: Any, value: Any, now: Any) -> None: ... + def combine(self, samples: Any, config: Any, now: Any): ... diff --git a/stubs/kafka/metrics/stats/percentile.pyi b/stubs/kafka/metrics/stats/percentile.pyi new file mode 100644 index 0000000..d33618c --- /dev/null +++ b/stubs/kafka/metrics/stats/percentile.pyi @@ -0,0 +1,8 @@ +from typing import Any + +class Percentile: + def __init__(self, metric_name: Any, percentile: Any) -> None: ... + @property + def name(self): ... + @property + def percentile(self): ... diff --git a/stubs/kafka/metrics/stats/percentiles.pyi b/stubs/kafka/metrics/stats/percentiles.pyi new file mode 100644 index 0000000..5e14216 --- /dev/null +++ b/stubs/kafka/metrics/stats/percentiles.pyi @@ -0,0 +1,21 @@ +from kafka.metrics import AnonMeasurable as AnonMeasurable, NamedMeasurable as NamedMeasurable +from kafka.metrics.compound_stat import AbstractCompoundStat as AbstractCompoundStat +from kafka.metrics.stats import Histogram as Histogram +from kafka.metrics.stats.sampled_stat import AbstractSampledStat as AbstractSampledStat +from typing import Any, Optional + +class BucketSizing: + CONSTANT: int = ... + LINEAR: int = ... + +class Percentiles(AbstractSampledStat, AbstractCompoundStat): + bin_scheme: Any = ... + def __init__(self, size_in_bytes: Any, bucketing: Any, max_val: Any, min_val: float = ..., percentiles: Optional[Any] = ...) -> None: ... + def stats(self): ... + def value(self, config: Any, now: Any, quantile: Any): ... + def combine(self, samples: Any, config: Any, now: Any): ... + def new_sample(self, time_ms: Any): ... + def update(self, sample: Any, config: Any, value: Any, time_ms: Any) -> None: ... + class HistogramSample(AbstractSampledStat.Sample): + histogram: Any = ... + def __init__(self, scheme: Any, now: Any) -> None: ... diff --git a/stubs/kafka/metrics/stats/rate.pyi b/stubs/kafka/metrics/stats/rate.pyi new file mode 100644 index 0000000..5c1cedb --- /dev/null +++ b/stubs/kafka/metrics/stats/rate.pyi @@ -0,0 +1,27 @@ +from kafka.metrics.measurable_stat import AbstractMeasurableStat as AbstractMeasurableStat +from kafka.metrics.stats.sampled_stat import AbstractSampledStat as AbstractSampledStat +from typing import Any, Optional + +class TimeUnit: + NANOSECONDS: Any = ... + MICROSECONDS: Any = ... + MILLISECONDS: Any = ... + SECONDS: Any = ... + MINUTES: Any = ... + HOURS: Any = ... + DAYS: Any = ... + @staticmethod + def get_name(time_unit: Any): ... + +class Rate(AbstractMeasurableStat): + def __init__(self, time_unit: Any = ..., sampled_stat: Optional[Any] = ...) -> None: ... + def unit_name(self): ... + def record(self, config: Any, value: Any, time_ms: Any) -> None: ... + def measure(self, config: Any, now: Any): ... + def window_size(self, config: Any, now: Any): ... + def convert(self, time_ms: Any): ... + +class SampledTotal(AbstractSampledStat): + def __init__(self, initial_value: Optional[Any] = ...) -> None: ... + def update(self, sample: Any, config: Any, value: Any, time_ms: Any) -> None: ... + def combine(self, samples: Any, config: Any, now: Any): ... 
diff --git a/stubs/kafka/metrics/stats/sampled_stat.pyi b/stubs/kafka/metrics/stats/sampled_stat.pyi new file mode 100644 index 0000000..61e0ed5 --- /dev/null +++ b/stubs/kafka/metrics/stats/sampled_stat.pyi @@ -0,0 +1,25 @@ +import abc +from kafka.metrics.measurable_stat import AbstractMeasurableStat as AbstractMeasurableStat +from typing import Any + +class AbstractSampledStat(AbstractMeasurableStat, metaclass=abc.ABCMeta): + __metaclass__: Any = ... + def __init__(self, initial_value: Any) -> None: ... + @abc.abstractmethod + def update(self, sample: Any, config: Any, value: Any, time_ms: Any) -> Any: ... + @abc.abstractmethod + def combine(self, samples: Any, config: Any, now: Any) -> Any: ... + def record(self, config: Any, value: Any, time_ms: Any) -> None: ... + def new_sample(self, time_ms: Any): ... + def measure(self, config: Any, now: Any): ... + def current(self, time_ms: Any): ... + def oldest(self, now: Any): ... + def purge_obsolete_samples(self, config: Any, now: Any) -> None: ... + class Sample: + initial_value: Any = ... + event_count: int = ... + last_window_ms: Any = ... + value: Any = ... + def __init__(self, initial_value: Any, now: Any) -> None: ... + def reset(self, now: Any) -> None: ... + def is_complete(self, time_ms: Any, config: Any): ... diff --git a/stubs/kafka/metrics/stats/sensor.pyi b/stubs/kafka/metrics/stats/sensor.pyi new file mode 100644 index 0000000..e4f9fcb --- /dev/null +++ b/stubs/kafka/metrics/stats/sensor.pyi @@ -0,0 +1,14 @@ +from kafka.errors import QuotaViolationError as QuotaViolationError +from kafka.metrics import KafkaMetric as KafkaMetric +from typing import Any, Optional + +class Sensor: + def __init__(self, registry: Any, name: Any, parents: Any, config: Any, inactive_sensor_expiration_time_seconds: Any) -> None: ... + @property + def name(self): ... + @property + def metrics(self): ... + def record(self, value: float = ..., time_ms: Optional[Any] = ...) -> None: ... + def add_compound(self, compound_stat: Any, config: Optional[Any] = ...) -> None: ... + def add(self, metric_name: Any, stat: Any, config: Optional[Any] = ...) -> None: ... + def has_expired(self): ... diff --git a/stubs/kafka/metrics/stats/total.pyi b/stubs/kafka/metrics/stats/total.pyi new file mode 100644 index 0000000..ef0714e --- /dev/null +++ b/stubs/kafka/metrics/stats/total.pyi @@ -0,0 +1,7 @@ +from kafka.metrics.measurable_stat import AbstractMeasurableStat as AbstractMeasurableStat +from typing import Any + +class Total(AbstractMeasurableStat): + def __init__(self, value: float = ...) -> None: ... + def record(self, config: Any, value: Any, now: Any) -> None: ... + def measure(self, config: Any, now: Any): ... diff --git a/stubs/kafka/oauth/__init__.pyi b/stubs/kafka/oauth/__init__.pyi new file mode 100644 index 0000000..1280708 --- /dev/null +++ b/stubs/kafka/oauth/__init__.pyi @@ -0,0 +1 @@ +from kafka.oauth.abstract import AbstractTokenProvider as AbstractTokenProvider diff --git a/stubs/kafka/oauth/abstract.pyi b/stubs/kafka/oauth/abstract.pyi new file mode 100644 index 0000000..56fa8a0 --- /dev/null +++ b/stubs/kafka/oauth/abstract.pyi @@ -0,0 +1,10 @@ +import abc +from typing import Any + +ABC: Any + +class AbstractTokenProvider(ABC, metaclass=abc.ABCMeta): + def __init__(self, **config: Any) -> None: ... + @abc.abstractmethod + def token(self) -> Any: ... + def extensions(self): ... 
diff --git a/stubs/kafka/partitioner/__init__.pyi b/stubs/kafka/partitioner/__init__.pyi new file mode 100644 index 0000000..5282ae7 --- /dev/null +++ b/stubs/kafka/partitioner/__init__.pyi @@ -0,0 +1 @@ +from kafka.partitioner.default import DefaultPartitioner as DefaultPartitioner, murmur2 as murmur2 diff --git a/stubs/kafka/partitioner/default.pyi b/stubs/kafka/partitioner/default.pyi new file mode 100644 index 0000000..7b1a632 --- /dev/null +++ b/stubs/kafka/partitioner/default.pyi @@ -0,0 +1,7 @@ +from typing import Any + +class DefaultPartitioner: + @classmethod + def __call__(cls, key: Any, all_partitions: Any, available: Any): ... + +def murmur2(data: Any): ... diff --git a/stubs/kafka/producer/__init__.pyi b/stubs/kafka/producer/__init__.pyi new file mode 100644 index 0000000..be14dee --- /dev/null +++ b/stubs/kafka/producer/__init__.pyi @@ -0,0 +1 @@ +from kafka.producer.kafka import KafkaProducer as KafkaProducer diff --git a/stubs/kafka/producer/buffer.pyi b/stubs/kafka/producer/buffer.pyi new file mode 100644 index 0000000..7133f2b --- /dev/null +++ b/stubs/kafka/producer/buffer.pyi @@ -0,0 +1,9 @@ +from kafka.metrics.stats import Rate as Rate +from typing import Any, Optional + +class SimpleBufferPool: + wait_time: Any = ... + def __init__(self, memory: Any, poolable_size: Any, metrics: Optional[Any] = ..., metric_group_prefix: str = ...) -> None: ... + def allocate(self, size: Any, max_time_to_block_ms: Any): ... + def deallocate(self, buf: Any) -> None: ... + def queued(self): ... diff --git a/stubs/kafka/producer/future.pyi b/stubs/kafka/producer/future.pyi new file mode 100644 index 0000000..4e2ef7b --- /dev/null +++ b/stubs/kafka/producer/future.pyi @@ -0,0 +1,17 @@ +from collections import namedtuple +from kafka.future import Future as Future +from typing import Any, Optional + +class FutureProduceResult(Future): + topic_partition: Any = ... + def __init__(self, topic_partition: Any) -> None: ... + def success(self, value: Any): ... + def failure(self, error: Any): ... + def wait(self, timeout: Optional[Any] = ...): ... + +class FutureRecordMetadata(Future): + args: Any = ... + def __init__(self, produce_future: Any, relative_offset: Any, timestamp_ms: Any, checksum: Any, serialized_key_size: Any, serialized_value_size: Any, serialized_header_size: Any) -> None: ... + def get(self, timeout: Optional[Any] = ...): ... 
+ +RecordMetadata = namedtuple('RecordMetadata', ['topic', 'partition', 'topic_partition', 'offset', 'timestamp', 'checksum', 'serialized_key_size', 'serialized_value_size', 'serialized_header_size']) diff --git a/stubs/kafka/producer/kafka.pyi b/stubs/kafka/producer/kafka.pyi new file mode 100644 index 0000000..2a9f0ad --- /dev/null +++ b/stubs/kafka/producer/kafka.pyi @@ -0,0 +1,27 @@ +from kafka.client_async import KafkaClient as KafkaClient +from kafka.codec import has_gzip as has_gzip, has_lz4 as has_lz4, has_snappy as has_snappy +from kafka.metrics import MetricConfig as MetricConfig, Metrics as Metrics +from kafka.partitioner.default import DefaultPartitioner as DefaultPartitioner +from kafka.producer.future import FutureProduceResult as FutureProduceResult, FutureRecordMetadata as FutureRecordMetadata +from kafka.producer.record_accumulator import AtomicInteger as AtomicInteger, RecordAccumulator as RecordAccumulator +from kafka.producer.sender import Sender as Sender +from kafka.record.default_records import DefaultRecordBatchBuilder as DefaultRecordBatchBuilder +from kafka.record.legacy_records import LegacyRecordBatchBuilder as LegacyRecordBatchBuilder +from kafka.serializer import Serializer as Serializer +from kafka.structs import TopicPartition as TopicPartition +from typing import Any, Optional + +log: Any +PRODUCER_CLIENT_ID_SEQUENCE: Any + +class KafkaProducer: + DEFAULT_CONFIG: Any = ... + config: Any = ... + def __init__(self, **configs: Any) -> None: ... + def bootstrap_connected(self): ... + def __del__(self) -> None: ... + def close(self, timeout: Optional[Any] = ...) -> None: ... + def partitions_for(self, topic: Any): ... + def send(self, topic: Any, value: Optional[Any] = ..., key: Optional[Any] = ..., headers: Optional[Any] = ..., partition: Optional[Any] = ..., timestamp_ms: Optional[Any] = ...): ... + def flush(self, timeout: Optional[Any] = ...) -> None: ... + def metrics(self, raw: bool = ...): ... diff --git a/stubs/kafka/producer/record_accumulator.pyi b/stubs/kafka/producer/record_accumulator.pyi new file mode 100644 index 0000000..5678fa7 --- /dev/null +++ b/stubs/kafka/producer/record_accumulator.pyi @@ -0,0 +1,56 @@ +from kafka.producer.buffer import SimpleBufferPool as SimpleBufferPool +from kafka.producer.future import FutureProduceResult as FutureProduceResult, FutureRecordMetadata as FutureRecordMetadata +from kafka.record.memory_records import MemoryRecordsBuilder as MemoryRecordsBuilder +from kafka.structs import TopicPartition as TopicPartition +from typing import Any, Optional + +log: Any + +class AtomicInteger: + def __init__(self, val: int = ...) -> None: ... + def increment(self): ... + def decrement(self): ... + def get(self): ... + +class ProducerBatch: + max_record_size: int = ... + created: Any = ... + drained: Any = ... + attempts: int = ... + last_attempt: Any = ... + last_append: Any = ... + records: Any = ... + topic_partition: Any = ... + produce_future: Any = ... + def __init__(self, tp: Any, records: Any, buffer: Any) -> None: ... + @property + def record_count(self): ... + def try_append(self, timestamp_ms: Any, key: Any, value: Any, headers: Any): ... + def done(self, base_offset: Optional[Any] = ..., timestamp_ms: Optional[Any] = ..., exception: Optional[Any] = ...) -> None: ... + def maybe_expire(self, request_timeout_ms: Any, retry_backoff_ms: Any, linger_ms: Any, is_full: Any): ... + def in_retry(self): ... + def set_retry(self) -> None: ... + def buffer(self): ... + +class RecordAccumulator: + DEFAULT_CONFIG: Any = ... 
+ config: Any = ... + muted: Any = ... + def __init__(self, **configs: Any) -> None: ... + def append(self, tp: Any, timestamp_ms: Any, key: Any, value: Any, headers: Any, max_time_to_block_ms: Any, estimated_size: int = ...): ... + def abort_expired_batches(self, request_timeout_ms: Any, cluster: Any): ... + def reenqueue(self, batch: Any) -> None: ... + def ready(self, cluster: Any): ... + def has_unsent(self): ... + def drain(self, cluster: Any, nodes: Any, max_size: Any): ... + def deallocate(self, batch: Any) -> None: ... + def begin_flush(self) -> None: ... + def await_flush_completion(self, timeout: Optional[Any] = ...) -> None: ... + def abort_incomplete_batches(self) -> None: ... + def close(self) -> None: ... + +class IncompleteProducerBatches: + def __init__(self) -> None: ... + def add(self, batch: Any): ... + def remove(self, batch: Any): ... + def all(self): ... diff --git a/stubs/kafka/producer/sender.pyi b/stubs/kafka/producer/sender.pyi new file mode 100644 index 0000000..c458d47 --- /dev/null +++ b/stubs/kafka/producer/sender.pyi @@ -0,0 +1,40 @@ +import threading +from kafka.metrics.measurable import AnonMeasurable as AnonMeasurable +from kafka.metrics.stats import Avg as Avg, Max as Max, Rate as Rate +from kafka.protocol.produce import ProduceRequest as ProduceRequest +from kafka.structs import TopicPartition as TopicPartition +from typing import Any, Optional + +log: Any + +class Sender(threading.Thread): + DEFAULT_CONFIG: Any = ... + config: Any = ... + name: Any = ... + def __init__(self, client: Any, metadata: Any, accumulator: Any, metrics: Any, **configs: Any) -> None: ... + def run(self) -> None: ... + def run_once(self) -> None: ... + def initiate_close(self) -> None: ... + def force_close(self) -> None: ... + def add_topic(self, topic: Any) -> None: ... + def wakeup(self) -> None: ... + def bootstrap_connected(self): ... + +class SenderMetrics: + metrics: Any = ... + batch_size_sensor: Any = ... + compression_rate_sensor: Any = ... + queue_time_sensor: Any = ... + produce_throttle_time_sensor: Any = ... + records_per_request_sensor: Any = ... + byte_rate_sensor: Any = ... + retry_sensor: Any = ... + error_sensor: Any = ... + max_record_size_sensor: Any = ... + def __init__(self, metrics: Any, client: Any, metadata: Any): ... + def add_metric(self, metric_name: Any, measurable: Any, group_name: str = ..., description: Optional[Any] = ..., tags: Optional[Any] = ..., sensor_name: Optional[Any] = ...) -> None: ... + def maybe_register_topic_metrics(self, topic: Any): ... + def update_produce_request_metrics(self, batches_map: Any) -> None: ... + def record_retries(self, topic: Any, count: Any) -> None: ... + def record_errors(self, topic: Any, count: Any) -> None: ... + def record_throttle_time(self, throttle_time_ms: Any, node: Optional[Any] = ...) -> None: ... diff --git a/stubs/kafka/protocol/__init__.pyi b/stubs/kafka/protocol/__init__.pyi new file mode 100644 index 0000000..e07f07f --- /dev/null +++ b/stubs/kafka/protocol/__init__.pyi @@ -0,0 +1,3 @@ +from typing import Any + +API_KEYS: Any diff --git a/stubs/kafka/protocol/abstract.pyi b/stubs/kafka/protocol/abstract.pyi new file mode 100644 index 0000000..512edf3 --- /dev/null +++ b/stubs/kafka/protocol/abstract.pyi @@ -0,0 +1,11 @@ +import abc +from typing import Any + +class AbstractType(metaclass=abc.ABCMeta): + __metaclass__: Any = ... + @abc.abstractmethod + def encode(cls, value: Any) -> Any: ... + @abc.abstractmethod + def decode(cls, data: Any) -> Any: ... 
+ @classmethod + def repr(cls, value: Any): ... diff --git a/stubs/kafka/protocol/admin.pyi b/stubs/kafka/protocol/admin.pyi new file mode 100644 index 0000000..d5bf020 --- /dev/null +++ b/stubs/kafka/protocol/admin.pyi @@ -0,0 +1,427 @@ +from kafka.protocol.api import Request as Request, Response as Response +from kafka.protocol.types import Array as Array, Boolean as Boolean, Bytes as Bytes, Int16 as Int16, Int32 as Int32, Int64 as Int64, Int8 as Int8, Schema as Schema, String as String +from typing import Any + +class ApiVersionResponse_v0(Response): + API_KEY: int = ... + API_VERSION: int = ... + SCHEMA: Any = ... + +class ApiVersionResponse_v1(Response): + API_KEY: int = ... + API_VERSION: int = ... + SCHEMA: Any = ... + +class ApiVersionResponse_v2(Response): + API_KEY: int = ... + API_VERSION: int = ... + SCHEMA: Any = ... + +class ApiVersionRequest_v0(Request): + API_KEY: int = ... + API_VERSION: int = ... + RESPONSE_TYPE: Any = ... + SCHEMA: Any = ... + +class ApiVersionRequest_v1(Request): + API_KEY: int = ... + API_VERSION: int = ... + RESPONSE_TYPE: Any = ... + SCHEMA: Any = ... + +class ApiVersionRequest_v2(Request): + API_KEY: int = ... + API_VERSION: int = ... + RESPONSE_TYPE: Any = ... + SCHEMA: Any = ... + +ApiVersionRequest: Any +ApiVersionResponse: Any + +class CreateTopicsResponse_v0(Response): + API_KEY: int = ... + API_VERSION: int = ... + SCHEMA: Any = ... + +class CreateTopicsResponse_v1(Response): + API_KEY: int = ... + API_VERSION: int = ... + SCHEMA: Any = ... + +class CreateTopicsResponse_v2(Response): + API_KEY: int = ... + API_VERSION: int = ... + SCHEMA: Any = ... + +class CreateTopicsResponse_v3(Response): + API_KEY: int = ... + API_VERSION: int = ... + SCHEMA: Any = ... + +class CreateTopicsRequest_v0(Request): + API_KEY: int = ... + API_VERSION: int = ... + RESPONSE_TYPE: Any = ... + SCHEMA: Any = ... + +class CreateTopicsRequest_v1(Request): + API_KEY: int = ... + API_VERSION: int = ... + RESPONSE_TYPE: Any = ... + SCHEMA: Any = ... + +class CreateTopicsRequest_v2(Request): + API_KEY: int = ... + API_VERSION: int = ... + RESPONSE_TYPE: Any = ... + SCHEMA: Any = ... + +class CreateTopicsRequest_v3(Request): + API_KEY: int = ... + API_VERSION: int = ... + RESPONSE_TYPE: Any = ... + SCHEMA: Any = ... + +CreateTopicsRequest: Any +CreateTopicsResponse: Any + +class DeleteTopicsResponse_v0(Response): + API_KEY: int = ... + API_VERSION: int = ... + SCHEMA: Any = ... + +class DeleteTopicsResponse_v1(Response): + API_KEY: int = ... + API_VERSION: int = ... + SCHEMA: Any = ... + +class DeleteTopicsResponse_v2(Response): + API_KEY: int = ... + API_VERSION: int = ... + SCHEMA: Any = ... + +class DeleteTopicsResponse_v3(Response): + API_KEY: int = ... + API_VERSION: int = ... + SCHEMA: Any = ... + +class DeleteTopicsRequest_v0(Request): + API_KEY: int = ... + API_VERSION: int = ... + RESPONSE_TYPE: Any = ... + SCHEMA: Any = ... + +class DeleteTopicsRequest_v1(Request): + API_KEY: int = ... + API_VERSION: int = ... + RESPONSE_TYPE: Any = ... + SCHEMA: Any = ... + +class DeleteTopicsRequest_v2(Request): + API_KEY: int = ... + API_VERSION: int = ... + RESPONSE_TYPE: Any = ... + SCHEMA: Any = ... + +class DeleteTopicsRequest_v3(Request): + API_KEY: int = ... + API_VERSION: int = ... + RESPONSE_TYPE: Any = ... + SCHEMA: Any = ... + +DeleteTopicsRequest: Any +DeleteTopicsResponse: Any + +class ListGroupsResponse_v0(Response): + API_KEY: int = ... + API_VERSION: int = ... + SCHEMA: Any = ... + +class ListGroupsResponse_v1(Response): + API_KEY: int = ... 
+ API_VERSION: int = ... + SCHEMA: Any = ... + +class ListGroupsResponse_v2(Response): + API_KEY: int = ... + API_VERSION: int = ... + SCHEMA: Any = ... + +class ListGroupsRequest_v0(Request): + API_KEY: int = ... + API_VERSION: int = ... + RESPONSE_TYPE: Any = ... + SCHEMA: Any = ... + +class ListGroupsRequest_v1(Request): + API_KEY: int = ... + API_VERSION: int = ... + RESPONSE_TYPE: Any = ... + SCHEMA: Any = ... + +class ListGroupsRequest_v2(Request): + API_KEY: int = ... + API_VERSION: int = ... + RESPONSE_TYPE: Any = ... + SCHEMA: Any = ... + +ListGroupsRequest: Any +ListGroupsResponse: Any + +class DescribeGroupsResponse_v0(Response): + API_KEY: int = ... + API_VERSION: int = ... + SCHEMA: Any = ... + +class DescribeGroupsResponse_v1(Response): + API_KEY: int = ... + API_VERSION: int = ... + SCHEMA: Any = ... + +class DescribeGroupsResponse_v2(Response): + API_KEY: int = ... + API_VERSION: int = ... + SCHEMA: Any = ... + +class DescribeGroupsResponse_v3(Response): + API_KEY: int = ... + API_VERSION: int = ... + SCHEMA: Any = ... + +class DescribeGroupsRequest_v0(Request): + API_KEY: int = ... + API_VERSION: int = ... + RESPONSE_TYPE: Any = ... + SCHEMA: Any = ... + +class DescribeGroupsRequest_v1(Request): + API_KEY: int = ... + API_VERSION: int = ... + RESPONSE_TYPE: Any = ... + SCHEMA: Any = ... + +class DescribeGroupsRequest_v2(Request): + API_KEY: int = ... + API_VERSION: int = ... + RESPONSE_TYPE: Any = ... + SCHEMA: Any = ... + +class DescribeGroupsRequest_v3(Request): + API_KEY: int = ... + API_VERSION: int = ... + RESPONSE_TYPE: Any = ... + SCHEMA: Any = ... + +DescribeGroupsRequest: Any +DescribeGroupsResponse: Any + +class SaslHandShakeResponse_v0(Response): + API_KEY: int = ... + API_VERSION: int = ... + SCHEMA: Any = ... + +class SaslHandShakeResponse_v1(Response): + API_KEY: int = ... + API_VERSION: int = ... + SCHEMA: Any = ... + +class SaslHandShakeRequest_v0(Request): + API_KEY: int = ... + API_VERSION: int = ... + RESPONSE_TYPE: Any = ... + SCHEMA: Any = ... + +class SaslHandShakeRequest_v1(Request): + API_KEY: int = ... + API_VERSION: int = ... + RESPONSE_TYPE: Any = ... + SCHEMA: Any = ... + +SaslHandShakeRequest: Any +SaslHandShakeResponse: Any + +class DescribeAclsResponse_v0(Response): + API_KEY: int = ... + API_VERSION: int = ... + SCHEMA: Any = ... + +class DescribeAclsResponse_v1(Response): + API_KEY: int = ... + API_VERSION: int = ... + SCHEMA: Any = ... + +class DescribeAclsRequest_v0(Request): + API_KEY: int = ... + API_VERSION: int = ... + RESPONSE_TYPE: Any = ... + SCHEMA: Any = ... + +class DescribeAclsRequest_v1(Request): + API_KEY: int = ... + API_VERSION: int = ... + RESPONSE_TYPE: Any = ... + SCHEMA: Any = ... + +DescribeAclsRequest: Any +DescribeAclsResponse: Any + +class CreateAclsResponse_v0(Response): + API_KEY: int = ... + API_VERSION: int = ... + SCHEMA: Any = ... + +class CreateAclsResponse_v1(Response): + API_KEY: int = ... + API_VERSION: int = ... + SCHEMA: Any = ... + +class CreateAclsRequest_v0(Request): + API_KEY: int = ... + API_VERSION: int = ... + RESPONSE_TYPE: Any = ... + SCHEMA: Any = ... + +class CreateAclsRequest_v1(Request): + API_KEY: int = ... + API_VERSION: int = ... + RESPONSE_TYPE: Any = ... + SCHEMA: Any = ... + +CreateAclsRequest: Any +CreateAclsResponse: Any + +class DeleteAclsResponse_v0(Response): + API_KEY: int = ... + API_VERSION: int = ... + SCHEMA: Any = ... + +class DeleteAclsResponse_v1(Response): + API_KEY: int = ... + API_VERSION: int = ... + SCHEMA: Any = ... 
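Since the stubs can only type the module-level aliases above as `Any`, a short, hedged sketch of how they are usually consumed may help: in kafka-python these names (CreateTopicsRequest, DeleteTopicsRequest, and so on) are, to the best of my knowledge, sequences of the versioned classes indexed by API version. The helper below is illustrative only and not part of the patch.

```python
# Illustrative only, not part of the patch.  Assumes kafka-python exposes the
# module-level aliases as version-indexed sequences of the *_vN classes,
# which is why the stubs leave them typed as Any.
from kafka.protocol.admin import CreateTopicsRequest

def pick_create_topics_request(api_version: int):
    # Clamp to the newest version this kafka-python build knows about.
    idx = min(api_version, len(CreateTopicsRequest) - 1)
    request_cls = CreateTopicsRequest[idx]
    # Every concrete request class carries the constants declared in the stubs.
    return request_cls.API_KEY, request_cls.API_VERSION, request_cls.RESPONSE_TYPE
```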
+ +class DeleteAclsRequest_v0(Request): + API_KEY: int = ... + API_VERSION: int = ... + RESPONSE_TYPE: Any = ... + SCHEMA: Any = ... + +class DeleteAclsRequest_v1(Request): + API_KEY: int = ... + API_VERSION: int = ... + RESPONSE_TYPE: Any = ... + SCHEMA: Any = ... + +DeleteAclsRequest: Any +DeleteAclsResponse: Any + +class AlterConfigsResponse_v0(Response): + API_KEY: int = ... + API_VERSION: int = ... + SCHEMA: Any = ... + +class AlterConfigsResponse_v1(Response): + API_KEY: int = ... + API_VERSION: int = ... + SCHEMA: Any = ... + +class AlterConfigsRequest_v0(Request): + API_KEY: int = ... + API_VERSION: int = ... + RESPONSE_TYPE: Any = ... + SCHEMA: Any = ... + +class AlterConfigsRequest_v1(Request): + API_KEY: int = ... + API_VERSION: int = ... + RESPONSE_TYPE: Any = ... + SCHEMA: Any = ... + +AlterConfigsRequest: Any +AlterConfigsResponse: Any + +class DescribeConfigsResponse_v0(Response): + API_KEY: int = ... + API_VERSION: int = ... + SCHEMA: Any = ... + +class DescribeConfigsResponse_v1(Response): + API_KEY: int = ... + API_VERSION: int = ... + SCHEMA: Any = ... + +class DescribeConfigsResponse_v2(Response): + API_KEY: int = ... + API_VERSION: int = ... + SCHEMA: Any = ... + +class DescribeConfigsRequest_v0(Request): + API_KEY: int = ... + API_VERSION: int = ... + RESPONSE_TYPE: Any = ... + SCHEMA: Any = ... + +class DescribeConfigsRequest_v1(Request): + API_KEY: int = ... + API_VERSION: int = ... + RESPONSE_TYPE: Any = ... + SCHEMA: Any = ... + +class DescribeConfigsRequest_v2(Request): + API_KEY: int = ... + API_VERSION: int = ... + RESPONSE_TYPE: Any = ... + SCHEMA: Any = ... + +DescribeConfigsRequest: Any +DescribeConfigsResponse: Any + +class SaslAuthenticateResponse_v0(Response): + API_KEY: int = ... + API_VERSION: int = ... + SCHEMA: Any = ... + +class SaslAuthenticateResponse_v1(Response): + API_KEY: int = ... + API_VERSION: int = ... + SCHEMA: Any = ... + +class SaslAuthenticateRequest_v0(Request): + API_KEY: int = ... + API_VERSION: int = ... + RESPONSE_TYPE: Any = ... + SCHEMA: Any = ... + +class SaslAuthenticateRequest_v1(Request): + API_KEY: int = ... + API_VERSION: int = ... + RESPONSE_TYPE: Any = ... + SCHEMA: Any = ... + +SaslAuthenticateRequest: Any +SaslAuthenticateResponse: Any + +class CreatePartitionsResponse_v0(Response): + API_KEY: int = ... + API_VERSION: int = ... + SCHEMA: Any = ... + +class CreatePartitionsResponse_v1(Response): + API_KEY: int = ... + API_VERSION: int = ... + SCHEMA: Any = ... + +class CreatePartitionsRequest_v0(Request): + API_KEY: int = ... + API_VERSION: int = ... + RESPONSE_TYPE: Any = ... + SCHEMA: Any = ... + +class CreatePartitionsRequest_v1(Request): + API_KEY: int = ... + API_VERSION: int = ... + SCHEMA: Any = ... + RESPONSE_TYPE: Any = ... + +CreatePartitionsRequest: Any +CreatePartitionsResponse: Any diff --git a/stubs/kafka/protocol/api.pyi b/stubs/kafka/protocol/api.pyi new file mode 100644 index 0000000..abd5c1a --- /dev/null +++ b/stubs/kafka/protocol/api.pyi @@ -0,0 +1,38 @@ +import abc +from kafka.protocol.struct import Struct as Struct +from kafka.protocol.types import Array as Array, Int16 as Int16, Int32 as Int32, Schema as Schema, String as String +from typing import Any + +class RequestHeader(Struct): + SCHEMA: Any = ... + def __init__(self, request: Any, correlation_id: int = ..., client_id: str = ...) -> None: ... + +class Request(Struct, metaclass=abc.ABCMeta): + __metaclass__: Any = ... + @property + @abc.abstractmethod + def API_KEY(self) -> Any: ... 
+ @property + @abc.abstractmethod + def API_VERSION(self) -> Any: ... + @property + @abc.abstractmethod + def SCHEMA(self) -> Any: ... + @property + @abc.abstractmethod + def RESPONSE_TYPE(self) -> Any: ... + def expect_response(self): ... + def to_object(self): ... + +class Response(Struct, metaclass=abc.ABCMeta): + __metaclass__: Any = ... + @property + @abc.abstractmethod + def API_KEY(self) -> Any: ... + @property + @abc.abstractmethod + def API_VERSION(self) -> Any: ... + @property + @abc.abstractmethod + def SCHEMA(self) -> Any: ... + def to_object(self): ... diff --git a/stubs/kafka/protocol/commit.pyi b/stubs/kafka/protocol/commit.pyi new file mode 100644 index 0000000..5dee04f --- /dev/null +++ b/stubs/kafka/protocol/commit.pyi @@ -0,0 +1,124 @@ +from kafka.protocol.api import Request as Request, Response as Response +from kafka.protocol.types import Array as Array, Int16 as Int16, Int32 as Int32, Int64 as Int64, Int8 as Int8, Schema as Schema, String as String +from typing import Any + +class OffsetCommitResponse_v0(Response): + API_KEY: int = ... + API_VERSION: int = ... + SCHEMA: Any = ... + +class OffsetCommitResponse_v1(Response): + API_KEY: int = ... + API_VERSION: int = ... + SCHEMA: Any = ... + +class OffsetCommitResponse_v2(Response): + API_KEY: int = ... + API_VERSION: int = ... + SCHEMA: Any = ... + +class OffsetCommitResponse_v3(Response): + API_KEY: int = ... + API_VERSION: int = ... + SCHEMA: Any = ... + +class OffsetCommitRequest_v0(Request): + API_KEY: int = ... + API_VERSION: int = ... + RESPONSE_TYPE: Any = ... + SCHEMA: Any = ... + +class OffsetCommitRequest_v1(Request): + API_KEY: int = ... + API_VERSION: int = ... + RESPONSE_TYPE: Any = ... + SCHEMA: Any = ... + +class OffsetCommitRequest_v2(Request): + API_KEY: int = ... + API_VERSION: int = ... + RESPONSE_TYPE: Any = ... + SCHEMA: Any = ... + DEFAULT_GENERATION_ID: int = ... + DEFAULT_RETENTION_TIME: int = ... + +class OffsetCommitRequest_v3(Request): + API_KEY: int = ... + API_VERSION: int = ... + RESPONSE_TYPE: Any = ... + SCHEMA: Any = ... + +OffsetCommitRequest: Any +OffsetCommitResponse: Any + +class OffsetFetchResponse_v0(Response): + API_KEY: int = ... + API_VERSION: int = ... + SCHEMA: Any = ... + +class OffsetFetchResponse_v1(Response): + API_KEY: int = ... + API_VERSION: int = ... + SCHEMA: Any = ... + +class OffsetFetchResponse_v2(Response): + API_KEY: int = ... + API_VERSION: int = ... + SCHEMA: Any = ... + +class OffsetFetchResponse_v3(Response): + API_KEY: int = ... + API_VERSION: int = ... + SCHEMA: Any = ... + +class OffsetFetchRequest_v0(Request): + API_KEY: int = ... + API_VERSION: int = ... + RESPONSE_TYPE: Any = ... + SCHEMA: Any = ... + +class OffsetFetchRequest_v1(Request): + API_KEY: int = ... + API_VERSION: int = ... + RESPONSE_TYPE: Any = ... + SCHEMA: Any = ... + +class OffsetFetchRequest_v2(Request): + API_KEY: int = ... + API_VERSION: int = ... + RESPONSE_TYPE: Any = ... + SCHEMA: Any = ... + +class OffsetFetchRequest_v3(Request): + API_KEY: int = ... + API_VERSION: int = ... + RESPONSE_TYPE: Any = ... + SCHEMA: Any = ... + +OffsetFetchRequest: Any +OffsetFetchResponse: Any + +class GroupCoordinatorResponse_v0(Response): + API_KEY: int = ... + API_VERSION: int = ... + SCHEMA: Any = ... + +class GroupCoordinatorResponse_v1(Response): + API_KEY: int = ... + API_VERSION: int = ... + SCHEMA: Any = ... + +class GroupCoordinatorRequest_v0(Request): + API_KEY: int = ... + API_VERSION: int = ... + RESPONSE_TYPE: Any = ... + SCHEMA: Any = ... 
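For orientation, here is a minimal sketch of the contract the Request/Response stubs in stubs/kafka/protocol/api.pyi describe: a concrete request supplies API_KEY, API_VERSION, SCHEMA and RESPONSE_TYPE. The ExamplePing classes and API key 9999 are hypothetical; they only mirror the shape of real definitions such as the OffsetCommit classes above.

```python
# Hypothetical request/response pair, shown only to illustrate the stubbed
# Request/Response contract.  API key 9999 is not a real Kafka API.
from kafka.protocol.api import Request, Response
from kafka.protocol.types import Int16, Int32, Schema, String

class ExamplePingResponse_v0(Response):
    API_KEY = 9999
    API_VERSION = 0
    SCHEMA = Schema(('error_code', Int16))

class ExamplePingRequest_v0(Request):
    API_KEY = 9999
    API_VERSION = 0
    RESPONSE_TYPE = ExamplePingResponse_v0
    SCHEMA = Schema(('client_note', String('utf-8')), ('timeout_ms', Int32))

request = ExamplePingRequest_v0(client_note='hello', timeout_ms=1000)
# Instances get a bound encode() that serialises the fields per SCHEMA.
payload = request.encode()
```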
+ +class GroupCoordinatorRequest_v1(Request): + API_KEY: int = ... + API_VERSION: int = ... + RESPONSE_TYPE: Any = ... + SCHEMA: Any = ... + +GroupCoordinatorRequest: Any +GroupCoordinatorResponse: Any diff --git a/stubs/kafka/protocol/fetch.pyi b/stubs/kafka/protocol/fetch.pyi new file mode 100644 index 0000000..0f24661 --- /dev/null +++ b/stubs/kafka/protocol/fetch.pyi @@ -0,0 +1,83 @@ +from kafka.protocol.api import Request as Request, Response as Response +from kafka.protocol.types import Array as Array, Bytes as Bytes, Int16 as Int16, Int32 as Int32, Int64 as Int64, Int8 as Int8, Schema as Schema, String as String +from typing import Any + +class FetchResponse_v0(Response): + API_KEY: int = ... + API_VERSION: int = ... + SCHEMA: Any = ... + +class FetchResponse_v1(Response): + API_KEY: int = ... + API_VERSION: int = ... + SCHEMA: Any = ... + +class FetchResponse_v2(Response): + API_KEY: int = ... + API_VERSION: int = ... + SCHEMA: Any = ... + +class FetchResponse_v3(Response): + API_KEY: int = ... + API_VERSION: int = ... + SCHEMA: Any = ... + +class FetchResponse_v4(Response): + API_KEY: int = ... + API_VERSION: int = ... + SCHEMA: Any = ... + +class FetchResponse_v5(Response): + API_KEY: int = ... + API_VERSION: int = ... + SCHEMA: Any = ... + +class FetchResponse_v6(Response): + API_KEY: int = ... + API_VERSION: int = ... + SCHEMA: Any = ... + +class FetchRequest_v0(Request): + API_KEY: int = ... + API_VERSION: int = ... + RESPONSE_TYPE: Any = ... + SCHEMA: Any = ... + +class FetchRequest_v1(Request): + API_KEY: int = ... + API_VERSION: int = ... + RESPONSE_TYPE: Any = ... + SCHEMA: Any = ... + +class FetchRequest_v2(Request): + API_KEY: int = ... + API_VERSION: int = ... + RESPONSE_TYPE: Any = ... + SCHEMA: Any = ... + +class FetchRequest_v3(Request): + API_KEY: int = ... + API_VERSION: int = ... + RESPONSE_TYPE: Any = ... + SCHEMA: Any = ... + +class FetchRequest_v4(Request): + API_KEY: int = ... + API_VERSION: int = ... + RESPONSE_TYPE: Any = ... + SCHEMA: Any = ... + +class FetchRequest_v5(Request): + API_KEY: int = ... + API_VERSION: int = ... + RESPONSE_TYPE: Any = ... + SCHEMA: Any = ... + +class FetchRequest_v6(Request): + API_KEY: int = ... + API_VERSION: int = ... + RESPONSE_TYPE: Any = ... + SCHEMA: Any = ... + +FetchRequest: Any +FetchResponse: Any diff --git a/stubs/kafka/protocol/frame.pyi b/stubs/kafka/protocol/frame.pyi new file mode 100644 index 0000000..cbd99a4 --- /dev/null +++ b/stubs/kafka/protocol/frame.pyi @@ -0,0 +1,8 @@ +from typing import Any, Optional + +class KafkaBytes(bytearray): + def __init__(self, size: Any) -> None: ... + def read(self, nbytes: Optional[Any] = ...): ... + def write(self, data: Any) -> None: ... + def seek(self, idx: Any) -> None: ... + def tell(self): ... diff --git a/stubs/kafka/protocol/group.pyi b/stubs/kafka/protocol/group.pyi new file mode 100644 index 0000000..1becdb6 --- /dev/null +++ b/stubs/kafka/protocol/group.pyi @@ -0,0 +1,124 @@ +from kafka.protocol.api import Request as Request, Response as Response +from kafka.protocol.struct import Struct as Struct +from kafka.protocol.types import Array as Array, Bytes as Bytes, Int16 as Int16, Int32 as Int32, Schema as Schema, String as String +from typing import Any + +class JoinGroupResponse_v0(Response): + API_KEY: int = ... + API_VERSION: int = ... + SCHEMA: Any = ... + +class JoinGroupResponse_v1(Response): + API_KEY: int = ... + API_VERSION: int = ... + SCHEMA: Any = ... + +class JoinGroupResponse_v2(Response): + API_KEY: int = ... + API_VERSION: int = ... 
+ SCHEMA: Any = ... + +class JoinGroupRequest_v0(Request): + API_KEY: int = ... + API_VERSION: int = ... + RESPONSE_TYPE: Any = ... + SCHEMA: Any = ... + UNKNOWN_MEMBER_ID: str = ... + +class JoinGroupRequest_v1(Request): + API_KEY: int = ... + API_VERSION: int = ... + RESPONSE_TYPE: Any = ... + SCHEMA: Any = ... + UNKNOWN_MEMBER_ID: str = ... + +class JoinGroupRequest_v2(Request): + API_KEY: int = ... + API_VERSION: int = ... + RESPONSE_TYPE: Any = ... + SCHEMA: Any = ... + UNKNOWN_MEMBER_ID: str = ... + +JoinGroupRequest: Any +JoinGroupResponse: Any + +class ProtocolMetadata(Struct): + SCHEMA: Any = ... + +class SyncGroupResponse_v0(Response): + API_KEY: int = ... + API_VERSION: int = ... + SCHEMA: Any = ... + +class SyncGroupResponse_v1(Response): + API_KEY: int = ... + API_VERSION: int = ... + SCHEMA: Any = ... + +class SyncGroupRequest_v0(Request): + API_KEY: int = ... + API_VERSION: int = ... + RESPONSE_TYPE: Any = ... + SCHEMA: Any = ... + +class SyncGroupRequest_v1(Request): + API_KEY: int = ... + API_VERSION: int = ... + RESPONSE_TYPE: Any = ... + SCHEMA: Any = ... + +SyncGroupRequest: Any +SyncGroupResponse: Any + +class MemberAssignment(Struct): + SCHEMA: Any = ... + +class HeartbeatResponse_v0(Response): + API_KEY: int = ... + API_VERSION: int = ... + SCHEMA: Any = ... + +class HeartbeatResponse_v1(Response): + API_KEY: int = ... + API_VERSION: int = ... + SCHEMA: Any = ... + +class HeartbeatRequest_v0(Request): + API_KEY: int = ... + API_VERSION: int = ... + RESPONSE_TYPE: Any = ... + SCHEMA: Any = ... + +class HeartbeatRequest_v1(Request): + API_KEY: int = ... + API_VERSION: int = ... + RESPONSE_TYPE: Any = ... + SCHEMA: Any = ... + +HeartbeatRequest: Any +HeartbeatResponse: Any + +class LeaveGroupResponse_v0(Response): + API_KEY: int = ... + API_VERSION: int = ... + SCHEMA: Any = ... + +class LeaveGroupResponse_v1(Response): + API_KEY: int = ... + API_VERSION: int = ... + SCHEMA: Any = ... + +class LeaveGroupRequest_v0(Request): + API_KEY: int = ... + API_VERSION: int = ... + RESPONSE_TYPE: Any = ... + SCHEMA: Any = ... + +class LeaveGroupRequest_v1(Request): + API_KEY: int = ... + API_VERSION: int = ... + RESPONSE_TYPE: Any = ... + SCHEMA: Any = ... + +LeaveGroupRequest: Any +LeaveGroupResponse: Any diff --git a/stubs/kafka/protocol/message.pyi b/stubs/kafka/protocol/message.pyi new file mode 100644 index 0000000..848f7c3 --- /dev/null +++ b/stubs/kafka/protocol/message.pyi @@ -0,0 +1,44 @@ +from kafka.codec import gzip_decode as gzip_decode, has_gzip as has_gzip, has_lz4 as has_lz4, has_snappy as has_snappy, lz4_decode as lz4_decode, lz4_decode_old_kafka as lz4_decode_old_kafka, snappy_decode as snappy_decode +from kafka.protocol.frame import KafkaBytes as KafkaBytes +from kafka.protocol.struct import Struct as Struct +from kafka.protocol.types import AbstractType as AbstractType, Bytes as Bytes, Int32 as Int32, Int64 as Int64, Int8 as Int8, Schema as Schema +from kafka.util import WeakMethod as WeakMethod, crc32 as crc32 +from typing import Any, Optional + +class Message(Struct): + SCHEMAS: Any = ... + SCHEMA: Any = ... + CODEC_MASK: int = ... + CODEC_GZIP: int = ... + CODEC_SNAPPY: int = ... + CODEC_LZ4: int = ... + TIMESTAMP_TYPE_MASK: int = ... + HEADER_SIZE: int = ... + timestamp: Any = ... + crc: Any = ... + magic: Any = ... + attributes: Any = ... + key: Any = ... + value: Any = ... + encode: Any = ... + def __init__(self, value: Any, key: Optional[Any] = ..., magic: int = ..., attributes: int = ..., crc: int = ..., timestamp: Optional[Any] = ...) -> None: ... 
+ @property + def timestamp_type(self): ... + @classmethod + def decode(cls, data: Any): ... + def validate_crc(self): ... + def is_compressed(self): ... + def decompress(self): ... + def __hash__(self) -> Any: ... + +class PartialMessage(bytes): ... + +class MessageSet(AbstractType): + ITEM: Any = ... + HEADER_SIZE: int = ... + @classmethod + def encode(cls, items: Any, prepend_size: bool = ...): ... + @classmethod + def decode(cls, data: Any, bytes_to_read: Optional[Any] = ...): ... + @classmethod + def repr(cls, messages: Any): ... diff --git a/stubs/kafka/protocol/metadata.pyi b/stubs/kafka/protocol/metadata.pyi new file mode 100644 index 0000000..4ee028b --- /dev/null +++ b/stubs/kafka/protocol/metadata.pyi @@ -0,0 +1,83 @@ +from kafka.protocol.api import Request as Request, Response as Response +from kafka.protocol.types import Array as Array, Boolean as Boolean, Int16 as Int16, Int32 as Int32, Schema as Schema, String as String +from typing import Any + +class MetadataResponse_v0(Response): + API_KEY: int = ... + API_VERSION: int = ... + SCHEMA: Any = ... + +class MetadataResponse_v1(Response): + API_KEY: int = ... + API_VERSION: int = ... + SCHEMA: Any = ... + +class MetadataResponse_v2(Response): + API_KEY: int = ... + API_VERSION: int = ... + SCHEMA: Any = ... + +class MetadataResponse_v3(Response): + API_KEY: int = ... + API_VERSION: int = ... + SCHEMA: Any = ... + +class MetadataResponse_v4(Response): + API_KEY: int = ... + API_VERSION: int = ... + SCHEMA: Any = ... + +class MetadataResponse_v5(Response): + API_KEY: int = ... + API_VERSION: int = ... + SCHEMA: Any = ... + +class MetadataRequest_v0(Request): + API_KEY: int = ... + API_VERSION: int = ... + RESPONSE_TYPE: Any = ... + SCHEMA: Any = ... + ALL_TOPICS: Any = ... + +class MetadataRequest_v1(Request): + API_KEY: int = ... + API_VERSION: int = ... + RESPONSE_TYPE: Any = ... + SCHEMA: Any = ... + ALL_TOPICS: int = ... + NO_TOPICS: Any = ... + +class MetadataRequest_v2(Request): + API_KEY: int = ... + API_VERSION: int = ... + RESPONSE_TYPE: Any = ... + SCHEMA: Any = ... + ALL_TOPICS: int = ... + NO_TOPICS: Any = ... + +class MetadataRequest_v3(Request): + API_KEY: int = ... + API_VERSION: int = ... + RESPONSE_TYPE: Any = ... + SCHEMA: Any = ... + ALL_TOPICS: int = ... + NO_TOPICS: Any = ... + +class MetadataRequest_v4(Request): + API_KEY: int = ... + API_VERSION: int = ... + RESPONSE_TYPE: Any = ... + SCHEMA: Any = ... + ALL_TOPICS: int = ... + NO_TOPICS: Any = ... + +class MetadataRequest_v5(Request): + API_KEY: int = ... + API_VERSION: int = ... + RESPONSE_TYPE: Any = ... + SCHEMA: Any = ... + ALL_TOPICS: int = ... + NO_TOPICS: Any = ... + +MetadataRequest: Any +MetadataResponse: Any diff --git a/stubs/kafka/protocol/offset.pyi b/stubs/kafka/protocol/offset.pyi new file mode 100644 index 0000000..290c403 --- /dev/null +++ b/stubs/kafka/protocol/offset.pyi @@ -0,0 +1,49 @@ +from kafka.protocol.api import Request as Request, Response as Response +from kafka.protocol.types import Array as Array, Int16 as Int16, Int32 as Int32, Int64 as Int64, Int8 as Int8, Schema as Schema, String as String +from typing import Any + +UNKNOWN_OFFSET: int + +class OffsetResetStrategy: + LATEST: int = ... + EARLIEST: int = ... + NONE: int = ... + +class OffsetResponse_v0(Response): + API_KEY: int = ... + API_VERSION: int = ... + SCHEMA: Any = ... + +class OffsetResponse_v1(Response): + API_KEY: int = ... + API_VERSION: int = ... + SCHEMA: Any = ... + +class OffsetResponse_v2(Response): + API_KEY: int = ... + API_VERSION: int = ... 
+ SCHEMA: Any = ... + +class OffsetRequest_v0(Request): + API_KEY: int = ... + API_VERSION: int = ... + RESPONSE_TYPE: Any = ... + SCHEMA: Any = ... + DEFAULTS: Any = ... + +class OffsetRequest_v1(Request): + API_KEY: int = ... + API_VERSION: int = ... + RESPONSE_TYPE: Any = ... + SCHEMA: Any = ... + DEFAULTS: Any = ... + +class OffsetRequest_v2(Request): + API_KEY: int = ... + API_VERSION: int = ... + RESPONSE_TYPE: Any = ... + SCHEMA: Any = ... + DEFAULTS: Any = ... + +OffsetRequest: Any +OffsetResponse: Any diff --git a/stubs/kafka/protocol/parser.pyi b/stubs/kafka/protocol/parser.pyi new file mode 100644 index 0000000..b985b35 --- /dev/null +++ b/stubs/kafka/protocol/parser.pyi @@ -0,0 +1,15 @@ +from kafka.protocol.api import RequestHeader as RequestHeader +from kafka.protocol.commit import GroupCoordinatorResponse as GroupCoordinatorResponse +from kafka.protocol.frame import KafkaBytes as KafkaBytes +from kafka.protocol.types import Int32 as Int32 +from typing import Any, Optional + +log: Any + +class KafkaProtocol: + in_flight_requests: Any = ... + bytes_to_send: Any = ... + def __init__(self, client_id: Optional[Any] = ..., api_version: Optional[Any] = ...) -> None: ... + def send_request(self, request: Any, correlation_id: Optional[Any] = ...): ... + def send_bytes(self): ... + def receive_bytes(self, data: Any): ... diff --git a/stubs/kafka/protocol/pickle.pyi b/stubs/kafka/protocol/pickle.pyi new file mode 100644 index 0000000..e69de29 diff --git a/stubs/kafka/protocol/produce.pyi b/stubs/kafka/protocol/produce.pyi new file mode 100644 index 0000000..920586b --- /dev/null +++ b/stubs/kafka/protocol/produce.pyi @@ -0,0 +1,70 @@ +import abc +from kafka.protocol.api import Request as Request, Response as Response +from kafka.protocol.types import Array as Array, Bytes as Bytes, Int16 as Int16, Int32 as Int32, Int64 as Int64, Schema as Schema, String as String +from typing import Any + +class ProduceResponse_v0(Response): + API_KEY: int = ... + API_VERSION: int = ... + SCHEMA: Any = ... + +class ProduceResponse_v1(Response): + API_KEY: int = ... + API_VERSION: int = ... + SCHEMA: Any = ... + +class ProduceResponse_v2(Response): + API_KEY: int = ... + API_VERSION: int = ... + SCHEMA: Any = ... + +class ProduceResponse_v3(Response): + API_KEY: int = ... + API_VERSION: int = ... + SCHEMA: Any = ... + +class ProduceResponse_v4(Response): + API_KEY: int = ... + API_VERSION: int = ... + SCHEMA: Any = ... + +class ProduceResponse_v5(Response): + API_KEY: int = ... + API_VERSION: int = ... + SCHEMA: Any = ... + +class ProduceRequest(Request, metaclass=abc.ABCMeta): + API_KEY: int = ... + def expect_response(self): ... + +class ProduceRequest_v0(ProduceRequest): + API_VERSION: int = ... + RESPONSE_TYPE: Any = ... + SCHEMA: Any = ... + +class ProduceRequest_v1(ProduceRequest): + API_VERSION: int = ... + RESPONSE_TYPE: Any = ... + SCHEMA: Any = ... + +class ProduceRequest_v2(ProduceRequest): + API_VERSION: int = ... + RESPONSE_TYPE: Any = ... + SCHEMA: Any = ... + +class ProduceRequest_v3(ProduceRequest): + API_VERSION: int = ... + RESPONSE_TYPE: Any = ... + SCHEMA: Any = ... + +class ProduceRequest_v4(ProduceRequest): + API_VERSION: int = ... + RESPONSE_TYPE: Any = ... + SCHEMA: Any = ... + +class ProduceRequest_v5(ProduceRequest): + API_VERSION: int = ... + RESPONSE_TYPE: Any = ... + SCHEMA: Any = ... 
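As a rough illustration of how the parser stubbed above fits together with the request classes, the example below frames a metadata request and reassembles the response from raw socket bytes. The blocking socket handling and the assumption that receive_bytes yields (correlation_id, response) pairs are simplifications for illustration, not part of the patch.

```python
# Sketch only: driving KafkaProtocol by hand.  Error handling and partial
# reads are simplified; a real client keeps the connection and loop alive.
import socket
from kafka.protocol.metadata import MetadataRequest
from kafka.protocol.parser import KafkaProtocol

def fetch_cluster_metadata(host: str, port: int):
    protocol = KafkaProtocol(client_id='stub-demo')
    protocol.send_request(MetadataRequest[0]([]))      # v0 request, [] == all topics
    with socket.create_connection((host, port)) as sock:
        sock.sendall(protocol.send_bytes())            # size-prefixed request frame
        responses = []
        while not responses:
            chunk = sock.recv(4096)
            if not chunk:
                raise ConnectionError("broker closed the connection")
            responses = protocol.receive_bytes(chunk)  # assumed: [(correlation_id, response), ...]
    return responses[0][1]
```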
+ +ProduceResponse: Any diff --git a/stubs/kafka/protocol/struct.pyi b/stubs/kafka/protocol/struct.pyi new file mode 100644 index 0000000..c892760 --- /dev/null +++ b/stubs/kafka/protocol/struct.pyi @@ -0,0 +1,13 @@ +from kafka.protocol.abstract import AbstractType as AbstractType +from typing import Any + +class Struct(AbstractType): + SCHEMA: Any = ... + def __init__(self, *args: Any, **kwargs: Any) -> None: ... + @classmethod + def encode(cls, item: Any): ... + @classmethod + def decode(cls, data: Any): ... + def get_item(self, name: Any): ... + def __hash__(self) -> Any: ... + def __eq__(self, other: Any) -> Any: ... diff --git a/stubs/kafka/protocol/types.pyi b/stubs/kafka/protocol/types.pyi new file mode 100644 index 0000000..aa9e655 --- /dev/null +++ b/stubs/kafka/protocol/types.pyi @@ -0,0 +1,62 @@ +from kafka.protocol.abstract import AbstractType as AbstractType +from typing import Any + +class Int8(AbstractType): + @classmethod + def encode(cls, value: Any): ... + @classmethod + def decode(cls, data: Any): ... + +class Int16(AbstractType): + @classmethod + def encode(cls, value: Any): ... + @classmethod + def decode(cls, data: Any): ... + +class Int32(AbstractType): + @classmethod + def encode(cls, value: Any): ... + @classmethod + def decode(cls, data: Any): ... + +class Int64(AbstractType): + @classmethod + def encode(cls, value: Any): ... + @classmethod + def decode(cls, data: Any): ... + +class String(AbstractType): + encoding: Any = ... + def __init__(self, encoding: str = ...) -> None: ... + def encode(self, value: Any): ... + def decode(self, data: Any): ... + +class Bytes(AbstractType): + @classmethod + def encode(cls, value: Any): ... + @classmethod + def decode(cls, data: Any): ... + @classmethod + def repr(cls, value: Any): ... + +class Boolean(AbstractType): + @classmethod + def encode(cls, value: Any): ... + @classmethod + def decode(cls, data: Any): ... + +class Schema(AbstractType): + def __init__(self, *fields: Any) -> None: ... + def encode(self, item: Any): ... + def decode(self, data: Any): ... + def __len__(self): ... + @classmethod + def repr(cls, value: Any): ... + +class Array(AbstractType): + array_of: Any = ... + def __init__(self, *array_of: Any) -> None: ... + def encode(self, items: Any): ... + def decode(self, data: Any): ... + @classmethod + def repr(cls, value: Any): ... diff --git a/stubs/kafka/record/__init__.pyi b/stubs/kafka/record/__init__.pyi new file mode 100644 index 0000000..4748428 --- /dev/null +++ b/stubs/kafka/record/__init__.pyi @@ -0,0 +1 @@ +from kafka.record.memory_records import MemoryRecords as MemoryRecords, MemoryRecordsBuilder as MemoryRecordsBuilder diff --git a/stubs/kafka/record/_crc32c.pyi b/stubs/kafka/record/_crc32c.pyi new file mode 100644 index 0000000..8dda7a8 --- /dev/null +++ b/stubs/kafka/record/_crc32c.pyi @@ -0,0 +1,8 @@ +from typing import Any + +CRC_TABLE: Any +CRC_INIT: int + +def crc_update(crc: Any, data: Any): ... +def crc_finalize(crc: Any): ... +def crc(data: Any): ... diff --git a/stubs/kafka/record/abc.pyi b/stubs/kafka/record/abc.pyi new file mode 100644 index 0000000..59248e1 --- /dev/null +++ b/stubs/kafka/record/abc.pyi @@ -0,0 +1,51 @@ +import abc +from typing import Any, Optional + +class ABCRecord(metaclass=abc.ABCMeta): + __metaclass__: Any = ... + @property + @abc.abstractmethod + def offset(self) -> Any: ... + @property + @abc.abstractmethod + def timestamp(self) -> Any: ... + @property + @abc.abstractmethod + def timestamp_type(self) -> Any: ... 
+ @property + @abc.abstractmethod + def key(self) -> Any: ... + @property + @abc.abstractmethod + def value(self) -> Any: ... + @property + @abc.abstractmethod + def checksum(self) -> Any: ... + @property + @abc.abstractmethod + def headers(self) -> Any: ... + +class ABCRecordBatchBuilder(metaclass=abc.ABCMeta): + __metaclass__: Any = ... + @abc.abstractmethod + def append(self, offset: Any, timestamp: Any, key: Any, value: Any, headers: Optional[Any] = ...) -> Any: ... + @abc.abstractmethod + def size_in_bytes(self, offset: Any, timestamp: Any, key: Any, value: Any, headers: Any) -> Any: ... + @abc.abstractmethod + def build(self) -> Any: ... + +class ABCRecordBatch(metaclass=abc.ABCMeta): + __metaclass__: Any = ... + @abc.abstractmethod + def __iter__(self) -> Any: ... + +class ABCRecords(metaclass=abc.ABCMeta): + __metaclass__: Any = ... + @abc.abstractmethod + def __init__(self, buffer: Any) -> None: ... + @abc.abstractmethod + def size_in_bytes(self) -> Any: ... + @abc.abstractmethod + def next_batch(self) -> Any: ... + @abc.abstractmethod + def has_next(self) -> Any: ... diff --git a/stubs/kafka/record/default_records.pyi b/stubs/kafka/record/default_records.pyi new file mode 100644 index 0000000..e570997 --- /dev/null +++ b/stubs/kafka/record/default_records.pyi @@ -0,0 +1,91 @@ +from kafka.codec import gzip_decode as gzip_decode, gzip_encode as gzip_encode, lz4_decode as lz4_decode, lz4_encode as lz4_encode, snappy_decode as snappy_decode, snappy_encode as snappy_encode +from kafka.errors import CorruptRecordException as CorruptRecordException, UnsupportedCodecError as UnsupportedCodecError +from kafka.record.abc import ABCRecord as ABCRecord, ABCRecordBatch as ABCRecordBatch, ABCRecordBatchBuilder as ABCRecordBatchBuilder +from kafka.record.util import calc_crc32c as calc_crc32c, decode_varint as decode_varint, encode_varint as encode_varint, size_of_varint as size_of_varint +from typing import Any, Optional + +class DefaultRecordBase: + HEADER_STRUCT: Any = ... + ATTRIBUTES_OFFSET: Any = ... + CRC_OFFSET: Any = ... + AFTER_LEN_OFFSET: Any = ... + CODEC_MASK: int = ... + CODEC_NONE: int = ... + CODEC_GZIP: int = ... + CODEC_SNAPPY: int = ... + CODEC_LZ4: int = ... + TIMESTAMP_TYPE_MASK: int = ... + TRANSACTIONAL_MASK: int = ... + CONTROL_MASK: int = ... + LOG_APPEND_TIME: int = ... + CREATE_TIME: int = ... + +class DefaultRecordBatch(DefaultRecordBase, ABCRecordBatch): + def __init__(self, buffer: Any) -> None: ... + @property + def base_offset(self): ... + @property + def magic(self): ... + @property + def crc(self): ... + @property + def attributes(self): ... + @property + def last_offset_delta(self): ... + @property + def compression_type(self): ... + @property + def timestamp_type(self): ... + @property + def is_transactional(self): ... + @property + def is_control_batch(self): ... + @property + def first_timestamp(self): ... + @property + def max_timestamp(self): ... + def __iter__(self) -> Any: ... + def __next__(self): ... + next: Any = ... + def validate_crc(self): ... + +class DefaultRecord(ABCRecord): + def __init__(self, offset: Any, timestamp: Any, timestamp_type: Any, key: Any, value: Any, headers: Any) -> None: ... + @property + def offset(self): ... + @property + def timestamp(self): ... + @property + def timestamp_type(self): ... + @property + def key(self): ... + @property + def value(self): ... + @property + def headers(self): ... + @property + def checksum(self) -> None: ... 
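A brief, hedged example of reading a v2 ("default") record batch with the classes stubbed above. Here raw_batch stands in for bytes taken off the wire; a buffer of that shape is produced by DefaultRecordBatchBuilder, which is stubbed next.

```python
# Sketch only: walking a default record batch and its records.
from kafka.record.default_records import DefaultRecordBatch

def summarize_batch(raw_batch: bytes):
    batch = DefaultRecordBatch(raw_batch)
    if not batch.validate_crc():
        raise ValueError("corrupt record batch")
    print("base offset:", batch.base_offset,
          "compression:", batch.compression_type,
          "transactional:", batch.is_transactional)
    for record in batch:   # yields DefaultRecord instances
        print(record.offset, record.timestamp, record.key, record.value, record.headers)
```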
+ +class DefaultRecordBatchBuilder(DefaultRecordBase, ABCRecordBatchBuilder): + MAX_RECORD_OVERHEAD: int = ... + def __init__(self, magic: Any, compression_type: Any, is_transactional: Any, producer_id: Any, producer_epoch: Any, base_sequence: Any, batch_size: Any) -> None: ... + def append(self, offset: Any, timestamp: Any, key: Any, value: Any, headers: Optional[Any] = ...): ... + def write_header(self, use_compression_type: bool = ...) -> None: ... + def build(self): ... + def size(self): ... + def size_in_bytes(self, offset: Any, timestamp: Any, key: Any, value: Any, headers: Any): ... + @classmethod + def size_of(cls, key: Any, value: Any, headers: Any): ... + @classmethod + def estimate_size_in_bytes(cls, key: Any, value: Any, headers: Any): ... + +class DefaultRecordMetadata: + def __init__(self, offset: Any, size: Any, timestamp: Any) -> None: ... + @property + def offset(self): ... + @property + def crc(self) -> None: ... + @property + def size(self): ... + @property + def timestamp(self): ... diff --git a/stubs/kafka/record/legacy_records.pyi b/stubs/kafka/record/legacy_records.pyi new file mode 100644 index 0000000..9533fd6 --- /dev/null +++ b/stubs/kafka/record/legacy_records.pyi @@ -0,0 +1,77 @@ +from kafka.codec import gzip_decode as gzip_decode, gzip_encode as gzip_encode, lz4_decode as lz4_decode, lz4_decode_old_kafka as lz4_decode_old_kafka, lz4_encode as lz4_encode, lz4_encode_old_kafka as lz4_encode_old_kafka, snappy_decode as snappy_decode, snappy_encode as snappy_encode +from kafka.errors import CorruptRecordException as CorruptRecordException, UnsupportedCodecError as UnsupportedCodecError +from kafka.record.abc import ABCRecord as ABCRecord, ABCRecordBatch as ABCRecordBatch, ABCRecordBatchBuilder as ABCRecordBatchBuilder +from kafka.record.util import calc_crc32 as calc_crc32 +from typing import Any, Optional + +class LegacyRecordBase: + HEADER_STRUCT_V0: Any = ... + HEADER_STRUCT_V1: Any = ... + LOG_OVERHEAD: Any = ... + CRC_OFFSET: Any = ... + MAGIC_OFFSET: Any = ... + RECORD_OVERHEAD_V0: Any = ... + RECORD_OVERHEAD_V1: Any = ... + KEY_OFFSET_V0: Any = ... + KEY_OFFSET_V1: Any = ... + KEY_LENGTH: Any = ... + VALUE_LENGTH: Any = ... + CODEC_MASK: int = ... + CODEC_NONE: int = ... + CODEC_GZIP: int = ... + CODEC_SNAPPY: int = ... + CODEC_LZ4: int = ... + TIMESTAMP_TYPE_MASK: int = ... + LOG_APPEND_TIME: int = ... + CREATE_TIME: int = ... + NO_TIMESTAMP: int = ... + +class LegacyRecordBatch(ABCRecordBatch, LegacyRecordBase): + def __init__(self, buffer: Any, magic: Any) -> None: ... + @property + def timestamp_type(self): ... + @property + def compression_type(self): ... + def validate_crc(self): ... + def __iter__(self) -> Any: ... + +class LegacyRecord(ABCRecord): + def __init__(self, offset: Any, timestamp: Any, timestamp_type: Any, key: Any, value: Any, crc: Any) -> None: ... + @property + def offset(self): ... + @property + def timestamp(self): ... + @property + def timestamp_type(self): ... + @property + def key(self): ... + @property + def value(self): ... + @property + def headers(self): ... + @property + def checksum(self): ... + +class LegacyRecordBatchBuilder(ABCRecordBatchBuilder, LegacyRecordBase): + def __init__(self, magic: Any, compression_type: Any, batch_size: Any) -> None: ... + def append(self, offset: Any, timestamp: Any, key: Any, value: Any, headers: Optional[Any] = ...): ... + def build(self): ... + def size(self): ... + def size_in_bytes(self, offset: Any, timestamp: Any, key: Any, value: Any, headers: Optional[Any] = ...): ... 
+ @classmethod + def record_size(cls, magic: Any, key: Any, value: Any): ... + @classmethod + def record_overhead(cls, magic: Any): ... + @classmethod + def estimate_size_in_bytes(cls, magic: Any, compression_type: Any, key: Any, value: Any): ... + +class LegacyRecordMetadata: + def __init__(self, offset: Any, crc: Any, size: Any, timestamp: Any) -> None: ... + @property + def offset(self): ... + @property + def crc(self): ... + @property + def size(self): ... + @property + def timestamp(self): ... diff --git a/stubs/kafka/record/memory_records.pyi b/stubs/kafka/record/memory_records.pyi new file mode 100644 index 0000000..29e8984 --- /dev/null +++ b/stubs/kafka/record/memory_records.pyi @@ -0,0 +1,26 @@ +from kafka.errors import CorruptRecordException as CorruptRecordException +from kafka.record.abc import ABCRecords as ABCRecords +from kafka.record.default_records import DefaultRecordBatch as DefaultRecordBatch, DefaultRecordBatchBuilder as DefaultRecordBatchBuilder +from kafka.record.legacy_records import LegacyRecordBatch as LegacyRecordBatch, LegacyRecordBatchBuilder as LegacyRecordBatchBuilder +from typing import Any + +class MemoryRecords(ABCRecords): + LENGTH_OFFSET: Any = ... + LOG_OVERHEAD: Any = ... + MAGIC_OFFSET: Any = ... + MIN_SLICE: Any = ... + def __init__(self, bytes_data: Any) -> None: ... + def size_in_bytes(self): ... + def valid_bytes(self): ... + def has_next(self): ... + def next_batch(self, _min_slice: Any = ..., _magic_offset: Any = ...): ... + +class MemoryRecordsBuilder: + def __init__(self, magic: Any, compression_type: Any, batch_size: Any) -> None: ... + def append(self, timestamp: Any, key: Any, value: Any, headers: Any = ...): ... + def close(self) -> None: ... + def size_in_bytes(self): ... + def compression_rate(self): ... + def is_full(self): ... + def next_offset(self): ... + def buffer(self): ... diff --git a/stubs/kafka/record/util.pyi b/stubs/kafka/record/util.pyi new file mode 100644 index 0000000..c0cb3f0 --- /dev/null +++ b/stubs/kafka/record/util.pyi @@ -0,0 +1,7 @@ +from typing import Any + +def encode_varint(value: Any, write: Any): ... +def size_of_varint(value: Any): ... +def decode_varint(buffer: Any, pos: int = ...): ... +def calc_crc32c(memview: Any, _crc32c: Any = ...): ... +def calc_crc32(memview: Any): ... diff --git a/stubs/kafka/scram.pyi b/stubs/kafka/scram.pyi new file mode 100644 index 0000000..8986595 --- /dev/null +++ b/stubs/kafka/scram.pyi @@ -0,0 +1,26 @@ +from typing import Any + +def xor_bytes(left: Any, right: Any): ... + +class ScramClient: + MECHANISMS: Any = ... + nonce: Any = ... + auth_message: str = ... + salted_password: Any = ... + user: Any = ... + password: Any = ... + hashfunc: Any = ... + hashname: Any = ... + stored_key: Any = ... + client_key: Any = ... + client_signature: Any = ... + client_proof: Any = ... + server_key: Any = ... + server_signature: Any = ... + def __init__(self, user: Any, password: Any, mechanism: Any) -> None: ... + def first_message(self): ... + def process_server_first_message(self, server_first_message: Any) -> None: ... + def hmac(self, key: Any, msg: Any): ... + def create_salted_password(self, salt: Any, iterations: Any) -> None: ... + def final_message(self): ... + def process_server_final_message(self, server_final_message: Any) -> None: ... 
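The ScramClient stub above maps directly onto the four-step SCRAM exchange. The sketch below shows that flow, with send and receive as hypothetical placeholders for whatever transport carries the SaslAuthenticate round-trips.

```python
# Illustration of the SCRAM handshake using the stubbed ScramClient API.
from kafka.scram import ScramClient

def scram_authenticate(send, receive, user: str, password: str) -> None:
    client = ScramClient(user, password, 'SCRAM-SHA-256')
    send(client.first_message())                       # client-first-message
    client.process_server_first_message(receive())     # salt, nonce, iteration count
    send(client.final_message())                       # includes the client proof
    client.process_server_final_message(receive())     # verifies the server signature
```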
diff --git a/stubs/kafka/serializer/__init__.pyi b/stubs/kafka/serializer/__init__.pyi new file mode 100644 index 0000000..b9caa9a --- /dev/null +++ b/stubs/kafka/serializer/__init__.pyi @@ -0,0 +1 @@ +from kafka.serializer.abstract import Deserializer as Deserializer, Serializer as Serializer diff --git a/stubs/kafka/serializer/abstract.pyi b/stubs/kafka/serializer/abstract.pyi new file mode 100644 index 0000000..8d76e61 --- /dev/null +++ b/stubs/kafka/serializer/abstract.pyi @@ -0,0 +1,16 @@ +import abc +from typing import Any + +class Serializer(metaclass=abc.ABCMeta): + __meta__: Any = ... + def __init__(self, **config: Any) -> None: ... + @abc.abstractmethod + def serialize(self, topic: Any, value: Any) -> Any: ... + def close(self) -> None: ... + +class Deserializer(metaclass=abc.ABCMeta): + __meta__: Any = ... + def __init__(self, **config: Any) -> None: ... + @abc.abstractmethod + def deserialize(self, topic: Any, bytes_: Any) -> Any: ... + def close(self) -> None: ... diff --git a/stubs/kafka/structs.pyi b/stubs/kafka/structs.pyi new file mode 100644 index 0000000..33aff41 --- /dev/null +++ b/stubs/kafka/structs.pyi @@ -0,0 +1,37 @@ +from collections import namedtuple +from typing import NamedTuple, Optional, Sequence + + +class TopicPartition(NamedTuple): + topic: str + partition: int + + +class BrokerMetadata(NamedTuple): + nodeId: int + host: str + port: int + rack: str + + +class PartitionMetadata(NamedTuple): + topic: str + partition: int + leader: int + replicas: Sequence[int] + isr: Sequence[int] + error: int + + +class OffsetAndMetadata(NamedTuple): + offset: int + metadata: str + + +class OffsetAndTimestamp(NamedTuple): + offset: int + timestamp: Optional[int] # Only None if used with old broker version + + +# Deprecated part +RetryOptions = namedtuple('RetryOptions', ['limit', 'backoff_ms', 'retry_on_timeouts']) diff --git a/stubs/kafka/util.pyi b/stubs/kafka/util.pyi new file mode 100644 index 0000000..7eca44e --- /dev/null +++ b/stubs/kafka/util.pyi @@ -0,0 +1,3 @@ + +def crc32(data: bytes) -> int: + ... 
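A quick illustration of the typed structs above, with made-up values. The point of the NamedTuple-based stubs is that field names and types become visible to mypy, so mistakes are caught statically instead of at runtime.

```python
# Made-up values, shown only to demonstrate the typed struct fields.
from kafka.structs import TopicPartition, OffsetAndMetadata

tp = TopicPartition(topic='payments', partition=3)
committed = OffsetAndMetadata(offset=42, metadata='')

# With NamedTuple stubs, a typo such as tp.partiton or passing a str offset
# is flagged by the type checker rather than failing at runtime.
assert tp.partition == 3
assert committed.offset + 1 == 43
```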
diff --git a/stubs/kafka/version.pyi b/stubs/kafka/version.pyi new file mode 100644 index 0000000..bda5b5a --- /dev/null +++ b/stubs/kafka/version.pyi @@ -0,0 +1 @@ +__version__: str diff --git a/tests/_testutil.py b/tests/_testutil.py index d5e60d6..13941f3 100644 --- a/tests/_testutil.py +++ b/tests/_testutil.py @@ -16,7 +16,7 @@ from aiokafka import ConsumerRebalanceListener from aiokafka.client import AIOKafkaClient -from aiokafka.errors import ConnectionError +from aiokafka.errors import KafkaConnectionError from aiokafka.producer import AIOKafkaProducer from aiokafka.helpers import create_ssl_context @@ -26,19 +26,7 @@ __all__ = ['KafkaIntegrationTestCase', 'random_string'] - -def run_until_complete(fun): - if not asyncio.iscoroutinefunction(fun): - fun = asyncio.coroutine(fun) - - @wraps(fun) - def wrapper(test, *args, **kw): - loop = test.loop - timeout = getattr(test, "TEST_TIMEOUT", 30) - ret = loop.run_until_complete( - asyncio.wait_for(fun(test, *args, **kw), timeout, loop=loop)) - return ret - return wrapper +run_until_complete = pytest.mark.asyncio def kafka_versions(*versions): @@ -190,10 +178,44 @@ def _format_params( return options def cleanup(self): - for acl_params in self._active_acls: + for acl_params in self._active_acls[:]: self.remove_acl(**acl_params) +class KafkaConfig: + + def __init__(self, docker, tag): + self._docker = docker + self._active_acls = [] + self._tag = tag + + @property + def cmd(self): + return "/opt/kafka_{tag}/bin/kafka-configs.sh".format(tag=self._tag) + + def _exec(self, *cmd_options): + cmd = ' '.join( + [self.cmd, "--zookeeper", "localhost:2181"] + list(cmd_options)) + exit_code, output = self._docker.exec_run(cmd) + if exit_code != 0: + for line in output.split(b'\n'): + log.warning(line) + raise RuntimeError("Failed to apply Config") + else: + for line in output.split(b'\n'): + log.debug(line) + return output + + def add_scram_user(self, username, password): + self._exec( + "--alter", + "--add-config", + "SCRAM-SHA-256=[password={0}],SCRAM-SHA-512=[password={0}]".format( + password), + "--entity-type", "users", + "--entity-name", username) + + class KerberosUtils: def __init__(self, docker): @@ -214,14 +236,24 @@ def create_keytab( keytab_dir.mkdir() if sys.platform == 'darwin': - subprocess.run( + res = subprocess.run( ['ktutil', '-k', keytab_file, 'add', '-p', principal, '-V', '1', '-e', 'aes256-cts-hmac-sha1-96', '-w', password], - cwd=str(keytab_dir.absolute()), check=True) + cwd=str(keytab_dir.absolute()), + stdout=subprocess.PIPE, + stderr=subprocess.PIPE) + if res.returncode != 0: + print( + "Failed to setup keytab for Kerberos.\n" + "stdout: \n{}\nstrerr: \n{}".format( + res.stdout, res.stderr), + file=sys.stderr + ) + res.check_returncode() elif sys.platform != 'win32': input_data = ( "add_entry -password -p {principal} -k 1 " @@ -232,10 +264,21 @@ def create_keytab( principal=principal, password=password, keytab_file=keytab_file) - subprocess.run( + res = subprocess.run( ['ktutil'], cwd=str(keytab_dir.absolute()), - input=input_data.encode(), check=True) + input=input_data.encode(), + stdout=subprocess.PIPE, + stderr=subprocess.PIPE + ) + if res.returncode != 0: + print( + "Failed to setup keytab for Kerberos.\n" + "stdout: \n{}\nstrerr: \n{}".format( + res.stdout, res.stderr), + file=sys.stderr + ) + res.check_returncode() else: raise NotImplementedError @@ -258,27 +301,6 @@ def kdestroy(self): class KafkaIntegrationTestCase(unittest.TestCase): topic = None - hosts = [] - - @classmethod - def wait_kafka(cls): - cls.hosts = 
['{}:{}'.format(cls.kafka_host, cls.kafka_port)] - - # Reconnecting until Kafka in docker becomes available - for i in range(500): - client = AIOKafkaClient(loop=cls.loop, bootstrap_servers=cls.hosts) - try: - cls.loop.run_until_complete(client.bootstrap()) - # Broker can still be loading cluster layout, so we can get 0 - # brokers. That counts as still not available - if client.cluster.brokers(): - return - except ConnectionError: - pass - finally: - cls.loop.run_until_complete(client.close()) - time.sleep(0.1) - assert False, "Kafka server never started" @contextmanager def silence_loop_exception_handler(self): @@ -371,3 +393,27 @@ def create_ssl_context(self): def random_string(length): s = "".join(random.choice(string.ascii_letters) for _ in range(length)) return s.encode('utf-8') + + +def wait_kafka(kafka_host, kafka_port, timeout=60): + hosts = ['{}:{}'.format(kafka_host, kafka_port)] + loop = asyncio.get_event_loop() + + # Reconnecting until Kafka in docker becomes available + start = loop.time() + while True: + client = AIOKafkaClient( + loop=loop, bootstrap_servers=hosts) + try: + loop.run_until_complete(client.bootstrap()) + # Broker can still be loading cluster layout, so we can get 0 + # brokers. That counts as still not available + if client.cluster.brokers(): + return True + except KafkaConnectionError: + pass + finally: + loop.run_until_complete(client.close()) + time.sleep(0.5) + if loop.time() - start > timeout: + return False diff --git a/tests/conftest.py b/tests/conftest.py index 83c3e3f..41c6ce8 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -1,6 +1,7 @@ import asyncio import gc import docker as libdocker +import logging import pytest import socket import uuid @@ -14,6 +15,8 @@ from aiokafka.record.default_records import ( DefaultRecordBatchBuilder, _DefaultRecordBatchBuilderPy) from aiokafka.util import NO_EXTENSIONS +from ._testutil import wait_kafka + if not NO_EXTENSIONS: assert LegacyRecordBatchBuilder is not _LegacyRecordBatchBuilderPy and \ @@ -35,6 +38,14 @@ def pytest_addoption(parser): help='Do not pull new docker image before test run') +def pytest_configure(config): + """Disable the loggers.""" + # Debug logs clobber output on CI + for name in ["urllib3", "asyncio"]: + logger = logging.getLogger(name) + logger.setLevel(logging.INFO) + + @pytest.fixture(scope='session') def docker(request): image = request.config.getoption('--docker-image') @@ -53,12 +64,14 @@ def acl_manager(kafka_server, request): return manager -@pytest.yield_fixture(autouse=True) -def clean_acl(acl_manager): - # This is used to have a better report on ResourceWarnings. Without it - # all warnings will be filled in the end of last test-case. 
- yield - acl_manager.cleanup() +@pytest.fixture(scope='class') +def kafka_config(kafka_server, request): + image = request.config.getoption('--docker-image') + tag = image.split(":")[-1].replace('_', '-') + + from ._testutil import KafkaConfig + manager = KafkaConfig(kafka_server[-1], tag) + return manager if sys.platform != 'win32': @@ -151,9 +164,15 @@ def kafka_server(request, docker, docker_ip_address, 'NUM_PARTITIONS': 2 } kafka_version = image.split(":")[-1].split("_")[-1] - if not kafka_version == "0.9.0.1": - environment['SASL_MECHANISMS'] = "PLAIN,GSSAPI" + kafka_version = tuple(int(x) for x in kafka_version.split('.')) + if kafka_version >= (0, 10, 2): + environment['SASL_MECHANISMS'] = ( + "PLAIN,GSSAPI,SCRAM-SHA-256,SCRAM-SHA-512" + ) environment['SASL_JAAS_FILE'] = "kafka_server_jaas.conf" + elif kafka_version >= (0, 10, 1): + environment['SASL_MECHANISMS'] = "PLAIN,GSSAPI" + environment['SASL_JAAS_FILE'] = "kafka_server_jaas_no_scram.conf" else: environment['SASL_MECHANISMS'] = "GSSAPI" environment['SASL_JAAS_FILE'] = "kafka_server_gssapi_jaas.conf" @@ -177,13 +196,27 @@ def kafka_server(request, docker, docker_ip_address, }, environment=environment, tty=True, - detach=True) - - yield ( - kafka_host, kafka_port, kafka_ssl_port, kafka_sasl_plain_port, - kafka_sasl_ssl_port, container - ) - container.remove(force=True) + detach=True, + remove=True) + + try: + if not wait_kafka(kafka_host, kafka_port): + exit_code, output = container.exec_run( + ["supervisorctl", "tail", "kafka"]) + print("Kafka failed to start. \n--- STDOUT:") + print(output.decode(), file=sys.stdout) + exit_code, output = container.exec_run( + ["supervisorctl", "tail", "kafka", "stderr"]) + print("--- STDERR:") + print(output.decode(), file=sys.stderr) + pytest.exit("Could not start Kafka Server") + + yield ( + kafka_host, kafka_port, kafka_ssl_port, kafka_sasl_plain_port, + kafka_sasl_ssl_port, container + ) + finally: + container.remove(force=True) else: @@ -224,7 +257,7 @@ def setup_test_class_serverless(request, loop, ssl_folder): @pytest.fixture(scope='class') def setup_test_class(request, loop, kafka_server, ssl_folder, acl_manager, - kerberos_utils): + kerberos_utils, kafka_config): request.cls.loop = loop request.cls.kafka_host = kafka_server[0] request.cls.kafka_port = kafka_server[1] @@ -234,14 +267,15 @@ def setup_test_class(request, loop, kafka_server, ssl_folder, acl_manager, request.cls.ssl_folder = ssl_folder request.cls.acl_manager = acl_manager request.cls.kerberos_utils = kerberos_utils + request.cls.kafka_config = kafka_config + request.cls.hosts = [ + '{}:{}'.format(request.cls.kafka_host, request.cls.kafka_port) + ] docker_image = request.config.getoption('--docker-image') kafka_version = docker_image.split(":")[-1].split("_")[-1] request.cls.kafka_version = kafka_version - if hasattr(request.cls, 'wait_kafka'): - request.cls.wait_kafka() - def pytest_ignore_collect(path, config): if 'pep492' in str(path): diff --git a/tests/test_client.py b/tests/test_client.py index 6fe50af..2e6dde5 100644 --- a/tests/test_client.py +++ b/tests/test_client.py @@ -5,8 +5,10 @@ import types from unittest import mock -from kafka.common import (KafkaError, ConnectionError, RequestTimedOutError, - NodeNotReadyError, UnrecognizedBrokerVersion) +from kafka.errors import ( + KafkaError, KafkaConnectionError, RequestTimedOutError, + NodeNotReadyError, UnrecognizedBrokerVersion +) from kafka.protocol.metadata import ( MetadataRequest_v0 as MetadataRequest, MetadataResponse_v0 as MetadataResponse) @@ -35,7 +37,7 
@@ def test_init_with_list(self): loop=self.loop, bootstrap_servers=[ '127.0.0.1:9092', '127.0.0.2:9092', '127.0.0.3:9092']) self.assertEqual( - '', + '', client.__repr__()) self.assertEqual( sorted([('127.0.0.1', 9092, socket.AF_INET), @@ -209,6 +211,35 @@ async def send(*args, **kwargs): self.assertNotEqual(client.cluster.brokers(), set([])) self.assertEqual(client.cluster.brokers(), brokers_before) + @run_until_complete + async def test_client_receive_zero_brokers_timeout_on_send(self): + brokers = [ + (0, 'broker_1', 4567), + (1, 'broker_2', 5678) + ] + correct_meta = MetadataResponse(brokers, []) + + async def send(*args, **kwargs): + raise asyncio.TimeoutError() + + client = AIOKafkaClient(loop=self.loop, + bootstrap_servers=['broker_1:4567'], + api_version="0.10") + conn = mock.Mock() + client._conns = [mock.Mock()] + client._get_conn = mock.Mock() + client._get_conn.side_effect = asyncio.coroutine(lambda x: conn) + conn.send = mock.Mock() + conn.send.side_effect = send + client.cluster.update_metadata(correct_meta) + brokers_before = client.cluster.brokers() + + await client._metadata_update(client.cluster, []) + + # There broker list should not be purged + self.assertNotEqual(client.cluster.brokers(), set([])) + self.assertEqual(client.cluster.brokers(), brokers_before) + class TestKafkaClientIntegration(KafkaIntegrationTestCase): @@ -216,7 +247,7 @@ class TestKafkaClientIntegration(KafkaIntegrationTestCase): async def test_bootstrap(self): client = AIOKafkaClient(loop=self.loop, bootstrap_servers='0.42.42.42:444') - with self.assertRaises(ConnectionError): + with self.assertRaises(KafkaConnectionError): await client.bootstrap() client = AIOKafkaClient(loop=self.loop, bootstrap_servers=self.hosts) @@ -228,7 +259,7 @@ async def test_bootstrap(self): client.set_topics(['t2', 't3']) client.set_topics(['t2', 't3']) # should be ignored - client.add_topic('t2') # shold be ignored + client.add_topic('t2') # should be ignored # bootstrap again -- no error expected await client.bootstrap() await client.close() @@ -238,7 +269,15 @@ async def test_failed_bootstrap(self): client = AIOKafkaClient(loop=self.loop, bootstrap_servers=self.hosts) with mock.patch.object(AIOKafkaConnection, 'send') as mock_send: mock_send.side_effect = KafkaError('some kafka error') - with self.assertRaises(ConnectionError): + with self.assertRaises(KafkaConnectionError): + await client.bootstrap() + + @run_until_complete + async def test_failed_bootstrap_timeout(self): + client = AIOKafkaClient(loop=self.loop, bootstrap_servers=self.hosts) + with mock.patch.object(AIOKafkaConnection, 'send') as mock_send: + mock_send.side_effect = asyncio.TimeoutError('Timeout error') + with self.assertRaises(KafkaConnectionError): await client.bootstrap() @run_until_complete @@ -257,7 +296,14 @@ async def test_check_version(self): client = AIOKafkaClient(loop=self.loop, bootstrap_servers=self.hosts) await client.bootstrap() ver = await client.check_version() - self.assertEqual(kafka_version[:2], ver[:2]) + + expected_version = kafka_version[:2] + # No significant protocol changed, no way to differencieate + if expected_version == (2, 2): + expected_version = (2, 1) + elif expected_version == (2, 4): + expected_version = (2, 3) + self.assertEqual(expected_version, ver[:2]) await self.wait_topic(client, 'some_test_topic') ver2 = await client.check_version() self.assertEqual(ver, ver2) @@ -271,7 +317,7 @@ async def test_check_version(self): await client.check_version(client.get_random_node()) client._get_conn = 
asyncio.coroutine(lambda _, **kw: None) - with self.assertRaises(ConnectionError): + with self.assertRaises(KafkaConnectionError): await client.check_version() await client.close() @@ -378,7 +424,7 @@ async def new(*args, **kw): self.assertEqual( len(client._metadata_update.mock_calls), 2) - # Setting [] should force update as it meens all topics + # Setting [] should force update as it means all topics await client.set_topics([]) self.assertEqual( len(client._metadata_update.mock_calls), 3) diff --git a/tests/test_conn.py b/tests/test_conn.py index 88d99d6..452df11 100644 --- a/tests/test_conn.py +++ b/tests/test_conn.py @@ -17,8 +17,8 @@ from aiokafka.conn import AIOKafkaConnection, create_conn, VersionInfo from aiokafka.errors import ( - ConnectionError, CorrelationIdError, KafkaError, NoError, UnknownError, - UnsupportedSaslMechanismError, IllegalSaslStateError + KafkaConnectionError, CorrelationIdError, KafkaError, NoError, + UnknownError, UnsupportedSaslMechanismError, IllegalSaslStateError ) from aiokafka.record.legacy_records import LegacyRecordBatchBuilder from ._testutil import KafkaIntegrationTestCase, run_until_complete @@ -88,14 +88,13 @@ async def test_connections_max_idle_ms(self): request = MetadataRequest([]) await conn.send(request) await asyncio.sleep(0.15, loop=self.loop) - # Check if we're stil connected after 250ms, as we were not idle + # Check if we're still connected after 250ms, as we were not idle self.assertEqual(conn.connected(), True) # It shouldn't break if we have a long running call either readexactly = conn._reader.readexactly with mock.patch.object(conn._reader, 'readexactly') as mocked: - @asyncio.coroutine - def long_read(n): + async def long_read(n): await asyncio.sleep(0.2, loop=self.loop) return (await readexactly(n)) mocked.side_effect = long_read @@ -137,13 +136,13 @@ async def test_send_to_closed(self): host, port = self.kafka_host, self.kafka_port conn = AIOKafkaConnection(host=host, port=port, loop=self.loop) request = MetadataRequest([]) - with self.assertRaises(ConnectionError): + with self.assertRaises(KafkaConnectionError): await conn.send(request) conn._writer = mock.MagicMock() conn._writer.write.side_effect = OSError('mocked writer is closed') - with self.assertRaises(ConnectionError): + with self.assertRaises(KafkaConnectionError): await conn.send(request) @run_until_complete @@ -230,7 +229,7 @@ async def invoke_osserror(*a, **kw): # invoke reader task conn._read_task = conn._create_reader_task() - with self.assertRaises(ConnectionError): + with self.assertRaises(KafkaConnectionError): await conn.send(request) self.assertEqual(conn.connected(), False) @@ -410,14 +409,14 @@ async def test__send_sasl_token(self): # Broken pipe error conn._writer.write.side_effect = OSError - with self.assertRaises(ConnectionError): + with self.assertRaises(KafkaConnectionError): conn._send_sasl_token(b"Super data") self.assertEqual(out_buffer, []) self.assertEqual(len(conn._requests), 0) self.assertEqual(conn.close.call_count, 1) conn._writer = None - with self.assertRaises(ConnectionError): + with self.assertRaises(KafkaConnectionError): conn._send_sasl_token(b"Super data") # We don't need to close 2ce self.assertEqual(conn.close.call_count, 1) diff --git a/tests/test_consumer.py b/tests/test_consumer.py index 9fa744f..bab08ac 100644 --- a/tests/test_consumer.py +++ b/tests/test_consumer.py @@ -36,7 +36,7 @@ async def consumer_factory(self, **kwargs): auto_offset_reset = kwargs.pop('auto_offset_reset', 'earliest') group = kwargs.pop('group', 'group-%s' % 
self.id()) consumer = AIOKafkaConsumer( - self.topic, loop=self.loop, group_id=group, + self.topic, group_id=group, bootstrap_servers=self.hosts, enable_auto_commit=enable_auto_commit, auto_offset_reset=auto_offset_reset, @@ -116,6 +116,46 @@ async def test_simple_consumer(self): # will ignore, no exception expected await consumer.stop() + @run_until_complete + async def test_consumer_context_manager(self): + await self.send_messages(0, list(range(0, 10))) + + group = 'group-%s' % self.id() + consumer = AIOKafkaConsumer( + self.topic, group_id=group, + bootstrap_servers=self.hosts, + enable_auto_commit=False, + auto_offset_reset="earliest") + async with consumer as con: + assert con is consumer + assert consumer._fetcher is not None + messages = [] + async for m in consumer: + messages.append(m) + if len(messages) == 10: + break + self.assert_message_count(messages, 10) + assert consumer._closed + + # Finalize on exception too + consumer = AIOKafkaConsumer( + self.topic, group_id=group, + bootstrap_servers=self.hosts, + enable_auto_commit=False, + auto_offset_reset="earliest") + with pytest.raises(ValueError): + async with consumer as con: + assert con is consumer + assert consumer._fetcher is not None + messages = [] + async for m in consumer: + messages.append(m) + if len(messages) == 10: + break + self.assert_message_count(messages, 10) + raise ValueError + assert consumer._closed + @run_until_complete async def test_consumer_api_version(self): await self.send_messages(0, list(range(0, 10))) @@ -954,7 +994,7 @@ async def test_consumer_subscribe_pattern_with_autocreate(self): {my_topic, my_topic2}) self.assertEqual(consumer.subscription(), {my_topic, my_topic2}) - # Now lets actualy produce some data and verify that it is consumed + # Now let's actually produce some data and verify that it is consumed await producer.send(my_topic, b'test msg') data = await asyncio.wait_for( consume_task, timeout=2, loop=self.loop) @@ -1000,7 +1040,7 @@ async def test_consumer_subscribe_pattern_autocreate_no_group_id(self): {my_topic, my_topic2}) self.assertEqual(consumer.subscription(), {my_topic, my_topic2}) - # Now lets actualy produce some data and verify that it is consumed + # Now let's actually produce some data and verify that it is consumed await producer.send(my_topic, b'test msg') data = await asyncio.wait_for( consume_task, timeout=2, loop=self.loop) @@ -1076,7 +1116,7 @@ async def test_consumer_rebalance_on_new_topic(self): @run_until_complete async def test_consumer_stops_getone(self): # If we have a fetch in progress it should be cancelled if consumer is - # stoped + # stopped consumer = await self.consumer_factory() task = self.loop.create_task(consumer.getone()) await asyncio.sleep(0.1, loop=self.loop) @@ -1094,7 +1134,7 @@ async def test_consumer_stops_getone(self): @run_until_complete async def test_consumer_stops_getmany(self): # If we have a fetch in progress it should be cancelled if consumer is - # stoped + # stopped consumer = await self.consumer_factory() task = self.loop.create_task(consumer.getmany(timeout_ms=10000)) await asyncio.sleep(0.1, loop=self.loop) @@ -1931,6 +1971,7 @@ async def test_consumer_pause_resume(self): with self.assertRaises(asyncio.TimeoutError): await asyncio.wait_for(get_task, timeout=0.5, loop=self.loop) + @run_until_complete async def test_max_poll_interval_ms(self): await self.send_messages(0, list(range(0, 10))) await self.send_messages(1, list(range(10, 20))) diff --git a/tests/test_coordinator.py b/tests/test_coordinator.py index 5794475..bfbfe04
100644 --- a/tests/test_coordinator.py +++ b/tests/test_coordinator.py @@ -12,7 +12,7 @@ OffsetCommitRequest, OffsetCommitResponse_v2, OffsetFetchRequest_v1 as OffsetFetchRequest ) -import kafka.common as Errors +import kafka.errors as Errors from ._testutil import KafkaIntegrationTestCase, run_until_complete @@ -20,7 +20,7 @@ from aiokafka.client import AIOKafkaClient from aiokafka.structs import OffsetAndMetadata, TopicPartition from aiokafka.consumer.group_coordinator import ( - GroupCoordinator, CoordinatorGroupRebalance) + GroupCoordinator, CoordinatorGroupRebalance, NoGroupCoordinator) from aiokafka.consumer.subscription_state import SubscriptionState from aiokafka.util import create_future, ensure_future @@ -138,7 +138,7 @@ async def test_failed_group_join(self): _on_join_leader_mock.side_effect = asyncio.coroutine( lambda resp: b"123") - async def do_rebalance(): + async def do_rebalance(): rebalance = CoordinatorGroupRebalance( coordinator, coordinator.group_id, coordinator.coordinator_id, subscription.subscription, coordinator._assignors, @@ -183,7 +183,7 @@ async def send(*agrs, **kw): await do_rebalance() self.assertEqual(coordinator.need_rejoin(subsc), True) - # no exception expected, member_id should be reseted + # no exception expected, member_id should be reset coordinator.member_id = 'some_invalid_member_id' error_type = Errors.UnknownMemberIdError resp = await do_rebalance() @@ -196,7 +196,7 @@ async def send(*agrs, **kw): with self.assertRaises(Errors.KafkaError): # Masked as unknown error await do_rebalance() - # no exception expected, coordinator_id should be reseted + # no exception expected, coordinator_id should be reset error_type = Errors.GroupCoordinatorNotAvailableError resp = await do_rebalance() self.assertIsNone(resp) @@ -215,7 +215,7 @@ async def send(*agrs, **kw): self.assertEqual(_on_join_leader_mock.call_count, 2) # Subscription changes before rebalance finishes - def send_change_sub(*args, **kw): + async def send_change_sub(*args, **kw): subscription.subscribe(topics=set(['topic2'])) return (await send(*args, **kw)) mocked.send.side_effect = send_change_sub @@ -390,7 +390,7 @@ async def mock_send_req(request): with self.assertRaises(Errors.OffsetMetadataTooLargeError): await coordinator.commit_offsets(assignment, offsets) - # retriable erros should be retried + # retriable errors should be retried commit_error = [ Errors.GroupLoadInProgressError, Errors.GroupLoadInProgressError, @@ -432,12 +432,26 @@ async def mock_send_req(request): Errors.NoError ] await coordinator.commit_offsets(assignment, offsets) + commit_error = [ + Errors.RequestTimedOutError, + Errors.NoError + ] + await coordinator.commit_offsets(assignment, offsets) # Make sure coordinator_id is reset properly each retry - commit_error = Errors.GroupCoordinatorNotAvailableError - with self.assertRaises(Errors.GroupCoordinatorNotAvailableError): - await coordinator._do_commit_offsets(assignment, offsets) - self.assertEqual(coordinator.coordinator_id, None) + for retriable_error in ( + Errors.GroupCoordinatorNotAvailableError, + Errors.NotCoordinatorForGroupError, + Errors.RequestTimedOutError, + ): + self.assertIsNotNone(coordinator.coordinator_id) + commit_error = retriable_error + with self.assertRaises(retriable_error): + await coordinator._do_commit_offsets(assignment, offsets) + self.assertIsNone(coordinator.coordinator_id) + + # ask coordinator to refresh coordinator_id value + await coordinator.ensure_coordinator_known() # Unknown errors are just propagated too commit_error = 
Errors.UnknownError @@ -1173,6 +1187,13 @@ def reset_assignment(): coordinator._next_autocommit_deadline, now + timeout, places=1) + # UnknownMemberId should also retry + coordinator._next_autocommit_deadline = 0 + mocked.side_effect = Errors.UnknownMemberIdError() + now = self.loop.time() + timeout = await coordinator._maybe_do_autocommit(assignment) + self.assertEqual(timeout, 0.05) + # Not retriable errors should skip autocommit and log mocked.side_effect = Errors.UnknownError() now = self.loop.time() @@ -1329,3 +1350,38 @@ async def do_rejoin(subsc): self.assertEqual(autocommit_mock.call_count, 4) self.assertEqual(metadata_mock.call_count, 1) self.assertEqual(last_commit_mock.call_count, 1) + + @run_until_complete + async def test_no_group_subscribe_during_metadata_update(self): + # Issue #536. During metadata update we can't assume the subscription + # did not change. We should handle the case by refreshing meta again. + client = AIOKafkaClient( + loop=self.loop, bootstrap_servers=self.hosts) + await client.bootstrap() + await self.wait_topic(client, 'topic1') + await self.wait_topic(client, 'topic2') + await client.set_topics(('other_topic', )) + + subscription = SubscriptionState(loop=self.loop) + coordinator = NoGroupCoordinator( + client, subscription, loop=self.loop) + subscription.subscribe(topics=set(['topic1'])) + client.set_topics(('topic1', )) + await asyncio.sleep(0.0001, loop=self.loop) + + # Change subscription before metadata update is received + subscription.subscribe(topics=set(['topic2'])) + metadata_fut = client.set_topics(('topic2', )) + + try: + await asyncio.wait_for( + metadata_fut, + timeout=0.2 + ) + except asyncio.TimeoutError: + pass + + self.assertFalse(client._sync_task.done()) + + await coordinator.close() + await client.close() diff --git a/tests/test_fetcher.py b/tests/test_fetcher.py index 5785d8a..45b43ae 100644 --- a/tests/test_fetcher.py +++ b/tests/test_fetcher.py @@ -50,7 +50,7 @@ def test_fetch_result_and_error(loop): loop=loop, error=OffsetOutOfRangeError({}), backoff=0) # Python3.7 got rid of trailing comma in exceptions, which makes the line - # diffrent between 3.6 and 3.7. + # different between 3.6 and 3.7. 
assert repr(error) in [ "<FetchError error=OffsetOutOfRangeError({},)>", "<FetchError error=OffsetOutOfRangeError({})>" ] @@ -81,7 +81,7 @@ async def test_fetcher__update_fetch_positions(self): subscriptions = SubscriptionState(loop=self.loop) fetcher = Fetcher(client, subscriptions, loop=self.loop) self.add_cleanup(fetcher.close) - # Disable backgroud task + # Disable background task fetcher._fetch_task.cancel() try: await fetcher._fetch_task diff --git a/tests/test_message_accumulator.py b/tests/test_message_accumulator.py index afc8413..9b0288b 100644 --- a/tests/test_message_accumulator.py +++ b/tests/test_message_accumulator.py @@ -4,9 +4,10 @@ from unittest import mock from kafka.cluster import ClusterMetadata -from kafka.common import (TopicPartition, KafkaTimeoutError, +from kafka.errors import (KafkaTimeoutError, NotLeaderForPartitionError, LeaderNotAvailableError) +from kafka.structs import TopicPartition from ._testutil import run_until_complete from aiokafka.util import ensure_future from aiokafka.producer.message_accumulator import ( @@ -90,7 +91,7 @@ def mocked_leader_for_partition(tp): done, _ = await asyncio.wait( [add_task], timeout=0.1, loop=self.loop) - self.assertFalse(bool(done)) # we stil not drained data for tp1 + self.assertFalse(bool(done)) # we still have not drained data for tp1 batches, unknown_leaders_exist = ma.drain_by_nodes(ignore_nodes=[]) self.assertEqual(unknown_leaders_exist, True) @@ -132,9 +133,9 @@ def mocked_leader_for_partition(tp): tp2, None, b'msg for tp@2', timeout=2) fut2 = await ma.add_message( tp3, None, b'msg for tp@3', timeout=2) - await ma.add_message(tp1, None, b'0123456789'*70, timeout=2) + await ma.add_message(tp1, None, b'0123456789' * 70, timeout=2) with self.assertRaises(KafkaTimeoutError): - await ma.add_message(tp1, None, b'0123456789'*70, timeout=2) + await ma.add_message(tp1, None, b'0123456789' * 70, timeout=2) batches, _ = ma.drain_by_nodes(ignore_nodes=[]) self.assertEqual(batches[1][tp1].expired(), True) with self.assertRaises(LeaderNotAvailableError): @@ -147,7 +148,7 @@ def mocked_leader_for_partition(tp): fut02 = await ma.add_message( tp0, b'key1', b'value#1', timeout=2) fut10 = await ma.add_message( - tp1, None, b'0123456789'*70, timeout=2) + tp1, None, b'0123456789' * 70, timeout=2) batches, _ = ma.drain_by_nodes(ignore_nodes=[]) self.assertEqual(batches[0][tp0].expired(), False) self.assertEqual(batches[1][tp1].expired(), False) diff --git a/tests/test_pep492.py b/tests/test_pep492.py index 90cac29..54ad0b9 100644 --- a/tests/test_pep492.py +++ b/tests/test_pep492.py @@ -102,7 +102,7 @@ async def iterator(): await consumer.stop() # Should just stop iterator, no errors await task - # But creating anothe iterator should result in an error, we can't + # But creating another iterator should result in an error, we can't # have dead loops like: # # while True: diff --git a/tests/test_producer.py b/tests/test_producer.py index 25709dd..3bf63f0 100644 --- a/tests/test_producer.py +++ b/tests/test_producer.py @@ -32,13 +32,12 @@ class TestKafkaProducerIntegration(KafkaIntegrationTestCase): @run_until_complete async def test_producer_start(self): with self.assertRaises(ValueError): - producer = AIOKafkaProducer(loop=self.loop, acks=122) + producer = AIOKafkaProducer(acks=122) with self.assertRaises(ValueError): - producer = AIOKafkaProducer(loop=self.loop, api_version="3.4.5") + producer = AIOKafkaProducer(api_version="3.4.5") - producer = AIOKafkaProducer( - loop=self.loop, bootstrap_servers=self.hosts) + producer = AIOKafkaProducer(bootstrap_servers=self.hosts) await producer.start()
self.assertNotEqual(producer.client.api_version, 'auto') partitions = await producer.partitions_for('some_topic_name') @@ -134,6 +133,25 @@ async def test_producer_send(self): with self.assertRaises(ProducerClosed): await producer.send(self.topic, b'value', key=b'KEY') + @run_until_complete + async def test_producer_context_manager(self): + producer = AIOKafkaProducer( + loop=self.loop, bootstrap_servers=self.hosts) + async with producer: + assert producer._sender._sender_task is not None + await producer.send(self.topic, b'value', key=b'KEY') + assert producer._closed + + # Closes even on error + producer = AIOKafkaProducer( + loop=self.loop, bootstrap_servers=self.hosts) + with pytest.raises(ValueError): + async with producer: + assert producer._sender._sender_task is not None + await producer.send(self.topic, b'value', key=b'KEY') + raise ValueError() + assert producer._closed + @run_until_complete async def test_producer_send_noack(self): producer = AIOKafkaProducer( @@ -182,7 +200,7 @@ def serializer(val): await producer.send(self.topic, value, key=key) await producer.stop() - await producer.stop() # shold be Ok + await producer.stop() # should be Ok @run_until_complete async def test_producer_send_with_compression(self): @@ -550,7 +568,7 @@ async def test_producer_indempotence_simple(self): @kafka_versions('>=0.11.0') @run_until_complete async def test_producer_indempotence_no_duplicates(self): - # Indempotent producer should retry produce in case of timeout error + # Idempotent producer should retry produce in case of timeout error producer = AIOKafkaProducer( loop=self.loop, bootstrap_servers=self.hosts, enable_idempotence=True, @@ -685,7 +703,7 @@ async def mocked_send(node_id, request, *args, **kw): @run_until_complete async def test_producer_sender_errors_propagate_to_producer(self): # Following on #362 there may be other unexpected errors in sender - # routine that we wan't the user to see, rather than just get stuck. + # routine that we want the user to see, rather than just get stuck. 
producer = AIOKafkaProducer( loop=self.loop, bootstrap_servers=self.hosts, linger_ms=1000) @@ -730,3 +748,15 @@ async def test_producer_send_with_headers_raise_error(self): await producer.send( self.topic, b'msg', partition=0, headers=[("type", b"Normal")]) + + @kafka_versions('>=0.11.0') + @run_until_complete + async def test_producer_send_and_wait_with_headers(self): + producer = AIOKafkaProducer( + loop=self.loop, bootstrap_servers=self.hosts) + await producer.start() + self.add_cleanup(producer.stop) + + resp = await producer.send_and_wait( + self.topic, b'msg', partition=0, headers=[("type", b"Normal")]) + self.assertEqual(resp.partition, 0) diff --git a/tests/test_sasl.py b/tests/test_sasl.py index 3fa1d24..1c1c38b 100644 --- a/tests/test_sasl.py +++ b/tests/test_sasl.py @@ -1,3 +1,5 @@ +import pytest + from ._testutil import ( KafkaIntegrationTestCase, run_until_complete, kafka_versions ) @@ -10,17 +12,27 @@ TransactionalIdAuthorizationFailed, UnknownTopicOrPartitionError ) from aiokafka.structs import TopicPartition -import pytest @pytest.mark.usefixtures('setup_test_class') -class TestKafkaProducerIntegration(KafkaIntegrationTestCase): +class TestKafkaSASL(KafkaIntegrationTestCase): TEST_TIMEOUT = 60 # See https://docs.confluent.io/current/kafka/authorization.html # for a good list of what Operation can be mapped to what Resource and # when is it checked + def setUp(self): + super().setUp() + if tuple(map(int, self.kafka_version.split("."))) >= (0, 10): + self.acl_manager.list_acl() + + def tearDown(self): + # This is used to have a better report on ResourceWarnings. Without it + # all warnings will be filled in the end of last test-case. + super().tearDown() + self.acl_manager.cleanup() + @property def sasl_hosts(self): # Produce/consume by SASL_PLAINTEXT @@ -98,6 +110,38 @@ async def gssapi_consumer_factory(self, **kw): await consumer.start() return consumer + async def scram_producer_factory(self, user="test", **kw): + producer = AIOKafkaProducer( + loop=self.loop, + bootstrap_servers=[self.sasl_hosts], + security_protocol="SASL_PLAINTEXT", + sasl_mechanism='SCRAM-SHA-256', + sasl_plain_username=user, + sasl_plain_password=user, + **kw) + self.add_cleanup(producer.stop) + await producer.start() + return producer + + async def scram_consumer_factory(self, user="test", **kw): + kwargs = dict( + enable_auto_commit=True, + auto_offset_reset="earliest", + group_id=self.group_id + ) + kwargs.update(kw) + consumer = AIOKafkaConsumer( + self.topic, loop=self.loop, + bootstrap_servers=[self.sasl_hosts], + security_protocol="SASL_PLAINTEXT", + sasl_mechanism='SCRAM-SHA-256', + sasl_plain_username=user, + sasl_plain_password=user, + **kwargs) + self.add_cleanup(consumer.stop) + await consumer.start() + return consumer + @kafka_versions('>=0.10.0') @run_until_complete async def test_sasl_plaintext_basic(self): @@ -125,6 +169,18 @@ async def test_sasl_plaintext_gssapi(self): finally: self.kerberos_utils.kdestroy() + @kafka_versions('>=0.10.2') + @run_until_complete + async def test_sasl_plaintext_scram(self): + self.kafka_config.add_scram_user("test", "test") + producer = await self.scram_producer_factory() + await producer.send_and_wait(topic=self.topic, + value=b"Super scram msg") + + consumer = await self.scram_consumer_factory() + msg = await consumer.getone() + self.assertEqual(msg.value, b"Super scram msg") + ########################################################################## # Topic Resource ########################################################################## @@ -185,6 
+241,27 @@ async def test_sasl_deny_topic_write(self): await producer.send_and_wait( topic=self.topic, value=b"Super sasl msg") + @kafka_versions('>=0.11.0') + @run_until_complete + async def test_sasl_deny_autocreate_cluster(self): + self.acl_manager.add_acl( + deny_principal="test", operation="CREATE", cluster=True) + self.acl_manager.add_acl( + allow_principal="test", operation="DESCRIBE", topic=self.topic) + self.acl_manager.add_acl( + allow_principal="test", operation="WRITE", topic=self.topic) + + producer = await self.producer_factory(request_timeout_ms=5000) + with self.assertRaises(TopicAuthorizationFailedError): + await producer.send_and_wait(self.topic, value=b"Super sasl msg") + + with self.assertRaises(TopicAuthorizationFailedError): + await self.consumer_factory(request_timeout_ms=5000) + + with self.assertRaises(TopicAuthorizationFailedError): + await self.consumer_factory( + request_timeout_ms=5000, group_id=None) + ########################################################################## # Group Resource ########################################################################## diff --git a/tests/test_sender.py b/tests/test_sender.py index 01acc39..74f5644 100644 --- a/tests/test_sender.py +++ b/tests/test_sender.py @@ -91,7 +91,7 @@ async def mocked_call(node_id): async def test_sender_maybe_wait_for_pid_non_transactional(self): sender = await self._setup_sender_with_init_mocked() - # If we are not using transactional manager will return rigth away + # If we are not using transactional manager will return right away sender._txn_manager = None await sender._maybe_wait_for_pid() sender._do_init_pid.assert_not_called() diff --git a/tests/test_transactional_producer.py b/tests/test_transactional_producer.py index 18acb60..cc86158 100644 --- a/tests/test_transactional_producer.py +++ b/tests/test_transactional_producer.py @@ -102,7 +102,7 @@ async def test_producer_transactional_fences_off_previous(self): @run_until_complete async def test_producer_transactional_restart_reaquire_pid(self): # While it's documented that PID may change we need to be sure we - # are sending proper InitPIDRequest, not an indempotent one + # are sending proper InitPIDRequest, not an idempotent one producer = AIOKafkaProducer( loop=self.loop, bootstrap_servers=self.hosts, @@ -206,7 +206,7 @@ async def transform(): for msg in msgs: out_msg = b"OUT-" + msg.value # We produce to the same partition - producer.send( + await producer.send( out_topic, value=out_msg, partition=tp.partition) offsets[tp] = msg.offset + 1 @@ -265,7 +265,7 @@ async def transform(raise_error): for msg in msgs: out_msg = b"OUT-" + msg.value # We produce to the same partition - producer.send( + await producer.send( out_topic, value=out_msg, partition=tp.partition) offsets[tp] = msg.offset + 1