diff --git a/.github/workflows/mkdocs.yml b/.github/workflows/mkdocs.yml new file mode 100644 index 00000000..750afd75 --- /dev/null +++ b/.github/workflows/mkdocs.yml @@ -0,0 +1,76 @@ +--- +name: MkDocs + +on: + push: + branches: + - main + pull_request: + +concurrency: + group: ${{ github.workflow }}-${{ github.ref }} + cancel-in-progress: true + +permissions: + contents: write + pull-requests: write + +jobs: + deploy-docs: + runs-on: ubuntu-latest + steps: + - name: Generate token + id: app-token + if: github.event.pull_request.head.repo.full_name == github.repository || github.event_name == 'push' + uses: actions/create-github-app-token@v1 + with: + app-id: ${{ secrets.APP_ID }} + private-key: ${{ secrets.PRIVATE_KEY }} + + - name: Checkout repository + uses: actions/checkout@v4 + with: + token: "${{ steps.app-token.outputs.token }}" + # Fetch the entire git history (all branches + tags) + # We do this because the docs use git describe, which requires having all + # the commits up to the latest version tag. + # We also need the gh-pages branch to push the docs to. 
+ fetch-depth: 0 + + # Make the github application be the committer + # (see: https://stackoverflow.com/a/74071223 on how to obtain the committer email) + - name: Setup git config + if: github.event.pull_request.head.repo.full_name == github.repository || github.event_name == 'push' + run: | + git config --global user.name "py-mine-ci-bot" + git config --global user.email "121461646+py-mine-ci-bot[bot]@users.noreply.github.com" + + - name: Setup poetry + id: poetry_setup + uses: ItsDrike/setup-poetry@v1 + with: + python-version: 3.12 + install-args: "--only main,docs" + + - name: Build the documentation (mkdocs - PR preview) + if: github.event_name == 'pull_request' + run: poetry run mkdocs build + + - name: Deploy docs - PR preview + if: > + github.event_name == 'pull_request' && + github.event.pull_request.head.repo.full_name == github.repository + uses: rossjrw/pr-preview-action@v1 + with: + source-dir: ./site + preview-branch: gh-pages + umbrella-dir: pr-preview + token: ${{ steps.app-token.outputs.token }} + + - name: Build the documentation (mike) + if: github.event_name == 'push' + run: poetry run mike deploy latest + + - name: Deploy docs - latest + if: github.event_name == 'push' + run: git push origin gh-pages diff --git a/.github/workflows/publish.yml b/.github/workflows/publish.yml index cfbd4c77..68918386 100644 --- a/.github/workflows/publish.yml +++ b/.github/workflows/publish.yml @@ -132,3 +132,51 @@ jobs: # This uses PyPI's trusted publishing, so no token is required - name: Release to PyPI uses: pypa/gh-action-pypi-publish@release/v1 + + publish-docs: + name: "Publish release docs" + needs: build + runs-on: ubuntu-latest + permissions: + contents: write + + steps: + - name: Generate token + id: app-token + uses: actions/create-github-app-token@v1 + with: + app-id: ${{ secrets.APP_ID }} + private-key: ${{ secrets.PRIVATE_KEY }} + + - name: Checkout repository + uses: actions/checkout@v4 + with: + token: "${{ steps.app-token.outputs.token }}" + # 
Fetch the entire git history (all branches + tags) + # We do this because the docs use git describe, which requires having all + # the commits up to the latest version tag. + # We also need the gh-pages branch to push the docs to. + fetch-depth: 0 + + # Make the github application be the committer + # (see: https://stackoverflow.com/a/74071223 on how to obtain the committer email) + - name: Setup git config + run: | + git config --global user.name "py-mine-ci-bot" + git config --global user.email "121461646+py-mine-ci-bot[bot]@users.noreply.github.com" + + - name: Setup poetry + id: poetry_setup + uses: ItsDrike/setup-poetry@v1 + with: + python-version: 3.12 + install-args: "--only main,docs,release-ci" + + - name: Set version with dynamic versioning + run: poetry run poetry-dynamic-versioning + + - name: Build the documentation (mike) + run: poetry run mike deploy --update-aliases "$(poetry version --short)" release + + - name: Deploy docs - release + run: git push origin gh-pages diff --git a/.gitignore b/.gitignore index 62b5097e..d5a2037f 100644 --- a/.gitignore +++ b/.gitignore @@ -20,8 +20,8 @@ htmlcov/ .coverage* coverage.xml -# Sphinx documentation -docs/_build/ +# Mkdocs documentation +site/ # Pyenv local version information .python-version diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 340ed502..eafa009a 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -5,6 +5,9 @@ repos: - id: check-merge-conflict - id: check-toml # For pyproject.toml - id: check-yaml # For workflows + # Only parse the files for syntax, don't do full load. + # We need this because of mkdocs.yml, which uses some custom tags to perform dynamic imports from python. 
+ args: ["--unsafe"] - id: end-of-file-fixer - id: trailing-whitespace args: [--markdown-linebreak-ext=md] diff --git a/.readthedocs.yaml b/.readthedocs.yaml deleted file mode 100644 index 8b4835c6..00000000 --- a/.readthedocs.yaml +++ /dev/null @@ -1,17 +0,0 @@ -version: 2 - -build: - os: ubuntu-22.04 - tools: - python: "3.12" - jobs: - post_create_environment: - - python -m pip install poetry - post_install: - - VIRTUAL_ENV=$READTHEDOCS_VIRTUALENV_PATH poetry install --only main,docs,docs-ci - - poetry run poetry-dynamic-versioning - -sphinx: - builder: dirhtml - configuration: "docs/conf.py" - fail_on_warning: true diff --git a/ATTRIBUTION.md b/ATTRIBUTION.md index 3aaeb5eb..7f4f12e2 100644 --- a/ATTRIBUTION.md +++ b/ATTRIBUTION.md @@ -1,12 +1,13 @@ -This file serves as a way to explicitly give credit to projects which made mcproto possible. +This document serves as a way to explicitly give credit to projects which made mcproto possible. Note that as with any other project, if there was some code that was directly utilized from these projects, it will be mentioned in `LICENSE-THIRD-PARTY.txt`, not in here. This file isn't meant to serve as a place to disclose used code and it's licenses, but rather to give proper credit where it is due, and to shout out a few amazing projects that allowed mcproto to exist in the first place. -- **wiki.vg** (): An absolutely amazing community driven wiki that documents how the minecraft protocol is - structured and the changes that occur between the protocol versions. +- **Minecraft wiki ()** (previously `wiki.vg`): An absolutely amazing + community driven wiki that documents how the minecraft protocol is structured and the changes that occur between the + protocol versions. - **PyMine-Net**: The project that was the main inspiration to this project, being a separation of the minecraft networking tooling used in PyMine-Server, which is an attempt at implementing a fully working minecraft server purely in python. 
However, this project is no longer maintained, and so mcproto was created to be it's replacement. diff --git a/CHANGELOG.md b/CHANGELOG.md index 2c93807c..b4bc6ff9 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -4,22 +4,24 @@ - [#130](https://github.com/py-mine/mcproto/issues/130): Renamed "shared_key" field to "shared_secret" in `LoginEncryptionPacket`, following the official terminology. - - This is a breaking change, `LoginEncryptionPacket`'s `__init__` method now uses "shared_secret" keyword only argument, not "shared_key". + - This is a breaking change, `LoginEncryptionPacket`'s `__init__` method now uses "shared_secret" keyword only argument, not "shared_key". + - [#130](https://github.com/py-mine/mcproto/issues/130): The `LoginStart` packet now contains a (required) UUID field (which can be explicitly set to `None`). - - For some reason, this field was not added when the login packets were introduced initially, and while the UUID field can indeed be omitted in some cases (it is an optional filed), in vast majority of cases, it will be present, and we should absolutely support it. - - As this is a new required field, the `__init__` function of `LoginStart` now also expects this `uuid` keyword argument to be present, making this a breaking change. + - For some reason, this field was not added when the login packets were introduced initially, and while the UUID field can indeed be omitted in some cases (it is an optional field), in vast majority of cases, it will be present, and we should absolutely support it. + - As this is a new required field, the `__init__` function of `LoginStart` now also expects this `uuid` keyword argument to be present, making this a breaking change. - [#159](https://github.com/py-mine/mcproto/issues/159): Fix packet compression handling in the interaction methods.
- This fixes a bug that didn't allow for specifying an exact compression threshold that the server specified in `LoginSetCompression` packet, and instead only allowing to toggle between compression on/off, which doesn't really work as server doesn't expect compression for packets below that threshold. + This fixes a bug that didn't allow for specifying an exact compression threshold that the server specified in `LoginSetCompression` packet, and instead only allowing to toggle between compression on/off, which doesn't really work as server doesn't expect compression for packets below that threshold. + + - `sync_write_packet`, `async_write_packet`, `sync_read_packet` and `async_read_packet` functions now take `compression_threshold` instead of `compressed` bool flag + - - `sync_write_packet`, `async_write_pakcet`, `sync_read_packet` and `async_read_packet` functions now take `compression_threshold` instead of `compressed` bool flag - [#161](https://github.com/py-mine/mcproto/issues/161): `LoginEncryptionRequest` now uses `cryptography.hazmat.primitives.asymmetric.rsa.RSAPublicKey` to hold the public key, instead of just pure `bytes`. Encoding and decoding of this key happens automatically during serialize/deserialize. This is a breaking change for anyone relying on the `public_key` field from this packet being `bytes`, and for anyone initializing this packet directly with `__init__`, which now expects `RSAPublicKey` instance instead. ### Features - [#129](https://github.com/py-mine/mcproto/issues/129): Added a system for handling Minecraft authentication - - Yggdrasil system for unmigrated i.e. non-Microsoft accounts (supportng Minecraft accounts, and the really old Mojang accounts) - Microsoft OAuth2 system (Xbox live) for migrated i.e. Microsoft accounts + - Yggdrasil system for unmigrated i.e. non-Microsoft accounts (supporting Minecraft accounts, and the really old Mojang accounts) + - Microsoft OAuth2 system (Xbox live) for migrated i.e.
Microsoft accounts - [#160](https://github.com/py-mine/mcproto/issues/160): Re-export the packet classes (or any other objects) from the gamestate modules (`mcproto.packets.handshaking`/`mcproto.packets.login`/...) directly. Allowing simpler imports (`from mcproto.packets.login import LoginStart` instead of `from mcproto.packets.login.login import LoginStart`) - [#161](https://github.com/py-mine/mcproto/issues/161): Add support for encryption. Connection classes now have `enable_encryption` method, and some encryption related functions were added into a new `mcproto.encryption` module. - [#168](https://github.com/py-mine/mcproto/issues/168): Add multiplayer related functionalities for requesting and checking joins for original (bought) minecraft accounts. This allows us to join online servers. @@ -28,7 +30,7 @@ ### Bugfixes - [#130](https://github.com/py-mine/mcproto/issues/130): `LoginEncryptionResponse` now includes the `server_id` field. This field was previously hard-coded to 20 spaces (blank value), which is what all minecraft clients on minecraft 1.7.x or higher do, however with older versions, this field is set to 20 random characters, which we should respect. - - This is not a breaking change, as `server_id` will default to `None` in `LoginEncryptionResponse`'s `__init__`, meaning any existing code utilizing this packet will still work. It is purely an additional option. + - This is not a breaking change, as `server_id` will default to `None` in `LoginEncryptionResponse`'s `__init__`, meaning any existing code utilizing this packet will still work. It is purely an additional option. - [#167](https://github.com/py-mine/mcproto/issues/167): Fix packet reading/writing when compression is enabled (use zlib as expected, instead of gzip which we were using before) - [#170](https://github.com/py-mine/mcproto/issues/170): Preserve the call parameters and overloads in the typing signature of `mcproto.packets.packet_map.generate_packet_map` function. 
(This wasn't the case before, since `functools.lru_cache` doesn't preserve this data). Note that this loses on the typing information about the cache itself, as now it will appear to be a regular uncached function to the type-checker. We deemed this approach better to the alternative of no typing info for call arguments or overloads, but preserving cache info. @@ -39,8 +41,9 @@ - [#141](https://github.com/py-mine/mcproto/issues/141): Move installation instructions from README to Installation docs page - [#144](https://github.com/py-mine/mcproto/issues/144): Add attributetable internal sphinx extension for showing all attributes and methods for specified classes. - - This adds `attributetable` sphinx directive, which can be used before autodoc directive. This will create the attribute table, which will get dynamically moved right below the class definition from autodoc (using javascript). - - This extension was implemented by [discord.py](https://github.com/Rapptz/discord.py/blob/2fdbe59376d736483cd1226e674e609433877af4/docs/extensions/attributetable.py), this is just re-using that code, with some modifications to fit our code style and to fit the documentation design (furo theme). + - This adds `attributetable` sphinx directive, which can be used before autodoc directive. This will create the attribute table, which will get dynamically moved right below the class definition from autodoc (using javascript). + - This extension was implemented by [discord.py](https://github.com/Rapptz/discord.py/blob/2fdbe59376d736483cd1226e674e609433877af4/docs/extensions/attributetable.py), this is just re-using that code, with some modifications to fit our code style and to fit the documentation design (furo theme). 
+ - Updated contributing guidelines (restructure and rewrite some categories, to make it more readable) ### Internal Changes @@ -49,45 +52,43 @@ - [#153](https://github.com/py-mine/mcproto/issues/153): Replace flake8 linter with ruff (mostly equivalent, but much faster and configurable from pyproject.toml) - [#154](https://github.com/py-mine/mcproto/issues/154): Enforce various new ruff linter rules: - - **PGH:** pygrep-hooks (replaces pre-commit version) - - **PL:** pylint (bunch of typing related linter rules) - - **UP:** pyupgrade (forces use of the newest possible standards, depending on target version) - - **RET:** flake8-return (various linter rules related to function returns) - - **Q:** flake8-quotes (always use double quotes) - - **ASYNC:** flake8-async (report blocking operations in async functions) - - **INT:** flake-gettext (gettext related linting rules) - - **PTH:** flake8-use-pathlib (always prefer pathlib alternatives to the os ones) - - **RUF:** ruff custom rules (various additional rules created by the ruff linter team) + - **PGH:** pygrep-hooks (replaces pre-commit version) + - **PL:** pylint (bunch of typing related linter rules) + - **UP:** pyupgrade (forces use of the newest possible standards, depending on target version) + - **RET:** flake8-return (various linter rules related to function returns) + - **Q:** flake8-quotes (always use double quotes) + - **ASYNC:** flake8-async (report blocking operations in async functions) + - **INT:** flake-gettext (gettext related linting rules) + - **PTH:** flake8-use-pathlib (always prefer pathlib alternatives to the os ones) + - **RUF:** ruff custom rules (various additional rules created by the ruff linter team) --- - ## Version 0.4.0 (2023-06-11) ### Breaking Changes - [#41](https://github.com/py-mine/mcproto/issues/41): Rename `mcproto.packets.abc` to `mcproto.packets.packet` - [#116](https://github.com/py-mine/mcproto/issues/116): Restructure the project, moving to a single protocol version model - 
- This change does NOT have a deprecation period, and will very likely break most existing code-bases. However this change is necessary, as multi-version support was unsustainable (see issue #45 for more details) - - Any packets and types will no longer be present in versioned folders (mcproto.packets.v757.xxx), but rather be directly in the parent directory (mcproto.packets.xxx). - - This change doesn't affect manual communication with the server, connection, and basic IO writers/readers remain the same. + - This change does NOT have a deprecation period, and will very likely break most existing code-bases. However this change is necessary, as multi-version support was unsustainable (see issue #45 for more details) + - Any packets and types will no longer be present in versioned folders (mcproto.packets.v757.xxx), but rather be directly in the parent directory (mcproto.packets.xxx). + - This change doesn't affect manual communication with the server, connection, and basic IO writers/readers remain the same. --- - ## Version 0.3.0 (2023-06-08) ### Features - [#54](https://github.com/py-mine/mcproto/issues/54): Add support for LOGIN state packets - - `LoginStart` - - `LoginEncryptionRequest` - - `LoginEncryptionResponse` - - `LoginSuccess` - - `LoginDisconnect` - - `LoginPluginRequest` - - `LoginPluginResponse` - - `LoginSetCompression` + - `LoginStart` + - `LoginEncryptionRequest` + - `LoginEncryptionResponse` + - `LoginSuccess` + - `LoginDisconnect` + - `LoginPluginRequest` + - `LoginPluginResponse` + - `LoginSetCompression` ### Bugfixes @@ -103,9 +104,9 @@ - [#34](https://github.com/py-mine/mcproto/issues/34): Add version guarantees page - [#40](https://github.com/py-mine/mcproto/issues/40): Move code of conduct to the docs. 
- Improve readability of the changelog readme (changes/README.md) - - Mention taskipy `changelog-preview` shorthand command - - Add category headers splitting things up, for better readability - - Explain how to express multiple changes related to a single goal in a changelog fragment. + - Mention taskipy `changelog-preview` shorthand command + - Add category headers splitting things up, for better readability + - Explain how to express multiple changes related to a single goal in a changelog fragment. - Include `CHANGELOG.md` file in project's distribution files. ### Internal Changes @@ -121,14 +122,13 @@ --- - ## Version 0.2.0 (2022-12-30) ### Features - [#14](https://github.com/py-mine/mcproto/issues/14): Add `__slots__` to most classes in the project - - All connection classes are now slotted - - Classes in `mcproto.utils.abc` are now slotted + - All connection classes are now slotted + - Classes in `mcproto.utils.abc` are now slotted - Separate packet interaction functions into `mcproto.packets.interactions`, (though they're reexported in `mcproto.packets`, so no breaking changes) @@ -147,11 +147,11 @@ ### Internal Changes - [#6](https://github.com/py-mine/mcproto/issues/6): Rework deprecation system - - Drop support for date-based deprecations, versions work better - - Provide `deprecation_warn` function, which emits warnings directly, no need for a decorator - - Add a `SemanticVersion` class, supporting version comparisons - - If the project's version is already higher than the specified deprecation removal version, raise a DeprecationWarning - as a full exception (rather than just a warning). 
+ - Drop support for date-based deprecations, versions work better + - Provide `deprecation_warn` function, which emits warnings directly, no need for a decorator + - Add a `SemanticVersion` class, supporting version comparisons + - If the project's version is already higher than the specified deprecation removal version, raise a DeprecationWarning + as a full exception (rather than just a warning). - [#7](https://github.com/py-mine/mcproto/issues/7): Add towncrier for managing changelog - [#14](https://github.com/py-mine/mcproto/issues/14): Add slotscheck, ensuring `__slots__` are defined properly everywhere. - [#14](https://github.com/py-mine/mcproto/issues/14): Make `typing-extensions` a runtime dependency and use it directly, don't rely on `if typing.TYPE_CHECKING` blocks. @@ -164,5 +164,5 @@ --- -*The changelog was added during development of 0.2.0, so nothing prior is documented here. Try checking the GitHub -releases, or git commit history directly.* +_The changelog was added during development of 0.2.0, so nothing prior is documented here. Try checking the GitHub +releases, or git commit history directly._ diff --git a/CODE-OF-CONDUCT.md b/CODE-OF-CONDUCT.md index 53ba588f..39b40e06 100644 --- a/CODE-OF-CONDUCT.md +++ b/CODE-OF-CONDUCT.md @@ -1,2 +1,2 @@ You can find our Code of Conduct in the project's documentation -[here](https://mcproto.readthedocs.io/en/latest/pages/code-of-conduct/) +[here](https://py-mine.github.io/mcproto/latest/code_of_conduct/) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 5835b077..24617caf 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -1,640 +1,4 @@ # Contributing Guidelines -This project is fully open-sourced and new contributions are welcome! - -However know that we value the quality of the code we maintain and distribute, and you will need to adhere to some code -quality standards which we define. Your PR may get rejected on the basis of a contributor failing to follow these -guidelines. 
- -## The Golden Rules of Contributing - -We recommend you adhere to most of these rules in pretty much every project, even if it doesn't require you to. These -rules can often make your life much easier, make debugging quicker and keep the commit history cleaner. - -1. **Lint before you push.** We have multiple code linting rules, which define our general style of the code-base. - These are generally enforced through certain tools, which you are expected to run before every push, and ideally - before every commit. The specifics of our linting will be mentioned [later](#style-guide) -2. **Make great commits.** Great commits should be atomic (do one thing only and do it well), with a commit message - explaining what was done, and why. More on this in [here](#making-great-commits). -3. **Make an issue before the PR.** If you think there's something that should be added to the project, or you - found some issue or something which could be improved, consider making an issue before committing a lot of time to - create a PR. This can help you save a lot of time in case we'd decide that the feature doesn't adhere to our vision - of the project's future, or isn't something which we would be willing/able to maintain. Even though we won't - actively enforce this rule, and for some small obvious features, or bug-fixes making an issue may be an overkill, - for bigger changes, an issue can save you a lot of time implementing something which may not even be wanted in the - project, and therefore won't get accepted. -4. **Don't open a pull request if you aren't assigned to the issue.** If you want to work on some existing GitHub - issue, it is always better to ask a maintainer to assign you to this issue. If there's already someone assigned to - an issue, consider offering to collaborate with that person, rather than ignoring his work and doing it on your own. 
- This method can help avoid having multiple people working on the exact same thing at the same time, without knowing - about each other, which will often lead to multiple approaches solving the same thing, only one of which can be - accepted (usually from the person who was originally assigned). -5. **Use assets licensed for public use.** Whenever a static asset such as images/video files/audio or even code is - added, they must have a compatible license with our projects. -6. **Use draft pull requests if you aren't done yet.** If your PR isn't ready to be reviewed yet, mark it as draft. - This is further described in [this section](#work-in-progress-prs) -7. **Follow our [Code of Conduct](./CODE-OF-CONDUCT.md).** - -## Project installation - -This project uses [`poetry`](https://python-poetry.org/docs/). It's a tool for managing python virtual environments. If -you haven't heard of those, they're essentially a mini installation of python used purely for the project you're working -on (as opposed to using a single global python installation for everything, which is prone to conflicts, as different -projects might need different versions of the same package). Follow the linked documentation for installation -instructions. - -Once installed, you will want to create a new environment for mcproto, with all of it's dependencies installed. To do -that, enter the clonned repository in your terminal, and run: - -```bash -poetry install -``` - -Note that you will want to re-run this command each time our dependencies are updated, to stay in sync with the project. - -After that, the environment will contain all of the dependencies, including various executable programs, such as -`basedpyright`. One of these executable programs is also `python`, which is the python interpreter for this -environment, capable of interacting with all of the installed libraries. 
- -You will now need to make your terminal use the programs from this environment, rather than any global versions that you -may have installed, so that you can use the tools in it when working on the project. Some IDEs/editors are capable of -doing this for you automatically when you open the project. If yours isn't, you can run: - -```bash -poetry shell -``` - -You can then start your IDE from the terminal, after you ran this command, and it should pick up the python environment -created by poetry. - -You can also just prefix any command with `poetry run` (e.g. `poetry run python`) to use the executable from the -environment, without activating it, however you will almost always want to activate the environment instead. - -For more info about poetry, make sure to check their amazing official documentation: `https://python-poetry.org/docs/`, -these include installation instructions, explain how to add new dependencies to the project, or how to remove some, and -everything else you'd need to know. - -## Style Guide - -For clarity and readability, adhering to a consistent code style across the whole project is very important. It is not -unusual that style adjustments will be requested in pull requests. - -It is always a good practice to review the style of the existing code-base before adding something new, and adhere to -that established style. That applies even if it isn't the style you generally prefer, however if you think a code style -change of some kind would be justified, feel free to open an issue about it and tell us what exactly should be changed, -and why you think this change is important. If you want to, you can also ask to be assigned to the issue and work on -changing the style in the code-base in your own PR. (Hey you may even get to edit this section!) - -> A style guide is about consistency. Consistency with this style guide is important. Consistency within a project is -> more important. 
Consistency within one module or function is the most important. -> -> However, know when to be inconsistent -- sometimes style guide recommendations just aren't applicable. When in doubt, -> use your best judgment. Look at other examples and decide what looks best. And don't hesitate to ask! -> — [PEP 8, the general Style Guide for Python Code](https://peps.python.org/pep-0008/) - -### Automatic linting - -As there is a lot of various styling rules we adhere to in our code base, and obviously, describing all of them in a -style guide here would just take way too long, and it would be impossible to remember anyway. For that reason, we use -automated tools to help us catch any style violation without manual review! - -Currently, these are the tools we use for code style enforcement: - -- [`ruff`](https://beta.ruff.rs/docs/): General python linter, formatter and import sorter -- [`slotscheck`](https://slotscheck.readthedocs.io/en/latest/): Enforces the presence of `__slots__` in classes - -You can read more about them individually in the sections below. It is important that you familiarize yourself with -these tools, and their standalone usage, but it would of course be very annoying to have to run the commands to run -these tools manually, so while there will be instructions on how to do that, you should pretty much always prefer -direct IDE/editor integration, which is mentioned [here](#editor-integration), and make use of -[pre-commit](#pre-commit). - -#### Ruff linter & Formatter - -Ruff is an all-in-one linter & formatter solution, which aims to replace the previously very popular -[`flake8`](https://flake8.pycqa.org/en/latest/) linter, [`isort`](https://pycqa.github.io/isort/) import sorter and -[`black`](https://black.readthedocs.io/en/stable/) formatter. Ruff is faster (written in rust! 🦀) and includes most of -the popular flake8 extensions directly. 
It is almost 1:1 compatible with black, which means the way it formats code is -pretty much the same, with only some very subtle differences. - -You can check the ruff configuration we're using in [`pyproject.toml`](./pyproject.toml) file, under the `[tool.ruff]` -category (and it's subcategories), you can find the enabled linter rules there, and some more specific configuration, -like line length, python version, individual ignored lint rules, and ignored files. - -To run `ruff` **linter** on the code, you can use `ruff check .` command, while in the project's root directory (from -an activated poetry environment, alternatively `poetry run ruff .`). Ruff also supports some automatic fixes to many -violations it founds, to enable fixing, you can use `ruff check --fix`. This will also run the `isort` integration. - -If you find a rule violation in your code somewhere, and you don't understand what that rule's purpose is, `ruff` evens -supports running `ruff rule [rule id]` (for example `ruff rule ANN401`). These explanations are in markdown, so I'd -recommend using a markdown renderer such as [`glow`](https://github.com/charmbracelet/glow) (on Arch linux, you can -install it with: `pacman -S glow`) and piping the output into it for a much nicer reading experience: `ruff rule ANN401 -| glow`. - -To run `ruff` **formatter** on the code, you can simply execute `ruff format .` command (also needs an activated poetry -environment). This will automatically format all files in the code-base. - -#### Slotscheck - -Slotscheck is a utility/linter that enforces the proper use of `__slots__` in our python classes. This is important for -memory-optimization reasons, and it also improves the general performance when accessing/working with attributes of -slotted classes. - -If you're unsure how slots work / what they are, there is a very nice explanation of them in the official python wiki: -[here](https://wiki.python.org/moin/UsingSlots). 
- -To run slotscheck, you can simply execute `slotscheck -m mcproto` from an activated poetry environment (or -`poetry run slotscheck -m mcproto`). - -### Use of `__all__` - -Consider a python module like the below: - -```python -import foo -from bar import do_bar - - -def do_foobar(): - foo.do_foo() - do_bar() -``` - -If someone were to import it with `from module_above import *`, they'd import `foo`, `do_bar` and `do_foobar`. However -that's kind of weird, in most cases, we don't actually want our imports to be included in a wildcard import like this. -For that reason, we can define a special variable called `__all__`, that specifies all of the things that should -actually be included with a wildcard import like this. - -It is our convention to set this variable right below the imports, like this: - -```python -import foo -from bar import do_bar - -__all__ = ["do_foobar"] - - -def do_foobar(): - foo.do_foo() - do_bar() -``` - -With that, we've explicitly specified what functions/classes should be considered a part of this file, and are expected -to be imported, with all of the rest being considered private and only used in this file internally. (Though it doesn't -mean that the unspecified objects actually can't be imported, it just means they won't be imported with a wildcard `*` -import. So running `from module_above import foo` would work, even though `from module_above import *` wouldn't include -`foo`.) - -Note that generally, in vast majority of cases, wildcard imports shouldn't be used and instead we should be explicit so -that we know where our dependencies come from. The actual primary reason we specify `__all__` in our files is to -immediately show which parts of that file should be considered public, and which are internal to the file. - -### Docstring formatting directive - -The `ruff` linter uses various rules from the `flake8-docstrings` rule-set to enforce a specific standardized docstring -formatting. 
However, these rules will only enforce the basic structure of docstrings, and where they need to be -specified. In addition to these rules, it is important to mention the rules on the style of the content in the -docstrings themselves. - -Specifically, we follow Sphinx, and the docstrings should be written in the restructuredtext format. Sphinx supports -various directives that allow specifying notes, describe how to add citations, references to other -functions/documentation, how to add tables and a bunch of other things. You can read up on these guidelines in the -official Sphinx documentation: [here](https://www.sphinx-doc.org/en/master/usage/restructuredtext/) - -The use of this style is important, as many editors recognize it, and can show you properly formatted descriptions on -functions or other types of objects upon hovering on them. This format is also useful for automatic generation of -documentation using Sphinx, as it's much easier and more comfortable to simply add the descriptions for each function -directly in the code, than having to replicate it manually in the standalone source code for the project's -documentation, and keep it up to date as new changes to the code are introduced. That said, there are some standalone -files used for documentation, and these are actually also written in reStructuredText format, so what you'll learn here -will carry over to writing / changing those. - -Below is a quick example that demonstrates some of how this code style can look. This showcases both the general style -of our docstrings, and the use of restructuredtext in them. However, it is heavily recommended that you read up on this -in the linked documentation, to know what markup is available and how to use it. You can also see a bunch of examples -from the existing code in this code-base, if you just want a better quick glance. 
- -```python -def donut(a: bool, b: str) -> None: - """Short one-line description of the function.""" - - -def pineapple(a: bool, b: str) -> str: - """One-line description of the function telling us what it's about. - - Detailed multiline description. - This may include the full explanation of how this function should be used. - - We can also have multiple sections like this. - For example to include further use instruction with some examples or perhaps - with an explanation of how the function works, if it's relevant. - """ - - -def divide(x: int, y: int) -> int: - """Add two numbers together. - - :param x: Number 1 (numerator). - :param y: Number 2 (denominator). - :return: Result of ``x / y`` addition. - :raises ZeroDivisionError: If ``y`` is 0. - """ - -def basic_rest(a: bool, b: str) -> None: - """My funtcion teaches you about some basic reST formatting. - - Some types of text formatting in restructuredtext (reST) are very similar to - those supported in markdown, for example, **this text will be bold**, *This - text will be emphasised (italic)*. - - One important difference between reST and markdown is the use of double backquotes - to include inline code (literals), instead of just a single backquote: ``code``. - - * This is a bulleted list. - * It has three items, the second one has a nested list. - * This is a nested list. - * It has two items. - * This is the third item, continuing the parent list. - - 1. This is a numbered list - 2. It has two items. - - #. This is a numbered list - #. It has two items too. - - .. versionadded:: 2.5 - .. deprecated:: 3.1 - Use :func:`coconut` instead. - .. versionremoved:: 4.0 - The :func:`coconut` is more flexible, and should be used instead. - This function will be removed in an upcomming major release. - .. note:: - This is a note directive, it will show up nicely formatted in the - documentation. - .. warning:: - This is a warning directive. 
It may contain some important info about - this function, and should be used instead of the note directive, if you're - describing some information regarding security. - """ - -def hyperlinks(q: float) -> int: - """This function teaches you about hyperlinks - - Restructured text also supports hyperlinks. For named links, you can use: - `Link text `_. For inline links, that just show the URL, - you can simply use: ``_. - - You can also separate the link and target definition, like this: `my link`_. - - .. _my link: https://domain.invalid - - .. deprecated: 2.5 - """ - -def reference(param: str) -> None: - """This teaches you about references. - - You will often need to refer to external (or internal) functions, classes or - other things in your docstrings. To do so, you can follow this guide: - - * To refer to the parameter/argument that this function takes, simply use ``param``. - * To refer to another class, use :class:`MyClass`. - * To refer to another function, use :func:`my_func`. - * To refer to another method, in the class this method is in, use :meth:`my_method`. - * To refer to an attribute of the class this method is in, use :attr:`my_attr` - * To refer to a constant, use :const:`FOOBAR`. - * To refer to an exception, use :exc:`FoobarError`. - * To refer to an object, use :obj:`my_object`. - - If you need to refer to objects defined outside of this file, you can use the - fully qualified path to them, like: :class:`my_module.foo.bar.MyClass`. This - will show to full path in the documentation too though, if you don't want that, - you can also use: :class:`~my_module.foo.bar.MyClass`, which will only show up - as ``MyClass`` in the final docs. - - You can also refer to entire modules: :mod:`itertools`. Since ``itertools`` is - actually a part of the standard library, sphinx can even produce links that go - back to Python's official docs for the ``itertools`` modules. - - You can even refer to PEPs, like the :PEP:`287`. - - .. 
seealso:: - This directive can be used to refer to some other documentation or external - documents. - - It can be useful to put your references in, instead of just having them in - the docstring's text directly. - - One such reference that can be useful here is the Sphinx documentation of the - Python Domain, that details all of these modules. An interesting detail about - this domain is that it's actually included and used by default, that's why in - the documentation, you may see ``:py:func:`` while in these examples, we simply - used ``:func:``. - - You can find this documentation - `here _` - """ -``` - -Another general rule of thumb when writing docstrings is to generally stick to using an imperative mood. - -Imperative mood is a certain grammatical form of writing that expresses a clear command to do something. - -**Use:** "Build a player object." -**Don't use:** "Returns a player object." - -Present tense defines that the work being done is now, in the present, rather than in the past or future. - -**Use:** "Build a player object." -**Don't use:** "Built a player object." or "Will build a player object." - -## Type hinting - -[PEP 484](https://www.python.org/dev/peps/pep-0484/) formally specifies type hints for Python. You can specify type -hints for a function, in addition to just parameter names, allowing you to quickly understand what kind of parameter -this is. Most IDEs/editors will even be able to recognize these type hints, and provide auto-completion based on them. -For example, if you type hint a parameter as `list`, an editor can suggest list methods like `join` or `append`. Many -editors will even show you the type hint on the argument in the function's signature, when you're trying to call it, -along with the parameter name making it really easy to understand what you're supposed to pass without even looking at -the docstring. 
- -For example, an untyped function can look like this: - -```python -def divide(a, b): - """Divide the two given arguments.""" - return a / b -``` - -With type-annotations, the function looks like this: - -```python -def divide(a: int, b: int) -> float: - """Divide the two given arguments.""" - return a / b -``` - -Thankfully python type-hinting is fairly easy to understand, but if you do want to see some rather interesting -resources for a bit more advanced concepts such as type variables or some more complex types like `typing.Callable`, -we've compiled a quick list of really amazing resources about these type hinting practice. - -- Python documentation from `typing` library: -- MyPy documentation (very extensive but quite beginner friendly): -- Decorator Factory blog about typing: -- Typing Generics (advanced): - -### Enforcing type hints - Type checker - -Even though the type hints can be pretty useful in knowing what the function variables are expected to be and they also -provide better auto-completion, if we're not careful, we could soon end up violating our own type specifications, -because by default python doesn't enforce these type-hints in any way. To python, they're not much more than comments. - -To make sure that our code-base really is correct type-wise, we use a tool that looks at the code statically (similarly -to a linter), and analyzes the types, finding any inconsistencies. Using a type-checker can be very beneficial, -especially to bigger projects, as it can quickly catch mistakes we made based on purely the types, without even having -to run the code. So many times, you'll see issues before actually testing things out (with unit-tests, or manually). In -a lot of cases, type checkers can even uncover many things that our unit tests wouldn't find. - -There are many python type-checkers available, the most notable ones being `mypy` and `pyright`. We decided to use -`pyright`, because it has great support for many newer typing features. 
Specifically, this project actually uses -`basedpyright`, which is a fork of pyright, that adds in some extra checks and features from Pylance (vscode -extension). - -Pyright can be used from the terminal as a stand-alone linter-like checker, by simply running `basedpyright .` (from -within an activated virtual environment). But just like with linters, you should ideally just [include it into your -editor directly](#editor-integration). We also run pyright automatically, as a part of [pre-commit](#pre-commit). - -## Pre-commit - -Now that you've seen the linters, formatters, type-checkers and other tools that we use in the project, you might be -wondering whether you're really expected to run all of those commands manually, after each change. And of course, no, -you're not, that would be really annoying, and you'd probably also often just forget to do that. - -So, instead of that, we use a tool called `pre-commit`, which creates a [git -hook](https://git-scm.com/book/en/v2/Customizing-Git-Git-Hooks), that will automatically run before each commit you -make. That means each time when you make a commit, all of these tools will run over the code you updated, and if any of -these linters detects an issue, the commit will be aborted, and you will see which linter failed, and it's output -telling you why. - -To install pre-commit as a git hook all you need to do is to run `pre-commit install` from an activated poetry -environment, installing it into the git repository as a hook running before every commit. That said, you can also run -pre-commit without having to install it (or without having to make a commit, even if installed). To do that, simply -execute: `pre-commit run --all-files`. Note that the installed hook will only run the linters on the files that were -updated in the commit, while using the command directly will run it on the whole project. 
- -You can find pre-commit's configuration the [`.pre-commit-config.yaml`](./.pre-commit-config.yaml) file, where we -define which tools should be ran and how. Currently, pre-commit runs ruff linter, ruff formatter, slotscheck and -pyright, but also a checker for some issues in TOML/YAML files. - -Even though in most cases enforcing linting before each commit is what we want, there are some situations where we need -to commit some code which doesn't pass these checks. This can happen for example after a merge, or as a result of -making a single purpose small commit without yet worrying about linters. In these cases, you can use the `--no-verify` -flag when making a commit, telling git to skip all of the pre-commit hooks and commit normally. You can also only skip -a specific hook(s), by setting `SKIP` environmental variable (e.g. `SKIP=basedpyright`, or -`SKIP=ruff-linter,ruff-formatter,slotscheck`), the names of the individual hooks are their ids, you can find those in -the configuration file for pre-commit. - -However this kind of verification skipping should be used sparingly. We value a clean history which consistently follows -our linting guidelines, and making commits with linting issues only leads to more commits, fixing those issues later. If -you really do need to skip the linters though, you should then wait until you create another commit fixing the issues -before pushing the code to github, to avoid needlessly failing the automated workflows, which run pre-commit themselves -(amongst other things). - -## Editor Integration - -Even with pre-commit, it would still be very annoying to have to only run the linters during the commit, because with -the amount of rules we have, and especially if you're not used to following many of them, you will make a lot of -mistakes. Because of that, we heavily recommend that you integrate these tools into your IDE/editor directly. 
Most -editors will support integration will all of these tools, so you shouldn't have any trouble doing this. - -If you're using neovim, I would recommend setting up LSP (Language Server Protocol), and installing basedpyright, as it -has language server support built into it. Same thing goes with `ruff`, which has an LSP implementation -[`ruff-lsp`](https://github.com/astral-sh/ruff-lsp). As for slotscheck, there isn't currently any good way to integrate -it directly, so you will need to rely on pre-commit, or run it manually. However, slotscheck violations are fairly -rare. - -On vscode, you can simply install the following extensions: - -- [BasedPyright](https://marketplace.visualstudio.com/items?itemName=detachhead.basedpyright) -- [ruff](https://marketplace.visualstudio.com/items?itemName=charliermarsh.ruff) - -(Similarly to neovim, there is no extension available for slotscheck, however violations are fairly rare, and it should -be enough to have it run with pre-commit.) - -## Making Great Commits - -A well-structured git log is key to a project's maintainability; it provides insight into when and why things were done -for future maintainers of the project. - -Commits should be as narrow in scope as possible. Commits that span hundreds of lines across multiple unrelated -functions and/or files are very hard for maintainers to follow. After about a week, they'll probably be hard for you to -follow too. - -Please also avoid making a lot minor commits for fixing test failures or linting errors. Instead, run the linters before -you push, ideally with [pre-commit](#pre-commit). - -We've compiled a few resources on making good commits: - -- -- -- -- -- - -## Work in Progress PRs - -Whenever you add a pull request that isn't yet ready to be reviewed and merged, you can mark it as a draft. This -provides both visual and functional indicator that the PR isn't yet ready to be reviewed and merged. 
- -This feature should be utilized instead of the traditional method of prepending `[WIP]` to the PR title. - -Methods of marking PR as a draft: - -1. When creating it - - ![image](https://user-images.githubusercontent.com/20902250/94499351-bc736e80-01fc-11eb-8e99-a7863dd1428a.png) - -2. After it was created - - ![image](https://user-images.githubusercontent.com/20902250/94499276-8930df80-01fc-11eb-9292-7f0c6101b995.png) - -For more info, check the GitHub's documentation about this feature -[here](https://github.blog/2019-02-14-introducing-draft-pull-requests/) - -## Don't reinvent the wheel - -We're an open-sourced project, and like most other open-sourced projects, we depend on other projects that already -implemented many things which we need in our code. It doesn't make a lot of sense to try and implement everything from -the bottom, when there already are perfectly reasonable and working implementations made. - -In most of the cases, this will mean using some libraries which can simply be added to our [project's -dependencies](./pyproject.toml) which is maintained with poetry, which you can read more about in [this -section](#project-installation). - -Libraries aren't the only way to make use of the existing open-source code that's already out there. Another -thing which we can often do is simply directly copy open-source code into our project. However always make sure that -before even considering to paste someones code into ours, you have the right to do so given to you by the code's -author. This could be a directly given permission, but in most of cases, it will be an open-source license allowing -anyone to use the code it applies to as long as the conditions of that license are followed. - -We all stand on the shoulders of giants even just by using the python language. There were some very smart people -behind implementing this language or the libraries that our project uses and they deserve the credit for their hard -work as their license specifies. 
To do this, we use the [`ATTRIBUTION.txt`](./ATTRIBUTION.txt) file. - -This project is released under the LGPL v3 license and this means we can utilize the code of LGPL v3 libraries as well -as the permissive licenses (such as MIT, Apache or BSD licenses), it also means that when you contribute to our -project, you agree that your contributions may appear on other projects accordingly to the LGPL license (however you -may choose to later publish your code under any other license). - -LGPL v3 is a "copy-left" license, which ensures that your code will always remain open-sourced and it will never be -relicensed (unless you give your permission as the copyright holder of your code). If for some reason you don't want to -contribute under a copy-left license but rather under MIT, or other permissive license, you are free to do so, just -mention whatever parts you added in the attribution file with your license's full-text with a copyright notice that -includes your name and a link to the original source (if you just made that code up, instead of a link to the original -source, you can just include a link to your GitHub profile, or just use your git email address.) - -- How software licenses work: -- GitHub's docs on licenses: - -## Changelog - -It is important for the users to know what has changed in between the release versions, for that reason, we keep -a changelog, which is handled by a library called `towncrier`. Information about how this changelog works in detail is -described in it's own file at: [`./changes/README.md`](./changes/README.md). - -Do make sure to read this file, as we generally require a changelog fragment file to be added with each pull request. -A PR without this file will NOT be accepted (unless there is a reason not to include a changelog - like for minor -fixes, or other exceptions). 
- -## Unit-Tests - -To ensure that our project will work correctly with any new changes made to it, we use automated unit-tests which test -the individual functions in our code with some sample inputs for correct outputs. Unit-testing is explained in better -detail in it's own file at [`./tests/README.md`](./tests/README.md). - -## Deprecations - -The removal or rename of anything that is a part of the public API must go through a deprecation process. This will -ensure that users won't be surprised when we eventually remove some features, and their code won't end up broken after -an update. Instead, a deprecated call should produce a warning about the deprecation, where the user is informed at -which version will the accessed object be removed. Until then, the object must have the same old behavior and shouldn't -break existing code-bases. - -The project already contains some internal utilities that can help up mark something as deprecated easily, here's a few -quick examples of these utilities in practice: - -```python -# Old version: -class Car: - def __init__(self, engine_power: int, engine_type: str, fuel_tank_size: int): - self.engine_power = engine_power - self.engine_type = engine_type - self.fuel_tank_size = fuel_tank_size - -# New version, with deprecations preventing old code from breaking: -from mcproto.utils.deprecation import deprecated - -class Car: - def __init__(self, engine: Engine, fuel_tank_size: int): - self.engine = engine - self.fuel_tank_size = fuel_tank_size - - @deprecated(removal_version="2.0.0", replacement="engine.power") - @property - def engine_power(self) -> int: - return self.engine.power - - @deprecated(removal_version="2.0.0", replacement="engine.type") - @property - def engine_power(self) -> int: - return self.engine.type -``` - -```python -# Old version: -def print_value(value: str, add_version: bool) -> None: - txt = "The value " - if add_version: - txt += f"for version {PROJECT_VERSION} " - txt += f"is: {value}" - print(txt) - 
-# New version, with deprecation -from mcproto.utils.deprecation import deprecation_warn - -def print_value(value: str, add_version: bool = False) -> None: - txt = "The value " - if add_version: - deprecation_warn(obj_name="add_version argument", removal_version="4.0.0") - txt += f"for version {PROJECT_VERSION} " - txt += f"is: {value}" - print(txt) - -# New version, after version 4.0.0 (with deprecations removed): -def print_value(value: str) -> None: - print(f"The value is: {value}") -``` - -## Changes to this Arrangement - -We tried to design our specifications in a really easy and comprehensive way so that they're understandable to -everyone, but of course from a point of someone who already has some established standards, they'll usually always -think that their standards are the best standards, even though there may actually be a better way to do some things. -For this reason, we're always open to reconsidering these standards if there's a good enough reason for it. - -After all every project will inevitably evolve over time, and these guidelines are no different. This document and the -standards it holds are open to pull requests and changes by the contributors, just make sure that this document is -always in sync with the codebase, which means that if you want to propose some syntactic change, you also change it -everywhere in the codebase so that the whole project will actually follow the newly proposed standards. - -If you do believe that you have something valuable to add or change, please don't hesitate to do so in a PR (of course, -after you opened an issue, as with every proposed feature by a non-core developer). - -## Footnotes - -This could be a lot to remember at once, but you can use this document as a resource while writing the code for our -repository and cross-check that your styling is following our guidelines and that you're practicing the rules that -we've set here. 
- -This document was inspired by -[Python Discord's CONTRIBUTING agreement.](https://github.com/python-discord/bot/blob/master/CONTRIBUTING.md). +You can find our contributing guidelines and instructions on setting up the project in our documentation: +[here](https://py-mine.github.io/mcproto/latest/contributing/guides/) diff --git a/LICENSE-THIRD-PARTY.txt b/LICENSE-THIRD-PARTY.txt index 0fca57c3..3e9704f6 100644 --- a/LICENSE-THIRD-PARTY.txt +++ b/LICENSE-THIRD-PARTY.txt @@ -15,13 +15,12 @@ Applies to: - .github/workflows/fragment-check.yml: Entire file - .github/workflows/prepare-release.yml: Workflow heavily inspired by original - .github/scripts/normalize_coverage.py: Entire file - - docs/_static/extra.css: Entire file - Copyright (c) 2015-present Rapptz All rights reserved. - - docs/pages/version_guarantees.rst: Entire file - - docs/_static/extra.css: Attribute table related config - - docs/_static/extra.js: Attribute table related functionality - - docs/extensions/attributetable.py: Entire file + - docs/installation/version-guarantees.rst: Entire file + - Copyright (c) 2016-2024 Martin Donath + All rights reserved + - docs/contributing/reporting-a-bug.md: Majority of the file --------------------------------------------------------------------------------------------------- Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal @@ -41,6 +40,27 @@ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +--------------------------------------------------------------------------------------------------- + ISC License + +Applies to: + - Copyright (c) 2021, Timothée Mazzucotelli + All rights reserved. 
+ - docs/css/mkdocstrings.css: Entire file + - docs/css/material.css: Entire file + +Permission to use, copy, modify, and/or distribute this software for any +purpose with or without fee is hereby granted, provided that the above +copyright notice and this permission notice appear in all copies. + +THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES +WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR +ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES +WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN +ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF +OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + --------------------------------------------------------------------------------------------------- GNU LESSER GENERAL PUBLIC LICENSE Applies to: diff --git a/README.md b/README.md index ac129f89..2e6348d9 100644 --- a/README.md +++ b/README.md @@ -5,330 +5,14 @@ [![current PyPI version](https://img.shields.io/pypi/v/mcproto.svg)](https://pypi.org/project/mcproto/) [![Validation](https://github.com/ItsDrike/mcproto/actions/workflows/validation.yml/badge.svg)](https://github.com/ItsDrike/mcproto/actions/workflows/validation.yml) [![Unit tests](https://github.com/ItsDrike/mcproto/actions/workflows/unit-tests.yml/badge.svg)](https://github.com/ItsDrike/mcproto/actions/workflows/unit-tests.yml) -[![Docs](https://img.shields.io/readthedocs/mcproto?label=Docs)](https://mcproto.readthedocs.io/) +[![Docs](https://github.com/py-mine/mcproto/actions/workflows/mkdocs.yml/badge.svg)](https://py-mine.github.io/mcproto) -This is a heavily Work-In-Progress library, which attempts to be a full wrapper around the minecraft protocol, allowing -for simple interactions with minecraft servers, and perhaps even for use as a base to a full minecraft server -implementation in python (though the speed will very 
likely be quite terrible, making it probably unusable as any -real-world playable server). +Mcproto is a Python library that provides various low-level interactions with the Minecraft protocol. It attempts to be +a full wrapper around the Minecraft protocol, which means it could be used as a basis for Minecraft bots written in +Python, or even full Python server implementations. -Currently, the library is very limited and doesn't yet have any documentation, so while contributions are welcome, fair -warning that there is a lot to comprehend in the code-base and it may be challenging to understand it all. +> [!WARNING] +> Currently, the library is still a work in progress and very incomplete, so while contributions are welcome, fair warning +> that using mcproto in production isn't recommended. -## Examples - -Since there is no documentation, to satisfy some curious minds that really want to use this library even in this -heavily unfinished state, here's a few simple snippets of it in practice: - -### Manual communication with the server - -As sending entire packets is still being worked on, the best solution to communicate with a server is to send the data -manually, using our connection reader/writer, and buffers (being readers/writers, but only from bytearrays as opposed -to using an actual connection). - -Fair warning: This example is pretty long, but that's because it aims to explain the minecraft protocol to people that -see it for the first time, and so a lot of explanation comments are included. But the code itself is actually quite -simple, due to a bunch of helpful read/write methods the library already provides. - -```python -import json -import asyncio - -from mcproto.buffer import Buffer -from mcproto.connection import TCPAsyncConnection -from mcproto.protocol.base_io import StructFormat - - -async def handshake(conn: TCPAsyncConnection, ip: str, port: int = 25565) -> None: - # As a simple example, let's request status info from a server. 
- # (this is what you see in the multiplayer server list, i.e. the server's motd, icon, info - # about how many players are connected, etc.) - - # To do this, we first need to understand how are minecraft packets composed, and take a look - # at the specific packets that we're interested in. Thankfully, there's an amazing community - # made wiki that documents all of this! You can find it at https://wiki.vg/ - - # Alright then, let's take a look at the (uncompressed) packet format specification: - # https://wiki.vg/Protocol#Packet_format - # From the wiki, we can see that a packet is composed of 3 fields: - # - Packet length (in bytes), sent as a variable length integer - # combined length of the 2 fields below - # - Packet ID, also sent as varint - # each packet has a unique number, that we use to find out which packet it is - # - Data, specific to the individual packet - # every packet can hold different kind of data, this will be shown in the packet's - # specification (you can find these in wiki.vg) - - # Ok then, with this knowledge, let's establish a connection with our server, and request - # status. To do this, we fist need to send a handshake packet. Let's do it: - - # Let's take a look at what data the Handshake packet should contain: - # https://wiki.vg/Protocol#Handshake - handshake = Buffer() - # We use 47 for the protocol version, as it's quite old, and will work with almost any server - handshake.write_varint(47) - handshake.write_utf(ip) - handshake.write_value(StructFormat.USHORT, port) - handshake.write_varint(1) # Intention to query status - - # Nice! Now that we have the packet data, let's follow the packet format and send it. - # Let's prepare another buffer that will contain the last 2 fields (packet id and data) - # combined. We do this since the first field will require us to know the size of these - # two combined, so let's just put them into 1 buffer. 
- packet = Buffer() - packet.write_varint(0) # Handshake packet has packet ID of 0 - packet.write(handshake) # Full data from our handshake packet - - # And now, it's time to send it! - await conn.write_varint(len(packet)) # First field (size of packet id + data) - await conn.write(packet) # Second + Third fields (packet id + data) - - -async def status(conn: TCPAsyncConnection, ip: str, port: int = 25565) -> dict: - # This function will be called right after a handshake - # Sending this packet told the server recognize our connection, and since we've specified - # the intention to query status, it then moved us to STATUS game state. - - # Different game states have different packets that we can send out, for example there is a - # game state for login, that we're put into while joining the server, and from it, we tell - # the server our username player UUID, etc. - - # The packet IDs are unique to each game state, so since we're now in status state, a packet - # with ID of 0 is no longer the handshake packet, but rather the status request packet - # (precisely what we need). - # https://wiki.vg/Protocol#Status_Request - - # The status request packet is empty, and doesn't contain any data, it just instructs the - # server to send us back a status response packet. Let's send it! - packet = Buffer() - packet.write_varint(0) # Status request packet ID - - await conn.write_varint(len(packet)) - await conn.write(packet) - - # Now, let's receive the response packet from the server - # Remember, the packet format states that we first receive a length, then packet id, then data - _response_len = await conn.read_varint() - _response = await conn.read(_response_len) # will give us a bytearray - - # Amazing, we've just received data from the server! 
But it's just bytes, let's turn it into - # a Buffer object, which includes helpful methods that allow us to read from it - response = Buffer(_response) - packet_id = response.read_varint() # Remember, 2nd field is the packet ID - - # Let's see it then, what packet did we get? - print(packet_id) # 0 - - # Interesting, this packet has an ID of 0, but wasn't that the status request packet? We wanted - # a response tho. Well, actually, if we take a look at the status response packet at the wiki, - # it really has an ID of 0: - # https://wiki.vg/Protocol#Status_Response - # Aha, so not only are packet ids unique between game states, they're also unique between the - # direction a server bound packet (sent by client, with server as the destination) can have an - # id of 0, while a client bound packet (sent by server, with client as the destination) can - # have the same id, and mean something else. - - # Alright then, so we know what we got is a status response packet, let's read the wiki a bit - # further and see what data it actually contains, and see how we can get it out. Hmmm, it - # contains a UTF-8 encoded string that represents JSON data, ok, so let's get that string, it's - # still in our buffer. - received_string = response.read_utf() - - # Now, let's just use the json module, convert the string into some json object (in this case, - # a dict) - data = json.loads(received_string) - return data - -async def main(): - # That's it, all that's left is actually calling our functions now - - ip = "mc.hypixel.net" - port = 25565 - - async with (await TCPAsyncConnection.make_client((ip, port), 2)) as connection: - await handshake(connection, ip, port) - data = await status(connection, ip, port) - - # Wohoo, we got the status data! 
Let's see it - print(data["players"]["max"]) # This is the server's max player amount (slots) - print(data["players"]["online"]) # This is how many people are currently online - print(data["description"]) # And here's the motd - - # There's a bunch of other things in this data, try it out, see what you can find! - -def start(): - # Just some boilerplate code that can run our asynchronous main function - asyncio.run(main()) -``` - -### Using packet classes for communication - -The first thing you'll need to understand about packet classes in mcproto is that they're generally going to support -the latest minecraft version, and while any the versions are usually mostly compatible, mcproto does NOT guarantee -support for any older protocol versions. - -#### Obtaining the packet map - -As we've already seen in the example before, packets follow certain format, and every packet has it's associated ID -number, direction (client->server or server->client), and game state (status/handshaking/login/play). The packet IDs -are unique to given direction and game state combination. - -For example in clientbound direction (packets sent from server to the client), when in the status game state, there -will always be unique ID numbers for the different packets. In this case, there would actually only be 2 packets here: -The Ping response packet, which has an ID of 1, and the Status response packet, with an ID of 0. - -To receive a packet, we therefore need to know both the game state, and the direction, as only then are we able to -figure out what the type of packet it is. In mcproto, packet receiving therefore requires a "packet map", which is a -mapping (dictionary) of packet id -> packet class. 
Here's an example of obtaining a packet map: - -```python -from mcproto.packets import generate_packet_map, GameState, PacketDirection - -STATUS_CLIENTBOUND_MAP = generate_packet_map(PacketDirection.CLIENTBOUND, GameState.STATUS) -``` - -Which, if you were to print it, would look like this: - -``` -{ - 0: - 1: , -} -``` - -Telling us that in the status gamestate, for the clientbound direction, these are the only packet we can receive, -and showing us the actual packet classes for every possible ID number. - -#### Building our own packets - -Our first packet will always have to be a Handshake, this is the only packet in the entire handshaking state, and it's -a "gateway", after which we get moved to a different state, specifically, either to STATUS (to obtain information about -the server, such as motd, amount of players, or other details you'd see in the multiplayer screen in your MC client). - -```python -from mcproto.packets.handshaking.handshake import Handshake, NextState - -my_handshake = Handshake( - # Once again, we use an old protocol version so that even older servers will respond - protocol_version=47, - server_address="mc.hypixel.net", - server_port=25565, - next_state=NextState.STATUS, -) -``` - -That's it! We've now constructed a full handshake packet with all of the data it should contain. You might remember -from the example above, that we originally had to look at the protocol specification, find the handshake packet and -construct it's data as a Buffer with all of these variables. - -With these packet classes, you can simply follow your editor's autocompletion to see what this packet requires, pass it -in and the data will be constructed for you from these attributes, without constantly cross-checking with the wiki. - -For completion, let's also construct the status request packet that we were sending to instruct the server to send us -back the status response packet. 
- -```python -from mcproto.packets.status.status import StatusRequest - -my_status_request = StatusRequest() -``` - -This one was even easier, as the status request packet alone doesn't contain any special data, it's just a request to -the server to send us some data back. - -#### Sending packets - -To actually send out a packet to the server, we'll need to create a connection, and use the custom functions -responsible for sending packets out. Let's see it: - -```python -from mcproto.packets import async_write_packet -from mcproto.connection import TCPAsyncConnection - -async def main(): - ip = "mc.hypixel.net" - port = 25565 - - async with (await TCPAsyncConnection.make_client((ip, port), 2)) as connection: - await async_write_packet(connection, my_handshake) - # my_handshake is a packet we've created in the example before -``` - -Much easier than the manual version, isn't it? - -#### Receiving packets - -Alright, we might now know how to send a packet, but how do we receive one? Let's see: - -```python -# Let's say we already have a connection at this moment, after all, how else would -# we've gotten into the STATUS game state. -# Also, let's do something different, let's say we have a synchronous connection, just for fun -from mcproto.connection import TCPSyncConnection -conn: TCPSyncConnection - -# With a synchronous connection, comes synchronous reader, so instead of using async_read_packet, -# we'll use sync_read_packet here -from mcproto.packets import sync_read_packet - -# But remember? To read a packet, we'll need to have that packet map, telling us which IDs represent -# which actual packet types. Let's pass in the one we've constructed before -packet = sync_read_packet(conn, STATUS_CLIENTBOUND_MAP) - -# Cool! We've got back a packet, let's see what kind of packet we got back -from mcproto.packets.status.status import StatusResponse -from mcproto.packets.status.ping import PingPong - -if isinstance(packet, StatusResponse): - ... 
-elif isinstance(packet, PingPong): - ... -else: - raise Exception("Impossible, there aren't other client bound packets in the STATUS game state") -``` - -#### Requesting status - -Alright, so let's actually try to put all of this knowledge together, and create something meaningful. Let's replicate -the status obtaining logic from the manual example, but with these new packet classes: - -```python -from mcproto.connection import TCPAsyncConnection -from mcproto.packets import async_write_packet, async_read_packet, generate_packet_map -from mcproto.packets.packet import PacketDirection, GameState -from mcproto.packets.handshaking.handshake import Handshake, NextState -from mcproto.packets.status.status import StatusRequest, StatusResponse -from mcproto.packets.status.ping import PingPong - -STATUS_CLIENTBOUND_MAP = generate_packet_map(PacketDirection.CLIENTBOUND, GameState.STATUS) - - -async def get_status(ip: str, port: int) -> dict: - handshake_packet = Handshake( - protocol_version=47, - server_address=ip, - server_port=port, - next_state=NextState.STATUS, - ) - status_req_packet = StatusRequest() - - async with (await TCPAsyncConnection.make_client((ip, port), 2)) as connection: - # We start out at HANDSHAKING game state - await async_write_packet(connection, handshake_packet) - # After sending the handshake, we told the server to now move us into the STATUS game state - await async_write_packet(connection, status_req_packet) - # Since we're still in STATUS game state, we use the status packet map when reading - packet = await async_read_packet(connection, STATUS_CLIENTBOUND_MAP) - - # Now that we've got back the packet, we no longer need the connection, we won't be sending - # anything else, so let's get out of the context manager. 
+For more info, check out our [documentation](https://py-mine.github.io/mcproto).
diff --git a/changes/257.feature.md b/changes/257.feature.md index 453b22bf..01ac334e 100644 --- a/changes/257.feature.md +++ b/changes/257.feature.md @@ -1,12 +1,13 @@ -- Added the `NBTag` to deal with NBT data: - - The `NBTag` class is the base class for all NBT tags and provides the basic functionality to serialize and deserialize NBT data from and to a `Buffer` object. - - The classes `EndNBT`, `ByteNBT`, `ShortNBT`, `IntNBT`, `LongNBT`, `FloatNBT`, `DoubleNBT`, `ByteArrayNBT`, `StringNBT`, `ListNBT`, `CompoundNBT`, `IntArrayNBT`and `LongArrayNBT` were added and correspond to the NBT types described in the [NBT specification](https://wiki.vg/NBT#Specification). - - NBT tags can be created using the `NBTag.from_object()` method and a schema that describes the NBT tag structure. - Compound tags are represented as dictionaries, list tags as lists, and primitive tags as their respective Python types. - The implementation allows to add custom classes to the schema to handle custom NBT tags if they inherit the `:class: NBTagConvertible` class. - - The `NBTag.to_object()` method can be used to convert an NBT tag back to a Python object. Use include_schema=True to include the schema in the output, and `include_name=True` to include the name of the tag in the output. In that case the output will be a dictionary with a single key that is the name of the tag and the value is the object representation of the tag. - - The `NBTag.serialize()` can be used to serialize an NBT tag to a new `Buffer` object. - - The `NBTag.deserialize(buffer)` can be used to deserialize an NBT tag from a `Buffer` object. - - If the buffer already exists, the `NBTag.write_to(buffer, with_type=True, with_name=True)` method can be used to write the NBT tag to the buffer (and in that case with the type and name in the right format). 
+  - The classes `EndNBT`, `ByteNBT`, `ShortNBT`, `IntNBT`, `LongNBT`, `FloatNBT`, `DoubleNBT`, `ByteArrayNBT`, `StringNBT`, `ListNBT`, `CompoundNBT`, `IntArrayNBT` and `LongArrayNBT` were added and correspond to the NBT types described in the [NBT specification](https://wiki.vg/NBT#Specification).
+ - The `NBTag.read_from(buffer, with_type=True, with_name=True)` method can be used to read an NBT tag from the buffer (and in that case with the type and name in the right format). + - The `NBTag.value` property can be used to get the value of the NBT tag as a Python object. diff --git a/changes/274.internal.md b/changes/274.internal.md index c4892e10..9a45750f 100644 --- a/changes/274.internal.md +++ b/changes/274.internal.md @@ -1,4 +1,5 @@ -- Update ruff version (the version we used was very outdated) -- Drop isort in favor of ruff's built-in isort module in the linter -- Drop black in favor of ruff's new built-in formatter -- Update ruff settings, including adding/enabling some new rule-sets +Update ruff + - Update ruff version (the version we used was very outdated) + - Drop isort in favor of ruff's built-in isort module in the linter + - Drop black in favor of ruff's new built-in formatter + - Update ruff settings, including adding/enabling some new rule-sets diff --git a/changes/285.internal.1.md b/changes/285.internal.1.md index 5536712b..8e1372f7 100644 --- a/changes/285.internal.1.md +++ b/changes/285.internal.1.md @@ -1,34 +1 @@ -- **Function**: `gen_serializable_test` - - Generates tests for serializable classes, covering serialization, deserialization, validation, and error handling. - - **Parameters**: - - `context` (dict): Context to add the test functions to (usually `globals()`). - - `cls` (type): The serializable class to test. - - `fields` (list): Tuples of field names and types of the serializable class. - - `serialize_deserialize` (list, optional): Tuples for testing successful serialization/deserialization. - - `validation_fail` (list, optional): Tuples for testing validation failures with expected exceptions. - - `deserialization_fail` (list, optional): Tuples for testing deserialization failures with expected exceptions. - - **Note**: Implement `__eq__` in the class for accurate comparison. 
- - - The `gen_serializable_test` function generates a test class with the following tests: - -.. literalinclude:: /../tests/mcproto/utils/test_serializable.py - :language: python - :start-after: # region Test ToyClass - :end-before: # endregion Test ToyClass - - - The generated test class will have the following tests: - -```python -class TestGenToyClass: - def test_serialization(self): - # 3 subtests for the cases 1, 2, 3 (serialize_deserialize) - - def test_deserialization(self): - # 3 subtests for the cases 1, 2, 3 (serialize_deserialize) - - def test_validation(self): - # 3 subtests for the cases 4, 5, 6 (validation_fail) - - def test_exceptions(self): - # 3 subtests for the cases 7, 8, 9 (deserialization_fail) -``` +Add `gen_serializable_test` function to generate tests for serializable classes, covering serialization, deserialization, validation, and error handling. diff --git a/changes/285.internal.2.md b/changes/285.internal.2.md index 7969f33c..c0eb7b9e 100644 --- a/changes/285.internal.2.md +++ b/changes/285.internal.2.md @@ -1,16 +1 @@ -- **Class**: `Serializable` - - Base class for types that should be (de)serializable into/from `mcproto.Buffer` data. - - **Methods**: - - `__attrs_post_init__()`: Runs validation after object initialization, override to define custom behavior. - - `serialize() -> Buffer`: Returns the object as a `Buffer`. - - `serialize_to(buf: Buffer)`: Abstract method to write the object to a `Buffer`. - - `validate()`: Validates the object's attributes; can be overridden for custom validation. - - `deserialize(cls, buf: Buffer) -> Self`: Abstract method to construct the object from a `Buffer`. - - **Note**: Use the `dataclass` decorator when adding parameters to subclasses. - - - Exemple: - -.. 
+  - Trigger and run lint and unit-tests workflows from a single main CI workflow.
- -These fragment files use the following format: `{pull_request_number}.{type}.md`, - -Possible types are: -- **`feature`**: New feature that affects the public API. -- **`bugfix`**: A bugfix, which was affecting the public API. -- **`docs`**: Change to the documentation, or updates to public facing docstrings -- **`breaking`**: Signifying a breaking change of some part of the project's public API, which could cause issues for - end-users updating to this version. (Includes deprecation removals.) -- **`deprecation`**: Signifying a newly deprecated feature, scheduled for eventual removal. -- **`internal`** Fully internal change that doesn't affect the public API, but is significant enough to be mentioned, - likely because it affects project contributors. (Such as a new linter rule, change in code style, significant change - in internal API, ...) - -For changes that do not fall under any of the above cases, please specify the lack of the changelog in the pull request -description, so that a maintainer can skip the job that checks for presence of this fragment file. - -## Create fragments with commands - -While you absolutely can simply create these files manually, it's a much better idea to use the `towncrier` library, -which can create the file for you in the proper place. With it, you can simply run `towncrier create -{pull_request}.{type}.md` after creating the pull request, edit the created file and commit the changes. - -If the change is simple enough, you can even use the `-c`/`--content` flag and specify it directly, like: `towncrier -create 12.feature.md -c "Add dinosaurs!"`, or if you're used to terminal editors, there's also the `--edit` flag, which -opens the file with your `$EDITOR`. - -## Preview changelog - -To preview the latest changelog, run `towncrier build --draft --version [version number]`. (For version number, you can -pretty much enter anything as this is just for a draft version. 
For true builds, this would be the next version number, -so for example, if the current version is 1.0.2, next one will be one either 1.0.3, or 1.1.0, or 2.0.0. But for drafts, -you can also just enter something like `next` for the version, as it's just for your own private preview.) - -To make this a bit easier, there is a taskipy task running the command above, so you can just use `poetry run task -changelog-preview` to see the changelog, if you don't like remembering new commands. - -## Multiple fragments in single PR - -If necessary, multiple fragment files can be created per pull-request, with different change types, if the PR covers -multiple areas. For example for PR #13 that both introduces a feature, and changes the documentation, can add -2 fragment files: `13.feature.md` and `13.docs.md`. - -Additionally, if a single PR is addressing multiple unrelated topics in the same category, and needs to make multiple -distinct changelog entries, an optional counter value can be added at the end of the file name (needs to be an -integer). So for example PR #25 which makes 2 distinct internal changes can add these fragment files: -`25.internal.1.md` and `25.internal.2.md`. (The numbers in this counter position will not be shown in the final -changelog and are merely here for separation of the individual fragments.) - -However if the changes are related to some bigger overarching goal, you can also just use a single fragment file with -the following format: - -```markdown -Update changelog -- Rename `documentation` category to shorter: `docs` -- Add `internal` category for changes unrelated to public API, but potentially relevant to contributors -- Add github workflow enforcing presence of a new changelog fragment file for each PR - - For insignificant PRs which don't require any changelog entry, a maintainer can add `skip-fragment-check` label. 
-``` - -That said, if you end up making multiple distinct changelog fragments like this, it's a sign that your PR is probably -too big, and you should split it up into multiple PRs instead. Making huge PRs that address several unrelated topics at -once is generally a bad practice, and should be avoided. If you go overboard, your PR might even end up getting closed -for being too big, and you'll be required to split it up. - -## Footnotes - -- See for more info about why and how to properly maintain a changelog -- For more info about `towncrier`, check out it's [documentation](https://towncrier.readthedocs.io/en/latest/tutorial.html) +To learn more about changelog fragments, see our [documentation](https://py-mine.github.io/mcproto/latest/installation/changelog/) diff --git a/docs/LICENSE.md b/docs/LICENSE.md new file mode 100644 index 00000000..ed1add7a --- /dev/null +++ b/docs/LICENSE.md @@ -0,0 +1,23 @@ +# Documentation License + +This documentation itself does NOT follow the primary project license! 
+ +Instead, it follows a Creative Commons license: CC BY-NC-SA 4.0 + +## Attribution + +If you need a copyright header for proper attribution, you can use: + +Mcproto Documentation © 2024 by ItsDrike + +In HTML: + +```html +Mcproto Documentation © 2024 by ItsDrike +``` + +If you also need the license identifier, use the following: + +```html +CC BY-NC-SA 4.0 +``` diff --git a/docs/_static/extra.css b/docs/_static/extra.css deleted file mode 100644 index f0373b0c..00000000 --- a/docs/_static/extra.css +++ /dev/null @@ -1,119 +0,0 @@ -html { - word-wrap: anywhere; -} - -body { - --toc-item-spacing-horizontal: 0.5rem; - --admonition-font-size: 0.8em; - - --attribute-table-title: var(--color-content-foreground); - --attribute-table-entry-border: var(--color-foreground-border); - --attribute-table-entry-text: var(--color-api-name); - --attribute-table-entry-hover-border: var(--color-content-foreground); - --attribute-table-entry-hover-background: var(--color-api-background-hover); - --attribute-table-entry-hover-text: var(--color-content-foreground); - --attribute-table-badge: var(--color-api-keyword); -} - -.icon { - user-select: none; -} - -.viewcode-back { - position: absolute; - right: 1em; - background-color: var(--color-code-background); - width: auto; -} - -.toc-drawer { - width: initial; - max-width: 20em; - right: -20em; -} - -.toc-tree ul ul ul ul { - border-left: 1px solid var(--color-background-border); -} - -@media (max-width: 82em) { - body { - font-size: 0.7em; - } - - .toc-tree { - padding-left: 0; - } - - .sidebar-brand-text { - font-size: 1rem; - } - - .sidebar-tree .reference { - padding: 0.5em 1em; - } -} - -/* attribute tables */ -.py-attribute-table { - display: flex; - flex-wrap: wrap; - flex-direction: row; - margin: 0 2em; - padding-top: 16px; -} - -.py-attribute-table-column { - flex: 1 1 auto; -} - -.py-attribute-table-column:not(:first-child) { - margin-top: 1em; -} - -.py-attribute-table-column > span { - color: var(--attribute-table-title); 
-} - -main .py-attribute-table-column > ul { - list-style: none; - margin: 4px 0px; - padding-left: 0; - font-size: 0.95em; -} - -.py-attribute-table-entry { - margin: 0; - padding: 2px 0; - padding-left: 0.2em; - border-left: 2px solid var(--attribute-table-entry-border); - display: flex; - line-height: 1.2em; -} - -.py-attribute-table-entry > a { - padding-left: 0.5em; - color: var(--attribute-table-entry-text); - flex-grow: 1; -} - -.py-attribute-table-entry > a:hover { - color: var(--attribute-table-entry-hover-text); - text-decoration: none; -} - -.py-attribute-table-entry:hover { - background-color: var(--attribute-table-entry-hover-background); - border-left: 2px solid var(--attribute-table-entry-hover-border); - text-decoration: none; -} - -.py-attribute-table-badge { - flex-basis: 3em; - text-align: right; - font-size: 0.9em; - color: var(--attribute-table-badge); - -moz-user-select: none; - -webkit-user-select: none; - user-select: none; -} diff --git a/docs/_static/extra.js b/docs/_static/extra.js deleted file mode 100644 index 12fd8a08..00000000 --- a/docs/_static/extra.js +++ /dev/null @@ -1,13 +0,0 @@ -document.addEventListener("DOMContentLoaded", () => { - const tables = document.querySelectorAll( - ".py-attribute-table[data-move-to-id]" - ); - tables.forEach((table) => { - let element = document.getElementById( - table.getAttribute("data-move-to-id") - ); - let parent = element.parentNode; - // insert ourselves after the element - parent.insertBefore(table, element.nextSibling); - }); -}); diff --git a/docs/api/basic.rst b/docs/api/basic.rst deleted file mode 100644 index c68e0204..00000000 --- a/docs/api/basic.rst +++ /dev/null @@ -1,5 +0,0 @@ -Basic Usage -=========== - -.. 
- TODO: Write this diff --git a/docs/api/internal.rst b/docs/api/internal.rst deleted file mode 100644 index 7a8e0745..00000000 --- a/docs/api/internal.rst +++ /dev/null @@ -1,14 +0,0 @@ -Internal API -============ - -Everything listed on this page is considered internal, and is only present to provide linkable references, and -as an easy quick reference for contributors. These components **are not a part of the public API** and **they -should not be used externally**, as we do not guarantee their backwards compatibility, which means breaking changes -may be introduced between patch versions without any warnings. - -.. automodule:: mcproto.utils.abc - :exclude-members: define - -.. autofunction:: tests.helpers.gen_serializable_test -.. - TODO: Write this diff --git a/docs/api/packets.rst b/docs/api/packets.rst deleted file mode 100644 index 5c2d1a55..00000000 --- a/docs/api/packets.rst +++ /dev/null @@ -1,45 +0,0 @@ -Packets documentation -===================== - -Base classes and interaction functions --------------------------------------- - -.. automodule:: mcproto.packets - :members: - :undoc-members: - :show-inheritance: - - -Handshaking gamestate ---------------------- - -.. automodule:: mcproto.packets.handshaking.handshake - :members: - :undoc-members: - :show-inheritance: - -Status gamestate ----------------- - -.. automodule:: mcproto.packets.status.ping - :members: - :undoc-members: - :show-inheritance: - -.. automodule:: mcproto.packets.status.status - :members: - :undoc-members: - :show-inheritance: - -Login gamestate ---------------- - -.. 
automodule:: mcproto.packets.login.login - :members: - :undoc-members: - :show-inheritance: - -Play gamestate --------------- - -Not yet implemented diff --git a/docs/api/protocol.rst b/docs/api/protocol.rst deleted file mode 100644 index 92fea8d9..00000000 --- a/docs/api/protocol.rst +++ /dev/null @@ -1,24 +0,0 @@ -Protocol documentation -====================== - -This is the documentation for methods minecraft protocol interactions, connection and buffer. - - -.. attributetable:: mcproto.protocol.base_io.BaseAsyncReader - -.. attributetable:: mcproto.protocol.base_io.BaseSyncReader - -.. automodule:: mcproto.protocol.base_io - :members: - :undoc-members: - :show-inheritance: - -.. autoclass:: mcproto.buffer.Buffer - :members: - :undoc-members: - :show-inheritance: - -.. automodule:: mcproto.connection - :members: - :undoc-members: - :show-inheritance: diff --git a/docs/api/types/index.rst b/docs/api/types/index.rst deleted file mode 100644 index f17972a8..00000000 --- a/docs/api/types/index.rst +++ /dev/null @@ -1,12 +0,0 @@ -.. api/types documentation master file - -======================= -API Types Documentation -======================= - -Welcome to the API Types documentation! This documentation provides information about the various types used in the API. - -.. toctree:: - :maxdepth: 2 - - nbt.rst diff --git a/docs/api/types/nbt.rst b/docs/api/types/nbt.rst deleted file mode 100644 index e2a4398b..00000000 --- a/docs/api/types/nbt.rst +++ /dev/null @@ -1,6 +0,0 @@ -NBT Format -========== - -.. 
automodule:: mcproto.types.nbt - :members: - :show-inheritance: diff --git a/docs/assets/draft-pr-conversion.png b/docs/assets/draft-pr-conversion.png new file mode 100644 index 00000000..1467615a Binary files /dev/null and b/docs/assets/draft-pr-conversion.png differ diff --git a/docs/assets/draft-pr-creation.png b/docs/assets/draft-pr-creation.png new file mode 100644 index 00000000..a4e2fc86 Binary files /dev/null and b/docs/assets/draft-pr-creation.png differ diff --git a/docs/assets/draft-pr-unmark.png b/docs/assets/draft-pr-unmark.png new file mode 100644 index 00000000..0b9382b9 Binary files /dev/null and b/docs/assets/draft-pr-unmark.png differ diff --git a/docs/assets/py-mine_logo.png b/docs/assets/py-mine_logo.png new file mode 100644 index 00000000..5e2af94e Binary files /dev/null and b/docs/assets/py-mine_logo.png differ diff --git a/docs/conf.py b/docs/conf.py deleted file mode 100644 index 5fc7efdc..00000000 --- a/docs/conf.py +++ /dev/null @@ -1,218 +0,0 @@ -"""Configuration file for the Sphinx documentation builder. 
- -For the full list of built-in configuration values, see the documentation: -https://www.sphinx-doc.org/en/master/usage/configuration.html - -# -- Project information ----------------------------------------------------- -# https://www.sphinx-doc.org/en/master/usage/configuration.html#project-information -""" - -from __future__ import annotations - -import datetime -import sys -from pathlib import Path - -import m2r2 -import sphinxcontrib.towncrier.ext -from docutils import statemachine -from packaging.version import parse as parse_version -from sphinx.ext import autodoc -from sphinx.util.nodes import nodes -from typing_extensions import override - -if sys.version_info >= (3, 11): - from tomllib import load as toml_parse -else: - from tomli import load as toml_parse - - -# -- Basic project information ----------------------------------------------- - -with Path("../pyproject.toml").open("rb") as f: - pkg_meta: dict[str, str] = toml_parse(f)["tool"]["poetry"] - -project = str(pkg_meta["name"]) -copyright = f"{datetime.datetime.now(tz=datetime.timezone.utc).date().year}, ItsDrike" # noqa: A001 -author = "ItsDrike" - -parsed_version = parse_version(pkg_meta["version"]) -release = str(parsed_version) - -# -- General configuration --------------------------------------------------- -# https://www.sphinx-doc.org/en/master/usage/configuration.html#general-configuration - -# Add docs/extensions into python path, allowing custom internal sphinx extensions -# these will now be essentially considered as regualar packages -sys.path.append(str(Path(__file__).parent.joinpath("extensions").absolute())) - -extensions = [ - # official extensions - "sphinx.ext.autodoc", # Automatic documentation generation - "sphinx.ext.autosectionlabel", # Allows referring to sections by their title - "sphinx.ext.extlinks", # Shorten common link patterns - "sphinx.ext.intersphinx", # Used to reference for third party projects: - "sphinx.ext.todo", # Adds todo directive - "sphinx.ext.viewcode", # 
Links to source files for the documented functions - # external - "sphinxcontrib.towncrier.ext", # Towncrier changelog - "m2r2", # Used to include .md files: - "sphinx_copybutton", # Copyable codeblocks - # internal - "attributetable", # adds attributetable directive, for producing list of methods and attributes of class -] - -# The suffix(es) of source filenames. -source_suffix = [".rst", ".md"] - -# The master toctree document. -master_doc = "index" - -# The language for content autogenerated by Sphinx. Refer to documentation -# for a list of supported languages. -# -# This is also used if you do content translation via gettext catalogs. -# Usually you set "language" from the command line for these cases. -language = "en" - -# List of patterns, relative to source directory, that match files and -# directories to ignore when looking for source files. -exclude_patterns = ["_build"] - -# If true, '()' will be appended to :func: etc. cross-reference text. -add_function_parentheses = True - -# If true, the current module name will be prepended to all description -# unit titles (such as .. function::). -add_module_names = True - -# If true, sectionauthor and moduleauthor directives will be shown in the -# output. They are ignored by default. -show_authors = False - -# The name of the Pygments (syntax highlighting) style to use. 
-pygments_style = "sphinx" - -# -- Options for HTML output ------------------------------------------------- -# https://www.sphinx-doc.org/en/master/usage/configuration.html#options-for-html-output - -html_theme = "furo" -html_favicon = "https://i.imgur.com/nPCcxts.png" - -html_static_path = ["_static"] -html_css_files = ["extra.css"] -html_js_files = ["extra.js"] - -# -- Extension configuration ------------------------------------------------- - -# -- sphinx.ext.autodoc ------------------------ - -# What docstring to insert into main body of autoclass -# "class" / "init" / "both" -autoclass_content = "both" - -# Sort order of the automatically documented members -autodoc_member_order = "bysource" - -# Default options for all autodoc directives -autodoc_default_options = { - "members": True, - "undoc-members": True, - "show-inheritance": True, - "exclude-members": "__dict__,__weakref__", -} - -# -- sphinx.ext.autosectionlabel --------------- - -# Automatically generate section labels: -autosectionlabel_prefix_document = True - -# -- sphinx.ext.extlinks ----------------------- - -# will create new role, allowing for example :issue:`123` -extlinks = { - # role: (URL with %s, caption or None) - "issue": ("https://github.com/py-mine/mcproto/issues/%s", "GH-%s"), -} - -# -- sphinx.ext.intersphinx -------------------- - -# Third-party projects documentation references: -intersphinx_mapping = { - "python": ("https://docs.python.org/3", None), -} - -# -- sphinx.ext.todo --------------------------- - -# If true, `todo` and `todoList` produce output, else they produce nothing. 
-todo_include_todos = True - -# -- sphinxcontrib.towncrier.ext --------------- - -towncrier_draft_autoversion_mode = "draft" -towncrier_draft_include_empty = True -towncrier_draft_working_directory = Path(__file__).parents[1].resolve() - -# -- m2r2 -------------------------------------- - -# Enable multiple references to the same URL for m2r2 -m2r_anonymous_references = True - -# Changelog contains a lot of duplicate labels, since every subheading holds a category -# and these repeat a lot. Currently, m2r2 doesn't handle this properly, and so these -# labels end up duplicated. See: https://github.com/CrossNox/m2r2/issues/59 -suppress_warnings = [ - "autosectionlabel.pages/changelog", - "autosectionlabel.pages/code-of-conduct", - "autosectionlabel.pages/contributing", -] - -# -- Other options ----------------------------------------------------------- - - -def mock_autodoc() -> None: - """Mock autodoc to not add ``Bases: object`` to the classes, that do not have super classes. - - See also https://stackoverflow.com/a/75041544/20952782. - """ - - class MockedClassDocumenter(autodoc.ClassDocumenter): - @override - def add_line(self, line: str, source: str, *lineno: int) -> None: - if line == " Bases: :py:class:`object`": - return - super().add_line(line, source, *lineno) - - autodoc.ClassDocumenter = MockedClassDocumenter - - -def override_towncrier_draft_format() -> None: - """Monkeypatch sphinxcontrib.towncrier.ext to first convert the draft text from md to rst. - - We can use ``m2r2`` for this, as it's an already installed extension with goal - of including markdown documents into rst documents, so we simply run it's converter - somewhere within sphinxcontrib.towncrier.ext and include this conversion. - - Additionally, the current changelog format always starts the version with "Version {}", - this doesn't look well with the version set to "Unreleased changes", so this function - also removes this "Version " prefix. 
- """ - orig_f = sphinxcontrib.towncrier.ext._nodes_from_document_markup_source # pyright: ignore[reportPrivateUsage] - - def override_f( - state: statemachine.State, # pyright: ignore[reportMissingTypeArgument] # arg not specified in orig_f either - markup_source: str, - ) -> list[nodes.Node]: - markup_source = markup_source.replace("## Version Unreleased changes", "## Unreleased changes") - markup_source = markup_source.rstrip(" \n") - markup_source = markup_source.removesuffix("---") - markup_source = markup_source.rstrip(" \n") - markup_source = m2r2.M2R()(markup_source) - - return orig_f(state, markup_source) - - sphinxcontrib.towncrier.ext._nodes_from_document_markup_source = override_f # pyright: ignore[reportPrivateUsage] - - -mock_autodoc() -override_towncrier_draft_format() diff --git a/docs/contributing/guides/api-reference.md b/docs/contributing/guides/api-reference.md new file mode 100644 index 00000000..a1ad4fb4 --- /dev/null +++ b/docs/contributing/guides/api-reference.md @@ -0,0 +1,188 @@ +!!! bug "Work In Progress" + + This page is still being written. The content below (if any) may change. + +# Writing API reference + +???+ abstract + + This page contains the guide on documenting the code that will appear in the API reference section of this + documentation. It goes over the technology and libraries that we use to generate this API reference docs, details + the docstring style we use, mentions how to add something into the API reference (like new modules) and details what + should and shouldn't be documented here. + +As was already briefly mentioned in the [documentation](./documentation.md) section, we're using +[mkdocstrings](https://mkdocstrings.github.io/), which is an extension of `mkdocs` that is able to automatically +generate documentation from the source code. 
+ +Well, we're using `mkdocstrings`, but internally, the python handler for `mkdocstrings` is using +[`griffe`](https://mkdocstrings.github.io/griffe/), which is the tool responsible for actually analyzing the source +code and collecting all the details. + +As you might imagine though, in order to allow `griffe` to automatically pick up information about our codebase, it's +necessary to actually include this information into the code, as you're writing it. It's also important to use a +consistent style, that `griffe` can understand. + +In our case, we use the [Google style](https://google.github.io/styleguide/pyguide.html#38-comments-and-docstrings) for +writing docstrings. + +## Google Style docstrings formatting + +While you should ideally just read over the [official +specification](https://google.github.io/styleguide/pyguide.html#38-comments-and-docstrings) (don't worry, it's actually +quite readable; well, other than the white theme), you can also take a quick glance through some of these examples +below, that quickly demonstrate the style. + +```python +def deal_damage(entity: Entity, damage: int) -> None: + """Deal damage to specified entity. + + Args: + entity: The entity to deal damage to + damage: The amount of damage to deal. + + Note: + This might end up killing the entity. If this does occur + a death message will be logged. + """ + entity.hp -= damage + if entity.hp <= 0: + print(f"Entity {entity.name} died.") + + +def bake_cookie(flavor: str, temperature: int = 175) -> str: + """Bake a delicious cookie. + + This function simulates the process of baking a cookie with the given flavor. + + Args: + flavor: The type of cookie to bake. Must be a valid flavor. + temperature: The baking temperature in Celsius. + Defaults to 175. + + Returns: + A string confirming that the cookie is ready. + + Raises: + ValueError: If the flavor is unknown. + RuntimeError: If the oven temperature is too high and the cookie burns. 
+ """ + valid_flavors = {"chocolate chip", "oatmeal", "peanut butter", "sugar"} + if flavor not in valid_flavors: + raise ValueError(f"Unknown flavor: {flavor}") + + if temperature > 500: + raise RuntimeError("Oven overheated! Your cookie is now charcoal.") + + return f"Your {flavor} cookie is baked at {temperature}°F and ready to eat!" + + +class Cat: + """A simple representation of a cat. + + Attributes: + name: The name of the cat. + age: The age of the cat in years. + is_hungry: Whether the cat is hungry. + """ + + def __init__(self, name: str, age: int): + """Initialize a cat with a name and age. + + Args: + name: The name of the cat. + age: The age of the cat in years. + """ + self.name = name + self.age = age + self.is_hungry = True # a new cat is always hungry (duh!) + + def purr(self) -> str: + """Make the cat purr.""" + return "Purr... Purr..." + + def meow(self) -> str: + """Make the cat meow. + + Returns: + A string representing the cat's meow. + """ + return f"{self.name} says 'Meow!'" + + def feed(self) -> None: + """Feed the cat. + + Once fed, the cat will no longer be hungry. + """ + self.is_hungry = False + +DEFAULT_HP = 500 +"""This is the default value for the amount of health points that each entity will have.""" +``` + +!!! tip "Further reading" + + - Like mentioned above, you can take a look over the [official Google style guide + spec](https://google.github.io/styleguide/pyguide.html#38-comments-and-docstrings) + - Griffe also has a [docstring recommendations + page](https://mkdocstrings.github.io/griffe/guide/users/recommendations/docstrings/), where you can find a bunch + of examples that showcase the various places where you can use docstrings. + - On top of the general docstring recommendations, griffe also has a bit more detailed [reference + page](https://mkdocstrings.github.io/griffe/reference/docstrings/#google-style) that further demonstrates some of + the things that will and won't work. 
It is expected that once relative cross-refs come to mainline `mkdocstrings-python`, this alternative handler will be
dropped. Because of this, we should do our best to use compatible syntax when writing the cross-references. The
`mkdocstrings-python-xref` handler offers quite a bit more than the official version does; however, relying on those
extra features would make migrating back much harder.
    change, what is a deprecation, how to mark something as deprecated and explains when a function should be
    deprecated. Finally, it mentions how to properly communicate breaking changes and deprecations to end users.
It’s essential that users can rely on **minor** and **patch** versions (e.g., +`3.1.0` or `3.0.1`) being backwards-compatible with the first major release (`3.0.0`). + +When introducing changes, aim to implement them in a non-breaking way. Breaking changes should be **avoided** whenever +possible. If a breaking change is absolutely necessary, strive to transition gradually through **deprecations**. + +Refer to the [versioning model page][breaking-changes] for some examples of what constitutes a breaking change. + +## What is a deprecation + +A deprecation signals that a particular part of the code (commonly a function, class, or argument) should no longer be +used because it is outdated, inefficient, or replaced by better alternatives. Deprecations are a **temporary** measure +to guide developers toward **transitioning** to newer practices, while giving them time to adjust their code without +causing immediate disruptions. + +Deprecations act as a soft warning: they indicate that the deprecated feature will eventually be removed, but for now, +it remains usable with a runtime deprecation warning. This gives developers enough time to adapt before the removal +takes place in a future major release. + +It’s essential to understand that deprecations are not permanent — every deprecated feature has a defined removal +version, beyond which it will no longer exist. Typically, the removal happens in the next major version after the +deprecation was announced. For example, if a feature is deprecated in version `3.x`, it will usually be removed in +version `4.0.0`. + +!!! info "Recap" + + Deprecations help to avoid **immediate breaking changes** by offering a **grace period** for users to update their + code before the feature is entirely removed in the next major release. + +Deprecations are primarily used for: + +- **Phasing out old functions, classes, or methods** in favor of improved alternatives. 
We have two custom functions to mark something as deprecated; both of these live in the `mcproto.utils.deprecation`
module:
+ +The `deprecation_warn` function will usually just show a warning, however, if the current version of the library +surpasses the removal version, it will instead throw a runtime error, making it unusable. In most cases, people +shouldn't ever face this, as once the new major version is released, all deprecations with that removal version should +be removed, but it's a nice way to ensure the proper behavior, just in case we'd forget, allowing us to remove them +later on in a patch version without breaking the semantic versioning model. + +!!! note + + The removal version is a **required** argument, as we want to make sure that deprecated code doesn't stay in our + codebase forever. Deprecations should always be a temporary step toward the eventual removal of a feature. + + If there is a valid reason to extend the deprecation period, you can push back the removal version, keeping the old + or compatibility code longer and incrementing the major version number in the argument accordingly. + + However, we should **never** shorten the deprecation period, as that would defeat the purpose of giving developers + enough time to adapt to the change. Reducing the deprecation time could result in unexpected breakage for users + relying on the deprecated feature. + +### Examples + +#### Function rename + +```python +from mcproto.utils.deprecation import deprecated + + +@deprecated(removal_version="4.0.0", replacement="new_function") +def old_function(x: int, y: int) -> int: + ... + + +def new_function(x: int, y: int) -> int: + ... +``` + +#### Class removal + +```python +@deprecated(removal_version="4.0.0", extra_msg="Optional extra message") +class MyClass: + ... +``` + +#### Argument removal + +```python +from mcproto.utils.deprecation import deprecation_warn + +def old_function(x: int, y: int, z: int) -> int: + ... 
+ +def new_function(x: int, y: int, z: int | None = None) -> int: + if z is not None: + deprecation_warn( + obj_name="z (new_function argument)", + removal_version="4.0.0", + replacement=None, + extra_msg="Optional extra message, like a reason for removal" + ) + + ... # this logic should still support working with z, just like it did in the old impl +``` + +## Communicating breaking changes + +**Breaking changes necessitate clear communication**, as they directly impact users by forcing updates to their +codebases. It’s essential to ensure that users are well-informed about any breaking changes introduced in the project. +This is achieved through the project’s changelog. + +**Every breaking change must be documented using a 'breaking' type changelog fragment.** When writing the fragment, +adhere to the following guidelines: + +- Specify **what** was deprecated with a fully qualified name (e.g. `module.submodule.MyClass.deprecated_method`). +- Suggest an **alternative**, if applicable, and explain any necessary **migration steps**. +- Briefly document **why** the deprecation was made (without going into excessive detail). +- Prioritize **clarity and good wording** + +These entries are critical, as they are likely to be read by end-users of our library (programmers but +non-contributors). Keep this in mind when crafting breaking change fragments. + +!!! warning "Every breaking change needs its own entry" + + If your pull request introduces multiple breaking changes across different components, you must create individual + changelog entries for each change. + +!!! example "Example of a good breaking changelog fragment" + + Suppose a library changes the return type of a function from a list to a set. This type change would be difficult + to deprecate because the change affects existing code that relies on the specific return type. + + ```markdown title="changes/521.breaking.md" + Change return type of `mcproto.utils.get_items` from `list[str]` to `set[str]`. 
+ + This change was made to improve performance and ensure unique item retrieval. + The previous behavior of returning duplicates in a list has been removed, + which may impact existing code that relies on the previous return type. + Users should adjust their code to handle the new return type accordingly. + ``` + + Even though it’s technically feasible to implement this as a non-breaking change - such as by creating a new + function or adding a boolean flag to control the return type, these approaches may not suit our use case. For + instance, if we were to introduce a boolean flag, we would need to set it to `False` by default and show + deprecation warnings to users unless they explicitly set the flag to `True`. + + Eventually, when the deprecation period is over, the flag becomes pointless, but removing support for it would + necessitate yet another round of deprecation for the flag itself, forcing users to revert to using the function + without it. This approach could frustrate users and create unnecessary complexity. + + When considering non-breaking changes, it’s crucial to evaluate potential complications like these. If you opt for + a breaking change, be sure to include similar reasoning in your pull request description to help convey the + rationale behind the decision. + +!!! note "Removing deprecations" + + We consider deprecation removals as a breaking change, which means that these removals also need to be documented. + That said, it is sufficient for these removals to be documented in a single changelog fragment. These removals + alongside with writing the fragment will be performed by the project maintainers at the time of the release. + +## Communicating deprecations + +Even though a deprecation doesn’t immediately break code, it signals an upcoming change and it's essential to communicate +this clearly to the users of our project. We achieve this through the project's changelog. 
+ +???+ tip "Benefits of tracking deprecations in changelog" + + While runtime deprecation warnings provide immediate feedback upon updating the library, it can often be beneficial + to give users a chance to plan ahead before updating the library, especially for projects that perform automatic + dependency updates through CI, which may not check for warnings, leading to deprecation warnings reaching + production. + + Additionally, it's often easy for people to miss/overlook the warnings if they're not looking for them in the CLI + output, or if their project already produces some other warnings, making ours blend in. + + By clearly documenting deprecations, we enable users to identify deprecated features before upgrading, allowing + them to address issues proactively or at least prepare for changes. + + A changelog entry serves as a permanent, versioned record of changes, providing detailed explanations of why a + feature is deprecated, what the recommended replacements are. It's a place where people may look for clarification + on why something was removed, or in search of migration steps after seeing the deprecation warning. + +**Every deprecation must be documented using a 'deprecation' type changelog fragment.** When writing the fragment, +similar guidelines to writing breaking changelog fragments apply: + +
+ +- Provide the **removal version** i.e. version in which the deprecated feature will be removed (e.g. `4.0.0`). (1) +- Specify **what** was deprecated with a fully qualified name (e.g. `module.submodule.MyClass.deprecated_method`). +- Suggest an **alternative**, if applicable, and explain any necessary **migration steps**. +- Briefly document **why** the deprecation was made (without going into excessive detail). +- Prioritize **clarity and good wording** + +
!!! warning "Every deprecated component needs its own entry"

    Just like with breaking changes, if you're deprecating multiple different components, you
    must make multiple changelog entries, one for each deprecation.
We generally require every pull request to include a new changelog fragment, summarizing what it does.
+ +## Create fragments with commands + +While you can absolutely create these files manually, it's often a lot more convenient to use the `towncrier` CLI, +which can create the file for you in the proper place automatically. With it, you can simply run: + +```bash +towncrier create {pull_request_number}.{type}.md +``` + +After you ran the command, a new file will appear in the `changes/` directory. You can now open it and describe your +change inside of it. + +If the change is simple enough, you can even use the `-c` / `--content` flag to specify it directly, like: + +```bash +towncrier create 12.feature.md -c "Add dinosaurs!" +``` + +!!! tip "Terminal editors" + + If you're used to terminal editors, there's also an `--edit` flag, which will open the file with your + `$EDITOR`. (I would recommend `neovim`, but if you find it too complex, `nano` also works well) + +## Multiple fragments in a single PR + +If necessary, multiple fragment files can be created per pull-request, with different change types, if the PR covers +multiple areas. For example for PR #13 that both introduces a feature, and changes the documentation, can add 2 +fragment files: `13.feature.md` and `13.docs.md`. + +Additionally, if a single PR is addressing multiple unrelated topics in the same category, and needs to make multiple +distinct changelog entries, an optional counter value can be added at the end of the file name (needs to be an +integer). So for example PR #25 which makes 2 distinct internal changes can add these fragment files: +`25.internal.1.md` and `25.internal.2.md`. (The numbers in this counter position will not be shown in the final +changelog and are merely here for separation of the individual fragments.) 
+ +However if the changes are related to some bigger overarching goal, you can also just use a single fragment file with +the following format: + +```markdown title="changes/25.internal.md" +Update towncrier categories + + - Rename `documentation` category to shorter: `docs` + - Add `internal` category for changes unrelated to public API, but potentially relevant to contributors + - Add github workflow enforcing presence of a new changelog fragment file for each PR + - For insignificant PRs which don't require any changelog entry, a maintainer can add `skip-fragment-check` label. +``` + +!!! warning + + While possible, if you end up making multiple distinct changelog fragments like this, it's a sign that your PR + might be too big, and you should split it up into multiple PRs instead. Making huge PRs that address several + unrelated topics at once is generally a bad practice, and should be avoided. If you go overboard, your PR might + even end up getting closed for being too big, and you'll be required to split it up. + +## Preview changelog + +To preview the latest changelog, run `towncrier build --draft --version latest`. + +??? note "Meaning of the version value" + + The `--version` attribute usually takes the version number of the project, to which these changes apply. However, + since we just want to preview the changes, it doesn't really matter for us, so we can just pass `latest` or + whatever else you wish. + + For actual builds, the version is automatically obtained and this command is executed in our release CI workflow. + + This version will be used in the first line of the changelog (the header). + +??? note "Meaning of --draft flag" + + The `--draft` flag will make sure that towncrier will only show you the contents of the next changelog version + entry, but won't actually add that generated content to our `CHANGELOG.md` file, while consuming (removing) the + changelog fragments. 
+ + You will never need to run `towncrier` without the `--draft` flag, as our CI workflows for project releasing handle + that automatically. + +To make this a bit easier, there is a taskipy task running the command above, so you can just use `poetry run task +changelog-preview` to see the changelog, if you don't like remembering new commands. + +## Writing good changelog fragments + +Fragment files follow the same markdown syntax as our documentation. + +The contents of a fragment file should describe the change that you've made in a quick and general way. That said, the +change descriptions can be a bit more verbose than just the PR title, but only if it's necessary. Keep in mind that +these changes will be shown to the end users of the library, so try to explain your change in a way that a +non-contributor would understand. + +!!! tip + + If your change needs some more in-depth explanations, perhaps with code examples and reasoning for why such a + change was made, use the PR body (description) for this purpose. Each changelog entry will contain a link to the + corresponding pull request, so if someone is interested in any additional details about a change, they can always + look there. + +### Examples of good changlog fragment files + +:material-check:{ style="color: #4DB6AC" } **Clear and descriptive** + +```markdown title="changes/171.feature.md" +Add `Account.check` function, to verify that the access token in use is valid, and the data the Account instance has matches the data minecraft API has. +``` + +```markdown title="changes/179.docs.md" +Enforce presence of docstrings everywhere with pydocstyle. This also adds docstring to all functions and classes that didn't already have one. Minor improvements for consistency were also made to some existing docstrings. 
+``` + +:material-check:{ style="color: #4DB6AC" } **Slightly on the longer side, but it's justified** (Sometimes, it's +important to explain the issue that this fixes, so that users know that it was there) + +```markdown title="changes/330.bugfix.md" +Fix behavior of the `mcproto.utils.deprecation` module, which was incorrectly always using a fallback version, assuming mcproto is at version 0.0.0. This then could've meant that using a deprecated feature that is past the specified deprecation (removal) version still only resulted in a deprecation warning, as opposed to a full runtime error. +``` + +:material-check:{ style="color: #4DB6AC" } **With an extra note about the breaking change** (Adding some extra +description isn't always bad, especially for explaining how a breaking change affects existing code) + +```markdown title="changes/130.breaking.md" +Renamed "shared_key" field to "shared_secret" in `LoginEncryptionPacket`, following the official terminology. + + This is a breaking change as `LoginEncryptionPacket`'s `__init__` method now uses "shared_secret" keyword only + argument, not "shared_key". Every initialization call to this packet needs to be updated. +``` + +:material-check:{ style="color: #4DB6AC" } **With a list of subchanges that were made** (Be careful with this one +though, make sure you don't over-do it) + +```markdown title="changes/129.feature.md" +Added a system for handling Minecraft authentication + + - Yggdrasil system for unmigrated i.e. non-Microsoft accounts (supportng Minecraft accounts, and the really old + Mojang accounts) + - Microsoft OAuth2 system (Xbox live) for migrated i.e. Microsoft accounts +``` + +### Examples of bad changelog fragment files + +:material-close:{ style="color: #EF5350" } **Unclear** (But what does this class do?) + +```markdown title="changes/123.feature.md" +Update `Buffer` class. 
+``` + +:material-close:{ style="color: #EF5350" } **Bad category** (This is a feature, not a bugfix) + +```markdown title="changes/161.bugfix.md" +Add support for encryption. Connection classes now have `enable_encryption` method, and some encryption related functions were added into a new mcproto.encryption module. +``` + +:material-close:{ style="color: #EF5350" } **Starts with dash** (Sometimes, it can feel natural to start your changelog +entry with a `-`, as it is a list item in the final changelog, however, this dash will already be added automatically) + +```markdown title="changes/171.feature.md" +- Add `Account.check` function, to verify that the access token in use is valid, and the data the Account instance has matches the data minecraft API has. +``` + +:material-close:{ style="color: #EF5350" } **Wrapped first line** (Splitting up the first line into multiple lines is +something we often do in markdown, because it should still be rendered as a single line, however, because of how +towncrier merges these fragments, using multiple lines will cause issues and the changelog won't be formatter +correctly! Further blocks can have wrapped lines.) + +```markdown title="changes/330.bugfix.md" +Fix behavior of the `mcproto.utils.deprecation` module, which was incorrectly always using a fallback version, assuming +mcproto is at version 0.0.0. This then could've meant that using a deprecated feature that is past the specified +deprecation (removal) version still only resulted in a deprecation warning, as opposed to a full runtime error. +``` + +:material-close:{ style="color: #EF5350" } **No indent in description** (Sometimes, we want to add additional +description to our changelog entry. When doing so, we need to make sure that the description block is indented with 4 +spaces and there is a blank line after the first / title line.) 
+ +```markdown title="changes/330.breaking.md" +Renamed "shared_key" field to "shared_secret" in `LoginEncryptionPacket`, following the official terminology. + +This is a breaking change as `LoginEncryptionPacket`'s `__init__` method now uses "shared_secret" keyword only +argument, not "shared_key". Every initialization call to this packet needs to be updated. +``` + +:material-close:{ style="color: #EF5350" } **Way too long** (This should've been the PR description) + +```markdown title="changes/161.feature.md" +Introduce support for encryption handling. + + Most servers (even offline ones) usually send an EncryptionRequest packet during the LOGIN state, with a public + (RSA) key that the client is expected to use to encrypt a randomly generated shared secret, to send back to the + server in EncryptionResponse packet. After that, all further communication is encrypted with this shared secret. + + The encryption used is a AES/CFB8 stream cipher. That means the encrypted ciphertext will have the same amount + of bytes as the original plaintext, allowing us to still trust our reader/writer methods that rely on reading + specific amounts of bytes, even if their content don't make sense. + + This directly uses the base connection classes and adds enable_encryption method to them, which after getting + called will automatically encrypt/decrypt any incomming/outcomming data. + + This additionally also changes the LoginEncryptionRequest packet class, and makes the public key attribute + actually hold an RSA public key (from the cryptography library), instead of just the received bytes. This is + then much more useful to work with later on. This is a breaking change. +``` + +!!! tip "Verify if your changelog works" + + Our CI will automatically build the documentation for your PR and post a link to it as a comment in the pull + request. This documentation will include a preview of the changelog with all unreleased changes in the [changelog] + page. 
You can take a look there to make sure that your change fragment(s) resulted in the proper output. + +!!! note "Internal changes" + + We're a bit more forgiving when it comes to describing your change if your change is in the `internal` category, as + end users don't need to read those. Changes in this category can be a bit less descriptive. + +## Footnotes + +- See for more info about why and how to properly maintain a changelog +- For more info about `towncrier`, check out it's [documentation][towncrier-tutorial] + +[towncrier]: https://towncrier.readthedocs.io/en/stable/ +[towncrier-tutorial]: https://towncrier.readthedocs.io/en/stable/tutorial.html +[changelog]: ../../meta/changelog.md diff --git a/docs/contributing/guides/documentation.md b/docs/contributing/guides/documentation.md new file mode 100644 index 00000000..f4c9fda4 --- /dev/null +++ b/docs/contributing/guides/documentation.md @@ -0,0 +1,35 @@ +# Writing documentation + +???+ abstract + + This guide describes how to write the documentation for this project (like the docs for the page you're reading + right now). It contains several useful links for `mkdocs` documentation and for the various extensions that we use. + +Our documentation page is generated from markdown files in the `docs/` directory, using [`mkdocs`][mkdocs] with +[`mkdocs-material`][mkdocs-material]. + +This gives us an amazing framework for building great-looking, modern docs. For the most part, the documentation is +written in classical markdown syntax, just with some additions. If you're familiar with markdown, you should be able to +make a simple change easily, without having to look at any docs. + +That said, for more complex changes, you will want to familiarize yourself with [mkdocs-material +documentation][mkdocs-material-guide]. Don't worry, these docs are fairly easy to read and while they do cover a lot, +they're nicely segmented, so you should be able to find what you're looking for quickly. 
On top of just that, you may +want to simply look through the existing pages, as a lot of what you'd probably want to do was already done on one of +our pages, so you can just copy that. + +Other than just mkdocs-material, we also use +[pymdown-extensions], which add various neat +extensions that are often useful when writing the docs. These are mostly small quality-of-life extensions that bring +some more life to the docs, but aren't something that you'd need to work with all the time. We do suggest that you check +it out though, so that you know what's available. + +Finally, for generating our API reference page, we're using [mkdocstrings]. More on +that in the [API reference] guide though. + +[mkdocs]: https://www.mkdocs.org/ +[mkdocs-material]: https://squidfunk.github.io/mkdocs-material/ +[mkdocs-material-guide]: https://squidfunk.github.io/mkdocs-material/getting-started/ +[pymdown-extensions]: https://facelessuser.github.io/pymdown-extensions/extensions/arithmatex/ +[mkdocstrings]: https://mkdocstrings.github.io/ +[API reference]: ./api-reference.md diff --git a/docs/contributing/guides/great-commits.md b/docs/contributing/guides/great-commits.md new file mode 100644 index 00000000..930fa0fa --- /dev/null +++ b/docs/contributing/guides/great-commits.md @@ -0,0 +1,329 @@ +# Great Commits + +???+ abstract + + This guide describes how to make good commits that are helpful to maintainers, debuggable and readable when going + over the `git log`, or `git blame`. + + It explains the purpose of a commit message and it's structure, goes over the importance of making commits + "atomic" and the practice of partial staging. Additionally, it also mentions why and how to avoid making a lot of + fixing commits, describes the practice of force pushing, alongside it's downsides and finally, it explains why + these practices are worth following and how they make the developer's life easier. 
+ +A well-structured git log is crucial for a project's maintainability, providing insight into changes as a reference for +future maintainers (or old forgetful ones, _like me_). Here, we outline the best practices for making good commits in +our project. + +## Commit Message Guidelines + +### Purpose + +Every commit should represent a change in the source code. The commit message should not only describe **what** was +changed but also **why** it was necessary and what it achieves. + +### More than just the first line + +Many developers are uesd to commiting changes with a simple `git commit -m "My message"`, and while this is enough and +it's perfectly fine in many cases, sometimes you just need more space to describe what a change truly achieves. + +Surprisingly, many people don't even know that they can make a commit that has more in it's message than just the +title/first line. That then leads to poorly documented changes, because single line sometimes just isn't enough. + +To create a commit with a bigger commit message, you can simply run the `git commit` command without the `-m` argument. +This should open a temporary file in your text editor (`$EDITOR`), in which you can write out your commit message in +full. + +??? tip "Use git commit by default" + + I’d actually recommend making the simple `git commit` the default way you make new commits, since it invites you to + write more about it, by just seeing that you have that space available. We usually don’t even know what exactly + we’ll write in our new commit message before getting to typing it out, and knowing you have that extra space if you + need it will naturally lead to using it, even if you didn’t know you needed it ahead of time. + +!!! note + + That said, not every commit requires both a subject and a body. Sometimes, a change may be so simple, that no + further context is necessary. With those changes, including a body would just be a waste of the readers time. 
For + example: + + ```markdown + Fix typo in README + ``` + + This message doesn't need anything extra. Some people like to include what the typo was, but if you want to know + that, you can just look at the actual changes that commit made. There's a whole bunch of ways to do that with git, + like `git show`, `git diff` or `git log --patch`. So while in some cases, having extra context can be very + valuable, you also shouldn't overdo it. + +### Structure + +Git commits should be written in a very specific way. There’s a few rules to follow: + +1. **Subject Line:** + - **Limit to 50 characters** (This isn't a hard limit, but try not to go much longer. This limit ensures + readability and forces the author to think about the most concise way to explain what's going on. Hint: If you're + having trouble summarizing, you might be committing too much at once) + - **A single sentence** (The summary should be a single sentence, multiple probably wouldn't fit into the character + limit anyways) + - **Capitalize the first letter** + - **Don't end with a period** (A period will only waste one of your precious 50 characters for the summary and + it's not very useful context wise) + - **Use imperative mood** (Imperative mood means “written as if giving a command/instruction” i.e.: “Add support + for X”, not “I added support for X” or “Support for X was added”, as a rule of thumb, a subject message should be + able to complete the sentence: “If implemented, this commit will …”) +2. **Body:** + - **Separate the body from the subject line with a blank line** (Not doing so would make git think your summary + spans across multiple lines, rather than it being a body) + - **Wrap at 72 characters** (Commits are often printed into the terminal with the `git log` command. If the output + isn't wrapped, going over the terminals width can cause a pretty messy output. 
The recommended maximum width for + terminal text output is 80 characters, but git tools can often add indents, so 72 characters is a sensible maximum) + - **Avoid implementation details** (The diff shows the "how", focus on the "what" and "why") + +Git commits can use markdown, most other programs will understand it and it's a great way to bring in some more +style, improving the readability. In fact, if you view the commit from a site like GitHub, it will automatically +render any markdown in the commit for you. + +???+ example "Example commit" + + ```markdown + Summarize changes in around 50 characters or less + + More detailed explanatory text, if necessary. Wrap it to about 72 + characters or so. In some contexts, the first line is treated as the + subject of the commit and the rest of the text as the body. The + blank line separating the summary from the body is critical (unless + you omit the body entirely); various tools like `log`, `shortlog` + and `rebase` can get confused if you run the two together. + + Explain the problem that this commit is solving. Focus on why you + are making this change as opposed to how (the code explains that). + Are there side effects or other unintuitive consequences of this + change? Here's the place to explain them. + + Further paragraphs come after blank lines. + + - Bullet points are okay too + - They're very useful for listing something + ``` + +:material-run-fast: **Stretch goal** – Include relevant **keywords** to make your commits easily searchable (e.g. the +name of the class/function you modified). + +:material-run-fast: **Stretch goal \#2** – Keep it **engaging**! Provide some interesting context or debug processes to +make the commit history both more informative and fun to read. + +## Make "atomic" commits + +!!! 
quote "Definition" + + *Atomic: of or forming a single irreducible unit or component in a larger system.* + +The term “atomic commit” means that the commit is only representing a single change, that can’t be further reduced into +multiple commits, i.e. this commit only handles a single change. Ideally, it should be possible to sum up the changes +that a good commit makes in a single sentence. + +That said, the irreducibility should only apply to the change itself, obviously, making a commit for every line of code +wouldn’t be very clean. Having a commit only change a small amount of code isn’t what makes it atomic. While the commit +certainly can be small, it can just as well be a commit that’s changing thousands of lines. (That said, you should have +some really good justification for it if you’re actually making commits that big.) + +The important thing is that the commit is only responsible for addressing a single change. A counter-example would be a +commit that adds a new feature, but also fixes a bug you found while implementing this feature, and also improves the +formatting of some other function, that you encountered along the way. With atomic commits, all of these actions would +get their own standalone commits, as they’re unrelated to each other, and describe several different changes. + +Note that making atomic commits isn't just about splitting thins up to only represent single changes, indeed, while +they should only represent the smallest possible change, it should also be a “complete” change. This means that a +commit responsible for changing how some function works in order to improve performance should ideally also update the +documentation, make the necessary adjustments to unit-tests so they still pass, and update all of the references to +this updated function to work properly after this change. + +!!! 
abstract "Summary" + + So, an atomic commit is a commit representing a single (ideally an irreducible) change, that’s fully implemented + and integrates well with the rest of the codebase. + +### Partial adds + +Many people tend to always simply use `git add -A` (or `git add .`), to stage all of the changes they made, and then +create a commit with it all. Sometimes, you might not even stage the changes and choose to use `git commit -a`, to +quickly commit everything. + +In an ideal world, where you only made the changes you needed to make for this single atomic commit, this would work +pretty well, and while sometimes this is the case, in many cases, you might've also fixed a bug or a typo that you +noticed while working on your changes, or already implemented something else, that doesn't fit into your single atomic +commit that you now wish to make. + +In this case, it can be very useful to know that you can instead make a "partial" add, only staging those changes that +belong to the commit. + +In some cases, it will be sufficient to simpy stage specific files, which you can do with: + +```bash +git add path/to/some/file path/to/other/file +``` + +That said, in most cases, you're left with a single file that contains multiple unrelated changes. When this happens, +you can use the `-p`/`--patch` flag: + +```bash +git add -p path/to/file +``` + +Git will then let you interactively go over every "hunk" (a chunk of code, with changes close to each other) and let +you decide whether to accept it (hence staging that single hunk), split it into more chunks, skip it (avoids staging +this hunk) or even modify it in your editor, allowing you to remove the intertwined code from multiple changes, so that +your commit will really only perform a single change. + +!!! 
tip "Use --patch more often" + + This git feature has slowly became one of my favorite tools, and I use it almost every time I need to commit + something, even if I don't need to change or skip things, since it also allows me to quickly review the changes + I'm making, before they make it into a commit. + +## Avoid fixing commits + +A very common occurrence I see in a ton of different projects is people making sequences of commits that go like: + +- Fix bug X +- Actually fix bug X +- Fix typo in variable name +- Sort imports +- Follow lint rules +- Run auto-formatter + +While people can obviously mess up sometimes, and just not get something right on the first try, a fixing commit is +rarely a good way to solve that. + +Instead of making a new commit, you can actually just amend the original. To do this, we can use the `git commit +--amned`, which will add your staged changes into the previous commit, even allowing you to change the message of that +old commit. + +Not only that, if you've already made another commit, but now found something that needs changing in the commit before +that, you can use interactive rebase with `git rebase -i HEAD~3`, allowing you to change the last 3 commits, or even +completely remove some of those commits. + +For more on history rewriting, I'd recommend checking the [official git +documentation][git-history-rewriting]. + +### Force pushing + +Changing history is a great tool to clean up after yourself, but it works best with local changes, i.e. with changes +you haven't yet pushed. + +If you're changing git history after you've already pushed, you will find that pushing again will not work, giving you +a message like "updates were rejected because the remote contains work that you do not have locally". + +To resolve this issue, it is possible to make a "force push" with `git push --force` command. Running this will push +your branch to the remote (to GitHub) regardless of what was in the remote already, hence overriding it. + +!!! 
warning + + Force pushing becomes risky if others have already pulled the branch you are working on. If you overwrite the + branch with a force push, it can lead to several issues: + + - **Lost work:** Collaborators may have pushed to your branch already, following it's existing git history. + However, after your force-push, their changes would be ereased from the remote. **Make sure you pull / rebase + from the remote before you make a force-push.** + - **Complex conflicts:** If someone else has pulled your branch and did some changes that they didn't yet push + before you force-pushed, suddenly, their git history is now no longer in sync. Resolving conflicts like that is + possible, but it can be very annoying. + - **Harder reviews:** When reviewing your code, we sometimes like going over the individual commits to understand + your individual (atomic) changes better. It's often a lot easier to look at and review 10 different atomic + changes individually, that together form a PR than it would be to look at all of them at once. By force-pushing, + you're changing the commit history, making the changes to the code that we already reviewed. This is partially + GitHub's fault though, for not providing an easier way of showing these changes across force-pushes. + +#### Force pushing on PR feature branches + +In our project, we do allow force pushing on your individual feature branches that you use for your PR. This +flexibility enables you to clean up your commit history and refine your changes before they are merged into the main +branch. However, it's important to note that many other projects may not permit force pushing due to the risks +involved. Always check the contributing guidelines of the project you are working on. + +!!! tip "Best practices" + + To mitigate the risks associated with force pushing, consider following these best practices: + + - **Push less often:** Try to limit of othen you push changes to the remote repository in general. 
Aim to push only + when you are satisfied with the set of changes you have. This reduces the likelihood of needing to force-push a + lot. + - **Force push quickly:** If you do need to force-push, try to do so as quickly as possible. The more time that has + passed since your normal push, the more likely it is that someone have already clonned/pulled your changes. If a + force push was made within just a few seconds of the original push (and it only overwrites the changes from that + last push), it's not very likely that someone will have those changes pulled already, so you probably won't break + anyone's local version. + - **Pull before changing history:** Make absolutely certain that you don't override anyone's changes with your + force-push. Sometimes, maintainers can create new commits in your branch, other times, that can even be you by + modifying something from GitHub, or clicking on the apply suggestion button from a code-review. By pulling before + you start changing history, you can make sure that you won't erease these changes and they'll remain a part of + your modified history. + +## Benefits + +Now that you've seen some of the best practices to follow when making new commits, let's talk a bit about why we follow +these practices and what benefits we can gain from them. + +### A generally improved development workflow + +Speaking from my personal experience, I can confidently say that learning how to make good git commits, specifically +the practice of making atomic commits will make you a better programmer overall. That might sound surprising, but it's +really true. + +The reason is that it forces you to only tackle one issue at a time. This naturally helps you to think about how to +split your problem into several smaller (atomic) subproblems and make commits addressing those single parts. 
This is
actually one of the very well-known approaches to problem-solving, called the "divide and conquer" method, where you split
your problem into really small, trivially simple chunks that you solve one by one.
From there, you +can then take a look at that commit specifically and see it's title & description to further understand that change, +along with the rest of the diff to give you proper context for how that line worked with the rest of the code. + +This can often be a great tool when refactoring, as sometimes it can be quite unclear why something is done the way it +is and commits can sometimes help explain that. + +### Efficient cherry picking + +In some cases, it can be useful to carry over certain change (commit) from one place to another. This process is called +cherry-picking (`git cherry-pick`), which will copy a commit and apply it's diff elsewhere. With atomic commits, this +will often work without any further adjustments, since each commit should itself leave you with a functioning project. + +### Streamlined pull request reviews + +Reviewers can often better understand and verify changes by examining your well-structured commits, improving the +review process. + +## Footnotes + +This guide took **heavy** inspiration from this article: . + +!!! quote + + P.S. It's not plagiarism if the original was written by me :P + +See the original article's sources for proper attributions. + +[git-history-rewriting]: https://git-scm.com/book/en/v2/Git-Tools-Rewriting-History diff --git a/docs/contributing/guides/index.md b/docs/contributing/guides/index.md new file mode 100644 index 00000000..3d684694 --- /dev/null +++ b/docs/contributing/guides/index.md @@ -0,0 +1,81 @@ +# Contributing guides & guidelines + +Welcome to the contributing guides & guidelines for mcproto. This documentation is intended for our contributors, +interested in writing or modifying mcproto itself. If you just wish to use mcproto in your project, you can safely skip +this section. + +Mcproto is a relatively large project and maintaining it is no easy task. With a project like that, consistency and +good code quality become very important to keep the code-base readable and bug-free. 
To achieve this, we have put
+together these guidelines that will explain the code style and coding practices that we expect our contributors to
+follow.
+
+This documentation will also include various guides that tell you how to set up our project for development and explain
+the automated tools that we use to improve our coding experience and enforce a bunch of the code style rules quickly
+and without the need for human review.
+
+## The Golden Rules of Contributing
+
+These are the general rules which you should follow when contributing. You can glance over these and then go over the
+individual guides one by one, or use the references in these rules to get to the specific guide page explaining the
+rule.
+
+!!! note
+
+    This list serves as a quick-reference rather than a full guide. Some of our guidelines aren't directly linked in
+    these references at all and we heavily encourage you to go over each of the guide pages in the order they're listed
+    in the docs.
+
+1. **Lint before you push.** We have multiple code linting rules, which define our general style of the code-base.
+   These are often enforced through certain tools, which you are expected to run before every push and ideally even
+   before every commit. The specifics of our linting rules are mentioned in our [style guide]. Running all of these
+   tools manually before every commit would however be quite annoying, so we use [pre-commit].
+2. **Make great commits.** Great commits should be atomic (do one thing only and do it well), with a commit message
+   that explains what was done, and why. More on this [here][great-commits].
+3. **Make an issue before the PR.** Before you start working on your PR, open an issue and let us know what you're
+   planning. We described this further in our [making a PR guide][issue-before-pr].
+4. **Use assets licensed for public use.** Whenever you're adding a static asset (e.g. 
images/video files/audio or + even code) that isn't owned/written by you, make sure it has a compatible license with our projects. +5. **Follow our [Code of Conduct]** + +## Changes to these guidelines + +While we're confident and happy with the current code style and tooling, we acknowledge that change is inevitable. New +tools are constantly being developed, and we have already made significant updates to our code style in the past. + +Every project evolves over time, and these guidelines are no exception. This documentation is open to pull requests and +changes from contributors. Just ensure that any updates to this document are in sync with the codebase. If you propose +a code style change, you must apply that change throughout the codebase to maintain internal consistency. + +If you believe you have something valuable to add or change, please submit a pull request. For major style changes, we +strongly encourage you to open an issue first, as we may not always agree with significant alterations. For minor +clarity improvements or typo fixes, opening an issue isn't necessary. + +We tried to design our specifications to be straightforward and comprehensive, but we might not always succeed, as +we're doing so from our perspective of already having extensive background knowledge. Therefore, we welcome any clarity +improvements to the documentation. If you think you can explain something better, please contribute. + +## Footnotes + +We understand that going through all of these guidelines can be time-consuming and a lot to remember. However, we +strongly encourage you to review them, especially if you haven't worked with these tools or followed such best +practices before. + +!!! tip + + Every page in this contributing guides category has an abstract at the top, summarizing its content. This allows + you to quickly determine if you are already familiar with the topic or, if you're re-reading, to quickly recall + what the page covers. 
Feel free to skip any guide pages if you're already familiar with what they cover. + +We believe these guides will be beneficial to you beyond our codebase, as they promote good coding practices and help +make your code cleaner. You will likely be able to apply much of the knowledge you gain here to your own projects. + +## Disclaimer + +These documents were inspired by [Python Discord's CONTRIBUTING agreement.][pydis-contributing] + +[style guide]: ./style-guide.md +[pre-commit]: ./precommit.md +[great-commits]: ./great-commits.md +[issue-before-pr]: ../making-a-pr.md#get-assigned-to-the-issue +[Code of Conduct]: ../../meta/code-of-conduct.md +[pydis-contributing]: https://github.com/python-discord/bot/blob/master/CONTRIBUTING.md diff --git a/docs/contributing/guides/precommit.md b/docs/contributing/guides/precommit.md new file mode 100644 index 00000000..d83a70be --- /dev/null +++ b/docs/contributing/guides/precommit.md @@ -0,0 +1,111 @@ +# Pre-commit + +???+ abstract + + This guide explains what is pre-commit and how to set it up as a git hook that will run automatically before your + commits. It also describes how to run pre-commit manually from the CLI, how to skip some or all of the individual + checks it performs, what happens when hooks edit files and where it's configuration file is. + +Now that you've seen the linters, formatters, type-checkers and other tools that we use in the project, you might be +wondering whether you're really expected to run all of those commands manually, after each change. And of course, no, +you're not, that would be really annoying, and you'd probably also often just forget to do that. + +So, instead of that, we use a tool called [`pre-commit`][pre-commit], which creates a [git hook][git-hooks], that will +automatically run before each commit you make. 
That means each time when you make a commit, all of these tools will run +over the code you updated, and if any of these linters detects an issue, the commit will be aborted, and you will see +which linter failed, and it's output telling you why. + +## Installing pre-commit + +To install pre-commit as a git hook all you need to do is to run: + +```bash +pre-commit install +``` + +This will install pre-commit as a git hook into your git repository, which will mean it will run automatically before +every new commit you make. + +!!! warning + + Pre-commit itself will be installed via poetry, which means you will need to have an [activated][activate-venv] + poetry environment whenever you make a new commit, otherwise, the pre-commit git hook will fail with command not + found. + +## Hooks that modify files + +Sometimes, hooks can end up modifying your files, for example the ruff format hook may do so if your file wasn't +already formatted by ruff. When this happens, the hook itself will fail, which will make git abort the commit. At this +point, you will be left with the original changes still staged, but some files may have been modified, which means +you'll want to `git add` those again, staging these automatic modifications and then make the commit again. + +Note that in case you were only committing a [partial change][partial-git-add], which means you still had some parts of +the file unstaged, pre-commit will not modify the files for you. Instead, the hook will just fail, leaving the rest up +to you. You should now run the formatter yourself and perform another partial add, updating the staged changes to be +compliant. + +## Running manually + +Even though in most cases, it will be more than enough to have pre-commit run automatically as a git hook, +sometimes, you may want to run it manually without making a commit. + +!!! tip + + You can run this command without having pre-commit installed as a git hook at all. 
This makes it possible to avoid + installing pre-commit and instead running all checks manually each time. That said, we heavily recommend that you + instead install pre-commit properly, as it's very easy to forget to run these checks. + +To run pre-commit manually you can use the following command: + +```bash +pre-commit run --all-files +``` + +Using this command will make pre-commit run on all files within the project, rather than just running against the +git staged ones, which is the behavior of the automatically ran hook. + +## Skipping pre-commit + +!!! info "Automatic skipping" + + Pre-commit is pretty smart and will skip running certain tools depending on which files you modified. For example + some hooks only check the validity of Python code, so if you haven't modified any Python files, there is no need to + run those hooks. + +Even though in most cases enforcing linting before each commit is what we want, there are some situations where we need +to commit some code which doesn't pass these checks. This can happen for example after a merge, or as a result of +making a single purpose small commit without yet worrying about linters. In these cases, you can use the `--no-verify` +flag when making a commit, telling git to skip the pre-commit hooks and commit normally. When making a commit, this +would look like: + +```bash +git commit -m "My unchecked commit" --no-verify +``` + +You can also only skip a specific hook, by setting `SKIP` environmental variable (e.g. `SKIP=basedpyright`) or even +multiple hooks (`SKIP=ruff-linter,ruff-formatter,slotscheck`). When making a commit, this would look like: + +```bash +SKIP="check-toml,slotscheck,basedpyright" git commit -m "My partially checked commit" +``` + +!!! note "" + + The names of the individual hooks are their ids, you can find those in the [configuration file](#configuration) for + pre-commit. + +!!! warning + + This kind of verification skipping should be used sparingly. 
We value a clean history which consistently follows
+    our linting guidelines, and making commits with linting issues only leads to more commits, fixing those issues later.
+
+## Configuration
+
+You can find pre-commit's configuration in the `.pre-commit-config.yaml` file, where we define which tools should be run
+and how. Currently, pre-commit runs ruff linter, ruff formatter, slotscheck and basedpyright, but also a checker for
+some issues in TOML/YAML files.
+
+[pre-commit]: https://pre-commit.com/
+[git-hooks]: https://git-scm.com/book/en/v2/Customizing-Git-Git-Hooks
+[activate-venv]: ./setup.md#activating-the-environment
+[partial-git-add]: ./great-commits.md#partial-adds
diff --git a/docs/contributing/guides/setup.md b/docs/contributing/guides/setup.md
new file mode 100644
index 00000000..9055c5ab
--- /dev/null
+++ b/docs/contributing/guides/setup.md
@@ -0,0 +1,181 @@
+!!! bug "Work In Progress"
+
+    This page is missing a guide on configuring vscode to pick up poetry environment.
+
+# Setting up the project
+
+???+ abstract
+
+    This guide describes the very basics of setting up our project.
+
+    It explains how to use `poetry` to install the python dependencies for the project. After which it goes over using
+    poetry (activating the virtual environment, keeping the dependencies up to date as we update them, adding /
+    removing dependencies and poetry dependency groups).
+
+## Pre-requisites
+
+A basic knowledge of [git and GitHub][git-and-github], alongside working within the terminal and running commands is a
+requirement to work on this project.
+
+This guide assumes you have already [forked][github-forking] our repository, [cloned it][git-cloning] to your
+computer and created your own [git branch][git-branches] to work on.
+
+If you wish to work from an already forked repository, make sure to check out the main branch and do a [`git
+pull`][git-pull] to get your fork up to date. Now create your new branch. 
+
+## Poetry
+
+This project uses [`poetry`][poetry]. Poetry is a tool for managing python dependencies in a
+reproducible way, ensuring that everyone is using the same versions. It creates virtual environments for each project,
+which ensures that your global dependencies won't clash with the project.
+
+??? question "More about virtual environments"
+
+    A python virtual environment is essentially a separate mini installation of python used purely for the project
+    you're working on (as opposed to using your system-wide python installation for everything).
+
+    The reason we do this is to avoid dependency conflicts. Consider this: Our project needs library "foo" at version
+    2.5.2, however, you also have another unrelated project, that also needs the "foo" library, but this project didn't
+    yet update this dependency, and requires an older version of this library: 1.2.0. This is a problem, because our
+    project won't work with a version that old, we're using some of the new features of that library, similarly, your
+    project won't work with a newer version though.
+
+    With a virtual environment, both projects will have their own isolated python installation, that only contains the
+    dependencies listed for that project, avoiding any conflicts completely.
+
+    You can create virtual environments manually, with the built-in `venv` python module, but poetry makes this much
+    simpler. If you want to find out more about virtual environments, check the [official python
+    documentation][venv-docs].
+
+This means you will need to have poetry installed on your system to run our project. To do so, just follow their
+[official documentation][poetry-installation].
+
+## Dependency installation
+
+Once installed, you will want to create a new environment for our project, with all of our dependencies installed. 
Open
+a terminal in the project's directory and run the following command:
+
+```bash
+poetry install
+```
+
+After running this command, the virtual environment will be populated with all of the dependencies that you will need
+for running & developing the project.
+
+## Activating the environment
+
+The virtual environment that you just created will contain a bunch of executable programs, such as `ruff` (our linter).
+One of those executable programs is also `python`, which is the python interpreter for this environment, capable of
+using all of those dependencies installed in that environment.
+
+By default, when you run the `python` command, your machine will use the system-wide python installation though and the
+executables present in this environment will not be runnable at all. In order to make your terminal use the programs
+from this environment, instead of the global ones, you will need to "activate" the environment.
+
+Some IDEs/editors are capable of doing this automatically when you open the project, if your editor supports that, you
+should configure it to do so.
+
+??? question "Configuring VSCode to use the poetry environment"
+
+    TODO
+
+If your IDE doesn't have that option, or you just wish to work from the terminal, you can instead run:
+
+```bash
+poetry shell
+```
+
+Now you can start the IDE from your terminal, which should make it work within the poetry python environment.
+
+!!! tip "Execute a single command inside the virtual environment"
+
+    If you just want to run a single command from the venv, without necessarily having to activate the environment
+    (often useful in scripts), poetry provides a quick and simple way to do so. All you need to do is prefix any such
+    command with `poetry run` (e.g. `poetry run ruff`).
+
+## Keeping your dependencies up to date
+
+We often update the dependencies of mcproto to keep them up to date. 
Whenever we make such an update, you will need to +update your virtual environment to prevent it from going out of date. An out of date environment could mean that you're +using older versions of some libraries and what will run on your machine might not match what will run on other +machines with the dependencies updated. + +Thankfully, poetry makes updating the dependencies very easy as all you have to do is re-run the installation command: + +```bash +poetry install +``` + +It can sometimes be hard to know when you need to run the install command, in most cases, even if we did update +something and you're still on an older version, nothing significant will actually happen, however, the moment you start +seeing some errors when you try to run the project, or inconsistencies with the continuous integration workflows from +your local runs, it's a good indication that your dependencies are probably out of date. + +Ideally, you should run this command as often as possible, if there aren't any new changes, it will simply exit +instantly. You should be especially careful when switching git branches, as dependencies might have been changed (most +likely a new dependency was introduced, or an old one got removed), so consider running this command whenever you +switch to another branch, unless you know that branch didn't make any changes to the project dependencies. + +## Poetry dependency groups + +Poetry has a really cool way of splitting up the dependencies that projects need into multiple groups. For example, you +can have a group of dependencies for linting & autoformatting, another group for documentation support, unit-testing, +for releasing the project, etc. + +To see which dependencies belong to which group, you can check the `pyproject.toml` file for the +`[tool.poetry.group.GROUP_NAME.dependencies]` sections. + +By default, `poetry install` will install all non-optional dependency groups. 
That means all development +dependencies you should need will get installed. + +The reason why we use groups is because in some of our automated workflows, we don't always need all of the project +dependencies and we can save time by only installing the group(s) that we need. It also provides a clean way to quickly +see which dependencies are being used for what. + +The most important group is the `main` group. This group contains all runtime dependencies, which means without these +dependencies, the project wouldn't be runnable at all. It is these libraries that will become the dependencies of our +library when we make a release on PyPI. + +## Installing dependencies + +During the development, you may sometimes want to introduce a new library to the project, to do this, you will first +need to decide which dependency group it should belong to. To do this, identify whether this new dependency will be +required to run the project, or if it's just some tool / utility that's necessary only during the development. + +If it's a runtime dependency, all you need to do is run: + +```bash +poetry add [name-of-your-dependency] +``` + +This will add the dependency to the `main` group. + +However, if you're working with a development dependency, you will want to go over the dependency groups we have (from +`pyproject.toml`) and decide where it should belong. Once you figured that out, you can run: + +```bash +poetry add --group [group-name] [name-of-your-dependency] +``` + +!!! note + + Sometimes, it might make sense to include the same dependency in multiple groups. (Though this is usually quite + rare.) + +## Uninstalling dependencies + +Similarly, we sometimes stop needing a certain dependency. Uninstalling is a very similar process to installation. 
+First, find which group you want to remove this dependency from and then run:
+
+```bash
+poetry remove --group [group-name] [name-of-your-dependency]
+```
+
+[git-and-github]: https://docs.github.com/en/get-started/start-your-journey/about-github-and-git
+[github-forking]: https://docs.github.com/en/pull-requests/collaborating-with-pull-requests/working-with-forks/fork-a-repo
+[git-cloning]: https://docs.github.com/en/repositories/creating-and-managing-repositories/cloning-a-repository
+[git-branches]: https://docs.github.com/en/pull-requests/collaborating-with-pull-requests/proposing-changes-to-your-work-with-pull-requests/about-branches
+[git-pull]: https://github.com/git-guides/git-pull
+[poetry]: https://python-poetry.org/docs/
+[poetry-installation]: https://python-poetry.org/docs/#installation
+[venv-docs]: https://docs.python.org/3/library/venv.html
diff --git a/docs/contributing/guides/slotscheck.md b/docs/contributing/guides/slotscheck.md
new file mode 100644
index 00000000..e6a168fc
--- /dev/null
+++ b/docs/contributing/guides/slotscheck.md
@@ -0,0 +1,75 @@
+# Slotscheck
+
+???+ abstract
+
+    This page explains how we enforce the proper use of `__slots__` on our classes with the `slotscheck` tool. We go over
+    what slotted classes are, what slotscheck enforces, how to run slotscheck and how to configure it.
+
+On top of the tools you already saw (ruff & basedpyright), we also have one more tool that performs static analysis on
+our code: [**slotscheck**][slotscheck].
+
+## What is slotscheck
+
+Slotscheck is a tool that focuses on enforcing proper use of `__slots__` on classes.
+
+???+ question "What are slotted classes"
+
+    If you aren't familiar with slotted classes, you should check the [official documentation][slots-docs]. That said,
+    if you just want a quick overview:
+
+    - Slots allow you to explicitly declare all member attributes of a class (e.g. 
declaring `__slots__ = ("a", "b")`
+      will make the class instances only contain variables `a` and `b`, trying to set any other attribute will result
+      in an `AttributeError`).
+    - The reason we like using slots is the efficiency they come with. Slotted classes use up less RAM and offer
+      a faster attribute access.
+
+    Example of a slotted class:
+
+    ```python
+    class FooBar:
+        __slots__ = ("foo", "bar")
+
+        def __init__(self, foo: str, bar: str) -> None:
+            self.foo = foo
+            self.bar = bar
+
+    x = FooBar("a", "b")
+    print(x.foo, x.bar)
+    x.c = 5 # AttributeError
+    ```
+
+With a low level project like mcproto, efficiency is important and `__slots__` offer such efficiency at a very low cost
+(of simply defining them).
+
+The purpose of `slotscheck` is to check that our slotted classes are using `__slots__` properly, as sometimes, it is
+easy to make mistakes, which result in losing a lot of the efficiency that slots provide. Issues that slotscheck
+detects:
+
+- Detect broken slots inheritance
+- Detect overlapping slots
+- Detect duplicate slots
+
+## How to use slotscheck
+
+To run slotscheck on the codebase, you can use the following command:
+
+```bash
+slotscheck -m mcproto
+```
+
+!!! note ""
+
+    Make sure you have an [activated][activate-venv] poetry virtual environment and you're in the project's root directory.
+
+## Configuring slotscheck
+
+Sometimes, you may want to ignore certain files from being checked. To do so,
+you can modify the [slotscheck configuration][slotscheck-config] in
+`pyproject.toml`, under the `[tool.slotscheck]` option. That said, doing so
+should be very rare and you should have a very good reason to ignore your file
+instead of fixing the underlying issue. 
+ +[slotscheck]: https://slotscheck.readthedocs.io/en/latest/ +[slots-docs]: https://wiki.python.org/moin/UsingSlots +[activate-venv]: ./setup.md#activating-the-environment +[slotscheck-config]: https://slotscheck.readthedocs.io/en/latest/configuration.html diff --git a/docs/contributing/guides/style-guide.md b/docs/contributing/guides/style-guide.md new file mode 100644 index 00000000..95c64452 --- /dev/null +++ b/docs/contributing/guides/style-guide.md @@ -0,0 +1,137 @@ +!!! bug "Work In Progress" + + This page is missing a guide on ruff editor integration + +# Style Guide + +???+ abstract + + This page describes how we use `ruff` to enforce a consistent code style in our project. + +For clarity and readability, adhering to a consistent code style across the whole project is very important. It is not +unusual that style adjustments will be requested in pull requests. + +It is always a good practice to review the style of the existing code-base before and to adhere to that established +style before adding something new. That applies even if it isn't the code style you generally prefer. (That said, if +you think a code style change of some kind would be justified, feel free to open an issue about it and tell us why.) + +!!! quote + + A style guide is about consistency. Consistency with this style guide is important. Consistency within a project + is more important. Consistency within one module or function is the most important. + + However, know when to be inconsistent -- sometimes style guide recommendations just aren't applicable. When in + doubt, use your best judgment. Look at other examples and decide what looks best. And don't hesitate to ask! + + — [PEP 8, the general Style Guide for Python Code][pep8] + +??? tip "Check out the PEP8 song" + + The [Python Discord][python discord] community have made an amazing song about PEP8, check it out + [here][pep8-song]! 
+ +## Automatic linting + +As there is a lot of various code style rules we adhere to in our code base, describing all of them here would take way +too long and it would be impossible to remember anyway. For that reason, we use automated tools to help us catch any +code style violations automatically. + +Currently, we use [`ruff`][ruff] to enforce most of our code style requirements. That said, we do +have some other tools that check the correctness of the code, we will describe those later. + +### Ruff linter & formatter + +Ruff is an all-in-one linter & formatter solution, which aims to replace three previously very popular tools into a +single package: + +- [`flake8`][flake8] linter +- [`isort`][isort] import sorter +- [`black`][black] auto-formatter + +??? question "Why pick ruff over the combination of these tools?" + + There were multiple reasons why we chose ruff instead of using the above tools individually, here's just some of + them: + + - Ruff is faster (written in rust! :crab:) + - A single tool is more convenient than 3 separate ones + - Ruff includes a lot of flake8 plugins with some great lint rules + - Ruff has a great community and is slowly managing to overtake these individual projects + - If you're already used to flake8, you'll feel right at home with ruff, it even has the same error codes (mostly)! + +You can check the ruff configuration we're using in `pyproject.toml` file, under the `[tool.ruff]` category (and it's +subcategories). You can find which linter rules are enabled and which we choose to exclude, some file-specific +overrides where the rules apply differently and a bunch of other configuration options. + +#### Linter + +To run ruff linter on the code, open the terminal in the project's root directory and run: + +```bash +ruff check . +``` + +!!! note "" + + Don't forget to [activate][activate-venv] the poetry virtual environment before running ruff. 
+
+Ruff is really smart and it can often automatically fix some of the style violations it found. To make ruff do that,
+you can add the `--fix` flag to the command:
+
+```bash
+ruff check --fix .
+```
+
+If you got a rule violation in your code and you don't understand what the rule's purpose is supposed to be / why we
+enforce it, you can use Ruff to show you some details about that rule. The explanation that ruff will give you will
+often even contain code examples. To achieve this, simply run:
+
+```bash
+ruff rule [rule-id]
+```
+
+With the `[rule-id]` being the rule you're interested in, for example `UP038`.
+
+??? tip "Use glow to render the markdown syntax from ruff rule command"
+
+    The `ruff rule` command will output the rule explanation in markdown, however, since you're running this command
+    in a terminal, there won't be any helpful syntax highlighting for that by default.
+
+    That's why I'd recommend using a markdown renderer such as [`glow`][glow]. With
+    it, you can pipe the output from ruff into it and have it produce a fancy colored output, that's much easier to
+    read: `ruff rule UP038 | glow`.
+
+Alternatively, you can also find the rules and their description in the [ruff
+documentation][ruff-rules].
+
+#### Formatter
+
+On top of being an amazing linter, ruff is also an automatic code formatter. That means ruff can actually make your
+code follow a proper and consistent style automatically! It will just take your original unformatted (but valid) python code and
+edit it to meet our configured code style for you.
+
+To make ruff format your code, simply run:
+
+```bash
+ruff format .
+```
+
+### Editor integration
+
+TODO
+
+## Other style guidelines
+
+While `ruff` can do a lot, it can't do everything. There are still some guidelines that you will need to read over and
+apply manually. You will find these guides on the next pages of this documentation. 
+
+[pep8]: https://peps.python.org/pep-0008/
+[python discord]: https://www.pythondiscord.com/
+[pep8-song]: https://www.youtube.com/watch?v=hgI0p1zf31k
+[ruff]: https://beta.ruff.rs/docs/
+[flake8]: https://flake8.pycqa.org/en/latest/
+[isort]: https://pycqa.github.io/isort/
+[black]: https://black.readthedocs.io/en/stable/
+[activate-venv]: ./setup.md#activating-the-environment
+[glow]: https://github.com/charmbracelet/glow
+[ruff-rules]: https://docs.astral.sh/ruff/rules/
diff --git a/docs/contributing/guides/type-hints.md b/docs/contributing/guides/type-hints.md
new file mode 100644
index 00000000..3cd1c110
--- /dev/null
+++ b/docs/contributing/guides/type-hints.md
@@ -0,0 +1,147 @@
+# Type Hints
+
+???+ abstract
+
+    This article explains what python type-hints are, how they can be enforced with the use of type checkers and the
+    type checker of our choice: **basedpyright** and its editor integration.
+
+Most people only know python as a dynamically typed language, that doesn't offer any kind of type safety. In the very early
+days of python, this was true, however today, things are a bit different. Even though Python on its own is still a
+dynamically typed language, it does actually support specifying "type hints" which can even be enforced by external
+tools called "type checkers". With those, we can achieve a (mostly) type safe experience while using Python.
+
+## Regular python
+
+In regular python, as most people know it, you might end up writing a function like this:
+
+```python
+def add(x, y):
+    return x + y
+```
+
+In this code, you have no idea what the type of `x` and `y` arguments should be. So, even though you may have intended
+for this function to only work with numbers (ints), it's actually entirely possible to use it with something else. For
+example, running `add("hello", "world")` will return `"helloworld"` because the `+` operator works on strings too. 
+
+The point is, there's nothing telling you what the type of these parameters should be, and that could lead to
+misunderstandings. Even though in some cases, you can figure out what type these variables should have purely based
+on their name alongside the name of the function, in most cases, it's not that easy. It often requires looking through
+the docs, or going over the actual source code of such function.
+
+Annoyingly, python won't even prevent you from passing in types that are definitely incorrect, like: `add(1, "hi")`.
+Running this would cause a `TypeError`, but unless you have unit-tests that actually run that code, you won't find out
+about this bug until it actually causes an issue and at that point, it might already be too late, since your code has
+crashed a production app.
+
+Clearly then, this isn't ideal.
+
+## Type-Hints
+
+While python doesn't require it, there is in fact a way to add a "**hint**" that indicates what **type** a given
+variable should have. So, when we take the function from above, adding type-hints to it would result in something
+like this:
+
+```python
+def add(x: int, y: int) -> int:
+    return x + y
+```
+
+We've now made the types very explicit to the programmer, which means they'll no longer need to spend a bunch of time
+looking through the implementation of that function, or going through the documentation just to know how to use this
+function. Instead, the type hints will just tell you.
+
+This is incredibly useful, because most editors will be able to pick up these type hints, and show them to you while
+calling the function, so you know what to pass right away, without even having to look at the function definition where
+the type-hints are defined.
+
+Not only that, specifying a type-hint will greatly improve the development experience in your editor / IDE, because
+you'll get much better auto-completion.
The thing is, if you have a parameter like `x`, but your editor doesn't know
+what type it should have, it can't really help you if you start typing `x.remove`, looking for the `removeprefix`
+function. However, if you tell your editor that `x` is a string (`x: str`), it will now be able to go through all of
+the methods that strings have, and show you those that start with `remove` (being `removeprefix` and `removesuffix`).
+
+This makes type-hints great at saving you time while developing, even though you have to do some additional work when
+specifying them.
+
+## Runtime behavior
+
+Even though type-hints are a part of the Python language, the interpreter doesn't actually care about them. That means
+that the interpreter doesn't do any optimizations or checking when you're running your code, even if you have a
+function like `add` that we have added type-hints to, code like `add(1, "hi")` will not cause any immediate errors.
+
+Most editors are configured very loosely when it comes to type-hints. That means they will show you these hints when
+you're working with the function, but they won't produce warnings when you pass in the wrong thing. That's why they're
+called "type hints", they're only hints that can help you out, but they aren't actually enforced.
+
+## Enforcing type hints - Type Checkers
+
+Even though python on its own indeed doesn't enforce the type-hints you specify, there are tools that can run "static"
+checks against your code. A static check is a check that works with your code in its textual form. It will read the
+contents of your python files without actually running that file and analyze it purely based on that text content.
+
+Using these tools will allow you to analyze your code for typing mistakes before you ever even run your program. That
+means having a function call like `add(1, "hi")` anywhere in your code would be detected and reported as an issue.
+
+There is a bunch of these tools available for python, but the most common ones are
+[`pyright`][pyright] and [`mypy`][mypy].
+
+## BasedPyright
+
+The type checker that we use in our code-base is [**basedpyright**][basedpyright]. It's a fork of
+pyright which adds some extra checks and features and focuses more on the open-source community than the
+official Microsoft owned Pyright.
+
+### Running BasedPyright
+
+To run BasedPyright on the code-base, you can use the following command:
+
+```bash
+basedpyright .
+```
+
+!!! note ""
+
+    You will need to run this from an [activated][activate-venv] poetry environment while
+    in the project's root directory.
+
+### Editor Integration
+
+=== "VSCode"
+
+    On vscode, you can simply install the [BasedPyright extension][basedpyright-vscode-ext] from the marketplace.
+
+    Note that this extension does collide with the commonly used **Pylance** extension, which is installed
+    automatically alongside the **Python** extension and provides intellisense for Python. The reason BasedPyright
+    collides with this extension is that Pylance actually uses pyright as a language server in the background, and as
+    we mentioned, basedpyright is an alternative, so using both would cause duplicate errors. This means that you will
+    need to disable Pylance, at least within our codebase.
+
+=== "Neovim"
+
+    If you're using Neovim, I would recommend setting up LSP (Language Server Protocol) and installing basedpyright, as
+    it has language server support built into it. You can achieve this with the [`lspconfig`][neovim-lspconfig] plugin.
+    You can then use [`mason-lspconfig`][neovim-mason-lspconfig-plugin] to install `basedpyright`, or manually
+    configure `lspconfig` and use your system-wide `basedpyright` executable.
+
+## Great resources
+
+While type hinting might seem very simple from the examples shown above, there is actually a fair bit to it, and if you
+never worked within a type checked code-base, you should definitely check out some of these resources, which go over
+the basics.
+
+- [Getting started with type hints in Python](https://dev.to/decorator_factory/type-hints-in-python-tutorial-3pel) - a
+  blog post / tutorial by decorator-factory.
+- [Basics of static typing](https://docs.basedpyright.com/#/type-concepts) - part of the BasedPyright documentation
+- [Mypy documentation](https://mypy.readthedocs.io/en/stable/) - very extensive documentation on various typing
+  concepts. (Some things are mypy focused, but most things will carry over to basedpyright too)
+- [Python documentation for the `typing` module](https://docs.python.org/3/library/typing.html) - Python's standard
+  library contains a `typing` module, which holds a bunch of useful structures that we often use while working with
+  type-hints.
+- [PEP 484](https://www.python.org/dev/peps/pep-0484/) - formal specification of type hints for the Python language
+
+[pyright]: https://github.com/microsoft/pyright
+[mypy]: https://mypy.readthedocs.io/en/stable/
+[basedpyright]: https://docs.basedpyright.com/
+[activate-venv]: ./setup.md#activating-the-environment
+[basedpyright-vscode-ext]: https://marketplace.visualstudio.com/items?itemName=detachhead.basedpyright
+[neovim-lspconfig]: https://github.com/neovim/nvim-lspconfig
+[neovim-mason-lspconfig-plugin]: https://github.com/williamboman/mason-lspconfig.nvim
diff --git a/docs/contributing/guides/unit-tests.md b/docs/contributing/guides/unit-tests.md
new file mode 100644
index 00000000..fb61abca
--- /dev/null
+++ b/docs/contributing/guides/unit-tests.md
@@ -0,0 +1,3 @@
+!!! bug "Work In Progress"
+
+    This page is still being written. The content below (if any) may change.
diff --git a/docs/contributing/issue-guide.md b/docs/contributing/issue-guide.md new file mode 100644 index 00000000..fc973145 --- /dev/null +++ b/docs/contributing/issue-guide.md @@ -0,0 +1,192 @@ +# Bug Reports & Feature Requests + +Mcproto is an actively maintained project, and we welcome contributions in the form of both bug reports and feature +requests. This guide will help you understand how to effectively submit an issue, whether it's reporting a bug or +proposing a new feature. + +## Before creating an issue + +Before opening a new issue with your bug report, please do the following things: + +### Upgrade to latest version + +Chances are that the bug you discovered was already fixed in a subsequent version. Thus, before reporting an issue, +ensure that you're running the [latest version][changelog] of mcproto. + +!!! warning "Bug fixes are not backported" + + Please understand that only bugs that occur in the latest version of mcproto will be addressed. Also, to reduce + duplicate efforts, fixes cannot be backported to earlier versions. + + Please understand that only bugs that occur in the latest version will be addressed. Also, to reduce duplicate + efforts, fixes cannot be backported to earlier versions, except as a hotfix to the latest version, diverging from + the not yet finished features, even if already in the `main` branch. + + Due to the nature of our [versioning], that might mean that if you require an older version of minecraft protocol, + you might be stuck with an older, buggy version of this library. + +### Search for existing issues + +It's possible that the issue you're having was already reported. Please take some time and search the [existing +issues][issue tracker] in the GitHub repository for your problem. 
If you do find an existing issue that matches the
+problem you're having, simply leave a :thumbsup: reaction instead (avoid commenting "I have this issue too" or similar,
+as that ultimately just clutters the discussion in that issue, but if you do think that you have something meaningful to
+add, please do).
+
+!!! note
+
+    Make sure to also check the closed issues. By default, github issue search will start with: `is:issue is:open`,
+    remove the `is:open` part to search all issues, not just the opened ones. It's possible that we've seen this issue
+    before, but closed the issue as something that we're unable to fix.
+
+In case you found a relevant issue, however, it has already been closed as implemented (not as declined / not planned),
+but the bug / proposed feature is still somehow relevant, don't be afraid to drop a comment on this older issue, we
+will get notifications for those too. That said, if you think there is sufficient new context now, it might also make
+sense to open a new issue instead, but make sure to at least mention the old issue if you choose this route.
+
+## Creating a new issue
+
+At this point, when you still haven't found a solution to your problem, we encourage you to create an issue.
+
+We have some issue-templates ready, to make sure that you include all of the necessary things we need to know:
+
+- For a **bug report**, you can click [here][open bug issue].
+- For a **feature request**, you can instead click [here][open feature issue].
+
+If you prefer, you can also [open a blank issue][open blank issue]. This will allow you to avoid having to follow the
+issue templates above. This might be useful if your issue doesn't cleanly fit into either of these two, or if you prefer
+to use your own categories and structure for the issue. That said, please still make sure to include all of the
+relevant details when you do so.
+ +## Writing good bug reports + +Generally, the GitHub issue template should guide you towards telling us everything that we need to know. However, for +the best results, keep reading through this section. In here, we'll explain how a well formatted issue should look like +in general and what it should contain. + +### Issue Title + +A good title is short and descriptive. It should be a one-sentence executive summary of the issue, so the impact and +severity of the bug you want to report can be inferred right from the title. + +| | Example | +| ---------------------------------------------------------- | -------------------------------------------------------------------- | +| :material-check:{ style="color: #4DB6AC" } **Clear** | Ping packet has incorrect ID | +| :material-close:{ style="color: #EF5350" } **Wordy** | The Ping packet has an incorrect packet ID of 0, when it should be 1 | +| :material-close:{ style="color: #EF5350" } **Unclear** | Ping packet is incorrect | +| :material-close:{ style="color: #EF5350" } **Non-english** | El paquete ping tiene una identificación incorrecta | +| :material-close:{ style="color: #EF5350" } **Useless** | Help | + +### Bug description + +Now, to the bug you want to report. Provide a clear, focused, specific and concise summary of the bug you encountered. +Explain why you think this is a bug that should be reported to us. Adhere to the following principles: + +1. **Explain the what, not the how** – don't explain [how to reproduce the bug](#reproduction) here, + we're getting there. Focus on articulating the problem and its impact as clearly as possible. +2. **Keep it short and concise** - if the bug can be precisely explained in one or two sentences, perfect. Don't + inflate it - maintainers and future users will be grateful for having to read less. +3. **Don't under-explain** - don't leave out important details just to keep things short. While keeping things short is + important, if something is relevant, mention it. 
It is more important for us to have enough information to be able + to understand the bug, even if it means slightly longer bug report. +4. **One bug at a time** - if you encounter several unrelated bugs, please create separate issues for them. Don't + report them in the same issue, as this makes it difficult for others when they're searching for existing issues and + also for us, since we can't mark such an issue as complete if only one of the bugs was fixed. + +--- + +:material-run-fast: **Stretch goal** – if you have a link to an existing page that describes the issue, or otherwise +explains some of your claims, include it. Usually, this will be a link leading to the Minecraft +protocol documentation for something. + +:material-run-fast: **Stretch goal \#2** – if you found a workaround or a way to fix +the bug, you can help other users temporarily mitigate the problem before +we maintainers can fix the bug in our code base. + +### Reproduction + +A minimal reproducible example is at the heart of every well-written bug report, as it allows us maintainers to +instantly recreate the necessary conditions to inspect the bug and quickly find its root cause from there. It's a +proven fact that issues with concise and small reproductions can be fixed much faster. + +Focus on creating a simple and small code snippet that we can run to see the bug. Do your best to avoid giving us large +snippets or whole files just for the purpose of the reproducible example, do your best to reduce the amount of code as +much as you can and try to avoid using external dependencies in the snippet (except for mcproto of course). + +??? tip "How to include code-snippets (markdown)" + + In case you're not yet familiar with the syntax, GitHub issues use `markdown` format, which means you can use some + nice custom formatting to make the text appear distinct. One of these formatting options is a source-code block / + code snippet. 
To include one, you will want to use the following syntax:
+
+    ````markdown
+    ```language
+    your code
+    it can be multiline
+    ```
+    ````
+
+    Note that the symbols used here aren't single quotes (`'`), they're backticks: `` ` ``.
+    On an English keyboard, you can type these using the key right below escape (also used for tildes: `~`).
+
+    The `language` controls how the code will be highlighted. For python, you can use `python`, for yaml, `yaml`, etc.
+
+Sometimes, the bug can't be described in terms of code snippets, such as when reporting a mistake in the documentation.
+In that case, provide a link to the documentation or whatever other relevant things that will allow us to see the bug
+with minimal effort. In certain cases, it might even be fine to leave the reproduction steps section empty.
+
+## Next steps
+
+Once the issue is submitted, you have 2 options:
+
+### Wait for us to address it
+
+We will try to review your issue as soon as possible. Please be patient though, as this is an open-source project
+maintained by volunteers, who work on it simply for the fun of it. This means that we may sometimes have other
+priorities in life or we just want to work on some more interesting tasks first. It might therefore take a while for us
+to get to your issue, but we try and do our best to respond reasonably quickly, when we can. Even when things are
+slower, we kindly ask you to avoid posting comments like "Any progress on this?" as they are not helpful and only create
+unnecessary clutter in the discussion.
+
+When we do address your issue, we might need further information from you. GitHub has a notification system, so once we
+respond, you will be notified there. Note that, by default, these notifications might not be forwarded to your email or
+elsewhere, so please check GitHub periodically for updates.
+
+Finally, when we address your issue, we will mark the issue as closed (GitHub will notify you of this too).
Once that +happens, your bug should be fixed / feature implemented, but we appreciate it if you take the time to verify that +everything is working correctly. If something is still wrong, you can reopen the issue and let us know. + +!!! warning "Issues are fixed on the main branch" + + Do note that when we close an issue, it means that we have fixed your bug in the `main` branch of the repository. + That doesn't necessarily mean the fix has been released on PyPI yet, so you might still need to wait for the next + release. Alternatively, you can also try the [git installation] to get the project right from that latest `main` + branch. + +### Attempt to solve it yourself + +!!! quote + + The fastest way to get something done is to avoid waiting on others. + +If you wish to try and tackle the bug yourself, let us know by commenting on the issue with something like "I'd like to +work on this". This helps us avoid duplicate efforts and ensures that we don't work on something you're already +addressing. + +Once a maintainer sees your comment, they will assign the issue to you. Being assigned is a soft approval from us, +giving you the green light to start working. + +Of course, you are welcome to start working on the issue even before being officially assigned. However, please be +aware that sometimes we choose not to fix certain bugs for specific reasons. In such cases, your work might not end up +being used. + +Before starting your work though, make sure to also read our [pull request guide]. 
+
+[changelog]: ../meta/changelog.md
+[versioning]: ../meta/versioning.md
+[issue tracker]: https://github.com/py-mine/mcproto/issues
+[open bug issue]: https://github.com/py-mine/mcproto/issues/new?labels=type%3A+bug&template=bug_report.yml
+[open feature issue]: https://github.com/py-mine/mcproto/issues/new?labels=type%3A+feature&template=feature_request.yml
+[open blank issue]: https://github.com/py-mine/mcproto/issues/new?template=Blank+issue
+[git installation]: ../installation.md#latest-git-version
+[pull request guide]: ./making-a-pr.md
diff --git a/docs/contributing/making-a-pr.md b/docs/contributing/making-a-pr.md
new file mode 100644
index 00000000..f1cfb06a
--- /dev/null
+++ b/docs/contributing/making-a-pr.md
@@ -0,0 +1,148 @@
+!!! bug "Work In Progress"
+
+    This page is missing a guide on writing a good PR body
+
+# Pull Requests
+
+Welcome! If you're interested in contributing to mcproto, you've come to the right place. Mcproto is an open-source
+project, and we welcome contributions from anyone eager to help out.
+
+To contribute, you can create a [pull request] on our GitHub repository. Your pull request will then be reviewed by our
+maintainers, and once approved, it will be merged into the main repository. Contributions can include bug fixes,
+documentation updates, or new features.
+
+!!! important "Code quality requirements"
+
+    While we encourage and appreciate contributions, maintaining high code quality is crucial to us. That means you
+    will need to adhere to our code quality standards. Contributions may be rejected if they do not meet these
+    guidelines.
+
+## Get assigned to the issue
+
+The very first thing you will need to do is to decide what you actually want to work on. In all likelihood, you already
+have something in mind if you're reading this, however, if you don't, you're always free to check the opened GitHub
+issues, that don't yet have anyone assigned.
If you find anything interesting there that you'd wish to work on, leave a
+comment on that issue with something like: "I'd like to work on this".
+
+Even if you do have an idea already, we heavily recommend (though not require) that you first make an issue; this can be
+a [bug report], but also a feature request, or something else. Once you've made the issue, leave a: "I'd like to work on
+this" comment on it.
+
+Eventually, a maintainer will get back to you and you will be assigned to the issue. Being assigned is a soft approval
+from us, giving you the green light to start coding. By getting assigned, you also reserve the right to work on that
+given issue, hence preventing us (or someone else) from potentially working on the same thing, wasting ours or your
+time. This prevention of duplicate efforts is also the primary reason why we recommend creating an issue first.
+
+Of course, you are welcome to start working on the issue even before being officially assigned. However, please be
+aware that sometimes, we may choose not to pursue a certain feature / bugfix. In such cases, your work might not end up
+being used, which would be a shame.
+
+!!! note "Minor tasks don't need an issue"
+
+    While we generally do encourage contributors to first create an issue and get assigned to it, if you're
+    just fixing a typo, improving the wording, or making some minor optimizations to the code, you can safely skip
+    this step.
+
+    The point of encouraging issues is to prevent needlessly wasting people's time. However, with these minor tasks,
+    it might actually take you longer to create a full issue about the problem than it would to just submit a fix.
+
+    There's therefore no point in cluttering the issue tracker with a bunch of small issues that can often be
+    changed in just a few minutes.
+
+## Pull Request Body
+
+TODO
+
+## Work in Progress PRs
+
+Whenever you open a pull request that isn't yet ready to be reviewed and merged, you can mark it as a **draft**.
This +provides both visual and functional indicator that the PR isn't yet ready to be merged. + +Methods of marking PR as a draft: + +| **When creating it** | **After creation** | +| ----------------------------------------- | ------------------------------------------- | +| ![image](../assets/draft-pr-creation.png) | ![image](../assets/draft-pr-conversion.png) | + +Once your work is done and you think the PR is ready to be merged, mark it as **Ready for review** + +![image](../assets/draft-pr-unmark.png){ width="600" } + +## Contributing guidelines + +In order to make a successful contribution, it is **required** that you get familiar with our [contributing guidelines]. + +## Automated checks + +The project includes various CI workflows that will run automatically for your pull request after every push and check +your changes with various tools. These tools are here to ensure that our contributing guidelines are met and ensure +good code quality of your PR. + +That said, you shouldn't rely on these CI workflows to let you know if you made a mistake, instead, you should run +these tools on your own machine during the development. Many of these tools can fix the violations for you +automatically and it will generally be a better experience for you. Running these tools locally will also prevent a +bunch of "Fix the CI" commits, which just clutter the git history. + +Make sure to read our [contributing guidelines] thoroughly, as they describe how to use these tools and even how to have +them run automatically before each commit, so you won't forget. + +Passing the CI workflows is a requirement in order to get your pull request merged. If a maintainer sees a PR that's +marked as ready for review, but isn't passing the CI, we'll often refrain from even reviewing it, as we consider it +incomplete. If you have a technical reason why your PR can't pass the CI, let us know in the PR description or a +comment. 
+
+## Code Review
+
+All pull requests will need to be reviewed by at least one team member before merging. The reviewer will provide
+feedback and suggestions for improvement.
+
+Once a reviewer approves your pull request, it can be merged into the `main` branch.
+
+??? question "How do I request a review?"
+
+    Request a review from a team member by [assigning them as a reviewer][assigning pr reviewer] to your pull request.
+
+    However, you can also just wait until we get to your PR, you don't need to assign a reviewer unless you want
+    someone specific to review. Just make sure that your PR is marked as ready for review and not draft.
+
+### Giving Feedback
+
+If you wish, you can also provide some feedback on other PRs. Doing so is a great way to fill the time while you're
+waiting for your PR to be reviewed by us and you're also speeding up the process, as it reduces the amount of time
+we'd have to spend reviewing those other PRs before getting to yours.
+
+When reviewing a pull request, aim to be constructive and specific. Highlight areas that need improvement and suggest
+potential solutions. If you have any questions or concerns about something in the code, don't hesitate to ask the
+author for clarification.
+
+Focus on the following aspects during a code review:
+
+- Correctness and functionality
+- Code quality and readability
+- Adherence to the project guidelines
+
+??? example "Good Code Review Feedback"
+
+    Here are some examples of good code review feedback:
+
+    ```
+    - Great work on the new function! The implementation looks good overall.
+    - The tests cover most of the functionality, but it's missing a test case for edge case X. Could you add a test for that?
+    - The logic in the new function is somewhat complex. Consider breaking it into smaller functions for better clarity.
+    - The new feature is well-implemented, but it would be great to add more inline comments to explain the logic, as
+      it isn't trivial to understand.
+ - There's a small typo in the docstring. Could you correct it? + - The configuration settings are hard-coded. Can you move them to a configuration file to make it easier to manage? + ``` + +Always be respectful and considerate when giving feedback. Remember that the goal is to improve the code and help the +author grow as a developer. + +!!! success "Be Positive" + + Don't forget to acknowledge the positive aspects of the contribution as well! + +[pull request]: https://docs.github.com/en/pull-requests +[bug report]: ./issue-guide.md +[contributing guidelines]: ./guides/index.md +[assigning pr reviewer]: https://docs.github.com/en/pull-requests/collaborating-with-pull-requests/proposing-changes-to-your-work-with-pull-requests/requesting-a-pull-request-review diff --git a/docs/contributing/security-policy.md b/docs/contributing/security-policy.md new file mode 100644 index 00000000..61d98a3f --- /dev/null +++ b/docs/contributing/security-policy.md @@ -0,0 +1,3 @@ +# Security Policy + +--8<-- "SECURITY.md" diff --git a/docs/css/admoditions.css b/docs/css/admoditions.css new file mode 100644 index 00000000..f4132550 --- /dev/null +++ b/docs/css/admoditions.css @@ -0,0 +1,18 @@ +/* Add important admonition */ +:root { + --md-admonition-icon--important: url("data:image/svg+xml,"); +} +.md-typeset .admonition.important, +.md-typeset details.important { + border-color: rgb(171, 125, 248); +} +.md-typeset .important > .important, +.md-typeset .important > summary { + background-color: rgba(171, 125, 248, 0.1); +} +.md-typeset .important > .admonition-title::before, +.md-typeset .important > summary::before { + background-color: rgb(171, 125, 248); + -webkit-mask-image: var(--md-admonition-icon--important); + mask-image: var(--md-admonition-icon--important); +} diff --git a/docs/css/material.css b/docs/css/material.css new file mode 100644 index 00000000..51ae054a --- /dev/null +++ b/docs/css/material.css @@ -0,0 +1,4 @@ +/* Don't uppercase H5 headings. 
*/ +.md-typeset h5 { + text-transform: none; +} diff --git a/docs/css/mkdocstrings.css b/docs/css/mkdocstrings.css new file mode 100644 index 00000000..287a5dbd --- /dev/null +++ b/docs/css/mkdocstrings.css @@ -0,0 +1,47 @@ +/* Indentation. */ +div.doc-contents:not(.first) { + padding-left: 25px; + border-left: 0.05rem solid var(--md-typeset-table-color); +} + +/* Mark external links with an arrow. */ +a.external::after, +a.autorefs-external::after { + /* https://primer.style/octicons/arrow-up-right-24 */ + mask-image: url('data:image/svg+xml,'); + -webkit-mask-image: url('data:image/svg+xml,'); + content: " "; + + display: inline-block; + vertical-align: middle; + position: relative; + + height: 1em; + width: 1em; + background-color: currentColor; +} + +a.external:hover::after, +a.autorefs-external:hover::after { + background-color: var(--md-accent-fg-color); +} + +pre :is(a.external, a.autorefs-external)::after { + content: none; /* Remove the arrow icon for links inside
: in the signature */
+}
+
+/* Light blue color for parameter `param` symbols`. */
+[data-md-color-scheme="default"] {
+  --doc-symbol-parameter-fg-color: #829bd1;
+  --doc-symbol-parameter-bg-color: #829bd11a;
+}
+
+[data-md-color-scheme="slate"] {
+  --doc-symbol-parameter-fg-color: #829bd1;
+  --doc-symbol-parameter-bg-color: #829bd11a;
+}
+
+/* Hide parameter 'param' symbols in ToC. */
+li.md-nav__item:has(> a[href*="("]) {
+  display: none;
+}
diff --git a/docs/examples/index.rst b/docs/examples/index.rst
deleted file mode 100644
index 09ef2452..00000000
--- a/docs/examples/index.rst
+++ /dev/null
@@ -1,13 +0,0 @@
-Examples
-========
-
-Here are some examples of using the project in practice.
-
-
-.. toctree::
-    :maxdepth: 1
-    :caption: Examples
-
-    status.rst
-
-Feel free to propose any further examples, we'll be happy to add them to the list!
diff --git a/docs/examples/status.rst b/docs/examples/status.rst
deleted file mode 100644
index e9c37997..00000000
--- a/docs/examples/status.rst
+++ /dev/null
@@ -1,5 +0,0 @@
-Obtaining status data from a server
-===================================
-
-..
-    TODO: Write this
diff --git a/docs/extensions/attributetable.py b/docs/extensions/attributetable.py
deleted file mode 100644
index bbcb8b6e..00000000
--- a/docs/extensions/attributetable.py
+++ /dev/null
@@ -1,295 +0,0 @@
-from __future__ import annotations
-
-import importlib
-import inspect
-import re
-from collections.abc import Sequence
-from typing import Any, ClassVar, NamedTuple
-
-from docutils import nodes
-from sphinx import addnodes
-from sphinx.application import Sphinx
-from sphinx.environment import BuildEnvironment
-from sphinx.locale import _ as translate
-from sphinx.util.docutils import SphinxDirective
-from sphinx.util.typing import OptionSpec
-from sphinx.writers.html5 import HTML5Translator
-from typing_extensions import override
-
-
-class AttributeTable(nodes.General, nodes.Element):
-    pass
-
-
-class AttributeTableColumn(nodes.General, nodes.Element):
-    pass
-
-
-class AttributeTableTitle(nodes.TextElement):
-    pass
-
-
-class AttributeTablePlaceholder(nodes.General, nodes.Element):
-    pass
-
-
-class AttributeTableBadge(nodes.TextElement):
-    pass
-
-
-class AttributeTableItem(nodes.Part, nodes.Element):
-    pass
-
-
-def visit_attributetable_node(self: HTML5Translator, node: AttributeTable) -> None:
-    class_ = node["python-class"]
-    self.body.append(f'
') - - -def visit_attributetablecolumn_node(self: HTML5Translator, node: AttributeTableColumn) -> None: - self.body.append(self.starttag(node, "div", CLASS="py-attribute-table-column")) - - -def visit_attributetabletitle_node(self: HTML5Translator, node: AttributeTableTitle) -> None: - self.body.append(self.starttag(node, "span")) - - -def visit_attributetablebadge_node(self: HTML5Translator, node: AttributeTableBadge) -> None: - attributes = { - "class": "py-attribute-table-badge", - "title": node["badge-type"], - } - self.body.append(self.starttag(node, "span", **attributes)) - - -def visit_attributetable_item_node(self: HTML5Translator, node: AttributeTableItem) -> None: - self.body.append(self.starttag(node, "li", CLASS="py-attribute-table-entry")) - - -def depart_attributetable_node(self: HTML5Translator, node: AttributeTable) -> None: - self.body.append("
") - - -def depart_attributetablecolumn_node(self: HTML5Translator, node: AttributeTableColumn) -> None: - self.body.append("") - - -def depart_attributetabletitle_node(self: HTML5Translator, node: AttributeTableTitle) -> None: - self.body.append("") - - -def depart_attributetablebadge_node(self: HTML5Translator, node: AttributeTableBadge) -> None: - self.body.append("") - - -def depart_attributetable_item_node(self: HTML5Translator, node: AttributeTableItem) -> None: - self.body.append("") - - -_name_parser_regex = re.compile(r"(?P[\w.]+\.)?(?P\w+)") - - -class PyAttributeTable(SphinxDirective): - has_content: ClassVar[bool] = False - required_arguments: ClassVar[int] = 1 - optional_arguments: ClassVar[int] = 0 - final_argument_whitespace: ClassVar[bool] = False - option_spec: ClassVar[OptionSpec | None] = {} - - def parse_name(self, content: str) -> tuple[str, str]: - match = _name_parser_regex.match(content) - if match is None: - raise RuntimeError(f"content {content} somehow doesn't match regex in {self.env.docname}.") - path, name = match.groups() - if path: - modulename = path.rstrip(".") - else: - modulename = self.env.temp_data.get("autodoc:module") - if not modulename: - modulename = self.env.ref_context.get("py:module") - if modulename is None: - raise RuntimeError(f"modulename somehow None for {content} in {self.env.docname}.") - - return modulename, name - - @override - def run(self) -> list[AttributeTablePlaceholder]: - """If you're curious on the HTML this is meant to generate: - -
-
- translate('Attributes') - -
- -
- - However, since this requires the tree to be complete - and parsed, it'll need to be done at a different stage and then - replaced. - """ - content = self.arguments[0].strip() - node = AttributeTablePlaceholder("") - modulename, name = self.parse_name(content) - node["python-doc"] = self.env.docname - node["python-module"] = modulename - node["python-class"] = name - node["python-full-name"] = f"{modulename}.{name}" - return [node] - - -def build_lookup_table(env: BuildEnvironment) -> dict[str, list[str]]: - # Given an environment, load up a lookup table of - # full-class-name: objects - result = {} - domain = env.domains["py"] - - ignored = { - "data", - "exception", - "module", - "class", - } - - for fullname, _, objtype, _, _, _ in domain.get_objects(): - if objtype in ignored: - continue - - classname, _, child = fullname.rpartition(".") - try: - result[classname].append(child) - except KeyError: - result[classname] = [child] - - return result - - -class TableElement(NamedTuple): - fullname: str - label: str - badge: AttributeTableBadge | None - - -def process_attributetable(app: Sphinx, doctree: nodes.Node, fromdocname: str) -> None: - env = app.builder.env - - lookup = build_lookup_table(env) - for node in doctree.traverse(AttributeTablePlaceholder): - modulename, classname, fullname = node["python-module"], node["python-class"], node["python-full-name"] - groups = get_class_results(lookup, modulename, classname, fullname) - table = AttributeTable("") - for label, subitems in groups.items(): - if not subitems: - continue - table.append(class_results_to_node(label, sorted(subitems, key=lambda c: c.label))) - - table["python-class"] = fullname - - if not table: - node.replace_self([]) - else: - node.replace_self([table]) - - -def get_class_results( - lookup: dict[str, list[str]], modulename: str, name: str, fullname: str -) -> dict[str, list[TableElement]]: - module = importlib.import_module(modulename) - cls = getattr(module, name) - - groups: dict[str, 
list[TableElement]] = { - translate("Attributes"): [], - translate("Methods"): [], - } - - try: - members = lookup[fullname] - except KeyError: - return groups - - for attr in members: - attrlookup = f"{fullname}.{attr}" - key = translate("Attributes") - badge = None - label = attr - value = None - - for base in cls.__mro__: - value = base.__dict__.get(attr) - if value is not None: - break - - if value is not None: - doc = value.__doc__ or "" - if inspect.iscoroutinefunction(value) or doc.startswith("|coro|"): - key = translate("Methods") - badge = AttributeTableBadge("async", "async") - badge["badge-type"] = translate("coroutine") - elif isinstance(value, classmethod): - key = translate("Methods") - label = f"{name}.{attr}" - badge = AttributeTableBadge("cls", "cls") - badge["badge-type"] = translate("classmethod") - elif inspect.isfunction(value): - if doc.startswith(("A decorator", "A shortcut decorator")): - # finicky but surprisingly consistent - key = translate("Methods") - badge = AttributeTableBadge("@", "@") - badge["badge-type"] = translate("decorator") - elif inspect.isasyncgenfunction(value): - key = translate("Methods") - badge = AttributeTableBadge("async for", "async for") - badge["badge-type"] = translate("async iterable") - else: - key = translate("Methods") - badge = AttributeTableBadge("def", "def") - badge["badge-type"] = translate("method") - - groups[key].append(TableElement(fullname=attrlookup, label=label, badge=badge)) - - return groups - - -def class_results_to_node(key: str, elements: Sequence[TableElement]) -> AttributeTableColumn: - title = AttributeTableTitle(key, key) - ul = nodes.bullet_list("") - for element in elements: - ref = nodes.reference( - "", - "", - internal=True, - refuri=f"#{element.fullname}", - anchorname="", - *[nodes.Text(element.label)], # noqa: B026 # (from original impl) - ) - para = addnodes.compact_paragraph("", "", ref) - if element.badge is not None: - ul.append(AttributeTableItem("", element.badge, para)) - 
else: - ul.append(AttributeTableItem("", para)) - - return AttributeTableColumn("", title, ul) - - -def setup(app: Sphinx) -> dict[str, Any]: - app.add_directive("attributetable", PyAttributeTable) - app.add_node(AttributeTable, html=(visit_attributetable_node, depart_attributetable_node)) - app.add_node(AttributeTableColumn, html=(visit_attributetablecolumn_node, depart_attributetablecolumn_node)) - app.add_node(AttributeTableTitle, html=(visit_attributetabletitle_node, depart_attributetabletitle_node)) - app.add_node(AttributeTableBadge, html=(visit_attributetablebadge_node, depart_attributetablebadge_node)) - app.add_node(AttributeTableItem, html=(visit_attributetable_item_node, depart_attributetable_item_node)) - app.add_node(AttributeTablePlaceholder) - _ = app.connect("doctree-resolved", process_attributetable) - return {"parallel_read_safe": True} diff --git a/docs/pages/faq.rst b/docs/faq.md similarity index 50% rename from docs/pages/faq.rst rename to docs/faq.md index cb73d06a..5ada492a 100644 --- a/docs/pages/faq.rst +++ b/docs/faq.md @@ -1,20 +1,24 @@ -Frequently Asked Questions -========================== +--- +hide: + - navigation +--- -.. note:: - This page is still being worked on, if you have any suggestions for a question, feel free to create an issue on - GitHub, or let us know on the development discord server. +# Frequently Asked Questions -Missing synchronous alternatives for some functions ---------------------------------------------------- +!!! bug "Work In Progress" + + This page is still being worked on, if you have any suggestions for a question, feel free to create an issue on + GitHub, or let us know on the development discord server. 
+ +## Missing synchronous alternatives for some functions While mcproto does provide synchronous functionalities for the general protocol interactions (reading/writing packets and lower level structures), any unrelated functionalities (such as HTTP interactions with the Minecraft API) will only provide asynchronous versions. -This was done to reduce the burden of maintaining 2 versions of the same code. The only reason protocol intercation -even have synchronous support is because it's needed in the :class:`~mcproto.buffer.Buffer` class. (See `Issue #128 -`_ for more details on this decision.) +This was done to reduce the burden of maintaining 2 versions of the same code. The only reason protocol interaction +even have synchronous support is because it's needed for the [`Buffer`][mcproto.buffer.Buffer] +class. (See [Issue \#128](https://github.com/py-mine/mcproto/issues/128) for more details on this decision.) Generally, we recommend that you just stick to using the asynchronous alternatives though, both since some functions only support async, and because async will generally provide you with a more scalable codebase, making it much easier diff --git a/docs/index.md b/docs/index.md new file mode 100644 index 00000000..15b56a2d --- /dev/null +++ b/docs/index.md @@ -0,0 +1,33 @@ +--- +hide: + - navigation +--- + +# Home + +
+ Logo + MCPROTO +
+ +## What is Mcproto + +Mcproto is a python library that provides various low level interactions with the Minecraft protocol. It attempts to be +a full wrapper around the Minecraft protocol, which means it could be used as a basis for Minecraft bots written in +python, or even full python server implementations. + +!!! important + + Mcproto only covers the **latest minecraft protocol implementation**, updating with each full minecraft release + (not including snapshots!). Using mcproto for older versions of minecraft is not officially supported, if you need + to do so, you will want to use an older version of mcproto, but note that **no bug fixes or features will be + backported** to these older versions. + + *For more information on versioning and update practices, see our [Versioning Practices][versioning].* + +!!! warning + + This library is still heavily Work-In-Progress, which means a lot of things can still change and some features may + be missing or incomplete. Using the library for production applications at it's current state isn't recommended. + +[versioning]: meta/versioning.md diff --git a/docs/index.rst b/docs/index.rst deleted file mode 100644 index 18170ab2..00000000 --- a/docs/index.rst +++ /dev/null @@ -1,35 +0,0 @@ -.. mdinclude:: ../README.md - -Content -------- - -.. toctree:: - :maxdepth: 1 - :caption: Pages - - pages/installation.rst - usage/index.rst - examples/index.rst - pages/faq.rst - pages/changelog.rst - pages/version_guarantees.rst - pages/contributing.rst - pages/code-of-conduct.rst - -.. 
toctree:: - :maxdepth: 1 - :caption: API Documentation - - api/basic.rst - api/packets.rst - api/protocol.rst - api/internal.rst - api/types/index.rst - - -Indices and tables ------------------- - -* :ref:`genindex` -* :ref:`modindex` -* :ref:`search` diff --git a/docs/installation.md b/docs/installation.md new file mode 100644 index 00000000..85b3e9d7 --- /dev/null +++ b/docs/installation.md @@ -0,0 +1,151 @@ +--- +hide: + - navigation +--- + +# Installation + +## PyPI (stable) version + +Mcproto is available on [PyPI][mcproto-pypi] and can be installed like any other python library with: + +=== ":simple-python: pip" + + ```bash + pip install mcproto + ``` + +
+ + [pip] is the main package installer for Python. + +
+ +=== ":simple-poetry: poetry" + + ```bash + poetry add mcproto + ``` + +
+ + [Poetry] is an all-in-one solution for Python project management. + +
+ +=== ":simple-rye: rye" + + ```bash + rye add mcproto + ``` + +
+ + [Rye] is an all-in-one solution for Python project management, written in Rust. + +
+ +=== ":simple-ruff: uv" + + ```bash + uv pip install mcproto + ``` + +
+ + [uv] is an ultra fast dependency resolver and package installer, written in Rust. + +
+ +=== ":simple-pdm: pdm" + + ```bash + pdm add mcproto + ``` + +
+ + [PDM] is an all-in-one solution for Python project management. + +
+ +## Latest (git) version + +Alternatively, you may want to install the latest available version, which is what you currently see in the `main` git +branch. Although this method will actually work for any branch with a pretty straightforward change. + +This kind of installation should only be done if you wish to test some new unreleased features and it's likely that you +will encounter bugs. + +That said, since mcproto is still in development, changes can often be made quickly and it can sometimes take a while +for these changes to carry over to PyPI. So if you really want to try out that latest feature, this is the method +you'll want. + +To install the latest mcproto version directly from the `main` git branch, use: + +=== ":simple-python: pip" + + ```bash + pip install 'mcproto@git+https://github.com/py-mine/mcproto@main' + ``` + +
+ + [pip] is the main package installer for Python. + +
+ +=== ":simple-poetry: poetry" + + ```bash + poetry add 'git+https://github.com/py-mine/mcproto#main' + ``` + +
+ + [Poetry] is an all-in-one solution for Python project management. + +
+ +=== ":simple-rye: rye" + + ```bash + rye add mcproto --git='https://github.com/py-mine/mcproto' --branch main + ``` + +
+ + [Rye] is an all-in-one solution for Python project management, written in Rust. + +
+ +=== ":simple-ruff: uv" + + ```bash + uv pip install 'mcproto@git+https://github.com/py-mine/mcproto@main' + ``` + +
+ + [uv] is an ultra fast dependency resolver and package installer, written in Rust. + +
+ +=== ":simple-pdm: pdm" + + ```bash + pdm add "git+https://github.com/py-mine/mcproto@main" + ``` + +
+ + [PDM] is an all-in-one solution for Python project management. + +
+ +[mcproto-pypi]: https://pypi.org/project/mcproto +[pip]: https://pip.pypa.io/en/stable/ +[Poetry]: https://python-poetry.org/ +[Rye]: https://rye.astral.sh/ +[uv]: https://github.com/astral-sh/uv +[PDM]: https://pdm-project.org/en/latest/ diff --git a/docs/meta/attribution.md b/docs/meta/attribution.md new file mode 100644 index 00000000..91564f74 --- /dev/null +++ b/docs/meta/attribution.md @@ -0,0 +1,3 @@ +# Attribution + +--8<-- "ATTRIBUTION.md" diff --git a/docs/meta/changelog.md b/docs/meta/changelog.md new file mode 100644 index 00000000..5599c918 --- /dev/null +++ b/docs/meta/changelog.md @@ -0,0 +1,15 @@ +# Changelog + +!!! danger "" + + Major and minor releases also include the changes specified in prior development releases. + +!!! tip + + Feel free to skip the **Internal Changes** category if you aren't a contributor / core developer of mcproto. + +```python exec="yes" +--8<-- "docs/scripts/gen_changelog.py" +``` + +--8<-- "CHANGELOG.md" diff --git a/docs/meta/code-of-conduct.md b/docs/meta/code-of-conduct.md new file mode 100644 index 00000000..46ab1f49 --- /dev/null +++ b/docs/meta/code-of-conduct.md @@ -0,0 +1,154 @@ +# Code of Conduct + +This code of conduct outlines our expectations for the people involved with this project. We, as members, contributors, +and leaders, are committed to fostering a welcoming and inspiring project where anyone can participate with the +expectation of a harassment-free experience, as outlined in this code of conduct. + +The goal of this document is to set the overall tone for our community. It is here to outline some of the things you can +and can't do if you wish to participate in our community. + +However, it is not intended as a rulebook containing an exhaustive list of permitted and prohibited actions. Social +conduct varies between situations and individuals, but we should all do our best to create a welcoming and positive +experience for everyone. 
+ +We value many things beyond just technical expertise, including collaboration and supporting others within our +community. Providing a positive experience for others can have a much more significant impact than simply providing the +correct answer. + +## Harassment + +We share a common understanding of what constitutes harassment as it applies to a professional setting. Although this +list cannot be exhaustive, we explicitly honor the following "protected attributes": **diversity in age, gender, +culture, ethnicity, language, national origin, political beliefs, profession, race, religion, sexual orientation, +socioeconomic status, disability and personal appearance**. We will not tolerate discrimination based on any of the +protected characteristics above, including others not explicitly listed here. We consider discrimination of any kind to +be unacceptable and immoral. + +Harassment includes, but is not limited to: + +- Offensive comments (or "jokes") related to any of the above mentioned attributes. +- Deliberate "outing"/"doxing" of any aspect of a person's identity, such as physical or electronic address, without + their explicit consent, except as necessary means to protect others from intentional abuse. +- Unwelcome comments regarding a person's lifestyle choices and practices, including those related to food, health, + parenting, drugs and employment. +- Deliberate misgendering, including deadnaming or persistently using a pronoun that does not correctly reflect a + person's gender identity. You should do your best to address people by the name/pronoun they give you when not + addressing them by their username or handle. +- Threats of physical or psychological violence. +- Incitement of violence towards any individual, including encouraging a person to engage in self-harm. +- Publishing private communication without consent, even if non-harassing. 
+- A pattern of inappropriate behavior, such as unwelcome intimacy or persistent teasing after a request to stop. +- Continued one-on-one communication after requests to cease. +- Sabotage of someone else's work or intentionally hindering someone else's performance. + +## Plagiarism + +Plagiarism is the re-use of someone else's work (e.g., binary content such as images, textual content such as an +article, but also source code, or any other copyrightable resources) without the permission or a license right from the +author. Claiming someone else's work as your own is not only unethical and disrespectful to the author, but also +illegal in most countries. You should always respect the author's wishes, and give credit where credit is due. + +### Intentional vs. Unintentional Plagiarism + +If we find that you've **intentionally** attempted to add plagiarized content to our code-base, you will likely face a +**permanent ban** from any future contributions to this project's repository. We will, of course, do our best to +remove, or properly attribute this plagiarized content as quickly as possible. + +Unintentional plagiarism will not be punished as harshly, but nevertheless, it is your responsibility as a contributor +to check where the code you're submitting comes from, and so, repeated submissions of such content, even after warnings, +may still result in a ban. + +### Understanding code licensing + +Please note that an online repository **without a license** is presumed to only be source-available, **NOT +open-source**. This means the work is **still protected by author's copyright**, automatically imposed over it and +without any license extending that copyright, you have no legal rights to use such code. **Simply finding publicly +posted code does not grant permission to reuse it in other projects.** This code may be available to be seen by anyone, +but that does not mean it's also available to be used by anyone in any way they like. 
+ +Another important note to keep in mind is that **even if a project has an open-source license**, that license may have +conditions which are **incompatible** with our codebase. For example, some licenses require that all linked code be +licensed under the same terms, which may not align with our project's licensing. Always review and understand a license +before using code under it — **simple attribution often isn't enough**. + +??? tip "Learn more about software licensing" + + If you are new to software licensing, you can check out [this](https://itsdrike.com/posts/software-licenses/) + article, which does a good job at explaining the basics. + +## Generally inappropriate behavior + +Outside of just harassment and plagiarism, there are countless other behaviors which we consider unacceptable, as they +may be offensive or discourage people from engaging with our community. + +**Examples of generally inappropriate behavior:** + +- The use of sexualized language or imagery of any kind +- The use of inappropriate images, including in an account's avatar +- The use of inappropriate language, including in an account's nickname +- Any form of spamming, flaming, baiting or other attention-stealing / disruptive behavior that derails discussions +- Discussing topics that are overly polarizing, sensitive, or incite arguments. 
+- Responding with "RTFM", "just google it" or similar response to help requests +- Other conduct which could be reasonably considered inappropriate + +**Examples of generally appropriate behavior:** + +- Being kind and courteous to others +- Collaborating with other community members +- Gracefully accepting constructive criticism +- Using welcoming and inclusive language +- Showing empathy towards other community members + +## Scope + +This Code of Conduct applies within all community spaces, including this repository itself, conversations on any +platforms officially connected to this project (such as in GitHub issues, emails or platforms like discord). It also +applies when an individual is officially representing the community in public spaces. Examples of representing our +community include using an official social media account, or acting as an appointed representative at an online or +offline event. + +All members involved with the project are expected to follow this Code of Conduct, regardless of their position in the +project's hierarchy, this Code of Conduct applies equally to contributors, maintainers, and those seeking help or +reporting bugs. + +## Enforcement Responsibilities + +Whenever a participant has made a mistake, we expect them to take responsibility for their actions. If someone has been +harmed or offended, it is our responsibility to listen carefully and respectfully, and to do our best to right the +wrong. + +Community leaders are responsible for clarifying and enforcing our standards of acceptable behavior and will take +appropriate and fair corrective action in response to any behavior that they deem inappropriate, threatening, +offensive, harmful, or otherwise undesirable. + +Community leaders have the authority and responsibility to remove, edit, or reject any contributions — such as comments, +commits, code, wiki edits, issues, or Discord messages — that violate this Code of Conduct. 
When appropriate, they will +also make sure to communicate the reasons for moderation decisions. + +If you have experienced or witnessed unacceptable behavior constituting a code of conduct violation or have any other +code of conduct concerns, please let us know and we will do our best to resolve this issue. + +## Reporting a Code of Conduct violation + +If you think that someone is violating the Code of Conduct, you can report it to any repository maintainer. When doing +so, follow these steps: + +1. Contact a repository maintainer via email or Discord DM. Avoid using public channels for these reports. +2. When submitting the report, make sure to provide all the necessary details of the incident, including context and + relevant links/screenshots. +3. We also kindly ask that you maintain confidentiality and avoid any public discussions of the violation. + +## Sources + +The open-source community has an incredible amount of resources that people have freely provided to others and we all +depend on these projects in many ways. This code of conduct article is no exception and there were many open source +projects that has helped bring this code of conduct to existence. For that reason, we'd like to thank all of these +communities and projects for keeping their content open and available to everyone, but most notably we'd like to thank +the projects with established codes of conduct and diversity statements that we used as our inspiration. Below is the +list these projects: + +- Python: +- Contributor Covenant: +- Rust-lang: +- Code Fellows: +- Python Discord: diff --git a/docs/meta/license.md b/docs/meta/license.md new file mode 100644 index 00000000..b274f16b --- /dev/null +++ b/docs/meta/license.md @@ -0,0 +1,77 @@ +# License + +This project's source code is licensed under the **GNU Lesser General Public License** (LGPL) version 3. + +The LGPL license allows you to use mcproto as a library pretty much in any code-base, including in proprietary +code-bases. 
However, if you wish to make a derivative project to mcproto itself, such a project will need to be licensed under +LGPL as well. + +!!! tip + + If you want to see a quick glance of what this license allows, prohibits & requires, check it out in [tl;dr + legal][tldr-lgpl]. + +??? example "Full LICENSE text" + + ```title="LICENSE.txt" + --8<-- "LICENSE.txt" + ``` + +!!! note + + If you need a copyright header for attribution, you can use: + + === "Rendered" + + mcproto + + Copyright © 2025 ItsDrike <itsdrike@protonmail.com> + + === "HTML" + + ```html + mcproto + + Copyright © 2025 ItsDrike <itsdrike@protonmail.com> + ``` + +## This documentation + +This documentation itself follows a Creative Commons license: CC BY-NC-SA 4.0 + +!!! note + + If you need a copyright header for proper attribution, you can use: + + === "Rendered" + + Mcproto Documentation © 2024 by ItsDrike + + If you also need the license identifier, use the following: + + CC BY-NC-SA 4.0 + + === "HTML" + + ```html + Mcproto Documentation © 2024 by ItsDrike + ``` + + If you also need the license identifier, use the following: + + ```html + CC BY-NC-SA 4.0 + ``` + +## Differently licensed parts + +Some parts of the project follow a different license. See the `LICENSE-THIRD-PARTY.txt` file, which lists all of these +parts and their respective licenses. + +??? example "Full LICENSE-THIRD-PARTY text" + + ```title="LICENSE-THIRD-PARTY.txt" + --8<-- "LICENSE-THIRD-PARTY.txt" + ``` + +[tldr-lgpl]: https://www.tldrlegal.com/license/gnu-lesser-general-public-license-v3-lgpl-3 diff --git a/docs/meta/support.md b/docs/meta/support.md new file mode 100644 index 00000000..9ac1430b --- /dev/null +++ b/docs/meta/support.md @@ -0,0 +1,12 @@ +# Support + +- If you found a bug, or wish to propose a new feature, please follow [this guide][issue-guide] +- If you just want to ask a question, feel free to do so on the [project's discussion board][github-discussions], or get + in touch through our [discord server]. 
+- In case you have a security concern, or some other problem that requires private resolution, please follow our + [security policy] to disclose the issue appropriately. + +[issue-guide]: ../contributing/issue-guide.md +[github-discussions]: https://github.com/py-mine/mcproto/discussions +[discord server]: https://discord.gg/C2wX7zduxC +[security policy]: ../contributing/security-policy.md diff --git a/docs/meta/versioning.md b/docs/meta/versioning.md new file mode 100644 index 00000000..51702975 --- /dev/null +++ b/docs/meta/versioning.md @@ -0,0 +1,113 @@ +# Versioning Practices & Guarantees + +!!! bug "Work In Progress" + + This page is missing an explanation on how to figure out which minecraft version a given mcproto version is for. + This is because we currenly don't have any way to do so, once this will be decided on, it should be documented + here. + +!!! danger "Pre-release phase" + + Mcproto is currently in the pre-release phase (pre v1.0.0). During this phase, these guarantees will NOT be + followed! This means that **breaking changes can occur in minor version bumps**. That said, micro version bumps are + still strictly for bugfixes, and will not include any features or breaking changes. + +This library follows [semantic versioning model][semver], which means the major version is updated every time +there is an incompatible (breaking) change made to the public API. In addition to semantic versioning, mcproto has +unique versioning practices related to new Minecraft releases. + +## Versioning Model for Minecraft Releases + +Mcproto aims to always be compatible with the **latest Minecraft protocol implementation**, updating the library as +soon as possible after each **full Minecraft release** (snapshots are not supported). + +Typically, a new Minecraft release will result in a major version bump for mcproto, since protocol changes are often +breaking in nature. 
That said, it is not impossible for a new Minecraft release not to include breaking changes, in +this case, we will not perform this version bump. + +However, there may be cases where we release a major version that does not correspond to a Minecraft update, depending +on the changes made in the library itself. + +!!! info "Recap" + + - **Minecraft Updates**: When a new version of Minecraft is released and introduces breaking changes to the + protocol, mcproto will increment its major version (e.g., from `1.x.x` to `2.0.0`). + - **Non-breaking Protocol Changes**: If a Minecraft update introduces new features or protocol adjustments that do + not break the existing public API, we may opt to release a minor version (e.g., from `1.0.x` to `1.1.0`). + - **Non-protocol Major Releases**: Major releases may also happen due to significant internal changes or + improvements in the library that are independent of Minecraft protocol updates. + +!!! warning + + While mcproto strives to stay updated with Minecraft releases, this project is maintained by unpaid volunteers. We do + our best to release updates in a timely manner after a new Minecraft version, but delays may occur. + +## Examples of Breaking Changes + +First thing to keep in mind is that breaking changes only apply to **publicly documented API**. Internal features, +including any attributes that start with an underscore or those explicitly mentioned as internal are not a part of the +public API and are subject to change without warning. + +Here are examples of what constitutes a breaking change: + +- Changing the default parameter value of a function to something else. +- Renaming (or removing) a function without deprecation +- Adding or removing parameters of a function. +- Removing deprecated alias to a renamed function. +- Protocol changes that affect how public methods or classes behave. + +!!! note + + The examples above are non-exhaustive. 
+ +## Examples of Non-Breaking Changes + +The following changes are considered non-breaking under mcproto’s versioning model: + +
+ +- Changing function's name, while providing a deprecated alias. +- Renaming (or removing) internal attributes or methods, such as those prefixed with an underscore. +- Adding new functionality that doesn’t interfere with existing function signatures or behavior. +- Changing the behavior of a function to fix a bug. (1) +- Changes in the typing definitions of the public API. +- Changes in the documentation. +- Modifying the internal protocol connection handling. +- Adding an element into `__slots__` of a data class. +- Updating the dependencies to a newer version, major or otherwise. + +
+ +1. This only includes changes that don't affect users in a breaking way, unless you're relying on the bug—in which + case, that's on you, and it's probably time to rethink your life choices. + +## Special Considerations + +Given that mcproto is tied closely to the evolving Minecraft protocol, we may have to make breaking changes more +frequently than a typical Python library. + +While we aim to provide deprecation warnings for changes, particularly in **protocol-independent core library +features**, there are certain limitations due to the nature of Minecraft protocol updates. When a major update is +released as a result of a Minecraft protocol change, **we will not provide deprecations for affected features**, as the +protocol itself has changed in a way that necessitates immediate adaptation. + +However, for **internal major updates** that are independent of Minecraft protocol changes, **we will make every effort +to deprecate old behavior**, giving users time to transition smoothly before removing legacy functionality. + +Specifically, the protocol dependant code includes code in `mcproto.packets` and `mcproto.types` packages. Lower level +protocol abstractions present in `mcproto.protocol`, `mcproto.buffer`, `mcproto.connection`, `mcproto.encryption`, +`mcproto.multiplayer` and `mcproto.auth` will go through proper deprecations. This should allow you to safely use these +lower level features to communicate to servers at any protocol version. + +## Communicating deprecations & breaking changes + +When a breaking change occurs, you will always find it listed at the top of the changelog. Here, will also find +detailed notes about any migration instructions and a brief reason for the change. 
+ +When a feature is deprecated, we will notify users through: + +- **Warnings in the code** (via `DeprecationWarning`): These warnings will contain details about what was deprecated, + including a replacement option (if there is one) and a version number for when this deprecation will be removed. +- **Entries in the changelog**: This includes any migration instructions and a brief reason for deprecation. + +[semver]: https://semver.org diff --git a/docs/pages/changelog.rst b/docs/pages/changelog.rst deleted file mode 100644 index 68f630ee..00000000 --- a/docs/pages/changelog.rst +++ /dev/null @@ -1,12 +0,0 @@ -Changelog -========= - -.. seealso:: - Check out what can and can't change between the library versions. :doc:`version_guarantees` - -.. attention:: - Major and minor releases also include the changes specified in prior development releases. - -.. towncrier-draft-entries:: Unreleased changes - -.. mdinclude:: ../../CHANGELOG.md diff --git a/docs/pages/code-of-conduct.rst b/docs/pages/code-of-conduct.rst deleted file mode 100644 index 20161955..00000000 --- a/docs/pages/code-of-conduct.rst +++ /dev/null @@ -1,159 +0,0 @@ -Code of Conduct -=============== - -This code of conduct outlines our expectations for the people involved with this project. We, as members, contributors -and leaders are committed to providing a welcoming and inspiring project that anyone can easily join, expecting -a harassment-free experience, as described in this code of conduct. - -This code of conduct is here to ensure we provide a welcoming and inspiring project that anyone can easily join, -expecting a harassment-free experience, as described in this code of conduct. - -The goal of this document is to set the overall tone for our community. It is here to outline some of the things you -can and can't do if you wish to participate in our community. 
However it is not here to serve as a rule-book with -a complete set of things you can't do, social conduct differs from situation to situation, and person to person, but we -should do our best to try and provide a good experience to everyone, in every situation. - -We value many things beyond just technical expertise, including collaboration and supporting others within our -community. Providing a positive experience for others can have a much more significant impact than simply providing the -correct answer. - -Harassment ----------- - -We share a common understanding of what constitutes harassment as it applies to a professional setting. Although this -list cannot be exhaustive, we explicitly honor diversity in age, gender, culture, ethnicity, language, national origin, -political beliefs, profession, race, religion, sexual orientation, socioeconomic status, disability and personal -appearance. We will not tolerate discrimination based on any of the protected characteristics above, including some -that may not have been explicitly mentioned here. We consider discrimination of any kind to be unacceptable and -immoral. - -Harassment includes, but is not limited to: - -* Offensive comments (or "jokes") related to any of the above mentioned attributes. -* Deliberate "outing"/"doxing" of any aspect of a person's identity, such as physical or electronic address, without - their explicit consent, except as necessary to protect others from intentional abuse. -* Unwelcome comments regarding a person's lifestyle choices and practices, including those related to food, health, - parenting, drugs and employment. -* Deliberate misgendering. This includes deadnaming or persistently using a pronoun that does not correctly reflect a - person's gender identity. You must address people by the name they give you when not addressing them by their - username or handle. -* Threats of violence, both physical and psychological. 
-* Incitement of violence towards any individual, including encouraging a person to engage in self-harm. -* Publication of non-harassing private communication. -* Pattern of inappropriate social conduct, such as requesting/assuming inappropriate levels of intimacy with others, or - excessive teasing after a request to stop. -* Continued one-on-one communication after requests to cease. -* Sabotage of someone else's work or intentionally hindering someone else's performance. - -Plagiarism ----------- - -Plagiarism is the re-use of someone else's work (eg: binary content such as images, textual content such as an article, -but also source code, or any other copyrightable resources) without the permission or license right from the author. -Claiming someone else's work as your own is not just immoral and disrespectful to the author, but also illegal in most -countries. You should always follow the authors wishes, and give credit where credit is due. - -If we found that you've **intentionally** attempted to add plagiarized content to our code-base, you will likely end up -being permanently banned from any future contributions to this project's repository. We will of course also do our best -to remove, or properly attribute this plagiarized content as quickly as possible. - -An unintentional attempt at plagiarism will not be punished as harshly, but nevertheless, it is your responsibility as -a contributor to check where the code you're submitting comes from, and so repeated submission of such content, even -after you were warned might still get you banned. - -Please note that an online repository that has no license is presumed to only be source-available, NOT open-source. -Meaning that this work is protected by author's copyright, automatically imposed over it, and without any license -extending that copyright, you have no rights to use such code. So know that you can't simply take some source-code, -even though it's published publicly. 
This code may be available to be seen by anyone, but that does not mean it's also -available to be used by anyone in other projects. - -Another important note to keep in mind is that even if some project has an open-source license, that license may have -conditions which are incompatible with our codebase (such as requiring all of the code that links to this new part to -also be licensed under the same license, which our code-base is not currently under). That is why it's necessary to -understand a license before using code available under it. Simple attribution often isn't everything that the license -requires. - -Generally inappropriate behavior --------------------------------- - -Outside of just harassment and plagiarism, there are countless other behaviors which we consider unacceptable, as they -may be offensive, and discourage people from engaging with our community. - -**Examples of generally inappropriate behavior:** - -* The use of sexualized language or imagery of any kind -* The use of inappropriate images, including in an account's avatar -* The use of inappropriate language, including in an account's nickname -* Any spamming, flamming, baiting or other attention-stealing behavior -* Discussing topics that are overly polarizing, sensitive, or incite arguments. -* Responding with "RTFM", "just google it" or similar response to help requests -* Other conduct which could be reasonably considered inappropriate - -**Examples of generally appropriate behavior:** - -* Being kind and courteous to others -* Collaborating with other community members -* Gracefully accepting constructive criticism -* Using welcoming and inclusive language -* Showing empathy towards other community members - -Scope ------ - -This Code of Conduct applies within all community spaces, including this repository itself, conversations on any -platforms officially connected to this project (such as in GitHub issues, through official emails or applications like -discord). 
It also applies when an individual is officially representing the community in public spaces. Examples of -representing our community include using an official social media account, or acting as an appointed representative at -an online or offline event. - -All members involved with the project are expected to follow this Code of Conduct, no matter their position in the -project's hierarchy, this Code of Conduct applies equally to contributors, maintainers, people seeking help/reporting -bugs, etc. - -Enforcement Responsibilities ----------------------------- - -Whenever a participant has made a mistake, we expect them to take responsibility for their actions. If someone has been -harmed or offended, it is our responsibility to listen carefully and respectfully, and to do our best to right the -wrong. - -Community leaders are responsible for clarifying and enforcing our standards of acceptable behavior and will take -appropriate and fair corrective action in response to any behavior that they deem inappropriate, threatening, -offensive, harmful, or otherwise undesirable. - -Community leaders have the right and responsibility to remove, edit or reject comments, commits, code, wiki edits, -issues and other contributions within the enforcement scope that are not aligned to this Code of Conduct, and will -communicate reasons for moderation decisions when appropriate. - -If you have experienced or witnessed unacceptable behavior constituting a code of conduct violation or have any other -code of conduct concerns, please let us know and we will do our best to resolve this issue. - -Reporting a Code of Conduct violation -------------------------------------- - -If you saw someone violating the Code of Conduct in some way, you can report it to any repository maintainer, either by -email or through a Discord DM. You should avoid using public channels for reporting these, and instead do so in private -discussion with a maintainer. 
- -Sources -------- - -The open-source community has an incredible amount of resources that people have freely provided to others and we all -depend on these projects in many ways. This code of conduct article is no exception and there were many open source -projects that has helped bring this code of conduct to existence. For that reason, we'd like to thank all of these -communities and projects for keeping their content open and available to everyone, but most notably we'd like to thank -the projects with established codes of conduct and diversity statements that we used as our inspiration. Below is the -list these projects: - -* `Python `_ -* `Contributor Covenant `_ -* `Rust-lang `_ -* `Code Fellows `_ -* `Python Discord `_ - -License -------- - -All content of this page is licensed under a Creative Commons Attributions license. - -For more information about this license, see: diff --git a/docs/pages/contributing.rst b/docs/pages/contributing.rst deleted file mode 100644 index 1c554637..00000000 --- a/docs/pages/contributing.rst +++ /dev/null @@ -1,9 +0,0 @@ -Contributing Guidelines -======================= - -.. mdinclude:: ../../CONTRIBUTING.md - :start-line: 2 - -.. - TODO: Rewrite CONTRIBUTING.md here directly, rather than including it - like this, and just include a link to the docs in CONTRIBUTING.md diff --git a/docs/pages/installation.rst b/docs/pages/installation.rst deleted file mode 100644 index 3515936b..00000000 --- a/docs/pages/installation.rst +++ /dev/null @@ -1,28 +0,0 @@ -Installation -============ - -PyPI (stable) version ---------------------- - -Mcproto is available on `PyPI `_, and can be installed trivially with: - -.. code-block:: bash - - python3 -m pip install mcproto - -This will install the latest stable (released) version. This is generally what you'll want to do. 
- -Latest (git) version --------------------- - -Alternatively, you may want to install the latest available version, which is what you currently see in the ``main`` -git branch. Although this method will actually work for any branch with a pretty straightforward change. This kind of -installation should only be done when testing new feautes, and it's likely you'll encounter bugs. - -That said, since mcproto is still in development, changes can often be made pretty quickly, and it can sometimes take a -while for these changes to carry over to PyPI. So if you really want to try out that latest feature, this is the method -you'll want. - -.. code-block:: bash - - python3 -m pip install 'mcproto@git+https://github.com/py-mine/mcproto@main' diff --git a/docs/pages/version_guarantees.rst b/docs/pages/version_guarantees.rst deleted file mode 100644 index d530fa86..00000000 --- a/docs/pages/version_guarantees.rst +++ /dev/null @@ -1,38 +0,0 @@ -Version Guarantees -================== - -.. attention:: - Mcproto is currently in the pre-release phase (pre v1.0.0). During this phase, these guarantees will NOT be - followed! This means that **breaking changes can occur in minor version bumps**, though micro version bumps are - still strictly for bugfixes, and will not include any features or breaking changes. - -This library follows `semantic versioning model `_, which means the major version is updated every -time there is an incompatible (breaking) change made to the public API. However due to the fairly dynamic nature of -Python, it can be hard to discern what can be considered a breaking change, and what isn't. - -First thing to keep in mind is that breaking changes only apply to **publicly documented functions and classes**. If -it's not listed in the documentation here, it's an internal feature, that isn't considered a part of the public API, -and thus is bound to change. This includes documented attributes that start with an underscore. - -.. 
note:: - The examples below are non-exhaustive. - -Examples of Breaking Changes ----------------------------- - -* Changing the default parameter value of a function to something else. -* Renaming (or removing) a function without an alias to the old function. -* Adding or removing parameters of a function. -* Removing deprecated alias to a renamed function - -Examples of Non-Breaking Changes --------------------------------- - -* Changing function's name, while providing a deprecated alias. -* Renaming (or removing) private underscored attributes. -* Adding an element into `__slots__` of a data class. -* Changing the behavior of a function to fix a bug. -* Changes in the typing behavior of the library. -* Changes in the documentation. -* Modifying the internal protocol connection handling. -* Updating the dependencies to a newer version, major or otherwise. diff --git a/docs/reference/abc.md b/docs/reference/abc.md new file mode 100644 index 00000000..26614144 --- /dev/null +++ b/docs/reference/abc.md @@ -0,0 +1,6 @@ +# Abstract Base Classes + +::: mcproto.utils.abc.Serializable + options: + show_root_heading: true + show_root_toc_entry: true diff --git a/docs/reference/authentication.md b/docs/reference/authentication.md new file mode 100644 index 00000000..f253afa0 --- /dev/null +++ b/docs/reference/authentication.md @@ -0,0 +1,11 @@ +# Authentication + +::: mcproto.auth.account + +::: mcproto.auth.yggdrasil + +::: mcproto.auth.msa + +::: mcproto.auth.microsoft.oauth + +::: mcproto.auth.microsoft.xbox diff --git a/docs/reference/encryption.md b/docs/reference/encryption.md new file mode 100644 index 00000000..3cf23c3e --- /dev/null +++ b/docs/reference/encryption.md @@ -0,0 +1,6 @@ +# Encryption utilities + +The following components are used for encryption related interacions (generally needed during the communication with +the server, after an encryption request during the login process) + +::: mcproto.encryption diff --git a/docs/reference/multiplayer.md 
b/docs/reference/multiplayer.md new file mode 100644 index 00000000..600d6c28 --- /dev/null +++ b/docs/reference/multiplayer.md @@ -0,0 +1,5 @@ +# Multiplayer utilities + +The following components are used for various multiplayer interacions (generally needed during the server joining process). + +::: mcproto.multiplayer diff --git a/docs/reference/packets.md b/docs/reference/packets.md new file mode 100644 index 00000000..08f72ab9 --- /dev/null +++ b/docs/reference/packets.md @@ -0,0 +1,41 @@ +# Packets + +!!! bug "Pending rewrite of this page" + + This page will be rewritten in the near future and split it into multiple pages for the individual game states, + with the play state possibly being subdivided into even more pages. Currently, this page shows all implemented + packets in mcproto. This split will happen once play state packets are introduced. + +## Base classes and interaction functions + +::: mcproto.packets + options: + heading_level: 3 + +## Handshaking gamestate + +::: mcproto.packets.handshaking.handshake + options: + heading_level: 3 + +## Status gamestate + +::: mcproto.packets.status.ping + options: + heading_level: 3 + +::: mcproto.packets.status.status + options: + heading_level: 3 + +## Login gamestate + +::: mcproto.packets.login.login + options: + heading_level: 3 + +## Play gamestate + +!!! bug "Work In Progress" + + Packets for the Play gamestate aren't yet implemented. diff --git a/docs/reference/protocol.md b/docs/reference/protocol.md new file mode 100644 index 00000000..9b85fc0d --- /dev/null +++ b/docs/reference/protocol.md @@ -0,0 +1,12 @@ +# Protocol documentation + +This is the documentation for components related to interactions with the minecraft protocol and connection establishing. 
+ +::: mcproto.protocol.base_io + +::: mcproto.buffer.Buffer + options: + show_root_heading: true + show_root_toc_entry: true + +::: mcproto.connection diff --git a/docs/reference/types.md b/docs/reference/types.md new file mode 100644 index 00000000..e13c6446 --- /dev/null +++ b/docs/reference/types.md @@ -0,0 +1,5 @@ +# Types + +::: mcproto.types + options: + show_submodules: true diff --git a/docs/scripts/gen_changelog.py b/docs/scripts/gen_changelog.py new file mode 100644 index 00000000..3d18ab4a --- /dev/null +++ b/docs/scripts/gen_changelog.py @@ -0,0 +1,62 @@ +"""Script to generate a draft towncrier changelog for the next release. + +This script is intended to be ran by mkdocs to generate a markdown output that will be included +in the changelog page of the documentation. + +(The script is executed from the project root directory, so the paths are relative to that) +""" + +import subprocess + +INDENT_PREFIX = " " # we use 4 spaces for single indent + + +def get_project_version() -> str: + """Get project version using git describe. + + This will obtain a version named according to the latest version tag, + followed by the number of commits since that tag, and the latest commit hash. + (e.g. 
v0.5.0-166-g26b88) + """ + proc = subprocess.run( + ["git", "describe", "--tags", "--abbrev=5"], # noqa: S607 + capture_output=True, + check=True, + ) + proc.check_returncode() + out = proc.stdout.decode().strip() + if out == "": + raise ValueError("Could not get project version") + return out + + +def get_changelog(version: str) -> str: + """Generate draft changelog for the given project version.""" + proc = subprocess.run( # noqa: S603 + ["towncrier", "build", "--draft", "--version", version], # noqa: S607 + capture_output=True, + check=True, + ) + proc.check_returncode() + + changes = proc.stdout.decode().strip() + if changes == "": + raise ValueError("Could not generate changelog") + + header, changes = changes.split("\n", maxsplit=1) + changes = changes.lstrip() + + if changes.startswith("No significant changes"): + return "" + + # Wrap the changes output into an admonition block + admonition_header = '???+ example "Unreleased Changes"' + + # Prefix each line with a tab to make it part of the admonition block + header = f"{INDENT_PREFIX}{header}" + changes = "\n".join(f"{INDENT_PREFIX}{line}" for line in changes.split("\n")) + + return admonition_header + "\n" + header + "\n\n" + changes + + +print(get_changelog(get_project_version())) diff --git a/docs/usage/authentication.md b/docs/usage/authentication.md new file mode 100644 index 00000000..dcdf3545 --- /dev/null +++ b/docs/usage/authentication.md @@ -0,0 +1,263 @@ +# Minecraft account authentication + +Mcproto has first party support to handle authentication, allowing you to use your own minecraft account. This is +needed if you wish to login to "online mode" (non-warez) servers as a client (player). + +## Microsoft (migrated) accounts + +This is how authentication works for already migrated minecraft accounts, using Microsoft accounts for authentication. +(This will be most accounts. Any newly created minecraft accounts - after 2021 will always be Microsoft linked +accounts.) 
+
+### Creating Azure application
+
+To authenticate with a microsoft account, you will need to go through the entire OAuth2 flow. Mcproto has functions to
+hide pretty much all of this away, however you will need to create a new Microsoft Azure application, that mcproto
+will use to obtain an access token.
+
+We know this is annoying, but it's a necessary step, as Microsoft only allows these applications to request OAuth2
+authentication, and to avoid potential abuse, we can't really just use our registered application (like with say
+[MultiMC]), as this token would have to be embedded into our source-code, and
+since this is python, that would mean just including it here in plain text, and because mcproto is a low level library
+that can be used for any kind of interactions, we can't trust that you won't abuse this token.
+
+Instead, everyone using mcproto should register a new application, and get their own MSA token for your application
+that uses mcproto in the back.
+
+To create a new application, follow these steps (this is a simplified guide, for a full guide, feel free to check the
+[Microsoft documentation][azure-app-registration]):
+
+1. Go to the [Azure portal] and log in (create an account if you need to).
+2. Search for and select **Azure Active Directory**.
+3. On the left navbar, under **Manage** section, click on **App registrations**.
+4. Click on **New registration** on top navbar.
+5. Pick a name for the application. Anyone using your app to authenticate will see this name.
+6. Choose **Personal Microsoft accounts only** from the Supported account types.
+7. Leave the **Redirect URI (optional)** empty.
+8. Click on **Register**.
+
+From there, you will need to enable this application to be used for OAuth2 flows. To do that, follow these steps:
+
+1. On the left navbar, under **Manage** section, click on **Authentication**.
+2. Set **Allow public client flows** to **Yes**.
+3. Click **Save**.
+
+After that, you can go back to the app (click **Overview** from the left navbar), and you'll want to copy the
+**Application (client) ID**. This is the ID you will need to pass to mcproto. (You will also need the **Display name**,
+and the **Directory (Tenant) ID** for [Registering the application with Minecraft] - first time only)
+
+If you ever need to access this application again, follow these steps (as Microsoft Azure is pretty unintuitive, we
+document this too):
+
+1. Go to the [Azure portal] and log in.
+2. Click on **Azure Active Directory** (if you can't find it on the main page, you can use the search).
+3. On the left navbar, under **Manage** section, click on **App registrations**.
+4. Click on **View all applications from personal account** (assuming you registered the app from a personal account).
+5. Click on your app.
+
+### Registering the application with Minecraft
+
+Previously, this step wasn't required, however due to people maliciously creating these applications to steal
+accounts, Mojang have recently started to limit access to the Minecraft services API, and only allow
+explicitly white listed Client IDs to use this API.
+
+This API is an absolutely crucial step in getting the final minecraft token, and so you will need to register your Client
+ID to be white listed by Mojang. Thankfully, it looks like Mojang is generally pretty lenient and at least for me,
+they didn't cause any unnecessary hassles when I asked for my application to be registered, for development purposes
+and work on mcproto.
+
+That said, you will need to wait a while (about a week, though it could be more), until Mojang reviews your
+application and approves it. There isn't much we can do about this.
+
+To get your Azure application registered, you will need to fill out a simple form, where you accept the EULA, provide
+your E-Mail, Application name, Application Client ID and Tenant ID.
+
+More annoyingly you will additionally also need to provide an **associated website or domain** for your project/brand.
+(This application is generally designed for more user-facing programs, such as full launchers. When registering
+mcproto, I just used the GitHub URL). Lastly, you'll want to describe why you need access to this API in the
+**Justification** section.
+
+Visit the [Mojang article][mojang-api-review-article] describing this process. There
+is also a link to the form to fill out.
+
+### The code
+
+Finally, after you've managed to register your application and get it approved by Mojang, you can use it with mcproto,
+go through the Microsoft OAuth2 flow and authorize this application to access your Microsoft account, which mcproto
+will then use to get the minecraft token you'll then need to login to online servers.
+
+```python
+import httpx
+from mcproto.auth.microsoft.oauth import full_microsoft_oauth
+from mcproto.auth.microsoft.xbox import xbox_auth
+from mcproto.auth.msa import MSAAccount
+
+MY_MSA_CLIENT_ID = "[REDACTED]"  # Paste your own Client ID here
+
+async def authenticate() -> MSAAccount:
+    async with httpx.AsyncClient() as client:
+        microsoft_token = await full_microsoft_oauth(client, MY_MSA_CLIENT_ID)
+        user_hash, xsts_token = await xbox_auth(client, microsoft_token)
+        return await MSAAccount.xbox_auth(client, user_hash, xsts_token)
+```
+
+Note that the `full_microsoft_oauth` function will print a message containing the URL you should visit in your
+browser, and a one time code to type in once you reach this URL. That will then prompt you to log in to your Microsoft
+account, and then allow you to authorize the application to use your account.
+
+### Caching
+
+You will very likely want to set up caching here, and store at least the `microsoft_token` somewhere, so you don't have
+to log in each time your code will run. Here's some example code that caches every step of the way, always resorting to
+the "closest" functional token.
Note that this is using `pickle` to store the tokens, you may want to use JSON or other +format instead, as it would be safer. Also, be aware that these are sensitive and if compromised, someone could gain +access to your minecraft account (though only for playing, they shouldn't be able to change your password or anything +like that), so you might want to consider encrypting these cache files before storing: + +```python +from __future__ import annotations + +import logging +import pickle +from pathlib import Path + +import httpx + +from mcproto.auth.microsoft.oauth import full_microsoft_oauth +from mcproto.auth.microsoft.xbox import XSTSRequestError, xbox_auth +from mcproto.auth.msa import MSAAccount, ServicesAPIError + +log = logging.getLogger(__name__) + +MY_MSA_CLIENT_ID = "[REDACTED]" # Paste your own Client ID here +CACHE_DIR = Path(".cache/") + + +async def microsoft_login(client: httpx.AsyncClient) -> MSAAccount: # noqa: PLR0912,PLR0915 + """Obtain minecraft account using Microsoft authentication. + + This function performs full caching of every step along the way, allowing for recovery + without manual intervention for as long as at least the root token (from Microsoft OAuth2) + is valid. Any later tokens will be refreshed and re-cached once invalid. + + If all tokens are invalid, or this function was ran for the first time (without any cached + data), you will be shown a URL and a code. You have to go to this URL with your browser and + enter the code, completing the OAuth2 flow, obtaining the root token. 
+ """ + CACHE_DIR.mkdir(parents=True, exist_ok=True) + + access_token_cache = CACHE_DIR.joinpath("xbox_access_token.pickle") + if access_token_cache.exists(): + with access_token_cache.open("rb") as f: + access_token: str = pickle.load(f) # noqa: S301 + + try: + account = await MSAAccount.from_xbox_access_token(client, access_token) + log.info("Logged in with cached xbox minecraft access token") + return account + except httpx.HTTPStatusError as exc: + log.warning(f"Cached xbox minecraft access token is invalid: {exc!r}") + else: + log.warning("No cached access token available, trying Xbox Secure Token Service (XSTS) token") + + # Access token either doesn't exist, or isn't valid, try XSTS (Xbox) token + xbox_token_cache = CACHE_DIR.joinpath("xbox_xsts_token.pickle") + if xbox_token_cache.exists(): + with xbox_token_cache.open("rb") as f: + user_hash, xsts_token = pickle.load(f) # noqa: S301 + + try: + access_token = await MSAAccount._get_access_token_from_xbox(client, user_hash, xsts_token) + except ServicesAPIError as exc: + log.warning(f"Invalid cached Xbox Secure Token Service (XSTS) token: {exc!r}") + else: + log.info("Obtained xbox access token from cached Xbox Secure Token Service (XSTS) token") + log.info("Storing xbox minecraft access token to cache and restarting auth") + with access_token_cache.open("wb") as f: + pickle.dump(access_token, f) + return await microsoft_login(client) + else: + log.warning("No cached Xbox Secure Token Service (XSTS) token available, trying Microsoft OAuth2 token") + + # XSTS token either doesn't exist, or isn't valid, try Microsoft OAuth2 token + microsoft_token_cache = CACHE_DIR.joinpath("microsoft_token.pickle") + if microsoft_token_cache.exists(): + with microsoft_token_cache.open("rb") as f: + microsoft_token = pickle.load(f) # noqa: S301 + + try: + user_hash, xsts_token = await xbox_auth(client, microsoft_token) + except (httpx.HTTPStatusError, XSTSRequestError) as exc: + log.warning(f"Invalid cached Microsoft OAuth2 
token {exc!r}") + else: + log.info("Obtained Xbox Secure Token Service (XSTS) token from cached Microsoft OAuth2 token") + log.info("Storing Xbox Secure Token Service (XSTS) token to cache and restarting auth") + with xbox_token_cache.open("wb") as f: + pickle.dump((user_hash, xsts_token), f) + return await microsoft_login(client) + else: + log.warning("No cached microsoft token") + + # Microsoft OAuth2 token either doesn't exist, or isn't valid, request user auth + log.info("Running Microsoft OAuth2 flow, requesting user authentication") + microsoft_token = await full_microsoft_oauth(client, MY_MSA_CLIENT_ID) + log.info("Obtained Microsoft OAuth2 token from user authentication") + log.info("Storing Microsoft OAuth2 token and restarting auth") + with microsoft_token_cache.open("wb") as f: + pickle.dump(microsoft_token["access_token"], f) + return await microsoft_login(client) +``` + +## Minecraft (non-migrated) accounts + +If you haven't migrated your account into a Microsoft account, follow this guide for authentication. (Any newly created +Minecraft accounts will be using Microsoft accounts already.) This method of authentication is called "yggdrasil". + +!!! warning + + The account migration process has been concluded in **September 19, 2023**. See: + + + That means that it's no longer possible to migrate this old account into a microsoft account and it's only a matter + of time until the authentication servers handling these accounts are turned off entirely. + + Mcproto will remove support for this old authentication methods once this happens. + +This method of authentication doesn't require any special app registrations, however it is significantly less secure, +as you need to enter your login and password directly. 
+
+```python
+import httpx
+from mcproto.auth.yggdrasil import YggdrasilAccount
+
+LOGIN = "mail@example.com"
+PASSWORD = "my_password"
+
+async def authenticate() -> YggdrasilAccount:
+    async with httpx.AsyncClient() as client:
+        return await YggdrasilAccount.authenticate(client, login=LOGIN, password=PASSWORD)
+```
+
+The Account instance you will obtain here will contain a refresh token, and a shorter lived access token, received from
+Mojang APIs from the credentials you entered. Just like with Microsoft accounts, you may want to cache these tokens to
+avoid needless calls to request new ones and go through authentication again. That said, since doing so doesn't
+necessarily require user interaction, if you make the credentials accessible from your code directly, this is a lot
+less annoying.
+
+If you will decide to use caching, or if you plan on using these credentials in a long running program, you may see the
+access token expire. You can check whether the token is expired with the `YggdrasilAccount.validate` method, and if it
+is (call returned `False`), you can call `YggdrasilAccount.refresh` to use the refresh token to obtain a new access
+token. The refresh token is much more long lived than the access token, so this should generally be enough for you,
+although if you login from elsewhere, or after a really long time, the refresh token might be invalidated, in that
+case, you'll need to go through the full login again.
+
+## Legacy Mojang accounts
+
+If your minecraft account is still using the (really old) Mojang authentication, you can simply follow the non-migrated
+guide, as it will work with these legacy accounts too, the only change you will need to make is to use your username,
+instead of an email.
+ +[MultiMC]: https://github.com/MultiMC/Launcher +[azure-app-registration]: https://learn.microsoft.com/en-us/azure/active-directory/develop/quickstart-register-app +[azure portal]: https://portal.azure.com/#home +[mojang-api-review-article]: https://help.minecraft.net/hc/en-us/articles/16254801392141 diff --git a/docs/usage/authentication.rst b/docs/usage/authentication.rst deleted file mode 100644 index 6adb328f..00000000 --- a/docs/usage/authentication.rst +++ /dev/null @@ -1,268 +0,0 @@ -Minecraft account authentication -================================ - -Mcproto has first party support to handle authentication, allowing you to use your own minecraft account. This is -needed if you wish to login to "online mode" (non-warez) servers as a client (player). - -Microsoft (migrated) accounts ------------------------------ - -This is how authentication works for already migrated minecraft accounts, using Microsoft accounts for authentication. -(This will be most accounts. Any newly created minecraft accounts - after 2021 will always be Microsoft linked accounts.) - -Creating Azure application -^^^^^^^^^^^^^^^^^^^^^^^^^^ - -To authenticate with a microsoft account, you will need to go through the entire OAuth2 flow. Mcproto has functions to -hide pretty much all of this away, however you will need to create a new Microsoft Azure application, that mcproto will -use to obtain an access token. - -We know this is annoying, but it's a necessary step, as Microsoft only allows these applications to request OAuth2 -authentication, and to avoid potential abuse, we can't really just use our registered application (like with say -`MultiMC `_), as this token would have to be embedded into our source-code, and -since this is python, that would mean just including it here in plain text, and because mcproto is a low level library -that can be used for any kind of interactions, we can't trust that you won't abuse this token. 
- -Instead, everyone using mcproto should register a new application, and get their own MSA token for your application -that uses mcproto in the back. - -To create a new application, follow these steps (this is a simplified guide, for a full guide, feel free to check the -`Microsoft documentation `): - -#. Go to the `Azure portal `_ and log in (create an account if you need to). -#. Search for and select **Azure Active Directory**. -#. On the left navbar, under **Manage** section, click on **App registrations**. -#. Click on **New registration** on top navbar. -#. Pick a name for the application. Anyone using your app to authenticate will see this name. -#. Choose **Personal Microsoft accounts only** from the Supported account types. -#. Leave the **Redirect URI (optional)** empty. -#. Click on **Register**. - -From there, you will need to enable this application to be used for OAuth2 flows. To do that, follow these steps: - -#. On the left navbar, under **Manage** section, click on **Authentication**. -#. Set **Allow public content flows** to **Yes**. -#. Click **Save**. - -After that, you can go back to the app (click **Overview** from the left navbar), and you'll want to copy the -**Application (client) ID**. This is the ID you will need to pass to mcproto. (You will also need the **Display name**, -and the **Directory (Tenant) ID** for `Registering the application with Minecraft`_ - first time only) - -If you ever need to access this application again, follow these steps (as Microsoft Azure is pretty unintuitive, we -document this too): - -#. Go to the `Azure portal `_ and log in. -#. Click on **Azure Active Directory** (if you can't find it on the main page, you can use the search). -#. On the left navbar, under **Manage** section, click on **App registrations**. -#. Click on **View all applications from personal account** (assuming you registered the app from a personal account). -#. Click on your app. 
- -Registering the application with Minecraft -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -Previously, this step wasn't required, however due to people maliciously creating these applications to steal accounts, -Mojang have recently started to limit access to the ``https://api.minecraftservices.com``, and only allow explicitly -white listed Client IDs to use this API. - -This API is absolutely crucial step in getting the final minecraft token, and so you will need to register your Client -ID to be white listed by Mojang. Thankfully, it looks like Mojang is generally pretty lenient and at least for me, they -didn't cause any unnecessary hassles when I asked for my application to be registered, for development purposes and -work on mcproto. - -That said, you will need to wait a while (about a week, though it could be more), until Mojang reviews your application -and approves it. There isn't much we can do about this. - -To get your Azure application registered, you will need to fill out a simple form, where you accept the EULA, provide -your E-Mail, Application name, Application Client ID and Tennant ID. - -More annoyingly you will additionally also need to provide an **associated website or domain** for your project/brand. -(This application is generally designed for more user-facing programs, such as full launchers. When registering -mcproto, I just used the GitHub URL). Lastly, you'll want to describe why you need access to this API in the -**Justification** section. - -Visit the `mojang article `_ describing this process. -There is also a link to the form to fill out. - - -The code -^^^^^^^^ - -Finally, after you've managed to register your application and get it approved by Mojang, you can use it with mcproto, -go through the Microsoft OAuth2 flow and authorize this application to access your Microsoft account, which mcproto -will then use to get the minecraft token you'll then need to login to online servers. - -.. 
code-block:: python - - import httpx - from mcproto.auth.microsoft.oauth import full_microsoft_oauth - from mcproto.auth.microsoft.xbox import xbox_auth - from mcproto.auth.msa import MSAAccount - - MY_MSA_CLIENT_ID = "[REDACTED]" # Paste your own Client ID here - - async def authenticate() -> MSAAccount: - async with httpx.AsyncClient() as client: - microsoft_token = await full_microsoft_oauth(client, MY_MSA_CLIENT_ID) - user_hash, xsts_token = xbox_auth(client, microsoft_token) - return MSAAccount.xbox_auth(cilent, user_hash, xsts_token) - -Note that the :meth:`~mcproto.auth.microsoft.oauth.full_microsoft_oauth` will print a message containing the URL you -should visit in your browser, and a one time code to type in once you reach this URL. That will then prompt you to log -in to your Microsoft account, and then allow you to authorize the application to use your account. - -Caching -^^^^^^^ - -You will very likely want to set up caching here, and store at least the ``microsoft_token`` somewhere, so you don't -have to log in each time your code will run. Here's some example code that caches every step of the way, always -resorting to the "closest" functional token. Note that this is using `pickle` to store the tokens, you may want to use -JSON or other format instead, as it would be safer. Also, be aware that these are sensitive and if compromised, someone -could gain access to your minecraft account (though only for playing, they shouldn't be able to change your password or -anything like that), so you might want to consider encrypting these cache files before storing: - -.. 
code-block:: python - - from __future__ import annotations - - import logging - import pickle - from pathlib import Path - - import httpx - - from mcproto.auth.microsoft.oauth import full_microsoft_oauth - from mcproto.auth.microsoft.xbox import XSTSRequestError, xbox_auth - from mcproto.auth.msa import MSAAccount, ServicesAPIError - - log = logging.getLogger(__name__) - - MY_MSA_CLIENT_ID = "[REDACTED]" # Paste your own Client ID here - CACHE_DIR = Path(".cache/") - - - async def microsoft_login(client: httpx.AsyncClient) -> MSAAccount: # noqa: PLR0912,PLR0915 - """Obtain minecraft account using Microsoft authentication. - - This function performs full caching of every step along the way, allowing for recovery - without manual intervention for as long as at least the root token (from Microsoft OAuth2) - is valid. Any later tokens will be refreshed and re-cached once invalid. - - If all tokens are invalid, or this function was ran for the first time (without any cached - data), you will be shown a URL and a code. You have to go to this URL with your browser and - enter the code, completing the OAuth2 flow, obtaining the root token. 
- """ - CACHE_DIR.mkdir(parents=True, exist_ok=True) - - access_token_cache = CACHE_DIR.joinpath("xbox_access_token.pickle") - if access_token_cache.exists(): - with access_token_cache.open("rb") as f: - access_token: str = pickle.load(f) # noqa: S301 - - try: - account = await MSAAccount.from_xbox_access_token(client, access_token) - log.info("Logged in with cached xbox minecraft access token") - return account - except httpx.HTTPStatusError as exc: - log.warning(f"Cached xbox minecraft access token is invalid: {exc!r}") - else: - log.warning("No cached access token available, trying Xbox Secure Token Service (XSTS) token") - - # Access token either doesn't exist, or isn't valid, try XSTS (Xbox) token - xbox_token_cache = CACHE_DIR.joinpath("xbox_xsts_token.pickle") - if xbox_token_cache.exists(): - with xbox_token_cache.open("rb") as f: - user_hash, xsts_token = pickle.load(f) # noqa: S301 - - try: - access_token = await MSAAccount._get_access_token_from_xbox(client, user_hash, xsts_token) - except ServicesAPIError as exc: - log.warning(f"Invalid cached Xbox Secure Token Service (XSTS) token: {exc!r}") - else: - log.info("Obtained xbox access token from cached Xbox Secure Token Service (XSTS) token") - log.info("Storing xbox minecraft access token to cache and restarting auth") - with access_token_cache.open("wb") as f: - pickle.dump(access_token, f) - return await microsoft_login(client) - else: - log.warning("No cached Xbox Secure Token Service (XSTS) token available, trying Microsoft OAuth2 token") - - # XSTS token either doesn't exist, or isn't valid, try Microsoft OAuth2 token - microsoft_token_cache = CACHE_DIR.joinpath("microsoft_token.pickle") - if microsoft_token_cache.exists(): - with microsoft_token_cache.open("rb") as f: - microsoft_token = pickle.load(f) # noqa: S301 - - try: - user_hash, xsts_token = await xbox_auth(client, microsoft_token) - except (httpx.HTTPStatusError, XSTSRequestError) as exc: - log.warning(f"Invalid cached Microsoft OAuth2 
token {exc!r}") - else: - log.info("Obtained Xbox Secure Token Service (XSTS) token from cached Microsoft OAuth2 token") - log.info("Storing Xbox Secure Token Service (XSTS) token to cache and restarting auth") - with xbox_token_cache.open("wb") as f: - pickle.dump((user_hash, xsts_token), f) - return await microsoft_login(client) - else: - log.warning("No cached microsoft token") - - # Microsoft OAuth2 token either doesn't exist, or isn't valid, request user auth - log.info("Running Microsoft OAuth2 flow, requesting user authentication") - microsoft_token = await full_microsoft_oauth(client, MY_MSA_CLIENT_ID) - log.info("Obtained Microsoft OAuth2 token from user authentication") - log.info("Storing Microsoft OAuth2 token and restarting auth") - with microsoft_token_cache.open("wb") as f: - pickle.dump(microsoft_token["access_token"], f) - return await microsoft_login(client) - -Minecraft (non-migrated) accounts ---------------------------------- - -If you still haven't migrated your account and linked it to a Microsoft account, follow this guide for authentication. -(Any newly created Minecraft accounts will be using Microsoft accounts already.) This method of authentication is -called "yggdrasil". - -.. warning:: - Mojang has announced that they will be closing the migration period for these unmigrated accounts in **September - 19, 2023**. See: ``_ - - Once that happen, any unmigrated accounts will no longer work, and you won't be able to log in. If you're still - using an unmigrated account, it's about time to move. - - Mcproto will remove support for this old authentication methods once this happens. - -This method of authentication doesn't require any special app registrations, however it is significantly less secure, -as you need to enter your login and password directly. - -.. 
code-block:: python - - import httpx - from mcproto.auth.yggdrasil import YggdrasilAccount - - LOGIN = "mail@example.com" - PASSWORD = "my_password" - - async def authenticate() -> YggdrasilAccount: - async with httpx.AsyncClient() as client: - return YggdrasilAccount.authenticate(client, login=LOGIN, password=PASSWORD) - - -The Account instance you will obtain here will contain a refresh token, and a shorter lived access token, received from -Mojang APIs from the credentials you entered. Just like with Microsoft accounts, you may want to cache these tokens to -avoid needless calls to request new ones and go through authentication again. That said, since doing so doesn't -necessarily require user interaction, if you make the credentials accessible from your code directly, this is a lot -less annoying. - -If you will decide to use caching, or if you plan on using these credentials in a long running program, you may see the -access token expire. You can check whether the token is expired with the -:meth:`~mcproto.auth.yggdrasil.YggdrasilAccount.validate` method, and if it is (call returned ``False``), you can call -:meth:`~mcproto.auth.yggdrasil.YggdrasilAccount.refresh` to use the refresh token to obtain a new access token. The -refresh token is much more long lived than the access token, so this should generally be enough for you, although if -you login from elsewhere, or after a really long time, the refresh token might be invalidated, in that case, you'll -need to go through the full login again. - -Legacy Mojang accounts ----------------------- - -If your minecraft account is still using the (really old) Mojang authentication, you can simply follow the non-migrated -guide, as it will work with these legacy accounts too, the only change you will need to make is to use your username, -instead of an email. 
diff --git a/docs/usage/first-steps.md b/docs/usage/first-steps.md new file mode 100644 index 00000000..c6cc1103 --- /dev/null +++ b/docs/usage/first-steps.md @@ -0,0 +1,254 @@ +# Manual communication with the server + +This example demonstrates how to interact with a Minecraft server using mcproto at its lowest-level interface. It +avoids the built-in packet classes to show how to manually handle data through mcproto's connection and buffer classes. +Although this isn’t the typical use case for mcproto, it provides insight into the underlying Minecraft protocol, which +is crucial to understand before transitioning to using the higher-level packet handling. + +In this example, we'll retrieve a server's status — information displayed in the multiplayer server list, such as the +server's MOTD, icon, and player count. + +## Step-by-step guide + +### Handshake with the server + +The first step when doing pretty much any kind of communication with the server is establishing a connection and +sending a "handshake" packet. + +??? question "What even is a packet?" + + A packet is a structured piece of data sent across a network to encode an action or message. In games, packets + allow different kinds of information — such as a player's movement, an item pickup, or a chat message — to be + communicated in a structured way, with each packet tailored for a specific purpose. + + Every packet has a set structure with fields that identify it and hold its data, making it clear what action or + event the packet is meant to represent. While packets may carry different types of information, they usually follow + a similar format, so the game’s client and server can read and respond to them easily. + +To do this, we first need to understand the Minecraft packet structure in general, then focus on the specific handshake +packet format. To find this out, we recommend using [wiki.vg], which is a fantastic resource, +detailing all of the Minecraft protocol logic. 
+ +So, according to the [Packet Format][docs-packet-format] page, a Minecraft packet has three fields: + +- **Packet length**: the total size of the Packet ID and Data fields (in bytes). Sent in a variable length integer + format. +- **Packet ID**: uniquely identifies which packet this is. Also sent in the varint format. +- **Data**: the packet's actual content. This will differ depending on the packet type. + +Another important piece of information to know is that Minecraft protocol operates in “states,” each with its own set of packets +and IDs. For example, the same packet ID in one state may represent a completely different packet in another state. +Upon establishing a connection with a Minecraft server, you'll begin in the "handshaking" state, with only one packet +available: the handshake packet. This packet tells the server which state to enter next. + +In our case, we’ll request to enter the "status" state, used for obtaining server information (in contrast, the "login" +state would be used to join the server). + +Next, let’s look at the specifics of the handshake packet on wiki.vg [here][docs-handshake]. + +From here, we can see that the handshake packet has an ID of `0` and should contain the following data (fields): + +- **Protocol Version**: The version of minecraft protocol (for compatibility), sent as a varint. +- **Server Address**: The hostname or IP that was used to connect to the server, sent as a string with max length of + 255 characters. +- **Server Port**: The port number (usually 25565), sent as unsigned short. +- **Next State**: The desired state to transition to, sent as a varint. (1 for "status".) 
+ +Armed with this information, we can start writing code to send the handshake: + +```python +from mcproto.buffer import Buffer +from mcproto.connection import TCPAsyncConnection +from mcproto.protocol.base_io import StructFormat + + +async def handshake(conn: TCPAsyncConnection, ip: str, port: int = 25565) -> None: + handshake = Buffer() + # We use 47 for the protocol version, which is quite old. We do that to make sure that this code + # will work with almost any server, including older ones. Using a newer protocol number may result + # in older servers refusing to respond. + handshake.write_varint(47) + handshake.write_utf(ip) + handshake.write_value(StructFormat.USHORT, port) + handshake.write_varint(1) # The next state should be "status" + + # Nice! Now we have the packet data, stored in a buffer object. + # This is the data field in the packet format specification. + + # Let's prepare another buffer that will contain the last 2 packet format fields (packet id and data). + # We do this since the first field will require us to know the size of these two combined, + # so let's put them into 1 buffer first: + packet = Buffer() + packet.write_varint(0) # Handshake packet ID + packet.write(handshake) # The entire handshake data, from our previous buffer. + + # And finally, it's time to send it! + await conn.write_varint(len(packet)) # First field (size of packet id + data) + await conn.write(packet) # Second + Third fields (packet id + data) +``` + +### Running the code + +Now, you may be wondering how to actually run this code, what is `TCPAsyncConnection`? Essentially, it's just a wrapper +around a socket connection, designed specifically for communication with Minecraft servers. 
+ +To create an instance of this connection, you'll want to use an `async with` statement, like so: + +```python +import asyncio + +from mcproto.connection import TCPAsyncConnection + +async def main(): + ip = "mc.hypixel.net" + port = 25565 + + async with (await TCPAsyncConnection.make_client((ip, port), 2)) as connection: + await handshake(connection, ip, port) + +def start(): + # Just some boilerplate code so that we can run our asynchronous main function + asyncio.run(main()) +``` + +Currently, this code only establishes a connection and requests a state transition to "status", so when running it you +won't see any meaningful result just yet. + +!!! tip "Synchronous handling" + + Even though we're using asynchronous connection in this example, mcproto does also provide a synchronous + version: `TCPSyncConnection`. + + While you can use this synchronous option, we recommend the asynchronous approach as it highlights blocking + operations with the `await` keyword and allows other tasks to run concurrently, while these blocking operations are + waiting. + +### Obtaining server status + +Now comes the interesting part, we'll request a status from the server, and read the response that it sends us. Since +we're already in the status game state by now, we'll want to take a look at the packets that are available in this +state. Once again, wiki.vg details all of this for us [here][docs-status]. + +We can notice that the packets are split into 2 categories: **client-bound** and **server-bound**. We'll first want to +look at the server-bound ones (i.e. packets targeted to the server, sent by the client - us). There are 2 packets +listed here: Ping Request and Status request. Ping is only here to check if the server is online, and allow us to +measure how long the response took, getting the latency, we're not that interested in doing this now, we want to see +some actual useful data from the server, so we'll choose the Status request packet. 
+ +Since this packet just tells the server to send us the status, it actually doesn't contain any data fields for us to +add, so the packet itself will be empty: + +```python +from mcproto.buffer import Buffer +from mcproto.connection import TCPAsyncConnection + +async def status_request(conn: TCPAsyncConnection) -> None: + # Let's construct a buffer with the packet ID & packet data (like we saw in the handshake example already) + # However, since the status request packet doesn't contain any data, we just need to set the packet id. + packet = Buffer() + packet.write_varint(0) # Status request packet ID + + await conn.write_varint(len(packet)) + await conn.write(packet) +``` + +After we send this request, the server should respond back to us. But what will it respond with? Well, let's find out: + +```python +from mcproto.buffer import Buffer +from mcproto.connection import TCPAsyncConnection + +async def read_status_response(conn: TCPAsyncConnection) -> None: + # Remember, the packet format states that we first receive a length, then packet id, then data + _response_len = await conn.read_varint() + _response = await conn.read(_response_len) # will give us a bytearray + + # Amazing, we've just received data from the server! But it's just bytes, let's turn it into + # a Buffer object, which includes helpful methods that allow us to read from it + response = Buffer(_response) + packet_id = response.read_varint() # Remember, 2nd field is the packet ID, encoded as a varint + + print(packet_id) +``` + +Adjusting our main function to run the new logic: + +```python +async def main(): + ip = "mc.hypixel.net" + port = 25565 + + async with (await TCPAsyncConnection.make_client((ip, port), 2)) as connection: + await handshake(connection, ip, port) + await status_request(connection) + await read_status_response(connection) +``` + +Running the code now, we can see it print `0`. Aha! That's our packet ID, so let's see what the server sent us. 
So, +looking through the list of **client-bound** packets in the wiki, this is the **Status Response Packet**! + +!!! note + + Interesting, this packet has an ID of 0, wasn't that the status request packet? + + Indeed, packets can have the same ID in different directions, so packet ID `0` for a client-bound response is + distinct from packet ID `0` for a server-bound request. + +Alright then, let's see what the status response packet contains: The wiki says it just has a single UTF-8 string +field, which contains JSON data. Let's adjust our function a bit, and read that data: + +```python +import json + +from mcproto.buffer import Buffer +from mcproto.connection import TCPAsyncConnection + +async def read_status_response(conn: TCPAsyncConnection) -> dict: # We're now returning a dict + _response_len = await conn.read_varint() + _response = await conn.read(_response_len) + + response = Buffer(_response) + packet_id = response.read_varint() + + # Let's always make sure we got the status response packet here. + assert packet_id == 0 + + # Let's now read that single UTF8 string field, it should still be in our buffer: + received_string = response.read_utf() + + # Now, let's just use the json built-in library, convert the JSON string into a python object + # (in this case, it will be a dict) + data = json.loads(received_string) + + # Cool, we now have the actual status data that the server has provided, we should return them + # from the function now. + # Before we do that though, let's just do a sanity-check and ensure that the buffer doesn't contain + # any more data. 
+ assert response.remaining == 0 # 0 bytes (everything was read) + return data +``` + +Finally, we'll adjust the main function to show some of the status data that we obtained: + +```python +async def main(): + ip = "mc.hypixel.net" + port = 25565 + + async with (await TCPAsyncConnection.make_client((ip, port), 2)) as connection: + await handshake(connection, ip, port) + await status_request(connection) + data = await read_status_response(connection) + + # Wohoo, we got the status data! Let's see it + print(data["players"]["max"]) # This is the server's max player amount (slots) + print(data["players"]["online"]) # This is how many people are currently online + print(data["description"]) # And here's the motd + + # There's a bunch of other things in this data, try it out, see what you can find! +``` + +[wiki.vg]: https://wiki.vg +[docs-packet-format]: https://wiki.vg/Protocol#Packet_format +[docs-handshake]: https://wiki.vg/Protocol#Handshake +[docs-status]: https://wiki.vg/Protocol#Status diff --git a/docs/usage/index.md b/docs/usage/index.md new file mode 100644 index 00000000..e8121e50 --- /dev/null +++ b/docs/usage/index.md @@ -0,0 +1,17 @@ +# Usage + +This part of the documentation contains various guides and explanations on how to use the different parts of mcproto. + +!!! bug "Work In Progress" + + This category is still being written. Many pages are missing. + +!!! note "Didn't find what you were looking for?" + + If you were looking for a guide on something, but you didn't find it documented here and you feel like it's + something that others would benefit from seeing too, you can create a [github issue][issue guide] and ask us to + write one. 
+ + + +[issue guide]: ../contributing/issue-guide.md diff --git a/docs/usage/index.rst b/docs/usage/index.rst deleted file mode 100644 index 430806d7..00000000 --- a/docs/usage/index.rst +++ /dev/null @@ -1,13 +0,0 @@ -Usage guides -============ - -Here are some guides and explanations on how to use the various different parts of mcproto. - - -.. toctree:: - :maxdepth: 1 - :caption: Guides - - authentication.rst - -Feel free to propose any further guides, we'll be happy to add them to the list! diff --git a/docs/usage/packet-communication.md b/docs/usage/packet-communication.md new file mode 100644 index 00000000..b35203a0 --- /dev/null +++ b/docs/usage/packet-communication.md @@ -0,0 +1,187 @@ +# Packet communication + +This guide explains how to communicate with the server using our packet classes. It will go over the same example from +[previous page](./first-steps.md), showing how to obtain the server status, but instead of using the low level +interactions, this guide will simplify a lot of that logic with the use of packet classes. + +!!! warning "Packets Target the Latest Minecraft Version" + + Mcproto's packet classes are designed to support the **latest Minecraft release**. While packets in the handshaking + and status game states usually remain compatible across versions, mcproto does NOT guarantee cross-version packet + compatibility. Using packets in the play game state, for example, will very likely lead to compatibility issues if + you're working with older Minecraft versions. + + Only the low level interactions are guaranteed to remain compatible across protocol updates, if you need support + for an older minecraft version, consider downgrading to an older version of mcproto, or using the low level + interactions. + +## Obtaining the packet map + +Every packet has a unique ID based on its direction (client to server or server to client) and game state (such as +status, handshaking, login, or play). 
This ID lets us recognize packet types in different situations, which is crucial +for correctly receiving packets. + +To make this process easier, mcproto provides a packet map—essentially a dictionary mapping packet IDs to packet +classes. Here’s how to generate a packet map: + +```python +from mcproto.packets import generate_packet_map, GameState, PacketDirection + +STATUS_CLIENTBOUND_MAP = generate_packet_map(PacketDirection.CLIENTBOUND, GameState.STATUS) +``` + +Printing `STATUS_CLIENTBOUND_MAP` would display something like this: + +``` +{ + 0: + 1: , +} +``` + +Telling us that in the STATUS gamestate, for the clientbound direction, these are the only packets we can receive, +and mapping the actual packet classes for every supported packet ID number. + +## Using packets + +The first packet we send to the server is always a **Handshake** packet. This is the only packet in the entire +handshaking state, and it's a "gateway", after which we get moved to a different state, in our case, that will be the +STATUS state. + +```python +from mcproto.packets.handshaking.handshake import Handshake, NextState + +my_handshake = Handshake( + # Once again, we use an old protocol version so that even older servers will respond + protocol_version=47, + server_address="mc.hypixel.net", + server_port=25565, + next_state=NextState.STATUS, +) +``` + +That's it! We've now constructed a full handshake packet with all of the data it should contain. You might remember +from the previous low-level example, that we originally had to look at the protocol specification, find the handshake +packet and construct its data as a Buffer with all of these variables. + +With these packet classes, you can simply follow your editor's autocompletion to see what this packet requires, pass it +in and the data will be constructed for you from these attributes, without constantly cross-checking with the wiki. 
+ +For completion, let's also construct the status request packet that we were sending to instruct the server to send us +back the status response packet. + +```python +from mcproto.packets.status.status import StatusRequest + +my_status_request = StatusRequest() +``` + +This one was even easier, as the status request packet alone doesn't contain any special data, it's just a request to +the server to send us some data back. + +## Sending packets + +To actually send out a packet to the server, we'll need to create a connection, and use mcproto's `async_write_packet` +function, responsible for sending packets. Let's see it: + +```python +from mcproto.packets import async_write_packet +from mcproto.connection import TCPAsyncConnection + +async def main(): + ip = "mc.hypixel.net" + port = 25565 + + async with (await TCPAsyncConnection.make_client((ip, port), timeout=2)) as connection: + # Let's send the handshake packet that we've created in the example before + await async_write_packet(connection, my_handshake) + # Followed by the status request + await async_write_packet(connection, my_status_request) +``` + +Much easier than the manual version, isn't it? + +## Receiving packets + +Alright, we might now know how to send a packet, but how do we receive one? 
+ +Let's see, but this time, let's also try out using the synchronous connection, just for fun: + +```python +from mcproto.connection import TCPSyncConnection + +# With a synchronous connection, comes synchronous reader/writer functions +from mcproto.packets import sync_read_packet, sync_write_packet + +# We'll also need the packet classes from the status game-state +from mcproto.packets.status.status import StatusResponse +from mcproto.packets.status.ping import PingPong + +def main(): + ip = "mc.hypixel.net" + port = 25565 + + with TCPSyncConnection.make_client((ip, port), 2) as conn: + # First, send the handshake & status request, just like before, but synchronously + sync_write_packet(conn, my_handshake) + sync_write_packet(conn, my_status_request) + + # To read a packet, we'll also need to have the packet map, telling us which IDs represent + # which actual packet types. Let's pass in the map that we've constructed before: + packet = sync_read_packet(conn, STATUS_CLIENTBOUND_MAP) + + # Now that we've got back the packet, we no longer need the connection, we won't be sending + # anything else, so let's get out of the context manager. + + # Finally, let's handle the received packet: + if isinstance(packet, StatusResponse): + ... + elif isinstance(packet, PingPong): + ... + else: + raise Exception("Impossible, there are no other client bound packets in the STATUS game state") +``` + +## Requesting status + +Alright, so let's actually try to put all of this knowledge together, and create something meaningful. 
Let's replicate +the status obtaining logic from the manual example, but with these new packet classes: + +```python +from mcproto.connection import TCPAsyncConnection +from mcproto.packets import async_write_packet, async_read_packet, generate_packet_map +from mcproto.packets.packet import PacketDirection, GameState +from mcproto.packets.handshaking.handshake import Handshake, NextState +from mcproto.packets.status.status import StatusRequest, StatusResponse + +STATUS_CLIENTBOUND_MAP = generate_packet_map(PacketDirection.CLIENTBOUND, GameState.STATUS) + + +async def get_status(ip: str, port: int) -> dict: + handshake_packet = Handshake( + protocol_version=47, + server_address=ip, + server_port=port, + next_state=NextState.STATUS, + ) + status_req_packet = StatusRequest() + + async with (await TCPAsyncConnection.make_client((ip, port), 2)) as connection: + # We start out at HANDSHAKING game state + await async_write_packet(connection, handshake_packet) + # After sending the handshake, we told the server to now move us into the STATUS game state + await async_write_packet(connection, status_req_packet) + # Since we're still in STATUS game state, we use the status packet map when reading + packet = await async_read_packet(connection, STATUS_CLIENTBOUND_MAP) + + # Now, we should always first make sure it really is the packet we expected + if not isinstance(packet, StatusResponse): + raise ValueError(f"We've got an unexpected packet back: {packet!r}") + + # Since we know we really are dealing with a status response, let's get out it's data, and return it + # this is the same JSON data that we obtained from the first example with the manual interactions + return packet.data +``` + +As you can see, this approach is more convenient and eliminates much of the manual packet handling, letting you focus +on higher-level logic! 
diff --git a/mcproto/auth/account.py b/mcproto/auth/account.py index 57d8cfb5..d49b5817 100644 --- a/mcproto/auth/account.py +++ b/mcproto/auth/account.py @@ -53,10 +53,11 @@ def __init__(self, username: str, uuid: McUUID, access_token: str) -> None: async def check(self, client: httpx.AsyncClient) -> None: """Check with minecraft API whether the account information stored is valid. - :raises MismatchedAccountInfoError: - If the information received from the minecraft API didn't match the information currently - stored in the account instance. - :raises InvalidAccountAccessTokenError: If the access token is not valid. + Raises: + MismatchedAccountInfoError: + If the information received from the minecraft API didn't match the information currently + stored in the account instance. + InvalidAccountAccessTokenError: If the access token is not valid. """ res = await client.get( f"{MINECRAFT_API_URL}/minecraft/profile", diff --git a/mcproto/auth/microsoft/oauth.py b/mcproto/auth/microsoft/oauth.py index c9e459cb..405b2d5f 100644 --- a/mcproto/auth/microsoft/oauth.py +++ b/mcproto/auth/microsoft/oauth.py @@ -107,13 +107,13 @@ class MicrosoftOauthResponseData(TypedDict): async def microsoft_oauth_request(client: httpx.AsyncClient, client_id: str) -> MicrosoftOauthRequestData: """Initiate Microsoft Oauth2 flow. - This requires a ``client_id``, which can be obtained by creating an application on - `Microsoft Azure `_, + This requires a `client_id`, which can be obtained by creating an application on + [Microsoft Azure](https://learn.microsoft.com/en-us/azure/active-directory/develop/quickstart-register-app), with 'Allow public client flows' set to 'Yes' (can be set from the 'Authentication' tab). This will create a device id, used to identify our request and a user code, which the user can manually enter to - https://www.microsoft.com/link and confirm, after that, :func:`microsoft_oauth_authenticate` should be called, - with the returend device id as an argument. 
+ and confirm, after that, [`microsoft_oauth_authenticate`][..] should be called, + with the returned device id as an argument. """ data = {"client_id": client_id, "scope": "XboxLive.signin offline_access"} res = await client.post(f"{MICROSOFT_OAUTH_URL}/devicecode", data=data) @@ -129,9 +129,9 @@ async def microsoft_oauth_authenticate( ) -> MicrosoftOauthResponseData: """Complete Microsoft Oauth2 flow and authenticate. - This functon should be called after :func:`microsoft_oauth_request`. If the user has authorized the request, - we will get an access token back, allowing us to perform certain actions on behaf of the microsoft user that - has authorized this request. Alternatively, this function will fal with :exc:`MicrosoftOauthResponseError`. + This function should be called after [`microsoft_oauth_request`][..]. If the user has authorized the request, + we will get an access token back, allowing us to perform certain actions on behalf of the microsoft user that + has authorized this request. Alternatively, this function will fail with [`MicrosoftOauthResponseError`][..]. """ data = { "grant_type": "urn:ietf:params:oauth:grant-type:device_code", @@ -153,7 +153,7 @@ async def microsoft_oauth_authenticate( async def full_microsoft_oauth(client: httpx.AsyncClient, client_id: str) -> MicrosoftOauthResponseData: """Perform full Microsoft Oauth2 sequence, waiting for user to authenticated (from the browser). - See :func:`microsoft_oauth_request` (OAuth2 start) and :func:`microsoft_oauth_authenticate` (OAuth2 end). + See [`microsoft_oauth_request`][..] (OAuth2 start) and [`microsoft_oauth_authenticate`][..] (OAuth2 end). 
""" request_data = await microsoft_oauth_request(client, client_id) diff --git a/mcproto/auth/microsoft/xbox.py b/mcproto/auth/microsoft/xbox.py index f1f6c936..1542df4b 100644 --- a/mcproto/auth/microsoft/xbox.py +++ b/mcproto/auth/microsoft/xbox.py @@ -93,7 +93,7 @@ class XboxData(NamedTuple): async def xbox_auth(client: httpx.AsyncClient, microsoft_access_token: str, bedrock: bool = False) -> XboxData: """Authenticate into Xbox Live account and obtain user hash and XSTS token. - See :func:`~mcproto.auth.microsoft.oauth.full_microsoft_oauth` for info on ``microsoft_access_token``. + See [`full_microsoft_oauth`][mcproto.auth.microsoft.oauth.] for info on `microsoft_access_token`. """ # Obtain XBL token payload = { diff --git a/mcproto/auth/msa.py b/mcproto/auth/msa.py index 6a4bf621..b51b8d2a 100644 --- a/mcproto/auth/msa.py +++ b/mcproto/auth/msa.py @@ -87,8 +87,8 @@ async def _get_access_token_from_xbox(client: httpx.AsyncClient, user_hash: str, async def from_xbox_access_token(cls, client: httpx.AsyncClient, access_token: str) -> Self: """Construct the account from the xbox access token, using it to get the rest of the profile information. - See :meth:`_get_access_token_from_xbox` for how to obtain the ``access_token``. Note that - in most cases, you'll want to use :meth:`xbox_auth` rather than this method directly. + See [`_get_access_token_from_xbox`][..] for how to obtain the `access_token`. Note that + in most cases, you'll want to use [`xbox_auth`][..] rather than this method directly. """ res = await client.get( f"{MC_SERVICES_API_URL}/minecraft/profile", headers={"Authorization": f"Bearer {access_token}"} @@ -102,7 +102,8 @@ async def from_xbox_access_token(cls, client: httpx.AsyncClient, access_token: s async def xbox_auth(cls, client: httpx.AsyncClient, user_hash: str, xsts_token: str) -> Self: """Authenticate using an XSTS token from Xbox Live auth (for Microsoft accounts). 
- See :func:`mcproto.auth.microsoft.xbox.xbox_auth` for how to obtain the ``user_hash`` and ``xsts_token``. + See [`mcproto.auth.microsoft.xbox.xbox_auth`][mcproto.auth.microsoft.xbox.xbox_auth] for how to obtain + the `user_hash` and `xsts_token`. """ access_token = await cls._get_access_token_from_xbox(client, user_hash, xsts_token) return await cls.from_xbox_access_token(client, access_token) diff --git a/mcproto/auth/yggdrasil.py b/mcproto/auth/yggdrasil.py index d7428cc0..b6fb09e9 100644 --- a/mcproto/auth/yggdrasil.py +++ b/mcproto/auth/yggdrasil.py @@ -167,7 +167,7 @@ async def validate(self, client: httpx.AsyncClient) -> bool: """Check if the access token is (still) usable for authentication with a Minecraft server. If this method fails, the stored access token is no longer usable for for authentcation - with a Minecraft server, but should still be good enough for :meth:`refresh`. + with a Minecraft server, but should still be good enough for [`refresh`][..]. This mainly happens when one has used another client (e.g. another launcher). """ @@ -190,8 +190,9 @@ async def validate(self, client: httpx.AsyncClient) -> bool: async def authenticate(cls, client: httpx.AsyncClient, login: str, password: str) -> Self: """Authenticate using the Yggdrasil system (for non-Microsoft accounts). - :param login: E-Mail of your Minecraft account, or username for (really old) Mojang accounts. - :param password: Plaintext account password. + Args: + login: E-Mail of your Minecraft account, or username for (really old) Mojang accounts. + password: Plaintext account password. """ # Any random string, we use a random v4 uuid, needs to remain same in further communications client_token = str(uuid4()) @@ -227,8 +228,9 @@ async def authenticate(cls, client: httpx.AsyncClient, login: str, password: str async def signout(self, client: httpx.AsyncClient, username: str, password: str) -> None: """Sign out using the Yggdrasil system (for non-Microsoft accounts). 
- :param login: E-Mail of your Minecraft account, or username for (really old) Mojang accounts. - :param password: Plaintext account password. + Args: + login: E-Mail of your Minecraft account, or username for (really old) Mojang accounts. + password: Plaintext account password. """ payload = { "username": username, diff --git a/mcproto/buffer.py b/mcproto/buffer.py index e9c94adc..7ce87bfd 100644 --- a/mcproto/buffer.py +++ b/mcproto/buffer.py @@ -18,7 +18,7 @@ def __init__(self, *args, **kwargs): @override def write(self, data: bytes | bytearray) -> None: - """Write/Store given ``data`` into the buffer.""" + """Write/Store given `data` into the buffer.""" self.extend(data) @override @@ -27,18 +27,19 @@ def read(self, length: int) -> bytes: Reading data doesn't remove that data, rather that data is treated as already read, and next read will start from the first unread byte. If freeing the data is necessary, check - the :meth:`.clear` function. + the [`clear`][..] function. - :param length: - Amount of bytes to be read. + Args: + length: + Amount of bytes to be read. - If the requested amount can't be read (buffer doesn't contain that much data/buffer - doesn't contain any data), an :exc:`IOError` will be reaised. + If the requested amount can't be read (buffer doesn't contain that much data/buffer + doesn't contain any data), an [`IOError`][IOError] will be reaised. - If there were some data in the buffer, but it was less than requested, this remaining - data will still be depleted and the partial data that was read will be a part of the - error message in the :exc:`IOError`. This behavior is here to mimic reading from a real - socket connection. + If there were some data in the buffer, but it was less than requested, this remaining + data will still be depleted and the partial data that was read will be a part of the + error message in the [`IOError`][IOError]. This behavior is here to mimic reading from a real + socket connection. 
""" end = self.pos + length @@ -60,14 +61,15 @@ def read(self, length: int) -> bytes: def clear(self, only_already_read: bool = False) -> None: """Clear out the stored data and reset position. - :param only_already_read: - When set to ``True``, only the data that was already marked as read will be cleared, - and the position will be reset (to start at the remaining data). This can be useful - for avoiding needlessly storing large amounts of data in memory, if this data is no - longer useful. + Args: + only_already_read: + When set to `True`, only the data that was already marked as read will be cleared, + and the position will be reset (to start at the remaining data). This can be useful + for avoiding needlessly storing large amounts of data in memory, if this data is no + longer useful. - Otherwise, if set to ``False``, all of the data is cleared, and the position is reset, - essentially resulting in a blank buffer. + Otherwise, if set to `False`, all of the data is cleared, and the position is reset, + essentially resulting in a blank buffer. """ if only_already_read: del self[: self.pos] diff --git a/mcproto/connection.py b/mcproto/connection.py index 406c436d..d73582b5 100644 --- a/mcproto/connection.py +++ b/mcproto/connection.py @@ -40,18 +40,19 @@ def __init__(self): self.encryption_enabled = False def enable_encryption(self, shared_secret: bytes) -> None: - """Enable encryption for this connection, using the ``shared_secret``. + """Enable encryption for this connection, using the `shared_secret`. After calling this method, the reading and writing process for this connection will be altered, and any future communication will be encrypted/decrypted there. You will need to call this method after sending the - :class:`~mcproto.packets.login.login.LoginEncryptionResponse` packet. + [`LoginEncryptionResponse`][mcproto.packets.login.login.] packet. - :param shared_secret: - This is the cipher key for the AES symetric cipher used for the encryption. 
+ Args: + shared_secret: + This is the cipher key for the AES symetric cipher used for the encryption. - See :func:`mcproto.encryption.generate_shared_secret`. + See [`generate_shared_secret`][mcproto.encryption.]. """ # Ensure the `shared_secret` is an instance of the bytes class, not any # subclass. This is needed since the cryptography library calls some C @@ -69,13 +70,16 @@ def enable_encryption(self, shared_secret: bytes) -> None: @classmethod @abstractmethod def make_client(cls, address: tuple[str, int], timeout: float) -> Self: - """Construct a client connection (Client -> Server) to given server ``address``. + """Construct a client connection (Client -> Server) to given server `address`. - :param address: Address of the server to connection to. - :param timeout: - Amount of seconds to wait for the connection to be established. - If connection can't be established within this time, :exc:`TimeoutError` will be raised. - This timeout is then also used for any further data receiving. + Args: + address: Address of the server to connection to. + timeout: + Amount of seconds to wait for the connection to be established. + + If a connection can't be established within this time, [`TimeoutError`][TimeoutError] will be raised. + + This timeout is then also used for any further data receiving. """ raise NotImplementedError @@ -91,14 +95,14 @@ def close(self) -> None: @abstractmethod def _write(self, data: bytes, /) -> None: - """Send raw ``data`` through this specific connection.""" + """Send raw `data` through this specific connection.""" raise NotImplementedError @override def write(self, data: bytes | bytearray, /) -> None: - """Send given ``data`` over the connection. + """Send given `data` over the connection. - Depending on :attr:`encryption_enabled` flag (set from :meth:`enable_encryption`), + Depending on `encryption_enabled` flag (set from [`enable_encryption`][..]), this might also perform an encryption of the input data. 
""" if not isinstance(data, bytes): @@ -113,10 +117,11 @@ def write(self, data: bytes | bytearray, /) -> None: def _read(self, length: int, /) -> bytes: """Receive raw data from this specific connection. - :param length: - Amount of bytes to be received. If the requested amount can't be received - (server didn't send that much data/server didn't send any data), an :exc:`IOError` - will be raised. + Args: + length: + Amount of bytes to be received. If the requested amount can't be received + (server didn't send that much data/server didn't send any data), an + [`IOError`][IOError] will be raised. """ raise NotImplementedError @@ -124,13 +129,14 @@ def _read(self, length: int, /) -> bytes: def read(self, length: int, /) -> bytes: """Receive data sent through the connection. - Depending on :attr:`encryption_enabled` flag (set from :meth:`enable_encryption`), + Depending on `encryption_enabled` flag (set from [`enable_encryption`][..]), this might also perform a decryption of the received data. - :param length: - Amount of bytes to be received. If the requested amount can't be received - (server didn't send that much data/server didn't send any data), an :exc:`IOError` - will be raised. + Args: + length: + Amount of bytes to be received. If the requested amount can't be received + (server didn't send that much data/server didn't send any data), an + [`IOError`][IOError] will be raised. """ data = self._read(length) @@ -157,18 +163,19 @@ def __init__(self): self.encryption_enabled = False def enable_encryption(self, shared_secret: bytes) -> None: - """Enable encryption for this connection, using the ``shared_secret``. + """Enable encryption for this connection, using the `shared_secret`. After calling this method, the reading and writing process for this connection will be altered, and any future communication will be encrypted/decrypted there. You will need to call this method after sending the - :class:`~mcproto.packets.login.login.LoginEncryptionResponse` packet. 
+ [`LoginEncryptionResponse`][mcproto.packets.login.login.] packet. - :param shared_secret: - This is the cipher key for the AES symetric cipher used for the encryption. + Args: + shared_secret: + This is the cipher key for the AES symetric cipher used for the encryption. - See :func:`mcproto.encryption.generate_shared_secret`. + See [`generate_shared_secret`][mcproto.encryption.]. """ # Ensure the `shared_secret` is an instance of the bytes class, not any # subclass. This is needed since the cryptography library calls some C @@ -186,13 +193,16 @@ def enable_encryption(self, shared_secret: bytes) -> None: @classmethod @abstractmethod async def make_client(cls, address: tuple[str, int], timeout: float) -> Self: - """Construct a client connection (Client -> Server) to given server ``address``. + """Construct a client connection (Client -> Server) to given server `address`. + + Args: + address: Address of the server to connection to. + timeout: + Amount of seconds to wait for the connection to be established. + + If a connection can't be established within this time, [`TimeoutError`][TimeoutError] will be raised. - :param address: Address of the server to connection to. - :param timeout: - Amount of seconds to wait for the connection to be established. - If connection can't be established within this time, :exc:`TimeoutError` will be raised. - This timeout is then also used for any further data receiving. + This timeout is then also used for any further data receiving. """ raise NotImplementedError @@ -208,14 +218,14 @@ async def close(self) -> None: @abstractmethod async def _write(self, data: bytes, /) -> None: - """Send raw ``data`` through this specific connection.""" + """Send raw `data` through this specific connection.""" raise NotImplementedError @override async def write(self, data: bytes | bytearray, /) -> None: - """Send given ``data`` over the connection. + """Send given `data` over the connection. 
- Depending on :attr:`encryption_enabled` flag (set from :meth:`enable_encryption`), + Depending on `encryption_enabled` flag (set from [`enable_encryption`][..]), this might also perform an encryption of the input data. """ if not isinstance(data, bytes): @@ -230,10 +240,11 @@ async def write(self, data: bytes | bytearray, /) -> None: async def _read(self, length: int, /) -> bytes: """Receive raw data from this specific connection. - :param length: - Amount of bytes to be received. If the requested amount can't be received - (server didn't send that much data/server didn't send any data), an :exc:`IOError` - will be raised. + Args: + length: + Amount of bytes to be received. If the requested amount can't be received + (server didn't send that much data/server didn't send any data), an + [`IOError`][IOError] will be raised. """ raise NotImplementedError @@ -241,13 +252,14 @@ async def _read(self, length: int, /) -> bytes: async def read(self, length: int, /) -> bytes: """Receive data sent through the connection. - Depending on :attr:`encryption_enabled` flag (set from :meth:`enable_encryption`), + Depending on `encryption_enabled` flag (set from [`enable_encryption`][..]), this might also perform a decryption of the received data. - :param length: - Amount of bytes to be received. If the requested amount can't be received - (server didn't send that much data/server didn't send any data), an :exc:`IOError` - will be raised. + Args: + length: + Amount of bytes to be received. If the requested amount can't be received + (server didn't send that much data/server didn't send any data), an + [`IOError`][IOError] will be raised. 
""" data = await self._read(length) @@ -265,7 +277,7 @@ async def __aexit__(self, *a, **kw) -> None: class TCPSyncConnection(SyncConnection, Generic[T_SOCK]): - """Synchronous connection using a TCP :class:`~socket.socket`.""" + """Synchronous connection using a TCP [`socket`][?socket.].""" __slots__ = ("socket",) @@ -318,7 +330,7 @@ def _close(self) -> None: class TCPAsyncConnection(AsyncConnection, Generic[T_STREAMREADER, T_STREAMWRITER]): - """Asynchronous TCP connection using :class:`~asyncio.StreamWriter` and :class:`~asyncio.StreamReader`.""" + """Asynchronous TCP connection using [`StreamWriter`][?asyncio.] and [`StreamReader`][?asyncio.].""" __slots__ = ("reader", "timeout", "writer") @@ -364,14 +376,14 @@ async def _close(self) -> None: @property def socket(self) -> socket.socket: - """Obtain the underlying socket behind the :class:`~asyncio.Transport`.""" + """Obtain the underlying socket behind the [`Transport`][?asyncio.].""" # TODO: This should also have pyright: ignore[reportPrivateUsage] # See: https://github.com/DetachHead/basedpyright/issues/494 return self.writer.transport._sock # pyright: ignore[reportAttributeAccessIssue] class UDPSyncConnection(SyncConnection, Generic[T_SOCK]): - """Synchronous connection using a UDP :class:`~socket.socket`.""" + """Synchronous connection using a UDP [`socket`][?socket.].""" __slots__ = ("address", "socket") @@ -408,7 +420,7 @@ def _close(self) -> None: class UDPAsyncConnection(AsyncConnection, Generic[T_DATAGRAM_CLIENT]): - """Asynchronous UDP connection using :class:`~asyncio_dgram.DatagramClient`.""" + """Asynchronous UDP connection using `asyncio_dgram.DatagramClient`.""" __slots__ = ("stream", "timeout") diff --git a/mcproto/encryption.py b/mcproto/encryption.py index d702dc13..99c25caf 100644 --- a/mcproto/encryption.py +++ b/mcproto/encryption.py @@ -10,7 +10,7 @@ def generate_shared_secret() -> bytes: # pragma: no cover """Generate a random shared secret for client. 
- This secret will be sent to the server in :class:`~mcproto.packets.login.login.LoginEncryptionResponse` packet, + This secret will be sent to the server in [`LoginEncryptionResponse`][mcproto.packets.login.login.] packet, and used to encrypt all future communication afterwards. This will be symetric encryption using AES/CFB8 stream cipher. And this shared secret will be 16-bytes long. @@ -21,7 +21,7 @@ def generate_shared_secret() -> bytes: # pragma: no cover def generate_verify_token() -> bytes: # pragma: no cover """Generate a random verify token. - This token will be sent by the server in :class:`~mcproto.packets.login.login.LoginEncryptionRequest`, to be + This token will be sent by the server in [`LoginEncryptionRequest`][mcproto.packets.login.login.], to be encrypted by the client as a form of verification. This token doesn't need to be cryptographically secure, it's just a sanity check that @@ -33,9 +33,9 @@ def generate_verify_token() -> bytes: # pragma: no cover def generate_rsa_key() -> RSAPrivateKey: # pragma: no cover """Generate a random RSA key pair for server. - This key pair will be used for :class:`~mcproto.packets.login.login.LoginEncryptionRequest` packet, + This key pair will be used for [`LoginEncryptionRequest`][mcproto.packets.login.login.] packet, where the client will be sent the public part of this key pair, which will be used to encrypt the - shared secret (and verification token) sent in :class:`~mcproto.packets.login.login.LoginEncryptionResponse` + shared secret (and verification token) sent in [`LoginEncryptionResponse`][mcproto.packets.login.login.] packet. The server will then use the private part of this key pair to decrypt that. This will be a 1024-bit RSA key pair. @@ -54,10 +54,13 @@ def encrypt_token_and_secret( ) -> tuple[bytes, bytes]: """Encrypts the verification token and shared secret with the server's public key. 
- :param public_key: The RSA public key provided by the server - :param verification_token: The verification token provided by the server - :param shared_secret: The generated shared secret - :return: A tuple containing (encrypted token, encrypted secret) + Args: + public_key: The RSA public key provided by the server + verification_token: The verification token provided by the server + shared_secret: The generated shared secret + + Returns: + A tuple containing (encrypted token, encrypted secret) """ # Ensure both the `shared_secret` and `verification_token` are instances # of the bytes class, not any subclass. This is needed since the cryptography @@ -80,10 +83,13 @@ def decrypt_token_and_secret( ) -> tuple[bytes, bytes]: """Decrypts the verification token and shared secret with the server's private key. - :param private_key: The RSA private key generated by the server - :param verification_token: The verification token encrypted and sent by the client - :param shared_secret: The shared secret encrypted and sent by the client - :return: A tuple containing (decrypted token, decrypted secret) + Args: + private_key: The RSA private key generated by the server + verification_token: The verification token encrypted and sent by the client + shared_secret: The shared secret encrypted and sent by the client + + Returns: + A tuple containing (decrypted token, decrypted secret) """ # Ensure both the `shared_secret` and `verification_token` are instances # of the bytes class, not any subclass. 
This is needed since the cryptography diff --git a/mcproto/multiplayer.py b/mcproto/multiplayer.py index 752f7d14..888065e7 100644 --- a/mcproto/multiplayer.py +++ b/mcproto/multiplayer.py @@ -26,7 +26,7 @@ class UserJoinRequestErrorKind(str, Enum): - """Enum for various different kinds of exceptions that can occur during :func:`join_request`.""" + """Enum for various different kinds of exceptions that can occur during [`join_request`][..].""" BANNED_FROM_MULTIPLAYER = "User with has been banned from multiplayer." XBOX_MULTIPLAYER_DISABLED = "User's Xbox profile has multiplayer disabled." @@ -44,11 +44,11 @@ def from_status_error(cls, code: int, err_msg: str | None) -> UserJoinRequestErr class UserJoinRequestFailedError(Exception): - """Exception raised when :func:`join_request` fails. + """Exception raised when [`join_request`][..] fails. - This can be caused by various reasons. See: :class:`UserJoinRequestErrorKind` enum class. - The most likely case for this error is invalid authentication token, or the user being - banned from multiplayer. + This can be caused by various reasons. See: [`UserJoinRequestErrorKind`][..] enum class, + containing all the possible reasons. The most likely case for this error is invalid + authentication token, or the user being banned from multiplayer. """ def __init__(self, exc: httpx.HTTPStatusError): @@ -82,14 +82,14 @@ def __repr__(self) -> str: class UserJoinCheckFailedError(Exception): - """Exception raised when :func:`join_check` fails. + """Exception raised when [`join_check`][..] fails. This signifies that the Minecraft session API server didn't contain a join request for the `server_hash` and `client_username`, and it therefore didn't acknowledge the join. This means the user didn't confirm this join with Minecraft API (didn't call - :func:`join_request`), hence the validity of this account can't be verified. The server - should kick the user and end the join flow. 
+ [`join_request`][..]), hence the validity of this account can't be verified. + The server should kick the user and end the join flow. """ def __init__(self, response: httpx.Response, client_username: str, server_hash: str, client_ip: str | None): @@ -107,7 +107,7 @@ def __repr__(self) -> str: class JoinAcknowledgeProperty(TypedDict): - """Skin blob data from :class:`JoinAcknowledgeData`.""" + """Skin blob data from [`JoinAcknowledgeData`][..].""" name: str value: str @@ -115,10 +115,10 @@ class JoinAcknowledgeProperty(TypedDict): class JoinAcknowledgeData(TypedDict): - """Response from :func:`join_check` (hasJoined minecraft API endpoint). + """Response from [`join_check`][..] (hasJoined minecraft API endpoint). - This response contains information on the user has submitted the :func:`join_request`. - (uuid, name, and player skin properties) + This response contains information on the user has submitted the + [`join_request`][..]. (uuid, name, and player skin properties) """ id: str @@ -129,18 +129,19 @@ class JoinAcknowledgeData(TypedDict): def compute_server_hash(server_id: str, shared_secret: bytes, server_public_key: RSAPublicKey) -> str: """Compute a hash to be sent as 'serverId' field to Mojang session server. - This function is used for :func:`join_request` and :func:`join_check` functions, which require + This function is used for [`join_request`][..] and [`join_check`][..] functions, which require this hash value. - This SHA1 hash is computed based on the ``server_id``, ``server_public_key`` and ``shared_secret``. + This SHA1 hash is computed based on the `server_id`, `server_public_key` and `shared_secret`. Together, these values ensure that there can't be any middle-man listening in after encryption is established. This is because a middle man/proxy who would want to listed into the encrypted communication would - need to know the encryption key (``shared_secret``). 
A proxy can capture this key, as the client - sends it over to the server in :class:`~mcproto.packets.login.login.LoginEncryptionResponse` packet, + need to know the encryption key (`shared_secret`). A proxy can capture this key, as the client + sends it over to the server in [`LoginEncryptionResponse`][mcproto.packets.login.login.] packet, however it is sent encrypted. The client performs this encryption with a public key, which it got - from the server, in :class:`mcproto.packets.login.login.LoginEncryptionRequest` packet. + from the server, in [`LoginEncryptionRequest`][mcproto.packets.login.login.] + packet. That mans that for a proxy to be able to actually obtain this shared secret value, it would need to be able to capture the encryption response, and decrypt the shared secret value. That means it would @@ -185,23 +186,24 @@ async def join_request(client: httpx.AsyncClient, account: Account, server_hash: """Inform the Mojang session server about this new user join. This function is called by the client, when joining an online mode (non-warez) server. This is - required and the server will check that this request was indeed made (:func:`join_check`). + required and the server will check that this request was indeed made ([`join_check`][..]). This request should be performed after receiving the - :class:`~mcproto.packets.login.login.LoginEncryptionRequest` packet, but before sending the - :class:`~mcproto.packets.login.login.LoginEncryptionResponse`. + [`LoginEncryptionRequest`][mcproto.packets.login.login.] packet, but before sending the + [`LoginEncryptionResponse`][mcproto.packets.login.login.]. - Performing this request requires an :class:`~mcproto.auth.account.Account` instance, as this request + Performing this request requires an [`Account`][mcproto.auth.account.] instance, as this request is here to ensure that only original Minceraft accounts (officially bought accounts) can join. 
- This request uses a ``server_hash`` to identify which server is the client attempting to join. This + This request uses a `server_hash` to identify which server is the client attempting to join. This hash is composed of various values, which together serve as a way to prevent any MITMA (man in the - middle attacks). To obtain this hash, see :func:`compute_server_hash`. This function's docstring + middle attacks). To obtain this hash, see [`compute_server_hash`][..]. This function's docstring also includes description for why and how this prevents a MITMA. - :param client: HTTPX async client to make the HTTP request with. - :param account: Instance of an account containing the minecraft token necessary for this request. - :param server_hash: SHA1 hash of the server (see :func:`compute_server_hash`) + Args: + client: HTTPX async client to make the HTTP request with. + account: Instance of an account containing the minecraft token necessary for this request. + server_hash: SHA1 hash of the server (see [`compute_server_hash`][..]) """ payload = { "accessToken": account.access_token, @@ -231,30 +233,31 @@ async def join_check( This function is called by the server in online mode (non-warez), to verify that the joining client really does have an official minecraft account. The client will first inform the server about this - join request (:func:`join_request`), server then runs this check confirming the client is who they + join request ([`join_request`][..]), server then runs this check confirming the client is who they say they are. This request should be performed after receiving the after receiving the - :class:`~mcproto.packets.login.login.LoginEncryptionResponse` packet. + [`LoginEncryptionResponse`][mcproto.packets.login.login.] packet. 
-    This request uses a ``server_hash``, this is the value under which the client has submitted their
+    This request uses a `server_hash`, this is the value under which the client has submitted their
     join request, and we'll now be checking for that submission with that same value. This is a hash
     composed of various values, which together serve as a way to prevent any MITMA (man in the middle
-    attacks). To obtain this hash, see :func:`compute_server_hash`. This function's docstring also
+    attacks). To obtain this hash, see [`compute_server_hash`][..]. This function's docstring also
     includes description for why and how this prevents a MITMA.

-    :param client: HTTPX async client to make the HTTP request with.
-    :param client_username:
-        Must match joining the username of the joining client (case sensitive).
+    Args:
+        client: HTTPX async client to make the HTTP request with.
+        client_username:
+            Must match the username of the joining client (case sensitive).

-        Note: This is the in-game nickname of the selected profile, not Mojang account name
-        (which is never sent to the server). Servers should use the name in "name" field which was
-        received in the :class:`~mcproto.packets.login.login.LoginStart` packet.
-    :param server_hash: SHA1 hash of the server (see :func:`compute_server_hash`)
-    :param client_ip:
-        IP address of the connecting player (optional)
+            Note: This is the in-game nickname of the selected profile, not Mojang account name
+            (which is never sent to the server). Servers should use the name in "name" field which was
+            received in the [`LoginStart`][mcproto.packets.login.login.] packet. 
+ server_hash: SHA1 hash of the server (see [`compute_server_hash`][..]) + client_ip: + IP address of the connecting player (optional) - Servers only include this when 'prevent-proxy-connections' is set to true in server.properties + Servers only include this when 'prevent-proxy-connections' is set to true in server.properties """ params = {"username": client_username, "serverId": server_hash} if client_ip is not None: diff --git a/mcproto/packets/handshaking/handshake.py b/mcproto/packets/handshaking/handshake.py index 51898635..f4593cfb 100644 --- a/mcproto/packets/handshaking/handshake.py +++ b/mcproto/packets/handshaking/handshake.py @@ -17,7 +17,7 @@ class NextState(IntEnum): - """Enum of all possible next game states we can transition to from the :class:`Handshake` packet.""" + """Enum of all possible next game states we can transition to from the [`Handshake`][(m).] packet.""" STATUS = 1 LOGIN = 2 @@ -30,10 +30,11 @@ class Handshake(ServerBoundPacket): Initialize the Handshake packet. - :param protocol_version: Protocol version number to be used. - :param server_address: The host/address the client is connecting to. - :param server_port: The port the client is connecting to. - :param next_state: The next state for the server to move into. + Args: + protocol_version: Protocol version number to be used. + server_address: The host/address the client is connecting to. + server_port: The port the client is connecting to. + next_state: The next state for the server to move into. """ PACKET_ID: ClassVar[int] = 0x00 diff --git a/mcproto/packets/interactions.py b/mcproto/packets/interactions.py index 0c4ec6bd..9acb45ea 100644 --- a/mcproto/packets/interactions.py +++ b/mcproto/packets/interactions.py @@ -46,12 +46,13 @@ def _serialize_packet(packet: Packet, *, compression_threshold: int = -1) -> Buffer: """Serialize the internal packet data, along with it's packet id. - :param packet: The packet to serialize. 
- :param compression_threshold: - A threshold for the packet length (in bytes), which if surpassed compression should - be enabeld. To disable compression, set this to -1. Note that when enabled, even if - the threshold isn't crossed, the packet format will be different than with compression - disabled. + Args: + packet: The packet to serialize. + compression_threshold: + A threshold for the packet length (in bytes), which if surpassed compression should + be enabled. To disable compression, set this to -1. Note that when enabled, even if + the threshold isn't crossed, the packet format will be different than with compression + disabled. """ packet_data = packet.serialize() @@ -85,17 +86,18 @@ def _deserialize_packet( ) -> T_Packet: """Deserialize the packet id and it's internal data. - :param packet_map: - A mapping of packet id (int) -> packet. Should hold all possible packets for the - current gamestate and direction. See :func:`~mcproto.packets.packet_map.generate_packet_map` - :param compressed: - Boolean flag, if compression is enabled, it should be set to ``True``, ``False`` otherwise. - - You can get this based on :class:`~mcproto.packets.login.login.LoginSetCompression` packet, - which will contain a compression threshold value. This threshold is only useful when writing - the packets, for reading, we don't care about the specific threshold, we only need to know - whether compression is enabled or not. That is, if the threshold is set to a non-negative - number, this should be ``True``. + Args: + packet_map: + A mapping of packet id (int) -> packet. Should hold all possible packets for the + current gamestate and direction. See [`generate_packet_map`][mcproto.packets.packet_map.] + compressed: + Boolean flag, if compression is enabled, it should be set to `True`, `False` otherwise. + + You can get this based on [`LoginSetCompression`][mcproto.packets.login.login.] packet, + which will contain a compression threshold value. 
This threshold is only useful when writing + the packets, for reading, we don't care about the specific threshold, we only need to know + whether compression is enabled or not. That is, if the threshold is set to a non-negative + number, this should be `True`. """ if compressed: data_length = buf.read_varint() @@ -116,15 +118,16 @@ def sync_write_packet( *, compression_threshold: int = -1, ) -> None: - """Write given ``packet``. + """Write given `packet`. - :param writer: The connection/writer to send this packet to. - :param packet: The packet to be sent. - :param compression_threshold: - A threshold packet length, whcih if crossed compression should be enabled. + Args: + writer: The connection/writer to send this packet to. + packet: The packet to be sent. + compression_threshold: + A threshold packet length, which if crossed compression should be enabled. - You can get this number from :class:`~mcproto.packets.login.login.LoginSetCompression` packet. - If this packet wasn't sent by the server, set this to -1 (default). + You can get this number from [`LoginSetCompression`][mcproto.packets.login.login.] packet. + If this packet wasn't sent by the server, set this to -1 (default). """ data_buf = _serialize_packet(packet, compression_threshold=compression_threshold) writer.write_bytearray(data_buf) @@ -136,15 +139,16 @@ async def async_write_packet( *, compression_threshold: int = -1, ) -> None: - """Write given ``packet``. + """Write given `packet`. - :param writer: The connection/writer to send this packet to. - :param packet: The packet to be sent. - :param compression_threshold: - A threshold packet length, whcih if crossed compression should be enabled. + Args: + writer: The connection/writer to send this packet to. + packet: The packet to be sent. + compression_threshold: + A threshold packet length, which if crossed compression should be enabled. - You can get this number from :class:`~mcproto.packets.login.login.LoginSetCompression` packet. 
- If this packet wasn't sent by the server, set this to -1 (default). + You can get this number from [`LoginSetCompression`][mcproto.packets.login.login.] packet. + If this packet wasn't sent by the server, set this to -1 (default). """ data_buf = _serialize_packet(packet, compression_threshold=compression_threshold) await writer.write_bytearray(data_buf) @@ -158,22 +162,23 @@ def sync_read_packet( ) -> T_Packet: """Read a packet. - :param reader: The connection/reader to receive this packet from. - :param packet_map: - A mapping of packet id (number) -> Packet (class). + Args: + reader: The connection/reader to receive this packet from. + packet_map: + A mapping of packet id (number) -> Packet (class). - This mapping should contain all of the packets for the current gamestate and direction. - See :func:`~mcproto.packets.packet_map.generate_packet_map` - :param compression_threshold: - A threshold packet length, whcih if crossed compression should be enabled. + This mapping should contain all of the packets for the current gamestate and direction. + See [`generate_packet_map`][mcproto.packets.packet_map.] + compression_threshold: + A threshold packet length, which if crossed compression should be enabled. - You can get this number from :class:`~mcproto.packets.login.login.LoginSetCompression` packet. - If this packet wasn't sent by the server, set this to -1 (default). + You can get this number from [`LoginSetCompression`][mcproto.packets.login.login.] packet. + If this packet wasn't sent by the server, set this to -1 (default). - Note that during reading, we don't actually need to know the specific threshold, just - whether or not is is non-negative (whether compression is enabled), as the packet format - fundamentally changes when it is. That means you can pass any positive number here to - enable compresison, regardess of what it actually is. 
+        Note that during reading, we don't actually need to know the specific threshold, just
+        whether or not it is non-negative (whether compression is enabled), as the packet format
+        fundamentally changes when it is. That means you can pass any positive number here to
+        enable compression, regardless of what it actually is.
     """
     # The packet format fundamentally changes when compression_threshold is non-negative (enabeld)
     # We only care about the sepcific threshold when writing though, for reading (deserialization),
@@ -192,22 +197,23 @@ async def async_read_packet(
 ) -> T_Packet:
     """Read a packet.

-    :param reader: The connection/reader to receive this packet from.
-    :param packet_map:
-        A mapping of packet id (number) -> Packet (class).
+    Args:
+        reader: The connection/reader to receive this packet from.
+        packet_map:
+            A mapping of packet id (number) -> Packet (class).

-        This mapping should contain all of the packets for the current gamestate and direction.
-        See :func:`~mcproto.packets.packet_map.generate_packet_map`
-    :param compression_threshold:
-        A threshold packet length, whcih if crossed compression should be enabled.
+            This mapping should contain all of the packets for the current gamestate and direction.
+            See [`generate_packet_map`][mcproto.packets.packet_map.]
+        compression_threshold:
+            A threshold packet length, which if crossed compression should be enabled.

-        You can get this number from :class:`~mcproto.packets.login.login.LoginSetCompression` packet.
-        If this packet wasn't sent by the server, set this to -1 (default).
+            You can get this number from [`LoginSetCompression`][mcproto.packets.login.login.] packet.
+            If this packet wasn't sent by the server, set this to -1 (default).

-        Note that during reading, we don't actually need to know the specific threshold, just
-        whether or not is is non-negative (whether compression is enabled), as the packet format
-        fundamentally changes when it is. 
That means you can pass any positive number here to
-        enable compresison, regardess of what it actually is.
+        Note that during reading, we don't actually need to know the specific threshold, just
+        whether or not it is non-negative (whether compression is enabled), as the packet format
+        fundamentally changes when it is. That means you can pass any positive number here to
+        enable compression, regardless of what it actually is.
     """
     # The packet format fundamentally changes when compression_threshold is non-negative (enabeld)
     # We only care about the sepcific threshold when writing though, for reading (deserialization),
diff --git a/mcproto/packets/login/login.py b/mcproto/packets/login/login.py
index 7eeae828..6770e15b 100644
--- a/mcproto/packets/login/login.py
+++ b/mcproto/packets/login/login.py
@@ -33,8 +33,9 @@ class LoginStart(ServerBoundPacket):

     Initialize the LoginStart packet.

-    :param username: Username of the client who sent the request.
-    :param uuid: UUID of the player logging in (if the player doesn't have a UUID, this can be ``None``)
+    Args:
+        username: Username of the client who sent the request.
+        uuid: UUID of the player logging in (if the player doesn't have a UUID, this can be `None`)
     """

     PACKET_ID: ClassVar[int] = 0x00
@@ -63,9 +64,10 @@ class LoginEncryptionRequest(ClientBoundPacket):

     Initialize the LoginEncryptionRequest packet.

-    :param public_key: Server's public key.
-    :param verify_token: Sequence of random bytes generated by server for verification.
-    :param server_id: Empty on minecraft versions 1.7.X and higher (20 random chars pre 1.7).
+    Args:
+        public_key: Server's public key.
+        verify_token: Sequence of random bytes generated by server for verification.
+        server_id: Empty on minecraft versions 1.7.X and higher (20 random chars pre 1.7). 
""" PACKET_ID: ClassVar[int] = 0x01 @@ -108,12 +110,13 @@ def _deserialize(cls, buf: Buffer, /) -> Self: @final @define class LoginEncryptionResponse(ServerBoundPacket): - """Response from the client to :class:`LoginEncryptionRequest` packet. (Client -> Server). + """Response from the client to [`LoginEncryptionRequest`][(m).] packet. (Client -> Server). Initialize the LoginEncryptionResponse packet. - :param shared_secret: Shared secret value, encrypted with server's public key. - :param verify_token: Verify token value, encrypted with same public key. + Args: + shared_secret: Shared secret value, encrypted with server's public key. + verify_token: Verify token value, encrypted with same public key. """ PACKET_ID: ClassVar[int] = 0x01 @@ -143,8 +146,9 @@ class LoginSuccess(ClientBoundPacket): Initialize the LoginSuccess packet. - :param uuid: The UUID of the connecting player/client. - :param username: The username of the connecting player/client. + Args: + uuid: The UUID of the connecting player/client. + username: The username of the connecting player/client. """ PACKET_ID: ClassVar[int] = 0x02 @@ -173,7 +177,8 @@ class LoginDisconnect(ClientBoundPacket): Initialize the LoginDisconnect packet. - :param reason: The reason for disconnection (kick). + Args: + reason: The reason for disconnection (kick). """ PACKET_ID: ClassVar[int] = 0x00 @@ -199,9 +204,10 @@ class LoginPluginRequest(ClientBoundPacket): Initialize the LoginPluginRequest. - :param message_id: Message id, generated by the server, should be unique to the connection. - :param channel: Channel identifier, name of the plugin channel used to send data. - :param data: Data that is to be sent. + Args: + message_id: Message id, generated by the server, should be unique to the connection. + channel: Channel identifier, name of the plugin channel used to send data. + data: Data that is to be sent. 
""" PACKET_ID: ClassVar[int] = 0x04 @@ -233,8 +239,9 @@ class LoginPluginResponse(ServerBoundPacket): Initialize the LoginPluginRequest packet. - :param message_id: Message id, generated by the server, should be unique to the connection. - :param data: Optional response data, present if client understood request. + Args: + message_id: Message id, generated by the server, should be unique to the connection. + data: Optional response data, present if client understood request. """ PACKET_ID: ClassVar[int] = 0x02 @@ -264,11 +271,13 @@ class LoginSetCompression(ClientBoundPacket): Initialize the LoginSetCompression packet. - :param threshold: - Maximum size of a packet before it is compressed. All packets smaller than this will remain uncompressed. - To disable compression completely, threshold can be set to -1. + Args: + threshold: + Maximum size of a packet before it is compressed. All packets smaller than this will remain uncompressed. + To disable compression completely, threshold can be set to -1. - .. note:: This packet is optional, and if not set, the compression will not be enabled at all. + Note: + This packet is optional, and if not set, the compression will not be enabled at all. """ PACKET_ID: ClassVar[int] = 0x03 @@ -290,7 +299,7 @@ def _deserialize(cls, buf: Buffer, /) -> Self: @final @define class LoginAcknowledged(ServerBoundPacket): - """Sent by client to acknowledge LoginSuccess from server. (Client -> Server). + """Sent by client to acknowledge [`LoginSuccess`][(m).] from server. (Client -> Server). This packet has no fields - it's just an empty acknowledgment. """ diff --git a/mcproto/packets/packet.py b/mcproto/packets/packet.py index 688c6128..60a885a9 100644 --- a/mcproto/packets/packet.py +++ b/mcproto/packets/packet.py @@ -93,11 +93,12 @@ def __init__( ) -> None: """Initialize the error class. - :param packet_id: Identified packet ID. - :param game_state: Game state of the identified packet. 
- :param direction: Packet direction of the identified packet. - :param buffer: Buffer received for deserialization, that failed to parse. - :param message: Reason for the failure. + Args: + packet_id: Identified packet ID. + game_state: Game state of the identified packet. + direction: Packet direction of the identified packet. + buffer: Buffer received for deserialization, that failed to parse. + message: Reason for the failure. """ self.packet_id = packet_id self.game_state = game_state diff --git a/mcproto/packets/packet_map.py b/mcproto/packets/packet_map.py index 279db457..9867aa94 100644 --- a/mcproto/packets/packet_map.py +++ b/mcproto/packets/packet_map.py @@ -40,13 +40,13 @@ class WalkableModuleData(NamedTuple): def _walk_submodules(module: ModuleType) -> Iterator[WalkableModuleData]: - """Find all submodules of given module, that specify ``__all__``. + """Find all submodules of given module, that specify `__all__`. - If a submodule that doesn't define ``__all__`` is found, it will be skipped, as we don't + If a submodule that doesn't define `__all__` is found, it will be skipped, as we don't consider it walkable. (This is important, as we'll later need to go over all variables in - these modules, and without ``__all__`` we wouldn't know what to go over. Simply using all + these modules, and without `__all__` we wouldn't know what to go over. Simply using all defined variables isn't viable, as that would also include imported things, potentially - causing the same object to appear more than once. This makes ``__all__`` a requirement.) + causing the same object to appear more than once. This makes `__all__` a requirement.) """ def on_error(name: str) -> NoReturn: @@ -71,16 +71,17 @@ def on_error(name: str) -> NoReturn: def _walk_module_packets(module_data: WalkableModuleData) -> Iterator[type[Packet]]: - """Find all packet classes specified in module's ``__all__``. + """Find all packet classes specified in module's `__all__`. 
-    :return:
-        Iterator yielding every packet class defined in ``__all__`` of given module.
-        These objects are obtained directly using ``getattr`` from the imported module.
+    Returns:
+        Iterator yielding every packet class defined in `__all__` of given module.
+        These objects are obtained directly using `getattr` from the imported module.

-    :raises TypeError:
-        Raised when an attribute defined in ``__all__`` can't be obtained using ``getattr``.
-        This would suggest the module has incorrectly defined ``__all__``, as it includes values
-        that aren't actually defined in the module.
+    Raises:
+        TypeError:
+            Raised when an attribute defined in `__all__` can't be obtained using `getattr`.
+            This would suggest the module has incorrectly defined `__all__`, as it includes values
+            that aren't actually defined in the module.
     """
     for member_name in module_data.member_names:
         try:
@@ -109,13 +110,13 @@ def generate_packet_map(

 @lru_cache
 def generate_packet_map(direction: PacketDirection, state: GameState) -> Mapping[int, type[Packet]]:
-    """Dynamically generated a packet map for given ``direction`` and ``state``.
+    """Dynamically generate a packet map for given `direction` and `state`.

     This generation is done by dynamically importing all of the modules containing these packets,
-    filtering them to only contain those pacekts with the specified parameters, and storing those
+    filtering them to only contain those packets with the specified parameters, and storing those
     into a dictionary, using the packet id as key, and the packet class itself being the value.

-    As this fucntion is likely to be called quite often, and it uses dynamic importing to obtain
+    As this function is likely to be called quite often, and it uses dynamic importing to obtain
     the packet classes, this function is cached, which means the logic only actually runs once,
     after which, for the same arguments, the same dict will be returned. 
""" diff --git a/mcproto/packets/status/ping.py b/mcproto/packets/status/ping.py index e638a36c..03e0cf09 100644 --- a/mcproto/packets/status/ping.py +++ b/mcproto/packets/status/ping.py @@ -19,9 +19,10 @@ class PingPong(ClientBoundPacket, ServerBoundPacket): Initialize the PingPong packet. - :param payload: - Random number to test out the connection. Ideally, this number should be quite big, - however it does need to fit within the limit of a signed long long (-2 ** 63 to 2 ** 63 - 1). + Args: + payload: + Random number to test out the connection. Ideally, this number should be quite big, + however it does need to fit within the limit of a signed long long (-2 ** 63 to 2 ** 63 - 1). """ PACKET_ID: ClassVar[int] = 0x01 diff --git a/mcproto/packets/status/status.py b/mcproto/packets/status/status.py index 18418262..4ba75b4e 100644 --- a/mcproto/packets/status/status.py +++ b/mcproto/packets/status/status.py @@ -37,7 +37,8 @@ class StatusResponse(ClientBoundPacket): Initialize the StatusResponse packet. - :param data: JSON response data sent back to the client. + Args: + data: JSON response data sent back to the client. """ PACKET_ID: ClassVar[int] = 0x00 diff --git a/mcproto/protocol/base_io.py b/mcproto/protocol/base_io.py index 04125f5f..371b049f 100644 --- a/mcproto/protocol/base_io.py +++ b/mcproto/protocol/base_io.py @@ -31,8 +31,8 @@ class StructFormat(str, Enum): """All possible write/read struct types. - .. seealso: - :module:`struct` module documentation. + See Also: + [`struct`][struct] module documentation. """ BOOL = "?" @@ -101,7 +101,7 @@ async def write_value(self, fmt: Literal[StructFormat.BOOL], value: bool, /) -> async def write_value(self, fmt: Literal[StructFormat.CHAR], value: str, /) -> None: ... 
async def write_value(self, fmt: StructFormat, value: object, /) -> None: - """Write a given ``value`` as given struct format (``fmt``) in big-endian mode.""" + """Write a given `value` as given struct format (`fmt`) in big-endian mode.""" await self.write(struct.pack(">" + fmt.value, value)) async def _write_varuint(self, value: int, /, *, max_bits: int | None = None) -> None: @@ -109,9 +109,9 @@ async def _write_varuint(self, value: int, /, *, max_bits: int | None = None) -> This is a standard way of transmitting ints, and it allows smaller numbers to take less bytes. - Writing will be limited up to integer values of ``max_bits`` bits, and trying to write bigger values will rase - a :exc:`ValueError`. Note that setting ``max_bits`` to for example 32 bits doesn't mean that at most 4 bytes - will be sent, in this case it would actually take at most 5 bytes, due to the variable encoding overhead. + Writing will be limited up to integer values of `max_bits` bits, and trying to write bigger values will raise + a [`ValueError`][ValueError]. Note that setting `max_bits` to for example 32 bits doesn't mean that at most 4 + bytes will be sent, in this case it would actually take at most 5 bytes, due to the variable encoding overhead. Varints send bytes where 7 least significant bits are value bits, and the most significant bit is continuation flag bit. If this continuation bit is set (1), it indicates that there will be another varint byte sent after @@ -135,7 +135,7 @@ async def _write_varuint(self, value: int, /, *, max_bits: int | None = None) -> async def write_varint(self, value: int, /) -> None: """Write a 32-bit signed integer in a variable length format. - For more information about variable length format check :meth:`._write_varuint`. + For more information about variable length format check [`_write_varuint`][..]. 
""" val = to_twos_complement(value, bits=32) await self._write_varuint(val, max_bits=32) @@ -143,7 +143,7 @@ async def write_varint(self, value: int, /) -> None: async def write_varlong(self, value: int, /) -> None: """Write a 64-bit signed integer in a variable length format. - For more information about variable length format check :meth:`._write_varuint`. + For more information about variable length format check [`_write_varuint`][..]. """ val = to_twos_complement(value, bits=64) await self._write_varuint(val, max_bits=64) @@ -168,8 +168,8 @@ async def write_utf(self, value: str, /) -> None: worst case of 4 bytes per every character, at most 131068 data bytes will be written + 3 additional bytes from the varint encoding overhead. - :raises ValueError: - If the given string ``value`` has more characters than the allowed maximum (32767). + Raises: + ValueError: If the given string `value` has more characters than the allowed maximum (32767). """ if len(value) > 32767: raise ValueError("Maximum character limit for writing strings is 32767 characters.") @@ -179,10 +179,10 @@ async def write_utf(self, value: str, /) -> None: await self.write(data) async def write_optional(self, value: T | None, /, writer: Callable[[T], Awaitable[R]]) -> R | None: - """Write a bool showing if a ``value`` is present, if so, also writes this value with ``writer`` function. + """Write a bool showing if a `value` is present, if so, also writes this value with `writer` function. - * When ``value`` is ``None``, a bool of ``False`` will be written, and ``None`` is returned. - * When ``value`` is not ``None``, a bool of ``True`` is written, after which the ``writer`` function is called, + * When `value` is `None`, a bool of `False` will be written, and `None` is returned. + * When `value` is not `None`, a bool of `True` is written, after which the `writer` function is called, and the return value is forwarded. 
""" if value is None: @@ -218,7 +218,7 @@ def write_value(self, fmt: Literal[StructFormat.BOOL], value: bool, /) -> None: def write_value(self, fmt: Literal[StructFormat.CHAR], value: str, /) -> None: ... def write_value(self, fmt: StructFormat, value: object, /) -> None: - """Write a given ``value`` as given struct format (``fmt``) in big-endian mode.""" + """Write a given `value` as given struct format (`fmt`) in big-endian mode.""" self.write(struct.pack(">" + fmt.value, value)) def _write_varuint(self, value: int, /, *, max_bits: int | None = None) -> None: @@ -226,9 +226,9 @@ def _write_varuint(self, value: int, /, *, max_bits: int | None = None) -> None: This is a standard way of transmitting ints, and it allows smaller numbers to take less bytes. - Writing will be limited up to integer values of ``max_bits`` bits, and trying to write bigger values will rase - a :exc:`ValueError`. Note that setting ``max_bits`` to for example 32 bits doesn't mean that at most 4 bytes - will be sent, in this case it would actually take at most 5 bytes, due to the variable encoding overhead. + Writing will be limited up to integer values of `max_bits` bits, and trying to write bigger values will raise + a [`ValueError`][ValueError]. Note that setting `max_bits` to for example 32 bits doesn't mean that at most 4 + bytes will be sent, in this case it would actually take at most 5 bytes, due to the variable encoding overhead. Varints send bytes where 7 least significant bits are value bits, and the most significant bit is continuation flag bit. If this continuation bit is set (1), it indicates that there will be another varint byte sent after @@ -252,7 +252,7 @@ def _write_varuint(self, value: int, /, *, max_bits: int | None = None) -> None: def write_varint(self, value: int, /) -> None: """Write a 32-bit signed integer in a variable length format. - For more information about variable length format check :meth:`._write_varuint`. 
+ For more information about variable length format check [`_write_varuint`][..]. """ val = to_twos_complement(value, bits=32) self._write_varuint(val, max_bits=32) @@ -260,7 +260,7 @@ def write_varint(self, value: int, /) -> None: def write_varlong(self, value: int, /) -> None: """Write a 64-bit signed integer in a variable length format. - For more information about variable length format check :meth:`._write_varuint` docstring. + For more information about variable length format check [`_write_varuint`][..]. """ val = to_twos_complement(value, bits=64) self._write_varuint(val, max_bits=64) @@ -285,8 +285,8 @@ def write_utf(self, value: str, /) -> None: worst case of 4 bytes per every character, at most 131068 data bytes will be written + 3 additional bytes from the varint encoding overhead. - :raises ValueError: - If the given string ``value`` has more characters than the allowed maximum (32767). + Raises: + ValueError: If the given string `value` has more characters than the allowed maximum (32767). """ if len(value) > 32767: raise ValueError("Maximum character limit for writing strings is 32767 characters.") @@ -296,10 +296,10 @@ def write_utf(self, value: str, /) -> None: self.write(data) def write_optional(self, value: T | None, /, writer: Callable[[T], R]) -> R | None: - """Write a bool showing if a ``value`` is present, if so, also writes this value with ``writer`` function. + """Write a bool showing if a `value` is present, if so, also writes this value with `writer` function. - * When ``value`` is ``None``, a bool of ``False`` will be written, and ``None`` is returned. - * When ``value`` is not ``None``, a bool of ``True`` is written, after which the ``writer`` function is called, + * When `value` is `None`, a bool of `False` will be written, and `None` is returned. + * When `value` is not `None`, a bool of `True` is written, after which the `writer` function is called, and the return value is forwarded. 
""" if value is None: @@ -339,7 +339,7 @@ async def read_value(self, fmt: Literal[StructFormat.BOOL], /) -> bool: ... async def read_value(self, fmt: Literal[StructFormat.CHAR], /) -> str: ... async def read_value(self, fmt: StructFormat, /) -> object: - """Read a value as given struct format (``fmt``) in big-endian mode. + """Read a value as given struct format (`fmt`) in big-endian mode. The amount of bytes to read will be determined based on the struct format automatically. """ @@ -353,8 +353,8 @@ async def _read_varuint(self, *, max_bits: int | None = None) -> int: This is a standard way of transmitting ints, and it allows smaller numbers to take less bytes. - Reading will be limited up to integer values of ``max_bits`` bits, and trying to read bigger values will rase - an :exc:`IOError`. Note that setting ``max_bits`` to for example 32 bits doesn't mean that at most 4 bytes + Reading will be limited up to integer values of `max_bits` bits, and trying to read bigger values will raise + an [`IOError`][IOError]. Note that setting `max_bits` to for example 32 bits doesn't mean that at most 4 bytes will be read, in this case we would actually read at most 5 bytes, due to the variable encoding overhead. Varints send bytes where 7 least significant bits are value bits, and the most significant bit is continuation @@ -385,7 +385,7 @@ async def _read_varuint(self, *, max_bits: int | None = None) -> int: async def read_varint(self) -> int: """Read a 32-bit signed integer in a variable length format. - For more information about variable length format check :meth:`._read_varuint`. + For more information about variable length format check [`_read_varuint`][..]. """ unsigned_num = await self._read_varuint(max_bits=32) return from_twos_complement(unsigned_num, bits=32) @@ -393,7 +393,7 @@ async def read_varint(self) -> int: async def read_varlong(self) -> int: """Read a 64-bit signed integer in a variable length format. 
- For more information about variable length format check :meth:`._read_varuint`. + For more information about variable length format check [`_read_varuint`][..]. """ unsigned_num = await self._read_varuint(max_bits=64) return from_twos_complement(unsigned_num, bits=64) @@ -421,13 +421,14 @@ async def read_utf(self) -> str: worst case of 4 bytes per every character, at most 131068 data bytes will be read + 3 additional bytes from the varint encoding overhead. - :raises IOError: - * If the prefix varint is bigger than the maximum (131068) bytes, the string will not be read at all, - and :exc:`IOError` will be raised immediately. - * If the received string has more than the maximum amount of characters (32767). Note that in this case, - the string will still get read in it's entirety, since it fits into the maximum bytes limit (131068), - which was simply read at once. This limitation is here only to replicate the behavior of minecraft's - implementation. + Raises: + IOError: + * If the prefix varint is bigger than the maximum (131068) bytes, the string will not be read at all, + and [`IOError`][IOError] will be raised immediately. + * If the received string has more than the maximum amount of characters (32767). Note that in this + case, the string will still get read in it's entirety, since it fits into the maximum bytes limit + (131068), which was simply read at once. This limitation is here only to replicate the behavior of + minecraft's implementation. """ length = await self.read_varint() if length > 131068: @@ -442,10 +443,10 @@ async def read_utf(self) -> str: return chars async def read_optional(self, reader: Callable[[], Awaitable[R]]) -> R | None: - """Read a bool showing if a value is present, if so, also reads this value with ``reader`` function. + """Read a bool showing if a value is present, if so, also reads this value with `reader` function. - * When ``False`` is read, the function will not read anything and ``None`` is returned. 
- * When ``True`` is read, the ``reader`` function is called, and it's return value is forwarded. + * When `False` is read, the function will not read anything and `None` is returned. + * When `True` is read, the `reader` function is called, and it's return value is forwarded. """ if not await self.read_value(StructFormat.BOOL): return None @@ -492,8 +493,8 @@ def _read_varuint(self, *, max_bits: int | None = None) -> int: This is a standard way of transmitting ints, and it allows smaller numbers to take less bytes. - Reading will be limited up to integer values of ``max_bits`` bits, and trying to read bigger values will rase - an :exc:`IOError`. Note that setting ``max_bits`` to for example 32 bits doesn't mean that at most 4 bytes + Reading will be limited up to integer values of `max_bits` bits, and trying to read bigger values will raise + an [`IOError`][IOError]. Note that setting `max_bits` to for example 32 bits doesn't mean that at most 4 bytes will be read, in this case we would actually read at most 5 bytes, due to the variable encoding overhead. Varints send bytes where 7 least significant bits are value bits, and the most significant bit is continuation @@ -524,7 +525,7 @@ def _read_varuint(self, *, max_bits: int | None = None) -> int: def read_varint(self) -> int: """Read a 32-bit signed integer in a variable length format. - For more information about variable length format check :meth:`._read_varuint`. + For more information about variable length format check [`_read_varuint`][..]. """ unsigned_num = self._read_varuint(max_bits=32) return from_twos_complement(unsigned_num, bits=32) @@ -532,7 +533,7 @@ def read_varint(self) -> int: def read_varlong(self) -> int: """Read a 64-bit signed integer in a variable length format. - For more information about variable length format check :meth:`._read_varuint`. + For more information about variable length format check [`_read_varuint`][..]. 
""" unsigned_num = self._read_varuint(max_bits=64) return from_twos_complement(unsigned_num, bits=64) @@ -560,13 +561,14 @@ def read_utf(self) -> str: worst case of 4 bytes per every character, at most 131068 data bytes will be read + 3 additional bytes from the varint encoding overhead. - :raises IOError: - * If the prefix varint is bigger than the maximum (131068) bytes, the string will not be read at all, - and :exc:`IOError` will be raised immediately. - * If the received string has more than the maximum amount of characters (32767). Note that in this case, - the string will still get read in it's entirety, since it fits into the maximum bytes limit (131068), - which was simply read at once. This limitation is here only to replicate the behavior of minecraft's - implementation. + Raises: + IOError: + * If the prefix varint is bigger than the maximum (131068) bytes, the string will not be read at all, + and [`IOError`][IOError] will be raised immediately. + * If the received string has more than the maximum amount of characters (32767). Note that in this + case, the string will still get read in it's entirety, since it fits into the maximum bytes limit + (131068), which was simply read at once. This limitation is here only to replicate the behavior of + minecraft's implementation. """ length = self.read_varint() if length > 131068: @@ -581,10 +583,10 @@ def read_utf(self) -> str: return chars def read_optional(self, reader: Callable[[], R]) -> R | None: - """Read a bool showing if a value is present, if so, also reads this value with ``reader`` function. + """Read a bool showing if a value is present, if so, also reads this value with `reader` function. - * When ``False`` is read, the function will not read anything and ``None`` is returned. - * When ``True`` is read, the ``reader`` function is called, and it's return value is forwarded. + * When `False` is read, the function will not read anything and `None` is returned. 
+ * When `True` is read, the `reader` function is called, and it's return value is forwarded. """ if not self.read_value(StructFormat.BOOL): return None diff --git a/mcproto/protocol/utils.py b/mcproto/protocol/utils.py index 643a1e83..126d0b14 100644 --- a/mcproto/protocol/utils.py +++ b/mcproto/protocol/utils.py @@ -4,11 +4,12 @@ def to_twos_complement(number: int, bits: int) -> int: - """Convert a given ``number`` into twos complement format of given amount of ``bits``. + """Convert a given `number` into twos complement format of given amount of `bits`. - :raises ValueError: - Given ``number`` is out of range, and can't be converted into twos complement format, since - it wouldn't fit into the given amount of ``bits``. + Raises: + ValueError: + Given `number` is out of range, and can't be converted into twos complement format, since + it wouldn't fit into the given amount of `bits`. """ value_max = 1 << (bits - 1) value_min = value_max * -1 @@ -21,11 +22,12 @@ def to_twos_complement(number: int, bits: int) -> int: def from_twos_complement(number: int, bits: int) -> int: - """Convert a given ``number`` from twos complement format of given amount of ``bits``. + """Convert a given `number` from twos complement format of given amount of `bits`. - :raises ValueError: - Given ``number`` doesn't fit into given amount of ``bits``. This likely means that you're using - the wrong number, or that the number was converted into twos complement with higher amount of ``bits``. + Raises: + ValueError: + Given `number` doesn't fit into given amount of `bits`. This likely means that you're using + the wrong number, or that the number was converted into twos complement with higher amount of `bits`. 
""" value_max = (1 << bits) - 1 if number < 0 or number > value_max: diff --git a/mcproto/types/chat.py b/mcproto/types/chat.py index be6016ec..6f679015 100644 --- a/mcproto/types/chat.py +++ b/mcproto/types/chat.py @@ -44,7 +44,7 @@ class ChatMessage(MCType): __slots__ = ("raw",) def as_dict(self) -> RawChatMessageDict: - """Convert received ``raw`` into a stadard :class:`dict` form.""" + """Convert received `raw` into a standard [`dict`][dict] form.""" if isinstance(self.raw, list): return RawChatMessageDict(extra=self.raw) if isinstance(self.raw, str): @@ -60,9 +60,10 @@ def as_dict(self) -> RawChatMessageDict: def __eq__(self, other: object) -> bool: """Check equality between two chat messages. - ..warning: This is purely using the `raw` field, which means it's possible that - a chat message that appears the same, but was representing in a different way - will fail this equality check. + Warning: + This is purely using the `raw` field, which means it's possible that a chat + message that appears the same, but was representing in a different way will + fail this equality check. """ if not isinstance(other, ChatMessage): return NotImplemented diff --git a/mcproto/types/nbt.py b/mcproto/types/nbt.py index e23ae705..609f7875 100644 --- a/mcproto/types/nbt.py +++ b/mcproto/types/nbt.py @@ -35,8 +35,7 @@ """ Implementation of the NBT (Named Binary Tag) format used in Minecraft as described in the NBT specification -Source : `Minecraft NBT Spec `_ - +Source: [Minecraft NBT Spec](https://web.archive.org/web/20110723210920/http://www.minecraft.net/docs/NBT.txt) Named Binary Tag specification NBT (Named Binary Tag) is a tag based binary format designed to carry large amounts of binary data with smaller @@ -47,7 +46,7 @@ byte tagType TAG_String name - [payload] + \\[payload\\] * The tagType is a single byte defining the contents of the payload of the tag. * The name is a descriptive name, and can be anything (eg "cat", "banana", "Hello World!"). 
@@ -57,8 +56,6 @@ Note that ONLY Named Tags carry the name and tagType data. Explicitly identified Tags (such as TAG_String) only contains the payload. - -.. seealso:: :class:`NBTagType` """ # region NBT Specification @@ -74,8 +71,8 @@ class NBTagType(IntEnum): """ This tag is used to mark the end of a list. It doesn't carry any payload, and it cannot be named! - If this type appears where a Named Tag is expected, the name is assumed to be ``""``. - (In other words, this Tag is always just a single ``0x00`` byte when named, and nothing in all other cases) + If this type appears where a Named Tag is expected, the name is assumed to be `""`. + (In other words, this Tag is always just a single `0x00` byte when named, and nothing in all other cases) """ BYTE = 1 @@ -154,8 +151,11 @@ class NBTagConvertible(Protocol): def to_nbt(self, name: str = "") -> NBTag: """Convert the object to an NBT tag. - :param name: The name of the tag. - :return: The NBT tag created from the object. + Args: + name: The name of the tag. + + Returns: + The NBT tag created from the object. """ raise NotImplementedError("Derived classes need to implement this method.") @@ -193,13 +193,15 @@ class NBTag(MCType, NBTagConvertible, ABC): def serialize(self, with_type: bool = True, with_name: bool = True) -> Buffer: """Serialize the NBT tag to a new buffer. - :param with_type: - Whether to include the type of the tag in the serialization. (Passed to :meth:`_write_header`) - :param with_name: - Whether to include the name of the tag in the serialization. (Passed to :meth:`_write_header`) - :return: The buffer containing the serialized NBT tag. + Args: + with_type: Whether to include the type of the tag in the serialization. (Passed to [`_write_header`][..]) + with_name: Whether to include the name of the tag in the serialization. (Passed to [`_write_header`][..]) - .. note:: The ``with_type`` and ``with_name`` parameters only control the first level of serialization. 
+ Returns: + The buffer containing the serialized NBT tag. + + Note: + The `with_type` and `with_name` parameters only control the first level of serialization. """ buf = Buffer() self.serialize_to(buf, with_name=with_name, with_type=with_type) @@ -210,14 +212,16 @@ def serialize(self, with_type: bool = True, with_name: bool = True) -> Buffer: def deserialize(cls, buf: Buffer, with_name: bool = True, with_type: bool = True) -> NBTag: """Deserialize the NBT tag. - :param buf: The buffer to read from. - :param with_name: Whether to read the name of the tag. (Passed to :meth:`_read_header`) - :param with_type: Whether to read the type of the tag. (Passed to :meth:`_read_header`) - :return: + Args: + buf: The buffer to read from. + with_name: Whether to read the name of the tag. (Passed to [`_read_header`][..]) + with_type: Whether to read the type of the tag. (Passed to [`_read_header`][..]) + + Returns: The deserialized NBT tag. This tag will be an instance of the class, that is associated with the tag type - obtained from :meth:`_read_header` (see: :const:`ASSOCIATED_TYPES`). + obtained from [`_read_header`][..] (see: [`ASSOCIATED_TYPES`][(m).]). """ name, tag_type = cls._read_header(buf, with_name=with_name, read_type=with_type) @@ -234,11 +238,13 @@ def deserialize(cls, buf: Buffer, with_name: bool = True, with_type: bool = True def serialize_to(self, buf: Buffer, with_type: bool = True, with_name: bool = True) -> None: """Serialize the NBT tag to a buffer. - :param buf: The buffer to write to. - :param with_type: Whether to include the type of the tag in the serialization. - :param with_name: Whether to include the name of the tag in the serialization. + Args: + buf: The buffer to write to. + with_type: Whether to include the type of the tag in the serialization. + with_name: Whether to include the name of the tag in the serialization. - .. seealso:: :meth:`serialize` + See Also: + [`serialize`][..] 
""" raise NotImplementedError @@ -247,17 +253,18 @@ def serialize_to(self, buf: Buffer, with_type: bool = True, with_name: bool = Tr def read_from(cls, buf: Buffer, with_type: bool = True, with_name: bool = True) -> NBTag: """Read the NBT tag from the buffer. - Implementation shortcut used in :meth:`deserialize`. (Subclasses can override this, avoiding some - repetition when compared to overriding ``deserialize`` directly.) + Implementation shortcut used in [`deserialize`][..]. (Subclasses can override this, avoiding some + repetition when compared to overriding `deserialize` directly.) """ raise NotImplementedError def _write_header(self, buf: Buffer, with_type: bool = True, with_name: bool = True) -> None: """Write the header of the NBT tag to the buffer. - :param buf: The buffer to write to. - :param with_type: Whether to include the type of the tag in the serialization. - :param with_name: Whether to include the name of the tag in the serialization. + Args: + buf: The buffer to write to. + with_type: Whether to include the type of the tag in the serialization. + with_name: Whether to include the name of the tag in the serialization. """ if with_type: tag_type = _get_tag_type(self) @@ -269,18 +276,22 @@ def _write_header(self, buf: Buffer, with_type: bool = True, with_name: bool = T def _read_header(cls, buf: Buffer, read_type: bool = True, with_name: bool = True) -> tuple[str, NBTagType]: """Read the header of the NBT tag. - :param buf: The buffer to read from. - :param read_type: Whether to read the type of the tag from the buffer. - * If ``True``, the tag type will be read from the buffer - * If ``False`` and called from a subclass, the tag type will be inferred from the subclass. - * If ``False`` and called from the base class, the tag type will be TAG_Compound. - :param with_name: Whether to read the name of the tag. If set to ``False``, the tag will have the name ``""``. + Args: + buf: The buffer to read from. 
+ read_type: + Whether to read the type of the tag from the buffer. + + * If `True`, the tag type will be read from the buffer + * If `False` and called from a subclass, the tag type will be inferred from the subclass. + * If `False` and called from the base class, the tag type will be TAG_Compound. + with_name: Whether to read the name of the tag. If set to `False`, the tag will have the name `""`. - :return: A tuple containing the name and the tag type. + Returns: + A tuple containing the name and the tag type. - .. note:: - It is possible that this function reads nothing from the buffer if both ``with_name`` and - ``read_type`` are set to ``False``. + Note: + It is possible that this function reads nothing from the buffer if both `with_name` and + `read_type` are set to `False`. """ if read_type: try: @@ -303,32 +314,35 @@ def _read_header(cls, buf: Buffer, read_type: bool = True, with_name: bool = Tru def from_object(data: FromObjectType, schema: FromObjectSchema, name: str = "") -> NBTag: """Create an NBT tag from a python object and a schema. - :param data: - The python object to create the NBT tag from. - :param schema: - The schema used to create the NBT tags. - - This is a description of the types of the ``data`` in the python object. - It can be a subclass of :class:`NBTag` (e.g. :class:`IntNBT`, :class:`StringNBT`, :class:`CompoundNBT`, - etc.), a :class:`dict`, a :class:`list`, a :class:`tuple`, or a class that has a `to_nbt` method. - - Example of schema: - - .. code-block:: python - - schema = { - "string": StringNBT, - "list_of_floats": [FloatNBT], - "list_of_compounds": [{ - "key": StringNBT, - "value": IntNBT, - }], - "list_of_lists": [[IntNBT], [StringNBT]], - } - - This would be translated into a :class:`CompoundNBT`. - :param name: The name of the NBT tag. - :return: The NBT tag created from the python object. + Args: + data: The python object to create the NBT tag from. + schema: + The schema used to create the NBT tags. 
+ + This is a description of the types of the `data` in the python object. + It can be a subclass of [`NBTag`][(m).] (e.g. [`IntNBT`][(m).], [`StringNBT`][(m).], + [`CompoundNBT`][(m).], etc.), a [`dict`][dict], a [`list`][list], a [`tuple`][tuple], or any + class that has a `to_nbt` method. + + Example of schema: + + ```python + schema = { + "string": StringNBT, + "list_of_floats": [FloatNBT], + "list_of_compounds": [{ + "key": StringNBT, + "value": IntNBT, + }], + "list_of_lists": [[IntNBT], [StringNBT]], + } + ``` + + This would be translated into a [`CompoundNBT`][(m).]. + name: The name of the NBT tag. + + Returns: + The NBT tag created from the python object. """ # Case 0 : schema is an object with a `to_nbt` method (could be a subclass of NBTag for all we know, as long # as the data is an instance of the schema it will work) @@ -420,11 +434,14 @@ def to_object( ) -> PayloadType | Mapping[str, PayloadType] | tuple[PayloadType | Mapping[str, PayloadType], FromObjectSchema]: """Convert the NBT tag to a python object. - :param include_schema: Whether to return a schema describing the types of the original tag. - :param include_name: Whether to include the name of the tag in the output. - If the tag has no name, the name will be set to "". + Args: + include_schema: Whether to return a schema describing the types of the original tag. + include_name: + Whether to include the name of the tag in the output. + + If the tag has no name, the name will be set to `""`. - :return: + Returns: Either of: * A python object representing the payload of the tag. (default) * A dictionary containing the name associated with a python object representing the payload of the tag. @@ -455,7 +472,8 @@ def __hash__(self) -> int: def to_nbt(self, name: str = "") -> NBTag: """Convert the object to an NBT tag. - .. warning:: This is already an NBT tag, so it will modify the name of the tag and return itself. 
+ Warning: + This is already an NBT tag, so it will modify the name of the tag and return itself. """ self.name = name return self @@ -980,11 +998,14 @@ def to_object( def __eq__(self, other: object) -> bool: """Check equality between two CompoundNBT tags. - :param other: The other CompoundNBT tag to compare to. + Args: + other: The other CompoundNBT tag to compare to. - :return: True if the tags are equal, False otherwise. + Returns: + True if the tags are equal, False otherwise. - .. note:: The order of the tags is not guaranteed, but the names of the tags must match. This function assumes + Note: + The order of the tags is not guaranteed, but the names of the tags must match. This function assumes that there are no duplicate tags in the compound. """ # The order of the tags is not guaranteed diff --git a/mcproto/types/uuid.py b/mcproto/types/uuid.py index 500710b5..1cd6e6b9 100644 --- a/mcproto/types/uuid.py +++ b/mcproto/types/uuid.py @@ -15,8 +15,8 @@ class UUID(uuid.UUID, MCType): """Minecraft UUID type. - In order to support potential future changes in protocol version, and implement McType, - this is a custom subclass, however it is currently compatible with the stdlib's `uuid.UUID`. + In order to support potential future changes in protocol version, and implement [`MCType`][mcproto.types.abc.], + this is a custom subclass, however it is currently compatible with the stdlib's [`UUID`][?uuid.]. """ __slots__ = () diff --git a/mcproto/utils/abc.py b/mcproto/utils/abc.py index a03f73d4..3e46941b 100644 --- a/mcproto/utils/abc.py +++ b/mcproto/utils/abc.py @@ -14,22 +14,22 @@ class RequiredParamsABCMixin: """Mixin class to ABCs that require certain attributes to be set in order to allow initialization. - This class performs a similar check to what :class:`~abc.ABC` already does with abstractmethods, - but for class variables. The required class variable names are set with :attr:`._REQUIRED_CLASS_VARS` + This class performs a similar check to what [`ABC`][?abc.] 
already does with abstractmethods, + but for class variables. The required class variable names are set with `_REQUIRED_CLASS_VARS` class variable, which itself is automatically required. Just like with ABCs, this doesn't prevent creation of classes without these required class vars defined, only initialization is prevented. This is done to allow creation of a more specific, but still abstract class. - Additionally, you can also define :attr:`._REQUIRED_CLASS_VARS_NO_MRO` class var, holding names of + Additionally, you can also define `_REQUIRED_CLASS_VARS_NO_MRO` class var, holding names of class vars which should be defined on given class directly. That means inheritance will be ignored so even if a subclass defines the required class var, unless the latest class also defines it, this check will fail. This is often useful for classes that are expected to be slotted, as each subclass will need to define - ``__slots__``, otherwise a ``__dict__`` will automatically be made for it. However this is entirely - optional, and if :attr:`._REQUIRED_CLASS_VARS_NO_MRO` isn't set, this check is skipped. + `__slots__`, otherwise a `__dict__` will automatically be made for it. However this is entirely + optional, and if `_REQUIRED_CLASS_VARS_NO_MRO` isn't set, this check is skipped. """ __slots__ = () @@ -64,9 +64,9 @@ def __new__(cls: type[Self], *a: Any, **kw: Any) -> Self: class Serializable(ABC): - """Base class for any type that should be (de)serializable into/from :class:`~mcproto.Buffer` data. + """Base class for any type that should be (de)serializable into/from [`Buffer`][mcproto.buffer.] data. - Any class that inherits from this class and adds parameters should use the :func:`~mcproto.utils.abc.define` + Any class that inherits from this class and adds parameters should use the [`attrs.define`][attrs.define] decorator. 
""" @@ -78,21 +78,21 @@ def __attrs_post_init__(self) -> None: This function is responsible for conversion/transformation of given values right after initialization (often for example to convert an int initialization param into a specific enum variant) - .. note:: + Note: If you override this method, make sure to call the superclass method at some point to ensure that the validation is run. """ self.validate() def serialize(self) -> Buffer: - """Represent the object as a :class:`~mcproto.Buffer` (transmittable sequence of bytes).""" + """Represent the object as a [`Buffer`][mcproto.buffer.] (transmittable sequence of bytes).""" buf = Buffer() self.serialize_to(buf) return buf @abstractmethod def serialize_to(self, buf: Buffer, /) -> None: - """Write the object to a :class:`~mcproto.Buffer`.""" + """Write the object to a [`Buffer`][mcproto.buffer.].""" raise NotImplementedError def validate(self) -> None: @@ -100,13 +100,13 @@ def validate(self) -> None: By default, this method does nothing. Override it in your subclass to add validation logic. - .. note:: - This method is called by :meth:`~mcproto.utils.abc.Serializable.__attrs_post_init__` + Note: + This method is called by [`__attrs_post_init__`][mcproto.utils.abc.Serializable.] """ return @classmethod @abstractmethod def deserialize(cls, buf: Buffer, /) -> Self: - """Construct the object from a :class:`~mcproto.Buffer` (transmittable sequence of bytes).""" + """Construct the object from a [`Buffer`][mcproto.buffer.] (transmittable sequence of bytes).""" raise NotImplementedError diff --git a/mcproto/utils/deprecation.py b/mcproto/utils/deprecation.py index b25cd428..52a0f181 100644 --- a/mcproto/utils/deprecation.py +++ b/mcproto/utils/deprecation.py @@ -26,17 +26,18 @@ def deprecation_warn( """Produce an appropriate deprecation warning given the parameters. 
If the currently installed project version is already past the specified deprecation version, - a :exc:`DeprecationWarning` will be raised as a full exception. Otherwise it will just get emitted - as a warning. + a [`DeprecationWarning`][DeprecationWarning] will be raised as a full exception. Otherwise it + will just get emitted as a warning. The deprecation message used will be constructed based on the input parameters. - :param obj_name: Name of the object that got deprecated (such as ``my_function``). - :param removal_version: - Version at which this object should be considered as deprecated and should no longer be used. - :param replacement: A new alternative to this (now deprecated) object. - :param extra_msg: Additional message included in the deprecation warning/exception at the end. - :param stack_level: Stack level at which the warning is emitted. + Args: + obj_name: Name of the object that got deprecated (such as `my_function`). + removal_version: + Version at which this object should be considered as deprecated and should no longer be used. + replacement: A new alternative to this (now deprecated) object. + extra_msg: Additional message included in the deprecation warning/exception at the end. + stack_level: Stack level at which the warning is emitted. """ if isinstance(removal_version, str): removal_version = Version(removal_version) @@ -83,24 +84,25 @@ def deprecated( ) -> DecoratorFunction: """Mark an object as deprecated. - Decorator version of :func:`.deprecation_warn` function. + Decorator version of [`deprecation_warn`][..] function. If the currently installed project version is already past the specified deprecation version, - a :exc:`DeprecationWarning` will be raised as a full exception. Otherwise it will just get emitted - as a warning. + a [`DeprecationWarning`][DeprecationWarning] will be raised as a full exception. Otherwise it + will just get emitted as a warning. 
The deprecation message used will be constructed based on the input parameters. - :param display_name: - Name of the object that got deprecated (such as ``my_function``). - - By default, the object name is obtained automatically from ``__qualname__`` (falling back - to ``__name__``) of the decorated object. Setting this explicitly will override this obtained - name and the ``display_name`` will be used instead. - :param removal_version: - Version at which this object should be considered as deprecated and should no longer be used. - :param replacement: A new alternative to this (now deprecated) object. - :param extra_msg: Additional message included in the deprecation warning/exception at the end. + Args: + display_name: + Name of the object that got deprecated (such as `my_function`). + + By default, the object name is obtained automatically from `__qualname__` (falling back + to `__name__`) of the decorated object. Setting this explicitly will override this obtained + name and the `display_name` will be used instead. + removal_version: + Version at which this object should be considered as deprecated and should no longer be used. + replacement: A new alternative to this (now deprecated) object. + extra_msg: Additional message included in the deprecation warning/exception at the end. 
""" def inner(func: Callable[P, R]) -> Callable[P, R]: diff --git a/mkdocs.yml b/mkdocs.yml new file mode 100644 index 00000000..067894bd --- /dev/null +++ b/mkdocs.yml @@ -0,0 +1,171 @@ +site_name: MCPROTO +site_url: https://py-mine.github.io/mcproto +copyright: Mcproto Documentation © 2024 by ItsDrike + +repo_url: https://github.com/py-mine/mcproto +repo_name: py-mine/mcproto + +watch: + [ + LICENSE.txt, + LICENSE-THIRD-PARTY.txt, + ATTRIBUTION.md, + SECURITY.md, + CHANGELOG.md, + changes, + mcproto, + ] + +exclude_docs: | + LICENSE.md + +nav: + - Home: index.md + - Installation: installation.md + - Usage: + - usage/index.md + - First steps: usage/first-steps.md + - Packet Communication: usage/packet-communication.md + - Authentication: usage/authentication.md + - FAQ: faq.md + - Meta: + - Getting help: meta/support.md + - Versioning Model: meta/versioning.md + - Changelog: meta/changelog.md + - Code of Conduct: meta/code-of-conduct.md + - Attributions: meta/attribution.md + - License: meta/license.md + - Contributing: + - Bugs & Feature requests: contributing/issue-guide.md + - Making a pull request: contributing/making-a-pr.md + - Security Policy: contributing/security-policy.md + - Guides: + - contributing/guides/index.md + - Setting things up: contributing/guides/setup.md + - Style Guide: contributing/guides/style-guide.md + - Type hinting: contributing/guides/type-hints.md + - Slotscheck: contributing/guides/slotscheck.md + - Pre-commit: contributing/guides/precommit.md + - Changelog: contributing/guides/changelog.md + - Breaking Changes: contributing/guides/breaking-changes.md + - Unit Tests: contributing/guides/unit-tests.md + - Documentation: contributing/guides/documentation.md + - API Reference: contributing/guides/api-reference.md + - Great commits: contributing/guides/great-commits.md + - API Reference: + - Protocol: reference/protocol.md + - Authentication: reference/authentication.md + - Encryption: reference/encryption.md + - Multiplayer: 
reference/multiplayer.md + - Types: reference/types.md + - Packets: reference/packets.md + - ABC: reference/abc.md + +theme: + name: material + logo: assets/py-mine_logo.png + palette: + - media: "(prefers-color-scheme)" + primary: black + accent: black + toggle: + icon: material/brightness-auto + name: Switch to light mode + + - media: "(prefers-color-scheme: light)" + scheme: default + primary: black + accent: black + toggle: + icon: material/brightness-7 + name: Switch to dark mode + + - media: "(prefers-color-scheme: dark)" + scheme: slate + primary: black + accent: black + toggle: + icon: material/brightness-4 + name: Switch to system preference + icon: + repo: fontawesome/brands/github + features: + - content.tabs.link + - content.code.copy + - content.action.edit + - search.highlight + - search.share + - search.suggest + - navigation.footer + - navigation.indexes + - navigation.sections + - navigation.tabs + - navigation.tabs.sticky + - navigation.top + - toc.follow + +extra_css: + - css/mkdocstrings.css + - css/material.css + - css/admoditions.css + +markdown_extensions: + - admonition + - attr_list + - md_in_html + - toc: + permalink: true + - pymdownx.highlight: + anchor_linenums: true + - pymdownx.inlinehilite + - pymdownx.superfences + - pymdownx.snippets + - pymdownx.details + - pymdownx.emoji: + emoji_index: !!python/name:material.extensions.emoji.twemoji + emoji_generator: !!python/name:material.extensions.emoji.to_svg + - pymdownx.tabbed: + alternate_style: true + +plugins: + - search + - markdown-exec: + ansi: required + - mike: + canonical_version: "latest" + version_selector: true + - mkdocstrings: + enable_inventory: true + default_handler: python_betterrefs + handlers: + python_betterrefs: + options: + docstring_options: + ignore_init_summary: true + show_root_heading: false + show_root_toc_entry: false + show_source: false + docstring_style: google + show_signature_annotations: true + better_crossrefs: true + check_crossrefs: true + 
signature_crossrefs: true + separate_signature: true + show_symbol_type_heading: true + show_symbol_type_toc: true + parameter_headings: true + show_object_full_path: true + docstring_section_style: table + import: + - url: https://docs.python.org/3.13/objects.inv + domains: [std, py] + - https://typing-extensions.readthedocs.io/en/latest/objects.inv + - https://cryptography.io/en/stable/objects.inv + - https://python-semanticversion.readthedocs.io/en/stable/objects.inv + - https://www.attrs.org/en/stable/objects.inv + +extra: + version: + provider: mike + default: latest + alias: true diff --git a/poetry.lock b/poetry.lock index f8335819..7d49f65c 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1,17 +1,5 @@ # This file is automatically @generated by Poetry 2.1.2 and should not be changed by hand. -[[package]] -name = "alabaster" -version = "0.7.16" -description = "A light, configurable Sphinx theme" -optional = false -python-versions = ">=3.9" -groups = ["docs"] -files = [ - {file = "alabaster-0.7.16-py3-none-any.whl", hash = "sha256:b46733c07dce03ae4e150330b975c75737fa60f0a7c591b6c8bf4928a28e2c92"}, - {file = "alabaster-0.7.16.tar.gz", hash = "sha256:75a8b99c28a5dad50dd7f8ccdd447a121ddb3892da9e53d1ca5cca3106d58d65"}, -] - [[package]] name = "anyio" version = "4.3.0" @@ -88,6 +76,26 @@ files = [ [package.extras] dev = ["freezegun (>=1.0,<2.0)", "pytest (>=6.0)", "pytest-cov"] +[[package]] +name = "backrefs" +version = "5.9" +description = "A wrapper around re and regex that adds additional back references." 
+optional = false +python-versions = ">=3.9" +groups = ["docs"] +files = [ + {file = "backrefs-5.9-py310-none-any.whl", hash = "sha256:db8e8ba0e9de81fcd635f440deab5ae5f2591b54ac1ebe0550a2ca063488cd9f"}, + {file = "backrefs-5.9-py311-none-any.whl", hash = "sha256:6907635edebbe9b2dc3de3a2befff44d74f30a4562adbb8b36f21252ea19c5cf"}, + {file = "backrefs-5.9-py312-none-any.whl", hash = "sha256:7fdf9771f63e6028d7fee7e0c497c81abda597ea45d6b8f89e8ad76994f5befa"}, + {file = "backrefs-5.9-py313-none-any.whl", hash = "sha256:cc37b19fa219e93ff825ed1fed8879e47b4d89aa7a1884860e2db64ccd7c676b"}, + {file = "backrefs-5.9-py314-none-any.whl", hash = "sha256:df5e169836cc8acb5e440ebae9aad4bf9d15e226d3bad049cf3f6a5c20cc8dc9"}, + {file = "backrefs-5.9-py39-none-any.whl", hash = "sha256:f48ee18f6252b8f5777a22a00a09a85de0ca931658f1dd96d4406a34f3748c60"}, + {file = "backrefs-5.9.tar.gz", hash = "sha256:808548cb708d66b82ee231f962cb36faaf4f2baab032f2fbb783e9c2fdddaa59"}, +] + +[package.extras] +extras = ["regex"] + [[package]] name = "basedpyright" version = "1.29.4" @@ -103,28 +111,6 @@ files = [ [package.dependencies] nodejs-wheel-binaries = ">=20.13.1" -[[package]] -name = "beautifulsoup4" -version = "4.12.3" -description = "Screen-scraping library" -optional = false -python-versions = ">=3.6.0" -groups = ["docs"] -files = [ - {file = "beautifulsoup4-4.12.3-py3-none-any.whl", hash = "sha256:b80878c9f40111313e55da8ba20bdba06d8fa3969fc68304167741bbf9e082ed"}, - {file = "beautifulsoup4-4.12.3.tar.gz", hash = "sha256:74e3d1928edc070d21748185c46e3fb33490f22f52a3addee9aee0f4f7781051"}, -] - -[package.dependencies] -soupsieve = ">1.2" - -[package.extras] -cchardet = ["cchardet"] -chardet = ["chardet"] -charset-normalizer = ["charset-normalizer"] -html5lib = ["html5lib"] -lxml = ["lxml"] - [[package]] name = "certifi" version = "2024.7.4" @@ -356,7 +342,7 @@ files = [ {file = "colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6"}, 
{file = "colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44"}, ] -markers = {docs = "sys_platform == \"win32\" or platform_system == \"Windows\"", lint = "platform_system == \"Windows\"", release = "platform_system == \"Windows\"", test = "sys_platform == \"win32\""} +markers = {lint = "platform_system == \"Windows\"", release = "platform_system == \"Windows\"", test = "sys_platform == \"win32\""} [[package]] name = "coverage" @@ -488,18 +474,6 @@ files = [ {file = "distlib-0.3.8.tar.gz", hash = "sha256:1530ea13e350031b6312d8580ddb6b27a104275a31106523b8f123787f494f64"}, ] -[[package]] -name = "docutils" -version = "0.19" -description = "Docutils -- Python Documentation Utilities" -optional = false -python-versions = ">=3.7" -groups = ["docs"] -files = [ - {file = "docutils-0.19-py3-none-any.whl", hash = "sha256:5e1de4d849fee02c63b040a4a3fd567f4ab104defd8a5511fbbc24a8a017efbc"}, - {file = "docutils-0.19.tar.gz", hash = "sha256:33995a6753c30b7f577febfc2c50411fec6aac7f7ffeb7c4cfe5991072dcf9e6"}, -] - [[package]] name = "dunamai" version = "1.23.1" @@ -521,7 +495,7 @@ version = "1.2.1" description = "Backport of PEP 654 (exception groups)" optional = false python-versions = ">=3.7" -groups = ["main", "docs", "test"] +groups = ["main", "test"] markers = "python_version < \"3.11\"" files = [ {file = "exceptiongroup-1.2.1-py3-none-any.whl", hash = "sha256:5258b9ed329c5bbdd31a309f53cbfb0b155341807f6ff7606a1e801a891b29ad"}, @@ -549,22 +523,37 @@ testing = ["covdefaults (>=2.3)", "coverage (>=7.3.2)", "diff-cover (>=8.0.1)", typing = ["typing-extensions (>=4.8) ; python_version < \"3.11\""] [[package]] -name = "furo" -version = "2024.8.6" -description = "A clean customisable Sphinx documentation theme." +name = "ghp-import" +version = "2.1.0" +description = "Copy your docs directly to the gh-pages branch." 
optional = false -python-versions = ">=3.8" +python-versions = "*" groups = ["docs"] files = [ - {file = "furo-2024.8.6-py3-none-any.whl", hash = "sha256:6cd97c58b47813d3619e63e9081169880fbe331f0ca883c871ff1f3f11814f5c"}, - {file = "furo-2024.8.6.tar.gz", hash = "sha256:b63e4cee8abfc3136d3bc03a3d45a76a850bada4d6374d24c1716b0e01394a01"}, + {file = "ghp-import-2.1.0.tar.gz", hash = "sha256:9c535c4c61193c2df8871222567d7fd7e5014d835f97dc7b7439069e2413d343"}, + {file = "ghp_import-2.1.0-py3-none-any.whl", hash = "sha256:8337dd7b50877f163d4c0289bc1f1c7f127550241988d568c1db512c4324a619"}, ] [package.dependencies] -beautifulsoup4 = "*" -pygments = ">=2.7" -sphinx = ">=6.0,<9.0" -sphinx-basic-ng = ">=1.0.0.beta2" +python-dateutil = ">=2.8.1" + +[package.extras] +dev = ["flake8", "markdown", "twine", "wheel"] + +[[package]] +name = "griffe" +version = "1.5.6" +description = "Signatures for entire Python programs. Extract the structure, the frame, the skeleton of your project, to generate API documentation or find breaking changes in your API." 
+optional = false +python-versions = ">=3.9" +groups = ["docs"] +files = [ + {file = "griffe-1.5.6-py3-none-any.whl", hash = "sha256:b2a3afe497c6c1f952e54a23095ecc09435016293e77af8478ed65df1022a394"}, + {file = "griffe-1.5.6.tar.gz", hash = "sha256:181f6666d5aceb6cd6e2da5a2b646cfb431e47a0da1fda283845734b67e10944"}, +] + +[package.dependencies] +colorama = ">=0.4" [[package]] name = "h11" @@ -651,30 +640,18 @@ files = [ {file = "idna-3.7.tar.gz", hash = "sha256:028ff3aadf0609c1fd278d8ea3089299412a7a8b9bd005dd08b9f8285bcb5cfc"}, ] -[[package]] -name = "imagesize" -version = "1.4.1" -description = "Getting image size from png/jpeg/jpeg2000/gif file" -optional = false -python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" -groups = ["docs"] -files = [ - {file = "imagesize-1.4.1-py2.py3-none-any.whl", hash = "sha256:0d8d18d08f840c19d0ee7ca1fd82490fdc3729b7ac93f49870406ddde8ef8d8b"}, - {file = "imagesize-1.4.1.tar.gz", hash = "sha256:69150444affb9cb0d5cc5a92b3676f0b2fb7cd9ae39e947a5e11a36b4497cd4a"}, -] - [[package]] name = "importlib-metadata" version = "7.1.0" description = "Read metadata from Python packages" optional = false python-versions = ">=3.8" -groups = ["docs"] -markers = "python_version == \"3.9\"" +groups = ["docs", "release"] files = [ {file = "importlib_metadata-7.1.0-py3-none-any.whl", hash = "sha256:30962b96c0c223483ed6cc7280e7f0199feb01a0e40cfae4d4450fc6fab1f570"}, {file = "importlib_metadata-7.1.0.tar.gz", hash = "sha256:b78938b926ee8d5f020fc4772d487045805a55ddbad2ecf21c6d60938dc7fcd2"}, ] +markers = {release = "python_version == \"3.9\""} [package.dependencies] zipp = ">=0.5" @@ -691,11 +668,11 @@ description = "Read resources from Python packages" optional = false python-versions = ">=3.8" groups = ["docs", "release"] -markers = "python_version == \"3.9\"" files = [ {file = "importlib_resources-6.4.0-py3-none-any.whl", hash = "sha256:50d10f043df931902d4194ea07ec57960f66a80449ff867bfe782b4c486ba78c"}, {file = 
"importlib_resources-6.4.0.tar.gz", hash = "sha256:cdb2b453b8046ca4e3798eb1d84f3cce1446a0e8e7b5ef4efb600f19fc398145"}, ] +markers = {release = "python_version == \"3.9\""} [package.dependencies] zipp = {version = ">=3.1.0", markers = "python_version < \"3.10\""} @@ -704,32 +681,13 @@ zipp = {version = ">=3.1.0", markers = "python_version < \"3.10\""} docs = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (<7.2.5)", "sphinx (>=3.5)", "sphinx-lint"] testing = ["jaraco.test (>=5.4)", "pytest (>=6)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=2.2)", "pytest-mypy ; platform_python_implementation != \"PyPy\"", "pytest-ruff (>=0.2.1)", "zipp (>=3.17)"] -[[package]] -name = "incremental" -version = "24.7.2" -description = "A small library that versions your Python projects." -optional = false -python-versions = ">=3.8" -groups = ["docs", "release"] -files = [ - {file = "incremental-24.7.2-py3-none-any.whl", hash = "sha256:8cb2c3431530bec48ad70513931a760f446ad6c25e8333ca5d95e24b0ed7b8fe"}, - {file = "incremental-24.7.2.tar.gz", hash = "sha256:fb4f1d47ee60efe87d4f6f0ebb5f70b9760db2b2574c59c8e8912be4ebd464c9"}, -] - -[package.dependencies] -setuptools = ">=61.0" -tomli = {version = "*", markers = "python_version < \"3.11\""} - -[package.extras] -scripts = ["click (>=6.0)"] - [[package]] name = "iniconfig" version = "2.0.0" description = "brain-dead simple config-ini parsing" optional = false python-versions = ">=3.7" -groups = ["docs", "test"] +groups = ["test"] files = [ {file = "iniconfig-2.0.0-py3-none-any.whl", hash = "sha256:b6a85871a79d2e3b22d2d1b94ac2824226a63c6b741c88f7ae975f18b6778374"}, {file = "iniconfig-2.0.0.tar.gz", hash = "sha256:2d91e135bf72d31a410b17c16da610a82cb55f6b0477d1a902134b24a455b8b3"}, @@ -754,20 +712,42 @@ MarkupSafe = ">=2.0" i18n = ["Babel (>=2.7)"] [[package]] -name = "m2r2" -version = "0.3.4" -description = "Markdown and reStructuredText in a single file." 
+name = "markdown" +version = "3.8.2" +description = "Python implementation of John Gruber's Markdown." optional = false -python-versions = ">=3.7" +python-versions = ">=3.9" groups = ["docs"] files = [ - {file = "m2r2-0.3.4-py3-none-any.whl", hash = "sha256:1a445514af8a229496bfb1380c52da8dd38313e48600359ee92b2c9d2e4df34a"}, - {file = "m2r2-0.3.4.tar.gz", hash = "sha256:e278f5f337e9aa7b2080fcc3e94b051bda9615b02e36c6fb3f23ff019872f043"}, + {file = "markdown-3.8.2-py3-none-any.whl", hash = "sha256:5c83764dbd4e00bdd94d85a19b8d55ccca20fe35b2e678a1422b380324dd5f24"}, + {file = "markdown-3.8.2.tar.gz", hash = "sha256:247b9a70dd12e27f67431ce62523e675b866d254f900c4fe75ce3dda62237c45"}, ] [package.dependencies] -docutils = ">=0.19" -mistune = "0.8.4" +importlib-metadata = {version = ">=4.4", markers = "python_version < \"3.10\""} + +[package.extras] +docs = ["mdx_gh_links (>=0.2)", "mkdocs (>=1.6)", "mkdocs-gen-files", "mkdocs-literate-nav", "mkdocs-nature (>=0.6)", "mkdocs-section-index", "mkdocstrings[python]"] +testing = ["coverage", "pyyaml"] + +[[package]] +name = "markdown-exec" +version = "1.10.0" +description = "Utilities to execute code blocks in Markdown files." +optional = false +python-versions = ">=3.9" +groups = ["docs"] +files = [ + {file = "markdown_exec-1.10.0-py3-none-any.whl", hash = "sha256:dea4e8b78a3fe7d8e664088ebaccbd4de51b65c45b9e0db9509a9bb4fce33192"}, + {file = "markdown_exec-1.10.0.tar.gz", hash = "sha256:d1fa017995ef337ec59e7ce49fbf3e051145a62c3124ae687c17e987f1392cd0"}, +] + +[package.dependencies] +pygments-ansi-color = {version = "*", optional = true, markers = "extra == \"ansi\""} +pymdown-extensions = ">=9" + +[package.extras] +ansi = ["pygments-ansi-color"] [[package]] name = "markupsafe" @@ -840,17 +820,213 @@ files = [ ] [[package]] -name = "mistune" -version = "0.8.4" -description = "The fastest markdown parser in pure Python" +name = "mergedeep" +version = "1.3.4" +description = "A deep merge function for 🐍." 
+optional = false +python-versions = ">=3.6" +groups = ["docs"] +files = [ + {file = "mergedeep-1.3.4-py3-none-any.whl", hash = "sha256:70775750742b25c0d8f36c55aed03d24c3384d17c951b3175d898bd778ef0307"}, + {file = "mergedeep-1.3.4.tar.gz", hash = "sha256:0096d52e9dad9939c3d975a774666af186eda617e6ca84df4c94dec30004f2a8"}, +] + +[[package]] +name = "mike" +version = "2.1.3" +description = "Manage multiple versions of your MkDocs-powered documentation" optional = false python-versions = "*" groups = ["docs"] files = [ - {file = "mistune-0.8.4-py2.py3-none-any.whl", hash = "sha256:88a1051873018da288eee8538d476dffe1262495144b33ecb586c4ab266bb8d4"}, - {file = "mistune-0.8.4.tar.gz", hash = "sha256:59a3429db53c50b5c6bcc8a07f8848cb00d7dc8bdb431a4ab41920d201d4756e"}, + {file = "mike-2.1.3-py3-none-any.whl", hash = "sha256:d90c64077e84f06272437b464735130d380703a76a5738b152932884c60c062a"}, + {file = "mike-2.1.3.tar.gz", hash = "sha256:abd79b8ea483fb0275b7972825d3082e5ae67a41820f8d8a0dc7a3f49944e810"}, +] + +[package.dependencies] +importlib-metadata = "*" +importlib-resources = "*" +jinja2 = ">=2.7" +mkdocs = ">=1.0" +pyparsing = ">=3.0" +pyyaml = ">=5.1" +pyyaml-env-tag = "*" +verspec = "*" + +[package.extras] +dev = ["coverage", "flake8 (>=3.0)", "flake8-quotes", "shtab"] +test = ["coverage", "flake8 (>=3.0)", "flake8-quotes", "shtab"] + +[[package]] +name = "mkdocs" +version = "1.6.1" +description = "Project documentation with Markdown." 
+optional = false +python-versions = ">=3.8" +groups = ["docs"] +files = [ + {file = "mkdocs-1.6.1-py3-none-any.whl", hash = "sha256:db91759624d1647f3f34aa0c3f327dd2601beae39a366d6e064c03468d35c20e"}, + {file = "mkdocs-1.6.1.tar.gz", hash = "sha256:7b432f01d928c084353ab39c57282f29f92136665bdd6abf7c1ec8d822ef86f2"}, +] + +[package.dependencies] +click = ">=7.0" +colorama = {version = ">=0.4", markers = "platform_system == \"Windows\""} +ghp-import = ">=1.0" +importlib-metadata = {version = ">=4.4", markers = "python_version < \"3.10\""} +jinja2 = ">=2.11.1" +markdown = ">=3.3.6" +markupsafe = ">=2.0.1" +mergedeep = ">=1.3.4" +mkdocs-get-deps = ">=0.2.0" +packaging = ">=20.5" +pathspec = ">=0.11.1" +pyyaml = ">=5.1" +pyyaml-env-tag = ">=0.1" +watchdog = ">=2.0" + +[package.extras] +i18n = ["babel (>=2.9.0)"] +min-versions = ["babel (==2.9.0)", "click (==7.0)", "colorama (==0.4) ; platform_system == \"Windows\"", "ghp-import (==1.0)", "importlib-metadata (==4.4) ; python_version < \"3.10\"", "jinja2 (==2.11.1)", "markdown (==3.3.6)", "markupsafe (==2.0.1)", "mergedeep (==1.3.4)", "mkdocs-get-deps (==0.2.0)", "packaging (==20.5)", "pathspec (==0.11.1)", "pyyaml (==5.1)", "pyyaml-env-tag (==0.1)", "watchdog (==2.0)"] + +[[package]] +name = "mkdocs-autorefs" +version = "1.4.2" +description = "Automatically link across pages in MkDocs." 
+optional = false +python-versions = ">=3.9" +groups = ["docs"] +files = [ + {file = "mkdocs_autorefs-1.4.2-py3-none-any.whl", hash = "sha256:83d6d777b66ec3c372a1aad4ae0cf77c243ba5bcda5bf0c6b8a2c5e7a3d89f13"}, + {file = "mkdocs_autorefs-1.4.2.tar.gz", hash = "sha256:e2ebe1abd2b67d597ed19378c0fff84d73d1dbce411fce7a7cc6f161888b6749"}, +] + +[package.dependencies] +Markdown = ">=3.3" +markupsafe = ">=2.0.1" +mkdocs = ">=1.1" + +[[package]] +name = "mkdocs-get-deps" +version = "0.2.0" +description = "MkDocs extension that lists all dependencies according to a mkdocs.yml file" +optional = false +python-versions = ">=3.8" +groups = ["docs"] +files = [ + {file = "mkdocs_get_deps-0.2.0-py3-none-any.whl", hash = "sha256:2bf11d0b133e77a0dd036abeeb06dec8775e46efa526dc70667d8863eefc6134"}, + {file = "mkdocs_get_deps-0.2.0.tar.gz", hash = "sha256:162b3d129c7fad9b19abfdcb9c1458a651628e4b1dea628ac68790fb3061c60c"}, +] + +[package.dependencies] +importlib-metadata = {version = ">=4.3", markers = "python_version < \"3.10\""} +mergedeep = ">=1.3.4" +platformdirs = ">=2.2.0" +pyyaml = ">=5.1" + +[[package]] +name = "mkdocs-material" +version = "9.6.14" +description = "Documentation that simply works" +optional = false +python-versions = ">=3.8" +groups = ["docs"] +files = [ + {file = "mkdocs_material-9.6.14-py3-none-any.whl", hash = "sha256:3b9cee6d3688551bf7a8e8f41afda97a3c39a12f0325436d76c86706114b721b"}, + {file = "mkdocs_material-9.6.14.tar.gz", hash = "sha256:39d795e90dce6b531387c255bd07e866e027828b7346d3eba5ac3de265053754"}, +] + +[package.dependencies] +babel = ">=2.10,<3.0" +backrefs = ">=5.7.post1,<6.0" +colorama = ">=0.4,<1.0" +jinja2 = ">=3.1,<4.0" +markdown = ">=3.2,<4.0" +mkdocs = ">=1.6,<2.0" +mkdocs-material-extensions = ">=1.3,<2.0" +paginate = ">=0.5,<1.0" +pygments = ">=2.16,<3.0" +pymdown-extensions = ">=10.2,<11.0" +requests = ">=2.26,<3.0" + +[package.extras] +git = ["mkdocs-git-committers-plugin-2 (>=1.1,<3)", "mkdocs-git-revision-date-localized-plugin 
(>=1.2.4,<2.0)"] +imaging = ["cairosvg (>=2.6,<3.0)", "pillow (>=10.2,<11.0)"] +recommended = ["mkdocs-minify-plugin (>=0.7,<1.0)", "mkdocs-redirects (>=1.2,<2.0)", "mkdocs-rss-plugin (>=1.6,<2.0)"] + +[[package]] +name = "mkdocs-material-extensions" +version = "1.3.1" +description = "Extension pack for Python Markdown and MkDocs Material." +optional = false +python-versions = ">=3.8" +groups = ["docs"] +files = [ + {file = "mkdocs_material_extensions-1.3.1-py3-none-any.whl", hash = "sha256:adff8b62700b25cb77b53358dad940f3ef973dd6db797907c49e3c2ef3ab4e31"}, + {file = "mkdocs_material_extensions-1.3.1.tar.gz", hash = "sha256:10c9511cea88f568257f960358a467d12b970e1f7b2c0e5fb2bb48cab1928443"}, +] + +[[package]] +name = "mkdocstrings" +version = "0.29.1" +description = "Automatic documentation from sources, for MkDocs." +optional = false +python-versions = ">=3.9" +groups = ["docs"] +files = [ + {file = "mkdocstrings-0.29.1-py3-none-any.whl", hash = "sha256:37a9736134934eea89cbd055a513d40a020d87dfcae9e3052c2a6b8cd4af09b6"}, + {file = "mkdocstrings-0.29.1.tar.gz", hash = "sha256:8722f8f8c5cd75da56671e0a0c1bbed1df9946c0cef74794d6141b34011abd42"}, ] +[package.dependencies] +importlib-metadata = {version = ">=4.6", markers = "python_version < \"3.10\""} +Jinja2 = ">=2.11.1" +Markdown = ">=3.6" +MarkupSafe = ">=1.1" +mkdocs = ">=1.6" +mkdocs-autorefs = ">=1.4" +pymdown-extensions = ">=6.3" + +[package.extras] +crystal = ["mkdocstrings-crystal (>=0.3.4)"] +python = ["mkdocstrings-python (>=1.16.2)"] +python-legacy = ["mkdocstrings-python-legacy (>=0.2.1)"] + +[[package]] +name = "mkdocstrings-python" +version = "1.14.7" +description = "A Python handler for mkdocstrings." 
+optional = false +python-versions = ">=3.9" +groups = ["docs"] +files = [ + {file = "mkdocstrings_python-1.14.7-py3-none-any.whl", hash = "sha256:bdcce5544cc2fbee4163a1cf14e218de688849e75ca46c3ece4a28825aac9b41"}, + {file = "mkdocstrings_python-1.14.7.tar.gz", hash = "sha256:35100ea5545a9b42155da73de8be74484216031e912feff7a4f6115f206139c7"}, +] + +[package.dependencies] +griffe = ">=0.49" +mkdocs-autorefs = ">=1.2" +mkdocstrings = ">=0.28" +typing-extensions = {version = ">=4.0", markers = "python_version < \"3.11\""} + +[[package]] +name = "mkdocstrings-python-betterrefs" +version = "1.0.2" +description = "Extended mkdocstrings-python handler with better cross-references support" +optional = false +python-versions = ">=3.9" +groups = ["docs"] +files = [ + {file = "mkdocstrings_python_betterrefs-1.0.2-py3-none-any.whl", hash = "sha256:5a8dd38cf83f01a717966a9edbad26605ac5528d8411d839caaead1c7ea1998a"}, + {file = "mkdocstrings_python_betterrefs-1.0.2.tar.gz", hash = "sha256:98f678de0953eb2ab381c10acb304672dc0ed33945ef04d6b76a72eaa9d6afae"}, +] + +[package.dependencies] +griffe = ">=1.0.0" +mkdocstrings-python = ">=1.14.1,<1.15.0" +typing-extensions = ">=4.0" + [[package]] name = "mslex" version = "1.2.0" @@ -908,13 +1084,41 @@ files = [ {file = "packaging-25.0.tar.gz", hash = "sha256:d443872c98d677bf60f6a1f2f8c1cb748e8fe762d2bf9d3148b5599295b0fc4f"}, ] +[[package]] +name = "paginate" +version = "0.5.7" +description = "Divides large result sets into pages for easier browsing" +optional = false +python-versions = "*" +groups = ["docs"] +files = [ + {file = "paginate-0.5.7-py2.py3-none-any.whl", hash = "sha256:b885e2af73abcf01d9559fd5216b57ef722f8c42affbb63942377668e35c7591"}, + {file = "paginate-0.5.7.tar.gz", hash = "sha256:22bd083ab41e1a8b4f3690544afb2c60c25e5c9a63a30fa2f483f6c60c8e5945"}, +] + +[package.extras] +dev = ["pytest", "tox"] +lint = ["black"] + +[[package]] +name = "pathspec" +version = "0.12.1" +description = "Utility library for gitignore style pattern 
matching of file paths." +optional = false +python-versions = ">=3.8" +groups = ["docs"] +files = [ + {file = "pathspec-0.12.1-py3-none-any.whl", hash = "sha256:a0d503e138a4c123b27490a4f7beda6a01c6f288df0e4a8b79c7eb0dc7b4cc08"}, + {file = "pathspec-0.12.1.tar.gz", hash = "sha256:a482d51503a1ab33b1c67a6c3813a26953dbdc71c31dacaef9a838c4e29f5712"}, +] + [[package]] name = "platformdirs" version = "4.2.2" description = "A small Python package for determining appropriate platform-specific dirs, e.g. a `user data dir`." optional = false python-versions = ">=3.8" -groups = ["dev"] +groups = ["dev", "docs"] files = [ {file = "platformdirs-4.2.2-py3-none-any.whl", hash = "sha256:2d7a1657e36a80ea911db832a8a6ece5ee53d8de21edd5cc5879af6530b1bfee"}, {file = "platformdirs-4.2.2.tar.gz", hash = "sha256:38b7b51f512eed9e84a22788b4bce1de17c0adb134d6becb09836e37d8654cd3"}, @@ -931,7 +1135,7 @@ version = "1.5.0" description = "plugin and hook calling mechanisms for python" optional = false python-versions = ">=3.8" -groups = ["docs", "test"] +groups = ["test"] files = [ {file = "pluggy-1.5.0-py3-none-any.whl", hash = "sha256:44e1ad92c8ca002de6377e165f3e0f1be63266ab4d554740532335b9d75ea669"}, {file = "pluggy-1.5.0.tar.gz", hash = "sha256:2cffa88e94fdc978c4c574f15f9e59b7f4201d439195c3715ca9e2486f1d0cf1"}, @@ -1037,13 +1241,62 @@ files = [ [package.extras] windows-terminal = ["colorama (>=0.4.6)"] +[[package]] +name = "pygments-ansi-color" +version = "0.3.0" +description = "" +optional = false +python-versions = ">=3.7" +groups = ["docs"] +files = [ + {file = "pygments-ansi-color-0.3.0.tar.gz", hash = "sha256:7018954cf5b11d1e734383a1bafab5af613213f246109417fee3f76da26d5431"}, + {file = "pygments_ansi_color-0.3.0-py3-none-any.whl", hash = "sha256:7eb063feaecadad9d4d1fd3474cbfeadf3486b64f760a8f2a00fc25392180aba"}, +] + +[package.dependencies] +pygments = "!=2.7.3" + +[[package]] +name = "pymdown-extensions" +version = "10.16" +description = "Extension pack for Python Markdown." 
+optional = false +python-versions = ">=3.9" +groups = ["docs"] +files = [ + {file = "pymdown_extensions-10.16-py3-none-any.whl", hash = "sha256:f5dd064a4db588cb2d95229fc4ee63a1b16cc8b4d0e6145c0899ed8723da1df2"}, + {file = "pymdown_extensions-10.16.tar.gz", hash = "sha256:71dac4fca63fabeffd3eb9038b756161a33ec6e8d230853d3cecf562155ab3de"}, +] + +[package.dependencies] +markdown = ">=3.6" +pyyaml = "*" + +[package.extras] +extra = ["pygments (>=2.19.1)"] + +[[package]] +name = "pyparsing" +version = "3.2.1" +description = "pyparsing module - Classes and methods to define and execute parsing grammars" +optional = false +python-versions = ">=3.9" +groups = ["docs"] +files = [ + {file = "pyparsing-3.2.1-py3-none-any.whl", hash = "sha256:506ff4f4386c4cec0590ec19e6302d3aedb992fdc02c761e90416f158dacf8e1"}, + {file = "pyparsing-3.2.1.tar.gz", hash = "sha256:61980854fd66de3a90028d679a954d5f2623e83144b5afe5ee86f43d762e5f0a"}, +] + +[package.extras] +diagrams = ["jinja2", "railroad-diagrams"] + [[package]] name = "pytest" version = "7.4.4" description = "pytest: simple powerful testing with Python" optional = false python-versions = ">=3.7" -groups = ["docs", "test"] +groups = ["test"] files = [ {file = "pytest-7.4.4-py3-none-any.whl", hash = "sha256:b090cdf5ed60bf4c45261be03239c2c1c22df034fbffe691abe93cd80cea01d8"}, {file = "pytest-7.4.4.tar.gz", hash = "sha256:2cf0005922c6ace4a3e2ec8b4080eb0d9753fdc93107415332f50ce9e7994280"}, @@ -1118,13 +1371,28 @@ pytest = ">=6.0,<8.0" [package.extras] testing = ["pytest-asyncio (==0.21.*)", "pytest-cov (==4.*)"] +[[package]] +name = "python-dateutil" +version = "2.9.0.post0" +description = "Extensions to the standard Python datetime module" +optional = false +python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,>=2.7" +groups = ["docs"] +files = [ + {file = "python-dateutil-2.9.0.post0.tar.gz", hash = "sha256:37dd54208da7e1cd875388217d5e00ebd4179249f90fb72437e91a35459a0ad3"}, + {file = "python_dateutil-2.9.0.post0-py2.py3-none-any.whl", hash = 
"sha256:a8b2bc7bffae282281c8140a97d3aa9c14da0b136dfe83f850eea9a5f7470427"}, +] + +[package.dependencies] +six = ">=1.5" + [[package]] name = "pyyaml" version = "6.0.1" description = "YAML parser and emitter for Python" optional = false python-versions = ">=3.6" -groups = ["dev"] +groups = ["dev", "docs"] files = [ {file = "PyYAML-6.0.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:d858aa552c999bc8a8d57426ed01e40bef403cd8ccdd0fc5f6f04a00414cac2a"}, {file = "PyYAML-6.0.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:fd66fc5d0da6d9815ba2cebeb4205f95818ff4b79c3ebe268e75d961704af52f"}, @@ -1179,6 +1447,21 @@ files = [ {file = "PyYAML-6.0.1.tar.gz", hash = "sha256:bfdf460b1736c775f2ba9f6a92bca30bc2095067b8a9d77876d1fad6cc3b4a43"}, ] +[[package]] +name = "pyyaml-env-tag" +version = "1.1" +description = "A custom YAML tag for referencing environment variables in YAML files." +optional = false +python-versions = ">=3.9" +groups = ["docs"] +files = [ + {file = "pyyaml_env_tag-1.1-py3-none-any.whl", hash = "sha256:17109e1a528561e32f026364712fee1264bc2ea6715120891174ed1b980d2e04"}, + {file = "pyyaml_env_tag-1.1.tar.gz", hash = "sha256:2eb38b75a2d21ee0475d6d97ec19c63287a7e140231e4214969d0eac923cd7ff"}, +] + +[package.dependencies] +pyyaml = "*" + [[package]] name = "requests" version = "2.32.4" @@ -1235,7 +1518,7 @@ version = "78.1.1" description = "Easily download, build, install, upgrade, and uninstall Python packages" optional = false python-versions = ">=3.9" -groups = ["main", "dev", "docs", "release"] +groups = ["main", "dev"] files = [ {file = "setuptools-78.1.1-py3-none-any.whl", hash = "sha256:c3a9c4211ff4c309edb8b8c4f1cbfa7ae324c4ba9f91ff254e3d305b9fd54561"}, {file = "setuptools-78.1.1.tar.gz", hash = "sha256:fcc17fd9cd898242f6b4adfaca46137a9edef687f43e6f78469692a5e70d851d"}, @@ -1250,6 +1533,18 @@ enabler = ["pytest-enabler (>=2.2)"] test = ["build[virtualenv] (>=1.0.3)", "filelock (>=3.4.0)", "ini2toml[lite] (>=0.14)", "jaraco.develop (>=7.21) ; 
python_version >= \"3.9\" and sys_platform != \"cygwin\"", "jaraco.envs (>=2.2)", "jaraco.path (>=3.7.2)", "jaraco.test (>=5.5)", "packaging (>=24.2)", "pip (>=19.1)", "pyproject-hooks (!=1.1)", "pytest (>=6,!=8.1.*)", "pytest-home (>=0.5)", "pytest-perf ; sys_platform != \"cygwin\"", "pytest-subprocess", "pytest-timeout", "pytest-xdist (>=3)", "tomli-w (>=1.0.0)", "virtualenv (>=13.0.0)", "wheel (>=0.44.0)"] type = ["importlib_metadata (>=7.0.2) ; python_version < \"3.10\"", "jaraco.develop (>=7.21) ; sys_platform != \"cygwin\"", "mypy (==1.14.*)", "pytest-mypy"] +[[package]] +name = "six" +version = "1.17.0" +description = "Python 2 and 3 compatibility utilities" +optional = false +python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,>=2.7" +groups = ["docs"] +files = [ + {file = "six-1.17.0-py2.py3-none-any.whl", hash = "sha256:4721f391ed90541fddacab5acf947aa0d3dc7d27b2e1e8eda2be8970586c3274"}, + {file = "six-1.17.0.tar.gz", hash = "sha256:ff70335d468e7eb6ec65b95b99d3a2836546063f63acc5171de367e834932a81"}, +] + [[package]] name = "slotscheck" version = "0.19.1" @@ -1278,236 +1573,6 @@ files = [ {file = "sniffio-1.3.1.tar.gz", hash = "sha256:f4324edc670a0f49750a81b895f35c3adb843cca46f0530f79fc1babb23789dc"}, ] -[[package]] -name = "snowballstemmer" -version = "2.2.0" -description = "This package provides 29 stemmers for 28 languages generated from Snowball algorithms." -optional = false -python-versions = "*" -groups = ["docs"] -files = [ - {file = "snowballstemmer-2.2.0-py2.py3-none-any.whl", hash = "sha256:c8e1716e83cc398ae16824e5572ae04e0d9fc2c6b985fb0f900f5f0c96ecba1a"}, - {file = "snowballstemmer-2.2.0.tar.gz", hash = "sha256:09b16deb8547d3412ad7b590689584cd0fe25ec8db3be37788be3810cbf19cb1"}, -] - -[[package]] -name = "soupsieve" -version = "2.5" -description = "A modern CSS selector implementation for Beautiful Soup." 
-optional = false -python-versions = ">=3.8" -groups = ["docs"] -files = [ - {file = "soupsieve-2.5-py3-none-any.whl", hash = "sha256:eaa337ff55a1579b6549dc679565eac1e3d000563bcb1c8ab0d0fefbc0c2cdc7"}, - {file = "soupsieve-2.5.tar.gz", hash = "sha256:5663d5a7b3bfaeee0bc4372e7fc48f9cff4940b3eec54a6451cc5299f1097690"}, -] - -[[package]] -name = "sphinx" -version = "7.3.7" -description = "Python documentation generator" -optional = false -python-versions = ">=3.9" -groups = ["docs"] -files = [ - {file = "sphinx-7.3.7-py3-none-any.whl", hash = "sha256:413f75440be4cacf328f580b4274ada4565fb2187d696a84970c23f77b64d8c3"}, - {file = "sphinx-7.3.7.tar.gz", hash = "sha256:a4a7db75ed37531c05002d56ed6948d4c42f473a36f46e1382b0bd76ca9627bc"}, -] - -[package.dependencies] -alabaster = ">=0.7.14,<0.8.0" -babel = ">=2.9" -colorama = {version = ">=0.4.5", markers = "sys_platform == \"win32\""} -docutils = ">=0.18.1,<0.22" -imagesize = ">=1.3" -importlib-metadata = {version = ">=4.8", markers = "python_version < \"3.10\""} -Jinja2 = ">=3.0" -packaging = ">=21.0" -Pygments = ">=2.14" -requests = ">=2.25.0" -snowballstemmer = ">=2.0" -sphinxcontrib-applehelp = "*" -sphinxcontrib-devhelp = "*" -sphinxcontrib-htmlhelp = ">=2.0.0" -sphinxcontrib-jsmath = "*" -sphinxcontrib-qthelp = "*" -sphinxcontrib-serializinghtml = ">=1.1.9" -tomli = {version = ">=2", markers = "python_version < \"3.11\""} - -[package.extras] -docs = ["sphinxcontrib-websupport"] -lint = ["flake8 (>=3.5.0)", "importlib_metadata", "mypy (==1.9.0)", "pytest (>=6.0)", "ruff (==0.3.7)", "sphinx-lint", "tomli", "types-docutils", "types-requests"] -test = ["cython (>=3.0)", "defusedxml (>=0.7.1)", "pytest (>=6.0)", "setuptools (>=67.0)"] - -[[package]] -name = "sphinx-autodoc-typehints" -version = "2.3.0" -description = "Type hints (PEP 484) support for the Sphinx autodoc extension" -optional = false -python-versions = ">=3.9" -groups = ["docs"] -files = [ - {file = "sphinx_autodoc_typehints-2.3.0-py3-none-any.whl", hash = 
"sha256:3098e2c6d0ba99eacd013eb06861acc9b51c6e595be86ab05c08ee5506ac0c67"}, - {file = "sphinx_autodoc_typehints-2.3.0.tar.gz", hash = "sha256:535c78ed2d6a1bad393ba9f3dfa2602cf424e2631ee207263e07874c38fde084"}, -] - -[package.dependencies] -sphinx = ">=7.3.5" - -[package.extras] -docs = ["furo (>=2024.1.29)"] -numpy = ["nptyping (>=2.5)"] -testing = ["covdefaults (>=2.3)", "coverage (>=7.4.4)", "defusedxml (>=0.7.1)", "diff-cover (>=9)", "pytest (>=8.1.1)", "pytest-cov (>=5)", "sphobjinv (>=2.3.1)", "typing-extensions (>=4.11)"] - -[[package]] -name = "sphinx-basic-ng" -version = "1.0.0b2" -description = "A modern skeleton for Sphinx themes." -optional = false -python-versions = ">=3.7" -groups = ["docs"] -files = [ - {file = "sphinx_basic_ng-1.0.0b2-py3-none-any.whl", hash = "sha256:eb09aedbabfb650607e9b4b68c9d240b90b1e1be221d6ad71d61c52e29f7932b"}, - {file = "sphinx_basic_ng-1.0.0b2.tar.gz", hash = "sha256:9ec55a47c90c8c002b5960c57492ec3021f5193cb26cebc2dc4ea226848651c9"}, -] - -[package.dependencies] -sphinx = ">=4.0" - -[package.extras] -docs = ["furo", "ipython", "myst-parser", "sphinx-copybutton", "sphinx-inline-tabs"] - -[[package]] -name = "sphinx-copybutton" -version = "0.5.2" -description = "Add a copy button to each of your code cells." 
-optional = false -python-versions = ">=3.7" -groups = ["docs"] -files = [ - {file = "sphinx-copybutton-0.5.2.tar.gz", hash = "sha256:4cf17c82fb9646d1bc9ca92ac280813a3b605d8c421225fd9913154103ee1fbd"}, - {file = "sphinx_copybutton-0.5.2-py3-none-any.whl", hash = "sha256:fb543fd386d917746c9a2c50360c7905b605726b9355cd26e9974857afeae06e"}, -] - -[package.dependencies] -sphinx = ">=1.8" - -[package.extras] -code-style = ["pre-commit (==2.12.1)"] -rtd = ["ipython", "myst-nb", "sphinx", "sphinx-book-theme", "sphinx-examples"] - -[[package]] -name = "sphinxcontrib-applehelp" -version = "1.0.4" -description = "sphinxcontrib-applehelp is a Sphinx extension which outputs Apple help books" -optional = false -python-versions = ">=3.8" -groups = ["docs"] -files = [ - {file = "sphinxcontrib-applehelp-1.0.4.tar.gz", hash = "sha256:828f867945bbe39817c210a1abfd1bc4895c8b73fcaade56d45357a348a07d7e"}, - {file = "sphinxcontrib_applehelp-1.0.4-py3-none-any.whl", hash = "sha256:29d341f67fb0f6f586b23ad80e072c8e6ad0b48417db2bde114a4c9746feb228"}, -] - -[package.extras] -lint = ["docutils-stubs", "flake8", "mypy"] -test = ["pytest"] - -[[package]] -name = "sphinxcontrib-devhelp" -version = "1.0.2" -description = "sphinxcontrib-devhelp is a sphinx extension which outputs Devhelp document." 
-optional = false -python-versions = ">=3.5" -groups = ["docs"] -files = [ - {file = "sphinxcontrib-devhelp-1.0.2.tar.gz", hash = "sha256:ff7f1afa7b9642e7060379360a67e9c41e8f3121f2ce9164266f61b9f4b338e4"}, - {file = "sphinxcontrib_devhelp-1.0.2-py2.py3-none-any.whl", hash = "sha256:8165223f9a335cc1af7ffe1ed31d2871f325254c0423bc0c4c7cd1c1e4734a2e"}, -] - -[package.extras] -lint = ["docutils-stubs", "flake8", "mypy"] -test = ["pytest"] - -[[package]] -name = "sphinxcontrib-htmlhelp" -version = "2.0.1" -description = "sphinxcontrib-htmlhelp is a sphinx extension which renders HTML help files" -optional = false -python-versions = ">=3.8" -groups = ["docs"] -files = [ - {file = "sphinxcontrib-htmlhelp-2.0.1.tar.gz", hash = "sha256:0cbdd302815330058422b98a113195c9249825d681e18f11e8b1f78a2f11efff"}, - {file = "sphinxcontrib_htmlhelp-2.0.1-py3-none-any.whl", hash = "sha256:c38cb46dccf316c79de6e5515e1770414b797162b23cd3d06e67020e1d2a6903"}, -] - -[package.extras] -lint = ["docutils-stubs", "flake8", "mypy"] -test = ["html5lib", "pytest"] - -[[package]] -name = "sphinxcontrib-jsmath" -version = "1.0.1" -description = "A sphinx extension which renders display math in HTML via JavaScript" -optional = false -python-versions = ">=3.5" -groups = ["docs"] -files = [ - {file = "sphinxcontrib-jsmath-1.0.1.tar.gz", hash = "sha256:a9925e4a4587247ed2191a22df5f6970656cb8ca2bd6284309578f2153e0c4b8"}, - {file = "sphinxcontrib_jsmath-1.0.1-py2.py3-none-any.whl", hash = "sha256:2ec2eaebfb78f3f2078e73666b1415417a116cc848b72e5172e596c871103178"}, -] - -[package.extras] -test = ["flake8", "mypy", "pytest"] - -[[package]] -name = "sphinxcontrib-qthelp" -version = "1.0.3" -description = "sphinxcontrib-qthelp is a sphinx extension which outputs QtHelp document." 
-optional = false -python-versions = ">=3.5" -groups = ["docs"] -files = [ - {file = "sphinxcontrib-qthelp-1.0.3.tar.gz", hash = "sha256:4c33767ee058b70dba89a6fc5c1892c0d57a54be67ddd3e7875a18d14cba5a72"}, - {file = "sphinxcontrib_qthelp-1.0.3-py2.py3-none-any.whl", hash = "sha256:bd9fc24bcb748a8d51fd4ecaade681350aa63009a347a8c14e637895444dfab6"}, -] - -[package.extras] -lint = ["docutils-stubs", "flake8", "mypy"] -test = ["pytest"] - -[[package]] -name = "sphinxcontrib-serializinghtml" -version = "2.0.0" -description = "sphinxcontrib-serializinghtml is a sphinx extension which outputs \"serialized\" HTML files (json and pickle)" -optional = false -python-versions = ">=3.9" -groups = ["docs"] -files = [ - {file = "sphinxcontrib_serializinghtml-2.0.0-py3-none-any.whl", hash = "sha256:6e2cb0eef194e10c27ec0023bfeb25badbbb5868244cf5bc5bdc04e4464bf331"}, - {file = "sphinxcontrib_serializinghtml-2.0.0.tar.gz", hash = "sha256:e9d912827f872c029017a53f0ef2180b327c3f7fd23c87229f7a8e8b70031d4d"}, -] - -[package.extras] -lint = ["mypy", "ruff (==0.5.5)", "types-docutils"] -standalone = ["Sphinx (>=5)"] -test = ["pytest"] - -[[package]] -name = "sphinxcontrib-towncrier" -version = "0.4.0a0" -description = "An RST directive for injecting a Towncrier-generated changelog draft containing fragments for the unreleased (next) project version" -optional = false -python-versions = ">=3.6" -groups = ["docs"] -files = [ - {file = "sphinxcontrib-towncrier-0.4.0a0.tar.gz", hash = "sha256:d9b1513fc07781432dd3a0b2ca797cfe0e99e9b5bc5e5c8bf112d5d142afb6dc"}, - {file = "sphinxcontrib_towncrier-0.4.0a0-py3-none-any.whl", hash = "sha256:ec734e3d0920e2ce26e99681119f398a9e1fc0aa6c2d7ed1f052f1219dcd4653"}, -] - -[package.dependencies] -sphinx = "*" -towncrier = ">=19.2" - [[package]] name = "taskipy" version = "1.14.1" @@ -1583,25 +1648,25 @@ files = [ [[package]] name = "towncrier" -version = "23.11.0" +version = "24.8.0" description = "Building newsfiles for your project." 
optional = false python-versions = ">=3.8" groups = ["docs", "release"] files = [ - {file = "towncrier-23.11.0-py3-none-any.whl", hash = "sha256:2e519ca619426d189e3c98c99558fe8be50c9ced13ea1fc20a4a353a95d2ded7"}, - {file = "towncrier-23.11.0.tar.gz", hash = "sha256:13937c247e3f8ae20ac44d895cf5f96a60ad46cfdcc1671759530d7837d9ee5d"}, + {file = "towncrier-24.8.0-py3-none-any.whl", hash = "sha256:9343209592b839209cdf28c339ba45792fbfe9775b5f9c177462fd693e127d8d"}, + {file = "towncrier-24.8.0.tar.gz", hash = "sha256:013423ee7eed102b2f393c287d22d95f66f1a3ea10a4baa82d298001a7f18af3"}, ] [package.dependencies] click = "*" +importlib-metadata = {version = ">=4.6", markers = "python_version < \"3.10\""} importlib-resources = {version = ">=5", markers = "python_version < \"3.10\""} -incremental = "*" jinja2 = "*" tomli = {version = "*", markers = "python_version < \"3.11\""} [package.extras] -dev = ["furo", "packaging", "sphinx (>=5)", "twisted"] +dev = ["furo (>=2024.05.06)", "nox", "packaging", "sphinx (>=5)", "twisted"] [[package]] name = "typing-extensions" @@ -1609,7 +1674,7 @@ version = "4.14.0" description = "Backported and Experimental Type Hints for Python 3.9+" optional = false python-versions = ">=3.9" -groups = ["main", "test"] +groups = ["main", "docs", "test"] files = [ {file = "typing_extensions-4.14.0-py3-none-any.whl", hash = "sha256:a1514509136dd0b477638fc68d6a91497af5076466ad0fa6c338e44e359944af"}, {file = "typing_extensions-4.14.0.tar.gz", hash = "sha256:8676b788e32f02ab42d9e7c61324048ae4c6d844a399eebace3d4979d75ceef4"}, @@ -1633,6 +1698,21 @@ h2 = ["h2 (>=4,<5)"] socks = ["pysocks (>=1.5.6,!=1.5.7,<2.0)"] zstd = ["zstandard (>=0.18.0)"] +[[package]] +name = "verspec" +version = "0.1.0" +description = "Flexible version handling" +optional = false +python-versions = "*" +groups = ["docs"] +files = [ + {file = "verspec-0.1.0-py3-none-any.whl", hash = "sha256:741877d5633cc9464c45a469ae2a31e801e6dbbaa85b9675d481cda100f11c31"}, + {file = "verspec-0.1.0.tar.gz", 
hash = "sha256:c4504ca697b2056cdb4bfa7121461f5a0e81809255b41c03dda4ba823637c01e"}, +] + +[package.extras] +test = ["coverage", "flake8 (>=3.7)", "mypy", "pretend", "pytest"] + [[package]] name = "virtualenv" version = "20.26.6" @@ -1654,6 +1734,49 @@ platformdirs = ">=3.9.1,<5" docs = ["furo (>=2023.7.26)", "proselint (>=0.13)", "sphinx (>=7.1.2,!=7.3)", "sphinx-argparse (>=0.4)", "sphinxcontrib-towncrier (>=0.2.1a0)", "towncrier (>=23.6)"] test = ["covdefaults (>=2.3)", "coverage (>=7.2.7)", "coverage-enable-subprocess (>=1)", "flaky (>=3.7)", "packaging (>=23.1)", "pytest (>=7.4)", "pytest-env (>=0.8.2)", "pytest-freezer (>=0.4.8) ; platform_python_implementation == \"PyPy\" or platform_python_implementation == \"CPython\" and sys_platform == \"win32\" and python_version >= \"3.13\"", "pytest-mock (>=3.11.1)", "pytest-randomly (>=3.12)", "pytest-timeout (>=2.1)", "setuptools (>=68)", "time-machine (>=2.10) ; platform_python_implementation == \"CPython\""] +[[package]] +name = "watchdog" +version = "6.0.0" +description = "Filesystem events monitoring" +optional = false +python-versions = ">=3.9" +groups = ["docs"] +files = [ + {file = "watchdog-6.0.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:d1cdb490583ebd691c012b3d6dae011000fe42edb7a82ece80965b42abd61f26"}, + {file = "watchdog-6.0.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:bc64ab3bdb6a04d69d4023b29422170b74681784ffb9463ed4870cf2f3e66112"}, + {file = "watchdog-6.0.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:c897ac1b55c5a1461e16dae288d22bb2e412ba9807df8397a635d88f671d36c3"}, + {file = "watchdog-6.0.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:6eb11feb5a0d452ee41f824e271ca311a09e250441c262ca2fd7ebcf2461a06c"}, + {file = "watchdog-6.0.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:ef810fbf7b781a5a593894e4f439773830bdecb885e6880d957d5b9382a960d2"}, + {file = "watchdog-6.0.0-cp311-cp311-macosx_11_0_arm64.whl", hash = 
"sha256:afd0fe1b2270917c5e23c2a65ce50c2a4abb63daafb0d419fde368e272a76b7c"}, + {file = "watchdog-6.0.0-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:bdd4e6f14b8b18c334febb9c4425a878a2ac20efd1e0b231978e7b150f92a948"}, + {file = "watchdog-6.0.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:c7c15dda13c4eb00d6fb6fc508b3c0ed88b9d5d374056b239c4ad1611125c860"}, + {file = "watchdog-6.0.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:6f10cb2d5902447c7d0da897e2c6768bca89174d0c6e1e30abec5421af97a5b0"}, + {file = "watchdog-6.0.0-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:490ab2ef84f11129844c23fb14ecf30ef3d8a6abafd3754a6f75ca1e6654136c"}, + {file = "watchdog-6.0.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:76aae96b00ae814b181bb25b1b98076d5fc84e8a53cd8885a318b42b6d3a5134"}, + {file = "watchdog-6.0.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:a175f755fc2279e0b7312c0035d52e27211a5bc39719dd529625b1930917345b"}, + {file = "watchdog-6.0.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:e6f0e77c9417e7cd62af82529b10563db3423625c5fce018430b249bf977f9e8"}, + {file = "watchdog-6.0.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:90c8e78f3b94014f7aaae121e6b909674df5b46ec24d6bebc45c44c56729af2a"}, + {file = "watchdog-6.0.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:e7631a77ffb1f7d2eefa4445ebbee491c720a5661ddf6df3498ebecae5ed375c"}, + {file = "watchdog-6.0.0-pp310-pypy310_pp73-macosx_10_15_x86_64.whl", hash = "sha256:c7ac31a19f4545dd92fc25d200694098f42c9a8e391bc00bdd362c5736dbf881"}, + {file = "watchdog-6.0.0-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:9513f27a1a582d9808cf21a07dae516f0fab1cf2d7683a742c498b93eedabb11"}, + {file = "watchdog-6.0.0-pp39-pypy39_pp73-macosx_10_15_x86_64.whl", hash = "sha256:7a0e56874cfbc4b9b05c60c8a1926fedf56324bb08cfbc188969777940aef3aa"}, + {file = "watchdog-6.0.0-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = 
"sha256:e6439e374fc012255b4ec786ae3c4bc838cd7309a540e5fe0952d03687d8804e"}, + {file = "watchdog-6.0.0-py3-none-manylinux2014_aarch64.whl", hash = "sha256:7607498efa04a3542ae3e05e64da8202e58159aa1fa4acddf7678d34a35d4f13"}, + {file = "watchdog-6.0.0-py3-none-manylinux2014_armv7l.whl", hash = "sha256:9041567ee8953024c83343288ccc458fd0a2d811d6a0fd68c4c22609e3490379"}, + {file = "watchdog-6.0.0-py3-none-manylinux2014_i686.whl", hash = "sha256:82dc3e3143c7e38ec49d61af98d6558288c415eac98486a5c581726e0737c00e"}, + {file = "watchdog-6.0.0-py3-none-manylinux2014_ppc64.whl", hash = "sha256:212ac9b8bf1161dc91bd09c048048a95ca3a4c4f5e5d4a7d1b1a7d5752a7f96f"}, + {file = "watchdog-6.0.0-py3-none-manylinux2014_ppc64le.whl", hash = "sha256:e3df4cbb9a450c6d49318f6d14f4bbc80d763fa587ba46ec86f99f9e6876bb26"}, + {file = "watchdog-6.0.0-py3-none-manylinux2014_s390x.whl", hash = "sha256:2cce7cfc2008eb51feb6aab51251fd79b85d9894e98ba847408f662b3395ca3c"}, + {file = "watchdog-6.0.0-py3-none-manylinux2014_x86_64.whl", hash = "sha256:20ffe5b202af80ab4266dcd3e91aae72bf2da48c0d33bdb15c66658e685e94e2"}, + {file = "watchdog-6.0.0-py3-none-win32.whl", hash = "sha256:07df1fdd701c5d4c8e55ef6cf55b8f0120fe1aef7ef39a1c6fc6bc2e606d517a"}, + {file = "watchdog-6.0.0-py3-none-win_amd64.whl", hash = "sha256:cbafb470cf848d93b5d013e2ecb245d4aa1c8fd0504e863ccefa32445359d680"}, + {file = "watchdog-6.0.0-py3-none-win_ia64.whl", hash = "sha256:a1914259fa9e1454315171103c6a30961236f508b9b623eae470268bbcc6a22f"}, + {file = "watchdog-6.0.0.tar.gz", hash = "sha256:9ddf7c82fda3ae8e24decda1338ede66e1c99883db93711d8fb941eaa2d8c282"}, +] + +[package.extras] +watchmedo = ["PyYAML (>=3.10)"] + [[package]] name = "zipp" version = "3.19.1" @@ -1661,11 +1784,11 @@ description = "Backport of pathlib-compatible object wrapper for zip files" optional = false python-versions = ">=3.8" groups = ["docs", "release"] -markers = "python_version == \"3.9\"" files = [ {file = "zipp-3.19.1-py3-none-any.whl", hash = 
"sha256:2828e64edb5386ea6a52e7ba7cdb17bb30a73a858f5eb6eb93d8d36f5ea26091"}, {file = "zipp-3.19.1.tar.gz", hash = "sha256:35427f6d5594f4acf82d25541438348c26736fa9b3afa2754bcd63cdb99d8e8f"}, ] +markers = {release = "python_version == \"3.9\""} [package.extras] doc = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-lint"] @@ -1674,4 +1797,4 @@ test = ["big-O", "jaraco.functools", "jaraco.itertools", "jaraco.test", "more-it [metadata] lock-version = "2.1" python-versions = ">=3.9,<4" -content-hash = "136b7b04ae04dde42733bed8e23bfed4ac4e6c888d056e88c41b9ec8ab3fc45e" +content-hash = "e2de4bcf6cb9bd8544e8f55193fdd2a19044b4ae715768896eef186cb3385b39" diff --git a/pyproject.toml b/pyproject.toml index 75ac2497..3c07f1c1 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -54,7 +54,7 @@ slotscheck = ">=0.16.1,<0.20.0" basedpyright = "^1.13.3" [tool.poetry.group.release.dependencies] -towncrier = ">=23,<24.7" # temporary pin, as 24.7 is incompatible with sphinxcontrib-towncrier +towncrier = "^24.7.0" [tool.poetry.group.release-ci] optional = true @@ -63,15 +63,12 @@ optional = true poetry-dynamic-versioning = ">=1.4.0,<1.9" [tool.poetry.group.docs.dependencies] -sphinx = ">=6.2.1,<8.0.0" -tomli = { version = "^2.0.1", python = "<3.11" } -m2r2 = "^0.3.3.post2" -packaging = ">=23.1,<26.0" -sphinx-autodoc-typehints = ">=1.23,<3.0" -sphinx-copybutton = "^0.5.2" -furo = ">=2022.12.7" -sphinxcontrib-towncrier = ">=0.3.2,<0.5.0" -pytest = "^7.3.1" # Required to import the gen_test_serializable function to list it in the docs +mkdocs = "^1.6.1" +mkdocs-material = "^9.5.30" +mike = "^2.1.2" +markdown-exec = { extras = ["ansi"], version = "^1.9.3" } +mkdocstrings-python-betterrefs = "^1.0.2" +towncrier = "^24.7.0" [tool.poetry.group.docs-ci] optional = true @@ -137,17 +134,6 @@ ignore = [ "D203", # Blank line required before class docstring "D213", # Multi-line summary should start at second line (incompatible with D212) 
"D301", # Use r""" if any backslashes in a docstring - "D405", # Section name should be properly capitalized - "D406", # Section name should end with a newline - "D407", # Missing dashed underline after section - "D408", # Section underline should be in the line following the section's name - "D409", # Section underline should match the length of its name - "D410", # Missing blank line after section - "D411", # Missing blank line before section - "D412", # No blank lines allowed between a section header and its content - "D413", # Missing blank line after last section - "D414", # Section has no content - "D416", # Section name should end with a colon "D417", # Missing argument descrition in the docstring "ANN002", # Missing type annotation for *args @@ -196,12 +182,9 @@ ignore = [ "ANN", # flake8-annotations "S101", # Use of assert ] -"docs/conf.py" = [ - "INP", # allow implicit namespace (pep 420) -] -"docs/extensions/*" = [ - "D", # pydocstyle - "INP", # allow implicit namespace (pep 420) +"docs/scripts/*" = [ + "INP", # allow implicit namespace (pep 420) + "T201", # allow prints ] ".github/scripts/*" = [ "D", # pydocstyle @@ -229,6 +212,9 @@ max-statements = 250 [tool.ruff.lint.flake8-tidy-imports] ban-relative-imports = "all" +[tool.ruff.lint.pydocstyle] +convention = "google" + [tool.ruff.format] line-ending = "lf" @@ -306,7 +292,7 @@ retest = "pytest -v --last-failed" test-nocov = "pytest -v --no-cov --failed-first" retest-nocov = "pytest -v --no-cov --last-failed" changelog-preview = "towncrier build --draft --version next" -docs = "sphinx-build -b dirhtml -d ./docs/_build/doctrees -W -E -T --keep-going ./docs ./docs/_build/html" +docs = "mkdocs serve" [tool.poetry-dynamic-versioning] enable = true diff --git a/tests/helpers.py b/tests/helpers.py index 54832b71..bfd7bce8 100644 --- a/tests/helpers.py +++ b/tests/helpers.py @@ -31,8 +31,8 @@ def synchronize(f: Callable[P, Coroutine[Any, Any, T]]) -> Callable[P, T]: """Take an asynchronous function, and return 
a synchronous alternative. This is needed because we sometimes want to test asynchronous behavior in a synchronous test function, - where we can't simply await something. This function uses `asyncio.run` and generates a wrapper - around the original asynchronous function, that awaits the result in a blocking synchronous way, + where we can't simply await something. This function uses [`asyncio.run`][asyncio.run] and generates a + wrapper around the original asynchronous function, that awaits the result in a blocking synchronous way, returning the obtained value. """ @@ -45,18 +45,18 @@ def wrapper(*args: P.args, **kwargs: P.kwargs) -> T: class SynchronizedMixin: """Class acting as another wrapped object, with all async methods synchronized. - This class needs :attr:`._WRAPPED_ATTRIBUTE` class variable to be set as the name of the internally + This class needs [`_WRAPPED_ATTRIBUTE`][.] class variable to be set as the name of the internally held attribute, holding the object we'll be wrapping around. Child classes of this mixin will have their lookup logic changed, to instead perform a lookup on the wrapped attribute. Only if that lookup fails, we fallback to this class, meaning if both the wrapped attribute and this class have some attribute defined, the attribute from the wrapped - object is returned. The only exceptions to this are lookup of the ``_WRAPPED_ATTRIBUTE`` variable, - and of the attribute name stored under the ``_WRAPPED_ATTRIBUTE`` (the wrapped object). + object is returned. The only exceptions to this are lookup of the `_WRAPPED_ATTRIBUTE` variable, + and of the attribute name stored under the `_WRAPPED_ATTRIBUTE` (the wrapped object). If the attribute held by the wrapped object is an asynchronous function, instead of returning it - directly, the :func:`.synchronize` function will be called, returning a wrapped synchronous - alternative for the requested async function. + directly, the [`synchronize`][(m).] 
function will be called, returning a wrapped synchronous alternative + for the requested async function. This is useful when we need to quickly create a synchronous alternative to a class holding async methods. However it isn't useful in production, since will cause typing issues (attributes will be accessible, but @@ -69,8 +69,8 @@ class SynchronizedMixin: def __getattribute__(self, /, name: str) -> Any: """Return attributes of the wrapped object, if the attribute is a coroutine function, synchronize it. - The only exception to this behavior is getting the :attr:`._WRAPPED_ATTRIBUTE` variable itself, or the - attribute named as the content of the ``_WRAPPED_ATTRIBUTE`` variable. All other attribute access will + The only exception to this behavior is getting the [`_WRAPPED_ATTRIBUTE`][..] variable itself, or the + attribute named as the content of the `_WRAPPED_ATTRIBUTE` variable. All other attribute access will be delegated to the wrapped attribute. If the wrapped object doesn't have given attribute, the lookup will fallback to regular lookup for variables belonging to this class. """ @@ -91,9 +91,9 @@ def __getattribute__(self, /, name: str) -> Any: def __setattr__(self, /, name: str, value: object) -> None: """Allow for changing attributes of the wrapped object. - * If wrapped object isn't yet set, fall back to :meth:`~object.__setattr__` of this class. + * If wrapped object isn't yet set, fall back to [`__setattr__`][?object.] of this class. * If wrapped object doesn't already contain the attribute we want to set, also fallback to this class. - * Otherwise, run ``__setattr__`` on it to update it. + * Otherwise, run `__setattr__` on it to update it. """ try: wrapped = getattr(self, self._WRAPPED_ATTRIBUTE) @@ -107,29 +107,29 @@ def __setattr__(self, /, name: str, value: object) -> None: class UnpropagatingMockMixin(Generic[T_Mock]): - """Provides common functionality for our :class:`~unittest.mock.Mock` classes. 
+ """Provides common functionality for our [`Mock`][unittest.mock.] classes. By default, mock objects propagate themselves by returning a new instance of the same mock class, with same initialization attributes. This is done whenever we're accessing new attributes that mock class. This propagation makes sense for simple mocks without any additional restrictions, however when - dealing with limited mocks to some ``spec_set``, it doesn't usually make sense to propagate - those same ``spec_set`` restrictions, since we generally don't have attributes/methods of a + dealing with limited mocks to some `spec_set`, it doesn't usually make sense to propagate + those same `spec_set` restrictions, since we generally don't have attributes/methods of a class be of/return the same class. This mixin class stops this propagation, and instead returns instances of specified mock class, - defined in :attr:`.child_mock_type` class variable, which is by default set to - :class:`~unittest.mock.MagicMock`, as it can safely represent most objects. + defined in [`child_mock_type`][.] class variable, which is by default set to [`MagicMock`][unittest.mock.], + as it can safely represent most objects. - .. note: + Note: This propagation handling will only be done for the mock classes that inherited from this - mixin class. That means if the :attr:`.child_mock_type` is one of the regular mock classes, + mixin class. That means if the [`child_mock_type`][.] is one of the regular mock classes, and the mock is propagated, a regular mock class is returned as that new attribute. This regular class then won't have the same overrides, and will therefore propagate itself, like any other mock class would. - If you wish to counteract this, you can set the :attr:`.child_mock_type` to a mock class + If you wish to counteract this, you can set the [`child_mock_type`][.] to a mock class that also inherits from this mixin class, perhaps to your class itself, overriding any propagation recursively. 
""" @@ -142,11 +142,11 @@ class be of/return the same class. _extract_mock_name: Callable[[], str] def _get_child_mock(self, **kwargs) -> T_Mock: - """Make :attr:`.child_mock_type`` instances instead of instances of the same class. + """Make [`child_mock_type`][..] instances instead of instances of the same class. By default, this method creates a new mock instance of the same original class, and passes over the same initialization arguments. This overrides that behavior to instead create an - instance of :attr:`.child_mock_type` class. + instance of `child_mock_type` class. """ # Mocks can be sealed, in which case we wouldn't want to allow propagation of any kind # and rather raise an AttributeError, informing that given attr isn't accessible @@ -163,9 +163,9 @@ def _get_child_mock(self, **kwargs) -> T_Mock: class CustomMockMixin(UnpropagatingMockMixin[T_Mock], Generic[T_Mock]): """Provides common functionality for our custom mock types. - * Stops propagation of same ``spec_set`` restricted mock in child mocks - (see :class:`.UnpropagatingMockMixin` for more info) - * Allows using the ``spec_set`` attribute as class attribute + * Stops propagation of same `spec_set` restricted mock in child mocks + (see [`UnpropagatingMockMixin`][(m).] for more info) + * Allows using the `spec_set` attribute as class attribute """ spec_set = None @@ -184,13 +184,15 @@ def isexception(obj: object) -> TypeIs[type[Exception] | TestExc]: class TestExc(NamedTuple): """Named tuple to check if an exception is raised with a specific message. - :param exception: The exception type. - :param match: If specified, a string containing a regular expression, or a regular expression object, that is - tested against the string representation of the exception using :func:`re.search`. + Args: + exception: The exception type. 
+ match: + If specified, a string containing a regular expression, or a regular expression object, that is + tested against the string representation of the exception using [`re.search`][re.search]. - :param kwargs: The keyword arguments passed to the exception. + kwargs: The keyword arguments passed to the exception. - If :attr:`kwargs` is not None, the exception instance will need to have the same attributes with the same values. + If [`kwargs`][.] is not `None`, the exception instance will need to have the same attributes with the same values. """ exception: type[Exception] | tuple[type[Exception], ...] @@ -199,7 +201,7 @@ class TestExc(NamedTuple): @classmethod def from_exception(cls, exception: type[Exception] | tuple[type[Exception], ...] | TestExc) -> TestExc: - """Create a :class:`TestExc` from an exception, does nothing if the object is already a :class:`TestExc`.""" + """Create a [`TestExc`][(m).] from an exception, does nothing if the object is already a `TestExc`.""" if isinstance(exception, TestExc): return exception return cls(exception) @@ -218,31 +220,32 @@ def gen_serializable_test( This function generates tests for the serialization, deserialization, validation, and deserialization error handling - :param context: The context to add the test functions to. This is usually `globals()`. - :param cls: The serializable class to test. - :param fields: A list of tuples containing the field names and types of the serializable class. - :param serialize_deserialize: A list of tuples containing: - - The tuple representing the arguments to pass to the :class:`mcproto.utils.abc.Serializable` class - - The expected bytes - :param validation_fail: A list of tuples containing the arguments to pass to the - :class:`mcproto.utils.abc.Serializable` class and the expected exception, either as is or wrapped in a - :class:`TestExc` object. 
- :param deserialization_fail: A list of tuples containing the bytes to pass to the :meth:`deserialize` method of the - class and the expected exception, either as is or wrapped in a :class:`TestExc` object. + Args: + context: The context to add the test functions to. This is usually `globals()`. + cls: The serializable class to test. + fields: A list of tuples containing the field names and types of the serializable class. + serialize_deserialize: + A list of tuples containing: + + - The tuple representing the arguments to pass to the [`Serializable`][mcproto.utils.abc.] class + - The expected bytes + validation_fail: + A list of tuples containing the arguments to pass to the [`Serializable`][mcproto.utils.abc.] class + and the expected exception, either as is or wrapped in a [`TestExc`][(m).] object. + deserialization_fail: + A list of tuples containing the bytes to pass to the + [`deserialize`][mcproto.utils.abc.Serializable.deserialize] method of the class and the expected exception, + either as is or wrapped in a [`TestExc`][(m).] object. Example usage: + See `tests.mcproto.utils.test_serializable.py` (specifically the `ToyClass`) - .. literalinclude:: /../tests/mcproto/utils/test_serializable.py - :start-after: # region ToyClass - :linenos: - :language: python + This will add 1 class test with 4 test functions containing the tests for serialization, deserialization, + validation, and deserialization error handling - This will add 1 class test with 4 test functions containing the tests for serialization, deserialization, - validation, and deserialization error handling - - .. note:: - The test cases will use :meth:`__eq__` to compare the objects, so make sure to implement it in the class if - you are not using the autogenerated method from :func:`attrs.define`. + Note: + The test cases will use `__eq__` to compare the objects, so make sure to implement it in the class if + you are not using the autogenerated method from [`attrs.define`][attrs.define]. 
""" # This holds the parameters for the serialization and deserialization tests diff --git a/tests/mcproto/protocol/helpers.py b/tests/mcproto/protocol/helpers.py index 569ecf99..d5fad777 100644 --- a/tests/mcproto/protocol/helpers.py +++ b/tests/mcproto/protocol/helpers.py @@ -14,18 +14,19 @@ def __init__(self, *a, **kw): @override def __call__(self, data: bytes) -> None: - """Override mock's ``__call__`` to extend our :attr:`.combined_data` bytearray. + """Override mock's `__call__` to extend our `combined_data` bytearray. This allows us to keep track of exactly what data was written by the mocked write function - in total, rather than only having tools like :meth:`.assert_called_with`, which might let us - get the data from individual calls, but not the combined data, which is what we'll need. + in total, rather than only having tools like [`assert_called_with`][?unittest.mock.Mock.], + which might let us get the data from individual calls, but not the combined data, which is + what we'll need. """ self.combined_data.extend(data) return super().__call__(data) @override def assert_has_data(self, data: bytearray, ensure_called: bool = True) -> None: - """Ensure that the combined write data by the mocked function matches expected ``data``.""" + """Ensure that the combined write data by the mocked function matches expected `data`.""" if ensure_called: self.assert_called() @@ -48,10 +49,10 @@ def __init__(self, *a, combined_data: bytearray | None = None, **kw): @override def __call__(self, length: int) -> bytearray: - """Override mock's __call__ to make it return part of our :attr:`.combined_data` bytearray. + """Override mock's `__call__` to make it return part of our `combined_data` bytearray. This allows us to make the return value always be the next requested part (length) of - the :attr:`.combined_data`. It would be difficult to replicate this with regular mocks, + the `combined_data`. 
It would be difficult to replicate this with regular mocks, because some functions can end up making multiple read calls, and each time the result needs to be different (the next part). """ @@ -61,7 +62,7 @@ def __call__(self, length: int) -> bytearray: @override def assert_read_everything(self, ensure_called: bool = True) -> None: - """Ensure that the passed :attr:`.combined_data` was fully read and depleted.""" + """Ensure that the passed `combined_data` was fully read and depleted.""" if ensure_called: self.assert_called() diff --git a/tests/mcproto/protocol/test_base_io.py b/tests/mcproto/protocol/test_base_io.py index 86e4586b..f4aa0f68 100644 --- a/tests/mcproto/protocol/test_base_io.py +++ b/tests/mcproto/protocol/test_base_io.py @@ -30,20 +30,20 @@ class SyncWriter(BaseSyncWriter): - """Initializable concrete implementation of :class:`~mcproto.protocol.base_io.BaseSyncWriter` ABC.""" + """Initializable concrete implementation of [`BaseSyncWriter`][mcproto.protocol.base_io.] ABC.""" @override def write(self, data: bytes | bytearray) -> None: """Concrete implementation of abstract write method. - Since :class:`abc.ABC` classes can't be initialized if they have any abstract methods + Since [`ABC`][?abc.] classes can't be initialized if they have any abstract methods which weren't overridden with a concrete implementations, this is a fake implementation, without any actual logic, purely to allow the initialization of this class. - This method is expected to be mocked using :class:`~tests.mcproto.protocol.helpers.WriteFunctionMock` + This method is expected to be mocked using [`WriteFunctionMock`][tests.mcproto.protocol.helpers.] if it's supposed to get called during testing. - If this method gets called without being mocked, it will raise :exc:`NotImplementedError`. + If this method gets called without being mocked, it will raise [`NotImplementedError`][NotImplementedError]. 
""" raise NotImplementedError( "This concrete override of abstract write method isn't intended for actual use!\n" @@ -55,20 +55,20 @@ def write(self, data: bytes | bytearray) -> None: class SyncReader(BaseSyncReader): - """Testable concrete implementation of :class:`~mcproto.protocol.base_io.BaseSyncReader` ABC.""" + """Testable concrete implementation of [`BaseSyncReader`][mcproto.protocol.base_io.] ABC.""" @override def read(self, length: int) -> bytes: """Concrete implementation of abstract read method. - Since :class:`abc.ABC` classes can't be initialized if they have any abstract methods + Since [`ABC`][?abc.] classes can't be initialized if they have any abstract methods which weren't overridden with a concrete implementations, this is a fake implementation, without any actual logic, purely to allow the initialization of this class. - This method is expected to be mocked using :class:`~tests.mcproto.protocol.helpers.ReadFunctionMock` + This method is expected to be mocked using [`ReadFunctionMock`][tests.mcproto.protocol.helpers.] if it's supposed to get called during testing. - If this method gets called without being mocked, it will raise :exc:`NotImplementedError`. + If this method gets called without being mocked, it will raise [`NotImplementedError`][NotImplementedError]. """ raise NotImplementedError( "This concrete override of abstract read method isn't intended for actual use!\n" @@ -80,20 +80,20 @@ def read(self, length: int) -> bytes: class AsyncWriter(BaseAsyncWriter): - """Initializable concrete implementation of :class:`~mcproto.protocol.base_io.BaseAsyncWriter` ABC.""" + """Initializable concrete implementation of [`BaseAsyncWriter`][mcproto.protocol.base_io.] ABC.""" @override async def write(self, data: bytes | bytearray) -> None: """Concrete implementation of abstract write method. - Since :class:`abc.ABC` classes can't be initialized if they have any abstract methods + Since [`ABC`][?abc.] 
classes can't be initialized if they have any abstract methods which weren't overridden with a concrete implementations, this is a fake implementation, without any actual logic, purely to allow the initialization of this class. - This method is expected to be mocked using :class:`~tests.mcproto.protocol.helpers.WriteFunctionAsyncMock` + This method is expected to be mocked using [`WriteFunctionAsyncMock`][tests.mcproto.protocol.helpers.] if it's supposed to get called during testing. - If this method gets called without being mocked, it will raise :exc:`NotImplementedError`. + If this method gets called without being mocked, it will raise [`NotImplementedError`][NotImplementedError] """ raise NotImplementedError( "This concrete override of abstract write method isn't intended for actual use!\n" @@ -105,20 +105,20 @@ async def write(self, data: bytes | bytearray) -> None: class AsyncReader(BaseAsyncReader): - """Testable concrete implementation of BaseAsyncReader ABC.""" + """Initializable concrete implementation of [`BaseAsyncReader`][mcproto.protocol.base_io.] ABC.""" @override async def read(self, length: int) -> bytes: """Concrete implementation of abstract read method. - Since :class:`abc.ABC` classes can't be initialized if they have any abstract methods + Since [`ABC`][?abc.] classes can't be initialized if they have any abstract methods which weren't overridden with a concrete implementations, this is a fake implementation, without any actual logic, purely to allow the initialization of this class. - This method is expected to be mocked using :class:`~tests.mcproto.protocol.helpers.ReadFunctionAsyncMock` + This method is expected to be mocked using [`ReadFunctionAsyncMock`][tests.mcproto.protocol.helpers.] if it's supposed to get called during testing. - If this method gets called without being mocked, it will raise :exc:`NotImplementedError`. + If this method gets called without being mocked, it will raise [`NotImplementedError`][NotImplementedError]. 
""" raise NotImplementedError( "This concrete override of abstract read method isn't intended for actual use!\n" @@ -134,9 +134,9 @@ async def read(self, length: int) -> bytes: class WrappedAsyncReader(SynchronizedMixin): - """Wrapped synchronous implementation of asynchronous :class:`.AsyncReader` class. + """Wrapped synchronous implementation of asynchronous [`AsyncReader`][(m).] class. - This essentially mimics :class:`~mcproto.protocol.base_io.BaseSyncReader`. + This essentially mimics [`BaseSyncReader`][mcproto.protocol.base_io.]. """ _WRAPPED_ATTRIBUTE = "_reader" @@ -146,9 +146,9 @@ def __init__(self): class WrappedAsyncWriter(SynchronizedMixin): - """Wrapped synchronous implementation of asynchronous :class:`.AsyncWriter` class. + """Wrapped synchronous implementation of asynchronous [`AsyncWriter`][(m).] class. - This essentially mimics :class:`~mcproto.protocol.base_io.BaseSyncWriter`. + This essentially mimics [`BaseSyncWriter`][mcproto.protocol.base_io.]. """ _WRAPPED_ATTRIBUTE = "_writer" @@ -255,7 +255,7 @@ def test_write_value_out_of_range( fmt: INT_FORMATS_TYPE, value: Any, ): - """Test writing out of range values for the given format raises :exc:`struct.error`.""" + """Test writing out of range values for the given format raises `struct.error`.""" with pytest.raises(struct.error): self.writer.write_value(fmt, value) @@ -289,7 +289,7 @@ def test_write_varuint(self, number: int, expected_bytes: list[int], write_mock: ], ) def test_write_varuint_out_of_range(self, write_value: int, max_bits: int): - """Test writing out of range varuints raises :exc:`ValueError`.""" + """Test writing out of range varuints raises `ValueError`.""" with pytest.raises(ValueError): self.writer._write_varuint(write_value, max_bits=max_bits) # pyright: ignore[reportPrivateUsage] @@ -359,12 +359,12 @@ def test_write_utf(self, string: str, expected_bytes: list[int], write_mock: Wri @pytest.mark.skipif(platform.system() == "Windows", reason="environment variable limit on 
Windows") def test_write_utf_limit(self, write_mock: WriteFunctionMock): - """Test writing a UTF string too big raises a :exc:`ValueError`.""" + """Test writing a UTF string too big raises a `ValueError`.""" with pytest.raises(ValueError, match="Maximum character limit for writing strings is 32767 characters."): self.writer.write_utf("a" * (32768)) def test_write_optional_true(self, method_mock: Mock | AsyncMock, write_mock: WriteFunctionMock): - """Test writing non-``None`` value writes ``True`` and runs the writer function.""" + """Test writing non-`None` value writes `True` and runs the writer function.""" mock_v = Mock() mock_f = method_mock() _ = self.writer.write_optional(mock_v, mock_f) @@ -372,7 +372,7 @@ def test_write_optional_true(self, method_mock: Mock | AsyncMock, write_mock: Wr write_mock.assert_has_data(bytearray([1])) def test_write_optional_false(self, method_mock: Mock | AsyncMock, write_mock: WriteFunctionMock): - """Test writing ``None`` value should write ``False`` and skip running the writer function.""" + """Test writing `None` value should write `False` and skip running the writer function.""" mock_f = method_mock() _ = self.writer.write_optional(None, mock_f) mock_f.assert_not_called() @@ -485,7 +485,7 @@ def test_read_varuint(self, read_bytes: list[int], expected_value: int, read_moc ], ) def test_read_varuint_out_of_range(self, read_bytes: list[int], max_bits: int, read_mock: ReadFunctionMock): - """Test reading out-of-range varuints raises :exc:`IOError`.""" + """Test reading out-of-range varuints raises `IOError`.""" read_mock.combined_data = bytearray(read_bytes) with pytest.raises(IOError): _ = self.reader._read_varuint(max_bits=max_bits) # pyright: ignore[reportPrivateUsage] @@ -572,14 +572,14 @@ def test_read_utf_limit(self, read_bytes: list[int], read_mock: ReadFunctionMock _ = self.reader.read_utf() def test_read_optional_true(self, method_mock: Mock | AsyncMock, read_mock: ReadFunctionMock): - """Test reading optional runs 
reader function when first bool is ``True``.""" + """Test reading optional runs reader function when first bool is `True`.""" mock_f = method_mock() read_mock.combined_data = bytearray([1]) _ = self.reader.read_optional(mock_f) mock_f.assert_called_once_with() def test_read_optional_false(self, method_mock: Mock | AsyncMock, read_mock: ReadFunctionMock): - """Test reading optional doesn't run reader function when first bool is ``False``.""" + """Test reading optional doesn't run reader function when first bool is `False`.""" mock_f = method_mock() read_mock.combined_data = bytearray([0]) _ = self.reader.read_optional(mock_f) @@ -591,7 +591,7 @@ def test_read_optional_false(self, method_mock: Mock | AsyncMock, read_mock: Rea class TestBaseSyncWriter(WriterTests[SyncWriter]): - """Tests for individual write methods implemented in :class:`~mcproto.protocol.base_io.BaseSyncWriter`.""" + """Tests for individual write methods implemented in [`BaseSyncWriter`][mcproto.protocol.base_io.].""" @override @classmethod @@ -600,7 +600,7 @@ def setup_class(cls): class TestBaseSyncReader(ReaderTests[SyncReader]): - """Tests for individual write methods implemented in :class:`~mcproto.protocol.base_io.BaseSyncReader`.""" + """Tests for individual read methods implemented in [`BaseSyncReader`][mcproto.protocol.base_io.].""" @override @classmethod @@ -609,7 +609,7 @@ def setup_class(cls): class TestBaseAsyncWriter(WriterTests[WrappedAsyncWriter]): - """Tests for individual write methods implemented in :class:`~mcproto.protocol.base_io.BaseSyncReader`.""" + """Tests for individual write methods implemented in [`BaseAsyncWriter`][mcproto.protocol.base_io.].""" @override @classmethod @@ -618,7 +618,7 @@ def setup_class(cls): class TestBaseAsyncReader(ReaderTests[WrappedAsyncReader]): - """Tests for individual write methods implemented in :class:`~mcproto.protocol.base_io.BaseSyncReader`.""" + """Tests for individual read methods implemented in
[`BaseAsyncReader`][mcproto.protocol.base_io.].""" @override @classmethod diff --git a/tests/mcproto/protocol/test_utils.py b/tests/mcproto/protocol/test_utils.py index 3757d89d..e33a747e 100644 --- a/tests/mcproto/protocol/test_utils.py +++ b/tests/mcproto/protocol/test_utils.py @@ -48,7 +48,7 @@ def test_to_twos_complement_negative(number: int, bits: int, expected_out: int): ], ) def test_to_twos_complement_range(number: int, bits: int): - """Test conversion to two's complement format for out of range numbers raises :exc:`ValueError`.""" + """Test conversion to two's complement format for out of range numbers raises `ValueError`.""" with pytest.raises(ValueError, match="out of range"): _ = to_twos_complement(number, bits) @@ -94,6 +94,6 @@ def test_from_twos_complement_negative(number: int, bits: int, expected_out: int ], ) def test_from_twos_complement_range(number: int, bits: int): - """Test conversion from two's complement format for out of range numbers raises :exc:`ValueError`.""" + """Test conversion from two's complement format for out of range numbers raises `ValueError`.""" with pytest.raises(ValueError, match="out of range"): _ = from_twos_complement(number, bits) diff --git a/tests/mcproto/test_connection.py b/tests/mcproto/test_connection.py index 7104554a..fa9eaef9 100644 --- a/tests/mcproto/test_connection.py +++ b/tests/mcproto/test_connection.py @@ -16,8 +16,8 @@ class MockSocket(CustomMockMixin[MagicMock], MagicMock): # pyright: ignore[reportUnsafeMultipleInheritance] """Mock version of a socket (synchronous), using our mocked writer and reader methods. - See :class:`tests.mcproto.protocol.helpers.ReadFunctionMock` and - :class:`tests.mcproto.protocol.helpers.WriteFunctionMock`. + See [`ReadFunctionMock`][tests.mcproto.protocol.helpers.] and + [`WriteFunctionMock`][tests.mcproto.protocol.helpers.].
""" spec_set = socket.socket @@ -31,7 +31,7 @@ def __init__(self, *args, read_data: bytearray | None = None, **kwargs) -> None: @override def send(self, data: bytes | bytearray) -> None: - """Mock version of send method, raising :exc:`OSError` if the socket was closed.""" + """Mock version of send method, raising [`OSError`][OSError] if the socket was closed.""" if self._closed: raise OSError(errno.EBADF, "Bad file descriptor") if isinstance(data, bytearray): @@ -40,14 +40,14 @@ def send(self, data: bytes | bytearray) -> None: @override def recv(self, length: int) -> bytearray: - """Mock version of recv method, raising :exc:`OSError` if the socket was closed.""" + """Mock version of recv method, raising [`OSError`][OSError] if the socket was closed.""" if self._closed: raise OSError(errno.EBADF, "Bad file descriptor") return self._recv(length) @override def close(self) -> None: - """Mock version of close method, setting :attr:`_closed` bool flag.""" + """Mock version of close method, setting `_closed` bool flag.""" self._closed = True @override @@ -56,7 +56,7 @@ def shutdown(self, __how: int, /) -> None: class MockStreamWriter(CustomMockMixin[MagicMock], MagicMock): # pyright: ignore[reportUnsafeMultipleInheritance]] - """Mock version of :class:`asyncio.StreamWriter` using our mocked writer method.""" + """Mock version of [`asyncio.StreamWriter`][asyncio.StreamWriter] using our mocked writer method.""" spec_set = asyncio.StreamWriter @@ -68,7 +68,7 @@ def __init__(self, *args, **kwargs): @override def write(self, data: bytes | bytearray) -> None: - """Mock version of write method, raising :exc:`OSError` if the writer was closed.""" + """Mock version of write method, raising [`OSError`][OSError] if the writer was closed.""" if self._closed: raise OSError(errno.EBADF, "Bad file descriptor") if isinstance(data, bytearray): @@ -77,12 +77,12 @@ def write(self, data: bytes | bytearray) -> None: @override def close(self) -> None: - """Mock version of close method, setting 
:attr:`_closed` bool flag.""" + """Mock version of close method, setting `_closed` bool flag.""" self._closed = True class MockStreamReader(CustomMockMixin[MagicMock], MagicMock): # pyright: ignore[reportUnsafeMultipleInheritance]] - """Mock version of :class:`asyncio.StreamReader` using our mocked reader method.""" + """Mock version of [`asyncio.StreamReader`][asyncio.StreamReader] using our mocked reader method.""" spec_set = asyncio.StreamReader @@ -101,7 +101,7 @@ class TestTCPSyncConnection: """Collection of tests for the synchronous TCP connection class.""" def make_connection(self, read_data: bytearray | None = None) -> TCPSyncConnection[MockSocket]: - """Create a new connection class using the :class:`MockSocket` class.""" + """Create a new connection class using the [`MockSocket`][(m).] class.""" if read_data is not None: read_data = read_data.copy() @@ -118,7 +118,7 @@ def test_read(self): conn.socket._recv.assert_read_everything() # pyright: ignore[reportPrivateUsage] def test_read_more_data_than_sent(self): - """Test reading more data than available raises :exc:`IOError`.""" + """Test reading more data than available raises [`IOError`][IOError].""" data = bytearray("test", "utf-8") conn = self.make_connection(data) @@ -189,7 +189,7 @@ def make_connection( self, read_data: bytearray | None = None, ) -> TCPAsyncConnection[MockStreamReader, MockStreamWriter]: - """Create a new connection class using the :class:`MockStreamReader` and :class:`MockStreamWriter` classes.""" + """Create a new connection class using [`MockStreamReader`][(m).] and [`MockStreamWriter`][(m).] 
classes.""" if read_data is not None: read_data = read_data.copy() @@ -206,7 +206,7 @@ async def test_read(self): conn.reader._read.assert_read_everything() # pyright: ignore[reportPrivateUsage] async def test_read_more_data_than_sent(self): - """Test reading more data than available raises :exc:`IOError`.""" + """Test reading more data than available raises [`IOError`][IOError].""" data = bytearray("test", "utf-8") conn = self.make_connection(data) diff --git a/tests/mcproto/utils/test_deprecation.py b/tests/mcproto/utils/test_deprecation.py index a1dd112b..1118aaa4 100644 --- a/tests/mcproto/utils/test_deprecation.py +++ b/tests/mcproto/utils/test_deprecation.py @@ -10,10 +10,10 @@ def _patch_project_version(monkeypatch: pytest.MonkeyPatch, version: str | None): - """Patch the project version reported by importlib.metadata.version. + """Patch the project version reported by `importlib.metadata.version`. This is used to simulate different project versions for testing purposes. - If ``version`` is ``None``, a :exc:`~importlib.metadata.PackageNotFoundError` will be raised + If `version` is `None`, a [`PackageNotFoundError`][?importlib.metadata.] will be raised when trying to get the project version. """ orig_version_func = importlib.metadata.version diff --git a/tests/mcproto/utils/test_serializable.py b/tests/mcproto/utils/test_serializable.py index 4fd22db5..b04d0fb0 100644 --- a/tests/mcproto/utils/test_serializable.py +++ b/tests/mcproto/utils/test_serializable.py @@ -24,7 +24,7 @@ def __init__(self, message: str, additional_data: Any): @final @define(init=True) class ToyClass(Serializable): - """Toy class for testing demonstrating the use of gen_serializable_test on `Serializable`.""" + """Toy class for testing demonstrating the use of `gen_serializable_test` on `Serializable`.""" a: int b: str | int